234c1c28bf4430c62d64dbd8ac227345a9c17441
[deliverable/linux.git] / drivers / scsi / qla2xxx / qla_init.c
1 /*
2 * QLogic Fibre Channel HBA Driver
3 * Copyright (c) 2003-2011 QLogic Corporation
4 *
5 * See LICENSE.qla2xxx for copyright and licensing details.
6 */
7 #include "qla_def.h"
8 #include "qla_gbl.h"
9
10 #include <linux/delay.h>
11 #include <linux/slab.h>
12 #include <linux/vmalloc.h>
13
14 #include "qla_devtbl.h"
15
16 #ifdef CONFIG_SPARC
17 #include <asm/prom.h>
18 #endif
19
20 /*
21 * QLogic ISP2x00 Hardware Support Function Prototypes.
22 */
23 static int qla2x00_isp_firmware(scsi_qla_host_t *);
24 static int qla2x00_setup_chip(scsi_qla_host_t *);
25 static int qla2x00_init_rings(scsi_qla_host_t *);
26 static int qla2x00_fw_ready(scsi_qla_host_t *);
27 static int qla2x00_configure_hba(scsi_qla_host_t *);
28 static int qla2x00_configure_loop(scsi_qla_host_t *);
29 static int qla2x00_configure_local_loop(scsi_qla_host_t *);
30 static int qla2x00_configure_fabric(scsi_qla_host_t *);
31 static int qla2x00_find_all_fabric_devs(scsi_qla_host_t *, struct list_head *);
32 static int qla2x00_device_resync(scsi_qla_host_t *);
33 static int qla2x00_fabric_dev_login(scsi_qla_host_t *, fc_port_t *,
34 uint16_t *);
35
36 static int qla2x00_restart_isp(scsi_qla_host_t *);
37
38 static struct qla_chip_state_84xx *qla84xx_get_chip(struct scsi_qla_host *);
39 static int qla84xx_init_chip(scsi_qla_host_t *);
40 static int qla25xx_init_queues(struct qla_hw_data *);
41
42 /* SRB Extensions ---------------------------------------------------------- */
43
/*
 * Timer callback fired when a context SRB does not complete in time.
 *
 * @__data: the srb_t pointer, stashed in timer.data by qla2x00_get_ctx_sp().
 *
 * Under the hardware lock: clears the command's outstanding-command slot so
 * it can no longer complete normally, then runs the IOCB's type-specific
 * timeout handler followed by its free routine.
 */
static void
qla2x00_ctx_sp_timeout(unsigned long __data)
{
	srb_t *sp = (srb_t *)__data;
	struct srb_ctx *ctx;
	struct srb_iocb *iocb;
	fc_port_t *fcport = sp->fcport;
	struct qla_hw_data *ha = fcport->vha->hw;
	struct req_que *req;
	unsigned long flags;

	spin_lock_irqsave(&ha->hardware_lock, flags);
	/* Context IOCBs are tracked on the base request queue (index 0). */
	req = ha->req_q_map[0];
	/* Drop the handle first so a late completion cannot find the SRB. */
	req->outstanding_cmds[sp->handle] = NULL;
	ctx = sp->ctx;
	iocb = ctx->u.iocb_cmd;
	/* Type-specific timeout processing, then release the SRB. */
	iocb->timeout(sp);
	iocb->free(sp);
	spin_unlock_irqrestore(&ha->hardware_lock, flags);
}
64
65 static void
66 qla2x00_ctx_sp_free(srb_t *sp)
67 {
68 struct srb_ctx *ctx = sp->ctx;
69 struct srb_iocb *iocb = ctx->u.iocb_cmd;
70 struct scsi_qla_host *vha = sp->fcport->vha;
71
72 del_timer(&iocb->timer);
73 kfree(iocb);
74 kfree(ctx);
75 mempool_free(sp, sp->fcport->vha->hw->srb_mempool);
76
77 QLA_VHA_MARK_NOT_BUSY(vha);
78 }
79
80 inline srb_t *
81 qla2x00_get_ctx_sp(scsi_qla_host_t *vha, fc_port_t *fcport, size_t size,
82 unsigned long tmo)
83 {
84 srb_t *sp = NULL;
85 struct qla_hw_data *ha = vha->hw;
86 struct srb_ctx *ctx;
87 struct srb_iocb *iocb;
88 uint8_t bail;
89
90 QLA_VHA_MARK_BUSY(vha, bail);
91 if (bail)
92 return NULL;
93
94 sp = mempool_alloc(ha->srb_mempool, GFP_KERNEL);
95 if (!sp)
96 goto done;
97 ctx = kzalloc(size, GFP_KERNEL);
98 if (!ctx) {
99 mempool_free(sp, ha->srb_mempool);
100 sp = NULL;
101 goto done;
102 }
103 iocb = kzalloc(sizeof(struct srb_iocb), GFP_KERNEL);
104 if (!iocb) {
105 mempool_free(sp, ha->srb_mempool);
106 sp = NULL;
107 kfree(ctx);
108 goto done;
109 }
110
111 memset(sp, 0, sizeof(*sp));
112 sp->fcport = fcport;
113 sp->ctx = ctx;
114 ctx->u.iocb_cmd = iocb;
115 iocb->free = qla2x00_ctx_sp_free;
116
117 init_timer(&iocb->timer);
118 if (!tmo)
119 goto done;
120 iocb->timer.expires = jiffies + tmo * HZ;
121 iocb->timer.data = (unsigned long)sp;
122 iocb->timer.function = qla2x00_ctx_sp_timeout;
123 add_timer(&iocb->timer);
124 done:
125 if (!sp)
126 QLA_VHA_MARK_NOT_BUSY(vha);
127 return sp;
128 }
129
130 /* Asynchronous Login/Logout Routines -------------------------------------- */
131
132 static inline unsigned long
133 qla2x00_get_async_timeout(struct scsi_qla_host *vha)
134 {
135 unsigned long tmo;
136 struct qla_hw_data *ha = vha->hw;
137
138 /* Firmware should use switch negotiated r_a_tov for timeout. */
139 tmo = ha->r_a_tov / 10 * 2;
140 if (!IS_FWI2_CAPABLE(ha)) {
141 /*
142 * Except for earlier ISPs where the timeout is seeded from the
143 * initialization control block.
144 */
145 tmo = ha->login_timeout;
146 }
147 return tmo;
148 }
149
/*
 * Common timeout handler for asynchronous context IOCBs (login, logout,
 * adisc, tmf); invoked from qla2x00_ctx_sp_timeout() with the hardware
 * lock held.
 *
 * For a timed-out login, an explicit logout is posted first to clean up
 * the half-open session, then the login-done work is posted with an error
 * status so the normal completion path decides whether to retry.
 */
static void
qla2x00_async_iocb_timeout(srb_t *sp)
{
	fc_port_t *fcport = sp->fcport;
	struct srb_ctx *ctx = sp->ctx;

	DEBUG2(printk(KERN_WARNING
	    "scsi(%ld:%x): Async-%s timeout - portid=%02x%02x%02x.\n",
	    fcport->vha->host_no, sp->handle,
	    ctx->name, fcport->d_id.b.domain,
	    fcport->d_id.b.area, fcport->d_id.b.al_pa));

	/* The async operation is no longer in flight. */
	fcport->flags &= ~FCF_ASYNC_SENT;
	if (ctx->type == SRB_LOGIN_CMD) {
		struct srb_iocb *lio = ctx->u.iocb_cmd;
		qla2x00_post_async_logout_work(fcport->vha, fcport, NULL);
		/* Retry as needed. */
		lio->u.logio.data[0] = MBS_COMMAND_ERROR;
		lio->u.logio.data[1] = lio->u.logio.flags & SRB_LOGIN_RETRIED ?
		    QLA_LOGIO_LOGIN_RETRIED : 0;
		qla2x00_post_async_login_done_work(fcport->vha, fcport,
		    lio->u.logio.data);
	}
}
174
175 static void
176 qla2x00_async_login_ctx_done(srb_t *sp)
177 {
178 struct srb_ctx *ctx = sp->ctx;
179 struct srb_iocb *lio = ctx->u.iocb_cmd;
180
181 qla2x00_post_async_login_done_work(sp->fcport->vha, sp->fcport,
182 lio->u.logio.data);
183 lio->free(sp);
184 }
185
186 int
187 qla2x00_async_login(struct scsi_qla_host *vha, fc_port_t *fcport,
188 uint16_t *data)
189 {
190 srb_t *sp;
191 struct srb_ctx *ctx;
192 struct srb_iocb *lio;
193 int rval;
194
195 rval = QLA_FUNCTION_FAILED;
196 sp = qla2x00_get_ctx_sp(vha, fcport, sizeof(struct srb_ctx),
197 qla2x00_get_async_timeout(vha) + 2);
198 if (!sp)
199 goto done;
200
201 ctx = sp->ctx;
202 ctx->type = SRB_LOGIN_CMD;
203 ctx->name = "login";
204 lio = ctx->u.iocb_cmd;
205 lio->timeout = qla2x00_async_iocb_timeout;
206 lio->done = qla2x00_async_login_ctx_done;
207 lio->u.logio.flags |= SRB_LOGIN_COND_PLOGI;
208 if (data[1] & QLA_LOGIO_LOGIN_RETRIED)
209 lio->u.logio.flags |= SRB_LOGIN_RETRIED;
210 rval = qla2x00_start_sp(sp);
211 if (rval != QLA_SUCCESS)
212 goto done_free_sp;
213
214 DEBUG2(printk(KERN_DEBUG
215 "scsi(%ld:%x): Async-login - loop-id=%x portid=%02x%02x%02x "
216 "retries=%d.\n", fcport->vha->host_no, sp->handle, fcport->loop_id,
217 fcport->d_id.b.domain, fcport->d_id.b.area, fcport->d_id.b.al_pa,
218 fcport->login_retry));
219 return rval;
220
221 done_free_sp:
222 lio->free(sp);
223 done:
224 return rval;
225 }
226
227 static void
228 qla2x00_async_logout_ctx_done(srb_t *sp)
229 {
230 struct srb_ctx *ctx = sp->ctx;
231 struct srb_iocb *lio = ctx->u.iocb_cmd;
232
233 qla2x00_post_async_logout_done_work(sp->fcport->vha, sp->fcport,
234 lio->u.logio.data);
235 lio->free(sp);
236 }
237
238 int
239 qla2x00_async_logout(struct scsi_qla_host *vha, fc_port_t *fcport)
240 {
241 srb_t *sp;
242 struct srb_ctx *ctx;
243 struct srb_iocb *lio;
244 int rval;
245
246 rval = QLA_FUNCTION_FAILED;
247 sp = qla2x00_get_ctx_sp(vha, fcport, sizeof(struct srb_ctx),
248 qla2x00_get_async_timeout(vha) + 2);
249 if (!sp)
250 goto done;
251
252 ctx = sp->ctx;
253 ctx->type = SRB_LOGOUT_CMD;
254 ctx->name = "logout";
255 lio = ctx->u.iocb_cmd;
256 lio->timeout = qla2x00_async_iocb_timeout;
257 lio->done = qla2x00_async_logout_ctx_done;
258 rval = qla2x00_start_sp(sp);
259 if (rval != QLA_SUCCESS)
260 goto done_free_sp;
261
262 DEBUG2(printk(KERN_DEBUG
263 "scsi(%ld:%x): Async-logout - loop-id=%x portid=%02x%02x%02x.\n",
264 fcport->vha->host_no, sp->handle, fcport->loop_id,
265 fcport->d_id.b.domain, fcport->d_id.b.area, fcport->d_id.b.al_pa));
266 return rval;
267
268 done_free_sp:
269 lio->free(sp);
270 done:
271 return rval;
272 }
273
274 static void
275 qla2x00_async_adisc_ctx_done(srb_t *sp)
276 {
277 struct srb_ctx *ctx = sp->ctx;
278 struct srb_iocb *lio = ctx->u.iocb_cmd;
279
280 qla2x00_post_async_adisc_done_work(sp->fcport->vha, sp->fcport,
281 lio->u.logio.data);
282 lio->free(sp);
283 }
284
285 int
286 qla2x00_async_adisc(struct scsi_qla_host *vha, fc_port_t *fcport,
287 uint16_t *data)
288 {
289 srb_t *sp;
290 struct srb_ctx *ctx;
291 struct srb_iocb *lio;
292 int rval;
293
294 rval = QLA_FUNCTION_FAILED;
295 sp = qla2x00_get_ctx_sp(vha, fcport, sizeof(struct srb_ctx),
296 qla2x00_get_async_timeout(vha) + 2);
297 if (!sp)
298 goto done;
299
300 ctx = sp->ctx;
301 ctx->type = SRB_ADISC_CMD;
302 ctx->name = "adisc";
303 lio = ctx->u.iocb_cmd;
304 lio->timeout = qla2x00_async_iocb_timeout;
305 lio->done = qla2x00_async_adisc_ctx_done;
306 if (data[1] & QLA_LOGIO_LOGIN_RETRIED)
307 lio->u.logio.flags |= SRB_LOGIN_RETRIED;
308 rval = qla2x00_start_sp(sp);
309 if (rval != QLA_SUCCESS)
310 goto done_free_sp;
311
312 DEBUG2(printk(KERN_DEBUG
313 "scsi(%ld:%x): Async-adisc - loop-id=%x portid=%02x%02x%02x.\n",
314 fcport->vha->host_no, sp->handle, fcport->loop_id,
315 fcport->d_id.b.domain, fcport->d_id.b.area, fcport->d_id.b.al_pa));
316
317 return rval;
318
319 done_free_sp:
320 lio->free(sp);
321 done:
322 return rval;
323 }
324
325 static void
326 qla2x00_async_tm_cmd_ctx_done(srb_t *sp)
327 {
328 struct srb_ctx *ctx = sp->ctx;
329 struct srb_iocb *iocb = (struct srb_iocb *)ctx->u.iocb_cmd;
330
331 qla2x00_async_tm_cmd_done(sp->fcport->vha, sp->fcport, iocb);
332 iocb->free(sp);
333 }
334
335 int
336 qla2x00_async_tm_cmd(fc_port_t *fcport, uint32_t flags, uint32_t lun,
337 uint32_t tag)
338 {
339 struct scsi_qla_host *vha = fcport->vha;
340 srb_t *sp;
341 struct srb_ctx *ctx;
342 struct srb_iocb *tcf;
343 int rval;
344
345 rval = QLA_FUNCTION_FAILED;
346 sp = qla2x00_get_ctx_sp(vha, fcport, sizeof(struct srb_ctx),
347 qla2x00_get_async_timeout(vha) + 2);
348 if (!sp)
349 goto done;
350
351 ctx = sp->ctx;
352 ctx->type = SRB_TM_CMD;
353 ctx->name = "tmf";
354 tcf = ctx->u.iocb_cmd;
355 tcf->u.tmf.flags = flags;
356 tcf->u.tmf.lun = lun;
357 tcf->u.tmf.data = tag;
358 tcf->timeout = qla2x00_async_iocb_timeout;
359 tcf->done = qla2x00_async_tm_cmd_ctx_done;
360
361 rval = qla2x00_start_sp(sp);
362 if (rval != QLA_SUCCESS)
363 goto done_free_sp;
364
365 DEBUG2(printk(KERN_DEBUG
366 "scsi(%ld:%x): Async-tmf - loop-id=%x portid=%02x%02x%02x.\n",
367 fcport->vha->host_no, sp->handle, fcport->loop_id,
368 fcport->d_id.b.domain, fcport->d_id.b.area, fcport->d_id.b.al_pa));
369
370 return rval;
371
372 done_free_sp:
373 tcf->free(sp);
374 done:
375 return rval;
376 }
377
/*
 * Process the completion of an asynchronous login.
 *
 * @vha:    virtual host
 * @fcport: remote port the login was issued to
 * @data:   data[0] = mailbox completion status,
 *          data[1] = status-specific payload (retry flag or loop id)
 */
void
qla2x00_async_login_done(struct scsi_qla_host *vha, fc_port_t *fcport,
    uint16_t *data)
{
	int rval;

	switch (data[0]) {
	case MBS_COMMAND_COMPLETE:
		/*
		 * FCP-2 capable devices get an ADISC first instead of being
		 * marked usable right away.
		 */
		if (fcport->flags & FCF_FCP2_DEVICE) {
			qla2x00_post_async_adisc_work(vha, fcport, data);
			break;
		}
		qla2x00_update_fcport(vha, fcport);
		break;
	case MBS_COMMAND_ERROR:
		fcport->flags &= ~FCF_ASYNC_SENT;
		/* Either schedule a relogin or give the device up for lost. */
		if (data[1] & QLA_LOGIO_LOGIN_RETRIED)
			set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
		else
			qla2x00_mark_device_lost(vha, fcport, 1, 0);
		break;
	case MBS_PORT_ID_USED:
		/*
		 * Adopt the loop id reported in data[1], log that session
		 * out, then retry the login.
		 */
		fcport->loop_id = data[1];
		qla2x00_post_async_logout_work(vha, fcport, NULL);
		qla2x00_post_async_login_work(vha, fcport, NULL);
		break;
	case MBS_LOOP_ID_USED:
		/* Pick another loop id and retry the login. */
		fcport->loop_id++;
		rval = qla2x00_find_new_loop_id(vha, fcport);
		if (rval != QLA_SUCCESS) {
			/* No free loop ids -- the device is unreachable. */
			fcport->flags &= ~FCF_ASYNC_SENT;
			qla2x00_mark_device_lost(vha, fcport, 1, 0);
			break;
		}
		qla2x00_post_async_login_work(vha, fcport, NULL);
		break;
	}
	return;
}
417
418 void
419 qla2x00_async_logout_done(struct scsi_qla_host *vha, fc_port_t *fcport,
420 uint16_t *data)
421 {
422 qla2x00_mark_device_lost(vha, fcport, 1, 0);
423 return;
424 }
425
426 void
427 qla2x00_async_adisc_done(struct scsi_qla_host *vha, fc_port_t *fcport,
428 uint16_t *data)
429 {
430 if (data[0] == MBS_COMMAND_COMPLETE) {
431 qla2x00_update_fcport(vha, fcport);
432
433 return;
434 }
435
436 /* Retry login. */
437 fcport->flags &= ~FCF_ASYNC_SENT;
438 if (data[1] & QLA_LOGIO_LOGIN_RETRIED)
439 set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
440 else
441 qla2x00_mark_device_lost(vha, fcport, 1, 0);
442
443 return;
444 }
445
446 void
447 qla2x00_async_tm_cmd_done(struct scsi_qla_host *vha, fc_port_t *fcport,
448 struct srb_iocb *iocb)
449 {
450 int rval;
451 uint32_t flags;
452 uint16_t lun;
453
454 flags = iocb->u.tmf.flags;
455 lun = (uint16_t)iocb->u.tmf.lun;
456
457 /* Issue Marker IOCB */
458 rval = qla2x00_marker(vha, vha->hw->req_q_map[0],
459 vha->hw->rsp_q_map[0], fcport->loop_id, lun,
460 flags == TCF_LUN_RESET ? MK_SYNC_ID_LUN : MK_SYNC_ID);
461
462 if ((rval != QLA_SUCCESS) || iocb->u.tmf.data) {
463 DEBUG2_3_11(printk(KERN_WARNING
464 "%s(%ld): TM IOCB failed (%x).\n",
465 __func__, vha->host_no, rval));
466 }
467
468 return;
469 }
470
471 /****************************************************************************/
472 /* QLogic ISP2x00 Hardware Support Functions. */
473 /****************************************************************************/
474
/*
 * qla2x00_initialize_adapter
 *	Initialize board: reset adapter state, configure PCI space, read
 *	flash/NVRAM, load firmware if needed and bring up the rings.
 *
 * Input:
 *	vha = adapter block pointer.
 *
 * Returns:
 *	0 = success, non-zero on any fatal initialization failure.
 */
int
qla2x00_initialize_adapter(scsi_qla_host_t *vha)
{
	int rval;
	struct qla_hw_data *ha = vha->hw;
	struct req_que *req = ha->req_q_map[0];

	/* Clear adapter flags. */
	vha->flags.online = 0;
	ha->flags.chip_reset_done = 0;
	vha->flags.reset_active = 0;
	ha->flags.pci_channel_io_perm_failure = 0;
	ha->flags.eeh_busy = 0;
	/* Assume thermal telemetry works until a query proves otherwise. */
	ha->flags.thermal_supported = 1;
	atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME);
	atomic_set(&vha->loop_state, LOOP_DOWN);
	vha->device_flags = DFLG_NO_CABLE;
	vha->dpc_flags = 0;
	vha->flags.management_server_logged_in = 0;
	vha->marker_needed = 0;
	ha->isp_abort_cnt = 0;
	ha->beacon_blink_led = 0;

	/* The base request/response queue pair always exists. */
	set_bit(0, ha->req_qid_map);
	set_bit(0, ha->rsp_qid_map);

	qla_printk(KERN_INFO, ha, "Configuring PCI space...\n");
	rval = ha->isp_ops->pci_config(vha);
	if (rval) {
		DEBUG2(printk("scsi(%ld): Unable to configure PCI space.\n",
		    vha->host_no));
		return (rval);
	}

	ha->isp_ops->reset_chip(vha);

	rval = qla2xxx_get_flash_info(vha);
	if (rval) {
		DEBUG2(printk("scsi(%ld): Unable to validate FLASH data.\n",
		    vha->host_no));
		return (rval);
	}

	/* Uses the request ring as a scratch buffer for the flash read. */
	ha->isp_ops->get_flash_version(vha, req->ring);

	qla_printk(KERN_INFO, ha, "Configure NVRAM parameters...\n");

	ha->isp_ops->nvram_config(vha);

	if (ha->flags.disable_serdes) {
		/* Mask HBA via NVRAM settings? */
		qla_printk(KERN_INFO, ha, "Masking HBA WWPN "
		    "%02x%02x%02x%02x%02x%02x%02x%02x (via NVRAM).\n",
		    vha->port_name[0], vha->port_name[1],
		    vha->port_name[2], vha->port_name[3],
		    vha->port_name[4], vha->port_name[5],
		    vha->port_name[6], vha->port_name[7]);
		return QLA_FUNCTION_FAILED;
	}

	qla_printk(KERN_INFO, ha, "Verifying loaded RISC code...\n");

	/* Load firmware only when a usable image is not already running. */
	if (qla2x00_isp_firmware(vha) != QLA_SUCCESS) {
		rval = ha->isp_ops->chip_diag(vha);
		if (rval)
			return (rval);
		rval = qla2x00_setup_chip(vha);
		if (rval)
			return (rval);
	}

	if (IS_QLA84XX(ha)) {
		/* Attach (or create) the shared 84xx chip-state object. */
		ha->cs84xx = qla84xx_get_chip(vha);
		if (!ha->cs84xx) {
			qla_printk(KERN_ERR, ha,
			    "Unable to configure ISP84XX.\n");
			return QLA_FUNCTION_FAILED;
		}
	}
	rval = qla2x00_init_rings(vha);
	ha->flags.chip_reset_done = 1;

	if (rval == QLA_SUCCESS && IS_QLA84XX(ha)) {
		/* Issue verify 84xx FW IOCB to complete 84xx initialization */
		rval = qla84xx_init_chip(vha);
		if (rval != QLA_SUCCESS) {
			qla_printk(KERN_ERR, ha,
			    "Unable to initialize ISP84XX.\n");
			qla84xx_put_chip(vha);
		}
	}

	/* FCP priority configuration only applies to 24xx/25xx parts. */
	if (IS_QLA24XX_TYPE(ha) || IS_QLA25XX(ha))
		qla24xx_read_fcp_prio_cfg(vha);

	return (rval);
}
582
583 /**
584 * qla2100_pci_config() - Setup ISP21xx PCI configuration registers.
585 * @ha: HA context
586 *
587 * Returns 0 on success.
588 */
589 int
590 qla2100_pci_config(scsi_qla_host_t *vha)
591 {
592 uint16_t w;
593 unsigned long flags;
594 struct qla_hw_data *ha = vha->hw;
595 struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
596
597 pci_set_master(ha->pdev);
598 pci_try_set_mwi(ha->pdev);
599
600 pci_read_config_word(ha->pdev, PCI_COMMAND, &w);
601 w |= (PCI_COMMAND_PARITY | PCI_COMMAND_SERR);
602 pci_write_config_word(ha->pdev, PCI_COMMAND, w);
603
604 pci_disable_rom(ha->pdev);
605
606 /* Get PCI bus information. */
607 spin_lock_irqsave(&ha->hardware_lock, flags);
608 ha->pci_attr = RD_REG_WORD(&reg->ctrl_status);
609 spin_unlock_irqrestore(&ha->hardware_lock, flags);
610
611 return QLA_SUCCESS;
612 }
613
/**
 * qla2300_pci_config() - Setup ISP23xx PCI configuration registers.
 * @vha: HA context
 *
 * Returns 0 on success.
 */
int
qla2300_pci_config(scsi_qla_host_t *vha)
{
	uint16_t w;
	unsigned long flags = 0;
	uint32_t cnt;
	struct qla_hw_data *ha = vha->hw;
	struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;

	pci_set_master(ha->pdev);
	pci_try_set_mwi(ha->pdev);

	/* Enable PCI parity and SERR reporting. */
	pci_read_config_word(ha->pdev, PCI_COMMAND, &w);
	w |= (PCI_COMMAND_PARITY | PCI_COMMAND_SERR);

	if (IS_QLA2322(ha) || IS_QLA6322(ha))
		w &= ~PCI_COMMAND_INTX_DISABLE;
	pci_write_config_word(ha->pdev, PCI_COMMAND, w);

	/*
	 * If this is a 2300 card and not 2312, reset the
	 * COMMAND_INVALIDATE due to a bug in the 2300. Unfortunately,
	 * the 2310 also reports itself as a 2300 so we need to get the
	 * fb revision level -- a 6 indicates it really is a 2300 and
	 * not a 2310.
	 */
	if (IS_QLA2300(ha)) {
		spin_lock_irqsave(&ha->hardware_lock, flags);

		/* Pause RISC. */
		WRT_REG_WORD(&reg->hccr, HCCR_PAUSE_RISC);
		/* Poll up to 30000 * 10us (~300ms) for the pause to take. */
		for (cnt = 0; cnt < 30000; cnt++) {
			if ((RD_REG_WORD(&reg->hccr) & HCCR_RISC_PAUSE) != 0)
				break;

			udelay(10);
		}

		/* Select FPM registers. */
		WRT_REG_WORD(&reg->ctrl_status, 0x20);
		RD_REG_WORD(&reg->ctrl_status);	/* read flushes the write */

		/* Get the fb rev level */
		ha->fb_rev = RD_FB_CMD_REG(ha, reg);

		/* True 2300: disable memory-write-invalidate (chip bug). */
		if (ha->fb_rev == FPM_2300)
			pci_clear_mwi(ha->pdev);

		/* Deselect FPM registers. */
		WRT_REG_WORD(&reg->ctrl_status, 0x0);
		RD_REG_WORD(&reg->ctrl_status);

		/* Release RISC module. */
		WRT_REG_WORD(&reg->hccr, HCCR_RELEASE_RISC);
		for (cnt = 0; cnt < 30000; cnt++) {
			if ((RD_REG_WORD(&reg->hccr) & HCCR_RISC_PAUSE) == 0)
				break;

			udelay(10);
		}

		spin_unlock_irqrestore(&ha->hardware_lock, flags);
	}

	pci_write_config_byte(ha->pdev, PCI_LATENCY_TIMER, 0x80);

	pci_disable_rom(ha->pdev);

	/* Get PCI bus information. */
	spin_lock_irqsave(&ha->hardware_lock, flags);
	ha->pci_attr = RD_REG_WORD(&reg->ctrl_status);
	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	return QLA_SUCCESS;
}
695
696 /**
697 * qla24xx_pci_config() - Setup ISP24xx PCI configuration registers.
698 * @ha: HA context
699 *
700 * Returns 0 on success.
701 */
702 int
703 qla24xx_pci_config(scsi_qla_host_t *vha)
704 {
705 uint16_t w;
706 unsigned long flags = 0;
707 struct qla_hw_data *ha = vha->hw;
708 struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
709
710 pci_set_master(ha->pdev);
711 pci_try_set_mwi(ha->pdev);
712
713 pci_read_config_word(ha->pdev, PCI_COMMAND, &w);
714 w |= (PCI_COMMAND_PARITY | PCI_COMMAND_SERR);
715 w &= ~PCI_COMMAND_INTX_DISABLE;
716 pci_write_config_word(ha->pdev, PCI_COMMAND, w);
717
718 pci_write_config_byte(ha->pdev, PCI_LATENCY_TIMER, 0x80);
719
720 /* PCI-X -- adjust Maximum Memory Read Byte Count (2048). */
721 if (pci_find_capability(ha->pdev, PCI_CAP_ID_PCIX))
722 pcix_set_mmrbc(ha->pdev, 2048);
723
724 /* PCIe -- adjust Maximum Read Request Size (2048). */
725 if (pci_find_capability(ha->pdev, PCI_CAP_ID_EXP))
726 pcie_set_readrq(ha->pdev, 2048);
727
728 pci_disable_rom(ha->pdev);
729
730 ha->chip_revision = ha->pdev->revision;
731
732 /* Get PCI bus information. */
733 spin_lock_irqsave(&ha->hardware_lock, flags);
734 ha->pci_attr = RD_REG_DWORD(&reg->ctrl_status);
735 spin_unlock_irqrestore(&ha->hardware_lock, flags);
736
737 return QLA_SUCCESS;
738 }
739
740 /**
741 * qla25xx_pci_config() - Setup ISP25xx PCI configuration registers.
742 * @ha: HA context
743 *
744 * Returns 0 on success.
745 */
746 int
747 qla25xx_pci_config(scsi_qla_host_t *vha)
748 {
749 uint16_t w;
750 struct qla_hw_data *ha = vha->hw;
751
752 pci_set_master(ha->pdev);
753 pci_try_set_mwi(ha->pdev);
754
755 pci_read_config_word(ha->pdev, PCI_COMMAND, &w);
756 w |= (PCI_COMMAND_PARITY | PCI_COMMAND_SERR);
757 w &= ~PCI_COMMAND_INTX_DISABLE;
758 pci_write_config_word(ha->pdev, PCI_COMMAND, w);
759
760 /* PCIe -- adjust Maximum Read Request Size (2048). */
761 if (pci_find_capability(ha->pdev, PCI_CAP_ID_EXP))
762 pcie_set_readrq(ha->pdev, 2048);
763
764 pci_disable_rom(ha->pdev);
765
766 ha->chip_revision = ha->pdev->revision;
767
768 return QLA_SUCCESS;
769 }
770
771 /**
772 * qla2x00_isp_firmware() - Choose firmware image.
773 * @ha: HA context
774 *
775 * Returns 0 on success.
776 */
777 static int
778 qla2x00_isp_firmware(scsi_qla_host_t *vha)
779 {
780 int rval;
781 uint16_t loop_id, topo, sw_cap;
782 uint8_t domain, area, al_pa;
783 struct qla_hw_data *ha = vha->hw;
784
785 /* Assume loading risc code */
786 rval = QLA_FUNCTION_FAILED;
787
788 if (ha->flags.disable_risc_code_load) {
789 DEBUG2(printk("scsi(%ld): RISC CODE NOT loaded\n",
790 vha->host_no));
791 qla_printk(KERN_INFO, ha, "RISC CODE NOT loaded\n");
792
793 /* Verify checksum of loaded RISC code. */
794 rval = qla2x00_verify_checksum(vha, ha->fw_srisc_address);
795 if (rval == QLA_SUCCESS) {
796 /* And, verify we are not in ROM code. */
797 rval = qla2x00_get_adapter_id(vha, &loop_id, &al_pa,
798 &area, &domain, &topo, &sw_cap);
799 }
800 }
801
802 if (rval) {
803 DEBUG2_3(printk("scsi(%ld): **** Load RISC code ****\n",
804 vha->host_no));
805 }
806
807 return (rval);
808 }
809
/**
 * qla2x00_reset_chip() - Reset ISP chip.
 * @vha: HA context
 *
 * Disables interrupts and bus mastering, steps the RISC/FPM/frame-buffer
 * blocks through their reset sequence under the hardware lock, then
 * restores bus mastering.  The register write sequence is hardware
 * mandated -- do not reorder.
 */
void
qla2x00_reset_chip(scsi_qla_host_t *vha)
{
	unsigned long flags = 0;
	struct qla_hw_data *ha = vha->hw;
	struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
	uint32_t cnt;
	uint16_t cmd;

	/* Nothing to do if the PCI channel is gone. */
	if (unlikely(pci_channel_offline(ha->pdev)))
		return;

	ha->isp_ops->disable_intrs(ha);

	spin_lock_irqsave(&ha->hardware_lock, flags);

	/* Turn off master enable */
	cmd = 0;
	pci_read_config_word(ha->pdev, PCI_COMMAND, &cmd);
	cmd &= ~PCI_COMMAND_MASTER;
	pci_write_config_word(ha->pdev, PCI_COMMAND, cmd);

	if (!IS_QLA2100(ha)) {
		/* Pause RISC. */
		WRT_REG_WORD(&reg->hccr, HCCR_PAUSE_RISC);
		if (IS_QLA2200(ha) || IS_QLA2300(ha)) {
			/* Poll up to 30000 * 100us for pause to take. */
			for (cnt = 0; cnt < 30000; cnt++) {
				if ((RD_REG_WORD(&reg->hccr) &
				    HCCR_RISC_PAUSE) != 0)
					break;
				udelay(100);
			}
		} else {
			RD_REG_WORD(&reg->hccr);	/* PCI Posting. */
			udelay(10);
		}

		/* Select FPM registers. */
		WRT_REG_WORD(&reg->ctrl_status, 0x20);
		RD_REG_WORD(&reg->ctrl_status);		/* PCI Posting. */

		/* FPM Soft Reset. */
		WRT_REG_WORD(&reg->fpm_diag_config, 0x100);
		RD_REG_WORD(&reg->fpm_diag_config);	/* PCI Posting. */

		/* Toggle Fpm Reset. */
		if (!IS_QLA2200(ha)) {
			WRT_REG_WORD(&reg->fpm_diag_config, 0x0);
			RD_REG_WORD(&reg->fpm_diag_config); /* PCI Posting. */
		}

		/* Select frame buffer registers. */
		WRT_REG_WORD(&reg->ctrl_status, 0x10);
		RD_REG_WORD(&reg->ctrl_status);		/* PCI Posting. */

		/* Reset frame buffer FIFOs. */
		if (IS_QLA2200(ha)) {
			WRT_FB_CMD_REG(ha, reg, 0xa000);
			RD_FB_CMD_REG(ha, reg);		/* PCI Posting. */
		} else {
			WRT_FB_CMD_REG(ha, reg, 0x00fc);

			/* Read back fb_cmd until zero or 3 seconds max */
			for (cnt = 0; cnt < 3000; cnt++) {
				if ((RD_FB_CMD_REG(ha, reg) & 0xff) == 0)
					break;
				udelay(100);
			}
		}

		/* Select RISC module registers. */
		WRT_REG_WORD(&reg->ctrl_status, 0);
		RD_REG_WORD(&reg->ctrl_status);		/* PCI Posting. */

		/* Reset RISC processor. */
		WRT_REG_WORD(&reg->hccr, HCCR_RESET_RISC);
		RD_REG_WORD(&reg->hccr);		/* PCI Posting. */

		/* Release RISC processor. */
		WRT_REG_WORD(&reg->hccr, HCCR_RELEASE_RISC);
		RD_REG_WORD(&reg->hccr);		/* PCI Posting. */
	}

	/* Clear any latched RISC/host interrupts before the soft reset. */
	WRT_REG_WORD(&reg->hccr, HCCR_CLR_RISC_INT);
	WRT_REG_WORD(&reg->hccr, HCCR_CLR_HOST_INT);

	/* Reset ISP chip. */
	WRT_REG_WORD(&reg->ctrl_status, CSR_ISP_SOFT_RESET);

	/* Wait for RISC to recover from reset. */
	if (IS_QLA2100(ha) || IS_QLA2200(ha) || IS_QLA2300(ha)) {
		/*
		 * It is necessary to have a delay here since the card doesn't
		 * respond to PCI reads during a reset. On some architectures
		 * this will result in an MCA.
		 */
		udelay(20);
		for (cnt = 30000; cnt; cnt--) {
			if ((RD_REG_WORD(&reg->ctrl_status) &
			    CSR_ISP_SOFT_RESET) == 0)
				break;
			udelay(100);
		}
	} else
		udelay(10);

	/* Reset RISC processor. */
	WRT_REG_WORD(&reg->hccr, HCCR_RESET_RISC);

	WRT_REG_WORD(&reg->semaphore, 0);

	/* Release RISC processor. */
	WRT_REG_WORD(&reg->hccr, HCCR_RELEASE_RISC);
	RD_REG_WORD(&reg->hccr);		/* PCI Posting. */

	if (IS_QLA2100(ha) || IS_QLA2200(ha) || IS_QLA2300(ha)) {
		/* Wait for mailbox 0 to leave the BUSY state. */
		for (cnt = 0; cnt < 30000; cnt++) {
			if (RD_MAILBOX_REG(ha, reg, 0) != MBS_BUSY)
				break;

			udelay(100);
		}
	} else
		udelay(100);

	/* Turn on master enable */
	cmd |= PCI_COMMAND_MASTER;
	pci_write_config_word(ha->pdev, PCI_COMMAND, cmd);

	/* Disable RISC pause on FPM parity error. */
	if (!IS_QLA2100(ha)) {
		WRT_REG_WORD(&reg->hccr, HCCR_DISABLE_PARITY_PAUSE);
		RD_REG_WORD(&reg->hccr);	/* PCI Posting. */
	}

	spin_unlock_irqrestore(&ha->hardware_lock, flags);
}
953
954 /**
955 * qla81xx_reset_mpi() - Reset's MPI FW via Write MPI Register MBC.
956 *
957 * Returns 0 on success.
958 */
959 int
960 qla81xx_reset_mpi(scsi_qla_host_t *vha)
961 {
962 uint16_t mb[4] = {0x1010, 0, 1, 0};
963
964 return qla81xx_write_mpi_register(vha, mb);
965 }
966
/**
 * qla24xx_reset_risc() - Perform full reset of ISP24xx RISC.
 * @vha: HA context
 *
 * Shuts down DMA, soft-resets the RISC, optionally resets the MPI
 * firmware, and waits (by polling mailbox0/ctrl_status) for the firmware
 * to come back.  Runs entirely under the hardware lock.
 */
static inline void
qla24xx_reset_risc(scsi_qla_host_t *vha)
{
	unsigned long flags = 0;
	struct qla_hw_data *ha = vha->hw;
	struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
	uint32_t cnt, d2;
	uint16_t wd;
	/*
	 * ISP abort retry counts.  NOTE(review): static, so shared across
	 * all adapters handled by this driver -- confirm that is intended.
	 */
	static int abts_cnt;

	spin_lock_irqsave(&ha->hardware_lock, flags);

	/* Reset RISC. */
	WRT_REG_DWORD(&reg->ctrl_status, CSRX_DMA_SHUTDOWN|MWB_4096_BYTES);
	/* Wait for DMA activity to quiesce before asserting soft reset. */
	for (cnt = 0; cnt < 30000; cnt++) {
		if ((RD_REG_DWORD(&reg->ctrl_status) & CSRX_DMA_ACTIVE) == 0)
			break;

		udelay(10);
	}

	WRT_REG_DWORD(&reg->ctrl_status,
	    CSRX_ISP_SOFT_RESET|CSRX_DMA_SHUTDOWN|MWB_4096_BYTES);
	/* Config-space read flushes the posted write. */
	pci_read_config_word(ha->pdev, PCI_COMMAND, &wd);

	udelay(100);
	/* Wait for firmware to complete NVRAM accesses. */
	d2 = (uint32_t) RD_REG_WORD(&reg->mailbox0);
	for (cnt = 10000 ; cnt && d2; cnt--) {
		udelay(5);
		d2 = (uint32_t) RD_REG_WORD(&reg->mailbox0);
		barrier();
	}

	/* Wait for soft-reset to complete. */
	d2 = RD_REG_DWORD(&reg->ctrl_status);
	for (cnt = 6000000 ; cnt && (d2 & CSRX_ISP_SOFT_RESET); cnt--) {
		udelay(5);
		d2 = RD_REG_DWORD(&reg->ctrl_status);
		barrier();
	}

	/* If required, do an MPI FW reset now */
	if (test_and_clear_bit(MPI_RESET_NEEDED, &vha->dpc_flags)) {
		if (qla81xx_reset_mpi(vha) != QLA_SUCCESS) {
			if (++abts_cnt < 5) {
				/* Schedule another ISP abort + MPI reset. */
				set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
				set_bit(MPI_RESET_NEEDED, &vha->dpc_flags);
			} else {
				/*
				 * We exhausted the ISP abort retries. We have
				 * to set the board offline.
				 */
				abts_cnt = 0;
				vha->flags.online = 0;
			}
		}
	}

	/* Pulse the RISC reset: set, release pause, clear. */
	WRT_REG_DWORD(&reg->hccr, HCCRX_SET_RISC_RESET);
	RD_REG_DWORD(&reg->hccr);

	WRT_REG_DWORD(&reg->hccr, HCCRX_REL_RISC_PAUSE);
	RD_REG_DWORD(&reg->hccr);

	WRT_REG_DWORD(&reg->hccr, HCCRX_CLR_RISC_RESET);
	RD_REG_DWORD(&reg->hccr);

	/* Wait for mailbox0 to clear, indicating firmware is back up. */
	d2 = (uint32_t) RD_REG_WORD(&reg->mailbox0);
	for (cnt = 6000000 ; cnt && d2; cnt--) {
		udelay(5);
		d2 = (uint32_t) RD_REG_WORD(&reg->mailbox0);
		barrier();
	}

	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	if (IS_NOPOLLING_TYPE(ha))
		ha->isp_ops->enable_intrs(ha);
}
1053
1054 /**
1055 * qla24xx_reset_chip() - Reset ISP24xx chip.
1056 * @ha: HA context
1057 *
1058 * Returns 0 on success.
1059 */
1060 void
1061 qla24xx_reset_chip(scsi_qla_host_t *vha)
1062 {
1063 struct qla_hw_data *ha = vha->hw;
1064
1065 if (pci_channel_offline(ha->pdev) &&
1066 ha->flags.pci_channel_io_perm_failure) {
1067 return;
1068 }
1069
1070 ha->isp_ops->disable_intrs(ha);
1071
1072 /* Perform RISC reset. */
1073 qla24xx_reset_risc(vha);
1074 }
1075
/**
 * qla2x00_chip_diag() - Test chip for proper operation.
 * @vha: HA context
 *
 * Soft-resets the ISP, verifies the product ID mailboxes, sizes the
 * firmware transfer buffer and runs the mailbox register wrap test.
 *
 * Returns 0 (QLA_SUCCESS) on success.
 */
int
qla2x00_chip_diag(scsi_qla_host_t *vha)
{
	int rval;
	struct qla_hw_data *ha = vha->hw;
	struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
	unsigned long flags = 0;
	uint16_t data;
	uint32_t cnt;
	uint16_t mb[5];
	struct req_que *req = ha->req_q_map[0];

	/* Assume a failed state */
	rval = QLA_FUNCTION_FAILED;

	DEBUG3(printk("scsi(%ld): Testing device at %lx.\n",
	    vha->host_no, (u_long)&reg->flash_address));

	spin_lock_irqsave(&ha->hardware_lock, flags);

	/* Reset ISP chip. */
	WRT_REG_WORD(&reg->ctrl_status, CSR_ISP_SOFT_RESET);

	/*
	 * We need to have a delay here since the card will not respond while
	 * in reset causing an MCA on some architectures.
	 */
	udelay(20);
	data = qla2x00_debounce_register(&reg->ctrl_status);
	/* Poll until the soft-reset bit self-clears (or cnt exhausts). */
	for (cnt = 6000000 ; cnt && (data & CSR_ISP_SOFT_RESET); cnt--) {
		udelay(5);
		data = RD_REG_WORD(&reg->ctrl_status);
		barrier();
	}

	if (!cnt)
		goto chip_diag_failed;

	DEBUG3(printk("scsi(%ld): Reset register cleared by chip reset\n",
	    vha->host_no));

	/* Reset RISC processor. */
	WRT_REG_WORD(&reg->hccr, HCCR_RESET_RISC);
	WRT_REG_WORD(&reg->hccr, HCCR_RELEASE_RISC);

	/* Workaround for QLA2312 PCI parity error */
	if (IS_QLA2100(ha) || IS_QLA2200(ha) || IS_QLA2300(ha)) {
		data = qla2x00_debounce_register(MAILBOX_REG(ha, reg, 0));
		for (cnt = 6000000; cnt && (data == MBS_BUSY); cnt--) {
			udelay(5);
			data = RD_MAILBOX_REG(ha, reg, 0);
			barrier();
		}
	} else
		udelay(10);

	if (!cnt)
		goto chip_diag_failed;

	/* Check product ID of chip */
	DEBUG3(printk("scsi(%ld): Checking product ID of chip\n", vha->host_no));

	mb[1] = RD_MAILBOX_REG(ha, reg, 1);
	mb[2] = RD_MAILBOX_REG(ha, reg, 2);
	mb[3] = RD_MAILBOX_REG(ha, reg, 3);
	mb[4] = qla2x00_debounce_register(MAILBOX_REG(ha, reg, 4));
	/* mb[2] has two accepted values (PROD_ID_2 or PROD_ID_2a). */
	if (mb[1] != PROD_ID_1 || (mb[2] != PROD_ID_2 && mb[2] != PROD_ID_2a) ||
	    mb[3] != PROD_ID_3) {
		qla_printk(KERN_WARNING, ha,
		    "Wrong product ID = 0x%x,0x%x,0x%x\n", mb[1], mb[2], mb[3]);

		goto chip_diag_failed;
	}
	ha->product_id[0] = mb[1];
	ha->product_id[1] = mb[2];
	ha->product_id[2] = mb[3];
	ha->product_id[3] = mb[4];

	/* Adjust fw RISC transfer size */
	if (req->length > 1024)
		ha->fw_transfer_size = REQUEST_ENTRY_SIZE * 1024;
	else
		ha->fw_transfer_size = REQUEST_ENTRY_SIZE *
		    req->length;

	if (IS_QLA2200(ha) &&
	    RD_MAILBOX_REG(ha, reg, 7) == QLA2200A_RISC_ROM_VER) {
		/* Limit firmware transfer size with a 2200A */
		DEBUG3(printk("scsi(%ld): Found QLA2200A chip.\n",
		    vha->host_no));

		ha->device_type |= DT_ISP2200A;
		ha->fw_transfer_size = 128;
	}

	/* Wrap Incoming Mailboxes Test. */
	/* Drop the lock: the mailbox test sleeps/waits for completion. */
	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	DEBUG3(printk("scsi(%ld): Checking mailboxes.\n", vha->host_no));
	rval = qla2x00_mbx_reg_test(vha);
	if (rval) {
		DEBUG(printk("scsi(%ld): Failed mailbox send register test\n",
		    vha->host_no));
		qla_printk(KERN_WARNING, ha,
		    "Failed mailbox send register test\n");
	}
	else {
		/* Flag a successful rval */
		rval = QLA_SUCCESS;
	}
	/* Re-acquire: the failure label below expects the lock held. */
	spin_lock_irqsave(&ha->hardware_lock, flags);

chip_diag_failed:
	if (rval)
		DEBUG2_3(printk("scsi(%ld): Chip diagnostics **** FAILED "
		    "****\n", vha->host_no));

	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	return (rval);
}
1203
1204 /**
1205 * qla24xx_chip_diag() - Test ISP24xx for proper operation.
1206 * @ha: HA context
1207 *
1208 * Returns 0 on success.
1209 */
1210 int
1211 qla24xx_chip_diag(scsi_qla_host_t *vha)
1212 {
1213 int rval;
1214 struct qla_hw_data *ha = vha->hw;
1215 struct req_que *req = ha->req_q_map[0];
1216
1217 if (IS_QLA82XX(ha))
1218 return QLA_SUCCESS;
1219
1220 ha->fw_transfer_size = REQUEST_ENTRY_SIZE * req->length;
1221
1222 rval = qla2x00_mbx_reg_test(vha);
1223 if (rval) {
1224 DEBUG(printk("scsi(%ld): Failed mailbox send register test\n",
1225 vha->host_no));
1226 qla_printk(KERN_WARNING, ha,
1227 "Failed mailbox send register test\n");
1228 } else {
1229 /* Flag a successful rval */
1230 rval = QLA_SUCCESS;
1231 }
1232
1233 return rval;
1234 }
1235
/*
 * qla2x00_alloc_fw_dump() - Size and allocate the firmware-dump buffer.
 * @vha: HA context
 *
 * Computes the dump layout for the detected ISP type (fixed register
 * area, firmware memory, request/response rings, optional MQ/FCE/EFT
 * chains), enables the FCE/EFT trace buffers on parts that support
 * them, vmalloc()s the dump area and pre-fills its header.  On
 * allocation failure the trace buffers are released again.  No-op if a
 * dump buffer already exists.
 */
void
qla2x00_alloc_fw_dump(scsi_qla_host_t *vha)
{
	int rval;
	uint32_t dump_size, fixed_size, mem_size, req_q_size, rsp_q_size,
	    eft_size, fce_size, mq_size;
	dma_addr_t tc_dma;
	void *tc;
	struct qla_hw_data *ha = vha->hw;
	struct req_que *req = ha->req_q_map[0];
	struct rsp_que *rsp = ha->rsp_q_map[0];

	/* Only one dump buffer per HA; keep an existing allocation. */
	if (ha->fw_dump) {
		qla_printk(KERN_WARNING, ha,
		    "Firmware dump previously allocated.\n");
		return;
	}

	ha->fw_dumped = 0;
	fixed_size = mem_size = eft_size = fce_size = mq_size = 0;
	if (IS_QLA2100(ha) || IS_QLA2200(ha)) {
		fixed_size = sizeof(struct qla2100_fw_dump);
	} else if (IS_QLA23XX(ha)) {
		fixed_size = offsetof(struct qla2300_fw_dump, data_ram);
		/* Firmware memory above 0x11000, dumped as 16-bit words. */
		mem_size = (ha->fw_memory_size - 0x11000 + 1) *
		    sizeof(uint16_t);
	} else if (IS_FWI2_CAPABLE(ha)) {
		if (IS_QLA81XX(ha))
			fixed_size = offsetof(struct qla81xx_fw_dump, ext_mem);
		else if (IS_QLA25XX(ha))
			fixed_size = offsetof(struct qla25xx_fw_dump, ext_mem);
		else
			fixed_size = offsetof(struct qla24xx_fw_dump, ext_mem);
		/* Firmware memory above 0x100000, dumped as 32-bit words. */
		mem_size = (ha->fw_memory_size - 0x100000 + 1) *
		    sizeof(uint32_t);
		if (ha->mqenable)
			mq_size = sizeof(struct qla2xxx_mq_chain);
		/* Allocate memory for Fibre Channel Event Buffer. */
		if (!IS_QLA25XX(ha) && !IS_QLA81XX(ha))
			goto try_eft;

		tc = dma_alloc_coherent(&ha->pdev->dev, FCE_SIZE, &tc_dma,
		    GFP_KERNEL);
		if (!tc) {
			/* FCE is best-effort: fall through to EFT. */
			qla_printk(KERN_WARNING, ha, "Unable to allocate "
			    "(%d KB) for FCE.\n", FCE_SIZE / 1024);
			goto try_eft;
		}

		memset(tc, 0, FCE_SIZE);
		rval = qla2x00_enable_fce_trace(vha, tc_dma, FCE_NUM_BUFFERS,
		    ha->fce_mb, &ha->fce_bufs);
		if (rval) {
			qla_printk(KERN_WARNING, ha, "Unable to initialize "
			    "FCE (%d).\n", rval);
			dma_free_coherent(&ha->pdev->dev, FCE_SIZE, tc,
			    tc_dma);
			ha->flags.fce_enabled = 0;
			goto try_eft;
		}

		qla_printk(KERN_INFO, ha, "Allocated (%d KB) for FCE...\n",
		    FCE_SIZE / 1024);

		/* FCE dumps as a chain entry: header plus the buffer. */
		fce_size = sizeof(struct qla2xxx_fce_chain) + FCE_SIZE;
		ha->flags.fce_enabled = 1;
		ha->fce_dma = tc_dma;
		ha->fce = tc;
try_eft:
		/* Allocate memory for Extended Trace Buffer. */
		tc = dma_alloc_coherent(&ha->pdev->dev, EFT_SIZE, &tc_dma,
		    GFP_KERNEL);
		if (!tc) {
			qla_printk(KERN_WARNING, ha, "Unable to allocate "
			    "(%d KB) for EFT.\n", EFT_SIZE / 1024);
			goto cont_alloc;
		}

		memset(tc, 0, EFT_SIZE);
		rval = qla2x00_enable_eft_trace(vha, tc_dma, EFT_NUM_BUFFERS);
		if (rval) {
			qla_printk(KERN_WARNING, ha, "Unable to initialize "
			    "EFT (%d).\n", rval);
			dma_free_coherent(&ha->pdev->dev, EFT_SIZE, tc,
			    tc_dma);
			goto cont_alloc;
		}

		qla_printk(KERN_INFO, ha, "Allocated (%d KB) for EFT...\n",
		    EFT_SIZE / 1024);

		eft_size = EFT_SIZE;
		ha->eft_dma = tc_dma;
		ha->eft = tc;
	}
cont_alloc:
	req_q_size = req->length * sizeof(request_t);
	rsp_q_size = rsp->length * sizeof(response_t);

	dump_size = offsetof(struct qla2xxx_fw_dump, isp);
	dump_size += fixed_size + mem_size + req_q_size + rsp_q_size + eft_size;
	/* Chained entries (MQ/FCE) start right after the base dump. */
	ha->chain_offset = dump_size;
	dump_size += mq_size + fce_size;

	ha->fw_dump = vmalloc(dump_size);
	if (!ha->fw_dump) {
		qla_printk(KERN_WARNING, ha, "Unable to allocate (%d KB) for "
		    "firmware dump!!!\n", dump_size / 1024);

		/* Without a dump buffer the trace buffers are useless --
		 * release them again. */
		if (ha->fce) {
			dma_free_coherent(&ha->pdev->dev, FCE_SIZE, ha->fce,
			    ha->fce_dma);
			ha->fce = NULL;
			ha->fce_dma = 0;
		}

		if (ha->eft) {
			dma_free_coherent(&ha->pdev->dev, eft_size, ha->eft,
			    ha->eft_dma);
			ha->eft = NULL;
			ha->eft_dma = 0;
		}
		return;
	}
	qla_printk(KERN_INFO, ha, "Allocated (%d KB) for firmware dump...\n",
	    dump_size / 1024);

	/* Pre-fill the dump header (sizes stored big-endian). */
	ha->fw_dump_len = dump_size;
	ha->fw_dump->signature[0] = 'Q';
	ha->fw_dump->signature[1] = 'L';
	ha->fw_dump->signature[2] = 'G';
	ha->fw_dump->signature[3] = 'C';
	ha->fw_dump->version = __constant_htonl(1);

	ha->fw_dump->fixed_size = htonl(fixed_size);
	ha->fw_dump->mem_size = htonl(mem_size);
	ha->fw_dump->req_q_size = htonl(req_q_size);
	ha->fw_dump->rsp_q_size = htonl(rsp_q_size);

	ha->fw_dump->eft_size = htonl(eft_size);
	ha->fw_dump->eft_addr_l = htonl(LSD(ha->eft_dma));
	ha->fw_dump->eft_addr_h = htonl(MSD(ha->eft_dma));

	ha->fw_dump->header_size =
	    htonl(offsetof(struct qla2xxx_fw_dump, isp));
}
1382
/*
 * qla81xx_mpi_sync() - Align the host MPS setting with the MPI firmware.
 * @vha: HA context
 *
 * ISP81xx only.  Reads the MPS field (bits 7:5, MPS_MASK) from PCI
 * config offset 0x54 and, if it disagrees with the copy held in RISC
 * RAM word 0x7a15, splices the PCI-config value into that word.
 * Access to the shared word is serialized through a semaphore word at
 * RISC RAM 0x7c00 (write 1 to take, 0 to release).
 *
 * Returns QLA_SUCCESS on success.  NOTE(review): the release write at
 * done_release overwrites rval, so an earlier read/write failure is
 * masked if the release itself succeeds -- confirm callers only care
 * about best-effort sync.
 */
static int
qla81xx_mpi_sync(scsi_qla_host_t *vha)
{
#define MPS_MASK 0xe0
	int rval;
	uint16_t dc;
	uint32_t dw;
	struct qla_hw_data *ha = vha->hw;

	if (!IS_QLA81XX(vha->hw))
		return QLA_SUCCESS;

	/* Take the semaphore before touching the shared setting. */
	rval = qla2x00_write_ram_word(vha, 0x7c00, 1);
	if (rval != QLA_SUCCESS) {
		DEBUG2(qla_printk(KERN_WARNING, ha,
		    "Sync-MPI: Unable to acquire semaphore.\n"));
		goto done;
	}

	pci_read_config_word(vha->hw->pdev, 0x54, &dc);
	rval = qla2x00_read_ram_word(vha, 0x7a15, &dw);
	if (rval != QLA_SUCCESS) {
		DEBUG2(qla_printk(KERN_WARNING, ha,
		    "Sync-MPI: Unable to read sync.\n"));
		goto done_release;
	}

	/* Nothing to do when both MPS fields already agree. */
	dc &= MPS_MASK;
	if (dc == (dw & MPS_MASK))
		goto done_release;

	/* Splice the PCI-config MPS bits into the RISC RAM word. */
	dw &= ~MPS_MASK;
	dw |= dc;
	rval = qla2x00_write_ram_word(vha, 0x7a15, dw);
	if (rval != QLA_SUCCESS) {
		DEBUG2(qla_printk(KERN_WARNING, ha,
		    "Sync-MPI: Unable to gain sync.\n"));
	}

done_release:
	rval = qla2x00_write_ram_word(vha, 0x7c00, 0);
	if (rval != QLA_SUCCESS) {
		DEBUG2(qla_printk(KERN_WARNING, ha,
		    "Sync-MPI: Unable to release semaphore.\n"));
	}

done:
	return rval;
}
1432
/**
 * qla2x00_setup_chip() - Load and start RISC firmware.
 * @vha: HA context
 *
 * Loads the firmware via the ISP-specific load_risc() method, verifies
 * its checksum, starts execution and retrieves the running firmware's
 * version and capability data.  On the first successful bring-up the
 * firmware-dump buffer is also allocated.  ISP82xx parts take a short
 * path: the firmware is loaded and validated, then the common
 * version-query code is entered via the enable_82xx_npiv label.
 *
 * Returns 0 (QLA_SUCCESS) on success.
 */
static int
qla2x00_setup_chip(scsi_qla_host_t *vha)
{
	int rval;
	uint32_t srisc_address = 0;
	struct qla_hw_data *ha = vha->hw;
	struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
	unsigned long flags;
	uint16_t fw_major_version;

	if (IS_QLA82XX(ha)) {
		rval = ha->isp_ops->load_risc(vha, &srisc_address);
		if (rval == QLA_SUCCESS) {
			qla2x00_stop_firmware(vha);
			goto enable_82xx_npiv;
		} else
			goto failed;
	}

	if (!IS_FWI2_CAPABLE(ha) && !IS_QLA2100(ha) && !IS_QLA2200(ha)) {
		/* Disable SRAM, Instruction RAM and GP RAM parity. */
		spin_lock_irqsave(&ha->hardware_lock, flags);
		WRT_REG_WORD(&reg->hccr, (HCCR_ENABLE_PARITY + 0x0));
		RD_REG_WORD(&reg->hccr);	/* flush PCI posting */
		spin_unlock_irqrestore(&ha->hardware_lock, flags);
	}

	qla81xx_mpi_sync(vha);

	/* Load firmware sequences */
	rval = ha->isp_ops->load_risc(vha, &srisc_address);
	if (rval == QLA_SUCCESS) {
		DEBUG(printk("scsi(%ld): Verifying Checksum of loaded RISC "
		    "code.\n", vha->host_no));

		rval = qla2x00_verify_checksum(vha, srisc_address);
		if (rval == QLA_SUCCESS) {
			/* Start firmware execution. */
			DEBUG(printk("scsi(%ld): Checksum OK, start "
			    "firmware.\n", vha->host_no));

			rval = qla2x00_execute_fw(vha, srisc_address);
			/* Retrieve firmware information. */
			if (rval == QLA_SUCCESS) {
				/* ISP82xx jumps here once its firmware has
				 * been validated above. */
enable_82xx_npiv:
				/* Remember the pre-query version: zero means
				 * this is the first firmware bring-up. */
				fw_major_version = ha->fw_major_version;
				rval = qla2x00_get_fw_version(vha,
				    &ha->fw_major_version,
				    &ha->fw_minor_version,
				    &ha->fw_subminor_version,
				    &ha->fw_attributes, &ha->fw_memory_size,
				    ha->mpi_version, &ha->mpi_capabilities,
				    ha->phy_version);
				if (rval != QLA_SUCCESS)
					goto failed;
				ha->flags.npiv_supported = 0;
				if (IS_QLA2XXX_MIDTYPE(ha) &&
				    (ha->fw_attributes & BIT_2)) {
					ha->flags.npiv_supported = 1;
					/* Clamp to MIN_MULTI_ID_FABRIC - 1
					 * when the reported limit is zero or
					 * not one less than a multiple of
					 * MIN_MULTI_ID_FABRIC. */
					if ((!ha->max_npiv_vports) ||
					    ((ha->max_npiv_vports + 1) %
					    MIN_MULTI_ID_FABRIC))
						ha->max_npiv_vports =
						    MIN_MULTI_ID_FABRIC - 1;
				}
				qla2x00_get_resource_cnts(vha, NULL,
				    &ha->fw_xcb_count, NULL, NULL,
				    &ha->max_npiv_vports, NULL);

				/* Allocate the dump buffer only on the first
				 * bring-up (and never on ISP82xx). */
				if (!fw_major_version && ql2xallocfwdump) {
					if (!IS_QLA82XX(ha))
						qla2x00_alloc_fw_dump(vha);
				}
			}
		} else {
			DEBUG2(printk(KERN_INFO
			    "scsi(%ld): ISP Firmware failed checksum.\n",
			    vha->host_no));
		}
	}

	if (!IS_FWI2_CAPABLE(ha) && !IS_QLA2100(ha) && !IS_QLA2200(ha)) {
		/* Enable proper parity. */
		spin_lock_irqsave(&ha->hardware_lock, flags);
		if (IS_QLA2300(ha))
			/* SRAM parity */
			WRT_REG_WORD(&reg->hccr, HCCR_ENABLE_PARITY + 0x1);
		else
			/* SRAM, Instruction RAM and GP RAM parity */
			WRT_REG_WORD(&reg->hccr, HCCR_ENABLE_PARITY + 0x7);
		RD_REG_WORD(&reg->hccr);	/* flush PCI posting */
		spin_unlock_irqrestore(&ha->hardware_lock, flags);
	}

	/* Flash-access (FAC) firmware: cache the flash sector size. */
	if (rval == QLA_SUCCESS && IS_FAC_REQUIRED(ha)) {
		uint32_t size;

		rval = qla81xx_fac_get_sector_size(vha, &size);
		if (rval == QLA_SUCCESS) {
			ha->flags.fac_supported = 1;
			ha->fdt_block_size = size << 2;
		} else {
			qla_printk(KERN_ERR, ha,
			    "Unsupported FAC firmware (%d.%02d.%02d).\n",
			    ha->fw_major_version, ha->fw_minor_version,
			    ha->fw_subminor_version);
		}
	}
failed:
	if (rval) {
		DEBUG2_3(printk("scsi(%ld): Setup chip **** FAILED ****.\n",
		    vha->host_no));
	}

	return (rval);
}
1555
1556 /**
1557 * qla2x00_init_response_q_entries() - Initializes response queue entries.
1558 * @ha: HA context
1559 *
1560 * Beginning of request ring has initialization control block already built
1561 * by nvram config routine.
1562 *
1563 * Returns 0 on success.
1564 */
1565 void
1566 qla2x00_init_response_q_entries(struct rsp_que *rsp)
1567 {
1568 uint16_t cnt;
1569 response_t *pkt;
1570
1571 rsp->ring_ptr = rsp->ring;
1572 rsp->ring_index = 0;
1573 rsp->status_srb = NULL;
1574 pkt = rsp->ring_ptr;
1575 for (cnt = 0; cnt < rsp->length; cnt++) {
1576 pkt->signature = RESPONSE_PROCESSED;
1577 pkt++;
1578 }
1579 }
1580
/**
 * qla2x00_update_fw_options() - Read and process firmware options.
 * @vha: HA context
 *
 * Fetches the current firmware option words and, for ISP23xx-class
 * parts, folds the NVRAM serial-link settings (swing, emphasis,
 * transmit/receive sensitivity for 1G and 2G rates) plus FCP2,
 * LED-scheme and ISP6312 tweaks into them before writing the options
 * back to the firmware.  ISP2100/2200 take no option updates.
 */
void
qla2x00_update_fw_options(scsi_qla_host_t *vha)
{
	uint16_t swing, emphasis, tx_sens, rx_sens;
	struct qla_hw_data *ha = vha->hw;

	memset(ha->fw_options, 0, sizeof(ha->fw_options));
	qla2x00_get_fw_options(vha, ha->fw_options);

	if (IS_QLA2100(ha) || IS_QLA2200(ha))
		return;

	/* Serial Link options. */
	DEBUG3(printk("scsi(%ld): Serial link options:\n",
	    vha->host_no));
	DEBUG3(qla2x00_dump_buffer((uint8_t *)&ha->fw_seriallink_options,
	    sizeof(ha->fw_seriallink_options)));

	/* seriallink_options[3] BIT_2 flags the NVRAM settings as valid. */
	ha->fw_options[1] &= ~FO1_SET_EMPHASIS_SWING;
	if (ha->fw_seriallink_options[3] & BIT_2) {
		ha->fw_options[1] |= FO1_SET_EMPHASIS_SWING;

		/* 1G settings */
		swing = ha->fw_seriallink_options[2] & (BIT_2 | BIT_1 | BIT_0);
		emphasis = (ha->fw_seriallink_options[2] &
		    (BIT_4 | BIT_3)) >> 3;
		tx_sens = ha->fw_seriallink_options[0] &
		    (BIT_3 | BIT_2 | BIT_1 | BIT_0);
		rx_sens = (ha->fw_seriallink_options[0] &
		    (BIT_7 | BIT_6 | BIT_5 | BIT_4)) >> 4;
		ha->fw_options[10] = (emphasis << 14) | (swing << 8);
		if (IS_QLA2300(ha) || IS_QLA2312(ha) || IS_QLA6312(ha)) {
			/* Treat an rx sensitivity of 0 as unset; use 3. */
			if (rx_sens == 0x0)
				rx_sens = 0x3;
			ha->fw_options[10] |= (tx_sens << 4) | rx_sens;
		} else if (IS_QLA2322(ha) || IS_QLA6322(ha))
			ha->fw_options[10] |= BIT_5 |
			    ((rx_sens & (BIT_1 | BIT_0)) << 2) |
			    (tx_sens & (BIT_1 | BIT_0));

		/* 2G settings */
		swing = (ha->fw_seriallink_options[2] &
		    (BIT_7 | BIT_6 | BIT_5)) >> 5;
		emphasis = ha->fw_seriallink_options[3] & (BIT_1 | BIT_0);
		tx_sens = ha->fw_seriallink_options[1] &
		    (BIT_3 | BIT_2 | BIT_1 | BIT_0);
		rx_sens = (ha->fw_seriallink_options[1] &
		    (BIT_7 | BIT_6 | BIT_5 | BIT_4)) >> 4;
		ha->fw_options[11] = (emphasis << 14) | (swing << 8);
		if (IS_QLA2300(ha) || IS_QLA2312(ha) || IS_QLA6312(ha)) {
			/* Treat an rx sensitivity of 0 as unset; use 3. */
			if (rx_sens == 0x0)
				rx_sens = 0x3;
			ha->fw_options[11] |= (tx_sens << 4) | rx_sens;
		} else if (IS_QLA2322(ha) || IS_QLA6322(ha))
			ha->fw_options[11] |= BIT_5 |
			    ((rx_sens & (BIT_1 | BIT_0)) << 2) |
			    (tx_sens & (BIT_1 | BIT_0));
	}

	/* FCP2 options. */
	/* Return command IOCBs without waiting for an ABTS to complete. */
	ha->fw_options[3] |= BIT_13;

	/* LED scheme. */
	if (ha->flags.enable_led_scheme)
		ha->fw_options[2] |= BIT_12;

	/* Detect ISP6312. */
	if (IS_QLA6312(ha))
		ha->fw_options[2] |= BIT_13;

	/* Update firmware options. */
	qla2x00_set_fw_options(vha, ha->fw_options);
}
1661
1662 void
1663 qla24xx_update_fw_options(scsi_qla_host_t *vha)
1664 {
1665 int rval;
1666 struct qla_hw_data *ha = vha->hw;
1667
1668 if (IS_QLA82XX(ha))
1669 return;
1670
1671 /* Update Serial Link options. */
1672 if ((le16_to_cpu(ha->fw_seriallink_options24[0]) & BIT_0) == 0)
1673 return;
1674
1675 rval = qla2x00_set_serdes_params(vha,
1676 le16_to_cpu(ha->fw_seriallink_options24[1]),
1677 le16_to_cpu(ha->fw_seriallink_options24[2]),
1678 le16_to_cpu(ha->fw_seriallink_options24[3]));
1679 if (rval != QLA_SUCCESS) {
1680 qla_printk(KERN_WARNING, ha,
1681 "Unable to update Serial Link options (%x).\n", rval);
1682 }
1683 }
1684
/*
 * qla2x00_config_rings() - Program base ring parameters into the ISP.
 * @vha: HA context
 *
 * Fills the init control block with the base request/response queue
 * lengths and 64-bit DMA addresses, then zeroes the hardware queue
 * in/out pointer registers.  The final read of RSP_Q_OUT flushes PCI
 * posting.  Called with hardware_lock held (from qla2x00_init_rings()
 * via isp_ops->config_rings).
 */
void
qla2x00_config_rings(struct scsi_qla_host *vha)
{
	struct qla_hw_data *ha = vha->hw;
	struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
	struct req_que *req = ha->req_q_map[0];
	struct rsp_que *rsp = ha->rsp_q_map[0];

	/* Setup ring parameters in initialization control block. */
	ha->init_cb->request_q_outpointer = __constant_cpu_to_le16(0);
	ha->init_cb->response_q_inpointer = __constant_cpu_to_le16(0);
	ha->init_cb->request_q_length = cpu_to_le16(req->length);
	ha->init_cb->response_q_length = cpu_to_le16(rsp->length);
	ha->init_cb->request_q_address[0] = cpu_to_le32(LSD(req->dma));
	ha->init_cb->request_q_address[1] = cpu_to_le32(MSD(req->dma));
	ha->init_cb->response_q_address[0] = cpu_to_le32(LSD(rsp->dma));
	ha->init_cb->response_q_address[1] = cpu_to_le32(MSD(rsp->dma));

	WRT_REG_WORD(ISP_REQ_Q_IN(ha, reg), 0);
	WRT_REG_WORD(ISP_REQ_Q_OUT(ha, reg), 0);
	WRT_REG_WORD(ISP_RSP_Q_IN(ha, reg), 0);
	WRT_REG_WORD(ISP_RSP_Q_OUT(ha, reg), 0);
	RD_REG_WORD(ISP_RSP_Q_OUT(ha, reg));	/* PCI Posting. */
}
1709
/*
 * qla24xx_config_rings() - Program ring parameters into an FWI2 ISP.
 * @vha: HA context
 *
 * Fills the ISP24xx init control block with the base queue geometry
 * and, when multiqueue is enabled, the QoS/RID/MSI-X options, then
 * zeroes the hardware queue in/out pointers.  The final HCCR read
 * flushes PCI posting.  Called with hardware_lock held (from
 * qla2x00_init_rings() via isp_ops->config_rings).
 */
void
qla24xx_config_rings(struct scsi_qla_host *vha)
{
	struct qla_hw_data *ha = vha->hw;
	device_reg_t __iomem *reg = ISP_QUE_REG(ha, 0);
	struct device_reg_2xxx __iomem *ioreg = &ha->iobase->isp;
	struct qla_msix_entry *msix;
	struct init_cb_24xx *icb;
	uint16_t rid = 0;
	struct req_que *req = ha->req_q_map[0];
	struct rsp_que *rsp = ha->rsp_q_map[0];

	/* Setup ring parameters in initialization control block. */
	icb = (struct init_cb_24xx *)ha->init_cb;
	icb->request_q_outpointer = __constant_cpu_to_le16(0);
	icb->response_q_inpointer = __constant_cpu_to_le16(0);
	icb->request_q_length = cpu_to_le16(req->length);
	icb->response_q_length = cpu_to_le16(rsp->length);
	icb->request_q_address[0] = cpu_to_le32(LSD(req->dma));
	icb->request_q_address[1] = cpu_to_le32(MSD(req->dma));
	icb->response_q_address[0] = cpu_to_le32(LSD(rsp->dma));
	icb->response_q_address[1] = cpu_to_le32(MSD(rsp->dma));

	if (ha->mqenable) {
		icb->qos = __constant_cpu_to_le16(QLA_DEFAULT_QUE_QOS);
		icb->rid = __constant_cpu_to_le16(rid);
		if (ha->flags.msix_enabled) {
			/* Entry 0 is the default vector; entry 1 serves the
			 * base queue in multiqueue mode. */
			msix = &ha->msix_entries[1];
			DEBUG2_17(printk(KERN_INFO
			"Registering vector 0x%x for base que\n", msix->entry));
			icb->msix = cpu_to_le16(msix->entry);
		}
		/* NOTE(review): rid is always 0 here, so the two alternate
		 * bus/devfn options below are currently never set. */
		/* Use alternate PCI bus number */
		if (MSB(rid))
			icb->firmware_options_2 |=
				__constant_cpu_to_le32(BIT_19);
		/* Use alternate PCI devfn */
		if (LSB(rid))
			icb->firmware_options_2 |=
				__constant_cpu_to_le32(BIT_18);

		/* Use Disable MSIX Handshake mode for capable adapters */
		if (IS_MSIX_NACK_CAPABLE(ha)) {
			icb->firmware_options_2 &=
				__constant_cpu_to_le32(~BIT_22);
			ha->flags.disable_msix_handshake = 1;
			qla_printk(KERN_INFO, ha,
				"MSIX Handshake Disable Mode turned on\n");
		} else {
			icb->firmware_options_2 |=
				__constant_cpu_to_le32(BIT_22);
		}
		icb->firmware_options_2 |= __constant_cpu_to_le32(BIT_23);

		WRT_REG_DWORD(&reg->isp25mq.req_q_in, 0);
		WRT_REG_DWORD(&reg->isp25mq.req_q_out, 0);
		WRT_REG_DWORD(&reg->isp25mq.rsp_q_in, 0);
		WRT_REG_DWORD(&reg->isp25mq.rsp_q_out, 0);
	} else {
		WRT_REG_DWORD(&reg->isp24.req_q_in, 0);
		WRT_REG_DWORD(&reg->isp24.req_q_out, 0);
		WRT_REG_DWORD(&reg->isp24.rsp_q_in, 0);
		WRT_REG_DWORD(&reg->isp24.rsp_q_out, 0);
	}
	/* PCI posting */
	RD_REG_DWORD(&ioreg->hccr);
}
1777
/**
 * qla2x00_init_rings() - Initializes firmware.
 * @vha: HA context
 *
 * Beginning of request ring has initialization control block already built
 * by nvram config routine.
 *
 * Resets every request/response queue, clears the per-vport RSCN
 * queues, programs the ring registers via isp_ops->config_rings()
 * (under hardware_lock), applies ISP-specific firmware options and
 * finally issues the init-firmware mailbox command.
 *
 * Returns 0 on success.
 */
static int
qla2x00_init_rings(scsi_qla_host_t *vha)
{
	int rval;
	unsigned long flags = 0;
	int cnt, que;
	struct qla_hw_data *ha = vha->hw;
	struct req_que *req;
	struct rsp_que *rsp;
	struct scsi_qla_host *vp;
	struct mid_init_cb_24xx *mid_init_cb =
	    (struct mid_init_cb_24xx *) ha->init_cb;

	spin_lock_irqsave(&ha->hardware_lock, flags);

	/* Clear outstanding commands array. */
	for (que = 0; que < ha->max_req_queues; que++) {
		req = ha->req_q_map[que];
		if (!req)
			continue;
		/* Slot 0 is never assigned: handles start at index 1. */
		for (cnt = 1; cnt < MAX_OUTSTANDING_COMMANDS; cnt++)
			req->outstanding_cmds[cnt] = NULL;

		req->current_outstanding_cmd = 1;

		/* Initialize firmware. */
		req->ring_ptr  = req->ring;
		req->ring_index    = 0;
		req->cnt      = req->length;
	}

	for (que = 0; que < ha->max_rsp_queues; que++) {
		rsp = ha->rsp_q_map[que];
		if (!rsp)
			continue;
		/* Initialize response queue entries */
		qla2x00_init_response_q_entries(rsp);
	}

	/* vport_slock nests inside hardware_lock here. */
	spin_lock(&ha->vport_slock);
	/* Clear RSCN queue. */
	list_for_each_entry(vp, &ha->vp_list, list) {
		vp->rscn_in_ptr = 0;
		vp->rscn_out_ptr = 0;
	}

	spin_unlock(&ha->vport_slock);

	ha->isp_ops->config_rings(vha);

	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	/* Update any ISP specific firmware options before initialization. */
	ha->isp_ops->update_fw_options(vha);

	DEBUG(printk("scsi(%ld): Issue init firmware.\n", vha->host_no));

	if (ha->flags.npiv_supported) {
		if (ha->operating_mode == LOOP)
			ha->max_npiv_vports = MIN_MULTI_ID_FABRIC - 1;
		mid_init_cb->count = cpu_to_le16(ha->max_npiv_vports);
	}

	if (IS_FWI2_CAPABLE(ha)) {
		mid_init_cb->options = __constant_cpu_to_le16(BIT_1);
		mid_init_cb->init_cb.execution_throttle =
		    cpu_to_le16(ha->fw_xcb_count);
	}

	rval = qla2x00_init_firmware(vha, ha->init_cb_size);
	if (rval) {
		DEBUG2_3(printk("scsi(%ld): Init firmware **** FAILED ****.\n",
		    vha->host_no));
	} else {
		DEBUG3(printk("scsi(%ld): Init firmware -- success.\n",
		    vha->host_no));
	}

	return (rval);
}
1867
/**
 * qla2x00_fw_ready() - Waits for firmware ready.
 * @vha: HA context
 *
 * Polls the firmware state every 500 ms until it reports FSTATE_READY
 * or a deadline expires.  Two deadlines apply: mtime (min_wait, 20 s)
 * bounds the loop-down / mailbox-failure cases, wtime (retry_count *
 * login_timeout + 5 s, at least min_wait) bounds the overall wait.
 * ISP84xx parts that are logged in but waiting for verification get a
 * verify IOCB, and the time that takes is added to both deadlines.
 *
 * Returns 0 on success.
 */
static int
qla2x00_fw_ready(scsi_qla_host_t *vha)
{
	int rval;
	unsigned long wtime, mtime, cs84xx_time;
	uint16_t min_wait;	/* Minimum wait time if loop is down */
	uint16_t wait_time;	/* Wait time if loop is coming ready */
	uint16_t state[5];
	struct qla_hw_data *ha = vha->hw;

	rval = QLA_SUCCESS;

	/* 20 seconds for loop down. */
	min_wait = 20;

	/*
	 * Firmware should take at most one RATOV to login, plus 5 seconds for
	 * our own processing.
	 */
	if ((wait_time = (ha->retry_count*ha->login_timeout) + 5) < min_wait) {
		wait_time = min_wait;
	}

	/* Min wait time if loop down */
	mtime = jiffies + (min_wait * HZ);

	/* wait time before firmware ready */
	wtime = jiffies + (wait_time * HZ);

	/* Wait for ISP to finish LIP */
	if (!vha->flags.init_done)
		qla_printk(KERN_INFO, ha, "Waiting for LIP to complete...\n");

	DEBUG3(printk("scsi(%ld): Waiting for LIP to complete...\n",
	    vha->host_no));

	do {
		rval = qla2x00_get_firmware_state(vha, state);
		if (rval == QLA_SUCCESS) {
			/* Any state below loss-of-sync means a cable is
			 * present. */
			if (state[0] < FSTATE_LOSS_OF_SYNC) {
				vha->device_flags &= ~DFLG_NO_CABLE;
			}
			if (IS_QLA84XX(ha) && state[0] != FSTATE_READY) {
				DEBUG16(printk("scsi(%ld): fw_state=%x "
				    "84xx=%x.\n", vha->host_no, state[0],
				    state[2]));
				if ((state[2] & FSTATE_LOGGED_IN) &&
				     (state[2] & FSTATE_WAITING_FOR_VERIFY)) {
					DEBUG16(printk("scsi(%ld): Sending "
					    "verify iocb.\n", vha->host_no));

					cs84xx_time = jiffies;
					rval = qla84xx_init_chip(vha);
					if (rval != QLA_SUCCESS)
						break;

					/* Add time taken to initialize. */
					cs84xx_time = jiffies - cs84xx_time;
					wtime += cs84xx_time;
					mtime += cs84xx_time;
					DEBUG16(printk("scsi(%ld): Increasing "
					    "wait time by %ld. New time %ld\n",
					    vha->host_no, cs84xx_time, wtime));
				}
			} else if (state[0] == FSTATE_READY) {
				DEBUG(printk("scsi(%ld): F/W Ready - OK \n",
				    vha->host_no));

				qla2x00_get_retry_cnt(vha, &ha->retry_count,
				    &ha->login_timeout, &ha->r_a_tov);

				rval = QLA_SUCCESS;
				break;
			}

			/* Not ready yet; assume failure until proven
			 * otherwise on the next iteration. */
			rval = QLA_FUNCTION_FAILED;

			if (atomic_read(&vha->loop_down_timer) &&
			    state[0] != FSTATE_READY) {
				/* Loop down. Timeout on min_wait for states
				 * other than Wait for Login.
				 */
				if (time_after_eq(jiffies, mtime)) {
					qla_printk(KERN_INFO, ha,
					    "Cable is unplugged...\n");

					vha->device_flags |= DFLG_NO_CABLE;
					break;
				}
			}
		} else {
			/* Mailbox cmd failed. Timeout on min_wait. */
			if (time_after_eq(jiffies, mtime) ||
			    ha->flags.isp82xx_fw_hung)
				break;
		}

		if (time_after_eq(jiffies, wtime))
			break;

		/* Delay for a while */
		msleep(500);

		DEBUG3(printk("scsi(%ld): fw_state=%x curr time=%lx.\n",
		    vha->host_no, state[0], jiffies));
	} while (1);

	DEBUG(printk("scsi(%ld): fw_state=%x (%x, %x, %x, %x) curr time=%lx.\n",
	    vha->host_no, state[0], state[1], state[2], state[3], state[4],
	    jiffies));

	if (rval) {
		DEBUG2_3(printk("scsi(%ld): Firmware ready **** FAILED ****.\n",
		    vha->host_no));
	}

	return (rval);
}
1992
/*
 * qla2x00_configure_hba
 *	Setup adapter context.
 *
 *	Queries the adapter ID mailbox command for loop ID, AL_PA/area/
 *	domain and topology, then records the operating mode (LOOP/P2P),
 *	topology type and port ID on the host.  Transient failures during
 *	loop transition are tolerated; hard failures schedule an ISP
 *	abort.  A reported topology of 4 ("cannot get topology") is
 *	returned as a retryable failure.
 *
 * Input:
 *	vha = adapter state pointer.
 *
 * Returns:
 *	0 = success
 *
 * Context:
 *	Kernel context.
 */
static int
qla2x00_configure_hba(scsi_qla_host_t *vha)
{
	int rval;
	uint16_t loop_id;
	uint16_t topo;
	uint16_t sw_cap;
	uint8_t al_pa;
	uint8_t area;
	uint8_t domain;
	char connect_type[22];
	struct qla_hw_data *ha = vha->hw;

	/* Get host addresses. */
	rval = qla2x00_get_adapter_id(vha,
	    &loop_id, &al_pa, &area, &domain, &topo, &sw_cap);
	if (rval != QLA_SUCCESS) {
		/* Loop-transition conditions are expected and only logged;
		 * anything else forces an ISP abort. */
		if (LOOP_TRANSITION(vha) || atomic_read(&ha->loop_down_timer) ||
		    IS_QLA8XXX_TYPE(ha) ||
		    (rval == QLA_COMMAND_ERROR && loop_id == 0x7)) {
			DEBUG2(printk("%s(%ld) Loop is in a transition state\n",
			    __func__, vha->host_no));
		} else {
			qla_printk(KERN_WARNING, ha,
			    "ERROR -- Unable to get host loop ID.\n");
			set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
		}
		return (rval);
	}

	if (topo == 4) {
		qla_printk(KERN_INFO, ha,
			"Cannot get topology - retrying.\n");
		return (QLA_FUNCTION_FAILED);
	}

	vha->loop_id = loop_id;

	/* initialize */
	ha->min_external_loopid = SNS_FIRST_LOOP_ID;
	ha->operating_mode = LOOP;
	ha->switch_cap = 0;

	/* topo: 0=NL, 1=FL, 2=N (point-to-point), 3=F (fabric). */
	switch (topo) {
	case 0:
		DEBUG3(printk("scsi(%ld): HBA in NL topology.\n",
		    vha->host_no));
		ha->current_topology = ISP_CFG_NL;
		strcpy(connect_type, "(Loop)");
		break;

	case 1:
		DEBUG3(printk("scsi(%ld): HBA in FL topology.\n",
		    vha->host_no));
		ha->switch_cap = sw_cap;
		ha->current_topology = ISP_CFG_FL;
		strcpy(connect_type, "(FL_Port)");
		break;

	case 2:
		DEBUG3(printk("scsi(%ld): HBA in N P2P topology.\n",
		    vha->host_no));
		ha->operating_mode = P2P;
		ha->current_topology = ISP_CFG_N;
		strcpy(connect_type, "(N_Port-to-N_Port)");
		break;

	case 3:
		DEBUG3(printk("scsi(%ld): HBA in F P2P topology.\n",
		    vha->host_no));
		ha->switch_cap = sw_cap;
		ha->operating_mode = P2P;
		ha->current_topology = ISP_CFG_F;
		strcpy(connect_type, "(F_Port)");
		break;

	default:
		DEBUG3(printk("scsi(%ld): HBA in unknown topology %x. "
		    "Using NL.\n",
		    vha->host_no, topo));
		ha->current_topology = ISP_CFG_NL;
		strcpy(connect_type, "(Loop)");
		break;
	}

	/* Save Host port and loop ID. */
	/* byte order - Big Endian */
	vha->d_id.b.domain = domain;
	vha->d_id.b.area = area;
	vha->d_id.b.al_pa = al_pa;

	if (!vha->flags.init_done)
		qla_printk(KERN_INFO, ha,
		    "Topology - %s, Host Loop address 0x%x\n",
		    connect_type, vha->loop_id);

	if (rval) {
		DEBUG2_3(printk("scsi(%ld): FAILED.\n", vha->host_no));
	} else {
		DEBUG3(printk("scsi(%ld): exiting normally.\n", vha->host_no));
	}

	return(rval);
}
2110
/*
 * qla2x00_set_model_info() - Populate ha->model_number / ha->model_desc.
 * @vha: HA context
 * @model: raw model field (may be space-padded and unterminated)
 * @len: length of @model in bytes
 * @def: default model string used when @model is all zero bytes
 *
 * Copies @model (trimming trailing spaces/NULs), or when it is empty
 * falls back to the QLogic subsystem-ID lookup table (pre-24xx parts
 * only) or @def.  FWI2-capable parts additionally override the
 * description from the VPD "\x82" field.
 *
 * NOTE(review): strncpy() does not NUL-terminate when
 * len >= sizeof(ha->model_number) -- confirm the NVRAM model field is
 * strictly smaller than the destination buffer.
 */
inline void
qla2x00_set_model_info(scsi_qla_host_t *vha, uint8_t *model, size_t len,
	char *def)
{
	char *st, *en;
	uint16_t index;
	struct qla_hw_data *ha = vha->hw;
	/* Lookup table applies only to older (pre-24xx/25xx/8xxx) parts. */
	int use_tbl = !IS_QLA24XX_TYPE(ha) && !IS_QLA25XX(ha) &&
	    !IS_QLA8XXX_TYPE(ha);

	if (memcmp(model, BINZERO, len) != 0) {
		strncpy(ha->model_number, model, len);
		/* Trim trailing spaces and NULs in place. */
		st = en = ha->model_number;
		en += len - 1;
		while (en > st) {
			if (*en != 0x20 && *en != 0x00)
				break;
			*en-- = '\0';
		}

		index = (ha->pdev->subsystem_device & 0xff);
		if (use_tbl &&
		    ha->pdev->subsystem_vendor == PCI_VENDOR_ID_QLOGIC &&
		    index < QLA_MODEL_NAMES)
			strncpy(ha->model_desc,
			    qla2x00_model_name[index * 2 + 1],
			    sizeof(ha->model_desc) - 1);
	} else {
		/* Empty model field: use the lookup table or the default. */
		index = (ha->pdev->subsystem_device & 0xff);
		if (use_tbl &&
		    ha->pdev->subsystem_vendor == PCI_VENDOR_ID_QLOGIC &&
		    index < QLA_MODEL_NAMES) {
			strcpy(ha->model_number,
			    qla2x00_model_name[index * 2]);
			strncpy(ha->model_desc,
			    qla2x00_model_name[index * 2 + 1],
			    sizeof(ha->model_desc) - 1);
		} else {
			strcpy(ha->model_number, def);
		}
	}
	if (IS_FWI2_CAPABLE(ha))
		qla2xxx_get_vpd_field(vha, "\x82", ha->model_desc,
		    sizeof(ha->model_desc));
}
2156
2157 /* On sparc systems, obtain port and node WWN from firmware
2158 * properties.
2159 */
2160 static void qla2xxx_nvram_wwn_from_ofw(scsi_qla_host_t *vha, nvram_t *nv)
2161 {
2162 #ifdef CONFIG_SPARC
2163 struct qla_hw_data *ha = vha->hw;
2164 struct pci_dev *pdev = ha->pdev;
2165 struct device_node *dp = pci_device_to_OF_node(pdev);
2166 const u8 *val;
2167 int len;
2168
2169 val = of_get_property(dp, "port-wwn", &len);
2170 if (val && len >= WWN_SIZE)
2171 memcpy(nv->port_name, val, WWN_SIZE);
2172
2173 val = of_get_property(dp, "node-wwn", &len);
2174 if (val && len >= WWN_SIZE)
2175 memcpy(nv->node_name, val, WWN_SIZE);
2176 #endif
2177 }
2178
2179 /*
2180 * NVRAM configuration for ISP 2xxx
2181 *
2182 * Input:
2183 * ha = adapter block pointer.
2184 *
2185 * Output:
2186 * initialization control block in response_ring
2187 * host adapters parameters in host adapter block
2188 *
2189 * Returns:
2190 * 0 = success.
2191 */
/*
 * qla2x00_nvram_config() - Read and validate NVRAM, then build the firmware
 * initialization control block (ICB) and driver tunables for ISP2100/2200/
 * 23xx adapters.
 *
 * @vha: host adapter context
 *
 * On an inconsistent NVRAM image (bad checksum or missing "ISP " signature)
 * the routine falls back to chip-specific defaults, including a placeholder
 * WWPN, and flags the condition via the return value.
 *
 * Returns QLA_SUCCESS (0) when NVRAM was consistent, 1 when defaults were
 * substituted.
 */
int
qla2x00_nvram_config(scsi_qla_host_t *vha)
{
	int rval;
	uint8_t chksum = 0;
	uint16_t cnt;
	uint8_t *dptr1, *dptr2;
	struct qla_hw_data *ha = vha->hw;
	init_cb_t *icb = ha->init_cb;
	nvram_t *nv = ha->nvram;
	uint8_t *ptr = ha->nvram;
	struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;

	rval = QLA_SUCCESS;

	/* Determine NVRAM starting address. */
	ha->nvram_size = sizeof(nvram_t);
	ha->nvram_base = 0;
	if (!IS_QLA2100(ha) && !IS_QLA2200(ha) && !IS_QLA2300(ha))
		/* Function 1 of a dual-channel board uses the upper half. */
		if ((RD_REG_WORD(&reg->ctrl_status) >> 14) == 1)
			ha->nvram_base = 0x80;

	/* Get NVRAM data and calculate checksum. */
	ha->isp_ops->read_nvram(vha, ptr, ha->nvram_base, ha->nvram_size);
	/* A valid image sums (mod 256) to zero over the whole region. */
	for (cnt = 0, chksum = 0; cnt < ha->nvram_size; cnt++)
		chksum += *ptr++;

	DEBUG5(printk("scsi(%ld): Contents of NVRAM\n", vha->host_no));
	DEBUG5(qla2x00_dump_buffer((uint8_t *)nv, ha->nvram_size));

	/* Bad NVRAM data, set defaults parameters. */
	if (chksum || nv->id[0] != 'I' || nv->id[1] != 'S' ||
	    nv->id[2] != 'P' || nv->id[3] != ' ' || nv->nvram_version < 1) {
		/* Reset NVRAM data. */
		qla_printk(KERN_WARNING, ha, "Inconsistent NVRAM detected: "
		    "checksum=0x%x id=%c version=0x%x.\n", chksum, nv->id[0],
		    nv->nvram_version);
		qla_printk(KERN_WARNING, ha, "Falling back to functioning (yet "
		    "invalid -- WWPN) defaults.\n");

		/*
		 * Set default initialization control block.
		 */
		memset(nv, 0, ha->nvram_size);
		nv->parameter_block_version = ICB_VERSION;

		/* Chip-family specific firmware option and frame-size defaults. */
		if (IS_QLA23XX(ha)) {
			nv->firmware_options[0] = BIT_2 | BIT_1;
			nv->firmware_options[1] = BIT_7 | BIT_5;
			nv->add_firmware_options[0] = BIT_5;
			nv->add_firmware_options[1] = BIT_5 | BIT_4;
			nv->frame_payload_size = __constant_cpu_to_le16(2048);
			nv->special_options[1] = BIT_7;
		} else if (IS_QLA2200(ha)) {
			nv->firmware_options[0] = BIT_2 | BIT_1;
			nv->firmware_options[1] = BIT_7 | BIT_5;
			nv->add_firmware_options[0] = BIT_5;
			nv->add_firmware_options[1] = BIT_5 | BIT_4;
			nv->frame_payload_size = __constant_cpu_to_le16(1024);
		} else if (IS_QLA2100(ha)) {
			nv->firmware_options[0] = BIT_3 | BIT_1;
			nv->firmware_options[1] = BIT_5;
			nv->frame_payload_size = __constant_cpu_to_le16(1024);
		}

		nv->max_iocb_allocation = __constant_cpu_to_le16(256);
		nv->execution_throttle = __constant_cpu_to_le16(16);
		nv->retry_count = 8;
		nv->retry_delay = 1;

		/* Placeholder WWPN bytes (21:00:...:e0:8b:...) -- known invalid. */
		nv->port_name[0] = 33;
		nv->port_name[3] = 224;
		nv->port_name[4] = 139;

		/* On SPARC, WWNs may be recovered from OpenFirmware properties. */
		qla2xxx_nvram_wwn_from_ofw(vha, nv);

		nv->login_timeout = 4;

		/*
		 * Set default host adapter parameters
		 */
		nv->host_p[1] = BIT_2;
		nv->reset_delay = 5;
		nv->port_down_retry_count = 8;
		nv->max_luns_per_target = __constant_cpu_to_le16(8);
		nv->link_down_timeout = 60;

		rval = 1;
	}

#if defined(CONFIG_IA64_GENERIC) || defined(CONFIG_IA64_SGI_SN2)
	/*
	 * The SN2 does not provide BIOS emulation which means you can't change
	 * potentially bogus BIOS settings. Force the use of default settings
	 * for link rate and frame size. Hope that the rest of the settings
	 * are valid.
	 */
	if (ia64_platform_is("sn2")) {
		nv->frame_payload_size = __constant_cpu_to_le16(2048);
		if (IS_QLA23XX(ha))
			nv->special_options[1] = BIT_7;
	}
#endif

	/* Reset Initialization control block */
	memset(icb, 0, ha->init_cb_size);

	/*
	 * Setup driver NVRAM options.
	 */
	nv->firmware_options[0] |= (BIT_6 | BIT_1);
	nv->firmware_options[0] &= ~(BIT_5 | BIT_4);
	nv->firmware_options[1] |= (BIT_5 | BIT_0);
	nv->firmware_options[1] &= ~BIT_4;

	if (IS_QLA23XX(ha)) {
		nv->firmware_options[0] |= BIT_2;
		nv->firmware_options[0] &= ~BIT_3;
		nv->firmware_options[0] &= ~BIT_6;
		nv->add_firmware_options[1] |= BIT_5 | BIT_4;

		/* Derive the model string from frame-buffer revision / NVRAM. */
		if (IS_QLA2300(ha)) {
			if (ha->fb_rev == FPM_2310) {
				strcpy(ha->model_number, "QLA2310");
			} else {
				strcpy(ha->model_number, "QLA2300");
			}
		} else {
			qla2x00_set_model_info(vha, nv->model_number,
			    sizeof(nv->model_number), "QLA23xx");
		}
	} else if (IS_QLA2200(ha)) {
		nv->firmware_options[0] |= BIT_2;
		/*
		 * 'Point-to-point preferred, else loop' is not a safe
		 * connection mode setting.
		 */
		if ((nv->add_firmware_options[0] & (BIT_6 | BIT_5 | BIT_4)) ==
		    (BIT_5 | BIT_4)) {
			/* Force 'loop preferred, else point-to-point'. */
			nv->add_firmware_options[0] &= ~(BIT_6 | BIT_5 | BIT_4);
			nv->add_firmware_options[0] |= BIT_5;
		}
		strcpy(ha->model_number, "QLA22xx");
	} else /*if (IS_QLA2100(ha))*/ {
		strcpy(ha->model_number, "QLA2100");
	}

	/*
	 * Copy over NVRAM RISC parameter block to initialization control block.
	 */
	dptr1 = (uint8_t *)icb;
	dptr2 = (uint8_t *)&nv->parameter_block_version;
	cnt = (uint8_t *)&icb->request_q_outpointer - (uint8_t *)&icb->version;
	while (cnt--)
		*dptr1++ = *dptr2++;

	/* Copy 2nd half. */
	/* dptr2 continues from where the first copy stopped -- the NVRAM
	 * layout mirrors the ICB layout across both halves. */
	dptr1 = (uint8_t *)icb->add_firmware_options;
	cnt = (uint8_t *)icb->reserved_3 - (uint8_t *)icb->add_firmware_options;
	while (cnt--)
		*dptr1++ = *dptr2++;

	/* Use alternate WWN? */
	if (nv->host_p[1] & BIT_7) {
		memcpy(icb->node_name, nv->alternate_node_name, WWN_SIZE);
		memcpy(icb->port_name, nv->alternate_port_name, WWN_SIZE);
	}

	/* Prepare nodename */
	if ((icb->firmware_options[1] & BIT_6) == 0) {
		/*
		 * Firmware will apply the following mask if the nodename was
		 * not provided.
		 */
		memcpy(icb->node_name, icb->port_name, WWN_SIZE);
		icb->node_name[0] &= 0xF0;
	}

	/*
	 * Set host adapter parameters.
	 */
	if (nv->host_p[0] & BIT_7)
		ql2xextended_error_logging = 1;
	ha->flags.disable_risc_code_load = ((nv->host_p[0] & BIT_4) ? 1 : 0);
	/* Always load RISC code on non ISP2[12]00 chips. */
	if (!IS_QLA2100(ha) && !IS_QLA2200(ha))
		ha->flags.disable_risc_code_load = 0;
	ha->flags.enable_lip_reset = ((nv->host_p[1] & BIT_1) ? 1 : 0);
	ha->flags.enable_lip_full_login = ((nv->host_p[1] & BIT_2) ? 1 : 0);
	ha->flags.enable_target_reset = ((nv->host_p[1] & BIT_3) ? 1 : 0);
	ha->flags.enable_led_scheme = (nv->special_options[1] & BIT_4) ? 1 : 0;
	ha->flags.disable_serdes = 0;

	ha->operating_mode =
	    (icb->add_firmware_options[0] & (BIT_6 | BIT_5 | BIT_4)) >> 4;

	memcpy(ha->fw_seriallink_options, nv->seriallink_options,
	    sizeof(ha->fw_seriallink_options));

	/* save HBA serial number */
	ha->serial0 = icb->port_name[5];
	ha->serial1 = icb->port_name[6];
	ha->serial2 = icb->port_name[7];
	memcpy(vha->node_name, icb->node_name, WWN_SIZE);
	memcpy(vha->port_name, icb->port_name, WWN_SIZE);

	icb->execution_throttle = __constant_cpu_to_le16(0xFFFF);

	ha->retry_count = nv->retry_count;

	/* Set minimum login_timeout to 4 seconds. */
	/* Module parameter ql2xlogintimeout overrides NVRAM when they differ. */
	if (nv->login_timeout != ql2xlogintimeout)
		nv->login_timeout = ql2xlogintimeout;
	if (nv->login_timeout < 4)
		nv->login_timeout = 4;
	ha->login_timeout = nv->login_timeout;
	icb->login_timeout = nv->login_timeout;

	/* Set minimum RATOV to 100 tenths of a second. */
	ha->r_a_tov = 100;

	ha->loop_reset_delay = nv->reset_delay;

	/* Link Down Timeout = 0:
	 *
	 * 	When Port Down timer expires we will start returning
	 *	I/O's to OS with "DID_NO_CONNECT".
	 *
	 * Link Down Timeout != 0:
	 *
	 *	 The driver waits for the link to come up after link down
	 *	 before returning I/Os to OS with "DID_NO_CONNECT".
	 */
	if (nv->link_down_timeout == 0) {
		ha->loop_down_abort_time =
		    (LOOP_DOWN_TIME - LOOP_DOWN_TIMEOUT);
	} else {
		ha->link_down_timeout = nv->link_down_timeout;
		ha->loop_down_abort_time =
		    (LOOP_DOWN_TIME - ha->link_down_timeout);
	}

	/*
	 * Need enough time to try and get the port back.
	 */
	ha->port_down_retry_count = nv->port_down_retry_count;
	if (qlport_down_retry)
		ha->port_down_retry_count = qlport_down_retry;
	/* Set login_retry_count */
	/* login retries track the larger of NVRAM retry_count and
	 * port_down_retry_count; module parameter wins over both. */
	ha->login_retry_count = nv->retry_count;
	if (ha->port_down_retry_count == nv->port_down_retry_count &&
	    ha->port_down_retry_count > 3)
		ha->login_retry_count = ha->port_down_retry_count;
	else if (ha->port_down_retry_count > (int)ha->login_retry_count)
		ha->login_retry_count = ha->port_down_retry_count;
	if (ql2xloginretrycount)
		ha->login_retry_count = ql2xloginretrycount;

	icb->lun_enables = __constant_cpu_to_le16(0);
	icb->command_resource_count = 0;
	icb->immediate_notify_resource_count = 0;
	icb->timeout = __constant_cpu_to_le16(0);

	if (IS_QLA2100(ha) || IS_QLA2200(ha)) {
		/* Enable RIO */
		icb->firmware_options[0] &= ~BIT_3;
		icb->add_firmware_options[0] &=
		    ~(BIT_3 | BIT_2 | BIT_1 | BIT_0);
		icb->add_firmware_options[0] |= BIT_2;
		icb->response_accumulation_timer = 3;
		icb->interrupt_delay_timer = 5;

		vha->flags.process_response_queue = 1;
	} else {
		/* Enable ZIO. */
		/* Capture the ZIO mode/timer only once, before init_done. */
		if (!vha->flags.init_done) {
			ha->zio_mode = icb->add_firmware_options[0] &
			    (BIT_3 | BIT_2 | BIT_1 | BIT_0);
			ha->zio_timer = icb->interrupt_delay_timer ?
			    icb->interrupt_delay_timer: 2;
		}
		icb->add_firmware_options[0] &=
		    ~(BIT_3 | BIT_2 | BIT_1 | BIT_0);
		vha->flags.process_response_queue = 0;
		if (ha->zio_mode != QLA_ZIO_DISABLED) {
			ha->zio_mode = QLA_ZIO_MODE_6;

			DEBUG2(printk("scsi(%ld): ZIO mode %d enabled; timer "
			    "delay (%d us).\n", vha->host_no, ha->zio_mode,
			    ha->zio_timer * 100));
			qla_printk(KERN_INFO, ha,
			    "ZIO mode %d enabled; timer delay (%d us).\n",
			    ha->zio_mode, ha->zio_timer * 100);

			icb->add_firmware_options[0] |= (uint8_t)ha->zio_mode;
			icb->interrupt_delay_timer = (uint8_t)ha->zio_timer;
			vha->flags.process_response_queue = 1;
		}
	}

	if (rval) {
		DEBUG2_3(printk(KERN_WARNING
		    "scsi(%ld): NVRAM configuration failed!\n", vha->host_no));
	}
	return (rval);
}
2499
2500 static void
2501 qla2x00_rport_del(void *data)
2502 {
2503 fc_port_t *fcport = data;
2504 struct fc_rport *rport;
2505 unsigned long flags;
2506
2507 spin_lock_irqsave(fcport->vha->host->host_lock, flags);
2508 rport = fcport->drport ? fcport->drport: fcport->rport;
2509 fcport->drport = NULL;
2510 spin_unlock_irqrestore(fcport->vha->host->host_lock, flags);
2511 if (rport)
2512 fc_remote_port_delete(rport);
2513 }
2514
/**
 * qla2x00_alloc_fcport() - Allocate a generic fcport.
 * @vha: HA context
 * @flags: allocation flags
 *
 * Returns a pointer to the allocated fcport, or NULL, if none available.
 */
2522 fc_port_t *
2523 qla2x00_alloc_fcport(scsi_qla_host_t *vha, gfp_t flags)
2524 {
2525 fc_port_t *fcport;
2526
2527 fcport = kzalloc(sizeof(fc_port_t), flags);
2528 if (!fcport)
2529 return NULL;
2530
2531 /* Setup fcport template structure. */
2532 fcport->vha = vha;
2533 fcport->vp_idx = vha->vp_idx;
2534 fcport->port_type = FCT_UNKNOWN;
2535 fcport->loop_id = FC_NO_LOOP_ID;
2536 atomic_set(&fcport->state, FCS_UNCONFIGURED);
2537 fcport->supported_classes = FC_COS_UNSPECIFIED;
2538
2539 return fcport;
2540 }
2541
2542 /*
2543 * qla2x00_configure_loop
2544 * Updates Fibre Channel Device Database with what is actually on loop.
2545 *
2546 * Input:
2547 * ha = adapter block pointer.
2548 *
2549 * Returns:
2550 * 0 = success.
2551 * 1 = error.
2552 * 2 = database was full and device was not configured.
2553 */
static int
qla2x00_configure_loop(scsi_qla_host_t *vha)
{
	int rval;
	unsigned long flags, save_flags;
	struct qla_hw_data *ha = vha->hw;
	rval = QLA_SUCCESS;

	/* Get Initiator ID */
	if (test_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags)) {
		rval = qla2x00_configure_hba(vha);
		if (rval != QLA_SUCCESS) {
			DEBUG(printk("scsi(%ld): Unable to configure HBA.\n",
			    vha->host_no));
			return (rval);
		}
	}

	/* Work on a local snapshot of dpc_flags; save_flags keeps the
	 * original bits so they can be re-queued if a resync intervenes. */
	save_flags = flags = vha->dpc_flags;
	DEBUG(printk("scsi(%ld): Configure loop -- dpc flags =0x%lx\n",
	    vha->host_no, flags));

	/*
	 * If we have both an RSCN and PORT UPDATE pending then handle them
	 * both at the same time.
	 */
	clear_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
	clear_bit(RSCN_UPDATE, &vha->dpc_flags);

	qla2x00_get_data_rate(vha);

	/* Determine what we need to do */
	/* Translate the topology + pending-update combination into which
	 * scans to run (local loop, fabric RSCN, or both). */
	if (ha->current_topology == ISP_CFG_FL &&
	    (test_bit(LOCAL_LOOP_UPDATE, &flags))) {

		vha->flags.rscn_queue_overflow = 1;
		set_bit(RSCN_UPDATE, &flags);

	} else if (ha->current_topology == ISP_CFG_F &&
	    (test_bit(LOCAL_LOOP_UPDATE, &flags))) {

		vha->flags.rscn_queue_overflow = 1;
		set_bit(RSCN_UPDATE, &flags);
		clear_bit(LOCAL_LOOP_UPDATE, &flags);

	} else if (ha->current_topology == ISP_CFG_N) {
		clear_bit(RSCN_UPDATE, &flags);

	} else if (!vha->flags.online ||
	    (test_bit(ABORT_ISP_ACTIVE, &flags))) {

		/* Offline or mid-abort: rescan everything. */
		vha->flags.rscn_queue_overflow = 1;
		set_bit(RSCN_UPDATE, &flags);
		set_bit(LOCAL_LOOP_UPDATE, &flags);
	}

	if (test_bit(LOCAL_LOOP_UPDATE, &flags)) {
		if (test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags))
			rval = QLA_FUNCTION_FAILED;
		else
			rval = qla2x00_configure_local_loop(vha);
	}

	if (rval == QLA_SUCCESS && test_bit(RSCN_UPDATE, &flags)) {
		if (LOOP_TRANSITION(vha))
			rval = QLA_FUNCTION_FAILED;
		else
			rval = qla2x00_configure_fabric(vha);
	}

	if (rval == QLA_SUCCESS) {
		if (atomic_read(&vha->loop_down_timer) ||
		    test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags)) {
			rval = QLA_FUNCTION_FAILED;
		} else {
			atomic_set(&vha->loop_state, LOOP_READY);

			DEBUG(printk("scsi(%ld): LOOP READY\n", vha->host_no));
		}
	}

	if (rval) {
		DEBUG2_3(printk("%s(%ld): *** FAILED ***\n",
		    __func__, vha->host_no));
	} else {
		DEBUG3(printk("%s: exiting normally\n", __func__));
	}

	/* Restore state if a resync event occurred during processing */
	if (test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags)) {
		if (test_bit(LOCAL_LOOP_UPDATE, &save_flags))
			set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
		if (test_bit(RSCN_UPDATE, &save_flags)) {
			set_bit(RSCN_UPDATE, &vha->dpc_flags);
			if (!IS_ALOGIO_CAPABLE(ha))
				vha->flags.rscn_queue_overflow = 1;
		}
	}

	return (rval);
}
2655
2656
2657
2658 /*
2659 * qla2x00_configure_local_loop
2660 * Updates Fibre Channel Device Database with local loop devices.
2661 *
2662 * Input:
2663 * ha = adapter block pointer.
2664 *
2665 * Returns:
2666 * 0 = success.
2667 */
static int
qla2x00_configure_local_loop(scsi_qla_host_t *vha)
{
	int rval, rval2;
	int found_devs;
	int found;
	fc_port_t *fcport, *new_fcport;

	uint16_t index;
	uint16_t entries;
	char *id_iter;
	uint16_t loop_id;
	uint8_t domain, area, al_pa;
	struct qla_hw_data *ha = vha->hw;

	found_devs = 0;
	new_fcport = NULL;
	entries = MAX_FIBRE_DEVICES;

	DEBUG3(printk("scsi(%ld): Getting FCAL position map\n", vha->host_no));
	DEBUG3(qla2x00_get_fcal_position_map(vha, NULL));

	/* Get list of logged in devices. */
	memset(ha->gid_list, 0, GID_LIST_SIZE);
	rval = qla2x00_get_id_list(vha, ha->gid_list, ha->gid_list_dma,
	    &entries);
	if (rval != QLA_SUCCESS)
		goto cleanup_allocation;

	DEBUG3(printk("scsi(%ld): Entries in ID list (%d)\n",
	    vha->host_no, entries));
	DEBUG3(qla2x00_dump_buffer((uint8_t *)ha->gid_list,
	    entries * sizeof(struct gid_list_info)));

	/* Allocate temporary fcport for any new fcports discovered. */
	new_fcport = qla2x00_alloc_fcport(vha, GFP_KERNEL);
	if (new_fcport == NULL) {
		rval = QLA_MEMORY_ALLOC_FAILED;
		goto cleanup_allocation;
	}
	new_fcport->flags &= ~FCF_FABRIC_DEVICE;

	/*
	 * Mark local devices that were present with FCF_DEVICE_LOST for now.
	 */
	/* Anything not rediscovered below stays FCS_DEVICE_LOST. */
	list_for_each_entry(fcport, &vha->vp_fcports, list) {
		if (atomic_read(&fcport->state) == FCS_ONLINE &&
		    fcport->port_type != FCT_BROADCAST &&
		    (fcport->flags & FCF_FABRIC_DEVICE) == 0) {

			DEBUG(printk("scsi(%ld): Marking port lost, "
			    "loop_id=0x%04x\n",
			    vha->host_no, fcport->loop_id));

			atomic_set(&fcport->state, FCS_DEVICE_LOST);
		}
	}

	/* Add devices to port list. */
	/* Walk the raw GID list; entry stride is chip-dependent
	 * (ha->gid_list_info_size), hence the manual byte iterator. */
	id_iter = (char *)ha->gid_list;
	for (index = 0; index < entries; index++) {
		domain = ((struct gid_list_info *)id_iter)->domain;
		area = ((struct gid_list_info *)id_iter)->area;
		al_pa = ((struct gid_list_info *)id_iter)->al_pa;
		if (IS_QLA2100(ha) || IS_QLA2200(ha))
			loop_id = (uint16_t)
			    ((struct gid_list_info *)id_iter)->loop_id_2100;
		else
			loop_id = le16_to_cpu(
			    ((struct gid_list_info *)id_iter)->loop_id);
		id_iter += ha->gid_list_info_size;

		/* Bypass reserved domain fields. */
		if ((domain & 0xf0) == 0xf0)
			continue;

		/* Bypass if not same domain and area of adapter. */
		if (area && domain &&
		    (area != vha->d_id.b.area || domain != vha->d_id.b.domain))
			continue;

		/* Bypass invalid local loop ID. */
		if (loop_id > LAST_LOCAL_LOOP_ID)
			continue;

		/* Fill in member data. */
		new_fcport->d_id.b.domain = domain;
		new_fcport->d_id.b.area = area;
		new_fcport->d_id.b.al_pa = al_pa;
		new_fcport->loop_id = loop_id;
		new_fcport->vp_idx = vha->vp_idx;
		rval2 = qla2x00_get_port_database(vha, new_fcport, 0);
		if (rval2 != QLA_SUCCESS) {
			DEBUG2(printk("scsi(%ld): Failed to retrieve fcport "
			    "information -- get_port_database=%x, "
			    "loop_id=0x%04x\n",
			    vha->host_no, rval2, new_fcport->loop_id));
			DEBUG2(printk("scsi(%ld): Scheduling resync...\n",
			    vha->host_no));
			set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
			continue;
		}

		/* Check for matching device in port list. */
		found = 0;
		fcport = NULL;
		list_for_each_entry(fcport, &vha->vp_fcports, list) {
			if (memcmp(new_fcport->port_name, fcport->port_name,
			    WWN_SIZE))
				continue;

			/* Known WWPN: refresh addressing info in place. */
			fcport->flags &= ~FCF_FABRIC_DEVICE;
			fcport->loop_id = new_fcport->loop_id;
			fcport->port_type = new_fcport->port_type;
			fcport->d_id.b24 = new_fcport->d_id.b24;
			memcpy(fcport->node_name, new_fcport->node_name,
			    WWN_SIZE);

			found++;
			break;
		}

		if (!found) {
			/* New device, add to fcports list. */
			if (vha->vp_idx) {
				new_fcport->vha = vha;
				new_fcport->vp_idx = vha->vp_idx;
			}
			list_add_tail(&new_fcport->list, &vha->vp_fcports);

			/* Allocate a new replacement fcport. */
			/* The scratch fcport was consumed by the list; get a
			 * fresh one for the next iteration. */
			fcport = new_fcport;
			new_fcport = qla2x00_alloc_fcport(vha, GFP_KERNEL);
			if (new_fcport == NULL) {
				rval = QLA_MEMORY_ALLOC_FAILED;
				goto cleanup_allocation;
			}
			new_fcport->flags &= ~FCF_FABRIC_DEVICE;
		}

		/* Base iIDMA settings on HBA port speed. */
		fcport->fp_speed = ha->link_data_rate;

		qla2x00_update_fcport(vha, fcport);

		found_devs++;
	}

cleanup_allocation:
	/* kfree(NULL) is a no-op, so this covers the early-exit paths too. */
	kfree(new_fcport);

	if (rval != QLA_SUCCESS) {
		DEBUG2(printk("scsi(%ld): Configure local loop error exit: "
		    "rval=%x\n", vha->host_no, rval));
	}

	return (rval);
}
2826
2827 static void
2828 qla2x00_iidma_fcport(scsi_qla_host_t *vha, fc_port_t *fcport)
2829 {
2830 #define LS_UNKNOWN 2
2831 static char *link_speeds[] = { "1", "2", "?", "4", "8", "10" };
2832 char *link_speed;
2833 int rval;
2834 uint16_t mb[4];
2835 struct qla_hw_data *ha = vha->hw;
2836
2837 if (!IS_IIDMA_CAPABLE(ha))
2838 return;
2839
2840 if (atomic_read(&fcport->state) != FCS_ONLINE)
2841 return;
2842
2843 if (fcport->fp_speed == PORT_SPEED_UNKNOWN ||
2844 fcport->fp_speed > ha->link_data_rate)
2845 return;
2846
2847 rval = qla2x00_set_idma_speed(vha, fcport->loop_id, fcport->fp_speed,
2848 mb);
2849 if (rval != QLA_SUCCESS) {
2850 DEBUG2(printk("scsi(%ld): Unable to adjust iIDMA "
2851 "%02x%02x%02x%02x%02x%02x%02x%02x -- %04x %x %04x %04x.\n",
2852 vha->host_no, fcport->port_name[0], fcport->port_name[1],
2853 fcport->port_name[2], fcport->port_name[3],
2854 fcport->port_name[4], fcport->port_name[5],
2855 fcport->port_name[6], fcport->port_name[7], rval,
2856 fcport->fp_speed, mb[0], mb[1]));
2857 } else {
2858 link_speed = link_speeds[LS_UNKNOWN];
2859 if (fcport->fp_speed < 5)
2860 link_speed = link_speeds[fcport->fp_speed];
2861 else if (fcport->fp_speed == 0x13)
2862 link_speed = link_speeds[5];
2863 DEBUG2(qla_printk(KERN_INFO, ha,
2864 "iIDMA adjusted to %s GB/s on "
2865 "%02x%02x%02x%02x%02x%02x%02x%02x.\n",
2866 link_speed, fcport->port_name[0],
2867 fcport->port_name[1], fcport->port_name[2],
2868 fcport->port_name[3], fcport->port_name[4],
2869 fcport->port_name[5], fcport->port_name[6],
2870 fcport->port_name[7]));
2871 }
2872 }
2873
2874 static void
2875 qla2x00_reg_remote_port(scsi_qla_host_t *vha, fc_port_t *fcport)
2876 {
2877 struct fc_rport_identifiers rport_ids;
2878 struct fc_rport *rport;
2879 struct qla_hw_data *ha = vha->hw;
2880 unsigned long flags;
2881
2882 qla2x00_rport_del(fcport);
2883
2884 rport_ids.node_name = wwn_to_u64(fcport->node_name);
2885 rport_ids.port_name = wwn_to_u64(fcport->port_name);
2886 rport_ids.port_id = fcport->d_id.b.domain << 16 |
2887 fcport->d_id.b.area << 8 | fcport->d_id.b.al_pa;
2888 rport_ids.roles = FC_RPORT_ROLE_UNKNOWN;
2889 fcport->rport = rport = fc_remote_port_add(vha->host, 0, &rport_ids);
2890 if (!rport) {
2891 qla_printk(KERN_WARNING, ha,
2892 "Unable to allocate fc remote port!\n");
2893 return;
2894 }
2895 spin_lock_irqsave(fcport->vha->host->host_lock, flags);
2896 *((fc_port_t **)rport->dd_data) = fcport;
2897 spin_unlock_irqrestore(fcport->vha->host->host_lock, flags);
2898
2899 rport->supported_classes = fcport->supported_classes;
2900
2901 rport_ids.roles = FC_RPORT_ROLE_UNKNOWN;
2902 if (fcport->port_type == FCT_INITIATOR)
2903 rport_ids.roles |= FC_RPORT_ROLE_FCP_INITIATOR;
2904 if (fcport->port_type == FCT_TARGET)
2905 rport_ids.roles |= FC_RPORT_ROLE_FCP_TARGET;
2906 fc_remote_port_rolechg(rport, rport_ids.roles);
2907 }
2908
2909 /*
2910 * qla2x00_update_fcport
2911 * Updates device on list.
2912 *
2913 * Input:
2914 * ha = adapter block pointer.
2915 * fcport = port structure pointer.
2916 *
2917 * Return:
2918 * 0 - Success
2919 * BIT_0 - error
2920 *
2921 * Context:
2922 * Kernel context.
2923 */
void
qla2x00_update_fcport(scsi_qla_host_t *vha, fc_port_t *fcport)
{
	fcport->vha = vha;
	fcport->login_retry = 0;
	/* Login has completed; drop the pending-login markers. */
	fcport->flags &= ~(FCF_LOGIN_NEEDED | FCF_ASYNC_SENT);

	/* Tune link speed and FCP priority, then register with the FC
	 * transport; mark the port online only after registration. */
	qla2x00_iidma_fcport(vha, fcport);
	qla24xx_update_fcport_fcp_prio(vha, fcport);
	qla2x00_reg_remote_port(vha, fcport);
	atomic_set(&fcport->state, FCS_ONLINE);
}
2936
2937 /*
2938 * qla2x00_configure_fabric
2939 * Setup SNS devices with loop ID's.
2940 *
2941 * Input:
2942 * ha = adapter block pointer.
2943 *
2944 * Returns:
2945 * 0 = success.
2946 * BIT_0 = error
2947 */
static int
qla2x00_configure_fabric(scsi_qla_host_t *vha)
{
	int rval, rval2;
	fc_port_t *fcport, *fcptemp;
	uint16_t next_loopid;
	uint16_t mb[MAILBOX_REGISTER_COUNT];
	uint16_t loop_id;
	LIST_HEAD(new_fcports);
	struct qla_hw_data *ha = vha->hw;
	struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);

	/* If FL port exists, then SNS is present */
	if (IS_FWI2_CAPABLE(ha))
		loop_id = NPH_F_PORT;
	else
		loop_id = SNS_FL_PORT;
	rval = qla2x00_get_port_name(vha, loop_id, vha->fabric_node_name, 1);
	if (rval != QLA_SUCCESS) {
		DEBUG2(printk("scsi(%ld): MBC_GET_PORT_NAME Failed, No FL "
		    "Port\n", vha->host_no));

		/* No switch: nothing to configure, but not an error. */
		vha->device_flags &= ~SWITCH_FOUND;
		return (QLA_SUCCESS);
	}
	vha->device_flags |= SWITCH_FOUND;

	/* Mark devices that need re-synchronization. */
	rval2 = qla2x00_device_resync(vha);
	if (rval2 == QLA_RSCNS_HANDLED) {
		/* No point doing the scan, just continue. */
		return (QLA_SUCCESS);
	}
	/* do/while(0): single pass with 'break' as a structured bail-out. */
	do {
		/* FDMI support. */
		if (ql2xfdmienable &&
		    test_and_clear_bit(REGISTER_FDMI_NEEDED, &vha->dpc_flags))
			qla2x00_fdmi_register(vha);

		/* Ensure we are logged into the SNS. */
		if (IS_FWI2_CAPABLE(ha))
			loop_id = NPH_SNS;
		else
			loop_id = SIMPLE_NAME_SERVER;
		ha->isp_ops->fabric_login(vha, loop_id, 0xff, 0xff,
		    0xfc, mb, BIT_1 | BIT_0);
		if (mb[0] != MBS_COMMAND_COMPLETE) {
			DEBUG2(qla_printk(KERN_INFO, ha,
			    "Failed SNS login: loop_id=%x mb[0]=%x mb[1]=%x "
			    "mb[2]=%x mb[6]=%x mb[7]=%x\n", loop_id,
			    mb[0], mb[1], mb[2], mb[6], mb[7]));
			return (QLA_SUCCESS);
		}

		/* Register our FC-4 type/features and names with the name
		 * server; failures here are logged but non-fatal. */
		if (test_and_clear_bit(REGISTER_FC4_NEEDED, &vha->dpc_flags)) {
			if (qla2x00_rft_id(vha)) {
				/* EMPTY */
				DEBUG2(printk("scsi(%ld): Register FC-4 "
				    "TYPE failed.\n", vha->host_no));
			}
			if (qla2x00_rff_id(vha)) {
				/* EMPTY */
				DEBUG2(printk("scsi(%ld): Register FC-4 "
				    "Features failed.\n", vha->host_no));
			}
			if (qla2x00_rnn_id(vha)) {
				/* EMPTY */
				DEBUG2(printk("scsi(%ld): Register Node Name "
				    "failed.\n", vha->host_no));
			} else if (qla2x00_rsnn_nn(vha)) {
				/* EMPTY */
				DEBUG2(printk("scsi(%ld): Register Symbolic "
				    "Node Name failed.\n", vha->host_no));
			}
		}

		rval = qla2x00_find_all_fabric_devs(vha, &new_fcports);
		if (rval != QLA_SUCCESS)
			break;

		/*
		 * Logout all previous fabric devices marked lost, except
		 * FCP2 devices.
		 */
		list_for_each_entry(fcport, &vha->vp_fcports, list) {
			if (test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags))
				break;

			if ((fcport->flags & FCF_FABRIC_DEVICE) == 0)
				continue;

			if (atomic_read(&fcport->state) == FCS_DEVICE_LOST) {
				qla2x00_mark_device_lost(vha, fcport,
				    ql2xplogiabsentdevice, 0);
				if (fcport->loop_id != FC_NO_LOOP_ID &&
				    (fcport->flags & FCF_FCP2_DEVICE) == 0 &&
				    fcport->port_type != FCT_INITIATOR &&
				    fcport->port_type != FCT_BROADCAST) {
					ha->isp_ops->fabric_logout(vha,
					    fcport->loop_id,
					    fcport->d_id.b.domain,
					    fcport->d_id.b.area,
					    fcport->d_id.b.al_pa);
					fcport->loop_id = FC_NO_LOOP_ID;
				}
			}
		}

		/* Starting free loop ID. */
		next_loopid = ha->min_external_loopid;

		/*
		 * Scan through our port list and login entries that need to be
		 * logged in.
		 */
		list_for_each_entry(fcport, &vha->vp_fcports, list) {
			if (atomic_read(&vha->loop_down_timer) ||
			    test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags))
				break;

			if ((fcport->flags & FCF_FABRIC_DEVICE) == 0 ||
			    (fcport->flags & FCF_LOGIN_NEEDED) == 0)
				continue;

			if (fcport->loop_id == FC_NO_LOOP_ID) {
				fcport->loop_id = next_loopid;
				rval = qla2x00_find_new_loop_id(
				    base_vha, fcport);
				if (rval != QLA_SUCCESS) {
					/* Ran out of IDs to use */
					break;
				}
			}
			/* Login and update database */
			qla2x00_fabric_dev_login(vha, fcport, &next_loopid);
		}

		/* Exit if out of loop IDs. */
		if (rval != QLA_SUCCESS) {
			break;
		}

		/*
		 * Login and add the new devices to our port list.
		 */
		list_for_each_entry_safe(fcport, fcptemp, &new_fcports, list) {
			if (atomic_read(&vha->loop_down_timer) ||
			    test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags))
				break;

			/* Find a new loop ID to use. */
			fcport->loop_id = next_loopid;
			rval = qla2x00_find_new_loop_id(base_vha, fcport);
			if (rval != QLA_SUCCESS) {
				/* Ran out of IDs to use */
				break;
			}

			/* Login and update database */
			qla2x00_fabric_dev_login(vha, fcport, &next_loopid);

			if (vha->vp_idx) {
				fcport->vha = vha;
				fcport->vp_idx = vha->vp_idx;
			}
			list_move_tail(&fcport->list, &vha->vp_fcports);
		}
	} while (0);

	/* Free all new device structures not processed. */
	list_for_each_entry_safe(fcport, fcptemp, &new_fcports, list) {
		list_del(&fcport->list);
		kfree(fcport);
	}

	if (rval) {
		DEBUG2(printk("scsi(%ld): Configure fabric error exit: "
		    "rval=%d\n", vha->host_no, rval));
	}

	return (rval);
}
3130
3131 /*
3132 * qla2x00_find_all_fabric_devs
3133 *
3134 * Input:
3135 * ha = adapter block pointer.
3136 * dev = database device entry pointer.
3137 *
3138 * Returns:
3139 * 0 = success.
3140 *
3141 * Context:
3142 * Kernel context.
3143 */
3144 static int
3145 qla2x00_find_all_fabric_devs(scsi_qla_host_t *vha,
3146 struct list_head *new_fcports)
3147 {
3148 int rval;
3149 uint16_t loop_id;
3150 fc_port_t *fcport, *new_fcport, *fcptemp;
3151 int found;
3152
3153 sw_info_t *swl;
3154 int swl_idx;
3155 int first_dev, last_dev;
3156 port_id_t wrap = {}, nxt_d_id;
3157 struct qla_hw_data *ha = vha->hw;
3158 struct scsi_qla_host *vp, *base_vha = pci_get_drvdata(ha->pdev);
3159 struct scsi_qla_host *tvp;
3160
3161 rval = QLA_SUCCESS;
3162
3163 /* Try GID_PT to get device list, else GAN. */
3164 swl = kcalloc(MAX_FIBRE_DEVICES, sizeof(sw_info_t), GFP_KERNEL);
3165 if (!swl) {
3166 /*EMPTY*/
3167 DEBUG2(printk("scsi(%ld): GID_PT allocations failed, fallback "
3168 "on GA_NXT\n", vha->host_no));
3169 } else {
3170 if (qla2x00_gid_pt(vha, swl) != QLA_SUCCESS) {
3171 kfree(swl);
3172 swl = NULL;
3173 } else if (qla2x00_gpn_id(vha, swl) != QLA_SUCCESS) {
3174 kfree(swl);
3175 swl = NULL;
3176 } else if (qla2x00_gnn_id(vha, swl) != QLA_SUCCESS) {
3177 kfree(swl);
3178 swl = NULL;
3179 } else if (ql2xiidmaenable &&
3180 qla2x00_gfpn_id(vha, swl) == QLA_SUCCESS) {
3181 qla2x00_gpsc(vha, swl);
3182 }
3183
3184 /* If other queries succeeded probe for FC-4 type */
3185 if (swl)
3186 qla2x00_gff_id(vha, swl);
3187 }
3188 swl_idx = 0;
3189
3190 /* Allocate temporary fcport for any new fcports discovered. */
3191 new_fcport = qla2x00_alloc_fcport(vha, GFP_KERNEL);
3192 if (new_fcport == NULL) {
3193 kfree(swl);
3194 return (QLA_MEMORY_ALLOC_FAILED);
3195 }
3196 new_fcport->flags |= (FCF_FABRIC_DEVICE | FCF_LOGIN_NEEDED);
3197 /* Set start port ID scan at adapter ID. */
3198 first_dev = 1;
3199 last_dev = 0;
3200
3201 /* Starting free loop ID. */
3202 loop_id = ha->min_external_loopid;
3203 for (; loop_id <= ha->max_loop_id; loop_id++) {
3204 if (qla2x00_is_reserved_id(vha, loop_id))
3205 continue;
3206
3207 if (ha->current_topology == ISP_CFG_FL &&
3208 (atomic_read(&vha->loop_down_timer) ||
3209 LOOP_TRANSITION(vha))) {
3210 atomic_set(&vha->loop_down_timer, 0);
3211 set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
3212 set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
3213 break;
3214 }
3215
3216 if (swl != NULL) {
3217 if (last_dev) {
3218 wrap.b24 = new_fcport->d_id.b24;
3219 } else {
3220 new_fcport->d_id.b24 = swl[swl_idx].d_id.b24;
3221 memcpy(new_fcport->node_name,
3222 swl[swl_idx].node_name, WWN_SIZE);
3223 memcpy(new_fcport->port_name,
3224 swl[swl_idx].port_name, WWN_SIZE);
3225 memcpy(new_fcport->fabric_port_name,
3226 swl[swl_idx].fabric_port_name, WWN_SIZE);
3227 new_fcport->fp_speed = swl[swl_idx].fp_speed;
3228 new_fcport->fc4_type = swl[swl_idx].fc4_type;
3229
3230 if (swl[swl_idx].d_id.b.rsvd_1 != 0) {
3231 last_dev = 1;
3232 }
3233 swl_idx++;
3234 }
3235 } else {
3236 /* Send GA_NXT to the switch */
3237 rval = qla2x00_ga_nxt(vha, new_fcport);
3238 if (rval != QLA_SUCCESS) {
3239 qla_printk(KERN_WARNING, ha,
3240 "SNS scan failed -- assuming zero-entry "
3241 "result...\n");
3242 list_for_each_entry_safe(fcport, fcptemp,
3243 new_fcports, list) {
3244 list_del(&fcport->list);
3245 kfree(fcport);
3246 }
3247 rval = QLA_SUCCESS;
3248 break;
3249 }
3250 }
3251
3252 /* If wrap on switch device list, exit. */
3253 if (first_dev) {
3254 wrap.b24 = new_fcport->d_id.b24;
3255 first_dev = 0;
3256 } else if (new_fcport->d_id.b24 == wrap.b24) {
3257 DEBUG2(printk("scsi(%ld): device wrap (%02x%02x%02x)\n",
3258 vha->host_no, new_fcport->d_id.b.domain,
3259 new_fcport->d_id.b.area, new_fcport->d_id.b.al_pa));
3260 break;
3261 }
3262
3263 /* Bypass if same physical adapter. */
3264 if (new_fcport->d_id.b24 == base_vha->d_id.b24)
3265 continue;
3266
3267 /* Bypass virtual ports of the same host. */
3268 found = 0;
3269 if (ha->num_vhosts) {
3270 unsigned long flags;
3271
3272 spin_lock_irqsave(&ha->vport_slock, flags);
3273 list_for_each_entry_safe(vp, tvp, &ha->vp_list, list) {
3274 if (new_fcport->d_id.b24 == vp->d_id.b24) {
3275 found = 1;
3276 break;
3277 }
3278 }
3279 spin_unlock_irqrestore(&ha->vport_slock, flags);
3280
3281 if (found)
3282 continue;
3283 }
3284
3285 /* Bypass if same domain and area of adapter. */
3286 if (((new_fcport->d_id.b24 & 0xffff00) ==
3287 (vha->d_id.b24 & 0xffff00)) && ha->current_topology ==
3288 ISP_CFG_FL)
3289 continue;
3290
3291 /* Bypass reserved domain fields. */
3292 if ((new_fcport->d_id.b.domain & 0xf0) == 0xf0)
3293 continue;
3294
3295 /* Bypass ports whose FCP-4 type is not FCP_SCSI */
3296 if (ql2xgffidenable &&
3297 (new_fcport->fc4_type != FC4_TYPE_FCP_SCSI &&
3298 new_fcport->fc4_type != FC4_TYPE_UNKNOWN))
3299 continue;
3300
3301 /* Locate matching device in database. */
3302 found = 0;
3303 list_for_each_entry(fcport, &vha->vp_fcports, list) {
3304 if (memcmp(new_fcport->port_name, fcport->port_name,
3305 WWN_SIZE))
3306 continue;
3307
3308 found++;
3309
3310 /* Update port state. */
3311 memcpy(fcport->fabric_port_name,
3312 new_fcport->fabric_port_name, WWN_SIZE);
3313 fcport->fp_speed = new_fcport->fp_speed;
3314
3315 /*
3316 * If address the same and state FCS_ONLINE, nothing
3317 * changed.
3318 */
3319 if (fcport->d_id.b24 == new_fcport->d_id.b24 &&
3320 atomic_read(&fcport->state) == FCS_ONLINE) {
3321 break;
3322 }
3323
3324 /*
3325 * If device was not a fabric device before.
3326 */
3327 if ((fcport->flags & FCF_FABRIC_DEVICE) == 0) {
3328 fcport->d_id.b24 = new_fcport->d_id.b24;
3329 fcport->loop_id = FC_NO_LOOP_ID;
3330 fcport->flags |= (FCF_FABRIC_DEVICE |
3331 FCF_LOGIN_NEEDED);
3332 break;
3333 }
3334
3335 /*
3336 * Port ID changed or device was marked to be updated;
3337 * Log it out if still logged in and mark it for
3338 * relogin later.
3339 */
3340 fcport->d_id.b24 = new_fcport->d_id.b24;
3341 fcport->flags |= FCF_LOGIN_NEEDED;
3342 if (fcport->loop_id != FC_NO_LOOP_ID &&
3343 (fcport->flags & FCF_FCP2_DEVICE) == 0 &&
3344 fcport->port_type != FCT_INITIATOR &&
3345 fcport->port_type != FCT_BROADCAST) {
3346 ha->isp_ops->fabric_logout(vha, fcport->loop_id,
3347 fcport->d_id.b.domain, fcport->d_id.b.area,
3348 fcport->d_id.b.al_pa);
3349 fcport->loop_id = FC_NO_LOOP_ID;
3350 }
3351
3352 break;
3353 }
3354
3355 if (found)
3356 continue;
3357 /* If device was not in our fcports list, then add it. */
3358 list_add_tail(&new_fcport->list, new_fcports);
3359
3360 /* Allocate a new replacement fcport. */
3361 nxt_d_id.b24 = new_fcport->d_id.b24;
3362 new_fcport = qla2x00_alloc_fcport(vha, GFP_KERNEL);
3363 if (new_fcport == NULL) {
3364 kfree(swl);
3365 return (QLA_MEMORY_ALLOC_FAILED);
3366 }
3367 new_fcport->flags |= (FCF_FABRIC_DEVICE | FCF_LOGIN_NEEDED);
3368 new_fcport->d_id.b24 = nxt_d_id.b24;
3369 }
3370
3371 kfree(swl);
3372 kfree(new_fcport);
3373
3374 return (rval);
3375 }
3376
3377 /*
3378 * qla2x00_find_new_loop_id
3379 * Scan through our port list and find a new usable loop ID.
3380 *
3381 * Input:
3382 * ha: adapter state pointer.
3383 * dev: port structure pointer.
3384 *
3385 * Returns:
3386 * qla2x00 local function return status code.
3387 *
3388 * Context:
3389 * Kernel context.
3390 */
int
qla2x00_find_new_loop_id(scsi_qla_host_t *vha, fc_port_t *dev)
{
	int rval;
	int found;
	fc_port_t *fcport;
	uint16_t first_loop_id;
	struct qla_hw_data *ha = vha->hw;
	struct scsi_qla_host *vp;
	struct scsi_qla_host *tvp;
	unsigned long flags = 0;

	rval = QLA_SUCCESS;

	/* Save starting loop ID so a full wrap of the range can be detected. */
	first_loop_id = dev->loop_id;

	for (;;) {
		/* Skip loop ID if already used by adapter. */
		if (dev->loop_id == vha->loop_id)
			dev->loop_id++;

		/* Skip reserved loop IDs. */
		while (qla2x00_is_reserved_id(vha, dev->loop_id))
			dev->loop_id++;

		/* Reset loop ID if passed the end. */
		if (dev->loop_id > ha->max_loop_id) {
			/* first loop ID. */
			dev->loop_id = ha->min_external_loopid;
		}

		/* Check for loop ID being already in use. */
		found = 0;
		fcport = NULL;

		/*
		 * Scan the fcport list of every vport on this HBA for a port
		 * other than 'dev' that already holds the candidate ID.
		 */
		spin_lock_irqsave(&ha->vport_slock, flags);
		list_for_each_entry_safe(vp, tvp, &ha->vp_list, list) {
			list_for_each_entry(fcport, &vp->vp_fcports, list) {
				if (fcport->loop_id == dev->loop_id &&
				    fcport != dev) {
					/* ID possibly in use */
					found++;
					break;
				}
			}
			if (found)
				break;
		}
		spin_unlock_irqrestore(&ha->vport_slock, flags);

		/* If not in use then it is free to use. */
		if (!found) {
			break;
		}

		/* ID in use. Try next value. */
		dev->loop_id++;

		/*
		 * If wrap around. No free ID to use.
		 * NOTE(review): the skip logic above can increment past
		 * first_loop_id without an exact equality match; confirm the
		 * reserved-ID layout guarantees this loop terminates.
		 */
		if (dev->loop_id == first_loop_id) {
			dev->loop_id = FC_NO_LOOP_ID;
			rval = QLA_FUNCTION_FAILED;
			break;
		}
	}

	return (rval);
}
3460
3461 /*
3462 * qla2x00_device_resync
 * Marks devices in the database that need resynchronization.
3464 *
3465 * Input:
3466 * ha = adapter block pointer.
3467 *
3468 * Context:
3469 * Kernel context.
3470 */
static int
qla2x00_device_resync(scsi_qla_host_t *vha)
{
	int rval;
	uint32_t mask;
	fc_port_t *fcport;
	uint32_t rscn_entry;
	uint8_t rscn_out_iter;
	uint8_t format;
	port_id_t d_id = {};

	rval = QLA_RSCNS_HANDLED;

	/* Drain the RSCN queue; also run once more if it overflowed. */
	while (vha->rscn_out_ptr != vha->rscn_in_ptr ||
	    vha->flags.rscn_queue_overflow) {

		/* Unpack queued entry: format byte in the top byte, 24-bit
		 * affected port ID in the low three bytes. */
		rscn_entry = vha->rscn_queue[vha->rscn_out_ptr];
		format = MSB(MSW(rscn_entry));
		d_id.b.domain = LSB(MSW(rscn_entry));
		d_id.b.area = MSB(LSW(rscn_entry));
		d_id.b.al_pa = LSB(LSW(rscn_entry));

		DEBUG(printk("scsi(%ld): RSCN queue entry[%d] = "
		    "[%02x/%02x%02x%02x].\n",
		    vha->host_no, vha->rscn_out_ptr, format, d_id.b.domain,
		    d_id.b.area, d_id.b.al_pa));

		/* Advance the circular out-pointer. */
		vha->rscn_out_ptr++;
		if (vha->rscn_out_ptr == MAX_RSCN_COUNT)
			vha->rscn_out_ptr = 0;

		/* Skip duplicate entries. */
		for (rscn_out_iter = vha->rscn_out_ptr;
		    !vha->flags.rscn_queue_overflow &&
		    rscn_out_iter != vha->rscn_in_ptr;
		    rscn_out_iter = (rscn_out_iter ==
			(MAX_RSCN_COUNT - 1)) ? 0: rscn_out_iter + 1) {

			if (rscn_entry != vha->rscn_queue[rscn_out_iter])
				break;

			DEBUG(printk("scsi(%ld): Skipping duplicate RSCN queue "
			    "entry found at [%d].\n", vha->host_no,
			    rscn_out_iter));

			vha->rscn_out_ptr = rscn_out_iter;
		}

		/* Queue overflow, set switch default case. */
		if (vha->flags.rscn_queue_overflow) {
			DEBUG(printk("scsi(%ld): device_resync: rscn "
			    "overflow.\n", vha->host_no));

			/* format 3 forces the match-everything case below. */
			format = 3;
			vha->flags.rscn_queue_overflow = 0;
		}

		/*
		 * The format selects how much of the 24-bit port ID is
		 * significant: 0 = full port, 1 = area, 2 = domain,
		 * anything else = fabric-wide (drain the queue, match all).
		 */
		switch (format) {
		case 0:
			mask = 0xffffff;
			break;
		case 1:
			mask = 0xffff00;
			break;
		case 2:
			mask = 0xff0000;
			break;
		default:
			mask = 0x0;
			d_id.b24 = 0;
			vha->rscn_out_ptr = vha->rscn_in_ptr;
			break;
		}

		rval = QLA_SUCCESS;

		/* Mark every matching online fabric device as lost so the
		 * follow-up scan re-discovers it. */
		list_for_each_entry(fcport, &vha->vp_fcports, list) {
			if ((fcport->flags & FCF_FABRIC_DEVICE) == 0 ||
			    (fcport->d_id.b24 & mask) != d_id.b24 ||
			    fcport->port_type == FCT_BROADCAST)
				continue;

			if (atomic_read(&fcport->state) == FCS_ONLINE) {
				if (format != 3 ||
				    fcport->port_type != FCT_INITIATOR) {
					qla2x00_mark_device_lost(vha, fcport,
					    0, 0);
				}
			}
		}
	}
	return (rval);
}
3564
3565 /*
3566 * qla2x00_fabric_dev_login
3567 * Login fabric target device and update FC port database.
3568 *
3569 * Input:
3570 * ha: adapter state pointer.
3571 * fcport: port structure list pointer.
3572 * next_loopid: contains value of a new loop ID that can be used
3573 * by the next login attempt.
3574 *
3575 * Returns:
3576 * qla2x00 local function return status code.
3577 *
3578 * Context:
3579 * Kernel context.
3580 */
3581 static int
3582 qla2x00_fabric_dev_login(scsi_qla_host_t *vha, fc_port_t *fcport,
3583 uint16_t *next_loopid)
3584 {
3585 int rval;
3586 int retry;
3587 uint8_t opts;
3588 struct qla_hw_data *ha = vha->hw;
3589
3590 rval = QLA_SUCCESS;
3591 retry = 0;
3592
3593 if (IS_ALOGIO_CAPABLE(ha)) {
3594 if (fcport->flags & FCF_ASYNC_SENT)
3595 return rval;
3596 fcport->flags |= FCF_ASYNC_SENT;
3597 rval = qla2x00_post_async_login_work(vha, fcport, NULL);
3598 if (!rval)
3599 return rval;
3600 }
3601
3602 fcport->flags &= ~FCF_ASYNC_SENT;
3603 rval = qla2x00_fabric_login(vha, fcport, next_loopid);
3604 if (rval == QLA_SUCCESS) {
3605 /* Send an ADISC to FCP2 devices.*/
3606 opts = 0;
3607 if (fcport->flags & FCF_FCP2_DEVICE)
3608 opts |= BIT_1;
3609 rval = qla2x00_get_port_database(vha, fcport, opts);
3610 if (rval != QLA_SUCCESS) {
3611 ha->isp_ops->fabric_logout(vha, fcport->loop_id,
3612 fcport->d_id.b.domain, fcport->d_id.b.area,
3613 fcport->d_id.b.al_pa);
3614 qla2x00_mark_device_lost(vha, fcport, 1, 0);
3615 } else {
3616 qla2x00_update_fcport(vha, fcport);
3617 }
3618 }
3619
3620 return (rval);
3621 }
3622
3623 /*
3624 * qla2x00_fabric_login
3625 * Issue fabric login command.
3626 *
3627 * Input:
3628 * ha = adapter block pointer.
3629 * device = pointer to FC device type structure.
3630 *
3631 * Returns:
3632 * 0 - Login successfully
3633 * 1 - Login failed
3634 * 2 - Initiator device
3635 * 3 - Fatal error
3636 */
int
qla2x00_fabric_login(scsi_qla_host_t *vha, fc_port_t *fcport,
    uint16_t *next_loopid)
{
	int rval;
	int retry;
	uint16_t tmp_loopid;
	uint16_t mb[MAILBOX_REGISTER_COUNT];
	struct qla_hw_data *ha = vha->hw;

	retry = 0;
	tmp_loopid = 0;

	/* Keep retrying until login succeeds, loop IDs are exhausted, or a
	 * fatal mailbox status is returned. */
	for (;;) {
		DEBUG(printk("scsi(%ld): Trying Fabric Login w/loop id 0x%04x "
		    "for port %02x%02x%02x.\n",
		    vha->host_no, fcport->loop_id, fcport->d_id.b.domain,
		    fcport->d_id.b.area, fcport->d_id.b.al_pa));

		/* Login fcport on switch. */
		ha->isp_ops->fabric_login(vha, fcport->loop_id,
		    fcport->d_id.b.domain, fcport->d_id.b.area,
		    fcport->d_id.b.al_pa, mb, BIT_0);
		if (mb[0] == MBS_PORT_ID_USED) {
			/*
			 * Device has another loop ID.  The firmware team
			 * recommends the driver perform an implicit login with
			 * the specified ID again.  The ID we just used is
			 * saved here so we return with an ID that can be
			 * tried by the next login.
			 */
			retry++;
			tmp_loopid = fcport->loop_id;
			fcport->loop_id = mb[1];

			DEBUG(printk("Fabric Login: port in use - next "
			    "loop id=0x%04x, port Id=%02x%02x%02x.\n",
			    fcport->loop_id, fcport->d_id.b.domain,
			    fcport->d_id.b.area, fcport->d_id.b.al_pa));

		} else if (mb[0] == MBS_COMMAND_COMPLETE) {
			/*
			 * Login succeeded.
			 */
			if (retry) {
				/* A retry occurred before. */
				*next_loopid = tmp_loopid;
			} else {
				/*
				 * No retry occurred before.  Just increment
				 * the ID value for next login.
				 */
				*next_loopid = (fcport->loop_id + 1);
			}

			/* mb[1] BIT_0: remote port is an initiator;
			 * BIT_1: remote target supports FCP-2 recovery. */
			if (mb[1] & BIT_0) {
				fcport->port_type = FCT_INITIATOR;
			} else {
				fcport->port_type = FCT_TARGET;
				if (mb[1] & BIT_1) {
					fcport->flags |= FCF_FCP2_DEVICE;
				}
			}

			/* mb[10] reports supported classes of service. */
			if (mb[10] & BIT_0)
				fcport->supported_classes |= FC_COS_CLASS2;
			if (mb[10] & BIT_1)
				fcport->supported_classes |= FC_COS_CLASS3;

			rval = QLA_SUCCESS;
			break;
		} else if (mb[0] == MBS_LOOP_ID_USED) {
			/*
			 * Loop ID already used, try next loop ID.
			 */
			fcport->loop_id++;
			rval = qla2x00_find_new_loop_id(vha, fcport);
			if (rval != QLA_SUCCESS) {
				/* Ran out of loop IDs to use */
				break;
			}
		} else if (mb[0] == MBS_COMMAND_ERROR) {
			/*
			 * Firmware possibly timed out during login.  If NO
			 * retries are left to do then the device is declared
			 * dead.
			 */
			*next_loopid = fcport->loop_id;
			ha->isp_ops->fabric_logout(vha, fcport->loop_id,
			    fcport->d_id.b.domain, fcport->d_id.b.area,
			    fcport->d_id.b.al_pa);
			qla2x00_mark_device_lost(vha, fcport, 1, 0);

			rval = 1;
			break;
		} else {
			/*
			 * unrecoverable / not handled error
			 */
			DEBUG2(printk("%s(%ld): failed=%x port_id=%02x%02x%02x "
			    "loop_id=%x jiffies=%lx.\n",
			    __func__, vha->host_no, mb[0],
			    fcport->d_id.b.domain, fcport->d_id.b.area,
			    fcport->d_id.b.al_pa, fcport->loop_id, jiffies));

			*next_loopid = fcport->loop_id;
			ha->isp_ops->fabric_logout(vha, fcport->loop_id,
			    fcport->d_id.b.domain, fcport->d_id.b.area,
			    fcport->d_id.b.al_pa);
			fcport->loop_id = FC_NO_LOOP_ID;
			fcport->login_retry = 0;

			rval = 3;
			break;
		}
	}

	return (rval);
}
3756
3757 /*
3758 * qla2x00_local_device_login
3759 * Issue local device login command.
3760 *
3761 * Input:
3762 * ha = adapter block pointer.
3763 * loop_id = loop id of device to login to.
3764 *
 * Returns (no symbolic constants are defined for these codes):
3766 * 0 - Login successfully
3767 * 1 - Login failed
3768 * 3 - Fatal error
3769 */
3770 int
3771 qla2x00_local_device_login(scsi_qla_host_t *vha, fc_port_t *fcport)
3772 {
3773 int rval;
3774 uint16_t mb[MAILBOX_REGISTER_COUNT];
3775
3776 memset(mb, 0, sizeof(mb));
3777 rval = qla2x00_login_local_device(vha, fcport, mb, BIT_0);
3778 if (rval == QLA_SUCCESS) {
3779 /* Interrogate mailbox registers for any errors */
3780 if (mb[0] == MBS_COMMAND_ERROR)
3781 rval = 1;
3782 else if (mb[0] == MBS_COMMAND_PARAMETER_ERROR)
3783 /* device not in PCB table */
3784 rval = 3;
3785 }
3786
3787 return (rval);
3788 }
3789
3790 /*
3791 * qla2x00_loop_resync
3792 * Resync with fibre channel devices.
3793 *
3794 * Input:
3795 * ha = adapter block pointer.
3796 *
3797 * Returns:
3798 * 0 = success
3799 */
int
qla2x00_loop_resync(scsi_qla_host_t *vha)
{
	int rval = QLA_SUCCESS;
	uint32_t wait_time;
	struct req_que *req;
	struct rsp_que *rsp;

	/* With CPU affinity enabled, markers go through the base queue. */
	if (vha->hw->flags.cpu_affinity_enabled)
		req = vha->hw->req_q_map[0];
	else
		req = vha->req;
	rsp = req->rsp;

	atomic_set(&vha->loop_state, LOOP_UPDATE);
	clear_bit(ISP_ABORT_RETRY, &vha->dpc_flags);
	if (vha->flags.online) {
		if (!(rval = qla2x00_fw_ready(vha))) {
			/* Wait at most MAX_TARGET RSCNs for a stable link. */
			wait_time = 256;
			do {
				atomic_set(&vha->loop_state, LOOP_UPDATE);

				/* Issue a marker after FW becomes ready. */
				qla2x00_marker(vha, req, rsp, 0, 0,
				    MK_SYNC_ALL);
				vha->marker_needed = 0;

				/* Remap devices on Loop. */
				clear_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);

				qla2x00_configure_loop(vha);
				wait_time--;
				/* Loop again while the link stays up, no ISP
				 * abort is pending, and another resync was
				 * requested during configuration. */
			} while (!atomic_read(&vha->loop_down_timer) &&
			    !(test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags))
			    && wait_time && (test_bit(LOOP_RESYNC_NEEDED,
			    &vha->dpc_flags)));
		}
	}

	/* A pending ISP abort overrides whatever the resync achieved. */
	if (test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags))
		return (QLA_FUNCTION_FAILED);

	if (rval)
		DEBUG2_3(printk("%s(): **** FAILED ****\n", __func__));

	return (rval);
}
3848
3849 /*
3850 * qla2x00_perform_loop_resync
3851 * Description: This function will set the appropriate flags and call
3852 * qla2x00_loop_resync. If successful loop will be resynced
3853 * Arguments : scsi_qla_host_t pointer
 * return : Success or Failure
3855 */
3856
3857 int qla2x00_perform_loop_resync(scsi_qla_host_t *ha)
3858 {
3859 int32_t rval = 0;
3860
3861 if (!test_and_set_bit(LOOP_RESYNC_ACTIVE, &ha->dpc_flags)) {
3862 /*Configure the flags so that resync happens properly*/
3863 atomic_set(&ha->loop_down_timer, 0);
3864 if (!(ha->device_flags & DFLG_NO_CABLE)) {
3865 atomic_set(&ha->loop_state, LOOP_UP);
3866 set_bit(LOCAL_LOOP_UPDATE, &ha->dpc_flags);
3867 set_bit(REGISTER_FC4_NEEDED, &ha->dpc_flags);
3868 set_bit(LOOP_RESYNC_NEEDED, &ha->dpc_flags);
3869
3870 rval = qla2x00_loop_resync(ha);
3871 } else
3872 atomic_set(&ha->loop_state, LOOP_DEAD);
3873
3874 clear_bit(LOOP_RESYNC_ACTIVE, &ha->dpc_flags);
3875 }
3876
3877 return rval;
3878 }
3879
void
qla2x00_update_fcports(scsi_qla_host_t *base_vha)
{
	fc_port_t *fcport;
	struct scsi_qla_host *vha;
	struct qla_hw_data *ha = base_vha->hw;
	unsigned long flags;

	spin_lock_irqsave(&ha->vport_slock, flags);
	/* Go with deferred removal of rport references. */
	list_for_each_entry(vha, &base_vha->hw->vp_list, list) {
		/* Pin the vport so it cannot disappear while the lock is
		 * dropped for the rport deletion below. */
		atomic_inc(&vha->vref_count);
		list_for_each_entry(fcport, &vha->vp_fcports, list) {
			if (fcport->drport &&
			    atomic_read(&fcport->state) != FCS_UNCONFIGURED) {
				/* qla2x00_rport_del() may block, so it cannot
				 * run under the spinlock.
				 * NOTE(review): iteration resumes on the same
				 * fcport after the lock is retaken -- assumes
				 * the list is not modified in the window;
				 * confirm against concurrent fcport removal. */
				spin_unlock_irqrestore(&ha->vport_slock, flags);

				qla2x00_rport_del(fcport);

				spin_lock_irqsave(&ha->vport_slock, flags);
			}
		}
		atomic_dec(&vha->vref_count);
	}
	spin_unlock_irqrestore(&ha->vport_slock, flags);
}
3906
3907 /*
3908 * qla82xx_quiescent_state_cleanup
3909 * Description: This function will block the new I/Os
3910 * Its not aborting any I/Os as context
3911 * is not destroyed during quiescence
3912 * Arguments: scsi_qla_host_t
3913 * return : void
3914 */
3915 void
3916 qla82xx_quiescent_state_cleanup(scsi_qla_host_t *vha)
3917 {
3918 struct qla_hw_data *ha = vha->hw;
3919 struct scsi_qla_host *vp;
3920
3921 qla_printk(KERN_INFO, ha,
3922 "Performing ISP error recovery - ha= %p.\n", ha);
3923
3924 atomic_set(&ha->loop_down_timer, LOOP_DOWN_TIME);
3925 if (atomic_read(&vha->loop_state) != LOOP_DOWN) {
3926 atomic_set(&vha->loop_state, LOOP_DOWN);
3927 qla2x00_mark_all_devices_lost(vha, 0);
3928 list_for_each_entry(vp, &ha->vp_list, list)
3929 qla2x00_mark_all_devices_lost(vha, 0);
3930 } else {
3931 if (!atomic_read(&vha->loop_down_timer))
3932 atomic_set(&vha->loop_down_timer,
3933 LOOP_DOWN_TIME);
3934 }
3935 /* Wait for pending cmds to complete */
3936 qla2x00_eh_wait_for_pending_commands(vha, 0, 0, WAIT_HOST);
3937 }
3938
void
qla2x00_abort_isp_cleanup(scsi_qla_host_t *vha)
{
	struct qla_hw_data *ha = vha->hw;
	struct scsi_qla_host *vp;
	unsigned long flags;
	fc_port_t *fcport;

	/* For ISP82XX, driver waits for completion of the commands.
	 * online flag should be set.
	 */
	if (!IS_QLA82XX(ha))
		vha->flags.online = 0;
	ha->flags.chip_reset_done = 0;
	clear_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
	ha->qla_stats.total_isp_aborts++;

	qla_printk(KERN_INFO, ha,
	    "Performing ISP error recovery - ha= %p.\n", ha);

	/* For ISP82XX, reset_chip is just disabling interrupts.
	 * Driver waits for the completion of the commands.
	 * the interrupts need to be enabled.
	 */
	if (!IS_QLA82XX(ha))
		ha->isp_ops->reset_chip(vha);

	atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME);
	if (atomic_read(&vha->loop_state) != LOOP_DOWN) {
		atomic_set(&vha->loop_state, LOOP_DOWN);
		qla2x00_mark_all_devices_lost(vha, 0);

		/* Mark devices on every vport lost too; each vport is
		 * pinned via vref_count while the lock is dropped for the
		 * (potentially blocking) mark-lost call. */
		spin_lock_irqsave(&ha->vport_slock, flags);
		list_for_each_entry(vp, &ha->vp_list, list) {
			atomic_inc(&vp->vref_count);
			spin_unlock_irqrestore(&ha->vport_slock, flags);

			qla2x00_mark_all_devices_lost(vp, 0);

			spin_lock_irqsave(&ha->vport_slock, flags);
			atomic_dec(&vp->vref_count);
		}
		spin_unlock_irqrestore(&ha->vport_slock, flags);
	} else {
		/* Loop already down -- just (re)arm the down timer. */
		if (!atomic_read(&vha->loop_down_timer))
			atomic_set(&vha->loop_down_timer,
			    LOOP_DOWN_TIME);
	}

	/* Clear all async request states across all VPs. */
	list_for_each_entry(fcport, &vha->vp_fcports, list)
		fcport->flags &= ~(FCF_LOGIN_NEEDED | FCF_ASYNC_SENT);
	spin_lock_irqsave(&ha->vport_slock, flags);
	list_for_each_entry(vp, &ha->vp_list, list) {
		atomic_inc(&vp->vref_count);
		spin_unlock_irqrestore(&ha->vport_slock, flags);

		list_for_each_entry(fcport, &vp->vp_fcports, list)
			fcport->flags &= ~(FCF_LOGIN_NEEDED | FCF_ASYNC_SENT);

		spin_lock_irqsave(&ha->vport_slock, flags);
		atomic_dec(&vp->vref_count);
	}
	spin_unlock_irqrestore(&ha->vport_slock, flags);

	if (!ha->flags.eeh_busy) {
		/* Make sure for ISP 82XX IO DMA is complete */
		if (IS_QLA82XX(ha)) {
			qla82xx_chip_reset_cleanup(vha);

			/* Done waiting for pending commands.
			 * Reset the online flag.
			 */
			vha->flags.online = 0;
		}

		/* Requeue all commands in outstanding command list. */
		qla2x00_abort_all_cmds(vha, DID_RESET << 16);
	}
}
4019
4020 /*
4021 * qla2x00_abort_isp
4022 * Resets ISP and aborts all outstanding commands.
4023 *
4024 * Input:
4025 * ha = adapter block pointer.
4026 *
4027 * Returns:
4028 * 0 = success
4029 */
int
qla2x00_abort_isp(scsi_qla_host_t *vha)
{
	int rval;
	uint8_t status = 0;
	struct qla_hw_data *ha = vha->hw;
	struct scsi_qla_host *vp;
	struct req_que *req = ha->req_q_map[0];
	unsigned long flags;

	if (vha->flags.online) {
		qla2x00_abort_isp_cleanup(vha);

		/* If the PCI channel is permanently gone there is nothing
		 * further to recover -- report success and stop retrying. */
		if (unlikely(pci_channel_offline(ha->pdev) &&
		    ha->flags.pci_channel_io_perm_failure)) {
			clear_bit(ISP_ABORT_RETRY, &vha->dpc_flags);
			status = 0;
			return status;
		}

		ha->isp_ops->get_flash_version(vha, req->ring);

		ha->isp_ops->nvram_config(vha);

		if (!qla2x00_restart_isp(vha)) {
			clear_bit(RESET_MARKER_NEEDED, &vha->dpc_flags);

			if (!atomic_read(&vha->loop_down_timer)) {
				/*
				 * Issue marker command only when we are going
				 * to start the I/O .
				 */
				vha->marker_needed = 1;
			}

			vha->flags.online = 1;

			ha->isp_ops->enable_intrs(ha);

			ha->isp_abort_cnt = 0;
			clear_bit(ISP_ABORT_RETRY, &vha->dpc_flags);

			/* Refresh cached firmware/MPI versions on 81xx. */
			if (IS_QLA81XX(ha))
				qla2x00_get_fw_version(vha,
				    &ha->fw_major_version,
				    &ha->fw_minor_version,
				    &ha->fw_subminor_version,
				    &ha->fw_attributes, &ha->fw_memory_size,
				    ha->mpi_version, &ha->mpi_capabilities,
				    ha->phy_version);

			/* Re-arm FCE tracing if a buffer was allocated. */
			if (ha->fce) {
				ha->flags.fce_enabled = 1;
				memset(ha->fce, 0,
				    fce_calc_size(ha->fce_bufs));
				rval = qla2x00_enable_fce_trace(vha,
				    ha->fce_dma, ha->fce_bufs, ha->fce_mb,
				    &ha->fce_bufs);
				if (rval) {
					qla_printk(KERN_WARNING, ha,
					    "Unable to reinitialize FCE "
					    "(%d).\n", rval);
					ha->flags.fce_enabled = 0;
				}
			}

			/* Re-arm extended firmware tracing likewise. */
			if (ha->eft) {
				memset(ha->eft, 0, EFT_SIZE);
				rval = qla2x00_enable_eft_trace(vha,
				    ha->eft_dma, EFT_NUM_BUFFERS);
				if (rval) {
					qla_printk(KERN_WARNING, ha,
					    "Unable to reinitialize EFT "
					    "(%d).\n", rval);
				}
			}
		} else {	/* failed the ISP abort */
			vha->flags.online = 1;
			if (test_bit(ISP_ABORT_RETRY, &vha->dpc_flags)) {
				if (ha->isp_abort_cnt == 0) {
					qla_printk(KERN_WARNING, ha,
					    "ISP error recovery failed - "
					    "board disabled\n");
					/*
					 * The next call disables the board
					 * completely.
					 */
					ha->isp_ops->reset_adapter(vha);
					vha->flags.online = 0;
					clear_bit(ISP_ABORT_RETRY,
					    &vha->dpc_flags);
					status = 0;
				} else { /* schedule another ISP abort */
					ha->isp_abort_cnt--;
					DEBUG(printk("qla%ld: ISP abort - "
					    "retry remaining %d\n",
					    vha->host_no, ha->isp_abort_cnt));
					status = 1;
				}
			} else {
				/* First failure: start the retry budget. */
				ha->isp_abort_cnt = MAX_RETRIES_OF_ISP_ABORT;
				DEBUG(printk("qla2x00(%ld): ISP error recovery "
				    "- retrying (%d) more times\n",
				    vha->host_no, ha->isp_abort_cnt));
				set_bit(ISP_ABORT_RETRY, &vha->dpc_flags);
				status = 1;
			}
		}

	}

	if (!status) {
		DEBUG(printk(KERN_INFO
		    "qla2x00_abort_isp(%ld): succeeded.\n",
		    vha->host_no));

		/* Propagate the abort to every virtual port, pinning each
		 * vport while the lock is dropped around the call. */
		spin_lock_irqsave(&ha->vport_slock, flags);
		list_for_each_entry(vp, &ha->vp_list, list) {
			if (vp->vp_idx) {
				atomic_inc(&vp->vref_count);
				spin_unlock_irqrestore(&ha->vport_slock, flags);

				qla2x00_vp_abort_isp(vp);

				spin_lock_irqsave(&ha->vport_slock, flags);
				atomic_dec(&vp->vref_count);
			}
		}
		spin_unlock_irqrestore(&ha->vport_slock, flags);

	} else {
		qla_printk(KERN_INFO, ha,
		    "qla2x00_abort_isp: **** FAILED ****\n");
	}

	return(status);
}
4167
4168 /*
4169 * qla2x00_restart_isp
4170 * restarts the ISP after a reset
4171 *
4172 * Input:
4173 * ha = adapter block pointer.
4174 *
4175 * Returns:
4176 * 0 = success
4177 */
static int
qla2x00_restart_isp(scsi_qla_host_t *vha)
{
	int status = 0;
	uint32_t wait_time;
	struct qla_hw_data *ha = vha->hw;
	struct req_que *req = ha->req_q_map[0];
	struct rsp_que *rsp = ha->rsp_q_map[0];

	/* If firmware needs to be loaded */
	if (qla2x00_isp_firmware(vha)) {
		vha->flags.online = 0;
		status = ha->isp_ops->chip_diag(vha);
		if (!status)
			status = qla2x00_setup_chip(vha);
	}

	if (!status && !(status = qla2x00_init_rings(vha))) {
		clear_bit(RESET_MARKER_NEEDED, &vha->dpc_flags);
		ha->flags.chip_reset_done = 1;
		/* Initialize the queues in use */
		qla25xx_init_queues(ha);

		status = qla2x00_fw_ready(vha);
		if (!status) {
			DEBUG(printk("%s(): Start configure loop, "
			    "status = %d\n", __func__, status));

			/* Issue a marker after FW becomes ready. */
			qla2x00_marker(vha, req, rsp, 0, 0, MK_SYNC_ALL);

			vha->flags.online = 1;
			/* Wait at most MAX_TARGET RSCNs for a stable link. */
			wait_time = 256;
			do {
				clear_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
				qla2x00_configure_loop(vha);
				wait_time--;
				/* Repeat while the link is up, no ISP abort
				 * is pending, and another resync request
				 * arrived during configuration. */
			} while (!atomic_read(&vha->loop_down_timer) &&
			    !(test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags))
			    && wait_time && (test_bit(LOOP_RESYNC_NEEDED,
			    &vha->dpc_flags)));
		}

		/* if no cable then assume it's good */
		if ((vha->device_flags & DFLG_NO_CABLE))
			status = 0;

		DEBUG(printk("%s(): Configure loop done, status = 0x%x\n",
		    __func__,
		    status));
	}
	return (status);
}
4232
4233 static int
4234 qla25xx_init_queues(struct qla_hw_data *ha)
4235 {
4236 struct rsp_que *rsp = NULL;
4237 struct req_que *req = NULL;
4238 struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
4239 int ret = -1;
4240 int i;
4241
4242 for (i = 1; i < ha->max_rsp_queues; i++) {
4243 rsp = ha->rsp_q_map[i];
4244 if (rsp) {
4245 rsp->options &= ~BIT_0;
4246 ret = qla25xx_init_rsp_que(base_vha, rsp);
4247 if (ret != QLA_SUCCESS)
4248 DEBUG2_17(printk(KERN_WARNING
4249 "%s Rsp que:%d init failed\n", __func__,
4250 rsp->id));
4251 else
4252 DEBUG2_17(printk(KERN_INFO
4253 "%s Rsp que:%d inited\n", __func__,
4254 rsp->id));
4255 }
4256 }
4257 for (i = 1; i < ha->max_req_queues; i++) {
4258 req = ha->req_q_map[i];
4259 if (req) {
4260 /* Clear outstanding commands array. */
4261 req->options &= ~BIT_0;
4262 ret = qla25xx_init_req_que(base_vha, req);
4263 if (ret != QLA_SUCCESS)
4264 DEBUG2_17(printk(KERN_WARNING
4265 "%s Req que:%d init failed\n", __func__,
4266 req->id));
4267 else
4268 DEBUG2_17(printk(KERN_WARNING
4269 "%s Req que:%d inited\n", __func__,
4270 req->id));
4271 }
4272 }
4273 return ret;
4274 }
4275
4276 /*
4277 * qla2x00_reset_adapter
4278 * Reset adapter.
4279 *
4280 * Input:
4281 * ha = adapter block pointer.
4282 */
void
qla2x00_reset_adapter(scsi_qla_host_t *vha)
{
	unsigned long flags = 0;
	struct qla_hw_data *ha = vha->hw;
	struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;

	/* Take the port offline and quiesce interrupts before touching
	 * the RISC reset bits. */
	vha->flags.online = 0;
	ha->isp_ops->disable_intrs(ha);

	/* Pulse reset then release under the hardware lock; each write is
	 * read back to flush PCI posting. */
	spin_lock_irqsave(&ha->hardware_lock, flags);
	WRT_REG_WORD(&reg->hccr, HCCR_RESET_RISC);
	RD_REG_WORD(&reg->hccr);			/* PCI Posting. */
	WRT_REG_WORD(&reg->hccr, HCCR_RELEASE_RISC);
	RD_REG_WORD(&reg->hccr);			/* PCI Posting. */
	spin_unlock_irqrestore(&ha->hardware_lock, flags);
}
4300
void
qla24xx_reset_adapter(scsi_qla_host_t *vha)
{
	unsigned long flags = 0;
	struct qla_hw_data *ha = vha->hw;
	struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;

	/* ISP82XX has no directly accessible HCCR reset path. */
	if (IS_QLA82XX(ha))
		return;

	/* Take the port offline and quiesce interrupts before touching
	 * the RISC reset bits. */
	vha->flags.online = 0;
	ha->isp_ops->disable_intrs(ha);

	/* Pulse reset then release/pause under the hardware lock; each
	 * write is read back to flush PCI posting. */
	spin_lock_irqsave(&ha->hardware_lock, flags);
	WRT_REG_DWORD(&reg->hccr, HCCRX_SET_RISC_RESET);
	RD_REG_DWORD(&reg->hccr);
	WRT_REG_DWORD(&reg->hccr, HCCRX_REL_RISC_PAUSE);
	RD_REG_DWORD(&reg->hccr);
	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	/* Polled-mode adapters need interrupts re-enabled here. */
	if (IS_NOPOLLING_TYPE(ha))
		ha->isp_ops->enable_intrs(ha);
}
4324
/* On sparc systems, obtain port and node WWN from firmware
 * properties.  No-op on non-SPARC builds.
 */
static void qla24xx_nvram_wwn_from_ofw(scsi_qla_host_t *vha,
    struct nvram_24xx *nv)
{
#ifdef CONFIG_SPARC
	struct qla_hw_data *ha = vha->hw;
	struct pci_dev *pdev = ha->pdev;
	struct device_node *dp = pci_device_to_OF_node(pdev);
	const u8 *val;
	int len;

	/* Overwrite the NVRAM WWNs only when the OpenFirmware property
	 * exists and holds at least WWN_SIZE bytes. */
	val = of_get_property(dp, "port-wwn", &len);
	if (val && len >= WWN_SIZE)
		memcpy(nv->port_name, val, WWN_SIZE);

	val = of_get_property(dp, "node-wwn", &len);
	if (val && len >= WWN_SIZE)
		memcpy(nv->node_name, val, WWN_SIZE);
#endif
}
4347
4348 int
4349 qla24xx_nvram_config(scsi_qla_host_t *vha)
4350 {
4351 int rval;
4352 struct init_cb_24xx *icb;
4353 struct nvram_24xx *nv;
4354 uint32_t *dptr;
4355 uint8_t *dptr1, *dptr2;
4356 uint32_t chksum;
4357 uint16_t cnt;
4358 struct qla_hw_data *ha = vha->hw;
4359
4360 rval = QLA_SUCCESS;
4361 icb = (struct init_cb_24xx *)ha->init_cb;
4362 nv = ha->nvram;
4363
4364 /* Determine NVRAM starting address. */
4365 if (ha->flags.port0) {
4366 ha->nvram_base = FA_NVRAM_FUNC0_ADDR;
4367 ha->vpd_base = FA_NVRAM_VPD0_ADDR;
4368 } else {
4369 ha->nvram_base = FA_NVRAM_FUNC1_ADDR;
4370 ha->vpd_base = FA_NVRAM_VPD1_ADDR;
4371 }
4372 ha->nvram_size = sizeof(struct nvram_24xx);
4373 ha->vpd_size = FA_NVRAM_VPD_SIZE;
4374 if (IS_QLA82XX(ha))
4375 ha->vpd_size = FA_VPD_SIZE_82XX;
4376
4377 /* Get VPD data into cache */
4378 ha->vpd = ha->nvram + VPD_OFFSET;
4379 ha->isp_ops->read_nvram(vha, (uint8_t *)ha->vpd,
4380 ha->nvram_base - FA_NVRAM_FUNC0_ADDR, FA_NVRAM_VPD_SIZE * 4);
4381
4382 /* Get NVRAM data into cache and calculate checksum. */
4383 dptr = (uint32_t *)nv;
4384 ha->isp_ops->read_nvram(vha, (uint8_t *)dptr, ha->nvram_base,
4385 ha->nvram_size);
4386 for (cnt = 0, chksum = 0; cnt < ha->nvram_size >> 2; cnt++)
4387 chksum += le32_to_cpu(*dptr++);
4388
4389 DEBUG5(printk("scsi(%ld): Contents of NVRAM\n", vha->host_no));
4390 DEBUG5(qla2x00_dump_buffer((uint8_t *)nv, ha->nvram_size));
4391
4392 /* Bad NVRAM data, set defaults parameters. */
4393 if (chksum || nv->id[0] != 'I' || nv->id[1] != 'S' || nv->id[2] != 'P'
4394 || nv->id[3] != ' ' ||
4395 nv->nvram_version < __constant_cpu_to_le16(ICB_VERSION)) {
4396 /* Reset NVRAM data. */
4397 qla_printk(KERN_WARNING, ha, "Inconsistent NVRAM detected: "
4398 "checksum=0x%x id=%c version=0x%x.\n", chksum, nv->id[0],
4399 le16_to_cpu(nv->nvram_version));
4400 qla_printk(KERN_WARNING, ha, "Falling back to functioning (yet "
4401 "invalid -- WWPN) defaults.\n");
4402
4403 /*
4404 * Set default initialization control block.
4405 */
4406 memset(nv, 0, ha->nvram_size);
4407 nv->nvram_version = __constant_cpu_to_le16(ICB_VERSION);
4408 nv->version = __constant_cpu_to_le16(ICB_VERSION);
4409 nv->frame_payload_size = __constant_cpu_to_le16(2048);
4410 nv->execution_throttle = __constant_cpu_to_le16(0xFFFF);
4411 nv->exchange_count = __constant_cpu_to_le16(0);
4412 nv->hard_address = __constant_cpu_to_le16(124);
4413 nv->port_name[0] = 0x21;
4414 nv->port_name[1] = 0x00 + ha->port_no;
4415 nv->port_name[2] = 0x00;
4416 nv->port_name[3] = 0xe0;
4417 nv->port_name[4] = 0x8b;
4418 nv->port_name[5] = 0x1c;
4419 nv->port_name[6] = 0x55;
4420 nv->port_name[7] = 0x86;
4421 nv->node_name[0] = 0x20;
4422 nv->node_name[1] = 0x00;
4423 nv->node_name[2] = 0x00;
4424 nv->node_name[3] = 0xe0;
4425 nv->node_name[4] = 0x8b;
4426 nv->node_name[5] = 0x1c;
4427 nv->node_name[6] = 0x55;
4428 nv->node_name[7] = 0x86;
4429 qla24xx_nvram_wwn_from_ofw(vha, nv);
4430 nv->login_retry_count = __constant_cpu_to_le16(8);
4431 nv->interrupt_delay_timer = __constant_cpu_to_le16(0);
4432 nv->login_timeout = __constant_cpu_to_le16(0);
4433 nv->firmware_options_1 =
4434 __constant_cpu_to_le32(BIT_14|BIT_13|BIT_2|BIT_1);
4435 nv->firmware_options_2 = __constant_cpu_to_le32(2 << 4);
4436 nv->firmware_options_2 |= __constant_cpu_to_le32(BIT_12);
4437 nv->firmware_options_3 = __constant_cpu_to_le32(2 << 13);
4438 nv->host_p = __constant_cpu_to_le32(BIT_11|BIT_10);
4439 nv->efi_parameters = __constant_cpu_to_le32(0);
4440 nv->reset_delay = 5;
4441 nv->max_luns_per_target = __constant_cpu_to_le16(128);
4442 nv->port_down_retry_count = __constant_cpu_to_le16(30);
4443 nv->link_down_timeout = __constant_cpu_to_le16(30);
4444
4445 rval = 1;
4446 }
4447
4448 /* Reset Initialization control block */
4449 memset(icb, 0, ha->init_cb_size);
4450
4451 /* Copy 1st segment. */
4452 dptr1 = (uint8_t *)icb;
4453 dptr2 = (uint8_t *)&nv->version;
4454 cnt = (uint8_t *)&icb->response_q_inpointer - (uint8_t *)&icb->version;
4455 while (cnt--)
4456 *dptr1++ = *dptr2++;
4457
4458 icb->login_retry_count = nv->login_retry_count;
4459 icb->link_down_on_nos = nv->link_down_on_nos;
4460
4461 /* Copy 2nd segment. */
4462 dptr1 = (uint8_t *)&icb->interrupt_delay_timer;
4463 dptr2 = (uint8_t *)&nv->interrupt_delay_timer;
4464 cnt = (uint8_t *)&icb->reserved_3 -
4465 (uint8_t *)&icb->interrupt_delay_timer;
4466 while (cnt--)
4467 *dptr1++ = *dptr2++;
4468
4469 /*
4470 * Setup driver NVRAM options.
4471 */
4472 qla2x00_set_model_info(vha, nv->model_name, sizeof(nv->model_name),
4473 "QLA2462");
4474
4475 /* Use alternate WWN? */
4476 if (nv->host_p & __constant_cpu_to_le32(BIT_15)) {
4477 memcpy(icb->node_name, nv->alternate_node_name, WWN_SIZE);
4478 memcpy(icb->port_name, nv->alternate_port_name, WWN_SIZE);
4479 }
4480
4481 /* Prepare nodename */
4482 if ((icb->firmware_options_1 & __constant_cpu_to_le32(BIT_14)) == 0) {
4483 /*
4484 * Firmware will apply the following mask if the nodename was
4485 * not provided.
4486 */
4487 memcpy(icb->node_name, icb->port_name, WWN_SIZE);
4488 icb->node_name[0] &= 0xF0;
4489 }
4490
4491 /* Set host adapter parameters. */
4492 ha->flags.disable_risc_code_load = 0;
4493 ha->flags.enable_lip_reset = 0;
4494 ha->flags.enable_lip_full_login =
4495 le32_to_cpu(nv->host_p) & BIT_10 ? 1: 0;
4496 ha->flags.enable_target_reset =
4497 le32_to_cpu(nv->host_p) & BIT_11 ? 1: 0;
4498 ha->flags.enable_led_scheme = 0;
4499 ha->flags.disable_serdes = le32_to_cpu(nv->host_p) & BIT_5 ? 1: 0;
4500
4501 ha->operating_mode = (le32_to_cpu(icb->firmware_options_2) &
4502 (BIT_6 | BIT_5 | BIT_4)) >> 4;
4503
4504 memcpy(ha->fw_seriallink_options24, nv->seriallink_options,
4505 sizeof(ha->fw_seriallink_options24));
4506
4507 /* save HBA serial number */
4508 ha->serial0 = icb->port_name[5];
4509 ha->serial1 = icb->port_name[6];
4510 ha->serial2 = icb->port_name[7];
4511 memcpy(vha->node_name, icb->node_name, WWN_SIZE);
4512 memcpy(vha->port_name, icb->port_name, WWN_SIZE);
4513
4514 icb->execution_throttle = __constant_cpu_to_le16(0xFFFF);
4515
4516 ha->retry_count = le16_to_cpu(nv->login_retry_count);
4517
4518 /* Set minimum login_timeout to 4 seconds. */
4519 if (le16_to_cpu(nv->login_timeout) < ql2xlogintimeout)
4520 nv->login_timeout = cpu_to_le16(ql2xlogintimeout);
4521 if (le16_to_cpu(nv->login_timeout) < 4)
4522 nv->login_timeout = __constant_cpu_to_le16(4);
4523 ha->login_timeout = le16_to_cpu(nv->login_timeout);
4524 icb->login_timeout = nv->login_timeout;
4525
4526 /* Set minimum RATOV to 100 tenths of a second. */
4527 ha->r_a_tov = 100;
4528
4529 ha->loop_reset_delay = nv->reset_delay;
4530
4531 /* Link Down Timeout = 0:
4532 *
4533 * When Port Down timer expires we will start returning
4534 * I/O's to OS with "DID_NO_CONNECT".
4535 *
4536 * Link Down Timeout != 0:
4537 *
4538 * The driver waits for the link to come up after link down
4539 * before returning I/Os to OS with "DID_NO_CONNECT".
4540 */
4541 if (le16_to_cpu(nv->link_down_timeout) == 0) {
4542 ha->loop_down_abort_time =
4543 (LOOP_DOWN_TIME - LOOP_DOWN_TIMEOUT);
4544 } else {
4545 ha->link_down_timeout = le16_to_cpu(nv->link_down_timeout);
4546 ha->loop_down_abort_time =
4547 (LOOP_DOWN_TIME - ha->link_down_timeout);
4548 }
4549
4550 /* Need enough time to try and get the port back. */
4551 ha->port_down_retry_count = le16_to_cpu(nv->port_down_retry_count);
4552 if (qlport_down_retry)
4553 ha->port_down_retry_count = qlport_down_retry;
4554
4555 /* Set login_retry_count */
4556 ha->login_retry_count = le16_to_cpu(nv->login_retry_count);
4557 if (ha->port_down_retry_count ==
4558 le16_to_cpu(nv->port_down_retry_count) &&
4559 ha->port_down_retry_count > 3)
4560 ha->login_retry_count = ha->port_down_retry_count;
4561 else if (ha->port_down_retry_count > (int)ha->login_retry_count)
4562 ha->login_retry_count = ha->port_down_retry_count;
4563 if (ql2xloginretrycount)
4564 ha->login_retry_count = ql2xloginretrycount;
4565
4566 /* Enable ZIO. */
4567 if (!vha->flags.init_done) {
4568 ha->zio_mode = le32_to_cpu(icb->firmware_options_2) &
4569 (BIT_3 | BIT_2 | BIT_1 | BIT_0);
4570 ha->zio_timer = le16_to_cpu(icb->interrupt_delay_timer) ?
4571 le16_to_cpu(icb->interrupt_delay_timer): 2;
4572 }
4573 icb->firmware_options_2 &= __constant_cpu_to_le32(
4574 ~(BIT_3 | BIT_2 | BIT_1 | BIT_0));
4575 vha->flags.process_response_queue = 0;
4576 if (ha->zio_mode != QLA_ZIO_DISABLED) {
4577 ha->zio_mode = QLA_ZIO_MODE_6;
4578
4579 DEBUG2(printk("scsi(%ld): ZIO mode %d enabled; timer delay "
4580 "(%d us).\n", vha->host_no, ha->zio_mode,
4581 ha->zio_timer * 100));
4582 qla_printk(KERN_INFO, ha,
4583 "ZIO mode %d enabled; timer delay (%d us).\n",
4584 ha->zio_mode, ha->zio_timer * 100);
4585
4586 icb->firmware_options_2 |= cpu_to_le32(
4587 (uint32_t)ha->zio_mode);
4588 icb->interrupt_delay_timer = cpu_to_le16(ha->zio_timer);
4589 vha->flags.process_response_queue = 1;
4590 }
4591
4592 if (rval) {
4593 DEBUG2_3(printk(KERN_WARNING
4594 "scsi(%ld): NVRAM configuration failed!\n", vha->host_no));
4595 }
4596 return (rval);
4597 }
4598
/*
 * qla24xx_load_risc_flash() - Load RISC firmware from the on-board flash.
 * @vha: adapter to load
 * @srisc_addr: out -- RISC address of the first segment loaded (the
 *	firmware entry point handed later to the execute-firmware mailbox
 *	command)
 * @faddr: dword offset into flash where the firmware image begins
 *
 * The request ring is borrowed as a DMA-able bounce buffer -- it is idle
 * this early in initialization.  Each firmware segment is read from flash
 * and pushed to RISC memory with the LOAD RAM mailbox command in chunks
 * of at most ha->fw_transfer_size bytes.
 *
 * Returns QLA_SUCCESS, or QLA_FUNCTION_FAILED when the image fails the
 * blank-flash sanity check or a LOAD RAM command fails.
 */
static int
qla24xx_load_risc_flash(scsi_qla_host_t *vha, uint32_t *srisc_addr,
    uint32_t faddr)
{
	int	rval = QLA_SUCCESS;
	int	segments, fragment;
	uint32_t *dcode, dlen;
	uint32_t risc_addr;
	uint32_t risc_size;
	uint32_t i;
	struct qla_hw_data *ha = vha->hw;
	struct req_que *req = ha->req_q_map[0];

	qla_printk(KERN_INFO, ha,
	    "FW: Loading from flash (%x)...\n", faddr);

	rval = QLA_SUCCESS;

	segments = FA_RISC_CODE_SEGMENTS;
	dcode = (uint32_t *)req->ring;	/* request ring doubles as DMA buffer */
	*srisc_addr = 0;

	/* Validate firmware image by checking version. */
	qla24xx_read_flash_data(vha, dcode, faddr + 4, 4);
	for (i = 0; i < 4; i++)
		dcode[i] = be32_to_cpu(dcode[i]);
	/* All-ones or all-zeros version words indicate erased/blank flash. */
	if ((dcode[0] == 0xffffffff && dcode[1] == 0xffffffff &&
	    dcode[2] == 0xffffffff && dcode[3] == 0xffffffff) ||
	    (dcode[0] == 0 && dcode[1] == 0 && dcode[2] == 0 &&
	    dcode[3] == 0)) {
		qla_printk(KERN_WARNING, ha,
		    "Unable to verify integrity of flash firmware image!\n");
		qla_printk(KERN_WARNING, ha,
		    "Firmware data: %08x %08x %08x %08x!\n", dcode[0],
		    dcode[1], dcode[2], dcode[3]);

		return QLA_FUNCTION_FAILED;
	}

	while (segments && rval == QLA_SUCCESS) {
		/* Read segment's load information (header at current faddr). */
		qla24xx_read_flash_data(vha, dcode, faddr, 4);

		risc_addr = be32_to_cpu(dcode[2]);
		/* Remember the load address of the very first segment only. */
		*srisc_addr = *srisc_addr == 0 ? risc_addr : *srisc_addr;
		risc_size = be32_to_cpu(dcode[3]);

		fragment = 0;
		while (risc_size > 0 && rval == QLA_SUCCESS) {
			/* Cap each fragment at the adapter's transfer limit. */
			dlen = (uint32_t)(ha->fw_transfer_size >> 2);
			if (dlen > risc_size)
				dlen = risc_size;

			DEBUG7(printk("scsi(%ld): Loading risc segment@ risc "
			    "addr %x, number of dwords 0x%x, offset 0x%x.\n",
			    vha->host_no, risc_addr, dlen, faddr));

			qla24xx_read_flash_data(vha, dcode, faddr, dlen);
			/* Byte-swap into the order expected by the RISC. */
			for (i = 0; i < dlen; i++)
				dcode[i] = swab32(dcode[i]);

			rval = qla2x00_load_ram(vha, req->dma, risc_addr,
			    dlen);
			if (rval) {
				DEBUG(printk("scsi(%ld):[ERROR] Failed to load "
				    "segment %d of firmware\n", vha->host_no,
				    fragment));
				qla_printk(KERN_WARNING, ha,
				    "[ERROR] Failed to load segment %d of "
				    "firmware\n", fragment);
				break;
			}

			faddr += dlen;
			risc_addr += dlen;
			risc_size -= dlen;
			fragment++;
		}

		/* Next segment. */
		segments--;
	}

	return rval;
}
4684
4685 #define QLA_FW_URL "ftp://ftp.qlogic.com/outgoing/linux/firmware/"
4686
/*
 * qla2x00_load_risc() - Load RISC firmware on pre-FWI2 (ISP2x00) adapters
 * via the request-firmware interface.
 * @vha: adapter to load
 * @srisc_addr: out -- RISC word address of the first segment loaded
 *	(firmware entry point)
 *
 * The firmware blob is a sequence of 16-bit big-endian words; the segment
 * start addresses come from the blob descriptor's segs[] table (terminated
 * by a zero entry) and each segment's word count sits at fwcode[3] of its
 * header.  The request ring is used as a DMA bounce buffer.
 *
 * Returns QLA_SUCCESS or QLA_FUNCTION_FAILED.  NOTE(review): the blob
 * itself is not released here -- presumably it is cached and freed at
 * driver unload; confirm against qla2x00_request_firmware().
 */
int
qla2x00_load_risc(scsi_qla_host_t *vha, uint32_t *srisc_addr)
{
	int	rval;
	int	i, fragment;
	uint16_t *wcode, *fwcode;
	uint32_t risc_addr, risc_size, fwclen, wlen, *seg;
	struct fw_blob *blob;
	struct qla_hw_data *ha = vha->hw;
	struct req_que *req = ha->req_q_map[0];

	/* Load firmware blob. */
	blob = qla2x00_request_firmware(vha);
	if (!blob) {
		qla_printk(KERN_ERR, ha, "Firmware image unavailable.\n");
		qla_printk(KERN_ERR, ha, "Firmware images can be retrieved "
		    "from: " QLA_FW_URL ".\n");
		return QLA_FUNCTION_FAILED;
	}

	rval = QLA_SUCCESS;

	wcode = (uint16_t *)req->ring;	/* request ring doubles as DMA buffer */
	*srisc_addr = 0;
	fwcode = (uint16_t *)blob->fw->data;
	fwclen = 0;

	/* Validate firmware image by checking version. */
	if (blob->fw->size < 8 * sizeof(uint16_t)) {
		qla_printk(KERN_WARNING, ha,
		    "Unable to verify integrity of firmware image (%Zd)!\n",
		    blob->fw->size);
		goto fail_fw_integrity;
	}
	/* Version words live at offset 4; all-ones/all-zeros means junk. */
	for (i = 0; i < 4; i++)
		wcode[i] = be16_to_cpu(fwcode[i + 4]);
	if ((wcode[0] == 0xffff && wcode[1] == 0xffff && wcode[2] == 0xffff &&
	    wcode[3] == 0xffff) || (wcode[0] == 0 && wcode[1] == 0 &&
	    wcode[2] == 0 && wcode[3] == 0)) {
		qla_printk(KERN_WARNING, ha,
		    "Unable to verify integrity of firmware image!\n");
		qla_printk(KERN_WARNING, ha,
		    "Firmware data: %04x %04x %04x %04x!\n", wcode[0],
		    wcode[1], wcode[2], wcode[3]);
		goto fail_fw_integrity;
	}

	seg = blob->segs;
	/* segs[] is zero-terminated; one iteration per firmware segment. */
	while (*seg && rval == QLA_SUCCESS) {
		risc_addr = *seg;
		/* Remember the load address of the very first segment only. */
		*srisc_addr = *srisc_addr == 0 ? *seg : *srisc_addr;
		risc_size = be16_to_cpu(fwcode[3]);

		/* Validate firmware image size. */
		fwclen += risc_size * sizeof(uint16_t);
		if (blob->fw->size < fwclen) {
			qla_printk(KERN_WARNING, ha,
			    "Unable to verify integrity of firmware image "
			    "(%Zd)!\n", blob->fw->size);
			goto fail_fw_integrity;
		}

		fragment = 0;
		while (risc_size > 0 && rval == QLA_SUCCESS) {
			/* Cap each fragment at the adapter's transfer limit. */
			wlen = (uint16_t)(ha->fw_transfer_size >> 1);
			if (wlen > risc_size)
				wlen = risc_size;

			DEBUG7(printk("scsi(%ld): Loading risc segment@ risc "
			    "addr %x, number of words 0x%x.\n", vha->host_no,
			    risc_addr, wlen));

			/* Byte-swap into the order expected by the RISC. */
			for (i = 0; i < wlen; i++)
				wcode[i] = swab16(fwcode[i]);

			rval = qla2x00_load_ram(vha, req->dma, risc_addr,
			    wlen);
			if (rval) {
				DEBUG(printk("scsi(%ld):[ERROR] Failed to load "
				    "segment %d of firmware\n", vha->host_no,
				    fragment));
				qla_printk(KERN_WARNING, ha,
				    "[ERROR] Failed to load segment %d of "
				    "firmware\n", fragment);
				break;
			}

			fwcode += wlen;
			risc_addr += wlen;
			risc_size -= wlen;
			fragment++;
		}

		/* Next segment. */
		seg++;
	}
	return rval;

fail_fw_integrity:
	return QLA_FUNCTION_FAILED;
}
4788
/*
 * qla24xx_load_risc_blob() - Load RISC firmware on FWI2 (ISP24xx-family)
 * adapters via the request-firmware interface.
 * @vha: adapter to load
 * @srisc_addr: out -- RISC address of the first segment loaded (firmware
 *	entry point)
 *
 * Same structure as qla24xx_load_risc_flash(), but the image source is a
 * request-firmware blob of 32-bit big-endian words instead of flash: each
 * of FA_RISC_CODE_SEGMENTS segments carries its load address at
 * fwcode[2] and its dword count at fwcode[3].  The request ring serves as
 * the DMA bounce buffer.
 *
 * Returns QLA_SUCCESS or QLA_FUNCTION_FAILED.
 */
static int
qla24xx_load_risc_blob(scsi_qla_host_t *vha, uint32_t *srisc_addr)
{
	int	rval;
	int	segments, fragment;
	uint32_t *dcode, dlen;
	uint32_t risc_addr;
	uint32_t risc_size;
	uint32_t i;
	struct fw_blob *blob;
	uint32_t *fwcode, fwclen;
	struct qla_hw_data *ha = vha->hw;
	struct req_que *req = ha->req_q_map[0];

	/* Load firmware blob. */
	blob = qla2x00_request_firmware(vha);
	if (!blob) {
		qla_printk(KERN_ERR, ha, "Firmware image unavailable.\n");
		qla_printk(KERN_ERR, ha, "Firmware images can be retrieved "
		    "from: " QLA_FW_URL ".\n");

		return QLA_FUNCTION_FAILED;
	}

	qla_printk(KERN_INFO, ha,
	    "FW: Loading via request-firmware...\n");

	rval = QLA_SUCCESS;

	segments = FA_RISC_CODE_SEGMENTS;
	dcode = (uint32_t *)req->ring;	/* request ring doubles as DMA buffer */
	*srisc_addr = 0;
	fwcode = (uint32_t *)blob->fw->data;
	fwclen = 0;

	/* Validate firmware image by checking version. */
	if (blob->fw->size < 8 * sizeof(uint32_t)) {
		qla_printk(KERN_WARNING, ha,
		    "Unable to verify integrity of firmware image (%Zd)!\n",
		    blob->fw->size);
		goto fail_fw_integrity;
	}
	/* Version dwords live at offset 4; all-ones/all-zeros means junk. */
	for (i = 0; i < 4; i++)
		dcode[i] = be32_to_cpu(fwcode[i + 4]);
	if ((dcode[0] == 0xffffffff && dcode[1] == 0xffffffff &&
	    dcode[2] == 0xffffffff && dcode[3] == 0xffffffff) ||
	    (dcode[0] == 0 && dcode[1] == 0 && dcode[2] == 0 &&
	    dcode[3] == 0)) {
		qla_printk(KERN_WARNING, ha,
		    "Unable to verify integrity of firmware image!\n");
		qla_printk(KERN_WARNING, ha,
		    "Firmware data: %08x %08x %08x %08x!\n", dcode[0],
		    dcode[1], dcode[2], dcode[3]);
		goto fail_fw_integrity;
	}

	while (segments && rval == QLA_SUCCESS) {
		risc_addr = be32_to_cpu(fwcode[2]);
		/* Remember the load address of the very first segment only. */
		*srisc_addr = *srisc_addr == 0 ? risc_addr : *srisc_addr;
		risc_size = be32_to_cpu(fwcode[3]);

		/* Validate firmware image size. */
		fwclen += risc_size * sizeof(uint32_t);
		if (blob->fw->size < fwclen) {
			qla_printk(KERN_WARNING, ha,
			    "Unable to verify integrity of firmware image "
			    "(%Zd)!\n", blob->fw->size);

			goto fail_fw_integrity;
		}

		fragment = 0;
		while (risc_size > 0 && rval == QLA_SUCCESS) {
			/* Cap each fragment at the adapter's transfer limit. */
			dlen = (uint32_t)(ha->fw_transfer_size >> 2);
			if (dlen > risc_size)
				dlen = risc_size;

			DEBUG7(printk("scsi(%ld): Loading risc segment@ risc "
			    "addr %x, number of dwords 0x%x.\n", vha->host_no,
			    risc_addr, dlen));

			/* Byte-swap into the order expected by the RISC. */
			for (i = 0; i < dlen; i++)
				dcode[i] = swab32(fwcode[i]);

			rval = qla2x00_load_ram(vha, req->dma, risc_addr,
			    dlen);
			if (rval) {
				DEBUG(printk("scsi(%ld):[ERROR] Failed to load "
				    "segment %d of firmware\n", vha->host_no,
				    fragment));
				qla_printk(KERN_WARNING, ha,
				    "[ERROR] Failed to load segment %d of "
				    "firmware\n", fragment);
				break;
			}

			fwcode += dlen;
			risc_addr += dlen;
			risc_size -= dlen;
			fragment++;
		}

		/* Next segment. */
		segments--;
	}
	return rval;

fail_fw_integrity:
	return QLA_FUNCTION_FAILED;
}
4899
4900 int
4901 qla24xx_load_risc(scsi_qla_host_t *vha, uint32_t *srisc_addr)
4902 {
4903 int rval;
4904
4905 if (ql2xfwloadbin == 1)
4906 return qla81xx_load_risc(vha, srisc_addr);
4907
4908 /*
4909 * FW Load priority:
4910 * 1) Firmware via request-firmware interface (.bin file).
4911 * 2) Firmware residing in flash.
4912 */
4913 rval = qla24xx_load_risc_blob(vha, srisc_addr);
4914 if (rval == QLA_SUCCESS)
4915 return rval;
4916
4917 return qla24xx_load_risc_flash(vha, srisc_addr,
4918 vha->hw->flt_region_fw);
4919 }
4920
4921 int
4922 qla81xx_load_risc(scsi_qla_host_t *vha, uint32_t *srisc_addr)
4923 {
4924 int rval;
4925 struct qla_hw_data *ha = vha->hw;
4926
4927 if (ql2xfwloadbin == 2)
4928 goto try_blob_fw;
4929
4930 /*
4931 * FW Load priority:
4932 * 1) Firmware residing in flash.
4933 * 2) Firmware via request-firmware interface (.bin file).
4934 * 3) Golden-Firmware residing in flash -- limited operation.
4935 */
4936 rval = qla24xx_load_risc_flash(vha, srisc_addr, ha->flt_region_fw);
4937 if (rval == QLA_SUCCESS)
4938 return rval;
4939
4940 try_blob_fw:
4941 rval = qla24xx_load_risc_blob(vha, srisc_addr);
4942 if (rval == QLA_SUCCESS || !ha->flt_region_gold_fw)
4943 return rval;
4944
4945 qla_printk(KERN_ERR, ha,
4946 "FW: Attempting to fallback to golden firmware...\n");
4947 rval = qla24xx_load_risc_flash(vha, srisc_addr, ha->flt_region_gold_fw);
4948 if (rval != QLA_SUCCESS)
4949 return rval;
4950
4951 qla_printk(KERN_ERR, ha,
4952 "FW: Please update operational firmware...\n");
4953 ha->flags.running_gold_fw = 1;
4954
4955 return rval;
4956 }
4957
4958 void
4959 qla2x00_try_to_stop_firmware(scsi_qla_host_t *vha)
4960 {
4961 int ret, retries;
4962 struct qla_hw_data *ha = vha->hw;
4963
4964 if (ha->flags.pci_channel_io_perm_failure)
4965 return;
4966 if (!IS_FWI2_CAPABLE(ha))
4967 return;
4968 if (!ha->fw_major_version)
4969 return;
4970
4971 ret = qla2x00_stop_firmware(vha);
4972 for (retries = 5; ret != QLA_SUCCESS && ret != QLA_FUNCTION_TIMEOUT &&
4973 ret != QLA_INVALID_COMMAND && retries ; retries--) {
4974 ha->isp_ops->reset_chip(vha);
4975 if (ha->isp_ops->chip_diag(vha) != QLA_SUCCESS)
4976 continue;
4977 if (qla2x00_setup_chip(vha) != QLA_SUCCESS)
4978 continue;
4979 qla_printk(KERN_INFO, ha,
4980 "Attempting retry of stop-firmware command...\n");
4981 ret = qla2x00_stop_firmware(vha);
4982 }
4983 }
4984
/*
 * qla24xx_configure_vhba() - Bring an NPIV virtual port online.
 * @vha: virtual port to configure; must not be the physical port
 *	(vp_idx 0), for which -EINVAL is returned
 *
 * Waits for firmware readiness on the base (physical) host, issues a
 * sync-all marker, performs fabric login to the SNS (name server) and
 * schedules a loop resync on the base host.
 *
 * Returns a QLA_* status from the loop resync, QLA_FUNCTION_FAILED on
 * SNS login failure, or -EINVAL as noted above.
 */
int
qla24xx_configure_vhba(scsi_qla_host_t *vha)
{
	int rval = QLA_SUCCESS;
	uint16_t mb[MAILBOX_REGISTER_COUNT];
	struct qla_hw_data *ha = vha->hw;
	struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
	struct req_que *req;
	struct rsp_que *rsp;

	/* vp_idx 0 is the physical port -- this helper is vports-only. */
	if (!vha->vp_idx)
		return -EINVAL;

	rval = qla2x00_fw_ready(base_vha);
	/* With CPU affinity the base request queue is shared by all vports. */
	if (ha->flags.cpu_affinity_enabled)
		req = ha->req_q_map[0];
	else
		req = vha->req;
	rsp = req->rsp;

	if (rval == QLA_SUCCESS) {
		clear_bit(RESET_MARKER_NEEDED, &vha->dpc_flags);
		qla2x00_marker(vha, req, rsp, 0, 0, MK_SYNC_ALL);
	}

	vha->flags.management_server_logged_in = 0;

	/* Login to SNS first */
	ha->isp_ops->fabric_login(vha, NPH_SNS, 0xff, 0xff, 0xfc, mb, BIT_1);
	if (mb[0] != MBS_COMMAND_COMPLETE) {
		DEBUG15(qla_printk(KERN_INFO, ha,
		    "Failed SNS login: loop_id=%x mb[0]=%x mb[1]=%x "
		    "mb[2]=%x mb[6]=%x mb[7]=%x\n", NPH_SNS,
		    mb[0], mb[1], mb[2], mb[6], mb[7]));
		return (QLA_FUNCTION_FAILED);
	}

	/* Mark the loop up and ask the DPC thread to resync it. */
	atomic_set(&vha->loop_down_timer, 0);
	atomic_set(&vha->loop_state, LOOP_UP);
	set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
	set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
	rval = qla2x00_loop_resync(base_vha);

	return rval;
}
5030
/* 84XX Support **************************************************************/

/* Global registry of shared CS84xx chip state, one entry per PCI bus. */
static LIST_HEAD(qla_cs84xx_list);
/* Serializes add/lookup/remove on qla_cs84xx_list. */
static DEFINE_MUTEX(qla_cs84xx_mutex);
5035
5036 static struct qla_chip_state_84xx *
5037 qla84xx_get_chip(struct scsi_qla_host *vha)
5038 {
5039 struct qla_chip_state_84xx *cs84xx;
5040 struct qla_hw_data *ha = vha->hw;
5041
5042 mutex_lock(&qla_cs84xx_mutex);
5043
5044 /* Find any shared 84xx chip. */
5045 list_for_each_entry(cs84xx, &qla_cs84xx_list, list) {
5046 if (cs84xx->bus == ha->pdev->bus) {
5047 kref_get(&cs84xx->kref);
5048 goto done;
5049 }
5050 }
5051
5052 cs84xx = kzalloc(sizeof(*cs84xx), GFP_KERNEL);
5053 if (!cs84xx)
5054 goto done;
5055
5056 kref_init(&cs84xx->kref);
5057 spin_lock_init(&cs84xx->access_lock);
5058 mutex_init(&cs84xx->fw_update_mutex);
5059 cs84xx->bus = ha->pdev->bus;
5060
5061 list_add_tail(&cs84xx->list, &qla_cs84xx_list);
5062 done:
5063 mutex_unlock(&qla_cs84xx_mutex);
5064 return cs84xx;
5065 }
5066
5067 static void
5068 __qla84xx_chip_release(struct kref *kref)
5069 {
5070 struct qla_chip_state_84xx *cs84xx =
5071 container_of(kref, struct qla_chip_state_84xx, kref);
5072
5073 mutex_lock(&qla_cs84xx_mutex);
5074 list_del(&cs84xx->list);
5075 mutex_unlock(&qla_cs84xx_mutex);
5076 kfree(cs84xx);
5077 }
5078
5079 void
5080 qla84xx_put_chip(struct scsi_qla_host *vha)
5081 {
5082 struct qla_hw_data *ha = vha->hw;
5083 if (ha->cs84xx)
5084 kref_put(&ha->cs84xx->kref, __qla84xx_chip_release);
5085 }
5086
5087 static int
5088 qla84xx_init_chip(scsi_qla_host_t *vha)
5089 {
5090 int rval;
5091 uint16_t status[2];
5092 struct qla_hw_data *ha = vha->hw;
5093
5094 mutex_lock(&ha->cs84xx->fw_update_mutex);
5095
5096 rval = qla84xx_verify_chip(vha, status);
5097
5098 mutex_unlock(&ha->cs84xx->fw_update_mutex);
5099
5100 return rval != QLA_SUCCESS || status[0] ? QLA_FUNCTION_FAILED:
5101 QLA_SUCCESS;
5102 }
5103
5104 /* 81XX Support **************************************************************/
5105
/*
 * qla81xx_nvram_config() - Read ISP81xx NVRAM, validate it, and build the
 * firmware initialization control block (ICB) plus driver tunables.
 * @vha: adapter being configured
 *
 * Reads VPD and NVRAM from flash, checksums the NVRAM, and falls back to
 * safe defaults (with deliberately invalid WWNs) when the data is bad.
 * The ICB is then assembled by copying two byte-exact segments out of the
 * NVRAM layout, and host parameters (timeouts, retry counts, ZIO mode)
 * are derived from it.
 *
 * Returns QLA_SUCCESS when NVRAM was valid, 1 when defaults were used.
 */
int
qla81xx_nvram_config(scsi_qla_host_t *vha)
{
	int   rval;
	struct init_cb_81xx *icb;
	struct nvram_81xx *nv;
	uint32_t *dptr;
	uint8_t  *dptr1, *dptr2;
	uint32_t chksum;
	uint16_t cnt;
	struct qla_hw_data *ha = vha->hw;

	rval = QLA_SUCCESS;
	icb = (struct init_cb_81xx *)ha->init_cb;
	nv = ha->nvram;

	/* Determine NVRAM starting address. */
	ha->nvram_size = sizeof(struct nvram_81xx);
	ha->vpd_size = FA_NVRAM_VPD_SIZE;

	/* Get VPD data into cache */
	ha->vpd = ha->nvram + VPD_OFFSET;
	ha->isp_ops->read_optrom(vha, ha->vpd, ha->flt_region_vpd << 2,
	    ha->vpd_size);

	/* Get NVRAM data into cache and calculate checksum. */
	ha->isp_ops->read_optrom(vha, ha->nvram, ha->flt_region_nvram << 2,
	    ha->nvram_size);
	dptr = (uint32_t *)nv;
	/* A valid image sums to zero over all 32-bit words. */
	for (cnt = 0, chksum = 0; cnt < ha->nvram_size >> 2; cnt++)
		chksum += le32_to_cpu(*dptr++);

	DEBUG5(printk("scsi(%ld): Contents of NVRAM\n", vha->host_no));
	DEBUG5(qla2x00_dump_buffer((uint8_t *)nv, ha->nvram_size));

	/* Bad NVRAM data, set defaults parameters. */
	if (chksum || nv->id[0] != 'I' || nv->id[1] != 'S' || nv->id[2] != 'P'
	    || nv->id[3] != ' ' ||
	    nv->nvram_version < __constant_cpu_to_le16(ICB_VERSION)) {
		/* Reset NVRAM data. */
		qla_printk(KERN_WARNING, ha, "Inconsistent NVRAM detected: "
		    "checksum=0x%x id=%c version=0x%x.\n", chksum, nv->id[0],
		    le16_to_cpu(nv->nvram_version));
		qla_printk(KERN_WARNING, ha, "Falling back to functioning (yet "
		    "invalid -- WWPN) defaults.\n");

		/*
		 * Set default initialization control block.
		 */
		memset(nv, 0, ha->nvram_size);
		nv->nvram_version = __constant_cpu_to_le16(ICB_VERSION);
		nv->version = __constant_cpu_to_le16(ICB_VERSION);
		nv->frame_payload_size = __constant_cpu_to_le16(2048);
		nv->execution_throttle = __constant_cpu_to_le16(0xFFFF);
		nv->exchange_count = __constant_cpu_to_le16(0);
		/* Default (invalid) WWPN/WWNN, varied only by port number. */
		nv->port_name[0] = 0x21;
		nv->port_name[1] = 0x00 + ha->port_no;
		nv->port_name[2] = 0x00;
		nv->port_name[3] = 0xe0;
		nv->port_name[4] = 0x8b;
		nv->port_name[5] = 0x1c;
		nv->port_name[6] = 0x55;
		nv->port_name[7] = 0x86;
		nv->node_name[0] = 0x20;
		nv->node_name[1] = 0x00;
		nv->node_name[2] = 0x00;
		nv->node_name[3] = 0xe0;
		nv->node_name[4] = 0x8b;
		nv->node_name[5] = 0x1c;
		nv->node_name[6] = 0x55;
		nv->node_name[7] = 0x86;
		nv->login_retry_count = __constant_cpu_to_le16(8);
		nv->interrupt_delay_timer = __constant_cpu_to_le16(0);
		nv->login_timeout = __constant_cpu_to_le16(0);
		nv->firmware_options_1 =
		    __constant_cpu_to_le32(BIT_14|BIT_13|BIT_2|BIT_1);
		nv->firmware_options_2 = __constant_cpu_to_le32(2 << 4);
		nv->firmware_options_2 |= __constant_cpu_to_le32(BIT_12);
		nv->firmware_options_3 = __constant_cpu_to_le32(2 << 13);
		nv->host_p = __constant_cpu_to_le32(BIT_11|BIT_10);
		nv->efi_parameters = __constant_cpu_to_le32(0);
		nv->reset_delay = 5;
		nv->max_luns_per_target = __constant_cpu_to_le16(128);
		nv->port_down_retry_count = __constant_cpu_to_le16(30);
		nv->link_down_timeout = __constant_cpu_to_le16(30);
		/* Default FCoE enode MAC, varied only by port number. */
		nv->enode_mac[0] = 0x00;
		nv->enode_mac[1] = 0x02;
		nv->enode_mac[2] = 0x03;
		nv->enode_mac[3] = 0x04;
		nv->enode_mac[4] = 0x05;
		nv->enode_mac[5] = 0x06 + ha->port_no;

		rval = 1;
	}

	/* Reset Initialization control block */
	memset(icb, 0, sizeof(struct init_cb_81xx));

	/* Copy 1st segment. */
	/* NVRAM and ICB layouts match byte-for-byte over this range. */
	dptr1 = (uint8_t *)icb;
	dptr2 = (uint8_t *)&nv->version;
	cnt = (uint8_t *)&icb->response_q_inpointer - (uint8_t *)&icb->version;
	while (cnt--)
		*dptr1++ = *dptr2++;

	icb->login_retry_count = nv->login_retry_count;

	/* Copy 2nd segment. */
	dptr1 = (uint8_t *)&icb->interrupt_delay_timer;
	dptr2 = (uint8_t *)&nv->interrupt_delay_timer;
	cnt = (uint8_t *)&icb->reserved_5 -
	    (uint8_t *)&icb->interrupt_delay_timer;
	while (cnt--)
		*dptr1++ = *dptr2++;

	memcpy(icb->enode_mac, nv->enode_mac, sizeof(icb->enode_mac));
	/* Some boards (with valid NVRAMs) still have NULL enode_mac!! */
	if (!memcmp(icb->enode_mac, "\0\0\0\0\0\0", sizeof(icb->enode_mac))) {
		icb->enode_mac[0] = 0x01;
		icb->enode_mac[1] = 0x02;
		icb->enode_mac[2] = 0x03;
		icb->enode_mac[3] = 0x04;
		icb->enode_mac[4] = 0x05;
		icb->enode_mac[5] = 0x06 + ha->port_no;
	}

	/* Use extended-initialization control block. */
	memcpy(ha->ex_init_cb, &nv->ex_version, sizeof(*ha->ex_init_cb));

	/*
	 * Setup driver NVRAM options.
	 */
	qla2x00_set_model_info(vha, nv->model_name, sizeof(nv->model_name),
	    "QLE8XXX");

	/* Use alternate WWN? */
	if (nv->host_p & __constant_cpu_to_le32(BIT_15)) {
		memcpy(icb->node_name, nv->alternate_node_name, WWN_SIZE);
		memcpy(icb->port_name, nv->alternate_port_name, WWN_SIZE);
	}

	/* Prepare nodename */
	if ((icb->firmware_options_1 & __constant_cpu_to_le32(BIT_14)) == 0) {
		/*
		 * Firmware will apply the following mask if the nodename was
		 * not provided.
		 */
		memcpy(icb->node_name, icb->port_name, WWN_SIZE);
		icb->node_name[0] &= 0xF0;
	}

	/* Set host adapter parameters. */
	ha->flags.disable_risc_code_load = 0;
	ha->flags.enable_lip_reset = 0;
	ha->flags.enable_lip_full_login =
	    le32_to_cpu(nv->host_p) & BIT_10 ? 1: 0;
	ha->flags.enable_target_reset =
	    le32_to_cpu(nv->host_p) & BIT_11 ? 1: 0;
	ha->flags.enable_led_scheme = 0;
	ha->flags.disable_serdes = le32_to_cpu(nv->host_p) & BIT_5 ? 1: 0;

	ha->operating_mode = (le32_to_cpu(icb->firmware_options_2) &
	    (BIT_6 | BIT_5 | BIT_4)) >> 4;

	/* save HBA serial number */
	ha->serial0 = icb->port_name[5];
	ha->serial1 = icb->port_name[6];
	ha->serial2 = icb->port_name[7];
	memcpy(vha->node_name, icb->node_name, WWN_SIZE);
	memcpy(vha->port_name, icb->port_name, WWN_SIZE);

	icb->execution_throttle = __constant_cpu_to_le16(0xFFFF);

	ha->retry_count = le16_to_cpu(nv->login_retry_count);

	/* Set minimum login_timeout to 4 seconds. */
	if (le16_to_cpu(nv->login_timeout) < ql2xlogintimeout)
		nv->login_timeout = cpu_to_le16(ql2xlogintimeout);
	if (le16_to_cpu(nv->login_timeout) < 4)
		nv->login_timeout = __constant_cpu_to_le16(4);
	ha->login_timeout = le16_to_cpu(nv->login_timeout);
	icb->login_timeout = nv->login_timeout;

	/* Set minimum RATOV to 100 tenths of a second. */
	ha->r_a_tov = 100;

	ha->loop_reset_delay = nv->reset_delay;

	/* Link Down Timeout = 0:
	 *
	 *	When Port Down timer expires we will start returning
	 *	I/O's to OS with "DID_NO_CONNECT".
	 *
	 * Link Down Timeout != 0:
	 *
	 *	The driver waits for the link to come up after link down
	 *	before returning I/Os to OS with "DID_NO_CONNECT".
	 */
	if (le16_to_cpu(nv->link_down_timeout) == 0) {
		ha->loop_down_abort_time =
		    (LOOP_DOWN_TIME - LOOP_DOWN_TIMEOUT);
	} else {
		ha->link_down_timeout = le16_to_cpu(nv->link_down_timeout);
		ha->loop_down_abort_time =
		    (LOOP_DOWN_TIME - ha->link_down_timeout);
	}

	/* Need enough time to try and get the port back. */
	ha->port_down_retry_count = le16_to_cpu(nv->port_down_retry_count);
	if (qlport_down_retry)
		ha->port_down_retry_count = qlport_down_retry;

	/* Set login_retry_count */
	ha->login_retry_count  = le16_to_cpu(nv->login_retry_count);
	if (ha->port_down_retry_count ==
	    le16_to_cpu(nv->port_down_retry_count) &&
	    ha->port_down_retry_count > 3)
		ha->login_retry_count = ha->port_down_retry_count;
	else if (ha->port_down_retry_count > (int)ha->login_retry_count)
		ha->login_retry_count = ha->port_down_retry_count;
	if (ql2xloginretrycount)
		ha->login_retry_count = ql2xloginretrycount;

	/* Enable ZIO. */
	if (!vha->flags.init_done) {
		ha->zio_mode = le32_to_cpu(icb->firmware_options_2) &
		    (BIT_3 | BIT_2 | BIT_1 | BIT_0);
		ha->zio_timer = le16_to_cpu(icb->interrupt_delay_timer) ?
		    le16_to_cpu(icb->interrupt_delay_timer): 2;
	}
	/* Clear the ZIO mode bits; re-set them below if ZIO stays enabled. */
	icb->firmware_options_2 &= __constant_cpu_to_le32(
	    ~(BIT_3 | BIT_2 | BIT_1 | BIT_0));
	vha->flags.process_response_queue = 0;
	if (ha->zio_mode != QLA_ZIO_DISABLED) {
		ha->zio_mode = QLA_ZIO_MODE_6;

		DEBUG2(printk("scsi(%ld): ZIO mode %d enabled; timer delay "
		    "(%d us).\n", vha->host_no, ha->zio_mode,
		    ha->zio_timer * 100));
		qla_printk(KERN_INFO, ha,
		    "ZIO mode %d enabled; timer delay (%d us).\n",
		    ha->zio_mode, ha->zio_timer * 100);

		icb->firmware_options_2 |= cpu_to_le32(
		    (uint32_t)ha->zio_mode);
		icb->interrupt_delay_timer = cpu_to_le16(ha->zio_timer);
		vha->flags.process_response_queue = 1;
	}

	if (rval) {
		DEBUG2_3(printk(KERN_WARNING
		    "scsi(%ld): NVRAM configuration failed!\n", vha->host_no));
	}
	return (rval);
}
5361
/*
 * qla82xx_restart_isp() - Re-initialize an ISP82xx adapter after reset.
 * @vha: base host being restarted
 *
 * Re-initializes the rings, waits for firmware readiness, reconfigures
 * the loop, re-enables interrupts and trace buffers (FCE/EFT), and
 * finally aborts/reinitializes every virtual port.
 *
 * Returns 0 on success, non-zero QLA_* status otherwise.
 */
int
qla82xx_restart_isp(scsi_qla_host_t *vha)
{
	int status, rval;
	uint32_t wait_time;
	struct qla_hw_data *ha = vha->hw;
	struct req_que *req = ha->req_q_map[0];
	struct rsp_que *rsp = ha->rsp_q_map[0];
	struct scsi_qla_host *vp;
	unsigned long flags;

	status = qla2x00_init_rings(vha);
	if (!status) {
		clear_bit(RESET_MARKER_NEEDED, &vha->dpc_flags);
		ha->flags.chip_reset_done = 1;

		status = qla2x00_fw_ready(vha);
		if (!status) {
			qla_printk(KERN_INFO, ha,
			    "%s(): Start configure loop, "
			    "status = %d\n", __func__, status);

			/* Issue a marker after FW becomes ready. */
			qla2x00_marker(vha, req, rsp, 0, 0, MK_SYNC_ALL);

			vha->flags.online = 1;
			/* Wait at most MAX_TARGET RSCNs for a stable link. */
			wait_time = 256;
			/* Keep reconfiguring while RSCNs invalidate the loop. */
			do {
				clear_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
				qla2x00_configure_loop(vha);
				wait_time--;
			} while (!atomic_read(&vha->loop_down_timer) &&
			    !(test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags)) &&
			    wait_time &&
			    (test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags)));
		}

		/* if no cable then assume it's good */
		if ((vha->device_flags & DFLG_NO_CABLE))
			status = 0;

		qla_printk(KERN_INFO, ha,
		    "%s(): Configure loop done, status = 0x%x\n",
		    __func__, status);
	}

	if (!status) {
		clear_bit(RESET_MARKER_NEEDED, &vha->dpc_flags);

		if (!atomic_read(&vha->loop_down_timer)) {
			/*
			 * Issue marker command only when we are going
			 * to start the I/O .
			 */
			vha->marker_needed = 1;
		}

		vha->flags.online = 1;

		ha->isp_ops->enable_intrs(ha);

		ha->isp_abort_cnt = 0;
		clear_bit(ISP_ABORT_RETRY, &vha->dpc_flags);

		/* Re-arm the Fibre Channel Event trace buffer, if present. */
		if (ha->fce) {
			ha->flags.fce_enabled = 1;
			memset(ha->fce, 0,
			    fce_calc_size(ha->fce_bufs));
			rval = qla2x00_enable_fce_trace(vha,
			    ha->fce_dma, ha->fce_bufs, ha->fce_mb,
			    &ha->fce_bufs);
			if (rval) {
				qla_printk(KERN_WARNING, ha,
				    "Unable to reinitialize FCE "
				    "(%d).\n", rval);
				ha->flags.fce_enabled = 0;
			}
		}

		/* Re-arm the Extended Firmware Trace buffer, if present. */
		if (ha->eft) {
			memset(ha->eft, 0, EFT_SIZE);
			rval = qla2x00_enable_eft_trace(vha,
			    ha->eft_dma, EFT_NUM_BUFFERS);
			if (rval) {
				qla_printk(KERN_WARNING, ha,
				    "Unable to reinitialize EFT "
				    "(%d).\n", rval);
			}
		}
	}

	if (!status) {
		DEBUG(printk(KERN_INFO
		    "qla82xx_restart_isp(%ld): succeeded.\n",
		    vha->host_no));

		/*
		 * Abort/reinit each vport.  The lock is dropped around the
		 * (sleeping) abort call; vref_count pins the vport meanwhile.
		 */
		spin_lock_irqsave(&ha->vport_slock, flags);
		list_for_each_entry(vp, &ha->vp_list, list) {
			if (vp->vp_idx) {
				atomic_inc(&vp->vref_count);
				spin_unlock_irqrestore(&ha->vport_slock, flags);

				qla2x00_vp_abort_isp(vp);

				spin_lock_irqsave(&ha->vport_slock, flags);
				atomic_dec(&vp->vref_count);
			}
		}
		spin_unlock_irqrestore(&ha->vport_slock, flags);

	} else {
		qla_printk(KERN_INFO, ha,
		    "qla82xx_restart_isp: **** FAILED ****\n");
	}

	return status;
}
5480
5481 void
5482 qla81xx_update_fw_options(scsi_qla_host_t *vha)
5483 {
5484 struct qla_hw_data *ha = vha->hw;
5485
5486 if (!ql2xetsenable)
5487 return;
5488
5489 /* Enable ETS Burst. */
5490 memset(ha->fw_options, 0, sizeof(ha->fw_options));
5491 ha->fw_options[2] |= BIT_9;
5492 qla2x00_set_fw_options(vha, ha->fw_options);
5493 }
5494
5495 /*
5496 * qla24xx_get_fcp_prio
5497 * Gets the fcp cmd priority value for the logged in port.
5498 * Looks for a match of the port descriptors within
5499 * each of the fcp prio config entries. If a match is found,
5500 * the tag (priority) value is returned.
5501 *
5502 * Input:
5503 * vha = scsi host structure pointer.
5504 * fcport = port structure pointer.
5505 *
5506 * Return:
5507 * non-zero (if found)
5508 * -1 (if not found)
5509 *
5510 * Context:
5511 * Kernel context
5512 */
5513 static int
5514 qla24xx_get_fcp_prio(scsi_qla_host_t *vha, fc_port_t *fcport)
5515 {
5516 int i, entries;
5517 uint8_t pid_match, wwn_match;
5518 int priority;
5519 uint32_t pid1, pid2;
5520 uint64_t wwn1, wwn2;
5521 struct qla_fcp_prio_entry *pri_entry;
5522 struct qla_hw_data *ha = vha->hw;
5523
5524 if (!ha->fcp_prio_cfg || !ha->flags.fcp_prio_enabled)
5525 return -1;
5526
5527 priority = -1;
5528 entries = ha->fcp_prio_cfg->num_entries;
5529 pri_entry = &ha->fcp_prio_cfg->entry[0];
5530
5531 for (i = 0; i < entries; i++) {
5532 pid_match = wwn_match = 0;
5533
5534 if (!(pri_entry->flags & FCP_PRIO_ENTRY_VALID)) {
5535 pri_entry++;
5536 continue;
5537 }
5538
5539 /* check source pid for a match */
5540 if (pri_entry->flags & FCP_PRIO_ENTRY_SPID_VALID) {
5541 pid1 = pri_entry->src_pid & INVALID_PORT_ID;
5542 pid2 = vha->d_id.b24 & INVALID_PORT_ID;
5543 if (pid1 == INVALID_PORT_ID)
5544 pid_match++;
5545 else if (pid1 == pid2)
5546 pid_match++;
5547 }
5548
5549 /* check destination pid for a match */
5550 if (pri_entry->flags & FCP_PRIO_ENTRY_DPID_VALID) {
5551 pid1 = pri_entry->dst_pid & INVALID_PORT_ID;
5552 pid2 = fcport->d_id.b24 & INVALID_PORT_ID;
5553 if (pid1 == INVALID_PORT_ID)
5554 pid_match++;
5555 else if (pid1 == pid2)
5556 pid_match++;
5557 }
5558
5559 /* check source WWN for a match */
5560 if (pri_entry->flags & FCP_PRIO_ENTRY_SWWN_VALID) {
5561 wwn1 = wwn_to_u64(vha->port_name);
5562 wwn2 = wwn_to_u64(pri_entry->src_wwpn);
5563 if (wwn2 == (uint64_t)-1)
5564 wwn_match++;
5565 else if (wwn1 == wwn2)
5566 wwn_match++;
5567 }
5568
5569 /* check destination WWN for a match */
5570 if (pri_entry->flags & FCP_PRIO_ENTRY_DWWN_VALID) {
5571 wwn1 = wwn_to_u64(fcport->port_name);
5572 wwn2 = wwn_to_u64(pri_entry->dst_wwpn);
5573 if (wwn2 == (uint64_t)-1)
5574 wwn_match++;
5575 else if (wwn1 == wwn2)
5576 wwn_match++;
5577 }
5578
5579 if (pid_match == 2 || wwn_match == 2) {
5580 /* Found a matching entry */
5581 if (pri_entry->flags & FCP_PRIO_ENTRY_TAG_VALID)
5582 priority = pri_entry->tag;
5583 break;
5584 }
5585
5586 pri_entry++;
5587 }
5588
5589 return priority;
5590 }
5591
5592 /*
5593 * qla24xx_update_fcport_fcp_prio
5594 * Activates fcp priority for the logged in fc port
5595 *
5596 * Input:
5597 * vha = scsi host structure pointer.
5598 * fcp = port structure pointer.
5599 *
5600 * Return:
5601 * QLA_SUCCESS or QLA_FUNCTION_FAILED
5602 *
5603 * Context:
5604 * Kernel context.
5605 */
5606 int
5607 qla24xx_update_fcport_fcp_prio(scsi_qla_host_t *vha, fc_port_t *fcport)
5608 {
5609 int ret;
5610 int priority;
5611 uint16_t mb[5];
5612
5613 if (fcport->port_type != FCT_TARGET ||
5614 fcport->loop_id == FC_NO_LOOP_ID)
5615 return QLA_FUNCTION_FAILED;
5616
5617 priority = qla24xx_get_fcp_prio(vha, fcport);
5618 if (priority < 0)
5619 return QLA_FUNCTION_FAILED;
5620
5621 ret = qla24xx_set_fcp_prio(vha, fcport->loop_id, priority, mb);
5622 if (ret == QLA_SUCCESS)
5623 fcport->fcp_prio = priority;
5624 else
5625 DEBUG2(printk(KERN_WARNING
5626 "scsi(%ld): Unable to activate fcp priority, "
5627 " ret=0x%x\n", vha->host_no, ret));
5628
5629 return ret;
5630 }
5631
5632 /*
5633 * qla24xx_update_all_fcp_prio
5634 * Activates fcp priority for all the logged in ports
5635 *
5636 * Input:
5637 * ha = adapter block pointer.
5638 *
5639 * Return:
5640 * QLA_SUCCESS or QLA_FUNCTION_FAILED
5641 *
5642 * Context:
5643 * Kernel context.
5644 */
5645 int
5646 qla24xx_update_all_fcp_prio(scsi_qla_host_t *vha)
5647 {
5648 int ret;
5649 fc_port_t *fcport;
5650
5651 ret = QLA_FUNCTION_FAILED;
5652 /* We need to set priority for all logged in ports */
5653 list_for_each_entry(fcport, &vha->vp_fcports, list)
5654 ret = qla24xx_update_fcport_fcp_prio(vha, fcport);
5655
5656 return ret;
5657 }
This page took 0.151741 seconds and 4 git commands to generate.