2 * Linux network driver for Brocade Converged Network Adapter.
4 * This program is free software; you can redistribute it and/or modify it
5 * under the terms of the GNU General Public License (GPL) Version 2 as
6 * published by the Free Software Foundation
8 * This program is distributed in the hope that it will be useful, but
9 * WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
11 * General Public License for more details.
14 * Copyright (c) 2005-2010 Brocade Communications Systems, Inc.
22 #include "bfi_ctreg.h"
26 * IOC local definitions
29 #define bfa_ioc_timer_start(__ioc) \
30 mod_timer(&(__ioc)->ioc_timer, jiffies + \
31 msecs_to_jiffies(BFA_IOC_TOV))
32 #define bfa_ioc_timer_stop(__ioc) del_timer(&(__ioc)->ioc_timer)
34 #define bfa_ioc_recovery_timer_start(__ioc) \
35 mod_timer(&(__ioc)->ioc_timer, jiffies + \
36 msecs_to_jiffies(BFA_IOC_TOV_RECOVER))
38 #define bfa_sem_timer_start(__ioc) \
39 mod_timer(&(__ioc)->sem_timer, jiffies + \
40 msecs_to_jiffies(BFA_IOC_HWSEM_TOV))
41 #define bfa_sem_timer_stop(__ioc) del_timer(&(__ioc)->sem_timer)
43 #define bfa_hb_timer_start(__ioc) \
44 mod_timer(&(__ioc)->hb_timer, jiffies + \
45 msecs_to_jiffies(BFA_IOC_HB_TOV))
46 #define bfa_hb_timer_stop(__ioc) del_timer(&(__ioc)->hb_timer)
49 * Asic specific macros : see bfa_hw_cb.c and bfa_hw_ct.c for details.
52 #define bfa_ioc_firmware_lock(__ioc) \
53 ((__ioc)->ioc_hwif->ioc_firmware_lock(__ioc))
54 #define bfa_ioc_firmware_unlock(__ioc) \
55 ((__ioc)->ioc_hwif->ioc_firmware_unlock(__ioc))
56 #define bfa_ioc_reg_init(__ioc) ((__ioc)->ioc_hwif->ioc_reg_init(__ioc))
57 #define bfa_ioc_map_port(__ioc) ((__ioc)->ioc_hwif->ioc_map_port(__ioc))
58 #define bfa_ioc_notify_hbfail(__ioc) \
59 ((__ioc)->ioc_hwif->ioc_notify_hbfail(__ioc))
61 #define bfa_ioc_is_optrom(__ioc) \
62 (bfa_cb_image_get_size(BFA_IOC_FWIMG_TYPE(__ioc)) < BFA_IOC_FWIMG_MINSZ)
64 #define bfa_ioc_mbox_cmd_pending(__ioc) \
65 (!list_empty(&((__ioc)->mbox_mod.cmd_q)) || \
66 readl((__ioc)->ioc_regs.hfn_mbox_cmd))
68 bool bfa_auto_recover
= true;
71 * forward declarations
73 static void bfa_ioc_hw_sem_get(struct bfa_ioc
*ioc
);
74 static void bfa_ioc_hw_sem_get_cancel(struct bfa_ioc
*ioc
);
75 static void bfa_ioc_hwinit(struct bfa_ioc
*ioc
, bool force
);
76 static void bfa_ioc_send_enable(struct bfa_ioc
*ioc
);
77 static void bfa_ioc_send_disable(struct bfa_ioc
*ioc
);
78 static void bfa_ioc_send_getattr(struct bfa_ioc
*ioc
);
79 static void bfa_ioc_hb_monitor(struct bfa_ioc
*ioc
);
80 static void bfa_ioc_hb_stop(struct bfa_ioc
*ioc
);
81 static void bfa_ioc_reset(struct bfa_ioc
*ioc
, bool force
);
82 static void bfa_ioc_mbox_poll(struct bfa_ioc
*ioc
);
83 static void bfa_ioc_mbox_hbfail(struct bfa_ioc
*ioc
);
84 static void bfa_ioc_recover(struct bfa_ioc
*ioc
);
85 static void bfa_ioc_check_attr_wwns(struct bfa_ioc
*ioc
);
86 static void bfa_ioc_disable_comp(struct bfa_ioc
*ioc
);
87 static void bfa_ioc_lpu_stop(struct bfa_ioc
*ioc
);
90 * IOC state machine events
93 IOC_E_ENABLE
= 1, /*!< IOC enable request */
94 IOC_E_DISABLE
= 2, /*!< IOC disable request */
95 IOC_E_TIMEOUT
= 3, /*!< f/w response timeout */
96 IOC_E_FWREADY
= 4, /*!< f/w initialization done */
97 IOC_E_FWRSP_GETATTR
= 5, /*!< IOC get attribute response */
98 IOC_E_FWRSP_ENABLE
= 6, /*!< enable f/w response */
99 IOC_E_FWRSP_DISABLE
= 7, /*!< disable f/w response */
100 IOC_E_HBFAIL
= 8, /*!< heartbeat failure */
101 IOC_E_HWERROR
= 9, /*!< hardware error interrupt */
102 IOC_E_SEMLOCKED
= 10, /*!< h/w semaphore is locked */
103 IOC_E_DETACH
= 11, /*!< driver detach cleanup */
106 bfa_fsm_state_decl(bfa_ioc
, reset
, struct bfa_ioc
, enum ioc_event
);
107 bfa_fsm_state_decl(bfa_ioc
, fwcheck
, struct bfa_ioc
, enum ioc_event
);
108 bfa_fsm_state_decl(bfa_ioc
, mismatch
, struct bfa_ioc
, enum ioc_event
);
109 bfa_fsm_state_decl(bfa_ioc
, semwait
, struct bfa_ioc
, enum ioc_event
);
110 bfa_fsm_state_decl(bfa_ioc
, hwinit
, struct bfa_ioc
, enum ioc_event
);
111 bfa_fsm_state_decl(bfa_ioc
, enabling
, struct bfa_ioc
, enum ioc_event
);
112 bfa_fsm_state_decl(bfa_ioc
, getattr
, struct bfa_ioc
, enum ioc_event
);
113 bfa_fsm_state_decl(bfa_ioc
, op
, struct bfa_ioc
, enum ioc_event
);
114 bfa_fsm_state_decl(bfa_ioc
, initfail
, struct bfa_ioc
, enum ioc_event
);
115 bfa_fsm_state_decl(bfa_ioc
, hbfail
, struct bfa_ioc
, enum ioc_event
);
116 bfa_fsm_state_decl(bfa_ioc
, disabling
, struct bfa_ioc
, enum ioc_event
);
117 bfa_fsm_state_decl(bfa_ioc
, disabled
, struct bfa_ioc
, enum ioc_event
);
119 static struct bfa_sm_table ioc_sm_table
[] = {
120 {BFA_SM(bfa_ioc_sm_reset
), BFA_IOC_RESET
},
121 {BFA_SM(bfa_ioc_sm_fwcheck
), BFA_IOC_FWMISMATCH
},
122 {BFA_SM(bfa_ioc_sm_mismatch
), BFA_IOC_FWMISMATCH
},
123 {BFA_SM(bfa_ioc_sm_semwait
), BFA_IOC_SEMWAIT
},
124 {BFA_SM(bfa_ioc_sm_hwinit
), BFA_IOC_HWINIT
},
125 {BFA_SM(bfa_ioc_sm_enabling
), BFA_IOC_HWINIT
},
126 {BFA_SM(bfa_ioc_sm_getattr
), BFA_IOC_GETATTR
},
127 {BFA_SM(bfa_ioc_sm_op
), BFA_IOC_OPERATIONAL
},
128 {BFA_SM(bfa_ioc_sm_initfail
), BFA_IOC_INITFAIL
},
129 {BFA_SM(bfa_ioc_sm_hbfail
), BFA_IOC_HBFAIL
},
130 {BFA_SM(bfa_ioc_sm_disabling
), BFA_IOC_DISABLING
},
131 {BFA_SM(bfa_ioc_sm_disabled
), BFA_IOC_DISABLED
},
135 * Reset entry actions -- initialize state machine
138 bfa_ioc_sm_reset_entry(struct bfa_ioc
*ioc
)
140 ioc
->retry_count
= 0;
141 ioc
->auto_recover
= bfa_auto_recover
;
145 * Beginning state. IOC is in reset state.
148 bfa_ioc_sm_reset(struct bfa_ioc
*ioc
, enum ioc_event event
)
152 bfa_fsm_set_state(ioc
, bfa_ioc_sm_fwcheck
);
156 bfa_ioc_disable_comp(ioc
);
163 bfa_sm_fault(ioc
, event
);
168 * Semaphore should be acquired for version check.
171 bfa_ioc_sm_fwcheck_entry(struct bfa_ioc
*ioc
)
173 bfa_ioc_hw_sem_get(ioc
);
177 * Awaiting h/w semaphore to continue with version check.
180 bfa_ioc_sm_fwcheck(struct bfa_ioc
*ioc
, enum ioc_event event
)
183 case IOC_E_SEMLOCKED
:
184 if (bfa_ioc_firmware_lock(ioc
)) {
185 ioc
->retry_count
= 0;
186 bfa_fsm_set_state(ioc
, bfa_ioc_sm_hwinit
);
188 bfa_ioc_hw_sem_release(ioc
);
189 bfa_fsm_set_state(ioc
, bfa_ioc_sm_mismatch
);
194 bfa_ioc_disable_comp(ioc
);
198 bfa_ioc_hw_sem_get_cancel(ioc
);
199 bfa_fsm_set_state(ioc
, bfa_ioc_sm_reset
);
206 bfa_sm_fault(ioc
, event
);
211 * Notify enable completion callback and generate mismatch AEN.
214 bfa_ioc_sm_mismatch_entry(struct bfa_ioc
*ioc
)
217 * Provide enable completion callback and AEN notification only once.
219 if (ioc
->retry_count
== 0)
220 ioc
->cbfn
->enable_cbfn(ioc
->bfa
, BFA_STATUS_IOC_FAILURE
);
222 bfa_ioc_timer_start(ioc
);
226 * Awaiting firmware version match.
229 bfa_ioc_sm_mismatch(struct bfa_ioc
*ioc
, enum ioc_event event
)
233 bfa_fsm_set_state(ioc
, bfa_ioc_sm_fwcheck
);
237 bfa_ioc_disable_comp(ioc
);
241 bfa_ioc_timer_stop(ioc
);
242 bfa_fsm_set_state(ioc
, bfa_ioc_sm_reset
);
249 bfa_sm_fault(ioc
, event
);
254 * Request for semaphore.
257 bfa_ioc_sm_semwait_entry(struct bfa_ioc
*ioc
)
259 bfa_ioc_hw_sem_get(ioc
);
263 * Awaiting semaphore for h/w initialzation.
266 bfa_ioc_sm_semwait(struct bfa_ioc
*ioc
, enum ioc_event event
)
269 case IOC_E_SEMLOCKED
:
270 ioc
->retry_count
= 0;
271 bfa_fsm_set_state(ioc
, bfa_ioc_sm_hwinit
);
275 bfa_ioc_hw_sem_get_cancel(ioc
);
276 bfa_fsm_set_state(ioc
, bfa_ioc_sm_disabled
);
280 bfa_sm_fault(ioc
, event
);
285 bfa_ioc_sm_hwinit_entry(struct bfa_ioc
*ioc
)
287 bfa_ioc_timer_start(ioc
);
288 bfa_ioc_reset(ioc
, false);
293 * Hardware is being initialized. Interrupts are enabled.
294 * Holding hardware semaphore lock.
297 bfa_ioc_sm_hwinit(struct bfa_ioc
*ioc
, enum ioc_event event
)
301 bfa_ioc_timer_stop(ioc
);
302 bfa_fsm_set_state(ioc
, bfa_ioc_sm_enabling
);
306 bfa_ioc_timer_stop(ioc
);
311 if (ioc
->retry_count
< BFA_IOC_HWINIT_MAX
) {
312 bfa_ioc_timer_start(ioc
);
313 bfa_ioc_reset(ioc
, true);
317 bfa_ioc_hw_sem_release(ioc
);
318 bfa_fsm_set_state(ioc
, bfa_ioc_sm_initfail
);
322 bfa_ioc_hw_sem_release(ioc
);
323 bfa_ioc_timer_stop(ioc
);
324 bfa_fsm_set_state(ioc
, bfa_ioc_sm_disabled
);
328 bfa_sm_fault(ioc
, event
);
333 bfa_ioc_sm_enabling_entry(struct bfa_ioc
*ioc
)
335 bfa_ioc_timer_start(ioc
);
336 bfa_ioc_send_enable(ioc
);
340 * Host IOC function is being enabled, awaiting response from firmware.
341 * Semaphore is acquired.
344 bfa_ioc_sm_enabling(struct bfa_ioc
*ioc
, enum ioc_event event
)
347 case IOC_E_FWRSP_ENABLE
:
348 bfa_ioc_timer_stop(ioc
);
349 bfa_ioc_hw_sem_release(ioc
);
350 bfa_fsm_set_state(ioc
, bfa_ioc_sm_getattr
);
354 bfa_ioc_timer_stop(ioc
);
359 if (ioc
->retry_count
< BFA_IOC_HWINIT_MAX
) {
360 writel(BFI_IOC_UNINIT
,
361 ioc
->ioc_regs
.ioc_fwstate
);
362 bfa_fsm_set_state(ioc
, bfa_ioc_sm_hwinit
);
366 bfa_ioc_hw_sem_release(ioc
);
367 bfa_fsm_set_state(ioc
, bfa_ioc_sm_initfail
);
371 bfa_ioc_timer_stop(ioc
);
372 bfa_ioc_hw_sem_release(ioc
);
373 bfa_fsm_set_state(ioc
, bfa_ioc_sm_disabled
);
377 bfa_ioc_send_enable(ioc
);
381 bfa_sm_fault(ioc
, event
);
386 bfa_ioc_sm_getattr_entry(struct bfa_ioc
*ioc
)
388 bfa_ioc_timer_start(ioc
);
389 bfa_ioc_send_getattr(ioc
);
394 * IOC configuration in progress. Timer is active.
397 bfa_ioc_sm_getattr(struct bfa_ioc
*ioc
, enum ioc_event event
)
400 case IOC_E_FWRSP_GETATTR
:
401 bfa_ioc_timer_stop(ioc
);
402 bfa_ioc_check_attr_wwns(ioc
);
403 bfa_fsm_set_state(ioc
, bfa_ioc_sm_op
);
407 bfa_ioc_timer_stop(ioc
);
411 bfa_fsm_set_state(ioc
, bfa_ioc_sm_initfail
);
415 bfa_ioc_timer_stop(ioc
);
416 bfa_fsm_set_state(ioc
, bfa_ioc_sm_disabled
);
420 bfa_sm_fault(ioc
, event
);
425 bfa_ioc_sm_op_entry(struct bfa_ioc
*ioc
)
427 ioc
->cbfn
->enable_cbfn(ioc
->bfa
, BFA_STATUS_OK
);
428 bfa_ioc_hb_monitor(ioc
);
432 bfa_ioc_sm_op(struct bfa_ioc
*ioc
, enum ioc_event event
)
439 bfa_ioc_hb_stop(ioc
);
440 bfa_fsm_set_state(ioc
, bfa_ioc_sm_disabling
);
446 * Hard error or IOC recovery by other function.
447 * Treat it same as heartbeat failure.
449 bfa_ioc_hb_stop(ioc
);
450 /* !!! fall through !!! */
453 bfa_fsm_set_state(ioc
, bfa_ioc_sm_hbfail
);
457 bfa_sm_fault(ioc
, event
);
462 bfa_ioc_sm_disabling_entry(struct bfa_ioc
*ioc
)
464 bfa_ioc_timer_start(ioc
);
465 bfa_ioc_send_disable(ioc
);
469 * IOC is being disabled
472 bfa_ioc_sm_disabling(struct bfa_ioc
*ioc
, enum ioc_event event
)
475 case IOC_E_FWRSP_DISABLE
:
476 bfa_ioc_timer_stop(ioc
);
477 bfa_fsm_set_state(ioc
, bfa_ioc_sm_disabled
);
481 bfa_ioc_timer_stop(ioc
);
483 * !!! fall through !!!
487 writel(BFI_IOC_FAIL
, ioc
->ioc_regs
.ioc_fwstate
);
488 bfa_fsm_set_state(ioc
, bfa_ioc_sm_disabled
);
492 bfa_sm_fault(ioc
, event
);
497 * IOC disable completion entry.
500 bfa_ioc_sm_disabled_entry(struct bfa_ioc
*ioc
)
502 bfa_ioc_disable_comp(ioc
);
506 bfa_ioc_sm_disabled(struct bfa_ioc
*ioc
, enum ioc_event event
)
510 bfa_fsm_set_state(ioc
, bfa_ioc_sm_semwait
);
514 ioc
->cbfn
->disable_cbfn(ioc
->bfa
);
521 bfa_ioc_firmware_unlock(ioc
);
522 bfa_fsm_set_state(ioc
, bfa_ioc_sm_reset
);
526 bfa_sm_fault(ioc
, event
);
531 bfa_ioc_sm_initfail_entry(struct bfa_ioc
*ioc
)
533 ioc
->cbfn
->enable_cbfn(ioc
->bfa
, BFA_STATUS_IOC_FAILURE
);
534 bfa_ioc_timer_start(ioc
);
539 * Hardware initialization failed.
542 bfa_ioc_sm_initfail(struct bfa_ioc
*ioc
, enum ioc_event event
)
546 bfa_ioc_timer_stop(ioc
);
547 bfa_fsm_set_state(ioc
, bfa_ioc_sm_disabled
);
551 bfa_ioc_timer_stop(ioc
);
552 bfa_ioc_firmware_unlock(ioc
);
553 bfa_fsm_set_state(ioc
, bfa_ioc_sm_reset
);
557 bfa_fsm_set_state(ioc
, bfa_ioc_sm_semwait
);
561 bfa_sm_fault(ioc
, event
);
566 bfa_ioc_sm_hbfail_entry(struct bfa_ioc
*ioc
)
568 struct list_head
*qe
;
569 struct bfa_ioc_hbfail_notify
*notify
;
572 * Mark IOC as failed in hardware and stop firmware.
574 bfa_ioc_lpu_stop(ioc
);
575 writel(BFI_IOC_FAIL
, ioc
->ioc_regs
.ioc_fwstate
);
578 * Notify other functions on HB failure.
580 bfa_ioc_notify_hbfail(ioc
);
583 * Notify driver and common modules registered for notification.
585 ioc
->cbfn
->hbfail_cbfn(ioc
->bfa
);
586 list_for_each(qe
, &ioc
->hb_notify_q
) {
587 notify
= (struct bfa_ioc_hbfail_notify
*) qe
;
588 notify
->cbfn(notify
->cbarg
);
592 * Flush any queued up mailbox requests.
594 bfa_ioc_mbox_hbfail(ioc
);
597 * Trigger auto-recovery after a delay.
599 if (ioc
->auto_recover
)
600 mod_timer(&ioc
->ioc_timer
, jiffies
+
601 msecs_to_jiffies(BFA_IOC_TOV_RECOVER
));
606 * IOC heartbeat failure.
609 bfa_ioc_sm_hbfail(struct bfa_ioc
*ioc
, enum ioc_event event
)
614 ioc
->cbfn
->enable_cbfn(ioc
->bfa
, BFA_STATUS_IOC_FAILURE
);
618 if (ioc
->auto_recover
)
619 bfa_ioc_timer_stop(ioc
);
620 bfa_fsm_set_state(ioc
, bfa_ioc_sm_disabled
);
624 bfa_fsm_set_state(ioc
, bfa_ioc_sm_semwait
);
629 * Recovery is already initiated by other function.
635 * HB failure notification, ignore.
639 bfa_sm_fault(ioc
, event
);
644 * BFA IOC private functions
648 bfa_ioc_disable_comp(struct bfa_ioc
*ioc
)
650 struct list_head
*qe
;
651 struct bfa_ioc_hbfail_notify
*notify
;
653 ioc
->cbfn
->disable_cbfn(ioc
->bfa
);
656 * Notify common modules registered for notification.
658 list_for_each(qe
, &ioc
->hb_notify_q
) {
659 notify
= (struct bfa_ioc_hbfail_notify
*) qe
;
660 notify
->cbfn(notify
->cbarg
);
/**
 * Semaphore-wait timer callback: retry acquiring the h/w semaphore.
 * @ioc_arg: opaque pointer to the owning struct bfa_ioc
 */
void
bfa_ioc_sem_timeout(void *ioc_arg)
{
	struct bfa_ioc *ioc = (struct bfa_ioc *) ioc_arg;

	bfa_ioc_hw_sem_get(ioc);
}
673 bfa_ioc_sem_get(void __iomem
*sem_reg
)
677 #define BFA_SEM_SPINCNT 3000
679 r32
= readl(sem_reg
);
681 while (r32
&& (cnt
< BFA_SEM_SPINCNT
)) {
684 r32
= readl(sem_reg
);
690 BUG_ON(!(cnt
< BFA_SEM_SPINCNT
));
695 bfa_ioc_sem_release(void __iomem
*sem_reg
)
701 bfa_ioc_hw_sem_get(struct bfa_ioc
*ioc
)
706 * First read to the semaphore register will return 0, subsequent reads
707 * will return 1. Semaphore is released by writing 1 to the register
709 r32
= readl(ioc
->ioc_regs
.ioc_sem_reg
);
711 bfa_fsm_send_event(ioc
, IOC_E_SEMLOCKED
);
715 mod_timer(&ioc
->sem_timer
, jiffies
+
716 msecs_to_jiffies(BFA_IOC_HWSEM_TOV
));
720 bfa_ioc_hw_sem_release(struct bfa_ioc
*ioc
)
722 writel(1, ioc
->ioc_regs
.ioc_sem_reg
);
726 bfa_ioc_hw_sem_get_cancel(struct bfa_ioc
*ioc
)
728 del_timer(&ioc
->sem_timer
);
733 * Initialize LPU local memory (aka secondary memory / SRAM)
736 bfa_ioc_lmem_init(struct bfa_ioc
*ioc
)
740 #define PSS_LMEM_INIT_TIME 10000
742 pss_ctl
= readl(ioc
->ioc_regs
.pss_ctl_reg
);
743 pss_ctl
&= ~__PSS_LMEM_RESET
;
744 pss_ctl
|= __PSS_LMEM_INIT_EN
;
747 * i2c workaround 12.5khz clock
749 pss_ctl
|= __PSS_I2C_CLK_DIV(3UL);
750 writel(pss_ctl
, ioc
->ioc_regs
.pss_ctl_reg
);
753 * wait for memory initialization to be complete
757 pss_ctl
= readl(ioc
->ioc_regs
.pss_ctl_reg
);
759 } while (!(pss_ctl
& __PSS_LMEM_INIT_DONE
) && (i
< PSS_LMEM_INIT_TIME
));
762 * If memory initialization is not successful, IOC timeout will catch
765 BUG_ON(!(pss_ctl
& __PSS_LMEM_INIT_DONE
));
767 pss_ctl
&= ~(__PSS_LMEM_INIT_DONE
| __PSS_LMEM_INIT_EN
);
768 writel(pss_ctl
, ioc
->ioc_regs
.pss_ctl_reg
);
772 bfa_ioc_lpu_start(struct bfa_ioc
*ioc
)
777 * Take processor out of reset.
779 pss_ctl
= readl(ioc
->ioc_regs
.pss_ctl_reg
);
780 pss_ctl
&= ~__PSS_LPU0_RESET
;
782 writel(pss_ctl
, ioc
->ioc_regs
.pss_ctl_reg
);
786 bfa_ioc_lpu_stop(struct bfa_ioc
*ioc
)
791 * Put processors in reset.
793 pss_ctl
= readl(ioc
->ioc_regs
.pss_ctl_reg
);
794 pss_ctl
|= (__PSS_LPU0_RESET
| __PSS_LPU1_RESET
);
796 writel(pss_ctl
, ioc
->ioc_regs
.pss_ctl_reg
);
800 * Get driver and firmware versions.
803 bfa_ioc_fwver_get(struct bfa_ioc
*ioc
, struct bfi_ioc_image_hdr
*fwhdr
)
808 u32
*fwsig
= (u32
*) fwhdr
;
810 pgnum
= bfa_ioc_smem_pgnum(ioc
, loff
);
811 pgoff
= bfa_ioc_smem_pgoff(ioc
, loff
);
812 writel(pgnum
, ioc
->ioc_regs
.host_page_num_fn
);
814 for (i
= 0; i
< (sizeof(struct bfi_ioc_image_hdr
) / sizeof(u32
));
817 swab32(readl((loff
) + (ioc
->ioc_regs
.smem_page_start
)));
823 * Returns TRUE if same.
826 bfa_ioc_fwver_cmp(struct bfa_ioc
*ioc
, struct bfi_ioc_image_hdr
*fwhdr
)
828 struct bfi_ioc_image_hdr
*drv_fwhdr
;
831 drv_fwhdr
= (struct bfi_ioc_image_hdr
*)
832 bfa_cb_image_get_chunk(BFA_IOC_FWIMG_TYPE(ioc
), 0);
834 for (i
= 0; i
< BFI_IOC_MD5SUM_SZ
; i
++) {
835 if (fwhdr
->md5sum
[i
] != drv_fwhdr
->md5sum
[i
])
843 * Return true if current running version is valid. Firmware signature and
844 * execution context (driver/bios) must match.
847 bfa_ioc_fwver_valid(struct bfa_ioc
*ioc
)
849 struct bfi_ioc_image_hdr fwhdr
, *drv_fwhdr
;
852 * If bios/efi boot (flash based) -- return true
854 if (bfa_ioc_is_optrom(ioc
))
857 bfa_ioc_fwver_get(ioc
, &fwhdr
);
858 drv_fwhdr
= (struct bfi_ioc_image_hdr
*)
859 bfa_cb_image_get_chunk(BFA_IOC_FWIMG_TYPE(ioc
), 0);
861 if (fwhdr
.signature
!= drv_fwhdr
->signature
)
864 if (fwhdr
.exec
!= drv_fwhdr
->exec
)
867 return bfa_ioc_fwver_cmp(ioc
, &fwhdr
);
871 * Conditionally flush any pending message from firmware at start.
874 bfa_ioc_msgflush(struct bfa_ioc
*ioc
)
878 r32
= readl(ioc
->ioc_regs
.lpu_mbox_cmd
);
880 writel(1, ioc
->ioc_regs
.lpu_mbox_cmd
);
884 * @img ioc_init_logic.jpg
887 bfa_ioc_hwinit(struct bfa_ioc
*ioc
, bool force
)
889 enum bfi_ioc_state ioc_fwstate
;
892 ioc_fwstate
= readl(ioc
->ioc_regs
.ioc_fwstate
);
895 ioc_fwstate
= BFI_IOC_UNINIT
;
898 * check if firmware is valid
900 fwvalid
= (ioc_fwstate
== BFI_IOC_UNINIT
) ?
901 false : bfa_ioc_fwver_valid(ioc
);
904 bfa_ioc_boot(ioc
, BFI_BOOT_TYPE_NORMAL
, ioc
->pcidev
.device_id
);
909 * If hardware initialization is in progress (initialized by other IOC),
910 * just wait for an initialization completion interrupt.
912 if (ioc_fwstate
== BFI_IOC_INITING
) {
913 ioc
->cbfn
->reset_cbfn(ioc
->bfa
);
918 * If IOC function is disabled and firmware version is same,
919 * just re-enable IOC.
921 * If option rom, IOC must not be in operational state. With
922 * convergence, IOC will be in operational state when 2nd driver
925 if (ioc_fwstate
== BFI_IOC_DISABLED
||
926 (!bfa_ioc_is_optrom(ioc
) && ioc_fwstate
== BFI_IOC_OP
)) {
928 * When using MSI-X any pending firmware ready event should
929 * be flushed. Otherwise MSI-X interrupts are not delivered.
931 bfa_ioc_msgflush(ioc
);
932 ioc
->cbfn
->reset_cbfn(ioc
->bfa
);
933 bfa_fsm_send_event(ioc
, IOC_E_FWREADY
);
938 * Initialize the h/w for any other states.
940 bfa_ioc_boot(ioc
, BFI_BOOT_TYPE_NORMAL
, ioc
->pcidev
.device_id
);
944 bfa_ioc_timeout(void *ioc_arg
)
946 struct bfa_ioc
*ioc
= (struct bfa_ioc
*) ioc_arg
;
948 bfa_fsm_send_event(ioc
, IOC_E_TIMEOUT
);
952 bfa_ioc_mbox_send(struct bfa_ioc
*ioc
, void *ioc_msg
, int len
)
954 u32
*msgp
= (u32
*) ioc_msg
;
957 BUG_ON(!(len
<= BFI_IOC_MSGLEN_MAX
));
960 * first write msg to mailbox registers
962 for (i
= 0; i
< len
/ sizeof(u32
); i
++)
963 writel(cpu_to_le32(msgp
[i
]),
964 ioc
->ioc_regs
.hfn_mbox
+ i
* sizeof(u32
));
966 for (; i
< BFI_IOC_MSGLEN_MAX
/ sizeof(u32
); i
++)
967 writel(0, ioc
->ioc_regs
.hfn_mbox
+ i
* sizeof(u32
));
970 * write 1 to mailbox CMD to trigger LPU event
972 writel(1, ioc
->ioc_regs
.hfn_mbox_cmd
);
973 (void) readl(ioc
->ioc_regs
.hfn_mbox_cmd
);
977 bfa_ioc_send_enable(struct bfa_ioc
*ioc
)
979 struct bfi_ioc_ctrl_req enable_req
;
982 bfi_h2i_set(enable_req
.mh
, BFI_MC_IOC
, BFI_IOC_H2I_ENABLE_REQ
,
983 bfa_ioc_portid(ioc
));
984 enable_req
.ioc_class
= ioc
->ioc_mc
;
985 do_gettimeofday(&tv
);
986 enable_req
.tv_sec
= ntohl(tv
.tv_sec
);
987 bfa_ioc_mbox_send(ioc
, &enable_req
, sizeof(struct bfi_ioc_ctrl_req
));
991 bfa_ioc_send_disable(struct bfa_ioc
*ioc
)
993 struct bfi_ioc_ctrl_req disable_req
;
995 bfi_h2i_set(disable_req
.mh
, BFI_MC_IOC
, BFI_IOC_H2I_DISABLE_REQ
,
996 bfa_ioc_portid(ioc
));
997 bfa_ioc_mbox_send(ioc
, &disable_req
, sizeof(struct bfi_ioc_ctrl_req
));
1001 bfa_ioc_send_getattr(struct bfa_ioc
*ioc
)
1003 struct bfi_ioc_getattr_req attr_req
;
1005 bfi_h2i_set(attr_req
.mh
, BFI_MC_IOC
, BFI_IOC_H2I_GETATTR_REQ
,
1006 bfa_ioc_portid(ioc
));
1007 bfa_dma_be_addr_set(attr_req
.attr_addr
, ioc
->attr_dma
.pa
);
1008 bfa_ioc_mbox_send(ioc
, &attr_req
, sizeof(attr_req
));
1012 bfa_ioc_hb_check(void *cbarg
)
1014 struct bfa_ioc
*ioc
= cbarg
;
1017 hb_count
= readl(ioc
->ioc_regs
.heartbeat
);
1018 if (ioc
->hb_count
== hb_count
) {
1019 pr_crit("Firmware heartbeat failure at %d", hb_count
);
1020 bfa_ioc_recover(ioc
);
1023 ioc
->hb_count
= hb_count
;
1026 bfa_ioc_mbox_poll(ioc
);
1027 mod_timer(&ioc
->hb_timer
, jiffies
+
1028 msecs_to_jiffies(BFA_IOC_HB_TOV
));
1032 bfa_ioc_hb_monitor(struct bfa_ioc
*ioc
)
1034 ioc
->hb_count
= readl(ioc
->ioc_regs
.heartbeat
);
1035 mod_timer(&ioc
->hb_timer
, jiffies
+
1036 msecs_to_jiffies(BFA_IOC_HB_TOV
));
1040 bfa_ioc_hb_stop(struct bfa_ioc
*ioc
)
1042 del_timer(&ioc
->hb_timer
);
1047 * Initiate a full firmware download.
1050 bfa_ioc_download_fw(struct bfa_ioc
*ioc
, u32 boot_type
,
1060 * Initialize LMEM first before code download
1062 bfa_ioc_lmem_init(ioc
);
1065 * Flash based firmware boot
1067 if (bfa_ioc_is_optrom(ioc
))
1068 boot_type
= BFI_BOOT_TYPE_FLASH
;
1069 fwimg
= bfa_cb_image_get_chunk(BFA_IOC_FWIMG_TYPE(ioc
), chunkno
);
1071 pgnum
= bfa_ioc_smem_pgnum(ioc
, loff
);
1072 pgoff
= bfa_ioc_smem_pgoff(ioc
, loff
);
1074 writel(pgnum
, ioc
->ioc_regs
.host_page_num_fn
);
1076 for (i
= 0; i
< bfa_cb_image_get_size(BFA_IOC_FWIMG_TYPE(ioc
)); i
++) {
1077 if (BFA_IOC_FLASH_CHUNK_NO(i
) != chunkno
) {
1078 chunkno
= BFA_IOC_FLASH_CHUNK_NO(i
);
1079 fwimg
= bfa_cb_image_get_chunk(BFA_IOC_FWIMG_TYPE(ioc
),
1080 BFA_IOC_FLASH_CHUNK_ADDR(chunkno
));
1086 writel((swab32(fwimg
[BFA_IOC_FLASH_OFFSET_IN_CHUNK(i
)])),
1087 ((ioc
->ioc_regs
.smem_page_start
) + (loff
)));
1089 loff
+= sizeof(u32
);
1092 * handle page offset wrap around
1094 loff
= PSS_SMEM_PGOFF(loff
);
1098 ioc
->ioc_regs
.host_page_num_fn
);
1102 writel(bfa_ioc_smem_pgnum(ioc
, 0),
1103 ioc
->ioc_regs
.host_page_num_fn
);
1106 * Set boot type and boot param at the end.
1108 writel((swab32(swab32(boot_type
))), ((ioc
->ioc_regs
.smem_page_start
)
1109 + (BFI_BOOT_TYPE_OFF
)));
1110 writel((swab32(swab32(boot_param
))), ((ioc
->ioc_regs
.smem_page_start
)
1111 + (BFI_BOOT_PARAM_OFF
)));
1115 bfa_ioc_reset(struct bfa_ioc
*ioc
, bool force
)
1117 bfa_ioc_hwinit(ioc
, force
);
1122 * Update BFA configuration from firmware configuration.
1125 bfa_ioc_getattr_reply(struct bfa_ioc
*ioc
)
1127 struct bfi_ioc_attr
*attr
= ioc
->attr
;
1129 attr
->adapter_prop
= ntohl(attr
->adapter_prop
);
1130 attr
->card_type
= ntohl(attr
->card_type
);
1131 attr
->maxfrsize
= ntohs(attr
->maxfrsize
);
1133 bfa_fsm_send_event(ioc
, IOC_E_FWRSP_GETATTR
);
1137 * Attach time initialization of mbox logic.
1140 bfa_ioc_mbox_attach(struct bfa_ioc
*ioc
)
1142 struct bfa_ioc_mbox_mod
*mod
= &ioc
->mbox_mod
;
1145 INIT_LIST_HEAD(&mod
->cmd_q
);
1146 for (mc
= 0; mc
< BFI_MC_MAX
; mc
++) {
1147 mod
->mbhdlr
[mc
].cbfn
= NULL
;
1148 mod
->mbhdlr
[mc
].cbarg
= ioc
->bfa
;
1153 * Mbox poll timer -- restarts any pending mailbox requests.
1156 bfa_ioc_mbox_poll(struct bfa_ioc
*ioc
)
1158 struct bfa_ioc_mbox_mod
*mod
= &ioc
->mbox_mod
;
1159 struct bfa_mbox_cmd
*cmd
;
1163 * If no command pending, do nothing
1165 if (list_empty(&mod
->cmd_q
))
1169 * If previous command is not yet fetched by firmware, do nothing
1171 stat
= readl(ioc
->ioc_regs
.hfn_mbox_cmd
);
1176 * Enqueue command to firmware.
1178 bfa_q_deq(&mod
->cmd_q
, &cmd
);
1179 bfa_ioc_mbox_send(ioc
, cmd
->msg
, sizeof(cmd
->msg
));
1183 * Cleanup any pending requests.
1186 bfa_ioc_mbox_hbfail(struct bfa_ioc
*ioc
)
1188 struct bfa_ioc_mbox_mod
*mod
= &ioc
->mbox_mod
;
1189 struct bfa_mbox_cmd
*cmd
;
1191 while (!list_empty(&mod
->cmd_q
))
1192 bfa_q_deq(&mod
->cmd_q
, &cmd
);
1199 bfa_ioc_pll_init(struct bfa_ioc
*ioc
)
1202 * Hold semaphore so that nobody can access the chip during init.
1204 bfa_ioc_sem_get(ioc
->ioc_regs
.ioc_init_sem_reg
);
1206 bfa_ioc_pll_init_asic(ioc
);
1208 ioc
->pllinit
= true;
1210 * release semaphore.
1212 bfa_ioc_sem_release(ioc
->ioc_regs
.ioc_init_sem_reg
);
1214 return BFA_STATUS_OK
;
1218 * Interface used by diag module to do firmware boot with memory test
1219 * as the entry vector.
1222 bfa_ioc_boot(struct bfa_ioc
*ioc
, u32 boot_type
, u32 boot_param
)
1226 bfa_ioc_stats(ioc
, ioc_boots
);
1228 if (bfa_ioc_pll_init(ioc
) != BFA_STATUS_OK
)
1232 * Initialize IOC state of all functions on a chip reset.
1234 rb
= ioc
->pcidev
.pci_bar_kva
;
1235 if (boot_param
== BFI_BOOT_TYPE_MEMTEST
) {
1236 writel(BFI_IOC_MEMTEST
, (rb
+ BFA_IOC0_STATE_REG
));
1237 writel(BFI_IOC_MEMTEST
, (rb
+ BFA_IOC1_STATE_REG
));
1239 writel(BFI_IOC_INITING
, (rb
+ BFA_IOC0_STATE_REG
));
1240 writel(BFI_IOC_INITING
, (rb
+ BFA_IOC1_STATE_REG
));
1243 bfa_ioc_msgflush(ioc
);
1244 bfa_ioc_download_fw(ioc
, boot_type
, boot_param
);
1247 * Enable interrupts just before starting LPU
1249 ioc
->cbfn
->reset_cbfn(ioc
->bfa
);
1250 bfa_ioc_lpu_start(ioc
);
1254 * Enable/disable IOC failure auto recovery.
1257 bfa_ioc_auto_recover(bool auto_recover
)
1259 bfa_auto_recover
= auto_recover
;
1263 bfa_ioc_is_operational(struct bfa_ioc
*ioc
)
1265 return bfa_fsm_cmp_state(ioc
, bfa_ioc_sm_op
);
1269 bfa_ioc_is_initialized(struct bfa_ioc
*ioc
)
1271 u32 r32
= readl(ioc
->ioc_regs
.ioc_fwstate
);
1273 return ((r32
!= BFI_IOC_UNINIT
) &&
1274 (r32
!= BFI_IOC_INITING
) &&
1275 (r32
!= BFI_IOC_MEMTEST
));
1279 bfa_ioc_msgget(struct bfa_ioc
*ioc
, void *mbmsg
)
1288 for (i
= 0; i
< (sizeof(union bfi_ioc_i2h_msg_u
) / sizeof(u32
));
1290 r32
= readl(ioc
->ioc_regs
.lpu_mbox
+
1292 msgp
[i
] = htonl(r32
);
1296 * turn off mailbox interrupt by clearing mailbox status
1298 writel(1, ioc
->ioc_regs
.lpu_mbox_cmd
);
1299 readl(ioc
->ioc_regs
.lpu_mbox_cmd
);
1303 bfa_ioc_isr(struct bfa_ioc
*ioc
, struct bfi_mbmsg
*m
)
1305 union bfi_ioc_i2h_msg_u
*msg
;
1307 msg
= (union bfi_ioc_i2h_msg_u
*) m
;
1309 bfa_ioc_stats(ioc
, ioc_isrs
);
1311 switch (msg
->mh
.msg_id
) {
1312 case BFI_IOC_I2H_HBEAT
:
1315 case BFI_IOC_I2H_READY_EVENT
:
1316 bfa_fsm_send_event(ioc
, IOC_E_FWREADY
);
1319 case BFI_IOC_I2H_ENABLE_REPLY
:
1320 bfa_fsm_send_event(ioc
, IOC_E_FWRSP_ENABLE
);
1323 case BFI_IOC_I2H_DISABLE_REPLY
:
1324 bfa_fsm_send_event(ioc
, IOC_E_FWRSP_DISABLE
);
1327 case BFI_IOC_I2H_GETATTR_REPLY
:
1328 bfa_ioc_getattr_reply(ioc
);
1337 * IOC attach time initialization and setup.
1339 * @param[in] ioc memory for IOC
1340 * @param[in] bfa driver instance structure
1343 bfa_ioc_attach(struct bfa_ioc
*ioc
, void *bfa
, struct bfa_ioc_cbfn
*cbfn
)
1347 ioc
->fcmode
= false;
1348 ioc
->pllinit
= false;
1349 ioc
->dbg_fwsave_once
= true;
1351 bfa_ioc_mbox_attach(ioc
);
1352 INIT_LIST_HEAD(&ioc
->hb_notify_q
);
1354 bfa_fsm_set_state(ioc
, bfa_ioc_sm_reset
);
1358 * Driver detach time IOC cleanup.
1361 bfa_ioc_detach(struct bfa_ioc
*ioc
)
1363 bfa_fsm_send_event(ioc
, IOC_E_DETACH
);
1367 * Setup IOC PCI properties.
1369 * @param[in] pcidev PCI device information for this IOC
1372 bfa_ioc_pci_init(struct bfa_ioc
*ioc
, struct bfa_pcidev
*pcidev
,
1376 ioc
->pcidev
= *pcidev
;
1377 ioc
->ctdev
= bfa_asic_id_ct(ioc
->pcidev
.device_id
);
1378 ioc
->cna
= ioc
->ctdev
&& !ioc
->fcmode
;
1380 bfa_ioc_set_ct_hwif(ioc
);
1382 bfa_ioc_map_port(ioc
);
1383 bfa_ioc_reg_init(ioc
);
1387 * Initialize IOC dma memory
1389 * @param[in] dm_kva kernel virtual address of IOC dma memory
1390 * @param[in] dm_pa physical address of IOC dma memory
1393 bfa_ioc_mem_claim(struct bfa_ioc
*ioc
, u8
*dm_kva
, u64 dm_pa
)
1396 * dma memory for firmware attribute
1398 ioc
->attr_dma
.kva
= dm_kva
;
1399 ioc
->attr_dma
.pa
= dm_pa
;
1400 ioc
->attr
= (struct bfi_ioc_attr
*) dm_kva
;
1404 * Return size of dma memory required.
1407 bfa_ioc_meminfo(void)
1409 return roundup(sizeof(struct bfi_ioc_attr
), BFA_DMA_ALIGN_SZ
);
1413 bfa_ioc_enable(struct bfa_ioc
*ioc
)
1415 bfa_ioc_stats(ioc
, ioc_enables
);
1416 ioc
->dbg_fwsave_once
= true;
1418 bfa_fsm_send_event(ioc
, IOC_E_ENABLE
);
1422 bfa_ioc_disable(struct bfa_ioc
*ioc
)
1424 bfa_ioc_stats(ioc
, ioc_disables
);
1425 bfa_fsm_send_event(ioc
, IOC_E_DISABLE
);
1429 bfa_ioc_smem_pgnum(struct bfa_ioc
*ioc
, u32 fmaddr
)
1431 return PSS_SMEM_PGNUM(ioc
->ioc_regs
.smem_pg0
, fmaddr
);
1435 bfa_ioc_smem_pgoff(struct bfa_ioc
*ioc
, u32 fmaddr
)
1437 return PSS_SMEM_PGOFF(fmaddr
);
1441 * Register mailbox message handler functions
1443 * @param[in] ioc IOC instance
1444 * @param[in] mcfuncs message class handler functions
1447 bfa_ioc_mbox_register(struct bfa_ioc
*ioc
, bfa_ioc_mbox_mcfunc_t
*mcfuncs
)
1449 struct bfa_ioc_mbox_mod
*mod
= &ioc
->mbox_mod
;
1452 for (mc
= 0; mc
< BFI_MC_MAX
; mc
++)
1453 mod
->mbhdlr
[mc
].cbfn
= mcfuncs
[mc
];
1457 * Register mailbox message handler function, to be called by common modules
1460 bfa_ioc_mbox_regisr(struct bfa_ioc
*ioc
, enum bfi_mclass mc
,
1461 bfa_ioc_mbox_mcfunc_t cbfn
, void *cbarg
)
1463 struct bfa_ioc_mbox_mod
*mod
= &ioc
->mbox_mod
;
1465 mod
->mbhdlr
[mc
].cbfn
= cbfn
;
1466 mod
->mbhdlr
[mc
].cbarg
= cbarg
;
1470 * Queue a mailbox command request to firmware. Waits if mailbox is busy.
1471 * Responsibility of caller to serialize
1473 * @param[in] ioc IOC instance
1474 * @param[i] cmd Mailbox command
1477 bfa_ioc_mbox_queue(struct bfa_ioc
*ioc
, struct bfa_mbox_cmd
*cmd
)
1479 struct bfa_ioc_mbox_mod
*mod
= &ioc
->mbox_mod
;
1483 * If a previous command is pending, queue new command
1485 if (!list_empty(&mod
->cmd_q
)) {
1486 list_add_tail(&cmd
->qe
, &mod
->cmd_q
);
1491 * If mailbox is busy, queue command for poll timer
1493 stat
= readl(ioc
->ioc_regs
.hfn_mbox_cmd
);
1495 list_add_tail(&cmd
->qe
, &mod
->cmd_q
);
1500 * mailbox is free -- queue command to firmware
1502 bfa_ioc_mbox_send(ioc
, cmd
->msg
, sizeof(cmd
->msg
));
1506 * Handle mailbox interrupts
1509 bfa_ioc_mbox_isr(struct bfa_ioc
*ioc
)
1511 struct bfa_ioc_mbox_mod
*mod
= &ioc
->mbox_mod
;
1515 bfa_ioc_msgget(ioc
, &m
);
1518 * Treat IOC message class as special.
1520 mc
= m
.mh
.msg_class
;
1521 if (mc
== BFI_MC_IOC
) {
1522 bfa_ioc_isr(ioc
, &m
);
1526 if ((mc
> BFI_MC_MAX
) || (mod
->mbhdlr
[mc
].cbfn
== NULL
))
1529 mod
->mbhdlr
[mc
].cbfn(mod
->mbhdlr
[mc
].cbarg
, &m
);
1533 bfa_ioc_error_isr(struct bfa_ioc
*ioc
)
1535 bfa_fsm_send_event(ioc
, IOC_E_HWERROR
);
1539 bfa_ioc_set_fcmode(struct bfa_ioc
*ioc
)
1542 ioc
->port_id
= bfa_ioc_pcifn(ioc
);
1546 * return true if IOC is disabled
1549 bfa_ioc_is_disabled(struct bfa_ioc
*ioc
)
1551 return bfa_fsm_cmp_state(ioc
, bfa_ioc_sm_disabling
) ||
1552 bfa_fsm_cmp_state(ioc
, bfa_ioc_sm_disabled
);
1556 * return true if IOC firmware is different.
1559 bfa_ioc_fw_mismatch(struct bfa_ioc
*ioc
)
1561 return bfa_fsm_cmp_state(ioc
, bfa_ioc_sm_reset
) ||
1562 bfa_fsm_cmp_state(ioc
, bfa_ioc_sm_fwcheck
) ||
1563 bfa_fsm_cmp_state(ioc
, bfa_ioc_sm_mismatch
);
1566 #define bfa_ioc_state_disabled(__sm) \
1567 (((__sm) == BFI_IOC_UNINIT) || \
1568 ((__sm) == BFI_IOC_INITING) || \
1569 ((__sm) == BFI_IOC_HWINIT) || \
1570 ((__sm) == BFI_IOC_DISABLED) || \
1571 ((__sm) == BFI_IOC_FAIL) || \
1572 ((__sm) == BFI_IOC_CFG_DISABLED))
1575 * Check if adapter is disabled -- both IOCs should be in a disabled
1579 bfa_ioc_adapter_is_disabled(struct bfa_ioc
*ioc
)
1582 void __iomem
*rb
= ioc
->pcidev
.pci_bar_kva
;
1584 if (!bfa_fsm_cmp_state(ioc
, bfa_ioc_sm_disabled
))
1587 ioc_state
= readl(rb
+ BFA_IOC0_STATE_REG
);
1588 if (!bfa_ioc_state_disabled(ioc_state
))
1591 if (ioc
->pcidev
.device_id
!= PCI_DEVICE_ID_BROCADE_FC_8G1P
) {
1592 ioc_state
= readl(rb
+ BFA_IOC1_STATE_REG
);
1593 if (!bfa_ioc_state_disabled(ioc_state
))
1601 * Add to IOC heartbeat failure notification queue. To be used by common
1602 * modules such as cee, port, diag.
1605 bfa_ioc_hbfail_register(struct bfa_ioc
*ioc
,
1606 struct bfa_ioc_hbfail_notify
*notify
)
1608 list_add_tail(¬ify
->qe
, &ioc
->hb_notify_q
);
1611 #define BFA_MFG_NAME "Brocade"
1613 bfa_ioc_get_adapter_attr(struct bfa_ioc
*ioc
,
1614 struct bfa_adapter_attr
*ad_attr
)
1616 struct bfi_ioc_attr
*ioc_attr
;
1618 ioc_attr
= ioc
->attr
;
1620 bfa_ioc_get_adapter_serial_num(ioc
, ad_attr
->serial_num
);
1621 bfa_ioc_get_adapter_fw_ver(ioc
, ad_attr
->fw_ver
);
1622 bfa_ioc_get_adapter_optrom_ver(ioc
, ad_attr
->optrom_ver
);
1623 bfa_ioc_get_adapter_manufacturer(ioc
, ad_attr
->manufacturer
);
1624 memcpy(&ad_attr
->vpd
, &ioc_attr
->vpd
,
1625 sizeof(struct bfa_mfg_vpd
));
1627 ad_attr
->nports
= bfa_ioc_get_nports(ioc
);
1628 ad_attr
->max_speed
= bfa_ioc_speed_sup(ioc
);
1630 bfa_ioc_get_adapter_model(ioc
, ad_attr
->model
);
1631 /* For now, model descr uses same model string */
1632 bfa_ioc_get_adapter_model(ioc
, ad_attr
->model_descr
);
1634 ad_attr
->card_type
= ioc_attr
->card_type
;
1635 ad_attr
->is_mezz
= bfa_mfg_is_mezz(ioc_attr
->card_type
);
1637 if (BFI_ADAPTER_IS_SPECIAL(ioc_attr
->adapter_prop
))
1638 ad_attr
->prototype
= 1;
1640 ad_attr
->prototype
= 0;
1642 ad_attr
->pwwn
= bfa_ioc_get_pwwn(ioc
);
1643 ad_attr
->mac
= bfa_ioc_get_mac(ioc
);
1645 ad_attr
->pcie_gen
= ioc_attr
->pcie_gen
;
1646 ad_attr
->pcie_lanes
= ioc_attr
->pcie_lanes
;
1647 ad_attr
->pcie_lanes_orig
= ioc_attr
->pcie_lanes_orig
;
1648 ad_attr
->asic_rev
= ioc_attr
->asic_rev
;
1650 bfa_ioc_get_pci_chip_rev(ioc
, ad_attr
->hw_ver
);
1652 ad_attr
->cna_capable
= ioc
->cna
;
1653 ad_attr
->trunk_capable
= (ad_attr
->nports
> 1) && !ioc
->cna
;
1657 bfa_ioc_get_type(struct bfa_ioc
*ioc
)
1659 if (!ioc
->ctdev
|| ioc
->fcmode
)
1660 return BFA_IOC_TYPE_FC
;
1661 else if (ioc
->ioc_mc
== BFI_MC_IOCFC
)
1662 return BFA_IOC_TYPE_FCoE
;
1663 else if (ioc
->ioc_mc
== BFI_MC_LL
)
1664 return BFA_IOC_TYPE_LL
;
1666 BUG_ON(!(ioc
->ioc_mc
== BFI_MC_LL
));
1667 return BFA_IOC_TYPE_LL
;
1672 bfa_ioc_get_adapter_serial_num(struct bfa_ioc
*ioc
, char *serial_num
)
1674 memset(serial_num
, 0, BFA_ADAPTER_SERIAL_NUM_LEN
);
1676 (void *)ioc
->attr
->brcd_serialnum
,
1677 BFA_ADAPTER_SERIAL_NUM_LEN
);
1681 bfa_ioc_get_adapter_fw_ver(struct bfa_ioc
*ioc
, char *fw_ver
)
1683 memset(fw_ver
, 0, BFA_VERSION_LEN
);
1684 memcpy(fw_ver
, ioc
->attr
->fw_version
, BFA_VERSION_LEN
);
1688 bfa_ioc_get_pci_chip_rev(struct bfa_ioc
*ioc
, char *chip_rev
)
1690 BUG_ON(!(chip_rev
));
1692 memset(chip_rev
, 0, BFA_IOC_CHIP_REV_LEN
);
1698 chip_rev
[4] = ioc
->attr
->asic_rev
;
1703 bfa_ioc_get_adapter_optrom_ver(struct bfa_ioc
*ioc
, char *optrom_ver
)
1705 memset(optrom_ver
, 0, BFA_VERSION_LEN
);
1706 memcpy(optrom_ver
, ioc
->attr
->optrom_version
,
1711 bfa_ioc_get_adapter_manufacturer(struct bfa_ioc
*ioc
, char *manufacturer
)
1713 memset(manufacturer
, 0, BFA_ADAPTER_MFG_NAME_LEN
);
1714 memcpy(manufacturer
, BFA_MFG_NAME
, BFA_ADAPTER_MFG_NAME_LEN
);
1718 bfa_ioc_get_adapter_model(struct bfa_ioc
*ioc
, char *model
)
1720 struct bfi_ioc_attr
*ioc_attr
;
1723 memset(model
, 0, BFA_ADAPTER_MODEL_NAME_LEN
);
1725 ioc_attr
= ioc
->attr
;
1730 snprintf(model
, BFA_ADAPTER_MODEL_NAME_LEN
, "%s-%u",
1731 BFA_MFG_NAME
, ioc_attr
->card_type
);
1735 bfa_ioc_get_state(struct bfa_ioc
*ioc
)
1737 return bfa_sm_to_state(ioc_sm_table
, ioc
->fsm
);
1741 bfa_ioc_get_attr(struct bfa_ioc
*ioc
, struct bfa_ioc_attr
*ioc_attr
)
1743 memset((void *)ioc_attr
, 0, sizeof(struct bfa_ioc_attr
));
1745 ioc_attr
->state
= bfa_ioc_get_state(ioc
);
1746 ioc_attr
->port_id
= ioc
->port_id
;
1748 ioc_attr
->ioc_type
= bfa_ioc_get_type(ioc
);
1750 bfa_ioc_get_adapter_attr(ioc
, &ioc_attr
->adapter_attr
);
1752 ioc_attr
->pci_attr
.device_id
= ioc
->pcidev
.device_id
;
1753 ioc_attr
->pci_attr
.pcifn
= ioc
->pcidev
.pci_func
;
1754 bfa_ioc_get_pci_chip_rev(ioc
, ioc_attr
->pci_attr
.chip_rev
);
1761 bfa_ioc_get_pwwn(struct bfa_ioc
*ioc
)
1763 return ioc
->attr
->pwwn
;
1767 bfa_ioc_get_nwwn(struct bfa_ioc
*ioc
)
1769 return ioc
->attr
->nwwn
;
1773 bfa_ioc_get_adid(struct bfa_ioc
*ioc
)
1775 return ioc
->attr
->mfg_pwwn
;
1779 bfa_ioc_get_mac(struct bfa_ioc
*ioc
)
1782 * Currently mfg mac is used as FCoE enode mac (not configured by PBC)
1784 if (bfa_ioc_get_type(ioc
) == BFA_IOC_TYPE_FCoE
)
1785 return bfa_ioc_get_mfg_mac(ioc
);
1787 return ioc
->attr
->mac
;
1791 bfa_ioc_get_mfg_pwwn(struct bfa_ioc
*ioc
)
1793 return ioc
->attr
->mfg_pwwn
;
1797 bfa_ioc_get_mfg_nwwn(struct bfa_ioc
*ioc
)
1799 return ioc
->attr
->mfg_nwwn
;
1803 bfa_ioc_get_mfg_mac(struct bfa_ioc
*ioc
)
1807 m
= ioc
->attr
->mfg_mac
;
1808 if (bfa_mfg_is_old_wwn_mac_model(ioc
->attr
->card_type
))
1809 m
.mac
[MAC_ADDRLEN
- 1] += bfa_ioc_pcifn(ioc
);
1811 bfa_mfg_increment_wwn_mac(&(m
.mac
[MAC_ADDRLEN
-3]),
1812 bfa_ioc_pcifn(ioc
));
1818 bfa_ioc_get_fcmode(struct bfa_ioc
*ioc
)
1820 return ioc
->fcmode
|| !bfa_asic_id_ct(ioc
->pcidev
.device_id
);
1824 * Firmware failure detected. Start recovery actions.
1827 bfa_ioc_recover(struct bfa_ioc
*ioc
)
1829 bfa_ioc_stats(ioc
, ioc_hbfails
);
1830 bfa_fsm_send_event(ioc
, IOC_E_HBFAIL
);
1834 bfa_ioc_check_attr_wwns(struct bfa_ioc
*ioc
)
1836 if (bfa_ioc_get_type(ioc
) == BFA_IOC_TYPE_LL
)