bna: Fixed build break for allyesconfig
drivers/net/bna/bfa_ioc.c
1 /*
2 * Linux network driver for Brocade Converged Network Adapter.
3 *
4 * This program is free software; you can redistribute it and/or modify it
5 * under the terms of the GNU General Public License (GPL) Version 2 as
6 * published by the Free Software Foundation
7 *
8 * This program is distributed in the hope that it will be useful, but
9 * WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
11 * General Public License for more details.
12 */
13 /*
14 * Copyright (c) 2005-2010 Brocade Communications Systems, Inc.
15 * All rights reserved
16 * www.brocade.com
17 */
18
19 #include "bfa_ioc.h"
20 #include "cna.h"
21 #include "bfi.h"
22 #include "bfi_ctreg.h"
23 #include "bfa_defs.h"
24
25 /**
26 * IOC local definitions
27 */
28
29 #define bfa_ioc_timer_start(__ioc) \
30 mod_timer(&(__ioc)->ioc_timer, jiffies + \
31 msecs_to_jiffies(BFA_IOC_TOV))
32 #define bfa_ioc_timer_stop(__ioc) del_timer(&(__ioc)->ioc_timer)
33
34 #define bfa_ioc_recovery_timer_start(__ioc) \
35 mod_timer(&(__ioc)->ioc_timer, jiffies + \
36 msecs_to_jiffies(BFA_IOC_TOV_RECOVER))
37
38 #define bfa_sem_timer_start(__ioc) \
39 mod_timer(&(__ioc)->sem_timer, jiffies + \
40 msecs_to_jiffies(BFA_IOC_HWSEM_TOV))
41 #define bfa_sem_timer_stop(__ioc) del_timer(&(__ioc)->sem_timer)
42
43 #define bfa_hb_timer_start(__ioc) \
44 mod_timer(&(__ioc)->hb_timer, jiffies + \
45 msecs_to_jiffies(BFA_IOC_HB_TOV))
46 #define bfa_hb_timer_stop(__ioc) del_timer(&(__ioc)->hb_timer)
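/*
 * Each *_start macro above (re)arms a one-shot kernel timer relative to
 * "now"; for example, bfa_hb_timer_start() expands to:
 *
 *	mod_timer(&(__ioc)->hb_timer, jiffies +
 *		msecs_to_jiffies(BFA_IOC_HB_TOV));
 */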
47
48 /**
49 * ASIC-specific macros: see bfa_hw_cb.c and bfa_hw_ct.c for details.
50 */
51
52 #define bfa_ioc_firmware_lock(__ioc) \
53 ((__ioc)->ioc_hwif->ioc_firmware_lock(__ioc))
54 #define bfa_ioc_firmware_unlock(__ioc) \
55 ((__ioc)->ioc_hwif->ioc_firmware_unlock(__ioc))
56 #define bfa_ioc_reg_init(__ioc) ((__ioc)->ioc_hwif->ioc_reg_init(__ioc))
57 #define bfa_ioc_map_port(__ioc) ((__ioc)->ioc_hwif->ioc_map_port(__ioc))
58 #define bfa_ioc_notify_hbfail(__ioc) \
59 ((__ioc)->ioc_hwif->ioc_notify_hbfail(__ioc))
60
61 #define bfa_ioc_is_optrom(__ioc) \
62 (bfa_cb_image_get_size(BFA_IOC_FWIMG_TYPE(__ioc)) < BFA_IOC_FWIMG_MINSZ)
63
64 #define bfa_ioc_mbox_cmd_pending(__ioc) \
65 (!list_empty(&((__ioc)->mbox_mod.cmd_q)) || \
66 readl((__ioc)->ioc_regs.hfn_mbox_cmd))
67
68 bool bfa_nw_auto_recover = true;
69
70 /*
71 * forward declarations
72 */
73 static void bfa_ioc_hw_sem_get(struct bfa_ioc *ioc);
74 static void bfa_ioc_hw_sem_get_cancel(struct bfa_ioc *ioc);
75 static void bfa_ioc_hwinit(struct bfa_ioc *ioc, bool force);
76 static void bfa_ioc_send_enable(struct bfa_ioc *ioc);
77 static void bfa_ioc_send_disable(struct bfa_ioc *ioc);
78 static void bfa_ioc_send_getattr(struct bfa_ioc *ioc);
79 static void bfa_ioc_hb_monitor(struct bfa_ioc *ioc);
80 static void bfa_ioc_hb_stop(struct bfa_ioc *ioc);
81 static void bfa_ioc_reset(struct bfa_ioc *ioc, bool force);
82 static void bfa_ioc_mbox_poll(struct bfa_ioc *ioc);
83 static void bfa_ioc_mbox_hbfail(struct bfa_ioc *ioc);
84 static void bfa_ioc_recover(struct bfa_ioc *ioc);
85 static void bfa_ioc_check_attr_wwns(struct bfa_ioc *ioc);
86 static void bfa_ioc_disable_comp(struct bfa_ioc *ioc);
87 static void bfa_ioc_lpu_stop(struct bfa_ioc *ioc);
88 static void bfa_ioc_boot(struct bfa_ioc *ioc, u32 boot_type,
89 u32 boot_param);
90 static u32 bfa_ioc_smem_pgnum(struct bfa_ioc *ioc, u32 fmaddr);
91 static u32 bfa_ioc_smem_pgoff(struct bfa_ioc *ioc, u32 fmaddr);
92 static void bfa_ioc_get_adapter_serial_num(struct bfa_ioc *ioc,
93 char *serial_num);
94 static void bfa_ioc_get_adapter_fw_ver(struct bfa_ioc *ioc,
95 char *fw_ver);
96 static void bfa_ioc_get_pci_chip_rev(struct bfa_ioc *ioc,
97 char *chip_rev);
98 static void bfa_ioc_get_adapter_optrom_ver(struct bfa_ioc *ioc,
99 char *optrom_ver);
100 static void bfa_ioc_get_adapter_manufacturer(struct bfa_ioc *ioc,
101 char *manufacturer);
102 static void bfa_ioc_get_adapter_model(struct bfa_ioc *ioc, char *model);
103 static u64 bfa_ioc_get_pwwn(struct bfa_ioc *ioc);
104 static mac_t bfa_ioc_get_mfg_mac(struct bfa_ioc *ioc);
105
106 /**
107 * IOC state machine events
108 */
109 enum ioc_event {
110 IOC_E_ENABLE = 1, /*!< IOC enable request */
111 IOC_E_DISABLE = 2, /*!< IOC disable request */
112 IOC_E_TIMEOUT = 3, /*!< f/w response timeout */
113 IOC_E_FWREADY = 4, /*!< f/w initialization done */
114 IOC_E_FWRSP_GETATTR = 5, /*!< IOC get attribute response */
115 IOC_E_FWRSP_ENABLE = 6, /*!< enable f/w response */
116 IOC_E_FWRSP_DISABLE = 7, /*!< disable f/w response */
117 IOC_E_HBFAIL = 8, /*!< heartbeat failure */
118 IOC_E_HWERROR = 9, /*!< hardware error interrupt */
119 IOC_E_SEMLOCKED = 10, /*!< h/w semaphore is locked */
120 IOC_E_DETACH = 11, /*!< driver detach cleanup */
121 };
122
123 bfa_fsm_state_decl(bfa_ioc, reset, struct bfa_ioc, enum ioc_event);
124 bfa_fsm_state_decl(bfa_ioc, fwcheck, struct bfa_ioc, enum ioc_event);
125 bfa_fsm_state_decl(bfa_ioc, mismatch, struct bfa_ioc, enum ioc_event);
126 bfa_fsm_state_decl(bfa_ioc, semwait, struct bfa_ioc, enum ioc_event);
127 bfa_fsm_state_decl(bfa_ioc, hwinit, struct bfa_ioc, enum ioc_event);
128 bfa_fsm_state_decl(bfa_ioc, enabling, struct bfa_ioc, enum ioc_event);
129 bfa_fsm_state_decl(bfa_ioc, getattr, struct bfa_ioc, enum ioc_event);
130 bfa_fsm_state_decl(bfa_ioc, op, struct bfa_ioc, enum ioc_event);
131 bfa_fsm_state_decl(bfa_ioc, initfail, struct bfa_ioc, enum ioc_event);
132 bfa_fsm_state_decl(bfa_ioc, hbfail, struct bfa_ioc, enum ioc_event);
133 bfa_fsm_state_decl(bfa_ioc, disabling, struct bfa_ioc, enum ioc_event);
134 bfa_fsm_state_decl(bfa_ioc, disabled, struct bfa_ioc, enum ioc_event);
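/*
 * A sketch of what each declaration provides (assuming the
 * bfa_fsm_state_decl() helper macro used throughout this driver): one
 * entry action and one event handler per state, both defined below,
 * e.g. for the "reset" state:
 *
 *	static void bfa_ioc_sm_reset_entry(struct bfa_ioc *ioc);
 *	static void bfa_ioc_sm_reset(struct bfa_ioc *ioc,
 *			enum ioc_event event);
 */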
135
136 static struct bfa_sm_table ioc_sm_table[] = {
137 {BFA_SM(bfa_ioc_sm_reset), BFA_IOC_RESET},
138 {BFA_SM(bfa_ioc_sm_fwcheck), BFA_IOC_FWMISMATCH},
139 {BFA_SM(bfa_ioc_sm_mismatch), BFA_IOC_FWMISMATCH},
140 {BFA_SM(bfa_ioc_sm_semwait), BFA_IOC_SEMWAIT},
141 {BFA_SM(bfa_ioc_sm_hwinit), BFA_IOC_HWINIT},
142 {BFA_SM(bfa_ioc_sm_enabling), BFA_IOC_HWINIT},
143 {BFA_SM(bfa_ioc_sm_getattr), BFA_IOC_GETATTR},
144 {BFA_SM(bfa_ioc_sm_op), BFA_IOC_OPERATIONAL},
145 {BFA_SM(bfa_ioc_sm_initfail), BFA_IOC_INITFAIL},
146 {BFA_SM(bfa_ioc_sm_hbfail), BFA_IOC_HBFAIL},
147 {BFA_SM(bfa_ioc_sm_disabling), BFA_IOC_DISABLING},
148 {BFA_SM(bfa_ioc_sm_disabled), BFA_IOC_DISABLED},
149 };
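/*
 * This table maps each state handler to its externally visible
 * bfa_ioc_state value; bfa_ioc_get_state() below resolves the current
 * state via bfa_sm_to_state(ioc_sm_table, ioc->fsm).
 */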
150
151 /**
152 * Reset entry actions -- initialize state machine
153 */
154 static void
155 bfa_ioc_sm_reset_entry(struct bfa_ioc *ioc)
156 {
157 ioc->retry_count = 0;
158 ioc->auto_recover = bfa_nw_auto_recover;
159 }
160
161 /**
162 * Beginning state. IOC is in reset state.
163 */
164 static void
165 bfa_ioc_sm_reset(struct bfa_ioc *ioc, enum ioc_event event)
166 {
167 switch (event) {
168 case IOC_E_ENABLE:
169 bfa_fsm_set_state(ioc, bfa_ioc_sm_fwcheck);
170 break;
171
172 case IOC_E_DISABLE:
173 bfa_ioc_disable_comp(ioc);
174 break;
175
176 case IOC_E_DETACH:
177 break;
178
179 default:
180 bfa_sm_fault(ioc, event);
181 }
182 }
183
184 /**
185 * Semaphore should be acquired for version check.
186 */
187 static void
188 bfa_ioc_sm_fwcheck_entry(struct bfa_ioc *ioc)
189 {
190 bfa_ioc_hw_sem_get(ioc);
191 }
192
193 /**
194 * Awaiting h/w semaphore to continue with version check.
195 */
196 static void
197 bfa_ioc_sm_fwcheck(struct bfa_ioc *ioc, enum ioc_event event)
198 {
199 switch (event) {
200 case IOC_E_SEMLOCKED:
201 if (bfa_ioc_firmware_lock(ioc)) {
202 ioc->retry_count = 0;
203 bfa_fsm_set_state(ioc, bfa_ioc_sm_hwinit);
204 } else {
205 bfa_nw_ioc_hw_sem_release(ioc);
206 bfa_fsm_set_state(ioc, bfa_ioc_sm_mismatch);
207 }
208 break;
209
210 case IOC_E_DISABLE:
211 bfa_ioc_disable_comp(ioc);
212 /* fall through */
213
214 case IOC_E_DETACH:
215 bfa_ioc_hw_sem_get_cancel(ioc);
216 bfa_fsm_set_state(ioc, bfa_ioc_sm_reset);
217 break;
218
219 case IOC_E_FWREADY:
220 break;
221
222 default:
223 bfa_sm_fault(ioc, event);
224 }
225 }
226
227 /**
228 * Notify enable completion callback and generate mismatch AEN.
229 */
230 static void
231 bfa_ioc_sm_mismatch_entry(struct bfa_ioc *ioc)
232 {
233 /**
234 * Provide enable completion callback and AEN notification only once.
235 */
236 if (ioc->retry_count == 0)
237 ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE);
238 ioc->retry_count++;
239 bfa_ioc_timer_start(ioc);
240 }
241
242 /**
243 * Awaiting firmware version match.
244 */
245 static void
246 bfa_ioc_sm_mismatch(struct bfa_ioc *ioc, enum ioc_event event)
247 {
248 switch (event) {
249 case IOC_E_TIMEOUT:
250 bfa_fsm_set_state(ioc, bfa_ioc_sm_fwcheck);
251 break;
252
253 case IOC_E_DISABLE:
254 bfa_ioc_disable_comp(ioc);
255 /* fall through */
256
257 case IOC_E_DETACH:
258 bfa_ioc_timer_stop(ioc);
259 bfa_fsm_set_state(ioc, bfa_ioc_sm_reset);
260 break;
261
262 case IOC_E_FWREADY:
263 break;
264
265 default:
266 bfa_sm_fault(ioc, event);
267 }
268 }
269
270 /**
271 * Request for semaphore.
272 */
273 static void
274 bfa_ioc_sm_semwait_entry(struct bfa_ioc *ioc)
275 {
276 bfa_ioc_hw_sem_get(ioc);
277 }
278
279 /**
280 * Awaiting semaphore for h/w initialization.
281 */
282 static void
283 bfa_ioc_sm_semwait(struct bfa_ioc *ioc, enum ioc_event event)
284 {
285 switch (event) {
286 case IOC_E_SEMLOCKED:
287 ioc->retry_count = 0;
288 bfa_fsm_set_state(ioc, bfa_ioc_sm_hwinit);
289 break;
290
291 case IOC_E_DISABLE:
292 bfa_ioc_hw_sem_get_cancel(ioc);
293 bfa_fsm_set_state(ioc, bfa_ioc_sm_disabled);
294 break;
295
296 default:
297 bfa_sm_fault(ioc, event);
298 }
299 }
300
301 static void
302 bfa_ioc_sm_hwinit_entry(struct bfa_ioc *ioc)
303 {
304 bfa_ioc_timer_start(ioc);
305 bfa_ioc_reset(ioc, false);
306 }
307
308 /**
309 * @brief
310 * Hardware is being initialized. Interrupts are enabled.
311 * Holding hardware semaphore lock.
312 */
313 static void
314 bfa_ioc_sm_hwinit(struct bfa_ioc *ioc, enum ioc_event event)
315 {
316 switch (event) {
317 case IOC_E_FWREADY:
318 bfa_ioc_timer_stop(ioc);
319 bfa_fsm_set_state(ioc, bfa_ioc_sm_enabling);
320 break;
321
322 case IOC_E_HWERROR:
323 bfa_ioc_timer_stop(ioc);
324 /* fall through */
325
326 case IOC_E_TIMEOUT:
327 ioc->retry_count++;
328 if (ioc->retry_count < BFA_IOC_HWINIT_MAX) {
329 bfa_ioc_timer_start(ioc);
330 bfa_ioc_reset(ioc, true);
331 break;
332 }
333
334 bfa_nw_ioc_hw_sem_release(ioc);
335 bfa_fsm_set_state(ioc, bfa_ioc_sm_initfail);
336 break;
337
338 case IOC_E_DISABLE:
339 bfa_nw_ioc_hw_sem_release(ioc);
340 bfa_ioc_timer_stop(ioc);
341 bfa_fsm_set_state(ioc, bfa_ioc_sm_disabled);
342 break;
343
344 default:
345 bfa_sm_fault(ioc, event);
346 }
347 }
348
349 static void
350 bfa_ioc_sm_enabling_entry(struct bfa_ioc *ioc)
351 {
352 bfa_ioc_timer_start(ioc);
353 bfa_ioc_send_enable(ioc);
354 }
355
356 /**
357 * Host IOC function is being enabled, awaiting response from firmware.
358 * Semaphore is acquired.
359 */
360 static void
361 bfa_ioc_sm_enabling(struct bfa_ioc *ioc, enum ioc_event event)
362 {
363 switch (event) {
364 case IOC_E_FWRSP_ENABLE:
365 bfa_ioc_timer_stop(ioc);
366 bfa_nw_ioc_hw_sem_release(ioc);
367 bfa_fsm_set_state(ioc, bfa_ioc_sm_getattr);
368 break;
369
370 case IOC_E_HWERROR:
371 bfa_ioc_timer_stop(ioc);
372 /* fall through */
373
374 case IOC_E_TIMEOUT:
375 ioc->retry_count++;
376 if (ioc->retry_count < BFA_IOC_HWINIT_MAX) {
377 writel(BFI_IOC_UNINIT,
378 ioc->ioc_regs.ioc_fwstate);
379 bfa_fsm_set_state(ioc, bfa_ioc_sm_hwinit);
380 break;
381 }
382
383 bfa_nw_ioc_hw_sem_release(ioc);
384 bfa_fsm_set_state(ioc, bfa_ioc_sm_initfail);
385 break;
386
387 case IOC_E_DISABLE:
388 bfa_ioc_timer_stop(ioc);
389 bfa_nw_ioc_hw_sem_release(ioc);
390 bfa_fsm_set_state(ioc, bfa_ioc_sm_disabled);
391 break;
392
393 case IOC_E_FWREADY:
394 bfa_ioc_send_enable(ioc);
395 break;
396
397 default:
398 bfa_sm_fault(ioc, event);
399 }
400 }
401
402 static void
403 bfa_ioc_sm_getattr_entry(struct bfa_ioc *ioc)
404 {
405 bfa_ioc_timer_start(ioc);
406 bfa_ioc_send_getattr(ioc);
407 }
408
409 /**
410 * @brief
411 * IOC configuration in progress. Timer is active.
412 */
413 static void
414 bfa_ioc_sm_getattr(struct bfa_ioc *ioc, enum ioc_event event)
415 {
416 switch (event) {
417 case IOC_E_FWRSP_GETATTR:
418 bfa_ioc_timer_stop(ioc);
419 bfa_ioc_check_attr_wwns(ioc);
420 bfa_fsm_set_state(ioc, bfa_ioc_sm_op);
421 break;
422
423 case IOC_E_HWERROR:
424 bfa_ioc_timer_stop(ioc);
425 /* fall through */
426
427 case IOC_E_TIMEOUT:
428 bfa_fsm_set_state(ioc, bfa_ioc_sm_initfail);
429 break;
430
431 case IOC_E_DISABLE:
432 bfa_ioc_timer_stop(ioc);
433 bfa_fsm_set_state(ioc, bfa_ioc_sm_disabled);
434 break;
435
436 default:
437 bfa_sm_fault(ioc, event);
438 }
439 }
440
441 static void
442 bfa_ioc_sm_op_entry(struct bfa_ioc *ioc)
443 {
444 ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_OK);
445 bfa_ioc_hb_monitor(ioc);
446 }
447
448 static void
449 bfa_ioc_sm_op(struct bfa_ioc *ioc, enum ioc_event event)
450 {
451 switch (event) {
452 case IOC_E_ENABLE:
453 break;
454
455 case IOC_E_DISABLE:
456 bfa_ioc_hb_stop(ioc);
457 bfa_fsm_set_state(ioc, bfa_ioc_sm_disabling);
458 break;
459
460 case IOC_E_HWERROR:
461 case IOC_E_FWREADY:
462 /**
463 * Hard error or IOC recovery by other function.
464 * Treat it same as heartbeat failure.
465 */
466 bfa_ioc_hb_stop(ioc);
467 /* !!! fall through !!! */
468
469 case IOC_E_HBFAIL:
470 bfa_fsm_set_state(ioc, bfa_ioc_sm_hbfail);
471 break;
472
473 default:
474 bfa_sm_fault(ioc, event);
475 }
476 }
477
478 static void
479 bfa_ioc_sm_disabling_entry(struct bfa_ioc *ioc)
480 {
481 bfa_ioc_timer_start(ioc);
482 bfa_ioc_send_disable(ioc);
483 }
484
485 /**
486 * IOC is being disabled
487 */
488 static void
489 bfa_ioc_sm_disabling(struct bfa_ioc *ioc, enum ioc_event event)
490 {
491 switch (event) {
492 case IOC_E_FWRSP_DISABLE:
493 bfa_ioc_timer_stop(ioc);
494 bfa_fsm_set_state(ioc, bfa_ioc_sm_disabled);
495 break;
496
497 case IOC_E_HWERROR:
498 bfa_ioc_timer_stop(ioc);
499 /*
500 * !!! fall through !!!
501 */
502
503 case IOC_E_TIMEOUT:
504 writel(BFI_IOC_FAIL, ioc->ioc_regs.ioc_fwstate);
505 bfa_fsm_set_state(ioc, bfa_ioc_sm_disabled);
506 break;
507
508 default:
509 bfa_sm_fault(ioc, event);
510 }
511 }
512
513 /**
514 * IOC disable completion entry.
515 */
516 static void
517 bfa_ioc_sm_disabled_entry(struct bfa_ioc *ioc)
518 {
519 bfa_ioc_disable_comp(ioc);
520 }
521
522 static void
523 bfa_ioc_sm_disabled(struct bfa_ioc *ioc, enum ioc_event event)
524 {
525 switch (event) {
526 case IOC_E_ENABLE:
527 bfa_fsm_set_state(ioc, bfa_ioc_sm_semwait);
528 break;
529
530 case IOC_E_DISABLE:
531 ioc->cbfn->disable_cbfn(ioc->bfa);
532 break;
533
534 case IOC_E_FWREADY:
535 break;
536
537 case IOC_E_DETACH:
538 bfa_ioc_firmware_unlock(ioc);
539 bfa_fsm_set_state(ioc, bfa_ioc_sm_reset);
540 break;
541
542 default:
543 bfa_sm_fault(ioc, event);
544 }
545 }
546
547 static void
548 bfa_ioc_sm_initfail_entry(struct bfa_ioc *ioc)
549 {
550 ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE);
551 bfa_ioc_timer_start(ioc);
552 }
553
554 /**
555 * @brief
556 * Hardware initialization failed.
557 */
558 static void
559 bfa_ioc_sm_initfail(struct bfa_ioc *ioc, enum ioc_event event)
560 {
561 switch (event) {
562 case IOC_E_DISABLE:
563 bfa_ioc_timer_stop(ioc);
564 bfa_fsm_set_state(ioc, bfa_ioc_sm_disabled);
565 break;
566
567 case IOC_E_DETACH:
568 bfa_ioc_timer_stop(ioc);
569 bfa_ioc_firmware_unlock(ioc);
570 bfa_fsm_set_state(ioc, bfa_ioc_sm_reset);
571 break;
572
573 case IOC_E_TIMEOUT:
574 bfa_fsm_set_state(ioc, bfa_ioc_sm_semwait);
575 break;
576
577 default:
578 bfa_sm_fault(ioc, event);
579 }
580 }
581
582 static void
583 bfa_ioc_sm_hbfail_entry(struct bfa_ioc *ioc)
584 {
585 struct list_head *qe;
586 struct bfa_ioc_hbfail_notify *notify;
587
588 /**
589 * Mark IOC as failed in hardware and stop firmware.
590 */
591 bfa_ioc_lpu_stop(ioc);
592 writel(BFI_IOC_FAIL, ioc->ioc_regs.ioc_fwstate);
593
594 /**
595 * Notify other functions on HB failure.
596 */
597 bfa_ioc_notify_hbfail(ioc);
598
599 /**
600 * Notify driver and common modules registered for notification.
601 */
602 ioc->cbfn->hbfail_cbfn(ioc->bfa);
603 list_for_each(qe, &ioc->hb_notify_q) {
604 notify = (struct bfa_ioc_hbfail_notify *) qe;
605 notify->cbfn(notify->cbarg);
606 }
607
608 /**
609 * Flush any queued up mailbox requests.
610 */
611 bfa_ioc_mbox_hbfail(ioc);
612
613 /**
614 * Trigger auto-recovery after a delay.
615 */
616 if (ioc->auto_recover)
617 mod_timer(&ioc->ioc_timer, jiffies +
618 msecs_to_jiffies(BFA_IOC_TOV_RECOVER));
619 }
620
621 /**
622 * @brief
623 * IOC heartbeat failure.
624 */
625 static void
626 bfa_ioc_sm_hbfail(struct bfa_ioc *ioc, enum ioc_event event)
627 {
628 switch (event) {
629
630 case IOC_E_ENABLE:
631 ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE);
632 break;
633
634 case IOC_E_DISABLE:
635 if (ioc->auto_recover)
636 bfa_ioc_timer_stop(ioc);
637 bfa_fsm_set_state(ioc, bfa_ioc_sm_disabled);
638 break;
639
640 case IOC_E_TIMEOUT:
641 bfa_fsm_set_state(ioc, bfa_ioc_sm_semwait);
642 break;
643
644 case IOC_E_FWREADY:
645 /**
646 * Recovery is already initiated by other function.
647 */
648 break;
649
650 case IOC_E_HWERROR:
651 /*
652 * HB failure notification, ignore.
653 */
654 break;
655 default:
656 bfa_sm_fault(ioc, event);
657 }
658 }
659
660 /**
661 * BFA IOC private functions
662 */
663
664 static void
665 bfa_ioc_disable_comp(struct bfa_ioc *ioc)
666 {
667 struct list_head *qe;
668 struct bfa_ioc_hbfail_notify *notify;
669
670 ioc->cbfn->disable_cbfn(ioc->bfa);
671
672 /**
673 * Notify common modules registered for notification.
674 */
675 list_for_each(qe, &ioc->hb_notify_q) {
676 notify = (struct bfa_ioc_hbfail_notify *) qe;
677 notify->cbfn(notify->cbarg);
678 }
679 }
680
681 void
682 bfa_nw_ioc_sem_timeout(void *ioc_arg)
683 {
684 struct bfa_ioc *ioc = (struct bfa_ioc *) ioc_arg;
685
686 bfa_ioc_hw_sem_get(ioc);
687 }
688
689 bool
690 bfa_nw_ioc_sem_get(void __iomem *sem_reg)
691 {
692 u32 r32;
693 int cnt = 0;
694 #define BFA_SEM_SPINCNT 3000
695
696 r32 = readl(sem_reg);
697
698 while (r32 && (cnt < BFA_SEM_SPINCNT)) {
699 cnt++;
700 udelay(2);
701 r32 = readl(sem_reg);
702 }
703
704 if (r32 == 0)
705 return true;
706
707 BUG_ON(!(cnt < BFA_SEM_SPINCNT));
708 return false;
709 }
710
711 void
712 bfa_nw_ioc_sem_release(void __iomem *sem_reg)
713 {
714 writel(1, sem_reg);
715 }
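/*
 * A minimal usage sketch -- spin for the semaphore, do the exclusive
 * chip access, then release; bfa_ioc_pll_init() below uses this pairing:
 *
 *	if (bfa_nw_ioc_sem_get(sem_reg)) {
 *		... exclusive chip access ...
 *		bfa_nw_ioc_sem_release(sem_reg);
 *	}
 */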
716
717 static void
718 bfa_ioc_hw_sem_get(struct bfa_ioc *ioc)
719 {
720 u32 r32;
721
722 /**
723 * The first read of the semaphore register returns 0 and acquires the
724 * lock; subsequent reads return 1. The semaphore is released by writing 1 to the register.
725 */
726 r32 = readl(ioc->ioc_regs.ioc_sem_reg);
727 if (r32 == 0) {
728 bfa_fsm_send_event(ioc, IOC_E_SEMLOCKED);
729 return;
730 }
731
732 mod_timer(&ioc->sem_timer, jiffies +
733 msecs_to_jiffies(BFA_IOC_HWSEM_TOV));
734 }
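/*
 * On contention, the sem_timer retry path -- bfa_nw_ioc_sem_timeout()
 * above -- calls bfa_ioc_hw_sem_get() again, so acquisition is polled
 * every BFA_IOC_HWSEM_TOV ms until IOC_E_SEMLOCKED can be posted.
 */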
735
736 void
737 bfa_nw_ioc_hw_sem_release(struct bfa_ioc *ioc)
738 {
739 writel(1, ioc->ioc_regs.ioc_sem_reg);
740 }
741
742 static void
743 bfa_ioc_hw_sem_get_cancel(struct bfa_ioc *ioc)
744 {
745 del_timer(&ioc->sem_timer);
746 }
747
748 /**
749 * @brief
750 * Initialize LPU local memory (aka secondary memory / SRAM)
751 */
752 static void
753 bfa_ioc_lmem_init(struct bfa_ioc *ioc)
754 {
755 u32 pss_ctl;
756 int i;
757 #define PSS_LMEM_INIT_TIME 10000
758
759 pss_ctl = readl(ioc->ioc_regs.pss_ctl_reg);
760 pss_ctl &= ~__PSS_LMEM_RESET;
761 pss_ctl |= __PSS_LMEM_INIT_EN;
762
763 /*
764 * i2c workaround 12.5khz clock
765 */
766 pss_ctl |= __PSS_I2C_CLK_DIV(3UL);
767 writel(pss_ctl, ioc->ioc_regs.pss_ctl_reg);
768
769 /**
770 * wait for memory initialization to be complete
771 */
772 i = 0;
773 do {
774 pss_ctl = readl(ioc->ioc_regs.pss_ctl_reg);
775 i++;
776 } while (!(pss_ctl & __PSS_LMEM_INIT_DONE) && (i < PSS_LMEM_INIT_TIME));
777
778 /**
779 * If memory initialization is not successful, IOC timeout will catch
780 * such failures.
781 */
782 BUG_ON(!(pss_ctl & __PSS_LMEM_INIT_DONE));
783
784 pss_ctl &= ~(__PSS_LMEM_INIT_DONE | __PSS_LMEM_INIT_EN);
785 writel(pss_ctl, ioc->ioc_regs.pss_ctl_reg);
786 }
787
788 static void
789 bfa_ioc_lpu_start(struct bfa_ioc *ioc)
790 {
791 u32 pss_ctl;
792
793 /**
794 * Take processor out of reset.
795 */
796 pss_ctl = readl(ioc->ioc_regs.pss_ctl_reg);
797 pss_ctl &= ~__PSS_LPU0_RESET;
798
799 writel(pss_ctl, ioc->ioc_regs.pss_ctl_reg);
800 }
801
802 static void
803 bfa_ioc_lpu_stop(struct bfa_ioc *ioc)
804 {
805 u32 pss_ctl;
806
807 /**
808 * Put processors in reset.
809 */
810 pss_ctl = readl(ioc->ioc_regs.pss_ctl_reg);
811 pss_ctl |= (__PSS_LPU0_RESET | __PSS_LPU1_RESET);
812
813 writel(pss_ctl, ioc->ioc_regs.pss_ctl_reg);
814 }
815
816 /**
817 * Get driver and firmware versions.
818 */
819 void
820 bfa_nw_ioc_fwver_get(struct bfa_ioc *ioc, struct bfi_ioc_image_hdr *fwhdr)
821 {
822 u32 pgnum, pgoff;
823 u32 loff = 0;
824 int i;
825 u32 *fwsig = (u32 *) fwhdr;
826
827 pgnum = bfa_ioc_smem_pgnum(ioc, loff);
828 pgoff = bfa_ioc_smem_pgoff(ioc, loff);
829 writel(pgnum, ioc->ioc_regs.host_page_num_fn);
830
831 for (i = 0; i < (sizeof(struct bfi_ioc_image_hdr) / sizeof(u32));
832 i++) {
833 fwsig[i] =
834 swab32(readl((loff) + (ioc->ioc_regs.smem_page_start)));
835 loff += sizeof(u32);
836 }
837 }
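/*
 * Shared memory (smem) is accessed through a paged window: the page
 * number is programmed into host_page_num_fn and loff then indexes
 * words within smem_page_start. The swab32() matches the byte order
 * used when the image is written out by bfa_ioc_download_fw() below.
 */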
838
839 /**
840 * Returns true if the given firmware header matches the driver's (md5 checksum comparison).
841 */
842 bool
843 bfa_nw_ioc_fwver_cmp(struct bfa_ioc *ioc, struct bfi_ioc_image_hdr *fwhdr)
844 {
845 struct bfi_ioc_image_hdr *drv_fwhdr;
846 int i;
847
848 drv_fwhdr = (struct bfi_ioc_image_hdr *)
849 bfa_cb_image_get_chunk(BFA_IOC_FWIMG_TYPE(ioc), 0);
850
851 for (i = 0; i < BFI_IOC_MD5SUM_SZ; i++) {
852 if (fwhdr->md5sum[i] != drv_fwhdr->md5sum[i])
853 return false;
854 }
855
856 return true;
857 }
858
859 /**
860 * Return true if current running version is valid. Firmware signature and
861 * execution context (driver/bios) must match.
862 */
863 static bool
864 bfa_ioc_fwver_valid(struct bfa_ioc *ioc)
865 {
866 struct bfi_ioc_image_hdr fwhdr, *drv_fwhdr;
867
868 /**
869 * If bios/efi boot (flash based) -- return true
870 */
871 if (bfa_ioc_is_optrom(ioc))
872 return true;
873
874 bfa_nw_ioc_fwver_get(ioc, &fwhdr);
875 drv_fwhdr = (struct bfi_ioc_image_hdr *)
876 bfa_cb_image_get_chunk(BFA_IOC_FWIMG_TYPE(ioc), 0);
877
878 if (fwhdr.signature != drv_fwhdr->signature)
879 return false;
880
881 if (fwhdr.exec != drv_fwhdr->exec)
882 return false;
883
884 return bfa_nw_ioc_fwver_cmp(ioc, &fwhdr);
885 }
886
887 /**
888 * Conditionally flush any pending message from firmware at start.
889 */
890 static void
891 bfa_ioc_msgflush(struct bfa_ioc *ioc)
892 {
893 u32 r32;
894
895 r32 = readl(ioc->ioc_regs.lpu_mbox_cmd);
896 if (r32)
897 writel(1, ioc->ioc_regs.lpu_mbox_cmd);
898 }
899
900 /**
901 * @img ioc_init_logic.jpg
902 */
903 static void
904 bfa_ioc_hwinit(struct bfa_ioc *ioc, bool force)
905 {
906 enum bfi_ioc_state ioc_fwstate;
907 bool fwvalid;
908
909 ioc_fwstate = readl(ioc->ioc_regs.ioc_fwstate);
910
911 if (force)
912 ioc_fwstate = BFI_IOC_UNINIT;
913
914 /**
915 * check if firmware is valid
916 */
917 fwvalid = (ioc_fwstate == BFI_IOC_UNINIT) ?
918 false : bfa_ioc_fwver_valid(ioc);
919
920 if (!fwvalid) {
921 bfa_ioc_boot(ioc, BFI_BOOT_TYPE_NORMAL, ioc->pcidev.device_id);
922 return;
923 }
924
925 /**
926 * If hardware initialization is in progress (initiated by the other IOC),
927 * just wait for an initialization completion interrupt.
928 */
929 if (ioc_fwstate == BFI_IOC_INITING) {
930 ioc->cbfn->reset_cbfn(ioc->bfa);
931 return;
932 }
933
934 /**
935 * If the IOC function is disabled and the firmware version matches,
936 * just re-enable IOC.
937 *
938 * If option rom, IOC must not be in operational state. With
939 * convergence, IOC will be in operational state when 2nd driver
940 * is loaded.
941 */
942 if (ioc_fwstate == BFI_IOC_DISABLED ||
943 (!bfa_ioc_is_optrom(ioc) && ioc_fwstate == BFI_IOC_OP)) {
944 /**
945 * When using MSI-X any pending firmware ready event should
946 * be flushed. Otherwise MSI-X interrupts are not delivered.
947 */
948 bfa_ioc_msgflush(ioc);
949 ioc->cbfn->reset_cbfn(ioc->bfa);
950 bfa_fsm_send_event(ioc, IOC_E_FWREADY);
951 return;
952 }
953
954 /**
955 * Initialize the h/w for any other states.
956 */
957 bfa_ioc_boot(ioc, BFI_BOOT_TYPE_NORMAL, ioc->pcidev.device_id);
958 }
959
960 void
961 bfa_nw_ioc_timeout(void *ioc_arg)
962 {
963 struct bfa_ioc *ioc = (struct bfa_ioc *) ioc_arg;
964
965 bfa_fsm_send_event(ioc, IOC_E_TIMEOUT);
966 }
967
968 static void
969 bfa_ioc_mbox_send(struct bfa_ioc *ioc, void *ioc_msg, int len)
970 {
971 u32 *msgp = (u32 *) ioc_msg;
972 u32 i;
973
974 BUG_ON(!(len <= BFI_IOC_MSGLEN_MAX));
975
976 /*
977 * first write msg to mailbox registers
978 */
979 for (i = 0; i < len / sizeof(u32); i++)
980 writel(cpu_to_le32(msgp[i]),
981 ioc->ioc_regs.hfn_mbox + i * sizeof(u32));
982
983 for (; i < BFI_IOC_MSGLEN_MAX / sizeof(u32); i++)
984 writel(0, ioc->ioc_regs.hfn_mbox + i * sizeof(u32));
985
986 /*
987 * write 1 to mailbox CMD to trigger LPU event
988 */
989 writel(1, ioc->ioc_regs.hfn_mbox_cmd);
990 (void) readl(ioc->ioc_regs.hfn_mbox_cmd);
991 }
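/*
 * The trailing readl() of hfn_mbox_cmd is a read-back: it flushes the
 * posted doorbell write to the adapter before the caller proceeds.
 */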
992
993 static void
994 bfa_ioc_send_enable(struct bfa_ioc *ioc)
995 {
996 struct bfi_ioc_ctrl_req enable_req;
997 struct timeval tv;
998
999 bfi_h2i_set(enable_req.mh, BFI_MC_IOC, BFI_IOC_H2I_ENABLE_REQ,
1000 bfa_ioc_portid(ioc));
1001 enable_req.ioc_class = ioc->ioc_mc;
1002 do_gettimeofday(&tv);
1003 enable_req.tv_sec = ntohl(tv.tv_sec);
1004 bfa_ioc_mbox_send(ioc, &enable_req, sizeof(struct bfi_ioc_ctrl_req));
1005 }
1006
1007 static void
1008 bfa_ioc_send_disable(struct bfa_ioc *ioc)
1009 {
1010 struct bfi_ioc_ctrl_req disable_req;
1011
1012 bfi_h2i_set(disable_req.mh, BFI_MC_IOC, BFI_IOC_H2I_DISABLE_REQ,
1013 bfa_ioc_portid(ioc));
1014 bfa_ioc_mbox_send(ioc, &disable_req, sizeof(struct bfi_ioc_ctrl_req));
1015 }
1016
1017 static void
1018 bfa_ioc_send_getattr(struct bfa_ioc *ioc)
1019 {
1020 struct bfi_ioc_getattr_req attr_req;
1021
1022 bfi_h2i_set(attr_req.mh, BFI_MC_IOC, BFI_IOC_H2I_GETATTR_REQ,
1023 bfa_ioc_portid(ioc));
1024 bfa_dma_be_addr_set(attr_req.attr_addr, ioc->attr_dma.pa);
1025 bfa_ioc_mbox_send(ioc, &attr_req, sizeof(attr_req));
1026 }
1027
1028 void
1029 bfa_nw_ioc_hb_check(void *cbarg)
1030 {
1031 struct bfa_ioc *ioc = cbarg;
1032 u32 hb_count;
1033
1034 hb_count = readl(ioc->ioc_regs.heartbeat);
1035 if (ioc->hb_count == hb_count) {
1036 pr_crit("Firmware heartbeat failure at %d\n", hb_count);
1037 bfa_ioc_recover(ioc);
1038 return;
1039 } else {
1040 ioc->hb_count = hb_count;
1041 }
1042
1043 bfa_ioc_mbox_poll(ioc);
1044 mod_timer(&ioc->hb_timer, jiffies +
1045 msecs_to_jiffies(BFA_IOC_HB_TOV));
1046 }
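/*
 * Timer wiring sketch (an assumption -- hb_timer is initialized by the
 * enclosing driver, not in this file; bfa_nw_ioc_hb_check() is its
 * intended callback):
 *
 *	setup_timer(&ioc->hb_timer, (void (*)(unsigned long))
 *			bfa_nw_ioc_hb_check, (unsigned long)ioc);
 */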
1047
1048 static void
1049 bfa_ioc_hb_monitor(struct bfa_ioc *ioc)
1050 {
1051 ioc->hb_count = readl(ioc->ioc_regs.heartbeat);
1052 mod_timer(&ioc->hb_timer, jiffies +
1053 msecs_to_jiffies(BFA_IOC_HB_TOV));
1054 }
1055
1056 static void
1057 bfa_ioc_hb_stop(struct bfa_ioc *ioc)
1058 {
1059 del_timer(&ioc->hb_timer);
1060 }
1061
1062 /**
1063 * @brief
1064 * Initiate a full firmware download.
1065 */
1066 static void
1067 bfa_ioc_download_fw(struct bfa_ioc *ioc, u32 boot_type,
1068 u32 boot_param)
1069 {
1070 u32 *fwimg;
1071 u32 pgnum, pgoff;
1072 u32 loff = 0;
1073 u32 chunkno = 0;
1074 u32 i;
1075
1076 /**
1077 * Initialize LMEM first before code download
1078 */
1079 bfa_ioc_lmem_init(ioc);
1080
1081 /**
1082 * Flash based firmware boot
1083 */
1084 if (bfa_ioc_is_optrom(ioc))
1085 boot_type = BFI_BOOT_TYPE_FLASH;
1086 fwimg = bfa_cb_image_get_chunk(BFA_IOC_FWIMG_TYPE(ioc), chunkno);
1087
1088 pgnum = bfa_ioc_smem_pgnum(ioc, loff);
1089 pgoff = bfa_ioc_smem_pgoff(ioc, loff);
1090
1091 writel(pgnum, ioc->ioc_regs.host_page_num_fn);
1092
1093 for (i = 0; i < bfa_cb_image_get_size(BFA_IOC_FWIMG_TYPE(ioc)); i++) {
1094 if (BFA_IOC_FLASH_CHUNK_NO(i) != chunkno) {
1095 chunkno = BFA_IOC_FLASH_CHUNK_NO(i);
1096 fwimg = bfa_cb_image_get_chunk(BFA_IOC_FWIMG_TYPE(ioc),
1097 BFA_IOC_FLASH_CHUNK_ADDR(chunkno));
1098 }
1099
1100 /**
1101 * write smem
1102 */
1103 writel((swab32(fwimg[BFA_IOC_FLASH_OFFSET_IN_CHUNK(i)])),
1104 ((ioc->ioc_regs.smem_page_start) + (loff)));
1105
1106 loff += sizeof(u32);
1107
1108 /**
1109 * handle page offset wrap around
1110 */
1111 loff = PSS_SMEM_PGOFF(loff);
1112 if (loff == 0) {
1113 pgnum++;
1114 writel(pgnum,
1115 ioc->ioc_regs.host_page_num_fn);
1116 }
1117 }
1118
1119 writel(bfa_ioc_smem_pgnum(ioc, 0),
1120 ioc->ioc_regs.host_page_num_fn);
1121
1122 /*
1123 * Set boot type and boot param at the end.
1124 */
1125 writel(boot_type, ((ioc->ioc_regs.smem_page_start)
1126 + (BFI_BOOT_TYPE_OFF)));
1127 writel(boot_param, ((ioc->ioc_regs.smem_page_start)
1128 + (BFI_BOOT_PARAM_OFF)));
1129 }
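/*
 * Worked example of the wrap-around above, assuming (for illustration)
 * a 32 KB smem page: when loff reaches the page size, PSS_SMEM_PGOFF()
 * masks it back to 0, pgnum is advanced, and the next writel() lands at
 * the start of the following page window.
 */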
1130
1131 static void
1132 bfa_ioc_reset(struct bfa_ioc *ioc, bool force)
1133 {
1134 bfa_ioc_hwinit(ioc, force);
1135 }
1136
1137 /**
1138 * @brief
1139 * Update BFA configuration from firmware configuration.
1140 */
1141 static void
1142 bfa_ioc_getattr_reply(struct bfa_ioc *ioc)
1143 {
1144 struct bfi_ioc_attr *attr = ioc->attr;
1145
1146 attr->adapter_prop = ntohl(attr->adapter_prop);
1147 attr->card_type = ntohl(attr->card_type);
1148 attr->maxfrsize = ntohs(attr->maxfrsize);
1149
1150 bfa_fsm_send_event(ioc, IOC_E_FWRSP_GETATTR);
1151 }
1152
1153 /**
1154 * Attach time initialization of mbox logic.
1155 */
1156 static void
1157 bfa_ioc_mbox_attach(struct bfa_ioc *ioc)
1158 {
1159 struct bfa_ioc_mbox_mod *mod = &ioc->mbox_mod;
1160 int mc;
1161
1162 INIT_LIST_HEAD(&mod->cmd_q);
1163 for (mc = 0; mc < BFI_MC_MAX; mc++) {
1164 mod->mbhdlr[mc].cbfn = NULL;
1165 mod->mbhdlr[mc].cbarg = ioc->bfa;
1166 }
1167 }
1168
1169 /**
1170 * Mbox poll timer -- restarts any pending mailbox requests.
1171 */
1172 static void
1173 bfa_ioc_mbox_poll(struct bfa_ioc *ioc)
1174 {
1175 struct bfa_ioc_mbox_mod *mod = &ioc->mbox_mod;
1176 struct bfa_mbox_cmd *cmd;
1177 u32 stat;
1178
1179 /**
1180 * If no command pending, do nothing
1181 */
1182 if (list_empty(&mod->cmd_q))
1183 return;
1184
1185 /**
1186 * If previous command is not yet fetched by firmware, do nothing
1187 */
1188 stat = readl(ioc->ioc_regs.hfn_mbox_cmd);
1189 if (stat)
1190 return;
1191
1192 /**
1193 * Enqueue command to firmware.
1194 */
1195 bfa_q_deq(&mod->cmd_q, &cmd);
1196 bfa_ioc_mbox_send(ioc, cmd->msg, sizeof(cmd->msg));
1197 }
1198
1199 /**
1200 * Cleanup any pending requests.
1201 */
1202 static void
1203 bfa_ioc_mbox_hbfail(struct bfa_ioc *ioc)
1204 {
1205 struct bfa_ioc_mbox_mod *mod = &ioc->mbox_mod;
1206 struct bfa_mbox_cmd *cmd;
1207
1208 while (!list_empty(&mod->cmd_q))
1209 bfa_q_deq(&mod->cmd_q, &cmd);
1210 }
1211
1212 /**
1213 * IOC public
1214 */
1215 static enum bfa_status
1216 bfa_ioc_pll_init(struct bfa_ioc *ioc)
1217 {
1218 /*
1219 * Hold semaphore so that nobody can access the chip during init.
1220 */
1221 bfa_nw_ioc_sem_get(ioc->ioc_regs.ioc_init_sem_reg);
1222
1223 bfa_ioc_pll_init_asic(ioc);
1224
1225 ioc->pllinit = true;
1226 /*
1227 * release semaphore.
1228 */
1229 bfa_nw_ioc_sem_release(ioc->ioc_regs.ioc_init_sem_reg);
1230
1231 return BFA_STATUS_OK;
1232 }
1233
1234 /**
1235 * Interface used by diag module to do firmware boot with memory test
1236 * as the entry vector.
1237 */
1238 static void
1239 bfa_ioc_boot(struct bfa_ioc *ioc, u32 boot_type, u32 boot_param)
1240 {
1241 void __iomem *rb;
1242
1243 bfa_ioc_stats(ioc, ioc_boots);
1244
1245 if (bfa_ioc_pll_init(ioc) != BFA_STATUS_OK)
1246 return;
1247
1248 /**
1249 * Initialize IOC state of all functions on a chip reset.
1250 */
1251 rb = ioc->pcidev.pci_bar_kva;
1252 if (boot_param == BFI_BOOT_TYPE_MEMTEST) {
1253 writel(BFI_IOC_MEMTEST, (rb + BFA_IOC0_STATE_REG));
1254 writel(BFI_IOC_MEMTEST, (rb + BFA_IOC1_STATE_REG));
1255 } else {
1256 writel(BFI_IOC_INITING, (rb + BFA_IOC0_STATE_REG));
1257 writel(BFI_IOC_INITING, (rb + BFA_IOC1_STATE_REG));
1258 }
1259
1260 bfa_ioc_msgflush(ioc);
1261 bfa_ioc_download_fw(ioc, boot_type, boot_param);
1262
1263 /**
1264 * Enable interrupts just before starting LPU
1265 */
1266 ioc->cbfn->reset_cbfn(ioc->bfa);
1267 bfa_ioc_lpu_start(ioc);
1268 }
1269
1270 /**
1271 * Enable/disable IOC failure auto recovery.
1272 */
1273 void
1274 bfa_nw_ioc_auto_recover(bool auto_recover)
1275 {
1276 bfa_nw_auto_recover = auto_recover;
1277 }
1278
1279 bool
1280 bfa_nw_ioc_is_operational(struct bfa_ioc *ioc)
1281 {
1282 return bfa_fsm_cmp_state(ioc, bfa_ioc_sm_op);
1283 }
1284
1285 static void
1286 bfa_ioc_msgget(struct bfa_ioc *ioc, void *mbmsg)
1287 {
1288 u32 *msgp = mbmsg;
1289 u32 r32;
1290 int i;
1291
1292 /**
1293 * read the MBOX msg
1294 */
1295 for (i = 0; i < (sizeof(union bfi_ioc_i2h_msg_u) / sizeof(u32));
1296 i++) {
1297 r32 = readl(ioc->ioc_regs.lpu_mbox +
1298 i * sizeof(u32));
1299 msgp[i] = htonl(r32);
1300 }
1301
1302 /**
1303 * turn off mailbox interrupt by clearing mailbox status
1304 */
1305 writel(1, ioc->ioc_regs.lpu_mbox_cmd);
1306 readl(ioc->ioc_regs.lpu_mbox_cmd);
1307 }
1308
1309 static void
1310 bfa_ioc_isr(struct bfa_ioc *ioc, struct bfi_mbmsg *m)
1311 {
1312 union bfi_ioc_i2h_msg_u *msg;
1313
1314 msg = (union bfi_ioc_i2h_msg_u *) m;
1315
1316 bfa_ioc_stats(ioc, ioc_isrs);
1317
1318 switch (msg->mh.msg_id) {
1319 case BFI_IOC_I2H_HBEAT:
1320 break;
1321
1322 case BFI_IOC_I2H_READY_EVENT:
1323 bfa_fsm_send_event(ioc, IOC_E_FWREADY);
1324 break;
1325
1326 case BFI_IOC_I2H_ENABLE_REPLY:
1327 bfa_fsm_send_event(ioc, IOC_E_FWRSP_ENABLE);
1328 break;
1329
1330 case BFI_IOC_I2H_DISABLE_REPLY:
1331 bfa_fsm_send_event(ioc, IOC_E_FWRSP_DISABLE);
1332 break;
1333
1334 case BFI_IOC_I2H_GETATTR_REPLY:
1335 bfa_ioc_getattr_reply(ioc);
1336 break;
1337
1338 default:
1339 BUG_ON(1);
1340 }
1341 }
1342
1343 /**
1344 * IOC attach time initialization and setup.
1345 *
1346 * @param[in] ioc memory for IOC
1347 * @param[in] bfa driver instance structure
1348 */
1349 void
1350 bfa_nw_ioc_attach(struct bfa_ioc *ioc, void *bfa, struct bfa_ioc_cbfn *cbfn)
1351 {
1352 ioc->bfa = bfa;
1353 ioc->cbfn = cbfn;
1354 ioc->fcmode = false;
1355 ioc->pllinit = false;
1356 ioc->dbg_fwsave_once = true;
1357
1358 bfa_ioc_mbox_attach(ioc);
1359 INIT_LIST_HEAD(&ioc->hb_notify_q);
1360
1361 bfa_fsm_set_state(ioc, bfa_ioc_sm_reset);
1362 }
1363
1364 /**
1365 * Driver detach time IOC cleanup.
1366 */
1367 void
1368 bfa_nw_ioc_detach(struct bfa_ioc *ioc)
1369 {
1370 bfa_fsm_send_event(ioc, IOC_E_DETACH);
1371 }
1372
1373 /**
1374 * Setup IOC PCI properties.
1375 *
1376 * @param[in] pcidev PCI device information for this IOC
1377 */
1378 void
1379 bfa_nw_ioc_pci_init(struct bfa_ioc *ioc, struct bfa_pcidev *pcidev,
1380 enum bfi_mclass mc)
1381 {
1382 ioc->ioc_mc = mc;
1383 ioc->pcidev = *pcidev;
1384 ioc->ctdev = bfa_asic_id_ct(ioc->pcidev.device_id);
1385 ioc->cna = ioc->ctdev && !ioc->fcmode;
1386
1387 bfa_nw_ioc_set_ct_hwif(ioc);
1388
1389 bfa_ioc_map_port(ioc);
1390 bfa_ioc_reg_init(ioc);
1391 }
1392
1393 /**
1394 * Initialize IOC dma memory
1395 *
1396 * @param[in] dm_kva kernel virtual address of IOC dma memory
1397 * @param[in] dm_pa physical address of IOC dma memory
1398 */
1399 void
1400 bfa_nw_ioc_mem_claim(struct bfa_ioc *ioc, u8 *dm_kva, u64 dm_pa)
1401 {
1402 /**
1403 * dma memory for firmware attribute
1404 */
1405 ioc->attr_dma.kva = dm_kva;
1406 ioc->attr_dma.pa = dm_pa;
1407 ioc->attr = (struct bfi_ioc_attr *) dm_kva;
1408 }
1409
1410 /**
1411 * Return size of dma memory required.
1412 */
1413 u32
1414 bfa_nw_ioc_meminfo(void)
1415 {
1416 return roundup(sizeof(struct bfi_ioc_attr), BFA_DMA_ALIGN_SZ);
1417 }
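/*
 * Allocate/claim pairing sketch (assumed caller code, not part of this
 * file):
 *
 *	dma_addr_t dm_pa;
 *	u32 len = bfa_nw_ioc_meminfo();
 *	u8 *dm_kva = dma_alloc_coherent(&pcidev->dev, len, &dm_pa,
 *			GFP_KERNEL);
 *
 *	if (dm_kva)
 *		bfa_nw_ioc_mem_claim(ioc, dm_kva, dm_pa);
 */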
1418
1419 void
1420 bfa_nw_ioc_enable(struct bfa_ioc *ioc)
1421 {
1422 bfa_ioc_stats(ioc, ioc_enables);
1423 ioc->dbg_fwsave_once = true;
1424
1425 bfa_fsm_send_event(ioc, IOC_E_ENABLE);
1426 }
1427
1428 void
1429 bfa_nw_ioc_disable(struct bfa_ioc *ioc)
1430 {
1431 bfa_ioc_stats(ioc, ioc_disables);
1432 bfa_fsm_send_event(ioc, IOC_E_DISABLE);
1433 }
1434
1435 static u32
1436 bfa_ioc_smem_pgnum(struct bfa_ioc *ioc, u32 fmaddr)
1437 {
1438 return PSS_SMEM_PGNUM(ioc->ioc_regs.smem_pg0, fmaddr);
1439 }
1440
1441 static u32
1442 bfa_ioc_smem_pgoff(struct bfa_ioc *ioc, u32 fmaddr)
1443 {
1444 return PSS_SMEM_PGOFF(fmaddr);
1445 }
1446
1447 /**
1448 * Register mailbox message handler function, to be called by common modules
1449 */
1450 void
1451 bfa_nw_ioc_mbox_regisr(struct bfa_ioc *ioc, enum bfi_mclass mc,
1452 bfa_ioc_mbox_mcfunc_t cbfn, void *cbarg)
1453 {
1454 struct bfa_ioc_mbox_mod *mod = &ioc->mbox_mod;
1455
1456 mod->mbhdlr[mc].cbfn = cbfn;
1457 mod->mbhdlr[mc].cbarg = cbarg;
1458 }
1459
1460 /**
1461 * Queue a mailbox command request to firmware. If the mailbox is busy,
1462 * the command is deferred to the poll timer. The caller must serialize access.
1463 *
1464 * @param[in] ioc IOC instance
1465 * @param[in] cmd Mailbox command
1466 */
1467 void
1468 bfa_nw_ioc_mbox_queue(struct bfa_ioc *ioc, struct bfa_mbox_cmd *cmd)
1469 {
1470 struct bfa_ioc_mbox_mod *mod = &ioc->mbox_mod;
1471 u32 stat;
1472
1473 /**
1474 * If a previous command is pending, queue new command
1475 */
1476 if (!list_empty(&mod->cmd_q)) {
1477 list_add_tail(&cmd->qe, &mod->cmd_q);
1478 return;
1479 }
1480
1481 /**
1482 * If mailbox is busy, queue command for poll timer
1483 */
1484 stat = readl(ioc->ioc_regs.hfn_mbox_cmd);
1485 if (stat) {
1486 list_add_tail(&cmd->qe, &mod->cmd_q);
1487 return;
1488 }
1489
1490 /**
1491 * mailbox is free -- queue command to firmware
1492 */
1493 bfa_ioc_mbox_send(ioc, cmd->msg, sizeof(cmd->msg));
1494 }
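/*
 * A minimal caller sketch (hypothetical message; the helpers are the
 * ones used elsewhere in this file):
 *
 *	struct bfa_mbox_cmd cmd;
 *	struct bfi_ioc_ctrl_req *req = (struct bfi_ioc_ctrl_req *)cmd.msg;
 *
 *	bfi_h2i_set(req->mh, BFI_MC_IOC, BFI_IOC_H2I_ENABLE_REQ,
 *			bfa_ioc_portid(ioc));
 *	bfa_nw_ioc_mbox_queue(ioc, &cmd);
 */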
1495
1496 /**
1497 * Handle mailbox interrupts
1498 */
1499 void
1500 bfa_nw_ioc_mbox_isr(struct bfa_ioc *ioc)
1501 {
1502 struct bfa_ioc_mbox_mod *mod = &ioc->mbox_mod;
1503 struct bfi_mbmsg m;
1504 int mc;
1505
1506 bfa_ioc_msgget(ioc, &m);
1507
1508 /**
1509 * Treat IOC message class as special.
1510 */
1511 mc = m.mh.msg_class;
1512 if (mc == BFI_MC_IOC) {
1513 bfa_ioc_isr(ioc, &m);
1514 return;
1515 }
1516
1517 if ((mc >= BFI_MC_MAX) || (mod->mbhdlr[mc].cbfn == NULL))
1518 return;
1519
1520 mod->mbhdlr[mc].cbfn(mod->mbhdlr[mc].cbarg, &m);
1521 }
1522
1523 void
1524 bfa_nw_ioc_error_isr(struct bfa_ioc *ioc)
1525 {
1526 bfa_fsm_send_event(ioc, IOC_E_HWERROR);
1527 }
1528
1529 /**
1530 * Add to IOC heartbeat failure notification queue. To be used by common
1531 * modules such as cee, port, diag.
1532 */
1533 void
1534 bfa_nw_ioc_hbfail_register(struct bfa_ioc *ioc,
1535 struct bfa_ioc_hbfail_notify *notify)
1536 {
1537 list_add_tail(&notify->qe, &ioc->hb_notify_q);
1538 }
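/*
 * Registration sketch (assuming the bfa_ioc_hbfail_init() initializer
 * from bfa_ioc.h; my_hbfail_cb and my_cbarg are hypothetical):
 *
 *	struct bfa_ioc_hbfail_notify notify;
 *
 *	bfa_ioc_hbfail_init(&notify, my_hbfail_cb, my_cbarg);
 *	bfa_nw_ioc_hbfail_register(ioc, &notify);
 */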
1539
1540 #define BFA_MFG_NAME "Brocade"
1541 static void
1542 bfa_ioc_get_adapter_attr(struct bfa_ioc *ioc,
1543 struct bfa_adapter_attr *ad_attr)
1544 {
1545 struct bfi_ioc_attr *ioc_attr;
1546
1547 ioc_attr = ioc->attr;
1548
1549 bfa_ioc_get_adapter_serial_num(ioc, ad_attr->serial_num);
1550 bfa_ioc_get_adapter_fw_ver(ioc, ad_attr->fw_ver);
1551 bfa_ioc_get_adapter_optrom_ver(ioc, ad_attr->optrom_ver);
1552 bfa_ioc_get_adapter_manufacturer(ioc, ad_attr->manufacturer);
1553 memcpy(&ad_attr->vpd, &ioc_attr->vpd,
1554 sizeof(struct bfa_mfg_vpd));
1555
1556 ad_attr->nports = bfa_ioc_get_nports(ioc);
1557 ad_attr->max_speed = bfa_ioc_speed_sup(ioc);
1558
1559 bfa_ioc_get_adapter_model(ioc, ad_attr->model);
1560 /* For now, model descr uses same model string */
1561 bfa_ioc_get_adapter_model(ioc, ad_attr->model_descr);
1562
1563 ad_attr->card_type = ioc_attr->card_type;
1564 ad_attr->is_mezz = bfa_mfg_is_mezz(ioc_attr->card_type);
1565
1566 if (BFI_ADAPTER_IS_SPECIAL(ioc_attr->adapter_prop))
1567 ad_attr->prototype = 1;
1568 else
1569 ad_attr->prototype = 0;
1570
1571 ad_attr->pwwn = bfa_ioc_get_pwwn(ioc);
1572 ad_attr->mac = bfa_nw_ioc_get_mac(ioc);
1573
1574 ad_attr->pcie_gen = ioc_attr->pcie_gen;
1575 ad_attr->pcie_lanes = ioc_attr->pcie_lanes;
1576 ad_attr->pcie_lanes_orig = ioc_attr->pcie_lanes_orig;
1577 ad_attr->asic_rev = ioc_attr->asic_rev;
1578
1579 bfa_ioc_get_pci_chip_rev(ioc, ad_attr->hw_ver);
1580
1581 ad_attr->cna_capable = ioc->cna;
1582 ad_attr->trunk_capable = (ad_attr->nports > 1) && !ioc->cna;
1583 }
1584
1585 static enum bfa_ioc_type
1586 bfa_ioc_get_type(struct bfa_ioc *ioc)
1587 {
1588 if (!ioc->ctdev || ioc->fcmode)
1589 return BFA_IOC_TYPE_FC;
1590 else if (ioc->ioc_mc == BFI_MC_IOCFC)
1591 return BFA_IOC_TYPE_FCoE;
1592 else if (ioc->ioc_mc == BFI_MC_LL)
1593 return BFA_IOC_TYPE_LL;
1594 else {
1595 BUG_ON(!(ioc->ioc_mc == BFI_MC_LL));
1596 return BFA_IOC_TYPE_LL;
1597 }
1598 }
1599
1600 static void
1601 bfa_ioc_get_adapter_serial_num(struct bfa_ioc *ioc, char *serial_num)
1602 {
1603 memset(serial_num, 0, BFA_ADAPTER_SERIAL_NUM_LEN);
1604 memcpy(serial_num,
1605 (void *)ioc->attr->brcd_serialnum,
1606 BFA_ADAPTER_SERIAL_NUM_LEN);
1607 }
1608
1609 static void
1610 bfa_ioc_get_adapter_fw_ver(struct bfa_ioc *ioc, char *fw_ver)
1611 {
1612 memset(fw_ver, 0, BFA_VERSION_LEN);
1613 memcpy(fw_ver, ioc->attr->fw_version, BFA_VERSION_LEN);
1614 }
1615
1616 static void
1617 bfa_ioc_get_pci_chip_rev(struct bfa_ioc *ioc, char *chip_rev)
1618 {
1619 BUG_ON(!(chip_rev));
1620
1621 memset(chip_rev, 0, BFA_IOC_CHIP_REV_LEN);
1622
1623 chip_rev[0] = 'R';
1624 chip_rev[1] = 'e';
1625 chip_rev[2] = 'v';
1626 chip_rev[3] = '-';
1627 chip_rev[4] = ioc->attr->asic_rev;
1628 chip_rev[5] = '\0';
1629 }
1630
1631 static void
1632 bfa_ioc_get_adapter_optrom_ver(struct bfa_ioc *ioc, char *optrom_ver)
1633 {
1634 memset(optrom_ver, 0, BFA_VERSION_LEN);
1635 memcpy(optrom_ver, ioc->attr->optrom_version,
1636 BFA_VERSION_LEN);
1637 }
1638
1639 static void
1640 bfa_ioc_get_adapter_manufacturer(struct bfa_ioc *ioc, char *manufacturer)
1641 {
1642 memset(manufacturer, 0, BFA_ADAPTER_MFG_NAME_LEN);
1643 memcpy(manufacturer, BFA_MFG_NAME, BFA_ADAPTER_MFG_NAME_LEN);
1644 }
1645
1646 static void
1647 bfa_ioc_get_adapter_model(struct bfa_ioc *ioc, char *model)
1648 {
1649 struct bfi_ioc_attr *ioc_attr;
1650
1651 BUG_ON(!(model));
1652 memset(model, 0, BFA_ADAPTER_MODEL_NAME_LEN);
1653
1654 ioc_attr = ioc->attr;
1655
1656 /**
1657 * model name
1658 */
1659 snprintf(model, BFA_ADAPTER_MODEL_NAME_LEN, "%s-%u",
1660 BFA_MFG_NAME, ioc_attr->card_type);
1661 }
1662
1663 static enum bfa_ioc_state
1664 bfa_ioc_get_state(struct bfa_ioc *ioc)
1665 {
1666 return bfa_sm_to_state(ioc_sm_table, ioc->fsm);
1667 }
1668
1669 void
1670 bfa_nw_ioc_get_attr(struct bfa_ioc *ioc, struct bfa_ioc_attr *ioc_attr)
1671 {
1672 memset((void *)ioc_attr, 0, sizeof(struct bfa_ioc_attr));
1673
1674 ioc_attr->state = bfa_ioc_get_state(ioc);
1675 ioc_attr->port_id = ioc->port_id;
1676
1677 ioc_attr->ioc_type = bfa_ioc_get_type(ioc);
1678
1679 bfa_ioc_get_adapter_attr(ioc, &ioc_attr->adapter_attr);
1680
1681 ioc_attr->pci_attr.device_id = ioc->pcidev.device_id;
1682 ioc_attr->pci_attr.pcifn = ioc->pcidev.pci_func;
1683 bfa_ioc_get_pci_chip_rev(ioc, ioc_attr->pci_attr.chip_rev);
1684 }
1685
1686 /**
1687 * WWN public
1688 */
1689 static u64
1690 bfa_ioc_get_pwwn(struct bfa_ioc *ioc)
1691 {
1692 return ioc->attr->pwwn;
1693 }
1694
1695 mac_t
1696 bfa_nw_ioc_get_mac(struct bfa_ioc *ioc)
1697 {
1698 /*
1699 * Currently mfg mac is used as FCoE enode mac (not configured by PBC)
1700 */
1701 if (bfa_ioc_get_type(ioc) == BFA_IOC_TYPE_FCoE)
1702 return bfa_ioc_get_mfg_mac(ioc);
1703 else
1704 return ioc->attr->mac;
1705 }
1706
1707 static mac_t
1708 bfa_ioc_get_mfg_mac(struct bfa_ioc *ioc)
1709 {
1710 mac_t m;
1711
1712 m = ioc->attr->mfg_mac;
1713 if (bfa_mfg_is_old_wwn_mac_model(ioc->attr->card_type))
1714 m.mac[MAC_ADDRLEN - 1] += bfa_ioc_pcifn(ioc);
1715 else
1716 bfa_mfg_increment_wwn_mac(&(m.mac[MAC_ADDRLEN-3]),
1717 bfa_ioc_pcifn(ioc));
1718
1719 return m;
1720 }
1721
1722 /**
1723 * Firmware failure detected. Start recovery actions.
1724 */
1725 static void
1726 bfa_ioc_recover(struct bfa_ioc *ioc)
1727 {
1728 bfa_ioc_stats(ioc, ioc_hbfails);
1729 bfa_fsm_send_event(ioc, IOC_E_HBFAIL);
1730 }
1731
1732 static void
1733 bfa_ioc_check_attr_wwns(struct bfa_ioc *ioc)
1734 {
1735 if (bfa_ioc_get_type(ioc) == BFA_IOC_TYPE_LL)
1736 return;
1737
1738 }