/*
 * Linux network driver for Brocade Converged Network Adapter.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License (GPL) Version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * Copyright (c) 2005-2010 Brocade Communications Systems, Inc.
 * All rights reserved.
 */
static int
bna_is_aen(u8 msg_id)
{
    return msg_id == BFI_LL_I2H_LINK_DOWN_AEN ||
           msg_id == BFI_LL_I2H_LINK_UP_AEN;
}

static void
bna_mbox_aen_callback(struct bna *bna, struct bfi_mbmsg *msg)
{
    struct bfi_ll_aen *aen = (struct bfi_ll_aen *)(msg);

    switch (aen->mh.msg_id) {
    case BFI_LL_I2H_LINK_UP_AEN:
        bna_port_cb_link_up(&bna->port, aen, aen->reason);
        break;
    case BFI_LL_I2H_LINK_DOWN_AEN:
        bna_port_cb_link_down(&bna->port, aen->reason);
        break;
    default:
        break;
    }
}
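/*
 * Mailbox interrupt handler for the link-layer (LL) message class.
 * A reply is matched to the oldest posted command by comparing the
 * message id (via BFA_I2HM()) and the i2htok token that bna_mbox_send()
 * stamped into the command header; anything that is not a reply is
 * treated as an asynchronous event (AEN) and dispatched above.
 */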
static void
bna_ll_isr(void *llarg, struct bfi_mbmsg *msg)
{
    struct bna *bna = (struct bna *)(llarg);
    struct bfi_ll_rsp *mb_rsp = (struct bfi_ll_rsp *)(msg);
    struct bfi_mhdr *cmd_h, *rsp_h;
    struct bna_mbox_qe *mb_qe = NULL;
    int to_post = 0;
    u8 aen = 0;
    char message[BNA_MESSAGE_SIZE];

    aen = bna_is_aen(mb_rsp->mh.msg_id);

    if (!aen) {
        mb_qe = bfa_q_first(&bna->mbox_mod.posted_q);
        cmd_h = (struct bfi_mhdr *)(&mb_qe->cmd.msg[0]);
        rsp_h = (struct bfi_mhdr *)(&mb_rsp->mh);

        if ((BFA_I2HM(cmd_h->msg_id) == rsp_h->msg_id) &&
            (cmd_h->mtag.i2htok == rsp_h->mtag.i2htok)) {
            /* Remove the request from posted_q, update state */
            list_del(&mb_qe->qe);
            bna->mbox_mod.msg_pending--;
            if (list_empty(&bna->mbox_mod.posted_q))
                bna->mbox_mod.state = BNA_MBOX_FREE;
            else
                to_post = 1;

            /* Dispatch the cbfn */
            if (mb_qe->cbfn)
                mb_qe->cbfn(mb_qe->cbarg, mb_rsp->error);

            /* Post the next entry, if needed */
            if (to_post) {
                mb_qe = bfa_q_first(&bna->mbox_mod.posted_q);
                bfa_nw_ioc_mbox_queue(&bna->device.ioc,
                            &mb_qe->cmd);
            }
        } else {
            snprintf(message, BNA_MESSAGE_SIZE,
                    "No matching rsp for [%d:%d:%d]\n",
                    mb_rsp->mh.msg_class, mb_rsp->mh.msg_id,
                    mb_rsp->mh.mtag.i2htok);
            pr_info("%s", message);
        }
    } else
        bna_mbox_aen_callback(bna, msg);
}
static void
bna_err_handler(struct bna *bna, u32 intr_status)
{
    u32 init_halt;

    if (intr_status & __HALT_STATUS_BITS) {
        init_halt = readl(bna->device.ioc.ioc_regs.ll_halt);
        init_halt &= ~__FW_INIT_HALT_P;
        writel(init_halt, bna->device.ioc.ioc_regs.ll_halt);
    }

    bfa_nw_ioc_error_isr(&bna->device.ioc);
}

void
bna_mbox_handler(struct bna *bna, u32 intr_status)
{
    if (BNA_IS_ERR_INTR(intr_status)) {
        bna_err_handler(bna, intr_status);
        return;
    }
    if (BNA_IS_MBOX_INTR(intr_status))
        bfa_nw_ioc_mbox_isr(&bna->device.ioc);
}
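/*
 * Queue a command for the firmware.  Only one command is outstanding at
 * a time: if the mailbox is free the command is handed to the IOC right
 * away and the state moves to BNA_MBOX_POSTED; otherwise it just waits
 * on posted_q and is posted from bna_ll_isr() when the current reply
 * arrives.  The running msg_ctr value is stamped into the header as the
 * i2htok token that is later used to match the reply.
 */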
void
bna_mbox_send(struct bna *bna, struct bna_mbox_qe *mbox_qe)
{
    struct bfi_mhdr *mh;

    mh = (struct bfi_mhdr *)(&mbox_qe->cmd.msg[0]);

    mh->mtag.i2htok = htons(bna->mbox_mod.msg_ctr);
    bna->mbox_mod.msg_ctr++;
    bna->mbox_mod.msg_pending++;
    if (bna->mbox_mod.state == BNA_MBOX_FREE) {
        list_add_tail(&mbox_qe->qe, &bna->mbox_mod.posted_q);
        bfa_nw_ioc_mbox_queue(&bna->device.ioc, &mbox_qe->cmd);
        bna->mbox_mod.state = BNA_MBOX_POSTED;
    } else {
        list_add_tail(&mbox_qe->qe, &bna->mbox_mod.posted_q);
    }
}

static void
bna_mbox_flush_q(struct bna *bna, struct list_head *q)
{
    struct bna_mbox_qe *mb_qe = NULL;
    struct bfi_mhdr *cmd_h;
    struct list_head *mb_q;
    void (*cbfn)(void *arg, int status);
    void *cbarg;

    mb_q = &bna->mbox_mod.posted_q;

    while (!list_empty(mb_q)) {
        bfa_q_deq(mb_q, &mb_qe);
        cbfn = mb_qe->cbfn;
        cbarg = mb_qe->cbarg;
        bfa_q_qe_init(mb_qe);
        bna->mbox_mod.msg_pending--;

        cmd_h = (struct bfi_mhdr *)(&mb_qe->cmd.msg[0]);
        if (cbfn)
            cbfn(cbarg, BNA_CB_NOT_EXEC);
    }

    bna->mbox_mod.state = BNA_MBOX_FREE;
}

void
bna_mbox_mod_start(struct bna_mbox_mod *mbox_mod)
{
}

void
bna_mbox_mod_stop(struct bna_mbox_mod *mbox_mod)
{
    bna_mbox_flush_q(mbox_mod->bna, &mbox_mod->posted_q);
}

void
bna_mbox_mod_init(struct bna_mbox_mod *mbox_mod, struct bna *bna)
{
    bfa_nw_ioc_mbox_regisr(&bna->device.ioc, BFI_MC_LL, bna_ll_isr, bna);
    mbox_mod->state = BNA_MBOX_FREE;
    mbox_mod->msg_ctr = mbox_mod->msg_pending = 0;
    INIT_LIST_HEAD(&mbox_mod->posted_q);
    mbox_mod->bna = bna;
}

void
bna_mbox_mod_uninit(struct bna_mbox_mod *mbox_mod)
{
    mbox_mod->bna = NULL;
}
/**
 * LLPORT
 */
#define call_llport_stop_cbfn(llport, status)\
do {\
    if ((llport)->stop_cbfn)\
        (llport)->stop_cbfn(&(llport)->bna->port, status);\
    (llport)->stop_cbfn = NULL;\
} while (0)

static void bna_fw_llport_up(struct bna_llport *llport);
static void bna_fw_cb_llport_up(void *arg, int status);
static void bna_fw_llport_down(struct bna_llport *llport);
static void bna_fw_cb_llport_down(void *arg, int status);
static void bna_llport_start(struct bna_llport *llport);
static void bna_llport_stop(struct bna_llport *llport);
static void bna_llport_fail(struct bna_llport *llport);

enum bna_llport_event {
    LLPORT_E_START		= 1,
    LLPORT_E_STOP		= 2,
    LLPORT_E_FAIL		= 3,
    LLPORT_E_UP			= 4,
    LLPORT_E_DOWN		= 5,
    LLPORT_E_FWRESP_UP		= 6,
    LLPORT_E_FWRESP_DOWN	= 7
};

enum bna_llport_state {
    BNA_LLPORT_STOPPED		= 1,
    BNA_LLPORT_DOWN		= 2,
    BNA_LLPORT_UP_RESP_WAIT	= 3,
    BNA_LLPORT_DOWN_RESP_WAIT	= 4,
    BNA_LLPORT_UP		= 5,
    BNA_LLPORT_LAST_RESP_WAIT	= 6
};

bfa_fsm_state_decl(bna_llport, stopped, struct bna_llport,
            enum bna_llport_event);
bfa_fsm_state_decl(bna_llport, down, struct bna_llport,
            enum bna_llport_event);
bfa_fsm_state_decl(bna_llport, up_resp_wait, struct bna_llport,
            enum bna_llport_event);
bfa_fsm_state_decl(bna_llport, down_resp_wait, struct bna_llport,
            enum bna_llport_event);
bfa_fsm_state_decl(bna_llport, up, struct bna_llport,
            enum bna_llport_event);
bfa_fsm_state_decl(bna_llport, last_resp_wait, struct bna_llport,
            enum bna_llport_event);

static struct bfa_sm_table llport_sm_table[] = {
    {BFA_SM(bna_llport_sm_stopped), BNA_LLPORT_STOPPED},
    {BFA_SM(bna_llport_sm_down), BNA_LLPORT_DOWN},
    {BFA_SM(bna_llport_sm_up_resp_wait), BNA_LLPORT_UP_RESP_WAIT},
    {BFA_SM(bna_llport_sm_down_resp_wait), BNA_LLPORT_DOWN_RESP_WAIT},
    {BFA_SM(bna_llport_sm_up), BNA_LLPORT_UP},
    {BFA_SM(bna_llport_sm_last_resp_wait), BNA_LLPORT_LAST_RESP_WAIT}
};
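/*
 * The llport state machine tracks the link-layer port against the
 * firmware.  Each up/down transition costs one mailbox round trip, so
 * the *_resp_wait states hold the port while exactly one admin request
 * is outstanding; opposite requests arriving meanwhile are folded into
 * the pending response, and last_resp_wait drains the final response
 * during a stop.
 */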
static void
bna_llport_sm_stopped_entry(struct bna_llport *llport)
{
    llport->bna->port.link_cbfn((llport)->bna->bnad, BNA_LINK_DOWN);
    call_llport_stop_cbfn(llport, BNA_CB_SUCCESS);
}

static void
bna_llport_sm_stopped(struct bna_llport *llport,
            enum bna_llport_event event)
{
    switch (event) {
    case LLPORT_E_START:
        bfa_fsm_set_state(llport, bna_llport_sm_down);
        break;

    case LLPORT_E_STOP:
        call_llport_stop_cbfn(llport, BNA_CB_SUCCESS);
        break;

    case LLPORT_E_FAIL:
        break;

    case LLPORT_E_DOWN:
        /* This event is received due to Rx objects failing */
        /* No-op */
        break;

    case LLPORT_E_FWRESP_UP:
    case LLPORT_E_FWRESP_DOWN:
        /*
         * These events are received due to flushing of mbox when
         * device fails
         */
        /* No-op */
        break;

    default:
        bfa_sm_fault(llport->bna, event);
    }
}

static void
bna_llport_sm_down_entry(struct bna_llport *llport)
{
    bnad_cb_port_link_status((llport)->bna->bnad, BNA_LINK_DOWN);
}

static void
bna_llport_sm_down(struct bna_llport *llport,
            enum bna_llport_event event)
{
    switch (event) {
    case LLPORT_E_STOP:
        bfa_fsm_set_state(llport, bna_llport_sm_stopped);
        break;

    case LLPORT_E_FAIL:
        bfa_fsm_set_state(llport, bna_llport_sm_stopped);
        break;

    case LLPORT_E_UP:
        bfa_fsm_set_state(llport, bna_llport_sm_up_resp_wait);
        bna_fw_llport_up(llport);
        break;

    default:
        bfa_sm_fault(llport->bna, event);
    }
}

static void
bna_llport_sm_up_resp_wait_entry(struct bna_llport *llport)
{
    /*
     * NOTE: Do not call bna_fw_llport_up() here. That will over step
     * mbox due to down_resp_wait -> up_resp_wait transition on event
     * LLPORT_E_UP
     */
}

static void
bna_llport_sm_up_resp_wait(struct bna_llport *llport,
            enum bna_llport_event event)
{
    switch (event) {
    case LLPORT_E_STOP:
        bfa_fsm_set_state(llport, bna_llport_sm_last_resp_wait);
        break;

    case LLPORT_E_FAIL:
        bfa_fsm_set_state(llport, bna_llport_sm_stopped);
        break;

    case LLPORT_E_DOWN:
        bfa_fsm_set_state(llport, bna_llport_sm_down_resp_wait);
        break;

    case LLPORT_E_FWRESP_UP:
        bfa_fsm_set_state(llport, bna_llport_sm_up);
        break;

    case LLPORT_E_FWRESP_DOWN:
        /* down_resp_wait -> up_resp_wait transition on LLPORT_E_UP */
        bna_fw_llport_up(llport);
        break;

    default:
        bfa_sm_fault(llport->bna, event);
    }
}

static void
bna_llport_sm_down_resp_wait_entry(struct bna_llport *llport)
{
    /*
     * NOTE: Do not call bna_fw_llport_down() here. That will over step
     * mbox due to up_resp_wait -> down_resp_wait transition on event
     * LLPORT_E_DOWN
     */
}

static void
bna_llport_sm_down_resp_wait(struct bna_llport *llport,
            enum bna_llport_event event)
{
    switch (event) {
    case LLPORT_E_STOP:
        bfa_fsm_set_state(llport, bna_llport_sm_last_resp_wait);
        break;

    case LLPORT_E_FAIL:
        bfa_fsm_set_state(llport, bna_llport_sm_stopped);
        break;

    case LLPORT_E_UP:
        bfa_fsm_set_state(llport, bna_llport_sm_up_resp_wait);
        break;

    case LLPORT_E_FWRESP_UP:
        /* up_resp_wait->down_resp_wait transition on LLPORT_E_DOWN */
        bna_fw_llport_down(llport);
        break;

    case LLPORT_E_FWRESP_DOWN:
        bfa_fsm_set_state(llport, bna_llport_sm_down);
        break;

    default:
        bfa_sm_fault(llport->bna, event);
    }
}

static void
bna_llport_sm_up_entry(struct bna_llport *llport)
{
}

static void
bna_llport_sm_up(struct bna_llport *llport,
            enum bna_llport_event event)
{
    switch (event) {
    case LLPORT_E_STOP:
        bfa_fsm_set_state(llport, bna_llport_sm_last_resp_wait);
        bna_fw_llport_down(llport);
        break;

    case LLPORT_E_FAIL:
        bfa_fsm_set_state(llport, bna_llport_sm_stopped);
        break;

    case LLPORT_E_DOWN:
        bfa_fsm_set_state(llport, bna_llport_sm_down_resp_wait);
        bna_fw_llport_down(llport);
        break;

    default:
        bfa_sm_fault(llport->bna, event);
    }
}

static void
bna_llport_sm_last_resp_wait_entry(struct bna_llport *llport)
{
}

static void
bna_llport_sm_last_resp_wait(struct bna_llport *llport,
            enum bna_llport_event event)
{
    switch (event) {
    case LLPORT_E_FAIL:
        bfa_fsm_set_state(llport, bna_llport_sm_stopped);
        break;

    case LLPORT_E_DOWN:
        /*
         * This event is received due to Rx objects stopping in
         * parallel to llport
         */
        /* No-op */
        break;

    case LLPORT_E_FWRESP_UP:
        /* up_resp_wait->last_resp_wait transition on LLPORT_E_STOP */
        bna_fw_llport_down(llport);
        break;

    case LLPORT_E_FWRESP_DOWN:
        bfa_fsm_set_state(llport, bna_llport_sm_stopped);
        break;

    default:
        bfa_sm_fault(llport->bna, event);
    }
}
static void
bna_fw_llport_admin_up(struct bna_llport *llport)
{
    struct bfi_ll_port_admin_req ll_req;

    memset(&ll_req, 0, sizeof(ll_req));
    ll_req.mh.msg_class = BFI_MC_LL;
    ll_req.mh.msg_id = BFI_LL_H2I_PORT_ADMIN_REQ;
    ll_req.mh.mtag.h2i.lpu_id = 0;

    ll_req.up = BNA_STATUS_T_ENABLED;

    bna_mbox_qe_fill(&llport->mbox_qe, &ll_req, sizeof(ll_req),
            bna_fw_cb_llport_up, llport);

    bna_mbox_send(llport->bna, &llport->mbox_qe);
}

static void
bna_fw_llport_up(struct bna_llport *llport)
{
    if (llport->type == BNA_PORT_T_REGULAR)
        bna_fw_llport_admin_up(llport);
}

static void
bna_fw_cb_llport_up(void *arg, int status)
{
    struct bna_llport *llport = (struct bna_llport *)arg;

    bfa_q_qe_init(&llport->mbox_qe.qe);
    bfa_fsm_send_event(llport, LLPORT_E_FWRESP_UP);
}

static void
bna_fw_llport_admin_down(struct bna_llport *llport)
{
    struct bfi_ll_port_admin_req ll_req;

    memset(&ll_req, 0, sizeof(ll_req));
    ll_req.mh.msg_class = BFI_MC_LL;
    ll_req.mh.msg_id = BFI_LL_H2I_PORT_ADMIN_REQ;
    ll_req.mh.mtag.h2i.lpu_id = 0;

    ll_req.up = BNA_STATUS_T_DISABLED;

    bna_mbox_qe_fill(&llport->mbox_qe, &ll_req, sizeof(ll_req),
            bna_fw_cb_llport_down, llport);

    bna_mbox_send(llport->bna, &llport->mbox_qe);
}

static void
bna_fw_llport_down(struct bna_llport *llport)
{
    if (llport->type == BNA_PORT_T_REGULAR)
        bna_fw_llport_admin_down(llport);
}

static void
bna_fw_cb_llport_down(void *arg, int status)
{
    struct bna_llport *llport = (struct bna_llport *)arg;

    bfa_q_qe_init(&llport->mbox_qe.qe);
    bfa_fsm_send_event(llport, LLPORT_E_FWRESP_DOWN);
}
static void
bna_port_cb_llport_stopped(struct bna_port *port,
            enum bna_cb_status status)
{
    bfa_wc_down(&port->chld_stop_wc);
}

static void
bna_llport_init(struct bna_llport *llport, struct bna *bna)
{
    llport->flags |= BNA_LLPORT_F_ENABLED;
    llport->type = BNA_PORT_T_REGULAR;

    llport->link_status = BNA_LINK_DOWN;

    llport->admin_up_count = 0;

    llport->stop_cbfn = NULL;

    llport->bna = bna;

    bfa_q_qe_init(&llport->mbox_qe.qe);

    bfa_fsm_set_state(llport, bna_llport_sm_stopped);
}

static void
bna_llport_uninit(struct bna_llport *llport)
{
    llport->flags &= ~BNA_LLPORT_F_ENABLED;

    llport->bna = NULL;
}

static void
bna_llport_start(struct bna_llport *llport)
{
    bfa_fsm_send_event(llport, LLPORT_E_START);
}

static void
bna_llport_stop(struct bna_llport *llport)
{
    llport->stop_cbfn = bna_port_cb_llport_stopped;

    bfa_fsm_send_event(llport, LLPORT_E_STOP);
}

static void
bna_llport_fail(struct bna_llport *llport)
{
    bfa_fsm_send_event(llport, LLPORT_E_FAIL);
}

static int
bna_llport_state_get(struct bna_llport *llport)
{
    return bfa_sm_to_state(llport_sm_table, llport->fsm);
}

void
bna_llport_admin_up(struct bna_llport *llport)
{
    llport->admin_up_count++;

    if (llport->admin_up_count == 1) {
        llport->flags |= BNA_LLPORT_F_RX_ENABLED;
        if (llport->flags & BNA_LLPORT_F_ENABLED)
            bfa_fsm_send_event(llport, LLPORT_E_UP);
    }
}

void
bna_llport_admin_down(struct bna_llport *llport)
{
    llport->admin_up_count--;

    if (llport->admin_up_count == 0) {
        llport->flags &= ~BNA_LLPORT_F_RX_ENABLED;
        if (llport->flags & BNA_LLPORT_F_ENABLED)
            bfa_fsm_send_event(llport, LLPORT_E_DOWN);
    }
}
/**
 * PORT
 */
#define bna_port_chld_start(port)\
do {\
    enum bna_tx_type tx_type = ((port)->type == BNA_PORT_T_REGULAR) ?\
                    BNA_TX_T_REGULAR : BNA_TX_T_LOOPBACK;\
    enum bna_rx_type rx_type = ((port)->type == BNA_PORT_T_REGULAR) ?\
                    BNA_RX_T_REGULAR : BNA_RX_T_LOOPBACK;\
    bna_llport_start(&(port)->llport);\
    bna_tx_mod_start(&(port)->bna->tx_mod, tx_type);\
    bna_rx_mod_start(&(port)->bna->rx_mod, rx_type);\
} while (0)

#define bna_port_chld_stop(port)\
do {\
    enum bna_tx_type tx_type = ((port)->type == BNA_PORT_T_REGULAR) ?\
                    BNA_TX_T_REGULAR : BNA_TX_T_LOOPBACK;\
    enum bna_rx_type rx_type = ((port)->type == BNA_PORT_T_REGULAR) ?\
                    BNA_RX_T_REGULAR : BNA_RX_T_LOOPBACK;\
    bfa_wc_up(&(port)->chld_stop_wc);\
    bfa_wc_up(&(port)->chld_stop_wc);\
    bfa_wc_up(&(port)->chld_stop_wc);\
    bna_llport_stop(&(port)->llport);\
    bna_tx_mod_stop(&(port)->bna->tx_mod, tx_type);\
    bna_rx_mod_stop(&(port)->bna->rx_mod, rx_type);\
} while (0)

#define bna_port_chld_fail(port)\
do {\
    bna_llport_fail(&(port)->llport);\
    bna_tx_mod_fail(&(port)->bna->tx_mod);\
    bna_rx_mod_fail(&(port)->bna->rx_mod);\
} while (0)

#define bna_port_rx_start(port)\
do {\
    enum bna_rx_type rx_type = ((port)->type == BNA_PORT_T_REGULAR) ?\
                    BNA_RX_T_REGULAR : BNA_RX_T_LOOPBACK;\
    bna_rx_mod_start(&(port)->bna->rx_mod, rx_type);\
} while (0)

#define bna_port_rx_stop(port)\
do {\
    enum bna_rx_type rx_type = ((port)->type == BNA_PORT_T_REGULAR) ?\
                    BNA_RX_T_REGULAR : BNA_RX_T_LOOPBACK;\
    bfa_wc_up(&(port)->chld_stop_wc);\
    bna_rx_mod_stop(&(port)->bna->rx_mod, rx_type);\
} while (0)

#define call_port_stop_cbfn(port, status)\
do {\
    if ((port)->stop_cbfn)\
        (port)->stop_cbfn((port)->stop_cbarg, status);\
    (port)->stop_cbfn = NULL;\
    (port)->stop_cbarg = NULL;\
} while (0)

#define call_port_pause_cbfn(port, status)\
do {\
    if ((port)->pause_cbfn)\
        (port)->pause_cbfn((port)->bna->bnad, status);\
    (port)->pause_cbfn = NULL;\
} while (0)

#define call_port_mtu_cbfn(port, status)\
do {\
    if ((port)->mtu_cbfn)\
        (port)->mtu_cbfn((port)->bna->bnad, status);\
    (port)->mtu_cbfn = NULL;\
} while (0)
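/*
 * chld_stop_wc is a waterfall counter: bna_port_chld_stop() raises it
 * once per child (llport, tx_mod, rx_mod) before asking each to stop,
 * and each child's *_stopped callback lowers it again.  Only when it
 * drains to zero does the wc_resume hook, bna_port_cb_chld_stopped(),
 * deliver PORT_E_CHLD_STOPPED to the port state machine.
 */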
static void bna_fw_pause_set(struct bna_port *port);
static void bna_fw_cb_pause_set(void *arg, int status);
static void bna_fw_mtu_set(struct bna_port *port);
static void bna_fw_cb_mtu_set(void *arg, int status);

enum bna_port_event {
    PORT_E_START		= 1,
    PORT_E_STOP			= 2,
    PORT_E_FAIL			= 3,
    PORT_E_PAUSE_CFG		= 4,
    PORT_E_MTU_CFG		= 5,
    PORT_E_CHLD_STOPPED		= 6,
    PORT_E_FWRESP_PAUSE		= 7,
    PORT_E_FWRESP_MTU		= 8
};

enum bna_port_state {
    BNA_PORT_STOPPED		= 1,
    BNA_PORT_MTU_INIT_WAIT	= 2,
    BNA_PORT_PAUSE_INIT_WAIT	= 3,
    BNA_PORT_LAST_RESP_WAIT	= 4,
    BNA_PORT_STARTED		= 5,
    BNA_PORT_PAUSE_CFG_WAIT	= 6,
    BNA_PORT_RX_STOP_WAIT	= 7,
    BNA_PORT_MTU_CFG_WAIT	= 8,
    BNA_PORT_CHLD_STOP_WAIT	= 9
};

bfa_fsm_state_decl(bna_port, stopped, struct bna_port,
            enum bna_port_event);
bfa_fsm_state_decl(bna_port, mtu_init_wait, struct bna_port,
            enum bna_port_event);
bfa_fsm_state_decl(bna_port, pause_init_wait, struct bna_port,
            enum bna_port_event);
bfa_fsm_state_decl(bna_port, last_resp_wait, struct bna_port,
            enum bna_port_event);
bfa_fsm_state_decl(bna_port, started, struct bna_port,
            enum bna_port_event);
bfa_fsm_state_decl(bna_port, pause_cfg_wait, struct bna_port,
            enum bna_port_event);
bfa_fsm_state_decl(bna_port, rx_stop_wait, struct bna_port,
            enum bna_port_event);
bfa_fsm_state_decl(bna_port, mtu_cfg_wait, struct bna_port,
            enum bna_port_event);
bfa_fsm_state_decl(bna_port, chld_stop_wait, struct bna_port,
            enum bna_port_event);

static struct bfa_sm_table port_sm_table[] = {
    {BFA_SM(bna_port_sm_stopped), BNA_PORT_STOPPED},
    {BFA_SM(bna_port_sm_mtu_init_wait), BNA_PORT_MTU_INIT_WAIT},
    {BFA_SM(bna_port_sm_pause_init_wait), BNA_PORT_PAUSE_INIT_WAIT},
    {BFA_SM(bna_port_sm_last_resp_wait), BNA_PORT_LAST_RESP_WAIT},
    {BFA_SM(bna_port_sm_started), BNA_PORT_STARTED},
    {BFA_SM(bna_port_sm_pause_cfg_wait), BNA_PORT_PAUSE_CFG_WAIT},
    {BFA_SM(bna_port_sm_rx_stop_wait), BNA_PORT_RX_STOP_WAIT},
    {BFA_SM(bna_port_sm_mtu_cfg_wait), BNA_PORT_MTU_CFG_WAIT},
    {BFA_SM(bna_port_sm_chld_stop_wait), BNA_PORT_CHLD_STOP_WAIT}
};
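/*
 * Port bring-up is a two-step firmware handshake: an MTU set
 * (mtu_init_wait) followed by a pause set (pause_init_wait) before the
 * children are started.  Reconfiguration while started detours through
 * pause_cfg_wait, or through rx_stop_wait -> mtu_cfg_wait for an MTU
 * change, since the Rx path must be quiesced before the MTU changes.
 */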
static void
bna_port_sm_stopped_entry(struct bna_port *port)
{
    call_port_pause_cbfn(port, BNA_CB_SUCCESS);
    call_port_mtu_cbfn(port, BNA_CB_SUCCESS);
    call_port_stop_cbfn(port, BNA_CB_SUCCESS);
}

static void
bna_port_sm_stopped(struct bna_port *port, enum bna_port_event event)
{
    switch (event) {
    case PORT_E_START:
        bfa_fsm_set_state(port, bna_port_sm_mtu_init_wait);
        break;

    case PORT_E_STOP:
        call_port_stop_cbfn(port, BNA_CB_SUCCESS);
        break;

    case PORT_E_FAIL:
        /* No-op */
        break;

    case PORT_E_PAUSE_CFG:
        call_port_pause_cbfn(port, BNA_CB_SUCCESS);
        break;

    case PORT_E_MTU_CFG:
        call_port_mtu_cbfn(port, BNA_CB_SUCCESS);
        break;

    case PORT_E_CHLD_STOPPED:
        /*
         * This event is received due to LLPort, Tx and Rx objects
         * failing
         */
        /* No-op */
        break;

    case PORT_E_FWRESP_PAUSE:
    case PORT_E_FWRESP_MTU:
        /*
         * These events are received due to flushing of mbox when
         * device fails
         */
        /* No-op */
        break;

    default:
        bfa_sm_fault(port->bna, event);
    }
}

static void
bna_port_sm_mtu_init_wait_entry(struct bna_port *port)
{
    bna_fw_mtu_set(port);
}

static void
bna_port_sm_mtu_init_wait(struct bna_port *port, enum bna_port_event event)
{
    switch (event) {
    case PORT_E_STOP:
        bfa_fsm_set_state(port, bna_port_sm_last_resp_wait);
        break;

    case PORT_E_FAIL:
        bfa_fsm_set_state(port, bna_port_sm_stopped);
        break;

    case PORT_E_PAUSE_CFG:
        /* No-op */
        break;

    case PORT_E_MTU_CFG:
        port->flags |= BNA_PORT_F_MTU_CHANGED;
        break;

    case PORT_E_FWRESP_MTU:
        if (port->flags & BNA_PORT_F_MTU_CHANGED) {
            port->flags &= ~BNA_PORT_F_MTU_CHANGED;
            bna_fw_mtu_set(port);
        } else {
            bfa_fsm_set_state(port, bna_port_sm_pause_init_wait);
        }
        break;

    default:
        bfa_sm_fault(port->bna, event);
    }
}

static void
bna_port_sm_pause_init_wait_entry(struct bna_port *port)
{
    bna_fw_pause_set(port);
}

static void
bna_port_sm_pause_init_wait(struct bna_port *port,
            enum bna_port_event event)
{
    switch (event) {
    case PORT_E_STOP:
        bfa_fsm_set_state(port, bna_port_sm_last_resp_wait);
        break;

    case PORT_E_FAIL:
        bfa_fsm_set_state(port, bna_port_sm_stopped);
        break;

    case PORT_E_PAUSE_CFG:
        port->flags |= BNA_PORT_F_PAUSE_CHANGED;
        break;

    case PORT_E_MTU_CFG:
        port->flags |= BNA_PORT_F_MTU_CHANGED;
        break;

    case PORT_E_FWRESP_PAUSE:
        if (port->flags & BNA_PORT_F_PAUSE_CHANGED) {
            port->flags &= ~BNA_PORT_F_PAUSE_CHANGED;
            bna_fw_pause_set(port);
        } else if (port->flags & BNA_PORT_F_MTU_CHANGED) {
            port->flags &= ~BNA_PORT_F_MTU_CHANGED;
            bfa_fsm_set_state(port, bna_port_sm_mtu_init_wait);
        } else {
            bfa_fsm_set_state(port, bna_port_sm_started);
            bna_port_chld_start(port);
        }
        break;

    default:
        bfa_sm_fault(port->bna, event);
    }
}

static void
bna_port_sm_last_resp_wait_entry(struct bna_port *port)
{
}

static void
bna_port_sm_last_resp_wait(struct bna_port *port,
            enum bna_port_event event)
{
    switch (event) {
    case PORT_E_FWRESP_PAUSE:
    case PORT_E_FWRESP_MTU:
        bfa_fsm_set_state(port, bna_port_sm_stopped);
        break;

    default:
        bfa_sm_fault(port->bna, event);
    }
}

static void
bna_port_sm_started_entry(struct bna_port *port)
{
    /*
     * NOTE: Do not call bna_port_chld_start() here, since it will be
     * inadvertently called during pause_cfg_wait->started transition
     * as well
     */
    call_port_pause_cbfn(port, BNA_CB_SUCCESS);
    call_port_mtu_cbfn(port, BNA_CB_SUCCESS);
}

static void
bna_port_sm_started(struct bna_port *port,
            enum bna_port_event event)
{
    switch (event) {
    case PORT_E_STOP:
        bfa_fsm_set_state(port, bna_port_sm_chld_stop_wait);
        break;

    case PORT_E_FAIL:
        bfa_fsm_set_state(port, bna_port_sm_stopped);
        bna_port_chld_fail(port);
        break;

    case PORT_E_PAUSE_CFG:
        bfa_fsm_set_state(port, bna_port_sm_pause_cfg_wait);
        break;

    case PORT_E_MTU_CFG:
        bfa_fsm_set_state(port, bna_port_sm_rx_stop_wait);
        break;

    default:
        bfa_sm_fault(port->bna, event);
    }
}

static void
bna_port_sm_pause_cfg_wait_entry(struct bna_port *port)
{
    bna_fw_pause_set(port);
}

static void
bna_port_sm_pause_cfg_wait(struct bna_port *port,
            enum bna_port_event event)
{
    switch (event) {
    case PORT_E_FAIL:
        bfa_fsm_set_state(port, bna_port_sm_stopped);
        bna_port_chld_fail(port);
        break;

    case PORT_E_FWRESP_PAUSE:
        bfa_fsm_set_state(port, bna_port_sm_started);
        break;

    default:
        bfa_sm_fault(port->bna, event);
    }
}

static void
bna_port_sm_rx_stop_wait_entry(struct bna_port *port)
{
    bna_port_rx_stop(port);
}

static void
bna_port_sm_rx_stop_wait(struct bna_port *port,
            enum bna_port_event event)
{
    switch (event) {
    case PORT_E_FAIL:
        bfa_fsm_set_state(port, bna_port_sm_stopped);
        bna_port_chld_fail(port);
        break;

    case PORT_E_CHLD_STOPPED:
        bfa_fsm_set_state(port, bna_port_sm_mtu_cfg_wait);
        break;

    default:
        bfa_sm_fault(port->bna, event);
    }
}

static void
bna_port_sm_mtu_cfg_wait_entry(struct bna_port *port)
{
    bna_fw_mtu_set(port);
}

static void
bna_port_sm_mtu_cfg_wait(struct bna_port *port, enum bna_port_event event)
{
    switch (event) {
    case PORT_E_FAIL:
        bfa_fsm_set_state(port, bna_port_sm_stopped);
        bna_port_chld_fail(port);
        break;

    case PORT_E_FWRESP_MTU:
        bfa_fsm_set_state(port, bna_port_sm_started);
        bna_port_rx_start(port);
        break;

    default:
        bfa_sm_fault(port->bna, event);
    }
}

static void
bna_port_sm_chld_stop_wait_entry(struct bna_port *port)
{
    bna_port_chld_stop(port);
}

static void
bna_port_sm_chld_stop_wait(struct bna_port *port,
            enum bna_port_event event)
{
    switch (event) {
    case PORT_E_FAIL:
        bfa_fsm_set_state(port, bna_port_sm_stopped);
        bna_port_chld_fail(port);
        break;

    case PORT_E_CHLD_STOPPED:
        bfa_fsm_set_state(port, bna_port_sm_stopped);
        break;

    default:
        bfa_sm_fault(port->bna, event);
    }
}
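/*
 * Firmware configuration helpers.  Each builds a BFI_MC_LL request,
 * wraps it in the port's single mbox_qe and sends it through
 * bna_mbox_send(); the completion callback re-initializes the queue
 * element and feeds the matching FWRESP event back into the port FSM.
 */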
static void
bna_fw_pause_set(struct bna_port *port)
{
    struct bfi_ll_set_pause_req ll_req;

    memset(&ll_req, 0, sizeof(ll_req));
    ll_req.mh.msg_class = BFI_MC_LL;
    ll_req.mh.msg_id = BFI_LL_H2I_SET_PAUSE_REQ;
    ll_req.mh.mtag.h2i.lpu_id = 0;

    ll_req.tx_pause = port->pause_config.tx_pause;
    ll_req.rx_pause = port->pause_config.rx_pause;

    bna_mbox_qe_fill(&port->mbox_qe, &ll_req, sizeof(ll_req),
            bna_fw_cb_pause_set, port);

    bna_mbox_send(port->bna, &port->mbox_qe);
}

static void
bna_fw_cb_pause_set(void *arg, int status)
{
    struct bna_port *port = (struct bna_port *)arg;

    bfa_q_qe_init(&port->mbox_qe.qe);
    bfa_fsm_send_event(port, PORT_E_FWRESP_PAUSE);
}

static void
bna_fw_mtu_set(struct bna_port *port)
{
    struct bfi_ll_mtu_info_req ll_req;

    bfi_h2i_set(ll_req.mh, BFI_MC_LL, BFI_LL_H2I_MTU_INFO_REQ, 0);
    ll_req.mtu = htons((u16)port->mtu);

    bna_mbox_qe_fill(&port->mbox_qe, &ll_req, sizeof(ll_req),
                bna_fw_cb_mtu_set, port);
    bna_mbox_send(port->bna, &port->mbox_qe);
}

static void
bna_fw_cb_mtu_set(void *arg, int status)
{
    struct bna_port *port = (struct bna_port *)arg;

    bfa_q_qe_init(&port->mbox_qe.qe);
    bfa_fsm_send_event(port, PORT_E_FWRESP_MTU);
}
static void
bna_port_cb_chld_stopped(void *arg)
{
    struct bna_port *port = (struct bna_port *)arg;

    bfa_fsm_send_event(port, PORT_E_CHLD_STOPPED);
}

void
bna_port_init(struct bna_port *port, struct bna *bna)
{
    port->bna = bna;
    port->type = BNA_PORT_T_REGULAR;

    port->link_cbfn = bnad_cb_port_link_status;

    port->chld_stop_wc.wc_resume = bna_port_cb_chld_stopped;
    port->chld_stop_wc.wc_cbarg = port;
    port->chld_stop_wc.wc_count = 0;

    port->stop_cbfn = NULL;
    port->stop_cbarg = NULL;

    port->pause_cbfn = NULL;

    port->mtu_cbfn = NULL;

    bfa_q_qe_init(&port->mbox_qe.qe);

    bfa_fsm_set_state(port, bna_port_sm_stopped);

    bna_llport_init(&port->llport, bna);
}

void
bna_port_uninit(struct bna_port *port)
{
    bna_llport_uninit(&port->llport);

    port->bna = NULL;
}

int
bna_port_state_get(struct bna_port *port)
{
    return bfa_sm_to_state(port_sm_table, port->fsm);
}

void
bna_port_start(struct bna_port *port)
{
    port->flags |= BNA_PORT_F_DEVICE_READY;
    if (port->flags & BNA_PORT_F_ENABLED)
        bfa_fsm_send_event(port, PORT_E_START);
}

void
bna_port_stop(struct bna_port *port)
{
    port->stop_cbfn = bna_device_cb_port_stopped;
    port->stop_cbarg = &port->bna->device;

    port->flags &= ~BNA_PORT_F_DEVICE_READY;
    bfa_fsm_send_event(port, PORT_E_STOP);
}

void
bna_port_fail(struct bna_port *port)
{
    port->flags &= ~BNA_PORT_F_DEVICE_READY;
    bfa_fsm_send_event(port, PORT_E_FAIL);
}

void
bna_port_cb_tx_stopped(struct bna_port *port, enum bna_cb_status status)
{
    bfa_wc_down(&port->chld_stop_wc);
}

void
bna_port_cb_rx_stopped(struct bna_port *port, enum bna_cb_status status)
{
    bfa_wc_down(&port->chld_stop_wc);
}
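/*
 * Link-up AEN handler.  When the event reports CEE link-up, the lowest
 * set bit of the priority bitmap it carries selects port->priority
 * (0 if the map is empty), and the Tx side is notified through
 * bna_tx_mod_prio_changed() so outgoing traffic is tagged accordingly.
 */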
void
bna_port_cb_link_up(struct bna_port *port, struct bfi_ll_aen *aen,
            int status)
{
    int i;
    u8 prio_map;

    port->llport.link_status = BNA_LINK_UP;
    if (aen->cee_linkup)
        port->llport.link_status = BNA_CEE_UP;

    /* Compute the priority */
    prio_map = aen->prio_map;
    if (prio_map) {
        for (i = 0; i < 8; i++) {
            if ((prio_map >> i) & 0x1)
                break;
        }
        port->priority = i;
    } else
        port->priority = 0;

    /* Dispatch events */
    bna_tx_mod_cee_link_status(&port->bna->tx_mod, aen->cee_linkup);
    bna_tx_mod_prio_changed(&port->bna->tx_mod, port->priority);
    port->link_cbfn(port->bna->bnad, port->llport.link_status);
}

void
bna_port_cb_link_down(struct bna_port *port, int status)
{
    port->llport.link_status = BNA_LINK_DOWN;

    /* Dispatch events */
    bna_tx_mod_cee_link_status(&port->bna->tx_mod, BNA_LINK_DOWN);
    port->link_cbfn(port->bna->bnad, BNA_LINK_DOWN);
}
int
bna_port_mtu_get(struct bna_port *port)
{
    return port->mtu;
}

void
bna_port_enable(struct bna_port *port)
{
    if (port->fsm != (bfa_sm_t)bna_port_sm_stopped)
        return;

    port->flags |= BNA_PORT_F_ENABLED;

    if (port->flags & BNA_PORT_F_DEVICE_READY)
        bfa_fsm_send_event(port, PORT_E_START);
}

void
bna_port_disable(struct bna_port *port, enum bna_cleanup_type type,
        void (*cbfn)(void *, enum bna_cb_status))
{
    if (type == BNA_SOFT_CLEANUP) {
        (*cbfn)(port->bna->bnad, BNA_CB_SUCCESS);
        return;
    }

    port->stop_cbfn = cbfn;
    port->stop_cbarg = port->bna->bnad;

    port->flags &= ~BNA_PORT_F_ENABLED;

    bfa_fsm_send_event(port, PORT_E_STOP);
}

void
bna_port_pause_config(struct bna_port *port,
        struct bna_pause_config *pause_config,
        void (*cbfn)(struct bnad *, enum bna_cb_status))
{
    port->pause_config = *pause_config;

    port->pause_cbfn = cbfn;

    bfa_fsm_send_event(port, PORT_E_PAUSE_CFG);
}

void
bna_port_mtu_set(struct bna_port *port, int mtu,
        void (*cbfn)(struct bnad *, enum bna_cb_status))
{
    port->mtu = mtu;

    port->mtu_cbfn = cbfn;

    bfa_fsm_send_event(port, PORT_E_MTU_CFG);
}

void
bna_port_mac_get(struct bna_port *port, mac_t *mac)
{
    *mac = bfa_nw_ioc_get_mac(&port->bna->device.ioc);
}

/**
 * Should be called only when port is disabled
 */
void
bna_port_type_set(struct bna_port *port, enum bna_port_type type)
{
    port->type = type;
    port->llport.type = type;
}

/**
 * Should be called only when port is disabled
 */
void
bna_port_linkcbfn_set(struct bna_port *port,
        void (*linkcbfn)(struct bnad *, enum bna_link_status))
{
    port->link_cbfn = linkcbfn;
}

void
bna_port_admin_up(struct bna_port *port)
{
    struct bna_llport *llport = &port->llport;

    if (llport->flags & BNA_LLPORT_F_ENABLED)
        return;

    llport->flags |= BNA_LLPORT_F_ENABLED;

    if (llport->flags & BNA_LLPORT_F_RX_ENABLED)
        bfa_fsm_send_event(llport, LLPORT_E_UP);
}

void
bna_port_admin_down(struct bna_port *port)
{
    struct bna_llport *llport = &port->llport;

    if (!(llport->flags & BNA_LLPORT_F_ENABLED))
        return;

    llport->flags &= ~BNA_LLPORT_F_ENABLED;

    if (llport->flags & BNA_LLPORT_F_RX_ENABLED)
        bfa_fsm_send_event(llport, LLPORT_E_DOWN);
}
/**
 * DEVICE
 */
#define enable_mbox_intr(_device)\
do {\
    u32 intr_status;\
    bna_intr_status_get((_device)->bna, intr_status);\
    bnad_cb_device_enable_mbox_intr((_device)->bna->bnad);\
    bna_mbox_intr_enable((_device)->bna);\
} while (0)

#define disable_mbox_intr(_device)\
do {\
    bna_mbox_intr_disable((_device)->bna);\
    bnad_cb_device_disable_mbox_intr((_device)->bna->bnad);\
} while (0)

const struct bna_chip_regs_offset reg_offset[] =
{{HOST_PAGE_NUM_FN0, HOSTFN0_INT_STATUS,
    HOSTFN0_INT_MASK, HOST_MSIX_ERR_INDEX_FN0},
{HOST_PAGE_NUM_FN1, HOSTFN1_INT_STATUS,
    HOSTFN1_INT_MASK, HOST_MSIX_ERR_INDEX_FN1},
{HOST_PAGE_NUM_FN2, HOSTFN2_INT_STATUS,
    HOSTFN2_INT_MASK, HOST_MSIX_ERR_INDEX_FN2},
{HOST_PAGE_NUM_FN3, HOSTFN3_INT_STATUS,
    HOSTFN3_INT_MASK, HOST_MSIX_ERR_INDEX_FN3},
};

enum bna_device_event {
    DEVICE_E_ENABLE		= 1,
    DEVICE_E_DISABLE		= 2,
    DEVICE_E_IOC_READY		= 3,
    DEVICE_E_IOC_FAILED		= 4,
    DEVICE_E_IOC_DISABLED	= 5,
    DEVICE_E_IOC_RESET		= 6,
    DEVICE_E_PORT_STOPPED	= 7,
};

enum bna_device_state {
    BNA_DEVICE_STOPPED		= 1,
    BNA_DEVICE_IOC_READY_WAIT	= 2,
    BNA_DEVICE_READY		= 3,
    BNA_DEVICE_PORT_STOP_WAIT	= 4,
    BNA_DEVICE_IOC_DISABLE_WAIT	= 5,
    BNA_DEVICE_FAILED		= 6
};

bfa_fsm_state_decl(bna_device, stopped, struct bna_device,
            enum bna_device_event);
bfa_fsm_state_decl(bna_device, ioc_ready_wait, struct bna_device,
            enum bna_device_event);
bfa_fsm_state_decl(bna_device, ready, struct bna_device,
            enum bna_device_event);
bfa_fsm_state_decl(bna_device, port_stop_wait, struct bna_device,
            enum bna_device_event);
bfa_fsm_state_decl(bna_device, ioc_disable_wait, struct bna_device,
            enum bna_device_event);
bfa_fsm_state_decl(bna_device, failed, struct bna_device,
            enum bna_device_event);

static struct bfa_sm_table device_sm_table[] = {
    {BFA_SM(bna_device_sm_stopped), BNA_DEVICE_STOPPED},
    {BFA_SM(bna_device_sm_ioc_ready_wait), BNA_DEVICE_IOC_READY_WAIT},
    {BFA_SM(bna_device_sm_ready), BNA_DEVICE_READY},
    {BFA_SM(bna_device_sm_port_stop_wait), BNA_DEVICE_PORT_STOP_WAIT},
    {BFA_SM(bna_device_sm_ioc_disable_wait), BNA_DEVICE_IOC_DISABLE_WAIT},
    {BFA_SM(bna_device_sm_failed), BNA_DEVICE_FAILED},
};
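/*
 * The device state machine sequences IOC firmware bring-up and
 * teardown: enable waits in ioc_ready_wait for the IOC handshake,
 * ready runs the port, and disable funnels through port_stop_wait ->
 * ioc_disable_wait so the port and mailbox are quiesced before the
 * IOC itself is disabled.
 */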
static void
bna_device_sm_stopped_entry(struct bna_device *device)
{
    if (device->stop_cbfn)
        device->stop_cbfn(device->stop_cbarg, BNA_CB_SUCCESS);

    device->stop_cbfn = NULL;
    device->stop_cbarg = NULL;
}

static void
bna_device_sm_stopped(struct bna_device *device,
            enum bna_device_event event)
{
    switch (event) {
    case DEVICE_E_ENABLE:
        if (device->intr_type == BNA_INTR_T_MSIX)
            bna_mbox_msix_idx_set(device);
        bfa_nw_ioc_enable(&device->ioc);
        bfa_fsm_set_state(device, bna_device_sm_ioc_ready_wait);
        break;

    case DEVICE_E_DISABLE:
        bfa_fsm_set_state(device, bna_device_sm_stopped);
        break;

    case DEVICE_E_IOC_RESET:
        enable_mbox_intr(device);
        break;

    case DEVICE_E_IOC_FAILED:
        bfa_fsm_set_state(device, bna_device_sm_failed);
        break;

    default:
        bfa_sm_fault(device->bna, event);
    }
}

static void
bna_device_sm_ioc_ready_wait_entry(struct bna_device *device)
{
    /*
     * Do not call bfa_ioc_enable() here. It must be called in the
     * previous state due to failed -> ioc_ready_wait transition.
     */
}

static void
bna_device_sm_ioc_ready_wait(struct bna_device *device,
            enum bna_device_event event)
{
    switch (event) {
    case DEVICE_E_DISABLE:
        if (device->ready_cbfn)
            device->ready_cbfn(device->ready_cbarg,
                        BNA_CB_INTERRUPT);
        device->ready_cbfn = NULL;
        device->ready_cbarg = NULL;
        bfa_fsm_set_state(device, bna_device_sm_ioc_disable_wait);
        break;

    case DEVICE_E_IOC_READY:
        bfa_fsm_set_state(device, bna_device_sm_ready);
        break;

    case DEVICE_E_IOC_FAILED:
        bfa_fsm_set_state(device, bna_device_sm_failed);
        break;

    case DEVICE_E_IOC_RESET:
        enable_mbox_intr(device);
        break;

    default:
        bfa_sm_fault(device->bna, event);
    }
}

static void
bna_device_sm_ready_entry(struct bna_device *device)
{
    bna_mbox_mod_start(&device->bna->mbox_mod);
    bna_port_start(&device->bna->port);

    if (device->ready_cbfn)
        device->ready_cbfn(device->ready_cbarg,
                    BNA_CB_SUCCESS);
    device->ready_cbfn = NULL;
    device->ready_cbarg = NULL;
}

static void
bna_device_sm_ready(struct bna_device *device, enum bna_device_event event)
{
    switch (event) {
    case DEVICE_E_DISABLE:
        bfa_fsm_set_state(device, bna_device_sm_port_stop_wait);
        break;

    case DEVICE_E_IOC_FAILED:
        bfa_fsm_set_state(device, bna_device_sm_failed);
        break;

    default:
        bfa_sm_fault(device->bna, event);
    }
}

static void
bna_device_sm_port_stop_wait_entry(struct bna_device *device)
{
    bna_port_stop(&device->bna->port);
}

static void
bna_device_sm_port_stop_wait(struct bna_device *device,
            enum bna_device_event event)
{
    switch (event) {
    case DEVICE_E_PORT_STOPPED:
        bna_mbox_mod_stop(&device->bna->mbox_mod);
        bfa_fsm_set_state(device, bna_device_sm_ioc_disable_wait);
        break;

    case DEVICE_E_IOC_FAILED:
        disable_mbox_intr(device);
        bna_port_fail(&device->bna->port);
        break;

    default:
        bfa_sm_fault(device->bna, event);
    }
}

static void
bna_device_sm_ioc_disable_wait_entry(struct bna_device *device)
{
    bfa_nw_ioc_disable(&device->ioc);
}

static void
bna_device_sm_ioc_disable_wait(struct bna_device *device,
            enum bna_device_event event)
{
    switch (event) {
    case DEVICE_E_IOC_DISABLED:
        disable_mbox_intr(device);
        bfa_fsm_set_state(device, bna_device_sm_stopped);
        break;

    default:
        bfa_sm_fault(device->bna, event);
    }
}

static void
bna_device_sm_failed_entry(struct bna_device *device)
{
    disable_mbox_intr(device);
    bna_port_fail(&device->bna->port);
    bna_mbox_mod_stop(&device->bna->mbox_mod);

    if (device->ready_cbfn)
        device->ready_cbfn(device->ready_cbarg,
                    BNA_CB_FAIL);
    device->ready_cbfn = NULL;
    device->ready_cbarg = NULL;
}

static void
bna_device_sm_failed(struct bna_device *device,
            enum bna_device_event event)
{
    switch (event) {
    case DEVICE_E_DISABLE:
        bfa_fsm_set_state(device, bna_device_sm_ioc_disable_wait);
        break;

    case DEVICE_E_IOC_RESET:
        enable_mbox_intr(device);
        bfa_fsm_set_state(device, bna_device_sm_ioc_ready_wait);
        break;

    default:
        bfa_sm_fault(device->bna, event);
    }
}
/* IOC callback functions */

static void
bna_device_cb_iocll_ready(void *dev, enum bfa_status error)
{
    struct bna_device *device = (struct bna_device *)dev;

    if (error)
        bfa_fsm_send_event(device, DEVICE_E_IOC_FAILED);
    else
        bfa_fsm_send_event(device, DEVICE_E_IOC_READY);
}

static void
bna_device_cb_iocll_disabled(void *dev)
{
    struct bna_device *device = (struct bna_device *)dev;

    bfa_fsm_send_event(device, DEVICE_E_IOC_DISABLED);
}

static void
bna_device_cb_iocll_failed(void *dev)
{
    struct bna_device *device = (struct bna_device *)dev;

    bfa_fsm_send_event(device, DEVICE_E_IOC_FAILED);
}

static void
bna_device_cb_iocll_reset(void *dev)
{
    struct bna_device *device = (struct bna_device *)dev;

    bfa_fsm_send_event(device, DEVICE_E_IOC_RESET);
}

static struct bfa_ioc_cbfn bfa_iocll_cbfn = {
    bna_device_cb_iocll_ready,
    bna_device_cb_iocll_disabled,
    bna_device_cb_iocll_failed,
    bna_device_cb_iocll_reset
};

void
bna_device_init(struct bna_device *device, struct bna *bna,
        struct bna_res_info *res_info)
{
    u64 dma;

    device->bna = bna;

    /*
     * Attach IOC and claim:
     *	1. DMA memory for IOC attributes
     *	2. Kernel memory for FW trace
     */
    bfa_nw_ioc_attach(&device->ioc, device, &bfa_iocll_cbfn);
    bfa_nw_ioc_pci_init(&device->ioc, &bna->pcidev, BFI_MC_LL);

    BNA_GET_DMA_ADDR(
        &res_info[BNA_RES_MEM_T_ATTR].res_u.mem_info.mdl[0].dma, dma);
    bfa_nw_ioc_mem_claim(&device->ioc,
        res_info[BNA_RES_MEM_T_ATTR].res_u.mem_info.mdl[0].kva,
        dma);

    bna_adv_device_init(device, bna, res_info);
    /*
     * Initialize mbox_mod only after IOC, so that mbox handler
     * registration goes through
     */
    device->intr_type =
        res_info[BNA_RES_INTR_T_MBOX].res_u.intr_info.intr_type;
    device->vector =
        res_info[BNA_RES_INTR_T_MBOX].res_u.intr_info.idl[0].vector;
    bna_mbox_mod_init(&bna->mbox_mod, bna);

    device->ready_cbfn = device->stop_cbfn = NULL;
    device->ready_cbarg = device->stop_cbarg = NULL;

    bfa_fsm_set_state(device, bna_device_sm_stopped);
}

void
bna_device_uninit(struct bna_device *device)
{
    bna_mbox_mod_uninit(&device->bna->mbox_mod);

    bfa_nw_ioc_detach(&device->ioc);

    device->bna = NULL;
}

void
bna_device_cb_port_stopped(void *arg, enum bna_cb_status status)
{
    struct bna_device *device = (struct bna_device *)arg;

    bfa_fsm_send_event(device, DEVICE_E_PORT_STOPPED);
}

static int
bna_device_status_get(struct bna_device *device)
{
    return device->fsm == (bfa_fsm_t)bna_device_sm_ready;
}

void
bna_device_enable(struct bna_device *device)
{
    if (device->fsm != (bfa_fsm_t)bna_device_sm_stopped) {
        bnad_cb_device_enabled(device->bna->bnad, BNA_CB_BUSY);
        return;
    }

    device->ready_cbfn = bnad_cb_device_enabled;
    device->ready_cbarg = device->bna->bnad;

    bfa_fsm_send_event(device, DEVICE_E_ENABLE);
}

void
bna_device_disable(struct bna_device *device, enum bna_cleanup_type type)
{
    if (type == BNA_SOFT_CLEANUP) {
        bnad_cb_device_disabled(device->bna->bnad, BNA_CB_SUCCESS);
        return;
    }

    device->stop_cbfn = bnad_cb_device_disabled;
    device->stop_cbarg = device->bna->bnad;

    bfa_fsm_send_event(device, DEVICE_E_DISABLE);
}

int
bna_device_state_get(struct bna_device *device)
{
    return bfa_sm_to_state(device_sm_table, device->fsm);
}
u32 bna_dim_vector[BNA_LOAD_T_MAX][BNA_BIAS_T_MAX] = {
    /* table entries elided in this extract */
};

u32 bna_napi_dim_vector[BNA_LOAD_T_MAX][BNA_BIAS_T_MAX] = {
    /* table entries elided in this extract */
};
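/*
 * Dynamic interrupt moderation lookup tables, indexed by measured load
 * level (BNA_LOAD_T_*) and small/large-packet bias (BNA_BIAS_T_*); the
 * stored values are interrupt coalescing timeouts, one table for the
 * default path and one for NAPI.
 */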
static void
bna_adv_device_init(struct bna_device *device, struct bna *bna,
        struct bna_res_info *res_info)
{
    u8 *kva;
    u64 dma;

    kva = res_info[BNA_RES_MEM_T_FWTRC].res_u.mem_info.mdl[0].kva;

    /*
     * Attach common modules (Diag, SFP, CEE, Port) and claim respective
     * memory
     */
    BNA_GET_DMA_ADDR(
        &res_info[BNA_RES_MEM_T_COM].res_u.mem_info.mdl[0].dma, dma);
    kva = res_info[BNA_RES_MEM_T_COM].res_u.mem_info.mdl[0].kva;

    bfa_nw_cee_attach(&bna->cee, &device->ioc, bna);
    bfa_nw_cee_mem_claim(&bna->cee, kva, dma);
    kva += bfa_nw_cee_meminfo();
    dma += bfa_nw_cee_meminfo();
}

void
bna_adv_res_req(struct bna_res_info *res_info)
{
    /* DMA memory for COMMON_MODULE */
    res_info[BNA_RES_MEM_T_COM].res_type = BNA_RES_T_MEM;
    res_info[BNA_RES_MEM_T_COM].res_u.mem_info.mem_type = BNA_MEM_T_DMA;
    res_info[BNA_RES_MEM_T_COM].res_u.mem_info.num = 1;
    res_info[BNA_RES_MEM_T_COM].res_u.mem_info.len = ALIGN(
                bfa_nw_cee_meminfo(), PAGE_SIZE);

    /* Virtual memory for retrieving fw_trc */
    res_info[BNA_RES_MEM_T_FWTRC].res_type = BNA_RES_T_MEM;
    res_info[BNA_RES_MEM_T_FWTRC].res_u.mem_info.mem_type = BNA_MEM_T_KVA;
    res_info[BNA_RES_MEM_T_FWTRC].res_u.mem_info.num = 0;
    res_info[BNA_RES_MEM_T_FWTRC].res_u.mem_info.len = 0;

    /* DMA memory for retrieving stats */
    res_info[BNA_RES_MEM_T_STATS].res_type = BNA_RES_T_MEM;
    res_info[BNA_RES_MEM_T_STATS].res_u.mem_info.mem_type = BNA_MEM_T_DMA;
    res_info[BNA_RES_MEM_T_STATS].res_u.mem_info.num = 1;
    res_info[BNA_RES_MEM_T_STATS].res_u.mem_info.len =
        ALIGN(BFI_HW_STATS_SIZE, PAGE_SIZE);

    /* Virtual memory for soft stats */
    res_info[BNA_RES_MEM_T_SWSTATS].res_type = BNA_RES_T_MEM;
    res_info[BNA_RES_MEM_T_SWSTATS].res_u.mem_info.mem_type = BNA_MEM_T_KVA;
    res_info[BNA_RES_MEM_T_SWSTATS].res_u.mem_info.num = 1;
    res_info[BNA_RES_MEM_T_SWSTATS].res_u.mem_info.len =
        sizeof(struct bna_sw_stats);
}
static void
bna_sw_stats_get(struct bna *bna, struct bna_sw_stats *sw_stats)
{
    struct bna_tx *tx;
    struct bna_txq *txq;
    struct bna_rx *rx;
    struct bna_rxp *rxp;
    struct list_head *qe;
    struct list_head *txq_qe;
    struct list_head *rxp_qe;
    struct list_head *mac_qe;
    int i;

    sw_stats->device_state = bna_device_state_get(&bna->device);
    sw_stats->port_state = bna_port_state_get(&bna->port);
    sw_stats->port_flags = bna->port.flags;
    sw_stats->llport_state = bna_llport_state_get(&bna->port.llport);
    sw_stats->priority = bna->port.priority;

    i = 0;
    list_for_each(qe, &bna->tx_mod.tx_active_q) {
        tx = (struct bna_tx *)qe;
        sw_stats->tx_stats[i].tx_state = bna_tx_state_get(tx);
        sw_stats->tx_stats[i].tx_flags = tx->flags;

        sw_stats->tx_stats[i].num_txqs = 0;
        sw_stats->tx_stats[i].txq_bmap[0] = 0;
        sw_stats->tx_stats[i].txq_bmap[1] = 0;
        list_for_each(txq_qe, &tx->txq_q) {
            txq = (struct bna_txq *)txq_qe;
            if (txq->txq_id < 32)
                sw_stats->tx_stats[i].txq_bmap[0] |=
                        ((u32)1 << txq->txq_id);
            else
                sw_stats->tx_stats[i].txq_bmap[1] |=
                        ((u32)1 << (txq->txq_id - 32));
            sw_stats->tx_stats[i].num_txqs++;
        }

        sw_stats->tx_stats[i].txf_id = tx->txf.txf_id;

        i++;
    }
    sw_stats->num_active_tx = i;

    i = 0;
    list_for_each(qe, &bna->rx_mod.rx_active_q) {
        rx = (struct bna_rx *)qe;
        sw_stats->rx_stats[i].rx_state = bna_rx_state_get(rx);
        sw_stats->rx_stats[i].rx_flags = rx->rx_flags;

        sw_stats->rx_stats[i].num_rxps = 0;
        sw_stats->rx_stats[i].num_rxqs = 0;
        sw_stats->rx_stats[i].rxq_bmap[0] = 0;
        sw_stats->rx_stats[i].rxq_bmap[1] = 0;
        sw_stats->rx_stats[i].cq_bmap[0] = 0;
        sw_stats->rx_stats[i].cq_bmap[1] = 0;
        list_for_each(rxp_qe, &rx->rxp_q) {
            rxp = (struct bna_rxp *)rxp_qe;

            sw_stats->rx_stats[i].num_rxqs += 1;

            if (rxp->type == BNA_RXP_SINGLE) {
                if (rxp->rxq.single.only->rxq_id < 32) {
                    sw_stats->rx_stats[i].rxq_bmap[0] |=
                        ((u32)1 <<
                        rxp->rxq.single.only->rxq_id);
                } else {
                    sw_stats->rx_stats[i].rxq_bmap[1] |=
                        ((u32)1 <<
                        (rxp->rxq.single.only->rxq_id - 32));
                }
            } else {
                if (rxp->rxq.slr.large->rxq_id < 32) {
                    sw_stats->rx_stats[i].rxq_bmap[0] |=
                        ((u32)1 <<
                        rxp->rxq.slr.large->rxq_id);
                } else {
                    sw_stats->rx_stats[i].rxq_bmap[1] |=
                        ((u32)1 <<
                        (rxp->rxq.slr.large->rxq_id - 32));
                }

                if (rxp->rxq.slr.small->rxq_id < 32) {
                    sw_stats->rx_stats[i].rxq_bmap[0] |=
                        ((u32)1 <<
                        rxp->rxq.slr.small->rxq_id);
                } else {
                    sw_stats->rx_stats[i].rxq_bmap[1] |=
                        ((u32)1 <<
                        (rxp->rxq.slr.small->rxq_id - 32));
                }
                sw_stats->rx_stats[i].num_rxqs += 1;
            }

            if (rxp->cq.cq_id < 32)
                sw_stats->rx_stats[i].cq_bmap[0] |=
                    (1 << rxp->cq.cq_id);
            else
                sw_stats->rx_stats[i].cq_bmap[1] |=
                    (1 << (rxp->cq.cq_id - 32));

            sw_stats->rx_stats[i].num_rxps++;
        }

        sw_stats->rx_stats[i].rxf_id = rx->rxf.rxf_id;
        sw_stats->rx_stats[i].rxf_state = bna_rxf_state_get(&rx->rxf);
        sw_stats->rx_stats[i].rxf_oper_state = rx->rxf.rxf_oper_state;

        sw_stats->rx_stats[i].num_active_ucast = 0;
        if (rx->rxf.ucast_active_mac)
            sw_stats->rx_stats[i].num_active_ucast++;
        list_for_each(mac_qe, &rx->rxf.ucast_active_q)
            sw_stats->rx_stats[i].num_active_ucast++;

        sw_stats->rx_stats[i].num_active_mcast = 0;
        list_for_each(mac_qe, &rx->rxf.mcast_active_q)
            sw_stats->rx_stats[i].num_active_mcast++;

        sw_stats->rx_stats[i].rxmode_active = rx->rxf.rxmode_active;
        sw_stats->rx_stats[i].vlan_filter_status =
            rx->rxf.vlan_filter_status;
        memcpy(sw_stats->rx_stats[i].vlan_filter_table,
            rx->rxf.vlan_filter_table,
            sizeof(u32) * ((BFI_MAX_VLAN + 1) / 32));

        sw_stats->rx_stats[i].rss_status = rx->rxf.rss_status;
        sw_stats->rx_stats[i].hds_status = rx->rxf.hds_status;

        i++;
    }
    sw_stats->num_active_rx = i;
}
static void
bna_fw_cb_stats_get(void *arg, int status)
{
    struct bna *bna = (struct bna *)arg;
    u64 *p_stats;
    int i, count;
    int rxf_count, txf_count;
    u64 rxf_bmap, txf_bmap;

    bfa_q_qe_init(&bna->mbox_qe.qe);

    if (status == 0) {
        p_stats = (u64 *)bna->stats.hw_stats;
        count = sizeof(struct bfi_ll_stats) / sizeof(u64);
        for (i = 0; i < count; i++)
            p_stats[i] = cpu_to_be64(p_stats[i]);

        rxf_count = 0;
        rxf_bmap = (u64)bna->stats.rxf_bmap[0] |
            ((u64)bna->stats.rxf_bmap[1] << 32);
        for (i = 0; i < BFI_LL_RXF_ID_MAX; i++)
            if (rxf_bmap & ((u64)1 << i))
                rxf_count++;

        txf_count = 0;
        txf_bmap = (u64)bna->stats.txf_bmap[0] |
            ((u64)bna->stats.txf_bmap[1] << 32);
        for (i = 0; i < BFI_LL_TXF_ID_MAX; i++)
            if (txf_bmap & ((u64)1 << i))
                txf_count++;

        p_stats = (u64 *)&bna->stats.hw_stats->rxf_stats[0] +
            ((rxf_count * sizeof(struct bfi_ll_stats_rxf) +
            txf_count * sizeof(struct bfi_ll_stats_txf)) /
            sizeof(u64));

        /* Populate the TXF stats from the firmware DMAed copy */
        for (i = (BFI_LL_TXF_ID_MAX - 1); i >= 0; i--)
            if (txf_bmap & ((u64)1 << i)) {
                p_stats -= sizeof(struct bfi_ll_stats_txf) /
                        sizeof(u64);
                memcpy(&bna->stats.hw_stats->txf_stats[i],
                    p_stats,
                    sizeof(struct bfi_ll_stats_txf));
            }

        /* Populate the RXF stats from the firmware DMAed copy */
        for (i = (BFI_LL_RXF_ID_MAX - 1); i >= 0; i--)
            if (rxf_bmap & ((u64)1 << i)) {
                p_stats -= sizeof(struct bfi_ll_stats_rxf) /
                        sizeof(u64);
                memcpy(&bna->stats.hw_stats->rxf_stats[i],
                    p_stats,
                    sizeof(struct bfi_ll_stats_rxf));
            }

        bna_sw_stats_get(bna, bna->stats.sw_stats);
        bnad_cb_stats_get(bna->bnad, BNA_CB_SUCCESS, &bna->stats);
    } else
        bnad_cb_stats_get(bna->bnad, BNA_CB_FAIL, &bna->stats);
}
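/*
 * Kick off a firmware stats DMA.  The rxf/txf id masks are snapshotted
 * into bna->stats after the request is queued, so the completion
 * handler above unpacks the reply against the exact bitmaps that were
 * requested even if the active sets change in the meantime.
 */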
static void
bna_fw_stats_get(struct bna *bna)
{
    struct bfi_ll_stats_req ll_req;

    bfi_h2i_set(ll_req.mh, BFI_MC_LL, BFI_LL_H2I_STATS_GET_REQ, 0);
    ll_req.stats_mask = htons(BFI_LL_STATS_ALL);

    ll_req.rxf_id_mask[0] = htonl(bna->rx_mod.rxf_bmap[0]);
    ll_req.rxf_id_mask[1] = htonl(bna->rx_mod.rxf_bmap[1]);
    ll_req.txf_id_mask[0] = htonl(bna->tx_mod.txf_bmap[0]);
    ll_req.txf_id_mask[1] = htonl(bna->tx_mod.txf_bmap[1]);

    ll_req.host_buffer.a32.addr_hi = bna->hw_stats_dma.msb;
    ll_req.host_buffer.a32.addr_lo = bna->hw_stats_dma.lsb;

    bna_mbox_qe_fill(&bna->mbox_qe, &ll_req, sizeof(ll_req),
                bna_fw_cb_stats_get, bna);
    bna_mbox_send(bna, &bna->mbox_qe);

    bna->stats.rxf_bmap[0] = bna->rx_mod.rxf_bmap[0];
    bna->stats.rxf_bmap[1] = bna->rx_mod.rxf_bmap[1];
    bna->stats.txf_bmap[0] = bna->tx_mod.txf_bmap[0];
    bna->stats.txf_bmap[1] = bna->tx_mod.txf_bmap[1];
}

static void
bna_fw_cb_stats_clr(void *arg, int status)
{
    struct bna *bna = (struct bna *)arg;

    bfa_q_qe_init(&bna->mbox_qe.qe);

    memset(bna->stats.sw_stats, 0, sizeof(struct bna_sw_stats));
    memset(bna->stats.hw_stats, 0, sizeof(struct bfi_ll_stats));

    bnad_cb_stats_clr(bna->bnad);
}

static void
bna_fw_stats_clr(struct bna *bna)
{
    struct bfi_ll_stats_req ll_req;

    bfi_h2i_set(ll_req.mh, BFI_MC_LL, BFI_LL_H2I_STATS_CLEAR_REQ, 0);
    ll_req.stats_mask = htons(BFI_LL_STATS_ALL);
    ll_req.rxf_id_mask[0] = htonl(0xffffffff);
    ll_req.rxf_id_mask[1] = htonl(0xffffffff);
    ll_req.txf_id_mask[0] = htonl(0xffffffff);
    ll_req.txf_id_mask[1] = htonl(0xffffffff);

    bna_mbox_qe_fill(&bna->mbox_qe, &ll_req, sizeof(ll_req),
                bna_fw_cb_stats_clr, bna);
    bna_mbox_send(bna, &bna->mbox_qe);
}

void
bna_stats_get(struct bna *bna)
{
    if (bna_device_status_get(&bna->device))
        bna_fw_stats_get(bna);
    else
        bnad_cb_stats_get(bna->bnad, BNA_CB_FAIL, &bna->stats);
}

void
bna_stats_clr(struct bna *bna)
{
    if (bna_device_status_get(&bna->device))
        bna_fw_stats_clr(bna);
    else {
        /* Clear the buffers directly, matching bna_fw_cb_stats_clr() */
        memset(bna->stats.sw_stats, 0,
            sizeof(struct bna_sw_stats));
        memset(bna->stats.hw_stats, 0,
            sizeof(struct bfi_ll_stats));
        bnad_cb_stats_clr(bna->bnad);
    }
}
/**
 * IB
 */
void
bna_ib_coalescing_timeo_set(struct bna_ib *ib, u8 coalescing_timeo)
{
    ib->ib_config.coalescing_timeo = coalescing_timeo;

    if (ib->start_count)
        ib->door_bell.doorbell_ack = BNA_DOORBELL_IB_INT_ACK(
                (u32)ib->ib_config.coalescing_timeo, 0);
}

/**
 * RXF
 */
void
bna_rxf_adv_init(struct bna_rxf *rxf,
        struct bna_rx *rx,
        struct bna_rx_config *q_config)
{
    switch (q_config->rxp_type) {
    case BNA_RXP_SINGLE:
        /* No-op */
        break;
    case BNA_RXP_SLR:
        rxf->ctrl_flags |= BNA_RXF_CF_SM_LG_RXQ;
        break;
    case BNA_RXP_HDS:
        rxf->hds_cfg.hdr_type = q_config->hds_config.hdr_type;
        rxf->hds_cfg.header_size =
                q_config->hds_config.header_size;
        rxf->forced_offset = 0;
        break;
    default:
        break;
    }

    if (q_config->rss_status == BNA_STATUS_T_ENABLED) {
        rxf->ctrl_flags |= BNA_RXF_CF_RSS_ENABLE;
        rxf->rss_cfg.hash_type = q_config->rss_config.hash_type;
        rxf->rss_cfg.hash_mask = q_config->rss_config.hash_mask;
        memcpy(&rxf->rss_cfg.toeplitz_hash_key[0],
            &q_config->rss_config.toeplitz_hash_key[0],
            sizeof(rxf->rss_cfg.toeplitz_hash_key));
    }
}

static void
rxf_fltr_mbox_cmd(struct bna_rxf *rxf, u8 cmd, enum bna_status status)
{
    struct bfi_ll_rxf_req req;

    bfi_h2i_set(req.mh, BFI_MC_LL, cmd, 0);

    req.rxf_id = rxf->rxf_id;
    req.enable = status;

    bna_mbox_qe_fill(&rxf->mbox_qe, &req, sizeof(req),
                rxf_cb_cam_fltr_mbox_cmd, rxf);
    bna_mbox_send(rxf->rx->bna, &rxf->mbox_qe);
}
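/*
 * The "default function" receives traffic that matches no other RxF.
 * Enabling it flips BNA_RXF_CF_DEFAULT_FUNCTION_ENABLE in the on-chip
 * RX function database entry of every *other* function, redirecting
 * their VLAN filtering to this RxF; disabling reverts the bit.
 */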
static void
__rxf_default_function_config(struct bna_rxf *rxf, enum bna_status status)
{
    struct bna_rx_fndb_ram *rx_fndb_ram;
    u32 ctrl_flags;
    int i;

    rx_fndb_ram = (struct bna_rx_fndb_ram *)
            BNA_GET_MEM_BASE_ADDR(rxf->rx->bna->pcidev.pci_bar_kva,
                    RX_FNDB_RAM_BASE_OFFSET);

    for (i = 0; i < BFI_MAX_RXF; i++) {
        if (status == BNA_STATUS_T_ENABLED) {
            if (i == rxf->rxf_id)
                continue;

            ctrl_flags =
                readl(&rx_fndb_ram[i].control_flags);
            ctrl_flags |= BNA_RXF_CF_DEFAULT_FUNCTION_ENABLE;
            writel(ctrl_flags,
                &rx_fndb_ram[i].control_flags);
        } else {
            ctrl_flags =
                readl(&rx_fndb_ram[i].control_flags);
            ctrl_flags &= ~BNA_RXF_CF_DEFAULT_FUNCTION_ENABLE;
            writel(ctrl_flags,
                &rx_fndb_ram[i].control_flags);
        }
    }
}

int
rxf_process_packet_filter_ucast(struct bna_rxf *rxf)
{
    struct bna_mac *mac = NULL;
    struct list_head *qe;

    /* Add additional MAC entries */
    if (!list_empty(&rxf->ucast_pending_add_q)) {
        bfa_q_deq(&rxf->ucast_pending_add_q, &qe);
        bfa_q_qe_init(qe);
        mac = (struct bna_mac *)qe;
        rxf_cam_mbox_cmd(rxf, BFI_LL_H2I_MAC_UCAST_ADD_REQ, mac);
        list_add_tail(&mac->qe, &rxf->ucast_active_q);
        return 1;
    }

    /* Delete MAC addresses previously added */
    if (!list_empty(&rxf->ucast_pending_del_q)) {
        bfa_q_deq(&rxf->ucast_pending_del_q, &qe);
        bfa_q_qe_init(qe);
        mac = (struct bna_mac *)qe;
        rxf_cam_mbox_cmd(rxf, BFI_LL_H2I_MAC_UCAST_DEL_REQ, mac);
        bna_ucam_mod_mac_put(&rxf->rx->bna->ucam_mod, mac);
        return 1;
    }

    return 0;
}

int
rxf_process_packet_filter_promisc(struct bna_rxf *rxf)
{
    struct bna *bna = rxf->rx->bna;

    /* Enable/disable promiscuous mode */
    if (is_promisc_enable(rxf->rxmode_pending,
                rxf->rxmode_pending_bitmask)) {
        /* move promisc configuration from pending -> active */
        promisc_inactive(rxf->rxmode_pending,
                rxf->rxmode_pending_bitmask);
        rxf->rxmode_active |= BNA_RXMODE_PROMISC;

        /* Disable VLAN filter to allow all VLANs */
        __rxf_vlan_filter_set(rxf, BNA_STATUS_T_DISABLED);
        rxf_fltr_mbox_cmd(rxf, BFI_LL_H2I_RXF_PROMISCUOUS_SET_REQ,
                BNA_STATUS_T_ENABLED);
        return 1;
    } else if (is_promisc_disable(rxf->rxmode_pending,
                rxf->rxmode_pending_bitmask)) {
        /* move promisc configuration from pending -> active */
        promisc_inactive(rxf->rxmode_pending,
                rxf->rxmode_pending_bitmask);
        rxf->rxmode_active &= ~BNA_RXMODE_PROMISC;
        bna->rxf_promisc_id = BFI_MAX_RXF;

        /* Revert VLAN filter */
        __rxf_vlan_filter_set(rxf, rxf->vlan_filter_status);
        rxf_fltr_mbox_cmd(rxf, BFI_LL_H2I_RXF_PROMISCUOUS_SET_REQ,
                BNA_STATUS_T_DISABLED);
        return 1;
    }

    return 0;
}

int
rxf_process_packet_filter_default(struct bna_rxf *rxf)
{
    struct bna *bna = rxf->rx->bna;

    /* Enable/disable default mode */
    if (is_default_enable(rxf->rxmode_pending,
                rxf->rxmode_pending_bitmask)) {
        /* move default configuration from pending -> active */
        default_inactive(rxf->rxmode_pending,
                rxf->rxmode_pending_bitmask);
        rxf->rxmode_active |= BNA_RXMODE_DEFAULT;

        /* Disable VLAN filter to allow all VLANs */
        __rxf_vlan_filter_set(rxf, BNA_STATUS_T_DISABLED);
        /* Redirect all other RxF vlan filtering to this one */
        __rxf_default_function_config(rxf, BNA_STATUS_T_ENABLED);
        rxf_fltr_mbox_cmd(rxf, BFI_LL_H2I_RXF_DEFAULT_SET_REQ,
                BNA_STATUS_T_ENABLED);
        return 1;
    } else if (is_default_disable(rxf->rxmode_pending,
                rxf->rxmode_pending_bitmask)) {
        /* move default configuration from pending -> active */
        default_inactive(rxf->rxmode_pending,
                rxf->rxmode_pending_bitmask);
        rxf->rxmode_active &= ~BNA_RXMODE_DEFAULT;
        bna->rxf_default_id = BFI_MAX_RXF;

        /* Revert VLAN filter */
        __rxf_vlan_filter_set(rxf, rxf->vlan_filter_status);
        /* Stop RxF vlan filter table redirection */
        __rxf_default_function_config(rxf, BNA_STATUS_T_DISABLED);
        rxf_fltr_mbox_cmd(rxf, BFI_LL_H2I_RXF_DEFAULT_SET_REQ,
                BNA_STATUS_T_DISABLED);
        return 1;
    }

    return 0;
}

int
rxf_process_packet_filter_allmulti(struct bna_rxf *rxf)
{
    /* Enable/disable allmulti mode */
    if (is_allmulti_enable(rxf->rxmode_pending,
                rxf->rxmode_pending_bitmask)) {
        /* move allmulti configuration from pending -> active */
        allmulti_inactive(rxf->rxmode_pending,
                rxf->rxmode_pending_bitmask);
        rxf->rxmode_active |= BNA_RXMODE_ALLMULTI;

        rxf_fltr_mbox_cmd(rxf, BFI_LL_H2I_MAC_MCAST_FILTER_REQ,
                BNA_STATUS_T_ENABLED);
        return 1;
    } else if (is_allmulti_disable(rxf->rxmode_pending,
                rxf->rxmode_pending_bitmask)) {
        /* move allmulti configuration from pending -> active */
        allmulti_inactive(rxf->rxmode_pending,
                rxf->rxmode_pending_bitmask);
        rxf->rxmode_active &= ~BNA_RXMODE_ALLMULTI;

        rxf_fltr_mbox_cmd(rxf, BFI_LL_H2I_MAC_MCAST_FILTER_REQ,
                BNA_STATUS_T_DISABLED);
        return 1;
    }

    return 0;
}
2326 rxf_clear_packet_filter_ucast(struct bna_rxf
*rxf
)
2328 struct bna_mac
*mac
= NULL
;
2329 struct list_head
*qe
;
2331 /* 1. delete pending ucast entries */
2332 if (!list_empty(&rxf
->ucast_pending_del_q
)) {
2333 bfa_q_deq(&rxf
->ucast_pending_del_q
, &qe
);
2335 mac
= (struct bna_mac
*)qe
;
2336 rxf_cam_mbox_cmd(rxf
, BFI_LL_H2I_MAC_UCAST_DEL_REQ
, mac
);
2337 bna_ucam_mod_mac_put(&rxf
->rx
->bna
->ucam_mod
, mac
);
2341 /* 2. clear active ucast entries; move them to pending_add_q */
2342 if (!list_empty(&rxf
->ucast_active_q
)) {
2343 bfa_q_deq(&rxf
->ucast_active_q
, &qe
);
2345 mac
= (struct bna_mac
*)qe
;
2346 rxf_cam_mbox_cmd(rxf
, BFI_LL_H2I_MAC_UCAST_DEL_REQ
, mac
);
2347 list_add_tail(&mac
->qe
, &rxf
->ucast_pending_add_q
);
2355 rxf_clear_packet_filter_promisc(struct bna_rxf
*rxf
)
2357 struct bna
*bna
= rxf
->rx
->bna
;
2359 /* 6. Execute pending promisc mode disable command */
2360 if (is_promisc_disable(rxf
->rxmode_pending
,
2361 rxf
->rxmode_pending_bitmask
)) {
2362 /* move promisc configuration from pending -> active */
2363 promisc_inactive(rxf
->rxmode_pending
,
2364 rxf
->rxmode_pending_bitmask
);
2365 rxf
->rxmode_active
&= ~BNA_RXMODE_PROMISC
;
2366 bna
->rxf_promisc_id
= BFI_MAX_RXF
;
2368 /* Revert VLAN filter */
2369 __rxf_vlan_filter_set(rxf
, rxf
->vlan_filter_status
);
2370 rxf_fltr_mbox_cmd(rxf
, BFI_LL_H2I_RXF_PROMISCUOUS_SET_REQ
,
2371 BNA_STATUS_T_DISABLED
);
2375 /* 7. Clear active promisc mode; move it to pending enable */
2376 if (rxf
->rxmode_active
& BNA_RXMODE_PROMISC
) {
2377 /* move promisc configuration from active -> pending */
2378 promisc_enable(rxf
->rxmode_pending
,
2379 rxf
->rxmode_pending_bitmask
);
2380 rxf
->rxmode_active
&= ~BNA_RXMODE_PROMISC
;
2382 /* Revert VLAN filter */
2383 __rxf_vlan_filter_set(rxf
, rxf
->vlan_filter_status
);
2384 rxf_fltr_mbox_cmd(rxf
, BFI_LL_H2I_RXF_PROMISCUOUS_SET_REQ
,
2385 BNA_STATUS_T_DISABLED
);
int
rxf_clear_packet_filter_default(struct bna_rxf *rxf)
{
	struct bna *bna = rxf->rx->bna;

	/* 8. Execute pending default mode disable command */
	if (is_default_disable(rxf->rxmode_pending,
				rxf->rxmode_pending_bitmask)) {
		/* move default configuration from pending -> active */
		default_inactive(rxf->rxmode_pending,
				rxf->rxmode_pending_bitmask);
		rxf->rxmode_active &= ~BNA_RXMODE_DEFAULT;
		bna->rxf_default_id = BFI_MAX_RXF;

		/* Revert VLAN filter */
		__rxf_vlan_filter_set(rxf, rxf->vlan_filter_status);
		/* Stop RxF vlan filter table redirection */
		__rxf_default_function_config(rxf, BNA_STATUS_T_DISABLED);
		rxf_fltr_mbox_cmd(rxf, BFI_LL_H2I_RXF_DEFAULT_SET_REQ,
				BNA_STATUS_T_DISABLED);
		return 1;
	}

	/* 9. Clear active default mode; move it to pending enable */
	if (rxf->rxmode_active & BNA_RXMODE_DEFAULT) {
		/* move default configuration from active -> pending */
		default_enable(rxf->rxmode_pending,
				rxf->rxmode_pending_bitmask);
		rxf->rxmode_active &= ~BNA_RXMODE_DEFAULT;

		/* Revert VLAN filter */
		__rxf_vlan_filter_set(rxf, rxf->vlan_filter_status);
		/* Stop RxF vlan filter table redirection */
		__rxf_default_function_config(rxf, BNA_STATUS_T_DISABLED);
		rxf_fltr_mbox_cmd(rxf, BFI_LL_H2I_RXF_DEFAULT_SET_REQ,
				BNA_STATUS_T_DISABLED);
		return 1;
	}

	return 0;
}
int
rxf_clear_packet_filter_allmulti(struct bna_rxf *rxf)
{
	/* 10. Execute pending allmulti mode disable command */
	if (is_allmulti_disable(rxf->rxmode_pending,
				rxf->rxmode_pending_bitmask)) {
		/* move allmulti configuration from pending -> active */
		allmulti_inactive(rxf->rxmode_pending,
				rxf->rxmode_pending_bitmask);
		rxf->rxmode_active &= ~BNA_RXMODE_ALLMULTI;
		rxf_fltr_mbox_cmd(rxf, BFI_LL_H2I_MAC_MCAST_FILTER_REQ,
				BNA_STATUS_T_DISABLED);
		return 1;
	}

	/* 11. Clear active allmulti mode; move it to pending enable */
	if (rxf->rxmode_active & BNA_RXMODE_ALLMULTI) {
		/* move allmulti configuration from active -> pending */
		allmulti_enable(rxf->rxmode_pending,
				rxf->rxmode_pending_bitmask);
		rxf->rxmode_active &= ~BNA_RXMODE_ALLMULTI;
		rxf_fltr_mbox_cmd(rxf, BFI_LL_H2I_MAC_MCAST_FILTER_REQ,
				BNA_STATUS_T_DISABLED);
		return 1;
	}

	return 0;
}
void
rxf_reset_packet_filter_ucast(struct bna_rxf *rxf)
{
	struct list_head *qe;
	struct bna_mac *mac;

	/* 1. Move active ucast entries to pending_add_q */
	while (!list_empty(&rxf->ucast_active_q)) {
		bfa_q_deq(&rxf->ucast_active_q, &qe);
		bfa_q_qe_init(qe);
		list_add_tail(qe, &rxf->ucast_pending_add_q);
	}

	/* 2. Throw away delete pending ucast entries */
	while (!list_empty(&rxf->ucast_pending_del_q)) {
		bfa_q_deq(&rxf->ucast_pending_del_q, &qe);
		bfa_q_qe_init(qe);
		mac = (struct bna_mac *)qe;
		bna_ucam_mod_mac_put(&rxf->rx->bna->ucam_mod, mac);
	}
}
void
rxf_reset_packet_filter_promisc(struct bna_rxf *rxf)
{
	struct bna *bna = rxf->rx->bna;

	/* 6. Clear pending promisc mode disable */
	if (is_promisc_disable(rxf->rxmode_pending,
				rxf->rxmode_pending_bitmask)) {
		promisc_inactive(rxf->rxmode_pending,
				rxf->rxmode_pending_bitmask);
		rxf->rxmode_active &= ~BNA_RXMODE_PROMISC;
		bna->rxf_promisc_id = BFI_MAX_RXF;
	}

	/* 7. Move promisc mode config from active -> pending */
	if (rxf->rxmode_active & BNA_RXMODE_PROMISC) {
		promisc_enable(rxf->rxmode_pending,
				rxf->rxmode_pending_bitmask);
		rxf->rxmode_active &= ~BNA_RXMODE_PROMISC;
	}
}
void
rxf_reset_packet_filter_default(struct bna_rxf *rxf)
{
	struct bna *bna = rxf->rx->bna;

	/* 8. Clear pending default mode disable */
	if (is_default_disable(rxf->rxmode_pending,
				rxf->rxmode_pending_bitmask)) {
		default_inactive(rxf->rxmode_pending,
				rxf->rxmode_pending_bitmask);
		rxf->rxmode_active &= ~BNA_RXMODE_DEFAULT;
		bna->rxf_default_id = BFI_MAX_RXF;
	}

	/* 9. Move default mode config from active -> pending */
	if (rxf->rxmode_active & BNA_RXMODE_DEFAULT) {
		default_enable(rxf->rxmode_pending,
				rxf->rxmode_pending_bitmask);
		rxf->rxmode_active &= ~BNA_RXMODE_DEFAULT;
	}
}
void
rxf_reset_packet_filter_allmulti(struct bna_rxf *rxf)
{
	/* 10. Clear pending allmulti mode disable */
	if (is_allmulti_disable(rxf->rxmode_pending,
				rxf->rxmode_pending_bitmask)) {
		allmulti_inactive(rxf->rxmode_pending,
				rxf->rxmode_pending_bitmask);
		rxf->rxmode_active &= ~BNA_RXMODE_ALLMULTI;
	}

	/* 11. Move allmulti mode config from active -> pending */
	if (rxf->rxmode_active & BNA_RXMODE_ALLMULTI) {
		allmulti_enable(rxf->rxmode_pending,
				rxf->rxmode_pending_bitmask);
		rxf->rxmode_active &= ~BNA_RXMODE_ALLMULTI;
	}
}
/*
 * Should only be called by bna_rxf_mode_set.
 * Helps deciding if h/w configuration is needed or not.
 *  0 = no h/w change
 *  1 = need h/w change
 */
int
rxf_promisc_enable(struct bna_rxf *rxf)
{
	struct bna *bna = rxf->rx->bna;
	int ret = 0;

	/* There can not be any pending disable command */

	/* Do nothing if pending enable or already enabled */
	if (is_promisc_enable(rxf->rxmode_pending,
			rxf->rxmode_pending_bitmask) ||
		(rxf->rxmode_active & BNA_RXMODE_PROMISC)) {
		/* Schedule enable */
	} else {
		/* Promisc mode should not be active in the system */
		promisc_enable(rxf->rxmode_pending,
				rxf->rxmode_pending_bitmask);
		bna->rxf_promisc_id = rxf->rxf_id;
		ret = 1;
	}

	return ret;
}
/*
 * Should only be called by bna_rxf_mode_set.
 * Helps deciding if h/w configuration is needed or not.
 *  0 = no h/w change
 *  1 = need h/w change
 */
int
rxf_promisc_disable(struct bna_rxf *rxf)
{
	struct bna *bna = rxf->rx->bna;
	int ret = 0;

	/* There can not be any pending disable */

	/* Turn off pending enable command, if any */
	if (is_promisc_enable(rxf->rxmode_pending,
			rxf->rxmode_pending_bitmask)) {
		/* Promisc mode should not be active */
		/* system promisc state should be pending */
		promisc_inactive(rxf->rxmode_pending,
				rxf->rxmode_pending_bitmask);
		/* Remove the promisc state from the system */
		bna->rxf_promisc_id = BFI_MAX_RXF;

	/* Schedule disable */
	} else if (rxf->rxmode_active & BNA_RXMODE_PROMISC) {
		/* Promisc mode should be active in the system */
		promisc_disable(rxf->rxmode_pending,
				rxf->rxmode_pending_bitmask);
		ret = 1;
	} else {
		/* Do nothing if already disabled */
	}

	return ret;
}
/*
 * Should only be called by bna_rxf_mode_set.
 * Helps deciding if h/w configuration is needed or not.
 *  0 = no h/w change
 *  1 = need h/w change
 */
int
rxf_default_enable(struct bna_rxf *rxf)
{
	struct bna *bna = rxf->rx->bna;
	int ret = 0;

	/* There can not be any pending disable command */

	/* Do nothing if pending enable or already enabled */
	if (is_default_enable(rxf->rxmode_pending,
			rxf->rxmode_pending_bitmask) ||
		(rxf->rxmode_active & BNA_RXMODE_DEFAULT)) {
		/* Schedule enable */
	} else {
		/* Default mode should not be active in the system */
		default_enable(rxf->rxmode_pending,
				rxf->rxmode_pending_bitmask);
		bna->rxf_default_id = rxf->rxf_id;
		ret = 1;
	}

	return ret;
}
/*
 * Should only be called by bna_rxf_mode_set.
 * Helps deciding if h/w configuration is needed or not.
 *  0 = no h/w change
 *  1 = need h/w change
 */
int
rxf_default_disable(struct bna_rxf *rxf)
{
	struct bna *bna = rxf->rx->bna;
	int ret = 0;

	/* There can not be any pending disable */

	/* Turn off pending enable command, if any */
	if (is_default_enable(rxf->rxmode_pending,
			rxf->rxmode_pending_bitmask)) {
		/* Default mode should not be active */
		/* system default state should be pending */
		default_inactive(rxf->rxmode_pending,
				rxf->rxmode_pending_bitmask);
		/* Remove the default state from the system */
		bna->rxf_default_id = BFI_MAX_RXF;

	/* Schedule disable */
	} else if (rxf->rxmode_active & BNA_RXMODE_DEFAULT) {
		/* Default mode should be active in the system */
		default_disable(rxf->rxmode_pending,
				rxf->rxmode_pending_bitmask);
		ret = 1;
	} else {
		/* Do nothing if already disabled */
	}

	return ret;
}
/*
 * Should only be called by bna_rxf_mode_set.
 * Helps deciding if h/w configuration is needed or not.
 *  0 = no h/w change
 *  1 = need h/w change
 */
int
rxf_allmulti_enable(struct bna_rxf *rxf)
{
	int ret = 0;

	/* There can not be any pending disable command */

	/* Do nothing if pending enable or already enabled */
	if (is_allmulti_enable(rxf->rxmode_pending,
			rxf->rxmode_pending_bitmask) ||
		(rxf->rxmode_active & BNA_RXMODE_ALLMULTI)) {
		/* Schedule enable */
	} else {
		allmulti_enable(rxf->rxmode_pending,
				rxf->rxmode_pending_bitmask);
		ret = 1;
	}

	return ret;
}
/*
 * Should only be called by bna_rxf_mode_set.
 * Helps deciding if h/w configuration is needed or not.
 *  0 = no h/w change
 *  1 = need h/w change
 */
int
rxf_allmulti_disable(struct bna_rxf *rxf)
{
	int ret = 0;

	/* There can not be any pending disable */

	/* Turn off pending enable command, if any */
	if (is_allmulti_enable(rxf->rxmode_pending,
			rxf->rxmode_pending_bitmask)) {
		/* Allmulti mode should not be active */
		allmulti_inactive(rxf->rxmode_pending,
				rxf->rxmode_pending_bitmask);

	/* Schedule disable */
	} else if (rxf->rxmode_active & BNA_RXMODE_ALLMULTI) {
		allmulti_disable(rxf->rxmode_pending,
				rxf->rxmode_pending_bitmask);
		ret = 1;
	}

	return ret;
}
void
bna_rx_mcast_delall(struct bna_rx *rx,
		void (*cbfn)(struct bnad *, struct bna_rx *,
			     enum bna_cb_status))
{
	struct bna_rxf *rxf = &rx->rxf;
	struct list_head *qe;
	struct bna_mac *mac;
	int need_hw_config = 0;

	/* Purge all entries from pending_add_q */
	while (!list_empty(&rxf->mcast_pending_add_q)) {
		bfa_q_deq(&rxf->mcast_pending_add_q, &qe);
		mac = (struct bna_mac *)qe;
		bfa_q_qe_init(&mac->qe);
		bna_mcam_mod_mac_put(&rxf->rx->bna->mcam_mod, mac);
	}

	/* Schedule all entries in active_q for deletion */
	while (!list_empty(&rxf->mcast_active_q)) {
		bfa_q_deq(&rxf->mcast_active_q, &qe);
		mac = (struct bna_mac *)qe;
		bfa_q_qe_init(&mac->qe);
		list_add_tail(&mac->qe, &rxf->mcast_pending_del_q);
		need_hw_config = 1;
	}

	if (need_hw_config) {
		rxf->cam_fltr_cbfn = cbfn;
		rxf->cam_fltr_cbarg = rx->bna->bnad;
		bfa_fsm_send_event(rxf, RXF_E_CAM_FLTR_MOD);
		return;
	}

	if (cbfn)
		(*cbfn)(rx->bna->bnad, rx, BNA_CB_SUCCESS);
}
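/*
 * Editorial usage sketch (added by the editor, not part of the original
 * driver): flushing the multicast CAM from a caller such as the bnad
 * layer. "example_mcast_cb" is a hypothetical completion callback; it is
 * invoked immediately when no h/w change was needed, otherwise after the
 * scheduled deletions complete.
 */
static void __maybe_unused
example_mcast_cb(struct bnad *bnad, struct bna_rx *rx,
		enum bna_cb_status status)
{
	/* completion of the multicast flush would be handled here */
}

static void __maybe_unused
example_flush_mcast(struct bna_rx *rx)
{
	bna_rx_mcast_delall(rx, example_mcast_cb);
}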
void
bna_rx_receive_resume(struct bna_rx *rx,
		void (*cbfn)(struct bnad *, struct bna_rx *,
			     enum bna_cb_status))
{
	struct bna_rxf *rxf = &rx->rxf;

	if (rxf->rxf_oper_state == BNA_RXF_OPER_STATE_PAUSED) {
		rxf->oper_state_cbfn = cbfn;
		rxf->oper_state_cbarg = rx->bna->bnad;
		bfa_fsm_send_event(rxf, RXF_E_RESUME);
	} else if (cbfn)
		(*cbfn)(rx->bna->bnad, rx, BNA_CB_SUCCESS);
}
void
bna_rx_receive_pause(struct bna_rx *rx,
		void (*cbfn)(struct bnad *, struct bna_rx *,
			     enum bna_cb_status))
{
	struct bna_rxf *rxf = &rx->rxf;

	if (rxf->rxf_oper_state == BNA_RXF_OPER_STATE_RUNNING) {
		rxf->oper_state_cbfn = cbfn;
		rxf->oper_state_cbarg = rx->bna->bnad;
		bfa_fsm_send_event(rxf, RXF_E_PAUSE);
	} else if (cbfn)
		(*cbfn)(rx->bna->bnad, rx, BNA_CB_SUCCESS);
}
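/*
 * Editorial usage sketch (added by the editor, not part of the original
 * driver): pause and resume are symmetric, and each fires its callback
 * immediately when the RxF is already in the requested state. In real
 * code the resume would be issued from the pause completion callback;
 * "example_oper_cb" is hypothetical.
 */
static void __maybe_unused
example_oper_cb(struct bnad *bnad, struct bna_rx *rx,
		enum bna_cb_status status)
{
	/* pause/resume completion would be handled here */
}

static void __maybe_unused
example_pause_rx(struct bna_rx *rx)
{
	bna_rx_receive_pause(rx, example_oper_cb);
	/* ... later, from the completion path: ...
	 * bna_rx_receive_resume(rx, example_oper_cb);
	 */
}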
enum bna_cb_status
bna_rx_ucast_add(struct bna_rx *rx, u8 *addr,
		void (*cbfn)(struct bnad *, struct bna_rx *,
			     enum bna_cb_status))
{
	struct bna_rxf *rxf = &rx->rxf;
	struct list_head *qe;
	struct bna_mac *mac;

	/* Check if already added */
	list_for_each(qe, &rxf->ucast_active_q) {
		mac = (struct bna_mac *)qe;
		if (BNA_MAC_IS_EQUAL(mac->addr, addr)) {
			if (cbfn)
				(*cbfn)(rx->bna->bnad, rx, BNA_CB_SUCCESS);
			return BNA_CB_SUCCESS;
		}
	}

	/* Check if pending addition */
	list_for_each(qe, &rxf->ucast_pending_add_q) {
		mac = (struct bna_mac *)qe;
		if (BNA_MAC_IS_EQUAL(mac->addr, addr)) {
			if (cbfn)
				(*cbfn)(rx->bna->bnad, rx, BNA_CB_SUCCESS);
			return BNA_CB_SUCCESS;
		}
	}

	mac = bna_ucam_mod_mac_get(&rxf->rx->bna->ucam_mod);
	if (mac == NULL)
		return BNA_CB_UCAST_CAM_FULL;
	bfa_q_qe_init(&mac->qe);
	memcpy(mac->addr, addr, ETH_ALEN);
	list_add_tail(&mac->qe, &rxf->ucast_pending_add_q);

	rxf->cam_fltr_cbfn = cbfn;
	rxf->cam_fltr_cbarg = rx->bna->bnad;
	bfa_fsm_send_event(rxf, RXF_E_CAM_FLTR_MOD);

	return BNA_CB_SUCCESS;
}
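/*
 * Editorial usage sketch (added by the editor, not part of the original
 * driver): adding a unicast address. BNA_CB_UCAST_CAM_FULL signals that
 * the unicast CAM free list is exhausted. The MAC value and the
 * "example_cam_cb" callback are hypothetical.
 */
static void __maybe_unused
example_cam_cb(struct bnad *bnad, struct bna_rx *rx,
		enum bna_cb_status status)
{
	/* CAM update completion would be handled here */
}

static enum bna_cb_status __maybe_unused
example_add_ucast_mac(struct bna_rx *rx)
{
	u8 addr[ETH_ALEN] = { 0x00, 0x05, 0x1e, 0x00, 0x00, 0x01 };

	return bna_rx_ucast_add(rx, addr, example_cam_cb);
}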
enum bna_cb_status
bna_rx_ucast_del(struct bna_rx *rx, u8 *addr,
		void (*cbfn)(struct bnad *, struct bna_rx *,
			     enum bna_cb_status))
{
	struct bna_rxf *rxf = &rx->rxf;
	struct list_head *qe;
	struct bna_mac *mac;

	list_for_each(qe, &rxf->ucast_pending_add_q) {
		mac = (struct bna_mac *)qe;
		if (BNA_MAC_IS_EQUAL(mac->addr, addr)) {
			list_del(qe);
			bfa_q_qe_init(qe);
			bna_ucam_mod_mac_put(&rxf->rx->bna->ucam_mod, mac);
			if (cbfn)
				(*cbfn)(rx->bna->bnad, rx, BNA_CB_SUCCESS);
			return BNA_CB_SUCCESS;
		}
	}

	list_for_each(qe, &rxf->ucast_active_q) {
		mac = (struct bna_mac *)qe;
		if (BNA_MAC_IS_EQUAL(mac->addr, addr)) {
			list_del(qe);
			bfa_q_qe_init(qe);
			list_add_tail(qe, &rxf->ucast_pending_del_q);
			rxf->cam_fltr_cbfn = cbfn;
			rxf->cam_fltr_cbarg = rx->bna->bnad;
			bfa_fsm_send_event(rxf, RXF_E_CAM_FLTR_MOD);
			return BNA_CB_SUCCESS;
		}
	}

	return BNA_CB_INVALID_MAC;
}
enum bna_cb_status
bna_rx_mode_set(struct bna_rx *rx, enum bna_rxmode new_mode,
		enum bna_rxmode bitmask,
		void (*cbfn)(struct bnad *, struct bna_rx *,
			     enum bna_cb_status))
{
	struct bna_rxf *rxf = &rx->rxf;
	int need_hw_config = 0;

	/* Error checks */

	if (is_promisc_enable(new_mode, bitmask)) {
		/* If promisc mode is already enabled elsewhere in the system */
		if ((rx->bna->rxf_promisc_id != BFI_MAX_RXF) &&
			(rx->bna->rxf_promisc_id != rxf->rxf_id))
			goto err_return;

		/* If default mode is already enabled in the system */
		if (rx->bna->rxf_default_id != BFI_MAX_RXF)
			goto err_return;

		/* Trying to enable promiscuous and default mode together */
		if (is_default_enable(new_mode, bitmask))
			goto err_return;
	}

	if (is_default_enable(new_mode, bitmask)) {
		/* If default mode is already enabled elsewhere in the system */
		if ((rx->bna->rxf_default_id != BFI_MAX_RXF) &&
			(rx->bna->rxf_default_id != rxf->rxf_id)) {
			goto err_return;
		}

		/* If promiscuous mode is already enabled in the system */
		if (rx->bna->rxf_promisc_id != BFI_MAX_RXF)
			goto err_return;
	}

	/* Process the commands */

	if (is_promisc_enable(new_mode, bitmask)) {
		if (rxf_promisc_enable(rxf))
			need_hw_config = 1;
	} else if (is_promisc_disable(new_mode, bitmask)) {
		if (rxf_promisc_disable(rxf))
			need_hw_config = 1;
	}

	if (is_default_enable(new_mode, bitmask)) {
		if (rxf_default_enable(rxf))
			need_hw_config = 1;
	} else if (is_default_disable(new_mode, bitmask)) {
		if (rxf_default_disable(rxf))
			need_hw_config = 1;
	}

	if (is_allmulti_enable(new_mode, bitmask)) {
		if (rxf_allmulti_enable(rxf))
			need_hw_config = 1;
	} else if (is_allmulti_disable(new_mode, bitmask)) {
		if (rxf_allmulti_disable(rxf))
			need_hw_config = 1;
	}

	/* Trigger h/w if needed */

	if (need_hw_config) {
		rxf->cam_fltr_cbfn = cbfn;
		rxf->cam_fltr_cbarg = rx->bna->bnad;
		bfa_fsm_send_event(rxf, RXF_E_CAM_FLTR_MOD);
	} else if (cbfn)
		(*cbfn)(rx->bna->bnad, rx, BNA_CB_SUCCESS);

	return BNA_CB_SUCCESS;

err_return:
	return BNA_CB_FAIL;
}
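/*
 * Editorial usage sketch (added by the editor, not part of the original
 * driver): enabling allmulti on one RxF. new_mode carries the desired bit
 * values and bitmask selects which mode bits the call may touch, so
 * passing the same value for both requests "turn this bit on".
 * "example_mode_cb" is hypothetical.
 */
static void __maybe_unused
example_mode_cb(struct bnad *bnad, struct bna_rx *rx,
		enum bna_cb_status status)
{
	/* mode-change completion would be handled here */
}

static enum bna_cb_status __maybe_unused
example_enable_allmulti(struct bna_rx *rx)
{
	return bna_rx_mode_set(rx, BNA_RXMODE_ALLMULTI, BNA_RXMODE_ALLMULTI,
			example_mode_cb);
}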
void
bna_rx_rss_enable(struct bna_rx *rx)
{
	struct bna_rxf *rxf = &rx->rxf;

	rxf->rxf_flags |= BNA_RXF_FL_RSS_CONFIG_PENDING;
	rxf->rss_status = BNA_STATUS_T_ENABLED;
	bfa_fsm_send_event(rxf, RXF_E_CAM_FLTR_MOD);
}
void
bna_rx_rss_disable(struct bna_rx *rx)
{
	struct bna_rxf *rxf = &rx->rxf;

	rxf->rxf_flags |= BNA_RXF_FL_RSS_CONFIG_PENDING;
	rxf->rss_status = BNA_STATUS_T_DISABLED;
	bfa_fsm_send_event(rxf, RXF_E_CAM_FLTR_MOD);
}
void
bna_rx_rss_reconfig(struct bna_rx *rx, struct bna_rxf_rss *rss_config)
{
	struct bna_rxf *rxf = &rx->rxf;

	rxf->rxf_flags |= BNA_RXF_FL_RSS_CONFIG_PENDING;
	rxf->rss_status = BNA_STATUS_T_ENABLED;
	rxf->rss_cfg = *rss_config;
	bfa_fsm_send_event(rxf, RXF_E_CAM_FLTR_MOD);
}
void
bna_rx_vlanfilter_enable(struct bna_rx *rx)
{
	struct bna_rxf *rxf = &rx->rxf;

	if (rxf->vlan_filter_status == BNA_STATUS_T_DISABLED) {
		rxf->rxf_flags |= BNA_RXF_FL_VLAN_CONFIG_PENDING;
		rxf->vlan_filter_status = BNA_STATUS_T_ENABLED;
		bfa_fsm_send_event(rxf, RXF_E_CAM_FLTR_MOD);
	}
}
void
bna_rx_vlanfilter_disable(struct bna_rx *rx)
{
	struct bna_rxf *rxf = &rx->rxf;

	if (rxf->vlan_filter_status == BNA_STATUS_T_ENABLED) {
		rxf->rxf_flags |= BNA_RXF_FL_VLAN_CONFIG_PENDING;
		rxf->vlan_filter_status = BNA_STATUS_T_DISABLED;
		bfa_fsm_send_event(rxf, RXF_E_CAM_FLTR_MOD);
	}
}
/* Rx */
static struct bna_rxp *
bna_rx_get_rxp(struct bna_rx *rx, int vector)
{
	struct bna_rxp *rxp;
	struct list_head *qe;

	list_for_each(qe, &rx->rxp_q) {
		rxp = (struct bna_rxp *)qe;
		if (rxp->vector == vector)
			return rxp;
	}
	return NULL;
}
/*
 * bna_rx_rss_rit_set()
 * Sets the Q ids for the specified msi-x vectors in the RIT.
 * Maximum rit size supported is 64, which should be the max size of the
 * vectors array.
 */
void
bna_rx_rss_rit_set(struct bna_rx *rx, unsigned int *vectors, int nvectors)
{
	int i;
	struct bna_rxp *rxp;
	struct bna_rxq *q0 = NULL, *q1 = NULL;
	struct bna_rxf *rxf;

	/* Build the RIT contents for this RX */
	rxf = &rx->rxf;
	for (i = 0; i < nvectors; i++) {
		rxp = bna_rx_get_rxp(rx, vectors[i]);

		GET_RXQS(rxp, q0, q1);
		rxf->rit_segment->rit[i].large_rxq_id = q0->rxq_id;
		rxf->rit_segment->rit[i].small_rxq_id = (q1 ? q1->rxq_id : 0);
	}

	rxf->rit_segment->rit_size = nvectors;

	/* Subsequent call to enable/reconfig RSS will update the RIT in h/w */
}
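/*
 * Editorial usage sketch (added by the editor, not part of the original
 * driver): populating the RIT from the MSI-X vectors in use, here assumed
 * to be 0..nvectors-1. The 64-entry bound comes from the function comment
 * above; "example_program_rit" is hypothetical.
 */
static void __maybe_unused
example_program_rit(struct bna_rx *rx, int nvectors)
{
	unsigned int vectors[64];	/* 64 = max RIT size, per above */
	int i;

	if (nvectors > 64)
		nvectors = 64;
	for (i = 0; i < nvectors; i++)
		vectors[i] = i;
	bna_rx_rss_rit_set(rx, vectors, nvectors);
	/* the RIT reaches h/w on the next RSS enable/reconfig */
}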
void
bna_rx_coalescing_timeo_set(struct bna_rx *rx, int coalescing_timeo)
{
	struct bna_rxp *rxp;
	struct list_head *qe;

	list_for_each(qe, &rx->rxp_q) {
		rxp = (struct bna_rxp *)qe;
		rxp->cq.ccb->rx_coalescing_timeo = coalescing_timeo;
		bna_ib_coalescing_timeo_set(rxp->cq.ib, coalescing_timeo);
	}
}
void
bna_rx_dim_reconfig(struct bna *bna, u32 vector[][BNA_BIAS_T_MAX])
{
	int i, j;

	for (i = 0; i < BNA_LOAD_T_MAX; i++)
		for (j = 0; j < BNA_BIAS_T_MAX; j++)
			bna->rx_mod.dim_vector[i][j] = vector[i][j];
}
void
bna_rx_dim_update(struct bna_ccb *ccb)
{
	struct bna *bna = ccb->cq->rx->bna;
	u32 load, bias;
	u32 pkt_rt, small_rt, large_rt;
	u8 coalescing_timeo;

	if ((ccb->pkt_rate.small_pkt_cnt == 0) &&
		(ccb->pkt_rate.large_pkt_cnt == 0))
		return;

	/* Arrive at preconfigured coalescing timeo value based on pkt rate */

	small_rt = ccb->pkt_rate.small_pkt_cnt;
	large_rt = ccb->pkt_rate.large_pkt_cnt;

	pkt_rt = small_rt + large_rt;

	if (pkt_rt < BNA_PKT_RATE_10K)
		load = BNA_LOAD_T_LOW_4;
	else if (pkt_rt < BNA_PKT_RATE_20K)
		load = BNA_LOAD_T_LOW_3;
	else if (pkt_rt < BNA_PKT_RATE_30K)
		load = BNA_LOAD_T_LOW_2;
	else if (pkt_rt < BNA_PKT_RATE_40K)
		load = BNA_LOAD_T_LOW_1;
	else if (pkt_rt < BNA_PKT_RATE_50K)
		load = BNA_LOAD_T_HIGH_1;
	else if (pkt_rt < BNA_PKT_RATE_60K)
		load = BNA_LOAD_T_HIGH_2;
	else if (pkt_rt < BNA_PKT_RATE_80K)
		load = BNA_LOAD_T_HIGH_3;
	else
		load = BNA_LOAD_T_HIGH_4;

	if (small_rt > (large_rt << 1))
		bias = 0;
	else
		bias = 1;

	ccb->pkt_rate.small_pkt_cnt = 0;
	ccb->pkt_rate.large_pkt_cnt = 0;

	coalescing_timeo = bna->rx_mod.dim_vector[load][bias];
	ccb->rx_coalescing_timeo = coalescing_timeo;

	/* Set it to IB */
	bna_ib_coalescing_timeo_set(ccb->cq->ib, coalescing_timeo);
}
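/*
 * Editorial worked example (added by the editor, not from the original
 * source): with small_pkt_cnt = 45000 and large_pkt_cnt = 10000 in the
 * sampling interval, pkt_rt = 55000 selects load = BNA_LOAD_T_HIGH_2
 * (50K <= rate < 60K), and since 45000 > (10000 << 1) = 20000 the
 * small-packet bias (bias = 0) applies, so the coalescing timeout comes
 * from dim_vector[BNA_LOAD_T_HIGH_2][0].
 */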
/* Tx */
enum bna_cb_status
bna_tx_prio_set(struct bna_tx *tx, int prio,
		void (*cbfn)(struct bnad *, struct bna_tx *,
			     enum bna_cb_status))
{
	if (tx->flags & BNA_TX_F_PRIO_LOCK)
		return BNA_CB_FAIL;
	else {
		tx->prio_change_cbfn = cbfn;
		bna_tx_prio_changed(tx, prio);
	}

	return BNA_CB_SUCCESS;
}
void
bna_tx_coalescing_timeo_set(struct bna_tx *tx, int coalescing_timeo)
{
	struct bna_txq *txq;
	struct list_head *qe;

	list_for_each(qe, &tx->txq_q) {
		txq = (struct bna_txq *)qe;
		bna_ib_coalescing_timeo_set(txq->ib, coalescing_timeo);
	}
}
/* RIT segment pools */
struct bna_ritseg_pool_cfg {
	u32	pool_size;
	u32	pool_entry_size;
};
init_ritseg_pool(ritseg_pool_cfg);	/* defines the ritseg_pool_cfg[] table */
void
bna_ucam_mod_init(struct bna_ucam_mod *ucam_mod, struct bna *bna,
		struct bna_res_info *res_info)
{
	int i;

	ucam_mod->ucmac = (struct bna_mac *)
		res_info[BNA_RES_MEM_T_UCMAC_ARRAY].res_u.mem_info.mdl[0].kva;

	INIT_LIST_HEAD(&ucam_mod->free_q);
	for (i = 0; i < BFI_MAX_UCMAC; i++) {
		bfa_q_qe_init(&ucam_mod->ucmac[i].qe);
		list_add_tail(&ucam_mod->ucmac[i].qe, &ucam_mod->free_q);
	}

	ucam_mod->bna = bna;
}
void
bna_ucam_mod_uninit(struct bna_ucam_mod *ucam_mod)
{
	struct list_head *qe;
	int i = 0;

	/* Sanity walk: count the entries returned to the free list */
	list_for_each(qe, &ucam_mod->free_q)
		i++;

	ucam_mod->bna = NULL;
}
void
bna_mcam_mod_init(struct bna_mcam_mod *mcam_mod, struct bna *bna,
		struct bna_res_info *res_info)
{
	int i;

	mcam_mod->mcmac = (struct bna_mac *)
		res_info[BNA_RES_MEM_T_MCMAC_ARRAY].res_u.mem_info.mdl[0].kva;

	INIT_LIST_HEAD(&mcam_mod->free_q);
	for (i = 0; i < BFI_MAX_MCMAC; i++) {
		bfa_q_qe_init(&mcam_mod->mcmac[i].qe);
		list_add_tail(&mcam_mod->mcmac[i].qe, &mcam_mod->free_q);
	}

	mcam_mod->bna = bna;
}
void
bna_mcam_mod_uninit(struct bna_mcam_mod *mcam_mod)
{
	struct list_head *qe;
	int i = 0;

	/* Sanity walk: count the entries returned to the free list */
	list_for_each(qe, &mcam_mod->free_q)
		i++;

	mcam_mod->bna = NULL;
}
void
bna_rit_mod_init(struct bna_rit_mod *rit_mod,
		struct bna_res_info *res_info)
{
	int i;
	int j;
	int count;
	int offset;

	rit_mod->rit = (struct bna_rit_entry *)
		res_info[BNA_RES_MEM_T_RIT_ENTRY].res_u.mem_info.mdl[0].kva;
	rit_mod->rit_segment = (struct bna_rit_segment *)
		res_info[BNA_RES_MEM_T_RIT_SEGMENT].res_u.mem_info.mdl[0].kva;

	count = 0;
	offset = 0;
	for (i = 0; i < BFI_RIT_SEG_TOTAL_POOLS; i++) {
		INIT_LIST_HEAD(&rit_mod->rit_seg_pool[i]);
		for (j = 0; j < ritseg_pool_cfg[i].pool_size; j++) {
			bfa_q_qe_init(&rit_mod->rit_segment[count].qe);
			rit_mod->rit_segment[count].max_rit_size =
					ritseg_pool_cfg[i].pool_entry_size;
			rit_mod->rit_segment[count].rit_offset = offset;
			rit_mod->rit_segment[count].rit =
					&rit_mod->rit[offset];
			list_add_tail(&rit_mod->rit_segment[count].qe,
				&rit_mod->rit_seg_pool[i]);
			count++;
			offset += ritseg_pool_cfg[i].pool_entry_size;
		}
	}
}
void
bna_rit_mod_uninit(struct bna_rit_mod *rit_mod)
{
	struct bna_rit_segment *rit_segment;
	struct list_head *qe;
	int i;
	int j;

	for (i = 0; i < BFI_RIT_SEG_TOTAL_POOLS; i++) {
		j = 0;
		/* Sanity walk: count the segments back in each pool */
		list_for_each(qe, &rit_mod->rit_seg_pool[i]) {
			rit_segment = (struct bna_rit_segment *)qe;
			j++;
		}
	}
}
/* Called during probe(), before calling bna_init() */
void
bna_res_req(struct bna_res_info *res_info)
{
	bna_adv_res_req(res_info);

	/* DMA memory for retrieving IOC attributes */
	res_info[BNA_RES_MEM_T_ATTR].res_type = BNA_RES_T_MEM;
	res_info[BNA_RES_MEM_T_ATTR].res_u.mem_info.mem_type = BNA_MEM_T_DMA;
	res_info[BNA_RES_MEM_T_ATTR].res_u.mem_info.num = 1;
	res_info[BNA_RES_MEM_T_ATTR].res_u.mem_info.len =
				ALIGN(bfa_nw_ioc_meminfo(), PAGE_SIZE);

	/* DMA memory for index segment of an IB */
	res_info[BNA_RES_MEM_T_IBIDX].res_type = BNA_RES_T_MEM;
	res_info[BNA_RES_MEM_T_IBIDX].res_u.mem_info.mem_type = BNA_MEM_T_DMA;
	res_info[BNA_RES_MEM_T_IBIDX].res_u.mem_info.len =
				BFI_IBIDX_SIZE * BFI_IBIDX_MAX_SEGSIZE;
	res_info[BNA_RES_MEM_T_IBIDX].res_u.mem_info.num = BFI_MAX_IB;

	/* Virtual memory for IB objects - stored by IB module */
	res_info[BNA_RES_MEM_T_IB_ARRAY].res_type = BNA_RES_T_MEM;
	res_info[BNA_RES_MEM_T_IB_ARRAY].res_u.mem_info.mem_type =
								BNA_MEM_T_KVA;
	res_info[BNA_RES_MEM_T_IB_ARRAY].res_u.mem_info.num = 1;
	res_info[BNA_RES_MEM_T_IB_ARRAY].res_u.mem_info.len =
				BFI_MAX_IB * sizeof(struct bna_ib);

	/* Virtual memory for intr objects - stored by IB module */
	res_info[BNA_RES_MEM_T_INTR_ARRAY].res_type = BNA_RES_T_MEM;
	res_info[BNA_RES_MEM_T_INTR_ARRAY].res_u.mem_info.mem_type =
								BNA_MEM_T_KVA;
	res_info[BNA_RES_MEM_T_INTR_ARRAY].res_u.mem_info.num = 1;
	res_info[BNA_RES_MEM_T_INTR_ARRAY].res_u.mem_info.len =
				BFI_MAX_IB * sizeof(struct bna_intr);

	/* Virtual memory for idx_seg objects - stored by IB module */
	res_info[BNA_RES_MEM_T_IDXSEG_ARRAY].res_type = BNA_RES_T_MEM;
	res_info[BNA_RES_MEM_T_IDXSEG_ARRAY].res_u.mem_info.mem_type =
								BNA_MEM_T_KVA;
	res_info[BNA_RES_MEM_T_IDXSEG_ARRAY].res_u.mem_info.num = 1;
	res_info[BNA_RES_MEM_T_IDXSEG_ARRAY].res_u.mem_info.len =
			BFI_IBIDX_TOTAL_SEGS * sizeof(struct bna_ibidx_seg);

	/* Virtual memory for Tx objects - stored by Tx module */
	res_info[BNA_RES_MEM_T_TX_ARRAY].res_type = BNA_RES_T_MEM;
	res_info[BNA_RES_MEM_T_TX_ARRAY].res_u.mem_info.mem_type =
								BNA_MEM_T_KVA;
	res_info[BNA_RES_MEM_T_TX_ARRAY].res_u.mem_info.num = 1;
	res_info[BNA_RES_MEM_T_TX_ARRAY].res_u.mem_info.len =
			BFI_MAX_TXQ * sizeof(struct bna_tx);

	/* Virtual memory for TxQ - stored by Tx module */
	res_info[BNA_RES_MEM_T_TXQ_ARRAY].res_type = BNA_RES_T_MEM;
	res_info[BNA_RES_MEM_T_TXQ_ARRAY].res_u.mem_info.mem_type =
								BNA_MEM_T_KVA;
	res_info[BNA_RES_MEM_T_TXQ_ARRAY].res_u.mem_info.num = 1;
	res_info[BNA_RES_MEM_T_TXQ_ARRAY].res_u.mem_info.len =
			BFI_MAX_TXQ * sizeof(struct bna_txq);

	/* Virtual memory for Rx objects - stored by Rx module */
	res_info[BNA_RES_MEM_T_RX_ARRAY].res_type = BNA_RES_T_MEM;
	res_info[BNA_RES_MEM_T_RX_ARRAY].res_u.mem_info.mem_type =
								BNA_MEM_T_KVA;
	res_info[BNA_RES_MEM_T_RX_ARRAY].res_u.mem_info.num = 1;
	res_info[BNA_RES_MEM_T_RX_ARRAY].res_u.mem_info.len =
			BFI_MAX_RXQ * sizeof(struct bna_rx);

	/* Virtual memory for RxPath - stored by Rx module */
	res_info[BNA_RES_MEM_T_RXP_ARRAY].res_type = BNA_RES_T_MEM;
	res_info[BNA_RES_MEM_T_RXP_ARRAY].res_u.mem_info.mem_type =
								BNA_MEM_T_KVA;
	res_info[BNA_RES_MEM_T_RXP_ARRAY].res_u.mem_info.num = 1;
	res_info[BNA_RES_MEM_T_RXP_ARRAY].res_u.mem_info.len =
			BFI_MAX_RXQ * sizeof(struct bna_rxp);

	/* Virtual memory for RxQ - stored by Rx module */
	res_info[BNA_RES_MEM_T_RXQ_ARRAY].res_type = BNA_RES_T_MEM;
	res_info[BNA_RES_MEM_T_RXQ_ARRAY].res_u.mem_info.mem_type =
								BNA_MEM_T_KVA;
	res_info[BNA_RES_MEM_T_RXQ_ARRAY].res_u.mem_info.num = 1;
	res_info[BNA_RES_MEM_T_RXQ_ARRAY].res_u.mem_info.len =
			BFI_MAX_RXQ * sizeof(struct bna_rxq);

	/* Virtual memory for Unicast MAC address - stored by ucam module */
	res_info[BNA_RES_MEM_T_UCMAC_ARRAY].res_type = BNA_RES_T_MEM;
	res_info[BNA_RES_MEM_T_UCMAC_ARRAY].res_u.mem_info.mem_type =
								BNA_MEM_T_KVA;
	res_info[BNA_RES_MEM_T_UCMAC_ARRAY].res_u.mem_info.num = 1;
	res_info[BNA_RES_MEM_T_UCMAC_ARRAY].res_u.mem_info.len =
			BFI_MAX_UCMAC * sizeof(struct bna_mac);

	/* Virtual memory for Multicast MAC address - stored by mcam module */
	res_info[BNA_RES_MEM_T_MCMAC_ARRAY].res_type = BNA_RES_T_MEM;
	res_info[BNA_RES_MEM_T_MCMAC_ARRAY].res_u.mem_info.mem_type =
								BNA_MEM_T_KVA;
	res_info[BNA_RES_MEM_T_MCMAC_ARRAY].res_u.mem_info.num = 1;
	res_info[BNA_RES_MEM_T_MCMAC_ARRAY].res_u.mem_info.len =
			BFI_MAX_MCMAC * sizeof(struct bna_mac);

	/* Virtual memory for RIT entries */
	res_info[BNA_RES_MEM_T_RIT_ENTRY].res_type = BNA_RES_T_MEM;
	res_info[BNA_RES_MEM_T_RIT_ENTRY].res_u.mem_info.mem_type =
								BNA_MEM_T_KVA;
	res_info[BNA_RES_MEM_T_RIT_ENTRY].res_u.mem_info.num = 1;
	res_info[BNA_RES_MEM_T_RIT_ENTRY].res_u.mem_info.len =
			BFI_MAX_RIT_SIZE * sizeof(struct bna_rit_entry);

	/* Virtual memory for RIT segment table */
	res_info[BNA_RES_MEM_T_RIT_SEGMENT].res_type = BNA_RES_T_MEM;
	res_info[BNA_RES_MEM_T_RIT_SEGMENT].res_u.mem_info.mem_type =
								BNA_MEM_T_KVA;
	res_info[BNA_RES_MEM_T_RIT_SEGMENT].res_u.mem_info.num = 1;
	res_info[BNA_RES_MEM_T_RIT_SEGMENT].res_u.mem_info.len =
			BFI_RIT_TOTAL_SEGS * sizeof(struct bna_rit_segment);

	/* Interrupt resource for mailbox interrupt */
	res_info[BNA_RES_INTR_T_MBOX].res_type = BNA_RES_T_INTR;
	res_info[BNA_RES_INTR_T_MBOX].res_u.intr_info.intr_type =
							BNA_INTR_T_MSIX;
	res_info[BNA_RES_INTR_T_MBOX].res_u.intr_info.num = 1;
}
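/*
 * Editorial usage sketch (added by the editor, not part of the original
 * driver): the probe path fills res_info via bna_res_req(), the bnad
 * layer allocates each entry per its res_type/mem_info, and only then is
 * the table handed to bna_init(). This assumes the usual prototypes from
 * the driver headers; "example_probe_order" is hypothetical and only
 * shows the ordering, not the allocation.
 */
static void __maybe_unused
example_probe_order(struct bna *bna, struct bnad *bnad,
		struct bfa_pcidev *pcidev,
		struct bna_res_info *res_info)
{
	bna_res_req(res_info);
	/* ... bnad allocates each res_info[i] here ... */
	bna_init(bna, bnad, pcidev, res_info);
}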
/* Called during probe() */
void
bna_init(struct bna *bna, struct bnad *bnad, struct bfa_pcidev *pcidev,
		struct bna_res_info *res_info)
{
	bna->bnad = bnad;
	bna->pcidev = *pcidev;

	bna->stats.hw_stats = (struct bfi_ll_stats *)
		res_info[BNA_RES_MEM_T_STATS].res_u.mem_info.mdl[0].kva;
	bna->hw_stats_dma.msb =
		res_info[BNA_RES_MEM_T_STATS].res_u.mem_info.mdl[0].dma.msb;
	bna->hw_stats_dma.lsb =
		res_info[BNA_RES_MEM_T_STATS].res_u.mem_info.mdl[0].dma.lsb;
	bna->stats.sw_stats = (struct bna_sw_stats *)
		res_info[BNA_RES_MEM_T_SWSTATS].res_u.mem_info.mdl[0].kva;

	bna->regs.page_addr = bna->pcidev.pci_bar_kva +
				reg_offset[bna->pcidev.pci_func].page_addr;
	bna->regs.fn_int_status = bna->pcidev.pci_bar_kva +
				reg_offset[bna->pcidev.pci_func].fn_int_status;
	bna->regs.fn_int_mask = bna->pcidev.pci_bar_kva +
				reg_offset[bna->pcidev.pci_func].fn_int_mask;

	if (bna->pcidev.pci_func < 3)
		bna->port_num = 0;
	else
		bna->port_num = 1;

	/* Also initializes diag, cee, sfp, phy_port and mbox_mod */
	bna_device_init(&bna->device, bna, res_info);

	bna_port_init(&bna->port, bna);

	bna_tx_mod_init(&bna->tx_mod, bna, res_info);

	bna_rx_mod_init(&bna->rx_mod, bna, res_info);

	bna_ib_mod_init(&bna->ib_mod, bna, res_info);

	bna_rit_mod_init(&bna->rit_mod, res_info);

	bna_ucam_mod_init(&bna->ucam_mod, bna, res_info);

	bna_mcam_mod_init(&bna->mcam_mod, bna, res_info);

	bna->rxf_default_id = BFI_MAX_RXF;
	bna->rxf_promisc_id = BFI_MAX_RXF;

	/* Mbox q element for posting stat request to f/w */
	bfa_q_qe_init(&bna->mbox_qe.qe);
}
void
bna_uninit(struct bna *bna)
{
	bna_mcam_mod_uninit(&bna->mcam_mod);

	bna_ucam_mod_uninit(&bna->ucam_mod);

	bna_rit_mod_uninit(&bna->rit_mod);

	bna_ib_mod_uninit(&bna->ib_mod);

	bna_rx_mod_uninit(&bna->rx_mod);

	bna_tx_mod_uninit(&bna->tx_mod);

	bna_port_uninit(&bna->port);

	bna_device_uninit(&bna->device);

	bna->bnad = NULL;
}
struct bna_mac *
bna_ucam_mod_mac_get(struct bna_ucam_mod *ucam_mod)
{
	struct list_head *qe;

	if (list_empty(&ucam_mod->free_q))
		return NULL;

	bfa_q_deq(&ucam_mod->free_q, &qe);

	return (struct bna_mac *)qe;
}
void
bna_ucam_mod_mac_put(struct bna_ucam_mod *ucam_mod, struct bna_mac *mac)
{
	list_add_tail(&mac->qe, &ucam_mod->free_q);
}
struct bna_mac *
bna_mcam_mod_mac_get(struct bna_mcam_mod *mcam_mod)
{
	struct list_head *qe;

	if (list_empty(&mcam_mod->free_q))
		return NULL;

	bfa_q_deq(&mcam_mod->free_q, &qe);

	return (struct bna_mac *)qe;
}
void
bna_mcam_mod_mac_put(struct bna_mcam_mod *mcam_mod, struct bna_mac *mac)
{
	list_add_tail(&mac->qe, &mcam_mod->free_q);
}
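/*
 * Editorial usage sketch (added by the editor, not part of the original
 * driver): the ucam/mcam modules are plain free lists; get returns NULL
 * when the CAM is exhausted, and every successful get must eventually be
 * paired with a put. "example_mcam_get_put" is hypothetical.
 */
static void __maybe_unused
example_mcam_get_put(struct bna_mcam_mod *mcam_mod)
{
	struct bna_mac *mac = bna_mcam_mod_mac_get(mcam_mod);

	if (mac == NULL)
		return;		/* multicast CAM exhausted */
	/* ... fill mac->addr and queue it on an RxF list ... */
	bna_mcam_mod_mac_put(mcam_mod, mac);
}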
/*
 * Note: This should be called in the same locking context as the call to
 * bna_rit_mod_seg_get()
 */
int
bna_rit_mod_can_satisfy(struct bna_rit_mod *rit_mod, int seg_size)
{
	int i;

	/* Select the pool for seg_size */
	for (i = 0; i < BFI_RIT_SEG_TOTAL_POOLS; i++) {
		if (seg_size <= ritseg_pool_cfg[i].pool_entry_size)
			break;
	}

	if (i == BFI_RIT_SEG_TOTAL_POOLS)
		return 0;

	if (list_empty(&rit_mod->rit_seg_pool[i]))
		return 0;

	return 1;
}
struct bna_rit_segment *
bna_rit_mod_seg_get(struct bna_rit_mod *rit_mod, int seg_size)
{
	struct bna_rit_segment *seg;
	struct list_head *qe;
	int i;

	/* Select the pool for seg_size */
	for (i = 0; i < BFI_RIT_SEG_TOTAL_POOLS; i++) {
		if (seg_size <= ritseg_pool_cfg[i].pool_entry_size)
			break;
	}

	if (i == BFI_RIT_SEG_TOTAL_POOLS)
		return NULL;

	if (list_empty(&rit_mod->rit_seg_pool[i]))
		return NULL;

	bfa_q_deq(&rit_mod->rit_seg_pool[i], &qe);
	seg = (struct bna_rit_segment *)qe;
	bfa_q_qe_init(&seg->qe);
	seg->rit_size = seg_size;

	return seg;
}
void
bna_rit_mod_seg_put(struct bna_rit_mod *rit_mod,
			struct bna_rit_segment *seg)
{
	int i;

	/* Select the pool for seg->max_rit_size */
	for (i = 0; i < BFI_RIT_SEG_TOTAL_POOLS; i++) {
		if (seg->max_rit_size == ritseg_pool_cfg[i].pool_entry_size)
			break;
	}

	seg->rit_size = 0;
	list_add_tail(&seg->qe, &rit_mod->rit_seg_pool[i]);
}
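/*
 * Editorial usage sketch (added by the editor, not part of the original
 * driver): the intended RIT segment lifecycle. can_satisfy() and
 * seg_get() must run under the same lock (see the note above) so the
 * pool cannot drain between the check and the get; "example_rit_seg_cycle"
 * is hypothetical.
 */
static void __maybe_unused
example_rit_seg_cycle(struct bna_rit_mod *rit_mod, int seg_size)
{
	struct bna_rit_segment *seg;

	if (!bna_rit_mod_can_satisfy(rit_mod, seg_size))
		return;		/* no pool large enough, or pool empty */
	seg = bna_rit_mod_seg_get(rit_mod, seg_size);
	if (seg == NULL)
		return;
	/* ... program seg->rit[0 .. seg->rit_size - 1] ... */
	bna_rit_mod_seg_put(rit_mod, seg);
}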