/*
 * Linux network driver for Brocade Converged Network Adapter.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License (GPL) Version 2 as
 * published by the Free Software Foundation
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 */
/*
 * Copyright (c) 2005-2010 Brocade Communications Systems, Inc.
 */
#include "bna.h"
#include "bfa_sm.h"
#include "bfa_wc.h"

static void bna_port_cb_link_up(struct bna_port *port, struct bfi_ll_aen *aen,
			int status);
static void bna_port_cb_link_down(struct bna_port *port, int status);
static void bna_adv_device_init(struct bna_device *device, struct bna *bna,
			struct bna_res_info *res_info);

/**
 * MBOX
 */
static int
bna_is_aen(u8 msg_id)
{
	return (msg_id == BFI_LL_I2H_LINK_DOWN_AEN ||
		msg_id == BFI_LL_I2H_LINK_UP_AEN);
}

static void
bna_mbox_aen_callback(struct bna *bna, struct bfi_mbmsg *msg)
{
	struct bfi_ll_aen *aen = (struct bfi_ll_aen *)(msg);

	switch (aen->mh.msg_id) {
	case BFI_LL_I2H_LINK_UP_AEN:
		bna_port_cb_link_up(&bna->port, aen, aen->reason);
		break;
	case BFI_LL_I2H_LINK_DOWN_AEN:
		bna_port_cb_link_down(&bna->port, aen->reason);
		break;
	default:
		break;
	}
}
static void
bna_ll_isr(void *llarg, struct bfi_mbmsg *msg)
{
	struct bna *bna = (struct bna *)(llarg);
	struct bfi_ll_rsp *mb_rsp = (struct bfi_ll_rsp *)(msg);
	struct bfi_mhdr *cmd_h, *rsp_h;
	struct bna_mbox_qe *mb_qe = NULL;
	int to_post = 0;
	u8 aen = 0;
	char message[BNA_MESSAGE_SIZE];

	aen = bna_is_aen(mb_rsp->mh.msg_id);

	if (!aen) {
		mb_qe = bfa_q_first(&bna->mbox_mod.posted_q);
		cmd_h = (struct bfi_mhdr *)(&mb_qe->cmd.msg[0]);
		rsp_h = (struct bfi_mhdr *)(&mb_rsp->mh);

		if ((BFA_I2HM(cmd_h->msg_id) == rsp_h->msg_id) &&
		    (cmd_h->mtag.i2htok == rsp_h->mtag.i2htok)) {
			/* Remove the request from posted_q, update state */
			list_del(&mb_qe->qe);
			bna->mbox_mod.msg_pending--;
			if (list_empty(&bna->mbox_mod.posted_q))
				bna->mbox_mod.state = BNA_MBOX_FREE;
			else
				to_post = 1;

			/* Dispatch the cbfn */
			if (mb_qe->cbfn)
				mb_qe->cbfn(mb_qe->cbarg, mb_rsp->error);

			/* Post the next entry, if needed */
			if (to_post) {
				mb_qe = bfa_q_first(&bna->mbox_mod.posted_q);
				bfa_ioc_mbox_queue(&bna->device.ioc,
							&mb_qe->cmd);
			}
		} else {
			snprintf(message, BNA_MESSAGE_SIZE,
				"No matching rsp for [%d:%d:%d]\n",
				mb_rsp->mh.msg_class, mb_rsp->mh.msg_id,
				mb_rsp->mh.mtag.i2htok);
			pr_info("%s", message);
		}
	} else
		bna_mbox_aen_callback(bna, msg);
}

static void
bna_err_handler(struct bna *bna, u32 intr_status)
{
	u32 init_halt;

	if (intr_status & __HALT_STATUS_BITS) {
		init_halt = readl(bna->device.ioc.ioc_regs.ll_halt);
		init_halt &= ~__FW_INIT_HALT_P;
		writel(init_halt, bna->device.ioc.ioc_regs.ll_halt);
	}

	bfa_ioc_error_isr(&bna->device.ioc);
}
void
bna_mbox_handler(struct bna *bna, u32 intr_status)
{
	if (BNA_IS_ERR_INTR(intr_status)) {
		bna_err_handler(bna, intr_status);
		return;
	}
	if (BNA_IS_MBOX_INTR(intr_status))
		bfa_ioc_mbox_isr(&bna->device.ioc);
}
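/*
 * bna_mbox_send() below serializes firmware commands: every request is
 * tagged with a fresh i2h token and appended to posted_q, but it is handed
 * to the IOC only when no other command is outstanding (BNA_MBOX_FREE);
 * otherwise it waits in posted_q until bna_ll_isr() posts the next entry
 * after the previous command completes.
 */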
void
bna_mbox_send(struct bna *bna, struct bna_mbox_qe *mbox_qe)
{
	struct bfi_mhdr *mh;

	mh = (struct bfi_mhdr *)(&mbox_qe->cmd.msg[0]);

	mh->mtag.i2htok = htons(bna->mbox_mod.msg_ctr);
	bna->mbox_mod.msg_ctr++;
	bna->mbox_mod.msg_pending++;
	if (bna->mbox_mod.state == BNA_MBOX_FREE) {
		list_add_tail(&mbox_qe->qe, &bna->mbox_mod.posted_q);
		bfa_ioc_mbox_queue(&bna->device.ioc, &mbox_qe->cmd);
		bna->mbox_mod.state = BNA_MBOX_POSTED;
	} else {
		list_add_tail(&mbox_qe->qe, &bna->mbox_mod.posted_q);
	}
}

static void
bna_mbox_flush_q(struct bna *bna, struct list_head *q)
{
	struct bna_mbox_qe *mb_qe = NULL;
	struct bfi_mhdr *cmd_h;
	struct list_head *mb_q;
	void (*cbfn)(void *arg, int status);
	void *cbarg;

	mb_q = &bna->mbox_mod.posted_q;

	while (!list_empty(mb_q)) {
		bfa_q_deq(mb_q, &mb_qe);
		cbfn = mb_qe->cbfn;
		cbarg = mb_qe->cbarg;
		bfa_q_qe_init(mb_qe);
		bna->mbox_mod.msg_pending--;

		cmd_h = (struct bfi_mhdr *)(&mb_qe->cmd.msg[0]);
		if (cbfn)
			cbfn(cbarg, BNA_CB_NOT_EXEC);
	}

	bna->mbox_mod.state = BNA_MBOX_FREE;
}
void
bna_mbox_mod_start(struct bna_mbox_mod *mbox_mod)
{
}

void
bna_mbox_mod_stop(struct bna_mbox_mod *mbox_mod)
{
	bna_mbox_flush_q(mbox_mod->bna, &mbox_mod->posted_q);
}

void
bna_mbox_mod_init(struct bna_mbox_mod *mbox_mod, struct bna *bna)
{
	bfa_ioc_mbox_regisr(&bna->device.ioc, BFI_MC_LL, bna_ll_isr, bna);
	mbox_mod->state = BNA_MBOX_FREE;
	mbox_mod->msg_ctr = mbox_mod->msg_pending = 0;
	INIT_LIST_HEAD(&mbox_mod->posted_q);
	mbox_mod->bna = bna;
}

void
bna_mbox_mod_uninit(struct bna_mbox_mod *mbox_mod)
{
	mbox_mod->bna = NULL;
}
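/**
 * LLPORT - the link-level port. The FSM below tracks the admin up/down
 * handshake with firmware for the physical link.
 */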
#define call_llport_stop_cbfn(llport, status)\
do {\
	if ((llport)->stop_cbfn)\
		(llport)->stop_cbfn(&(llport)->bna->port, status);\
	(llport)->stop_cbfn = NULL;\
} while (0)

static void bna_fw_llport_up(struct bna_llport *llport);
static void bna_fw_cb_llport_up(void *arg, int status);
static void bna_fw_llport_down(struct bna_llport *llport);
static void bna_fw_cb_llport_down(void *arg, int status);
static void bna_llport_start(struct bna_llport *llport);
static void bna_llport_stop(struct bna_llport *llport);
static void bna_llport_fail(struct bna_llport *llport);

enum bna_llport_event {
	LLPORT_E_START		= 1,
	LLPORT_E_STOP		= 2,
	LLPORT_E_FAIL		= 3,
	LLPORT_E_UP		= 4,
	LLPORT_E_DOWN		= 5,
	LLPORT_E_FWRESP_UP	= 6,
	LLPORT_E_FWRESP_DOWN	= 7
};

enum bna_llport_state {
	BNA_LLPORT_STOPPED		= 1,
	BNA_LLPORT_DOWN			= 2,
	BNA_LLPORT_UP_RESP_WAIT		= 3,
	BNA_LLPORT_DOWN_RESP_WAIT	= 4,
	BNA_LLPORT_UP			= 5,
	BNA_LLPORT_LAST_RESP_WAIT	= 6
};

bfa_fsm_state_decl(bna_llport, stopped, struct bna_llport,
			enum bna_llport_event);
bfa_fsm_state_decl(bna_llport, down, struct bna_llport,
			enum bna_llport_event);
bfa_fsm_state_decl(bna_llport, up_resp_wait, struct bna_llport,
			enum bna_llport_event);
bfa_fsm_state_decl(bna_llport, down_resp_wait, struct bna_llport,
			enum bna_llport_event);
bfa_fsm_state_decl(bna_llport, up, struct bna_llport,
			enum bna_llport_event);
bfa_fsm_state_decl(bna_llport, last_resp_wait, struct bna_llport,
			enum bna_llport_event);
static struct bfa_sm_table llport_sm_table[] = {
	{BFA_SM(bna_llport_sm_stopped), BNA_LLPORT_STOPPED},
	{BFA_SM(bna_llport_sm_down), BNA_LLPORT_DOWN},
	{BFA_SM(bna_llport_sm_up_resp_wait), BNA_LLPORT_UP_RESP_WAIT},
	{BFA_SM(bna_llport_sm_down_resp_wait), BNA_LLPORT_DOWN_RESP_WAIT},
	{BFA_SM(bna_llport_sm_up), BNA_LLPORT_UP},
	{BFA_SM(bna_llport_sm_last_resp_wait), BNA_LLPORT_LAST_RESP_WAIT}
};
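/*
 * Note on the llport FSM: the *_resp_wait states exist because only one
 * admin up/down command may be in the mailbox at a time. An opposite
 * request that arrives while a response is pending is recorded purely as
 * a state change; the counter-command is issued from the FWRESP handler,
 * never directly from the transition.
 */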
static void
bna_llport_sm_stopped_entry(struct bna_llport *llport)
{
	llport->bna->port.link_cbfn((llport)->bna->bnad, BNA_LINK_DOWN);
	call_llport_stop_cbfn(llport, BNA_CB_SUCCESS);
}

static void
bna_llport_sm_stopped(struct bna_llport *llport,
			enum bna_llport_event event)
{
	switch (event) {
	case LLPORT_E_START:
		bfa_fsm_set_state(llport, bna_llport_sm_down);
		break;

	case LLPORT_E_STOP:
		call_llport_stop_cbfn(llport, BNA_CB_SUCCESS);
		break;

	case LLPORT_E_FAIL:
		break;

	case LLPORT_E_DOWN:
		/* This event is received due to Rx objects failing */
		/* No-op */
		break;

	case LLPORT_E_FWRESP_UP:
	case LLPORT_E_FWRESP_DOWN:
		/**
		 * These events are received due to flushing of mbox when
		 * device fails
		 */
		/* No-op */
		break;

	default:
		bfa_sm_fault(llport->bna, event);
	}
}
static void
bna_llport_sm_down_entry(struct bna_llport *llport)
{
	bnad_cb_port_link_status((llport)->bna->bnad, BNA_LINK_DOWN);
}

static void
bna_llport_sm_down(struct bna_llport *llport,
			enum bna_llport_event event)
{
	switch (event) {
	case LLPORT_E_STOP:
		bfa_fsm_set_state(llport, bna_llport_sm_stopped);
		break;

	case LLPORT_E_FAIL:
		bfa_fsm_set_state(llport, bna_llport_sm_stopped);
		break;

	case LLPORT_E_UP:
		bfa_fsm_set_state(llport, bna_llport_sm_up_resp_wait);
		bna_fw_llport_up(llport);
		break;

	default:
		bfa_sm_fault(llport->bna, event);
	}
}
static void
bna_llport_sm_up_resp_wait_entry(struct bna_llport *llport)
{
	/**
	 * NOTE: Do not call bna_fw_llport_up() here. That will over step
	 * mbox due to down_resp_wait -> up_resp_wait transition on event
	 * LLPORT_E_UP
	 */
}

static void
bna_llport_sm_up_resp_wait(struct bna_llport *llport,
			enum bna_llport_event event)
{
	switch (event) {
	case LLPORT_E_STOP:
		bfa_fsm_set_state(llport, bna_llport_sm_last_resp_wait);
		break;

	case LLPORT_E_FAIL:
		bfa_fsm_set_state(llport, bna_llport_sm_stopped);
		break;

	case LLPORT_E_DOWN:
		bfa_fsm_set_state(llport, bna_llport_sm_down_resp_wait);
		break;

	case LLPORT_E_FWRESP_UP:
		bfa_fsm_set_state(llport, bna_llport_sm_up);
		break;

	case LLPORT_E_FWRESP_DOWN:
		/* down_resp_wait -> up_resp_wait transition on LLPORT_E_UP */
		bna_fw_llport_up(llport);
		break;

	default:
		bfa_sm_fault(llport->bna, event);
	}
}
static void
bna_llport_sm_down_resp_wait_entry(struct bna_llport *llport)
{
	/**
	 * NOTE: Do not call bna_fw_llport_down() here. That will over step
	 * mbox due to up_resp_wait -> down_resp_wait transition on event
	 * LLPORT_E_DOWN
	 */
}

static void
bna_llport_sm_down_resp_wait(struct bna_llport *llport,
			enum bna_llport_event event)
{
	switch (event) {
	case LLPORT_E_STOP:
		bfa_fsm_set_state(llport, bna_llport_sm_last_resp_wait);
		break;

	case LLPORT_E_FAIL:
		bfa_fsm_set_state(llport, bna_llport_sm_stopped);
		break;

	case LLPORT_E_UP:
		bfa_fsm_set_state(llport, bna_llport_sm_up_resp_wait);
		break;

	case LLPORT_E_FWRESP_UP:
		/* up_resp_wait->down_resp_wait transition on LLPORT_E_DOWN */
		bna_fw_llport_down(llport);
		break;

	case LLPORT_E_FWRESP_DOWN:
		bfa_fsm_set_state(llport, bna_llport_sm_down);
		break;

	default:
		bfa_sm_fault(llport->bna, event);
	}
}
static void
bna_llport_sm_up_entry(struct bna_llport *llport)
{
}

static void
bna_llport_sm_up(struct bna_llport *llport,
			enum bna_llport_event event)
{
	switch (event) {
	case LLPORT_E_STOP:
		bfa_fsm_set_state(llport, bna_llport_sm_last_resp_wait);
		bna_fw_llport_down(llport);
		break;

	case LLPORT_E_FAIL:
		bfa_fsm_set_state(llport, bna_llport_sm_stopped);
		break;

	case LLPORT_E_DOWN:
		bfa_fsm_set_state(llport, bna_llport_sm_down_resp_wait);
		bna_fw_llport_down(llport);
		break;

	default:
		bfa_sm_fault(llport->bna, event);
	}
}
static void
bna_llport_sm_last_resp_wait_entry(struct bna_llport *llport)
{
}

static void
bna_llport_sm_last_resp_wait(struct bna_llport *llport,
			enum bna_llport_event event)
{
	switch (event) {
	case LLPORT_E_FAIL:
		bfa_fsm_set_state(llport, bna_llport_sm_stopped);
		break;

	case LLPORT_E_DOWN:
		/**
		 * This event is received due to Rx objects stopping in
		 * parallel to llport
		 */
		/* No-op */
		break;

	case LLPORT_E_FWRESP_UP:
		/* up_resp_wait->last_resp_wait transition on LLPORT_E_STOP */
		bna_fw_llport_down(llport);
		break;

	case LLPORT_E_FWRESP_DOWN:
		bfa_fsm_set_state(llport, bna_llport_sm_stopped);
		break;

	default:
		bfa_sm_fault(llport->bna, event);
	}
}
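/*
 * Firmware plumbing for the llport FSM: an admin up/down request is issued
 * to firmware only for BNA_PORT_T_REGULAR ports, and the completion
 * callbacks feed LLPORT_E_FWRESP_* events back into the FSM once the
 * mailbox entry has been recycled.
 */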
static void
bna_fw_llport_admin_up(struct bna_llport *llport)
{
	struct bfi_ll_port_admin_req ll_req;

	memset(&ll_req, 0, sizeof(ll_req));
	ll_req.mh.msg_class = BFI_MC_LL;
	ll_req.mh.msg_id = BFI_LL_H2I_PORT_ADMIN_REQ;
	ll_req.mh.mtag.h2i.lpu_id = 0;

	ll_req.up = BNA_STATUS_T_ENABLED;

	bna_mbox_qe_fill(&llport->mbox_qe, &ll_req, sizeof(ll_req),
			bna_fw_cb_llport_up, llport);

	bna_mbox_send(llport->bna, &llport->mbox_qe);
}

static void
bna_fw_llport_up(struct bna_llport *llport)
{
	if (llport->type == BNA_PORT_T_REGULAR)
		bna_fw_llport_admin_up(llport);
}

static void
bna_fw_cb_llport_up(void *arg, int status)
{
	struct bna_llport *llport = (struct bna_llport *)arg;

	bfa_q_qe_init(&llport->mbox_qe.qe);
	bfa_fsm_send_event(llport, LLPORT_E_FWRESP_UP);
}
static void
bna_fw_llport_admin_down(struct bna_llport *llport)
{
	struct bfi_ll_port_admin_req ll_req;

	memset(&ll_req, 0, sizeof(ll_req));
	ll_req.mh.msg_class = BFI_MC_LL;
	ll_req.mh.msg_id = BFI_LL_H2I_PORT_ADMIN_REQ;
	ll_req.mh.mtag.h2i.lpu_id = 0;

	ll_req.up = BNA_STATUS_T_DISABLED;

	bna_mbox_qe_fill(&llport->mbox_qe, &ll_req, sizeof(ll_req),
			bna_fw_cb_llport_down, llport);

	bna_mbox_send(llport->bna, &llport->mbox_qe);
}

static void
bna_fw_llport_down(struct bna_llport *llport)
{
	if (llport->type == BNA_PORT_T_REGULAR)
		bna_fw_llport_admin_down(llport);
}

static void
bna_fw_cb_llport_down(void *arg, int status)
{
	struct bna_llport *llport = (struct bna_llport *)arg;

	bfa_q_qe_init(&llport->mbox_qe.qe);
	bfa_fsm_send_event(llport, LLPORT_E_FWRESP_DOWN);
}
static void
bna_port_cb_llport_stopped(struct bna_port *port,
				enum bna_cb_status status)
{
	bfa_wc_down(&port->chld_stop_wc);
}

static void
bna_llport_init(struct bna_llport *llport, struct bna *bna)
{
	llport->flags |= BNA_LLPORT_F_ENABLED;
	llport->type = BNA_PORT_T_REGULAR;

	llport->link_status = BNA_LINK_DOWN;

	llport->admin_up_count = 0;

	llport->stop_cbfn = NULL;

	llport->bna = bna;

	bfa_q_qe_init(&llport->mbox_qe.qe);

	bfa_fsm_set_state(llport, bna_llport_sm_stopped);
}
static void
bna_llport_uninit(struct bna_llport *llport)
{
	llport->flags &= ~BNA_LLPORT_F_ENABLED;

	llport->bna = NULL;
}

static void
bna_llport_start(struct bna_llport *llport)
{
	bfa_fsm_send_event(llport, LLPORT_E_START);
}

static void
bna_llport_stop(struct bna_llport *llport)
{
	llport->stop_cbfn = bna_port_cb_llport_stopped;

	bfa_fsm_send_event(llport, LLPORT_E_STOP);
}

static void
bna_llport_fail(struct bna_llport *llport)
{
	bfa_fsm_send_event(llport, LLPORT_E_FAIL);
}

static int
bna_llport_state_get(struct bna_llport *llport)
{
	return bfa_sm_to_state(llport_sm_table, llport->fsm);
}
void
bna_llport_admin_up(struct bna_llport *llport)
{
	llport->admin_up_count++;

	if (llport->admin_up_count == 1) {
		llport->flags |= BNA_LLPORT_F_RX_ENABLED;
		if (llport->flags & BNA_LLPORT_F_ENABLED)
			bfa_fsm_send_event(llport, LLPORT_E_UP);
	}
}

void
bna_llport_admin_down(struct bna_llport *llport)
{
	llport->admin_up_count--;

	if (llport->admin_up_count == 0) {
		llport->flags &= ~BNA_LLPORT_F_RX_ENABLED;
		if (llport->flags & BNA_LLPORT_F_ENABLED)
			bfa_fsm_send_event(llport, LLPORT_E_DOWN);
	}
}
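/**
 * PORT
 */
/*
 * The bna_port_chld_* macros below fan start/stop/fail out to the port's
 * three child objects (llport, tx_mod, rx_mod); bna_port_chld_stop() arms
 * chld_stop_wc with one count per child so PORT_E_CHLD_STOPPED fires only
 * after all three have reported stopped.
 */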
#define bna_port_chld_start(port)\
do {\
	enum bna_tx_type tx_type = ((port)->type == BNA_PORT_T_REGULAR) ?\
					BNA_TX_T_REGULAR : BNA_TX_T_LOOPBACK;\
	enum bna_rx_type rx_type = ((port)->type == BNA_PORT_T_REGULAR) ?\
					BNA_RX_T_REGULAR : BNA_RX_T_LOOPBACK;\
	bna_llport_start(&(port)->llport);\
	bna_tx_mod_start(&(port)->bna->tx_mod, tx_type);\
	bna_rx_mod_start(&(port)->bna->rx_mod, rx_type);\
} while (0)

#define bna_port_chld_stop(port)\
do {\
	enum bna_tx_type tx_type = ((port)->type == BNA_PORT_T_REGULAR) ?\
					BNA_TX_T_REGULAR : BNA_TX_T_LOOPBACK;\
	enum bna_rx_type rx_type = ((port)->type == BNA_PORT_T_REGULAR) ?\
					BNA_RX_T_REGULAR : BNA_RX_T_LOOPBACK;\
	bfa_wc_up(&(port)->chld_stop_wc);\
	bfa_wc_up(&(port)->chld_stop_wc);\
	bfa_wc_up(&(port)->chld_stop_wc);\
	bna_llport_stop(&(port)->llport);\
	bna_tx_mod_stop(&(port)->bna->tx_mod, tx_type);\
	bna_rx_mod_stop(&(port)->bna->rx_mod, rx_type);\
} while (0)

#define bna_port_chld_fail(port)\
do {\
	bna_llport_fail(&(port)->llport);\
	bna_tx_mod_fail(&(port)->bna->tx_mod);\
	bna_rx_mod_fail(&(port)->bna->rx_mod);\
} while (0)

#define bna_port_rx_start(port)\
do {\
	enum bna_rx_type rx_type = ((port)->type == BNA_PORT_T_REGULAR) ?\
					BNA_RX_T_REGULAR : BNA_RX_T_LOOPBACK;\
	bna_rx_mod_start(&(port)->bna->rx_mod, rx_type);\
} while (0)

#define bna_port_rx_stop(port)\
do {\
	enum bna_rx_type rx_type = ((port)->type == BNA_PORT_T_REGULAR) ?\
					BNA_RX_T_REGULAR : BNA_RX_T_LOOPBACK;\
	bfa_wc_up(&(port)->chld_stop_wc);\
	bna_rx_mod_stop(&(port)->bna->rx_mod, rx_type);\
} while (0)

#define call_port_stop_cbfn(port, status)\
do {\
	if ((port)->stop_cbfn)\
		(port)->stop_cbfn((port)->stop_cbarg, status);\
	(port)->stop_cbfn = NULL;\
	(port)->stop_cbarg = NULL;\
} while (0)

#define call_port_pause_cbfn(port, status)\
do {\
	if ((port)->pause_cbfn)\
		(port)->pause_cbfn((port)->bna->bnad, status);\
	(port)->pause_cbfn = NULL;\
} while (0)

#define call_port_mtu_cbfn(port, status)\
do {\
	if ((port)->mtu_cbfn)\
		(port)->mtu_cbfn((port)->bna->bnad, status);\
	(port)->mtu_cbfn = NULL;\
} while (0)

static void bna_fw_pause_set(struct bna_port *port);
static void bna_fw_cb_pause_set(void *arg, int status);
static void bna_fw_mtu_set(struct bna_port *port);
static void bna_fw_cb_mtu_set(void *arg, int status);
static void bna_device_cb_port_stopped(void *arg, enum bna_cb_status status);

enum bna_port_event {
	PORT_E_START		= 1,
	PORT_E_STOP		= 2,
	PORT_E_FAIL		= 3,
	PORT_E_PAUSE_CFG	= 4,
	PORT_E_MTU_CFG		= 5,
	PORT_E_CHLD_STOPPED	= 6,
	PORT_E_FWRESP_PAUSE	= 7,
	PORT_E_FWRESP_MTU	= 8
};
enum bna_port_state {
	BNA_PORT_STOPPED		= 1,
	BNA_PORT_MTU_INIT_WAIT		= 2,
	BNA_PORT_PAUSE_INIT_WAIT	= 3,
	BNA_PORT_LAST_RESP_WAIT		= 4,
	BNA_PORT_STARTED		= 5,
	BNA_PORT_PAUSE_CFG_WAIT		= 6,
	BNA_PORT_RX_STOP_WAIT		= 7,
	BNA_PORT_MTU_CFG_WAIT		= 8,
	BNA_PORT_CHLD_STOP_WAIT		= 9
};

bfa_fsm_state_decl(bna_port, stopped, struct bna_port,
			enum bna_port_event);
bfa_fsm_state_decl(bna_port, mtu_init_wait, struct bna_port,
			enum bna_port_event);
bfa_fsm_state_decl(bna_port, pause_init_wait, struct bna_port,
			enum bna_port_event);
bfa_fsm_state_decl(bna_port, last_resp_wait, struct bna_port,
			enum bna_port_event);
bfa_fsm_state_decl(bna_port, started, struct bna_port,
			enum bna_port_event);
bfa_fsm_state_decl(bna_port, pause_cfg_wait, struct bna_port,
			enum bna_port_event);
bfa_fsm_state_decl(bna_port, rx_stop_wait, struct bna_port,
			enum bna_port_event);
bfa_fsm_state_decl(bna_port, mtu_cfg_wait, struct bna_port,
			enum bna_port_event);
bfa_fsm_state_decl(bna_port, chld_stop_wait, struct bna_port,
			enum bna_port_event);
static struct bfa_sm_table port_sm_table[] = {
	{BFA_SM(bna_port_sm_stopped), BNA_PORT_STOPPED},
	{BFA_SM(bna_port_sm_mtu_init_wait), BNA_PORT_MTU_INIT_WAIT},
	{BFA_SM(bna_port_sm_pause_init_wait), BNA_PORT_PAUSE_INIT_WAIT},
	{BFA_SM(bna_port_sm_last_resp_wait), BNA_PORT_LAST_RESP_WAIT},
	{BFA_SM(bna_port_sm_started), BNA_PORT_STARTED},
	{BFA_SM(bna_port_sm_pause_cfg_wait), BNA_PORT_PAUSE_CFG_WAIT},
	{BFA_SM(bna_port_sm_rx_stop_wait), BNA_PORT_RX_STOP_WAIT},
	{BFA_SM(bna_port_sm_mtu_cfg_wait), BNA_PORT_MTU_CFG_WAIT},
	{BFA_SM(bna_port_sm_chld_stop_wait), BNA_PORT_CHLD_STOP_WAIT}
};
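/*
 * Note on the port FSM: bring-up is sequenced as MTU configuration
 * (mtu_init_wait), then pause configuration (pause_init_wait), and only
 * then are the child objects started. A pause or MTU change arriving while
 * the matching firmware response is still pending is latched in
 * port->flags and replayed from the FWRESP handler.
 */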
static void
bna_port_sm_stopped_entry(struct bna_port *port)
{
	call_port_pause_cbfn(port, BNA_CB_SUCCESS);
	call_port_mtu_cbfn(port, BNA_CB_SUCCESS);
	call_port_stop_cbfn(port, BNA_CB_SUCCESS);
}

static void
bna_port_sm_stopped(struct bna_port *port, enum bna_port_event event)
{
	switch (event) {
	case PORT_E_START:
		bfa_fsm_set_state(port, bna_port_sm_mtu_init_wait);
		break;

	case PORT_E_STOP:
		call_port_stop_cbfn(port, BNA_CB_SUCCESS);
		break;

	case PORT_E_FAIL:
		break;

	case PORT_E_PAUSE_CFG:
		call_port_pause_cbfn(port, BNA_CB_SUCCESS);
		break;

	case PORT_E_MTU_CFG:
		call_port_mtu_cbfn(port, BNA_CB_SUCCESS);
		break;

	case PORT_E_CHLD_STOPPED:
		/**
		 * This event is received due to LLPort, Tx and Rx objects
		 * failing
		 */
		/* No-op */
		break;

	case PORT_E_FWRESP_PAUSE:
	case PORT_E_FWRESP_MTU:
		/**
		 * These events are received due to flushing of mbox when
		 * device fails
		 */
		/* No-op */
		break;

	default:
		bfa_sm_fault(port->bna, event);
	}
}
static void
bna_port_sm_mtu_init_wait_entry(struct bna_port *port)
{
	bna_fw_mtu_set(port);
}

static void
bna_port_sm_mtu_init_wait(struct bna_port *port, enum bna_port_event event)
{
	switch (event) {
	case PORT_E_STOP:
		bfa_fsm_set_state(port, bna_port_sm_last_resp_wait);
		break;

	case PORT_E_FAIL:
		bfa_fsm_set_state(port, bna_port_sm_stopped);
		break;

	case PORT_E_PAUSE_CFG:
		/* No-op */
		break;

	case PORT_E_MTU_CFG:
		port->flags |= BNA_PORT_F_MTU_CHANGED;
		break;

	case PORT_E_FWRESP_MTU:
		if (port->flags & BNA_PORT_F_MTU_CHANGED) {
			port->flags &= ~BNA_PORT_F_MTU_CHANGED;
			bna_fw_mtu_set(port);
		} else {
			bfa_fsm_set_state(port, bna_port_sm_pause_init_wait);
		}
		break;

	default:
		bfa_sm_fault(port->bna, event);
	}
}
static void
bna_port_sm_pause_init_wait_entry(struct bna_port *port)
{
	bna_fw_pause_set(port);
}

static void
bna_port_sm_pause_init_wait(struct bna_port *port,
				enum bna_port_event event)
{
	switch (event) {
	case PORT_E_STOP:
		bfa_fsm_set_state(port, bna_port_sm_last_resp_wait);
		break;

	case PORT_E_FAIL:
		bfa_fsm_set_state(port, bna_port_sm_stopped);
		break;

	case PORT_E_PAUSE_CFG:
		port->flags |= BNA_PORT_F_PAUSE_CHANGED;
		break;

	case PORT_E_MTU_CFG:
		port->flags |= BNA_PORT_F_MTU_CHANGED;
		break;

	case PORT_E_FWRESP_PAUSE:
		if (port->flags & BNA_PORT_F_PAUSE_CHANGED) {
			port->flags &= ~BNA_PORT_F_PAUSE_CHANGED;
			bna_fw_pause_set(port);
		} else if (port->flags & BNA_PORT_F_MTU_CHANGED) {
			port->flags &= ~BNA_PORT_F_MTU_CHANGED;
			bfa_fsm_set_state(port, bna_port_sm_mtu_init_wait);
		} else {
			bfa_fsm_set_state(port, bna_port_sm_started);
			bna_port_chld_start(port);
		}
		break;

	default:
		bfa_sm_fault(port->bna, event);
	}
}
static void
bna_port_sm_last_resp_wait_entry(struct bna_port *port)
{
}

static void
bna_port_sm_last_resp_wait(struct bna_port *port,
				enum bna_port_event event)
{
	switch (event) {
	case PORT_E_FAIL:
	case PORT_E_FWRESP_PAUSE:
	case PORT_E_FWRESP_MTU:
		bfa_fsm_set_state(port, bna_port_sm_stopped);
		break;

	default:
		bfa_sm_fault(port->bna, event);
	}
}
static void
bna_port_sm_started_entry(struct bna_port *port)
{
	/**
	 * NOTE: Do not call bna_port_chld_start() here, since it will be
	 * inadvertently called during pause_cfg_wait->started transition
	 * also
	 */
	call_port_pause_cbfn(port, BNA_CB_SUCCESS);
	call_port_mtu_cbfn(port, BNA_CB_SUCCESS);
}

static void
bna_port_sm_started(struct bna_port *port,
			enum bna_port_event event)
{
	switch (event) {
	case PORT_E_STOP:
		bfa_fsm_set_state(port, bna_port_sm_chld_stop_wait);
		break;

	case PORT_E_FAIL:
		bfa_fsm_set_state(port, bna_port_sm_stopped);
		bna_port_chld_fail(port);
		break;

	case PORT_E_PAUSE_CFG:
		bfa_fsm_set_state(port, bna_port_sm_pause_cfg_wait);
		break;

	case PORT_E_MTU_CFG:
		bfa_fsm_set_state(port, bna_port_sm_rx_stop_wait);
		break;

	default:
		bfa_sm_fault(port->bna, event);
	}
}
static void
bna_port_sm_pause_cfg_wait_entry(struct bna_port *port)
{
	bna_fw_pause_set(port);
}

static void
bna_port_sm_pause_cfg_wait(struct bna_port *port,
				enum bna_port_event event)
{
	switch (event) {
	case PORT_E_FAIL:
		bfa_fsm_set_state(port, bna_port_sm_stopped);
		bna_port_chld_fail(port);
		break;

	case PORT_E_FWRESP_PAUSE:
		bfa_fsm_set_state(port, bna_port_sm_started);
		break;

	default:
		bfa_sm_fault(port->bna, event);
	}
}
static void
bna_port_sm_rx_stop_wait_entry(struct bna_port *port)
{
	bna_port_rx_stop(port);
}

static void
bna_port_sm_rx_stop_wait(struct bna_port *port,
				enum bna_port_event event)
{
	switch (event) {
	case PORT_E_FAIL:
		bfa_fsm_set_state(port, bna_port_sm_stopped);
		bna_port_chld_fail(port);
		break;

	case PORT_E_CHLD_STOPPED:
		bfa_fsm_set_state(port, bna_port_sm_mtu_cfg_wait);
		break;

	default:
		bfa_sm_fault(port->bna, event);
	}
}
static void
bna_port_sm_mtu_cfg_wait_entry(struct bna_port *port)
{
	bna_fw_mtu_set(port);
}

static void
bna_port_sm_mtu_cfg_wait(struct bna_port *port, enum bna_port_event event)
{
	switch (event) {
	case PORT_E_FAIL:
		bfa_fsm_set_state(port, bna_port_sm_stopped);
		bna_port_chld_fail(port);
		break;

	case PORT_E_FWRESP_MTU:
		bfa_fsm_set_state(port, bna_port_sm_started);
		bna_port_rx_start(port);
		break;

	default:
		bfa_sm_fault(port->bna, event);
	}
}
static void
bna_port_sm_chld_stop_wait_entry(struct bna_port *port)
{
	bna_port_chld_stop(port);
}

static void
bna_port_sm_chld_stop_wait(struct bna_port *port,
				enum bna_port_event event)
{
	switch (event) {
	case PORT_E_FAIL:
		bfa_fsm_set_state(port, bna_port_sm_stopped);
		bna_port_chld_fail(port);
		break;

	case PORT_E_CHLD_STOPPED:
		bfa_fsm_set_state(port, bna_port_sm_stopped);
		break;

	default:
		bfa_sm_fault(port->bna, event);
	}
}
static void
bna_fw_pause_set(struct bna_port *port)
{
	struct bfi_ll_set_pause_req ll_req;

	memset(&ll_req, 0, sizeof(ll_req));
	ll_req.mh.msg_class = BFI_MC_LL;
	ll_req.mh.msg_id = BFI_LL_H2I_SET_PAUSE_REQ;
	ll_req.mh.mtag.h2i.lpu_id = 0;

	ll_req.tx_pause = port->pause_config.tx_pause;
	ll_req.rx_pause = port->pause_config.rx_pause;

	bna_mbox_qe_fill(&port->mbox_qe, &ll_req, sizeof(ll_req),
			bna_fw_cb_pause_set, port);

	bna_mbox_send(port->bna, &port->mbox_qe);
}

static void
bna_fw_cb_pause_set(void *arg, int status)
{
	struct bna_port *port = (struct bna_port *)arg;

	bfa_q_qe_init(&port->mbox_qe.qe);
	bfa_fsm_send_event(port, PORT_E_FWRESP_PAUSE);
}
static void
bna_fw_mtu_set(struct bna_port *port)
{
	struct bfi_ll_mtu_info_req ll_req;

	bfi_h2i_set(ll_req.mh, BFI_MC_LL, BFI_LL_H2I_MTU_INFO_REQ, 0);
	ll_req.mtu = htons((u16)port->mtu);

	bna_mbox_qe_fill(&port->mbox_qe, &ll_req, sizeof(ll_req),
				bna_fw_cb_mtu_set, port);
	bna_mbox_send(port->bna, &port->mbox_qe);
}

static void
bna_fw_cb_mtu_set(void *arg, int status)
{
	struct bna_port *port = (struct bna_port *)arg;

	bfa_q_qe_init(&port->mbox_qe.qe);
	bfa_fsm_send_event(port, PORT_E_FWRESP_MTU);
}
static void
bna_port_cb_chld_stopped(void *arg)
{
	struct bna_port *port = (struct bna_port *)arg;

	bfa_fsm_send_event(port, PORT_E_CHLD_STOPPED);
}
static void
bna_port_init(struct bna_port *port, struct bna *bna)
{
	port->bna = bna;
	port->flags = 0;
	port->mtu = 0;
	port->type = BNA_PORT_T_REGULAR;

	port->link_cbfn = bnad_cb_port_link_status;

	port->chld_stop_wc.wc_resume = bna_port_cb_chld_stopped;
	port->chld_stop_wc.wc_cbarg = port;
	port->chld_stop_wc.wc_count = 0;

	port->stop_cbfn = NULL;
	port->stop_cbarg = NULL;

	port->pause_cbfn = NULL;

	port->mtu_cbfn = NULL;

	bfa_q_qe_init(&port->mbox_qe.qe);

	bfa_fsm_set_state(port, bna_port_sm_stopped);

	bna_llport_init(&port->llport, bna);
}

static void
bna_port_uninit(struct bna_port *port)
{
	bna_llport_uninit(&port->llport);

	port->flags = 0;

	port->bna = NULL;
}

static int
bna_port_state_get(struct bna_port *port)
{
	return bfa_sm_to_state(port_sm_table, port->fsm);
}
static void
bna_port_start(struct bna_port *port)
{
	port->flags |= BNA_PORT_F_DEVICE_READY;
	if (port->flags & BNA_PORT_F_ENABLED)
		bfa_fsm_send_event(port, PORT_E_START);
}

static void
bna_port_stop(struct bna_port *port)
{
	port->stop_cbfn = bna_device_cb_port_stopped;
	port->stop_cbarg = &port->bna->device;

	port->flags &= ~BNA_PORT_F_DEVICE_READY;
	bfa_fsm_send_event(port, PORT_E_STOP);
}

static void
bna_port_fail(struct bna_port *port)
{
	port->flags &= ~BNA_PORT_F_DEVICE_READY;
	bfa_fsm_send_event(port, PORT_E_FAIL);
}
void
bna_port_cb_tx_stopped(struct bna_port *port, enum bna_cb_status status)
{
	bfa_wc_down(&port->chld_stop_wc);
}

void
bna_port_cb_rx_stopped(struct bna_port *port, enum bna_cb_status status)
{
	bfa_wc_down(&port->chld_stop_wc);
}
static void
bna_port_cb_link_up(struct bna_port *port, struct bfi_ll_aen *aen,
			int status)
{
	int i;
	u8 prio_map;

	port->llport.link_status = BNA_LINK_UP;
	if (aen->cee_linkup)
		port->llport.link_status = BNA_CEE_UP;

	/* Compute the priority */
	prio_map = aen->prio_map;
	if (prio_map) {
		for (i = 0; i < 8; i++) {
			if ((prio_map >> i) & 0x1)
				break;
		}
		port->priority = i;
	} else
		port->priority = 0;

	/* Dispatch events */
	bna_tx_mod_cee_link_status(&port->bna->tx_mod, aen->cee_linkup);
	bna_tx_mod_prio_changed(&port->bna->tx_mod, port->priority);
	port->link_cbfn(port->bna->bnad, port->llport.link_status);
}

static void
bna_port_cb_link_down(struct bna_port *port, int status)
{
	port->llport.link_status = BNA_LINK_DOWN;

	/* Dispatch events */
	bna_tx_mod_cee_link_status(&port->bna->tx_mod, BNA_LINK_DOWN);
	port->link_cbfn(port->bna->bnad, BNA_LINK_DOWN);
}
int
bna_port_mtu_get(struct bna_port *port)
{
	return port->mtu;
}

void
bna_port_enable(struct bna_port *port)
{
	if (port->fsm != (bfa_sm_t)bna_port_sm_stopped)
		return;

	port->flags |= BNA_PORT_F_ENABLED;

	if (port->flags & BNA_PORT_F_DEVICE_READY)
		bfa_fsm_send_event(port, PORT_E_START);
}
void
bna_port_disable(struct bna_port *port, enum bna_cleanup_type type,
		 void (*cbfn)(void *, enum bna_cb_status))
{
	if (type == BNA_SOFT_CLEANUP) {
		(*cbfn)(port->bna->bnad, BNA_CB_SUCCESS);
		return;
	}

	port->stop_cbfn = cbfn;
	port->stop_cbarg = port->bna->bnad;

	port->flags &= ~BNA_PORT_F_ENABLED;

	bfa_fsm_send_event(port, PORT_E_STOP);
}

void
bna_port_pause_config(struct bna_port *port,
		      struct bna_pause_config *pause_config,
		      void (*cbfn)(struct bnad *, enum bna_cb_status))
{
	port->pause_config = *pause_config;

	port->pause_cbfn = cbfn;

	bfa_fsm_send_event(port, PORT_E_PAUSE_CFG);
}

void
bna_port_mtu_set(struct bna_port *port, int mtu,
		 void (*cbfn)(struct bnad *, enum bna_cb_status))
{
	port->mtu = mtu;

	port->mtu_cbfn = cbfn;

	bfa_fsm_send_event(port, PORT_E_MTU_CFG);
}
void
bna_port_mac_get(struct bna_port *port, mac_t *mac)
{
	*mac = bfa_ioc_get_mac(&port->bna->device.ioc);
}

/**
 * Should be called only when port is disabled
 */
void
bna_port_type_set(struct bna_port *port, enum bna_port_type type)
{
	port->type = type;
	port->llport.type = type;
}

/**
 * Should be called only when port is disabled
 */
void
bna_port_linkcbfn_set(struct bna_port *port,
		      void (*linkcbfn)(struct bnad *, enum bna_link_status))
{
	port->link_cbfn = linkcbfn;
}
void
bna_port_admin_up(struct bna_port *port)
{
	struct bna_llport *llport = &port->llport;

	if (llport->flags & BNA_LLPORT_F_ENABLED)
		return;

	llport->flags |= BNA_LLPORT_F_ENABLED;

	if (llport->flags & BNA_LLPORT_F_RX_ENABLED)
		bfa_fsm_send_event(llport, LLPORT_E_UP);
}

void
bna_port_admin_down(struct bna_port *port)
{
	struct bna_llport *llport = &port->llport;

	if (!(llport->flags & BNA_LLPORT_F_ENABLED))
		return;

	llport->flags &= ~BNA_LLPORT_F_ENABLED;

	if (llport->flags & BNA_LLPORT_F_RX_ENABLED)
		bfa_fsm_send_event(llport, LLPORT_E_DOWN);
}
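/**
 * DEVICE
 */
/*
 * The device FSM below wraps IOC enable/disable: mailbox interrupts are
 * enabled on DEVICE_E_IOC_RESET, the mailbox module and port are started
 * once the IOC reports ready, and both are torn down again on IOC failure
 * or disable.
 */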
#define enable_mbox_intr(_device)\
do {\
	u32 intr_status;\
	bna_intr_status_get((_device)->bna, intr_status);\
	bnad_cb_device_enable_mbox_intr((_device)->bna->bnad);\
	bna_mbox_intr_enable((_device)->bna);\
} while (0)

#define disable_mbox_intr(_device)\
do {\
	bna_mbox_intr_disable((_device)->bna);\
	bnad_cb_device_disable_mbox_intr((_device)->bna->bnad);\
} while (0)

const struct bna_chip_regs_offset reg_offset[] =
{{HOST_PAGE_NUM_FN0, HOSTFN0_INT_STATUS,
	HOSTFN0_INT_MASK, HOST_MSIX_ERR_INDEX_FN0},
{HOST_PAGE_NUM_FN1, HOSTFN1_INT_STATUS,
	HOSTFN1_INT_MASK, HOST_MSIX_ERR_INDEX_FN1},
{HOST_PAGE_NUM_FN2, HOSTFN2_INT_STATUS,
	HOSTFN2_INT_MASK, HOST_MSIX_ERR_INDEX_FN2},
{HOST_PAGE_NUM_FN3, HOSTFN3_INT_STATUS,
	HOSTFN3_INT_MASK, HOST_MSIX_ERR_INDEX_FN3},
};
enum bna_device_event {
	DEVICE_E_ENABLE		= 1,
	DEVICE_E_DISABLE	= 2,
	DEVICE_E_IOC_READY	= 3,
	DEVICE_E_IOC_FAILED	= 4,
	DEVICE_E_IOC_DISABLED	= 5,
	DEVICE_E_IOC_RESET	= 6,
	DEVICE_E_PORT_STOPPED	= 7,
};

enum bna_device_state {
	BNA_DEVICE_STOPPED		= 1,
	BNA_DEVICE_IOC_READY_WAIT	= 2,
	BNA_DEVICE_READY		= 3,
	BNA_DEVICE_PORT_STOP_WAIT	= 4,
	BNA_DEVICE_IOC_DISABLE_WAIT	= 5,
	BNA_DEVICE_FAILED		= 6
};

bfa_fsm_state_decl(bna_device, stopped, struct bna_device,
			enum bna_device_event);
bfa_fsm_state_decl(bna_device, ioc_ready_wait, struct bna_device,
			enum bna_device_event);
bfa_fsm_state_decl(bna_device, ready, struct bna_device,
			enum bna_device_event);
bfa_fsm_state_decl(bna_device, port_stop_wait, struct bna_device,
			enum bna_device_event);
bfa_fsm_state_decl(bna_device, ioc_disable_wait, struct bna_device,
			enum bna_device_event);
bfa_fsm_state_decl(bna_device, failed, struct bna_device,
			enum bna_device_event);
static struct bfa_sm_table device_sm_table[] = {
	{BFA_SM(bna_device_sm_stopped), BNA_DEVICE_STOPPED},
	{BFA_SM(bna_device_sm_ioc_ready_wait), BNA_DEVICE_IOC_READY_WAIT},
	{BFA_SM(bna_device_sm_ready), BNA_DEVICE_READY},
	{BFA_SM(bna_device_sm_port_stop_wait), BNA_DEVICE_PORT_STOP_WAIT},
	{BFA_SM(bna_device_sm_ioc_disable_wait), BNA_DEVICE_IOC_DISABLE_WAIT},
	{BFA_SM(bna_device_sm_failed), BNA_DEVICE_FAILED},
};
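/*
 * Device FSM notes: enable flows stopped -> ioc_ready_wait -> ready;
 * disable flows ready -> port_stop_wait -> ioc_disable_wait -> stopped.
 * An IOC failure from any operational state lands in failed, from which
 * DEVICE_E_IOC_RESET re-enters ioc_ready_wait.
 */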
static void
bna_device_sm_stopped_entry(struct bna_device *device)
{
	if (device->stop_cbfn)
		device->stop_cbfn(device->stop_cbarg, BNA_CB_SUCCESS);

	device->stop_cbfn = NULL;
	device->stop_cbarg = NULL;
}

static void
bna_device_sm_stopped(struct bna_device *device,
			enum bna_device_event event)
{
	switch (event) {
	case DEVICE_E_ENABLE:
		if (device->intr_type == BNA_INTR_T_MSIX)
			bna_mbox_msix_idx_set(device);
		bfa_ioc_enable(&device->ioc);
		bfa_fsm_set_state(device, bna_device_sm_ioc_ready_wait);
		break;

	case DEVICE_E_DISABLE:
		bfa_fsm_set_state(device, bna_device_sm_stopped);
		break;

	case DEVICE_E_IOC_RESET:
		enable_mbox_intr(device);
		break;

	case DEVICE_E_IOC_FAILED:
		bfa_fsm_set_state(device, bna_device_sm_failed);
		break;

	default:
		bfa_sm_fault(device->bna, event);
	}
}
static void
bna_device_sm_ioc_ready_wait_entry(struct bna_device *device)
{
	/**
	 * Do not call bfa_ioc_enable() here. It must be called in the
	 * previous state due to failed -> ioc_ready_wait transition.
	 */
}

static void
bna_device_sm_ioc_ready_wait(struct bna_device *device,
				enum bna_device_event event)
{
	switch (event) {
	case DEVICE_E_DISABLE:
		if (device->ready_cbfn)
			device->ready_cbfn(device->ready_cbarg,
						BNA_CB_INTERRUPT);
		device->ready_cbfn = NULL;
		device->ready_cbarg = NULL;
		bfa_fsm_set_state(device, bna_device_sm_ioc_disable_wait);
		break;

	case DEVICE_E_IOC_READY:
		bfa_fsm_set_state(device, bna_device_sm_ready);
		break;

	case DEVICE_E_IOC_FAILED:
		bfa_fsm_set_state(device, bna_device_sm_failed);
		break;

	case DEVICE_E_IOC_RESET:
		enable_mbox_intr(device);
		break;

	default:
		bfa_sm_fault(device->bna, event);
	}
}
static void
bna_device_sm_ready_entry(struct bna_device *device)
{
	bna_mbox_mod_start(&device->bna->mbox_mod);
	bna_port_start(&device->bna->port);

	if (device->ready_cbfn)
		device->ready_cbfn(device->ready_cbarg,
					BNA_CB_SUCCESS);
	device->ready_cbfn = NULL;
	device->ready_cbarg = NULL;
}

static void
bna_device_sm_ready(struct bna_device *device, enum bna_device_event event)
{
	switch (event) {
	case DEVICE_E_DISABLE:
		bfa_fsm_set_state(device, bna_device_sm_port_stop_wait);
		break;

	case DEVICE_E_IOC_FAILED:
		bfa_fsm_set_state(device, bna_device_sm_failed);
		break;

	default:
		bfa_sm_fault(device->bna, event);
	}
}
static void
bna_device_sm_port_stop_wait_entry(struct bna_device *device)
{
	bna_port_stop(&device->bna->port);
}

static void
bna_device_sm_port_stop_wait(struct bna_device *device,
				enum bna_device_event event)
{
	switch (event) {
	case DEVICE_E_PORT_STOPPED:
		bna_mbox_mod_stop(&device->bna->mbox_mod);
		bfa_fsm_set_state(device, bna_device_sm_ioc_disable_wait);
		break;

	case DEVICE_E_IOC_FAILED:
		disable_mbox_intr(device);
		bna_port_fail(&device->bna->port);
		break;

	default:
		bfa_sm_fault(device->bna, event);
	}
}
static void
bna_device_sm_ioc_disable_wait_entry(struct bna_device *device)
{
	bfa_ioc_disable(&device->ioc);
}

static void
bna_device_sm_ioc_disable_wait(struct bna_device *device,
				enum bna_device_event event)
{
	switch (event) {
	case DEVICE_E_IOC_DISABLED:
		disable_mbox_intr(device);
		bfa_fsm_set_state(device, bna_device_sm_stopped);
		break;

	default:
		bfa_sm_fault(device->bna, event);
	}
}
static void
bna_device_sm_failed_entry(struct bna_device *device)
{
	disable_mbox_intr(device);
	bna_port_fail(&device->bna->port);
	bna_mbox_mod_stop(&device->bna->mbox_mod);

	if (device->ready_cbfn)
		device->ready_cbfn(device->ready_cbarg,
					BNA_CB_FAIL);
	device->ready_cbfn = NULL;
	device->ready_cbarg = NULL;
}

static void
bna_device_sm_failed(struct bna_device *device,
			enum bna_device_event event)
{
	switch (event) {
	case DEVICE_E_DISABLE:
		bfa_fsm_set_state(device, bna_device_sm_ioc_disable_wait);
		break;

	case DEVICE_E_IOC_RESET:
		enable_mbox_intr(device);
		bfa_fsm_set_state(device, bna_device_sm_ioc_ready_wait);
		break;

	default:
		bfa_sm_fault(device->bna, event);
	}
}
/* IOC callback functions */

static void
bna_device_cb_iocll_ready(void *dev, enum bfa_status error)
{
	struct bna_device *device = (struct bna_device *)dev;

	if (error)
		bfa_fsm_send_event(device, DEVICE_E_IOC_FAILED);
	else
		bfa_fsm_send_event(device, DEVICE_E_IOC_READY);
}

static void
bna_device_cb_iocll_disabled(void *dev)
{
	struct bna_device *device = (struct bna_device *)dev;

	bfa_fsm_send_event(device, DEVICE_E_IOC_DISABLED);
}

static void
bna_device_cb_iocll_failed(void *dev)
{
	struct bna_device *device = (struct bna_device *)dev;

	bfa_fsm_send_event(device, DEVICE_E_IOC_FAILED);
}

static void
bna_device_cb_iocll_reset(void *dev)
{
	struct bna_device *device = (struct bna_device *)dev;

	bfa_fsm_send_event(device, DEVICE_E_IOC_RESET);
}
static struct bfa_ioc_cbfn bfa_iocll_cbfn = {
	bna_device_cb_iocll_ready,
	bna_device_cb_iocll_disabled,
	bna_device_cb_iocll_failed,
	bna_device_cb_iocll_reset
};
void
bna_device_init(struct bna_device *device, struct bna *bna,
		struct bna_res_info *res_info)
{
	u64 dma;

	device->bna = bna;

	/**
	 * Attach IOC and claim:
	 *	1. DMA memory for IOC attributes
	 *	2. Kernel memory for FW trace
	 */
	bfa_ioc_attach(&device->ioc, device, &bfa_iocll_cbfn);
	bfa_ioc_pci_init(&device->ioc, &bna->pcidev, BFI_MC_LL);

	BNA_GET_DMA_ADDR(
		&res_info[BNA_RES_MEM_T_ATTR].res_u.mem_info.mdl[0].dma, dma);
	bfa_ioc_mem_claim(&device->ioc,
		res_info[BNA_RES_MEM_T_ATTR].res_u.mem_info.mdl[0].kva,
		dma);

	bna_adv_device_init(device, bna, res_info);

	/*
	 * Initialize mbox_mod only after IOC, so that mbox handler
	 * registration goes through
	 */
	device->intr_type =
		res_info[BNA_RES_INTR_T_MBOX].res_u.intr_info.intr_type;
	device->vector =
		res_info[BNA_RES_INTR_T_MBOX].res_u.intr_info.idl[0].vector;
	bna_mbox_mod_init(&bna->mbox_mod, bna);

	device->ready_cbfn = device->stop_cbfn = NULL;
	device->ready_cbarg = device->stop_cbarg = NULL;

	bfa_fsm_set_state(device, bna_device_sm_stopped);
}
void
bna_device_uninit(struct bna_device *device)
{
	bna_mbox_mod_uninit(&device->bna->mbox_mod);

	bfa_cee_detach(&device->bna->cee);

	bfa_ioc_detach(&device->ioc);

	device->bna = NULL;
}

static void
bna_device_cb_port_stopped(void *arg, enum bna_cb_status status)
{
	struct bna_device *device = (struct bna_device *)arg;

	bfa_fsm_send_event(device, DEVICE_E_PORT_STOPPED);
}

int
bna_device_status_get(struct bna_device *device)
{
	return (device->fsm == (bfa_fsm_t)bna_device_sm_ready);
}
void
bna_device_enable(struct bna_device *device)
{
	if (device->fsm != (bfa_fsm_t)bna_device_sm_stopped) {
		bnad_cb_device_enabled(device->bna->bnad, BNA_CB_BUSY);
		return;
	}

	device->ready_cbfn = bnad_cb_device_enabled;
	device->ready_cbarg = device->bna->bnad;

	bfa_fsm_send_event(device, DEVICE_E_ENABLE);
}

void
bna_device_disable(struct bna_device *device, enum bna_cleanup_type type)
{
	if (type == BNA_SOFT_CLEANUP) {
		bnad_cb_device_disabled(device->bna->bnad, BNA_CB_SUCCESS);
		return;
	}

	device->stop_cbfn = bnad_cb_device_disabled;
	device->stop_cbarg = device->bna->bnad;

	bfa_fsm_send_event(device, DEVICE_E_DISABLE);
}

int
bna_device_state_get(struct bna_device *device)
{
	return bfa_sm_to_state(device_sm_table, device->fsm);
}
u32 bna_dim_vector[BNA_LOAD_T_MAX][BNA_BIAS_T_MAX] = {
	/* ... */
};

u32 bna_napi_dim_vector[BNA_LOAD_T_MAX][BNA_BIAS_T_MAX] = {
	/* ... */
};
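/*
 * Dynamic interrupt moderation (DIM) lookup tables: rows are indexed by
 * measured traffic load, columns by small-vs-large packet bias, giving the
 * coalescing setting to apply at each level (one table for the regular
 * path, one for NAPI). The numeric rows are elided above.
 */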
static void
bna_adv_device_init(struct bna_device *device, struct bna *bna,
		struct bna_res_info *res_info)
{
	u8 *kva;
	u64 dma;

	kva = res_info[BNA_RES_MEM_T_FWTRC].res_u.mem_info.mdl[0].kva;

	/**
	 * Attach common modules (Diag, SFP, CEE, Port) and claim respective
	 * DMA memory.
	 */
	BNA_GET_DMA_ADDR(
		&res_info[BNA_RES_MEM_T_COM].res_u.mem_info.mdl[0].dma, dma);
	kva = res_info[BNA_RES_MEM_T_COM].res_u.mem_info.mdl[0].kva;

	bfa_cee_attach(&bna->cee, &device->ioc, bna);
	bfa_cee_mem_claim(&bna->cee, kva, dma);
	kva += bfa_cee_meminfo();
	dma += bfa_cee_meminfo();
}
void
bna_adv_res_req(struct bna_res_info *res_info)
{
	/* DMA memory for COMMON_MODULE */
	res_info[BNA_RES_MEM_T_COM].res_type = BNA_RES_T_MEM;
	res_info[BNA_RES_MEM_T_COM].res_u.mem_info.mem_type = BNA_MEM_T_DMA;
	res_info[BNA_RES_MEM_T_COM].res_u.mem_info.num = 1;
	res_info[BNA_RES_MEM_T_COM].res_u.mem_info.len = ALIGN(
				bfa_cee_meminfo(), PAGE_SIZE);

	/* Virtual memory for retrieving fw_trc */
	res_info[BNA_RES_MEM_T_FWTRC].res_type = BNA_RES_T_MEM;
	res_info[BNA_RES_MEM_T_FWTRC].res_u.mem_info.mem_type = BNA_MEM_T_KVA;
	res_info[BNA_RES_MEM_T_FWTRC].res_u.mem_info.num = 0;
	res_info[BNA_RES_MEM_T_FWTRC].res_u.mem_info.len = 0;

	/* DMA memory for retrieving stats */
	res_info[BNA_RES_MEM_T_STATS].res_type = BNA_RES_T_MEM;
	res_info[BNA_RES_MEM_T_STATS].res_u.mem_info.mem_type = BNA_MEM_T_DMA;
	res_info[BNA_RES_MEM_T_STATS].res_u.mem_info.num = 1;
	res_info[BNA_RES_MEM_T_STATS].res_u.mem_info.len =
		ALIGN(BFI_HW_STATS_SIZE, PAGE_SIZE);

	/* Virtual memory for soft stats */
	res_info[BNA_RES_MEM_T_SWSTATS].res_type = BNA_RES_T_MEM;
	res_info[BNA_RES_MEM_T_SWSTATS].res_u.mem_info.mem_type = BNA_MEM_T_KVA;
	res_info[BNA_RES_MEM_T_SWSTATS].res_u.mem_info.num = 1;
	res_info[BNA_RES_MEM_T_SWSTATS].res_u.mem_info.len =
		sizeof(struct bna_sw_stats);
}
static void
bna_sw_stats_get(struct bna *bna, struct bna_sw_stats *sw_stats)
{
	struct bna_tx *tx;
	struct bna_txq *txq;
	struct bna_rx *rx;
	struct bna_rxp *rxp;
	struct list_head *qe;
	struct list_head *txq_qe;
	struct list_head *rxp_qe;
	struct list_head *mac_qe;
	int i;

	sw_stats->device_state = bna_device_state_get(&bna->device);
	sw_stats->port_state = bna_port_state_get(&bna->port);
	sw_stats->port_flags = bna->port.flags;
	sw_stats->llport_state = bna_llport_state_get(&bna->port.llport);
	sw_stats->priority = bna->port.priority;

	i = 0;
	list_for_each(qe, &bna->tx_mod.tx_active_q) {
		tx = (struct bna_tx *)qe;
		sw_stats->tx_stats[i].tx_state = bna_tx_state_get(tx);
		sw_stats->tx_stats[i].tx_flags = tx->flags;

		sw_stats->tx_stats[i].num_txqs = 0;
		sw_stats->tx_stats[i].txq_bmap[0] = 0;
		sw_stats->tx_stats[i].txq_bmap[1] = 0;
		list_for_each(txq_qe, &tx->txq_q) {
			txq = (struct bna_txq *)txq_qe;
			if (txq->txq_id < 32)
				sw_stats->tx_stats[i].txq_bmap[0] |=
					((u32)1 << txq->txq_id);
			else
				sw_stats->tx_stats[i].txq_bmap[1] |=
					((u32)1 << (txq->txq_id - 32));
			sw_stats->tx_stats[i].num_txqs++;
		}

		sw_stats->tx_stats[i].txf_id = tx->txf.txf_id;

		i++;
	}
	sw_stats->num_active_tx = i;

	i = 0;
	list_for_each(qe, &bna->rx_mod.rx_active_q) {
		rx = (struct bna_rx *)qe;
		sw_stats->rx_stats[i].rx_state = bna_rx_state_get(rx);
		sw_stats->rx_stats[i].rx_flags = rx->rx_flags;

		sw_stats->rx_stats[i].num_rxps = 0;
		sw_stats->rx_stats[i].num_rxqs = 0;
		sw_stats->rx_stats[i].rxq_bmap[0] = 0;
		sw_stats->rx_stats[i].rxq_bmap[1] = 0;
		sw_stats->rx_stats[i].cq_bmap[0] = 0;
		sw_stats->rx_stats[i].cq_bmap[1] = 0;
		list_for_each(rxp_qe, &rx->rxp_q) {
			rxp = (struct bna_rxp *)rxp_qe;

			sw_stats->rx_stats[i].num_rxqs += 1;

			if (rxp->type == BNA_RXP_SINGLE) {
				if (rxp->rxq.single.only->rxq_id < 32) {
					sw_stats->rx_stats[i].rxq_bmap[0] |=
						((u32)1 <<
						rxp->rxq.single.only->rxq_id);
				} else {
					sw_stats->rx_stats[i].rxq_bmap[1] |=
						((u32)1 <<
						(rxp->rxq.single.only->rxq_id -
						32));
				}
			} else {
				if (rxp->rxq.slr.large->rxq_id < 32) {
					sw_stats->rx_stats[i].rxq_bmap[0] |=
						((u32)1 <<
						rxp->rxq.slr.large->rxq_id);
				} else {
					sw_stats->rx_stats[i].rxq_bmap[1] |=
						((u32)1 <<
						(rxp->rxq.slr.large->rxq_id -
						32));
				}

				if (rxp->rxq.slr.small->rxq_id < 32) {
					sw_stats->rx_stats[i].rxq_bmap[0] |=
						((u32)1 <<
						rxp->rxq.slr.small->rxq_id);
				} else {
					sw_stats->rx_stats[i].rxq_bmap[1] |=
						((u32)1 <<
						(rxp->rxq.slr.small->rxq_id -
						32));
				}
				sw_stats->rx_stats[i].num_rxqs += 1;
			}

			if (rxp->cq.cq_id < 32)
				sw_stats->rx_stats[i].cq_bmap[0] |=
					(1 << rxp->cq.cq_id);
			else
				sw_stats->rx_stats[i].cq_bmap[1] |=
					(1 << (rxp->cq.cq_id - 32));

			sw_stats->rx_stats[i].num_rxps++;
		}

		sw_stats->rx_stats[i].rxf_id = rx->rxf.rxf_id;
		sw_stats->rx_stats[i].rxf_state = bna_rxf_state_get(&rx->rxf);
		sw_stats->rx_stats[i].rxf_oper_state = rx->rxf.rxf_oper_state;

		sw_stats->rx_stats[i].num_active_ucast = 0;
		if (rx->rxf.ucast_active_mac)
			sw_stats->rx_stats[i].num_active_ucast++;
		list_for_each(mac_qe, &rx->rxf.ucast_active_q)
			sw_stats->rx_stats[i].num_active_ucast++;

		sw_stats->rx_stats[i].num_active_mcast = 0;
		list_for_each(mac_qe, &rx->rxf.mcast_active_q)
			sw_stats->rx_stats[i].num_active_mcast++;

		sw_stats->rx_stats[i].rxmode_active = rx->rxf.rxmode_active;
		sw_stats->rx_stats[i].vlan_filter_status =
			rx->rxf.vlan_filter_status;
		memcpy(sw_stats->rx_stats[i].vlan_filter_table,
			rx->rxf.vlan_filter_table,
			sizeof(u32) * ((BFI_MAX_VLAN + 1) / 32));

		sw_stats->rx_stats[i].rss_status = rx->rxf.rss_status;
		sw_stats->rx_stats[i].hds_status = rx->rxf.hds_status;

		i++;
	}
	sw_stats->num_active_rx = i;
}
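/*
 * bna_fw_cb_stats_get() below post-processes the firmware-DMAed stats
 * block: every u64 is byte-swapped in place, and the per-TxF/per-RxF
 * blocks, which firmware packs contiguously for only the active functions,
 * are spread out to their fixed slots in hw_stats by walking the function
 * bitmaps from the highest id down so the copies never overwrite their
 * source.
 */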
static void
bna_fw_cb_stats_get(void *arg, int status)
{
	struct bna *bna = (struct bna *)arg;
	u64 *p_stats;
	int i, count;
	int rxf_count, txf_count;
	u64 rxf_bmap, txf_bmap;

	bfa_q_qe_init(&bna->mbox_qe.qe);

	if (!status) {
		p_stats = (u64 *)bna->stats.hw_stats;
		count = sizeof(struct bfi_ll_stats) / sizeof(u64);
		for (i = 0; i < count; i++)
			p_stats[i] = cpu_to_be64(p_stats[i]);

		rxf_count = 0;
		rxf_bmap = (u64)bna->stats.rxf_bmap[0] |
			((u64)bna->stats.rxf_bmap[1] << 32);
		for (i = 0; i < BFI_LL_RXF_ID_MAX; i++)
			if (rxf_bmap & ((u64)1 << i))
				rxf_count++;

		txf_count = 0;
		txf_bmap = (u64)bna->stats.txf_bmap[0] |
			((u64)bna->stats.txf_bmap[1] << 32);
		for (i = 0; i < BFI_LL_TXF_ID_MAX; i++)
			if (txf_bmap & ((u64)1 << i))
				txf_count++;

		p_stats = (u64 *)&bna->stats.hw_stats->rxf_stats[0] +
			((rxf_count * sizeof(struct bfi_ll_stats_rxf) +
			txf_count * sizeof(struct bfi_ll_stats_txf)) /
			sizeof(u64));

		/* Populate the TXF stats from the firmware DMAed copy */
		for (i = (BFI_LL_TXF_ID_MAX - 1); i >= 0; i--)
			if (txf_bmap & ((u64)1 << i)) {
				p_stats -= sizeof(struct bfi_ll_stats_txf) /
						sizeof(u64);
				memcpy(&bna->stats.hw_stats->txf_stats[i],
					p_stats,
					sizeof(struct bfi_ll_stats_txf));
			}

		/* Populate the RXF stats from the firmware DMAed copy */
		for (i = (BFI_LL_RXF_ID_MAX - 1); i >= 0; i--)
			if (rxf_bmap & ((u64)1 << i)) {
				p_stats -= sizeof(struct bfi_ll_stats_rxf) /
						sizeof(u64);
				memcpy(&bna->stats.hw_stats->rxf_stats[i],
					p_stats,
					sizeof(struct bfi_ll_stats_rxf));
			}

		bna_sw_stats_get(bna, bna->stats.sw_stats);
		bnad_cb_stats_get(bna->bnad, BNA_CB_SUCCESS, &bna->stats);
	} else
		bnad_cb_stats_get(bna->bnad, BNA_CB_FAIL, &bna->stats);
}
static void
bna_fw_stats_get(struct bna *bna)
{
	struct bfi_ll_stats_req ll_req;

	bfi_h2i_set(ll_req.mh, BFI_MC_LL, BFI_LL_H2I_STATS_GET_REQ, 0);
	ll_req.stats_mask = htons(BFI_LL_STATS_ALL);

	ll_req.rxf_id_mask[0] = htonl(bna->rx_mod.rxf_bmap[0]);
	ll_req.rxf_id_mask[1] = htonl(bna->rx_mod.rxf_bmap[1]);
	ll_req.txf_id_mask[0] = htonl(bna->tx_mod.txf_bmap[0]);
	ll_req.txf_id_mask[1] = htonl(bna->tx_mod.txf_bmap[1]);

	ll_req.host_buffer.a32.addr_hi = bna->hw_stats_dma.msb;
	ll_req.host_buffer.a32.addr_lo = bna->hw_stats_dma.lsb;

	bna_mbox_qe_fill(&bna->mbox_qe, &ll_req, sizeof(ll_req),
				bna_fw_cb_stats_get, bna);
	bna_mbox_send(bna, &bna->mbox_qe);

	bna->stats.rxf_bmap[0] = bna->rx_mod.rxf_bmap[0];
	bna->stats.rxf_bmap[1] = bna->rx_mod.rxf_bmap[1];
	bna->stats.txf_bmap[0] = bna->tx_mod.txf_bmap[0];
	bna->stats.txf_bmap[1] = bna->tx_mod.txf_bmap[1];
}

static void
bna_fw_cb_stats_clr(void *arg, int status)
{
	struct bna *bna = (struct bna *)arg;

	bfa_q_qe_init(&bna->mbox_qe.qe);

	memset(bna->stats.sw_stats, 0, sizeof(struct bna_sw_stats));
	memset(bna->stats.hw_stats, 0, sizeof(struct bfi_ll_stats));

	bnad_cb_stats_clr(bna->bnad);
}

static void
bna_fw_stats_clr(struct bna *bna)
{
	struct bfi_ll_stats_req ll_req;

	bfi_h2i_set(ll_req.mh, BFI_MC_LL, BFI_LL_H2I_STATS_CLEAR_REQ, 0);
	ll_req.stats_mask = htons(BFI_LL_STATS_ALL);
	ll_req.rxf_id_mask[0] = htonl(0xffffffff);
	ll_req.rxf_id_mask[1] = htonl(0xffffffff);
	ll_req.txf_id_mask[0] = htonl(0xffffffff);
	ll_req.txf_id_mask[1] = htonl(0xffffffff);

	bna_mbox_qe_fill(&bna->mbox_qe, &ll_req, sizeof(ll_req),
				bna_fw_cb_stats_clr, bna);
	bna_mbox_send(bna, &bna->mbox_qe);
}

void
bna_stats_get(struct bna *bna)
{
	if (bna_device_status_get(&bna->device))
		bna_fw_stats_get(bna);
	else
		bnad_cb_stats_get(bna->bnad, BNA_CB_FAIL, &bna->stats);
}

void
bna_stats_clr(struct bna *bna)
{
	if (bna_device_status_get(&bna->device))
		bna_fw_stats_clr(bna);
	else {
		memset(bna->stats.sw_stats, 0,
			sizeof(struct bna_sw_stats));
		memset(bna->stats.hw_stats, 0,
			sizeof(struct bfi_ll_stats));
		bnad_cb_stats_clr(bna->bnad);
	}
}
/**
 * IB
 */
void
bna_ib_coalescing_timeo_set(struct bna_ib *ib, u8 coalescing_timeo)
{
	ib->ib_config.coalescing_timeo = coalescing_timeo;

	if (ib->start_count)
		ib->door_bell.doorbell_ack = BNA_DOORBELL_IB_INT_ACK(
				(u32)ib->ib_config.coalescing_timeo, 0);
}

/**
 * RXF
 */
void
bna_rxf_adv_init(struct bna_rxf *rxf,
		struct bna_rx *rx,
		struct bna_rx_config *q_config)
{
	switch (q_config->rxp_type) {
	case BNA_RXP_SINGLE:
		/* No-op */
		break;
	case BNA_RXP_SLR:
		rxf->ctrl_flags |= BNA_RXF_CF_SM_LG_RXQ;
		break;
	case BNA_RXP_HDS:
		rxf->hds_cfg.hdr_type = q_config->hds_config.hdr_type;
		rxf->hds_cfg.header_size =
				q_config->hds_config.header_size;
		rxf->forced_offset = 0;
		break;
	default:
		break;
	}

	if (q_config->rss_status == BNA_STATUS_T_ENABLED) {
		rxf->ctrl_flags |= BNA_RXF_CF_RSS_ENABLE;
		rxf->rss_cfg.hash_type = q_config->rss_config.hash_type;
		rxf->rss_cfg.hash_mask = q_config->rss_config.hash_mask;
		memcpy(&rxf->rss_cfg.toeplitz_hash_key[0],
			&q_config->rss_config.toeplitz_hash_key[0],
			sizeof(rxf->rss_cfg.toeplitz_hash_key));
	}
}
static void
rxf_fltr_mbox_cmd(struct bna_rxf *rxf, u8 cmd, enum bna_status status)
{
	struct bfi_ll_rxf_req req;

	bfi_h2i_set(req.mh, BFI_MC_LL, cmd, 0);

	req.rxf_id = rxf->rxf_id;
	req.enable = status;

	bna_mbox_qe_fill(&rxf->mbox_qe, &req, sizeof(req),
				rxf_cb_cam_fltr_mbox_cmd, rxf);
	bna_mbox_send(rxf->rx->bna, &rxf->mbox_qe);
}

static void
__rxf_default_function_config(struct bna_rxf *rxf, enum bna_status status)
{
	struct bna_rx_fndb_ram *rx_fndb_ram;
	u32 ctrl_flags;
	int i;

	rx_fndb_ram = (struct bna_rx_fndb_ram *)
			BNA_GET_MEM_BASE_ADDR(rxf->rx->bna->pcidev.pci_bar_kva,
				RX_FNDB_RAM_BASE_OFFSET);

	for (i = 0; i < BFI_MAX_RXF; i++) {
		if (status == BNA_STATUS_T_ENABLED) {
			if (i == rxf->rxf_id)
				continue;

			ctrl_flags =
				readl(&rx_fndb_ram[i].control_flags);
			ctrl_flags |= BNA_RXF_CF_DEFAULT_FUNCTION_ENABLE;
			writel(ctrl_flags,
				&rx_fndb_ram[i].control_flags);
		} else {
			ctrl_flags =
				readl(&rx_fndb_ram[i].control_flags);
			ctrl_flags &= ~BNA_RXF_CF_DEFAULT_FUNCTION_ENABLE;
			writel(ctrl_flags,
				&rx_fndb_ram[i].control_flags);
		}
	}
}
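/*
 * The rxf_process_*, rxf_clear_* and rxf_reset_* helpers below follow one
 * convention: "process" applies a pending config change, "clear" undoes the
 * active config ahead of a stop, and "reset" rewinds active state back to
 * pending without touching hardware. The process/clear variants return 1
 * when they posted a mailbox command (so the caller must wait for the
 * response) and 0 when there was nothing to do.
 */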
static int
rxf_process_packet_filter_ucast(struct bna_rxf *rxf)
{
	struct bna_mac *mac = NULL;
	struct list_head *qe;

	/* Add additional MAC entries */
	if (!list_empty(&rxf->ucast_pending_add_q)) {
		bfa_q_deq(&rxf->ucast_pending_add_q, &qe);
		bfa_q_qe_init(qe);
		mac = (struct bna_mac *)qe;
		rxf_cam_mbox_cmd(rxf, BFI_LL_H2I_MAC_UCAST_ADD_REQ, mac);
		list_add_tail(&mac->qe, &rxf->ucast_active_q);
		return 1;
	}

	/* Delete MAC addresses previously added */
	if (!list_empty(&rxf->ucast_pending_del_q)) {
		bfa_q_deq(&rxf->ucast_pending_del_q, &qe);
		bfa_q_qe_init(qe);
		mac = (struct bna_mac *)qe;
		rxf_cam_mbox_cmd(rxf, BFI_LL_H2I_MAC_UCAST_DEL_REQ, mac);
		bna_ucam_mod_mac_put(&rxf->rx->bna->ucam_mod, mac);
		return 1;
	}

	return 0;
}

static int
rxf_process_packet_filter_promisc(struct bna_rxf *rxf)
{
	struct bna *bna = rxf->rx->bna;

	/* Enable/disable promiscuous mode */
	if (is_promisc_enable(rxf->rxmode_pending,
				rxf->rxmode_pending_bitmask)) {
		/* move promisc configuration from pending -> active */
		promisc_inactive(rxf->rxmode_pending,
				rxf->rxmode_pending_bitmask);
		rxf->rxmode_active |= BNA_RXMODE_PROMISC;

		/* Disable VLAN filter to allow all VLANs */
		__rxf_vlan_filter_set(rxf, BNA_STATUS_T_DISABLED);
		rxf_fltr_mbox_cmd(rxf, BFI_LL_H2I_RXF_PROMISCUOUS_SET_REQ,
				BNA_STATUS_T_ENABLED);
		return 1;
	} else if (is_promisc_disable(rxf->rxmode_pending,
				rxf->rxmode_pending_bitmask)) {
		/* move promisc configuration from pending -> active */
		promisc_inactive(rxf->rxmode_pending,
				rxf->rxmode_pending_bitmask);
		rxf->rxmode_active &= ~BNA_RXMODE_PROMISC;
		bna->rxf_promisc_id = BFI_MAX_RXF;

		/* Revert VLAN filter */
		__rxf_vlan_filter_set(rxf, rxf->vlan_filter_status);
		rxf_fltr_mbox_cmd(rxf, BFI_LL_H2I_RXF_PROMISCUOUS_SET_REQ,
				BNA_STATUS_T_DISABLED);
		return 1;
	}

	return 0;
}
static int
rxf_process_packet_filter_default(struct bna_rxf *rxf)
{
	struct bna *bna = rxf->rx->bna;

	/* Enable/disable default mode */
	if (is_default_enable(rxf->rxmode_pending,
				rxf->rxmode_pending_bitmask)) {
		/* move default configuration from pending -> active */
		default_inactive(rxf->rxmode_pending,
				rxf->rxmode_pending_bitmask);
		rxf->rxmode_active |= BNA_RXMODE_DEFAULT;

		/* Disable VLAN filter to allow all VLANs */
		__rxf_vlan_filter_set(rxf, BNA_STATUS_T_DISABLED);
		/* Redirect all other RxF vlan filtering to this one */
		__rxf_default_function_config(rxf, BNA_STATUS_T_ENABLED);
		rxf_fltr_mbox_cmd(rxf, BFI_LL_H2I_RXF_DEFAULT_SET_REQ,
				BNA_STATUS_T_ENABLED);
		return 1;
	} else if (is_default_disable(rxf->rxmode_pending,
				rxf->rxmode_pending_bitmask)) {
		/* move default configuration from pending -> active */
		default_inactive(rxf->rxmode_pending,
				rxf->rxmode_pending_bitmask);
		rxf->rxmode_active &= ~BNA_RXMODE_DEFAULT;
		bna->rxf_default_id = BFI_MAX_RXF;

		/* Revert VLAN filter */
		__rxf_vlan_filter_set(rxf, rxf->vlan_filter_status);
		/* Stop RxF vlan filter table redirection */
		__rxf_default_function_config(rxf, BNA_STATUS_T_DISABLED);
		rxf_fltr_mbox_cmd(rxf, BFI_LL_H2I_RXF_DEFAULT_SET_REQ,
				BNA_STATUS_T_DISABLED);
		return 1;
	}

	return 0;
}

static int
rxf_process_packet_filter_allmulti(struct bna_rxf *rxf)
{
	/* Enable/disable allmulti mode */
	if (is_allmulti_enable(rxf->rxmode_pending,
				rxf->rxmode_pending_bitmask)) {
		/* move allmulti configuration from pending -> active */
		allmulti_inactive(rxf->rxmode_pending,
				rxf->rxmode_pending_bitmask);
		rxf->rxmode_active |= BNA_RXMODE_ALLMULTI;

		rxf_fltr_mbox_cmd(rxf, BFI_LL_H2I_MAC_MCAST_FILTER_REQ,
				BNA_STATUS_T_ENABLED);
		return 1;
	} else if (is_allmulti_disable(rxf->rxmode_pending,
				rxf->rxmode_pending_bitmask)) {
		/* move allmulti configuration from pending -> active */
		allmulti_inactive(rxf->rxmode_pending,
				rxf->rxmode_pending_bitmask);
		rxf->rxmode_active &= ~BNA_RXMODE_ALLMULTI;

		rxf_fltr_mbox_cmd(rxf, BFI_LL_H2I_MAC_MCAST_FILTER_REQ,
				BNA_STATUS_T_DISABLED);
		return 1;
	}

	return 0;
}
static int
rxf_clear_packet_filter_ucast(struct bna_rxf *rxf)
{
	struct bna_mac *mac = NULL;
	struct list_head *qe;

	/* 1. delete pending ucast entries */
	if (!list_empty(&rxf->ucast_pending_del_q)) {
		bfa_q_deq(&rxf->ucast_pending_del_q, &qe);
		bfa_q_qe_init(qe);
		mac = (struct bna_mac *)qe;
		rxf_cam_mbox_cmd(rxf, BFI_LL_H2I_MAC_UCAST_DEL_REQ, mac);
		bna_ucam_mod_mac_put(&rxf->rx->bna->ucam_mod, mac);
		return 1;
	}

	/* 2. clear active ucast entries; move them to pending_add_q */
	if (!list_empty(&rxf->ucast_active_q)) {
		bfa_q_deq(&rxf->ucast_active_q, &qe);
		bfa_q_qe_init(qe);
		mac = (struct bna_mac *)qe;
		rxf_cam_mbox_cmd(rxf, BFI_LL_H2I_MAC_UCAST_DEL_REQ, mac);
		list_add_tail(&mac->qe, &rxf->ucast_pending_add_q);
		return 1;
	}

	return 0;
}

static int
rxf_clear_packet_filter_promisc(struct bna_rxf *rxf)
{
	struct bna *bna = rxf->rx->bna;

	/* 6. Execute pending promisc mode disable command */
	if (is_promisc_disable(rxf->rxmode_pending,
				rxf->rxmode_pending_bitmask)) {
		/* move promisc configuration from pending -> active */
		promisc_inactive(rxf->rxmode_pending,
				rxf->rxmode_pending_bitmask);
		rxf->rxmode_active &= ~BNA_RXMODE_PROMISC;
		bna->rxf_promisc_id = BFI_MAX_RXF;

		/* Revert VLAN filter */
		__rxf_vlan_filter_set(rxf, rxf->vlan_filter_status);
		rxf_fltr_mbox_cmd(rxf, BFI_LL_H2I_RXF_PROMISCUOUS_SET_REQ,
				BNA_STATUS_T_DISABLED);
		return 1;
	}

	/* 7. Clear active promisc mode; move it to pending enable */
	if (rxf->rxmode_active & BNA_RXMODE_PROMISC) {
		/* move promisc configuration from active -> pending */
		promisc_enable(rxf->rxmode_pending,
				rxf->rxmode_pending_bitmask);
		rxf->rxmode_active &= ~BNA_RXMODE_PROMISC;

		/* Revert VLAN filter */
		__rxf_vlan_filter_set(rxf, rxf->vlan_filter_status);
		rxf_fltr_mbox_cmd(rxf, BFI_LL_H2I_RXF_PROMISCUOUS_SET_REQ,
				BNA_STATUS_T_DISABLED);
		return 1;
	}

	return 0;
}
static int
rxf_clear_packet_filter_default(struct bna_rxf *rxf)
{
	struct bna *bna = rxf->rx->bna;

	/* 8. Execute pending default mode disable command */
	if (is_default_disable(rxf->rxmode_pending,
				rxf->rxmode_pending_bitmask)) {
		/* move default configuration from pending -> active */
		default_inactive(rxf->rxmode_pending,
				rxf->rxmode_pending_bitmask);
		rxf->rxmode_active &= ~BNA_RXMODE_DEFAULT;
		bna->rxf_default_id = BFI_MAX_RXF;

		/* Revert VLAN filter */
		__rxf_vlan_filter_set(rxf, rxf->vlan_filter_status);
		/* Stop RxF vlan filter table redirection */
		__rxf_default_function_config(rxf, BNA_STATUS_T_DISABLED);
		rxf_fltr_mbox_cmd(rxf, BFI_LL_H2I_RXF_DEFAULT_SET_REQ,
				BNA_STATUS_T_DISABLED);
		return 1;
	}

	/* 9. Clear active default mode; move it to pending enable */
	if (rxf->rxmode_active & BNA_RXMODE_DEFAULT) {
		/* move default configuration from active -> pending */
		default_enable(rxf->rxmode_pending,
				rxf->rxmode_pending_bitmask);
		rxf->rxmode_active &= ~BNA_RXMODE_DEFAULT;

		/* Revert VLAN filter */
		__rxf_vlan_filter_set(rxf, rxf->vlan_filter_status);
		/* Stop RxF vlan filter table redirection */
		__rxf_default_function_config(rxf, BNA_STATUS_T_DISABLED);
		rxf_fltr_mbox_cmd(rxf, BFI_LL_H2I_RXF_DEFAULT_SET_REQ,
				BNA_STATUS_T_DISABLED);
		return 1;
	}

	return 0;
}

static int
rxf_clear_packet_filter_allmulti(struct bna_rxf *rxf)
{
	/* 10. Execute pending allmulti mode disable command */
	if (is_allmulti_disable(rxf->rxmode_pending,
				rxf->rxmode_pending_bitmask)) {
		/* move allmulti configuration from pending -> active */
		allmulti_inactive(rxf->rxmode_pending,
				rxf->rxmode_pending_bitmask);
		rxf->rxmode_active &= ~BNA_RXMODE_ALLMULTI;
		rxf_fltr_mbox_cmd(rxf, BFI_LL_H2I_MAC_MCAST_FILTER_REQ,
				BNA_STATUS_T_DISABLED);
		return 1;
	}

	/* 11. Clear active allmulti mode; move it to pending enable */
	if (rxf->rxmode_active & BNA_RXMODE_ALLMULTI) {
		/* move allmulti configuration from active -> pending */
		allmulti_enable(rxf->rxmode_pending,
				rxf->rxmode_pending_bitmask);
		rxf->rxmode_active &= ~BNA_RXMODE_ALLMULTI;
		rxf_fltr_mbox_cmd(rxf, BFI_LL_H2I_MAC_MCAST_FILTER_REQ,
				BNA_STATUS_T_DISABLED);
		return 1;
	}

	return 0;
}

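/*
 * Unlike the clear routines above, the rxf_reset_packet_filter_*() helpers
 * below rewind only the s/w state: no mbox commands are issued, and active
 * configuration is simply moved back to pending so that it can be replayed
 * against h/w later.
 */
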
static void
rxf_reset_packet_filter_ucast(struct bna_rxf *rxf)
{
	struct list_head *qe;
	struct bna_mac *mac;

	/* 1. Move active ucast entries to pending_add_q */
	while (!list_empty(&rxf->ucast_active_q)) {
		bfa_q_deq(&rxf->ucast_active_q, &qe);
		bfa_q_qe_init(qe);
		list_add_tail(qe, &rxf->ucast_pending_add_q);
	}

	/* 2. Throw away delete pending ucast entries */
	while (!list_empty(&rxf->ucast_pending_del_q)) {
		bfa_q_deq(&rxf->ucast_pending_del_q, &qe);
		bfa_q_qe_init(qe);
		mac = (struct bna_mac *)qe;
		bna_ucam_mod_mac_put(&rxf->rx->bna->ucam_mod, mac);
	}
}

static void
rxf_reset_packet_filter_promisc(struct bna_rxf *rxf)
{
	struct bna *bna = rxf->rx->bna;

	/* 6. Clear pending promisc mode disable */
	if (is_promisc_disable(rxf->rxmode_pending,
				rxf->rxmode_pending_bitmask)) {
		promisc_inactive(rxf->rxmode_pending,
				rxf->rxmode_pending_bitmask);
		rxf->rxmode_active &= ~BNA_RXMODE_PROMISC;
		bna->rxf_promisc_id = BFI_MAX_RXF;
	}

	/* 7. Move promisc mode config from active -> pending */
	if (rxf->rxmode_active & BNA_RXMODE_PROMISC) {
		promisc_enable(rxf->rxmode_pending,
				rxf->rxmode_pending_bitmask);
		rxf->rxmode_active &= ~BNA_RXMODE_PROMISC;
	}
}

static void
rxf_reset_packet_filter_default(struct bna_rxf *rxf)
{
	struct bna *bna = rxf->rx->bna;

	/* 8. Clear pending default mode disable */
	if (is_default_disable(rxf->rxmode_pending,
				rxf->rxmode_pending_bitmask)) {
		default_inactive(rxf->rxmode_pending,
				rxf->rxmode_pending_bitmask);
		rxf->rxmode_active &= ~BNA_RXMODE_DEFAULT;
		bna->rxf_default_id = BFI_MAX_RXF;
	}

	/* 9. Move default mode config from active -> pending */
	if (rxf->rxmode_active & BNA_RXMODE_DEFAULT) {
		default_enable(rxf->rxmode_pending,
				rxf->rxmode_pending_bitmask);
		rxf->rxmode_active &= ~BNA_RXMODE_DEFAULT;
	}
}

static void
rxf_reset_packet_filter_allmulti(struct bna_rxf *rxf)
{
	/* 10. Clear pending allmulti mode disable */
	if (is_allmulti_disable(rxf->rxmode_pending,
				rxf->rxmode_pending_bitmask)) {
		allmulti_inactive(rxf->rxmode_pending,
				rxf->rxmode_pending_bitmask);
		rxf->rxmode_active &= ~BNA_RXMODE_ALLMULTI;
	}

	/* 11. Move allmulti mode config from active -> pending */
	if (rxf->rxmode_active & BNA_RXMODE_ALLMULTI) {
		allmulti_enable(rxf->rxmode_pending,
				rxf->rxmode_pending_bitmask);
		rxf->rxmode_active &= ~BNA_RXMODE_ALLMULTI;
	}
}

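/*
 * The rxf_*_enable()/rxf_*_disable() helpers below only stage a mode change
 * in rxf->rxmode_pending; h/w is touched later, when the RxF state machine
 * processes the pending bits. Promiscuous and default modes are system-wide
 * singletons, tracked in bna->rxf_promisc_id and bna->rxf_default_id
 * respectively.
 */
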
/*
 * Should only be called by bna_rxf_mode_set.
 * Helps deciding if h/w configuration is needed or not.
 * Returns:
 *	0 = no h/w change
 *	1 = need h/w change
 */
static int
rxf_promisc_enable(struct bna_rxf *rxf)
{
	struct bna *bna = rxf->rx->bna;
	int ret = 0;

	/* There cannot be any pending disable command */

	/* Do nothing if pending enable or already enabled */
	if (is_promisc_enable(rxf->rxmode_pending,
			rxf->rxmode_pending_bitmask) ||
			(rxf->rxmode_active & BNA_RXMODE_PROMISC)) {

	/* Schedule enable */
	} else {
		/* Promisc mode should not be active in the system */
		promisc_enable(rxf->rxmode_pending,
				rxf->rxmode_pending_bitmask);
		bna->rxf_promisc_id = rxf->rxf_id;
		ret = 1;
	}

	return ret;
}

/*
 * Should only be called by bna_rxf_mode_set.
 * Helps deciding if h/w configuration is needed or not.
 * Returns:
 *	0 = no h/w change
 *	1 = need h/w change
 */
static int
rxf_promisc_disable(struct bna_rxf *rxf)
{
	struct bna *bna = rxf->rx->bna;
	int ret = 0;

	/* There cannot be any pending disable */

	/* Turn off pending enable command, if any */
	if (is_promisc_enable(rxf->rxmode_pending,
			rxf->rxmode_pending_bitmask)) {
		/* Promisc mode should not be active */
		/* System promisc state should be pending */
		promisc_inactive(rxf->rxmode_pending,
				rxf->rxmode_pending_bitmask);
		/* Remove the promisc state from the system */
		bna->rxf_promisc_id = BFI_MAX_RXF;

	/* Schedule disable */
	} else if (rxf->rxmode_active & BNA_RXMODE_PROMISC) {
		/* Promisc mode should be active in the system */
		promisc_disable(rxf->rxmode_pending,
				rxf->rxmode_pending_bitmask);
		ret = 1;
	}
	/* Do nothing if already disabled */

	return ret;
}

/*
 * Should only be called by bna_rxf_mode_set.
 * Helps deciding if h/w configuration is needed or not.
 * Returns:
 *	0 = no h/w change
 *	1 = need h/w change
 */
static int
rxf_default_enable(struct bna_rxf *rxf)
{
	struct bna *bna = rxf->rx->bna;
	int ret = 0;

	/* There cannot be any pending disable command */

	/* Do nothing if pending enable or already enabled */
	if (is_default_enable(rxf->rxmode_pending,
			rxf->rxmode_pending_bitmask) ||
			(rxf->rxmode_active & BNA_RXMODE_DEFAULT)) {

	/* Schedule enable */
	} else {
		/* Default mode should not be active in the system */
		default_enable(rxf->rxmode_pending,
				rxf->rxmode_pending_bitmask);
		bna->rxf_default_id = rxf->rxf_id;
		ret = 1;
	}

	return ret;
}

/*
 * Should only be called by bna_rxf_mode_set.
 * Helps deciding if h/w configuration is needed or not.
 * Returns:
 *	0 = no h/w change
 *	1 = need h/w change
 */
static int
rxf_default_disable(struct bna_rxf *rxf)
{
	struct bna *bna = rxf->rx->bna;
	int ret = 0;

	/* There cannot be any pending disable */

	/* Turn off pending enable command, if any */
	if (is_default_enable(rxf->rxmode_pending,
			rxf->rxmode_pending_bitmask)) {
		/* Default mode should not be active */
		/* System default state should be pending */
		default_inactive(rxf->rxmode_pending,
				rxf->rxmode_pending_bitmask);
		/* Remove the default state from the system */
		bna->rxf_default_id = BFI_MAX_RXF;

	/* Schedule disable */
	} else if (rxf->rxmode_active & BNA_RXMODE_DEFAULT) {
		/* Default mode should be active in the system */
		default_disable(rxf->rxmode_pending,
				rxf->rxmode_pending_bitmask);
		ret = 1;
	}
	/* Do nothing if already disabled */

	return ret;
}

/*
 * Should only be called by bna_rxf_mode_set.
 * Helps deciding if h/w configuration is needed or not.
 * Returns:
 *	0 = no h/w change
 *	1 = need h/w change
 */
static int
rxf_allmulti_enable(struct bna_rxf *rxf)
{
	int ret = 0;

	/* There cannot be any pending disable command */

	/* Do nothing if pending enable or already enabled */
	if (is_allmulti_enable(rxf->rxmode_pending,
			rxf->rxmode_pending_bitmask) ||
			(rxf->rxmode_active & BNA_RXMODE_ALLMULTI)) {

	/* Schedule enable */
	} else {
		allmulti_enable(rxf->rxmode_pending,
				rxf->rxmode_pending_bitmask);
		ret = 1;
	}

	return ret;
}

/*
 * Should only be called by bna_rxf_mode_set.
 * Helps deciding if h/w configuration is needed or not.
 * Returns:
 *	0 = no h/w change
 *	1 = need h/w change
 */
static int
rxf_allmulti_disable(struct bna_rxf *rxf)
{
	int ret = 0;

	/* There cannot be any pending disable */

	/* Turn off pending enable command, if any */
	if (is_allmulti_enable(rxf->rxmode_pending,
			rxf->rxmode_pending_bitmask)) {
		/* Allmulti mode should not be active */
		allmulti_inactive(rxf->rxmode_pending,
				rxf->rxmode_pending_bitmask);

	/* Schedule disable */
	} else if (rxf->rxmode_active & BNA_RXMODE_ALLMULTI) {
		allmulti_disable(rxf->rxmode_pending,
				rxf->rxmode_pending_bitmask);
		ret = 1;
	}

	return ret;
}

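/*
 * Callback convention for the bna_rx_*() entry points below: when a h/w
 * update is required, cbfn is saved and invoked only after the RxF state
 * machine completes the operation; when no h/w change is needed, cbfn (if
 * non-NULL) is invoked immediately with BNA_CB_SUCCESS.
 */
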
void
bna_rx_mcast_delall(struct bna_rx *rx,
		void (*cbfn)(struct bnad *, struct bna_rx *,
				enum bna_cb_status))
{
	struct bna_rxf *rxf = &rx->rxf;
	struct list_head *qe;
	struct bna_mac *mac;
	int need_hw_config = 0;

	/* Purge all entries from pending_add_q */
	while (!list_empty(&rxf->mcast_pending_add_q)) {
		bfa_q_deq(&rxf->mcast_pending_add_q, &qe);
		mac = (struct bna_mac *)qe;
		bfa_q_qe_init(&mac->qe);
		bna_mcam_mod_mac_put(&rxf->rx->bna->mcam_mod, mac);
	}

	/* Schedule all entries in active_q for deletion */
	while (!list_empty(&rxf->mcast_active_q)) {
		bfa_q_deq(&rxf->mcast_active_q, &qe);
		mac = (struct bna_mac *)qe;
		bfa_q_qe_init(&mac->qe);
		list_add_tail(&mac->qe, &rxf->mcast_pending_del_q);
		need_hw_config = 1;
	}

	if (need_hw_config) {
		rxf->cam_fltr_cbfn = cbfn;
		rxf->cam_fltr_cbarg = rx->bna->bnad;
		bfa_fsm_send_event(rxf, RXF_E_CAM_FLTR_MOD);
		return;
	}

	if (cbfn)
		(*cbfn)(rx->bna->bnad, rx, BNA_CB_SUCCESS);
}

void
bna_rx_receive_resume(struct bna_rx *rx,
		void (*cbfn)(struct bnad *, struct bna_rx *,
				enum bna_cb_status))
{
	struct bna_rxf *rxf = &rx->rxf;

	if (rxf->rxf_oper_state == BNA_RXF_OPER_STATE_PAUSED) {
		rxf->oper_state_cbfn = cbfn;
		rxf->oper_state_cbarg = rx->bna->bnad;
		bfa_fsm_send_event(rxf, RXF_E_RESUME);
	} else if (cbfn)
		(*cbfn)(rx->bna->bnad, rx, BNA_CB_SUCCESS);
}

void
bna_rx_receive_pause(struct bna_rx *rx,
		void (*cbfn)(struct bnad *, struct bna_rx *,
				enum bna_cb_status))
{
	struct bna_rxf *rxf = &rx->rxf;

	if (rxf->rxf_oper_state == BNA_RXF_OPER_STATE_RUNNING) {
		rxf->oper_state_cbfn = cbfn;
		rxf->oper_state_cbarg = rx->bna->bnad;
		bfa_fsm_send_event(rxf, RXF_E_PAUSE);
	} else if (cbfn)
		(*cbfn)(rx->bna->bnad, rx, BNA_CB_SUCCESS);
}

enum bna_cb_status
bna_rx_ucast_add(struct bna_rx *rx, u8 *addr,
		void (*cbfn)(struct bnad *, struct bna_rx *,
				enum bna_cb_status))
{
	struct bna_rxf *rxf = &rx->rxf;
	struct list_head *qe;
	struct bna_mac *mac;

	/* Check if already added */
	list_for_each(qe, &rxf->ucast_active_q) {
		mac = (struct bna_mac *)qe;
		if (BNA_MAC_IS_EQUAL(mac->addr, addr)) {
			if (cbfn)
				(*cbfn)(rx->bna->bnad, rx, BNA_CB_SUCCESS);
			return BNA_CB_SUCCESS;
		}
	}

	/* Check if pending addition */
	list_for_each(qe, &rxf->ucast_pending_add_q) {
		mac = (struct bna_mac *)qe;
		if (BNA_MAC_IS_EQUAL(mac->addr, addr)) {
			if (cbfn)
				(*cbfn)(rx->bna->bnad, rx, BNA_CB_SUCCESS);
			return BNA_CB_SUCCESS;
		}
	}

	mac = bna_ucam_mod_mac_get(&rxf->rx->bna->ucam_mod);
	if (mac == NULL)
		return BNA_CB_UCAST_CAM_FULL;
	bfa_q_qe_init(&mac->qe);
	memcpy(mac->addr, addr, ETH_ALEN);
	list_add_tail(&mac->qe, &rxf->ucast_pending_add_q);

	rxf->cam_fltr_cbfn = cbfn;
	rxf->cam_fltr_cbarg = rx->bna->bnad;

	bfa_fsm_send_event(rxf, RXF_E_CAM_FLTR_MOD);

	return BNA_CB_SUCCESS;
}

enum bna_cb_status
bna_rx_ucast_del(struct bna_rx *rx, u8 *addr,
		void (*cbfn)(struct bnad *, struct bna_rx *,
				enum bna_cb_status))
{
	struct bna_rxf *rxf = &rx->rxf;
	struct list_head *qe;
	struct bna_mac *mac;

	list_for_each(qe, &rxf->ucast_pending_add_q) {
		mac = (struct bna_mac *)qe;
		if (BNA_MAC_IS_EQUAL(mac->addr, addr)) {
			list_del(qe);
			bfa_q_qe_init(qe);
			bna_ucam_mod_mac_put(&rxf->rx->bna->ucam_mod, mac);
			if (cbfn)
				(*cbfn)(rx->bna->bnad, rx, BNA_CB_SUCCESS);
			return BNA_CB_SUCCESS;
		}
	}

	list_for_each(qe, &rxf->ucast_active_q) {
		mac = (struct bna_mac *)qe;
		if (BNA_MAC_IS_EQUAL(mac->addr, addr)) {
			list_del(qe);
			bfa_q_qe_init(qe);
			list_add_tail(qe, &rxf->ucast_pending_del_q);
			rxf->cam_fltr_cbfn = cbfn;
			rxf->cam_fltr_cbarg = rx->bna->bnad;
			bfa_fsm_send_event(rxf, RXF_E_CAM_FLTR_MOD);
			return BNA_CB_SUCCESS;
		}
	}

	return BNA_CB_INVALID_MAC;
}

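/*
 * A minimal usage sketch for bna_rx_mode_set() as seen from the bnad side
 * (illustrative only; assumes the caller holds bnad->bna_lock around
 * bna_*() calls, as bnad.c does elsewhere):
 *
 *	spin_lock_irqsave(&bnad->bna_lock, flags);
 *	bna_rx_mode_set(bnad->rx_info[0].rx, BNA_RXMODE_PROMISC,
 *			BNA_RXMODE_PROMISC, NULL);
 *	spin_unlock_irqrestore(&bnad->bna_lock, flags);
 *
 * new_mode carries the desired on/off values for the bits selected by
 * bitmask; a NULL cbfn skips completion notification.
 */
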
enum bna_cb_status
bna_rx_mode_set(struct bna_rx *rx, enum bna_rxmode new_mode,
		enum bna_rxmode bitmask,
		void (*cbfn)(struct bnad *, struct bna_rx *,
				enum bna_cb_status))
{
	struct bna_rxf *rxf = &rx->rxf;
	int need_hw_config = 0;

	/* Error checks */
	if (is_promisc_enable(new_mode, bitmask)) {
		/* If promisc mode is already enabled elsewhere in the system */
		if ((rx->bna->rxf_promisc_id != BFI_MAX_RXF) &&
			(rx->bna->rxf_promisc_id != rxf->rxf_id))
			goto err_return;

		/* If default mode is already enabled in the system */
		if (rx->bna->rxf_default_id != BFI_MAX_RXF)
			goto err_return;

		/* Trying to enable promiscuous and default mode together */
		if (is_default_enable(new_mode, bitmask))
			goto err_return;
	}

	if (is_default_enable(new_mode, bitmask)) {
		/* If default mode is already enabled elsewhere in the system */
		if ((rx->bna->rxf_default_id != BFI_MAX_RXF) &&
			(rx->bna->rxf_default_id != rxf->rxf_id)) {
			goto err_return;
		}

		/* If promiscuous mode is already enabled in the system */
		if (rx->bna->rxf_promisc_id != BFI_MAX_RXF)
			goto err_return;
	}

	/* Process the commands */
	if (is_promisc_enable(new_mode, bitmask)) {
		if (rxf_promisc_enable(rxf))
			need_hw_config = 1;
	} else if (is_promisc_disable(new_mode, bitmask)) {
		if (rxf_promisc_disable(rxf))
			need_hw_config = 1;
	}

	if (is_default_enable(new_mode, bitmask)) {
		if (rxf_default_enable(rxf))
			need_hw_config = 1;
	} else if (is_default_disable(new_mode, bitmask)) {
		if (rxf_default_disable(rxf))
			need_hw_config = 1;
	}

	if (is_allmulti_enable(new_mode, bitmask)) {
		if (rxf_allmulti_enable(rxf))
			need_hw_config = 1;
	} else if (is_allmulti_disable(new_mode, bitmask)) {
		if (rxf_allmulti_disable(rxf))
			need_hw_config = 1;
	}

	/* Trigger h/w if needed */
	if (need_hw_config) {
		rxf->cam_fltr_cbfn = cbfn;
		rxf->cam_fltr_cbarg = rx->bna->bnad;
		bfa_fsm_send_event(rxf, RXF_E_CAM_FLTR_MOD);
	} else if (cbfn)
		(*cbfn)(rx->bna->bnad, rx, BNA_CB_SUCCESS);

	return BNA_CB_SUCCESS;

err_return:
	return BNA_CB_FAIL;
}

void
bna_rx_rss_enable(struct bna_rx *rx)
{
	struct bna_rxf *rxf = &rx->rxf;

	rxf->rxf_flags |= BNA_RXF_FL_RSS_CONFIG_PENDING;
	rxf->rss_status = BNA_STATUS_T_ENABLED;
	bfa_fsm_send_event(rxf, RXF_E_CAM_FLTR_MOD);
}

void
bna_rx_rss_disable(struct bna_rx *rx)
{
	struct bna_rxf *rxf = &rx->rxf;

	rxf->rxf_flags |= BNA_RXF_FL_RSS_CONFIG_PENDING;
	rxf->rss_status = BNA_STATUS_T_DISABLED;
	bfa_fsm_send_event(rxf, RXF_E_CAM_FLTR_MOD);
}

void
bna_rx_rss_reconfig(struct bna_rx *rx, struct bna_rxf_rss *rss_config)
{
	struct bna_rxf *rxf = &rx->rxf;

	rxf->rxf_flags |= BNA_RXF_FL_RSS_CONFIG_PENDING;
	rxf->rss_status = BNA_STATUS_T_ENABLED;
	rxf->rss_cfg = *rss_config;
	bfa_fsm_send_event(rxf, RXF_E_CAM_FLTR_MOD);
}

void
bna_rx_vlanfilter_enable(struct bna_rx *rx)
{
	struct bna_rxf *rxf = &rx->rxf;

	if (rxf->vlan_filter_status == BNA_STATUS_T_DISABLED) {
		rxf->rxf_flags |= BNA_RXF_FL_VLAN_CONFIG_PENDING;
		rxf->vlan_filter_status = BNA_STATUS_T_ENABLED;
		bfa_fsm_send_event(rxf, RXF_E_CAM_FLTR_MOD);
	}
}

void
bna_rx_vlanfilter_disable(struct bna_rx *rx)
{
	struct bna_rxf *rxf = &rx->rxf;

	if (rxf->vlan_filter_status == BNA_STATUS_T_ENABLED) {
		rxf->rxf_flags |= BNA_RXF_FL_VLAN_CONFIG_PENDING;
		rxf->vlan_filter_status = BNA_STATUS_T_DISABLED;
		bfa_fsm_send_event(rxf, RXF_E_CAM_FLTR_MOD);
	}
}

/* Rx */
static struct bna_rxp *
bna_rx_get_rxp(struct bna_rx *rx, int vector)
{
	struct bna_rxp *rxp;
	struct list_head *qe;

	list_for_each(qe, &rx->rxp_q) {
		rxp = (struct bna_rxp *)qe;
		if (rxp->vector == vector)
			return rxp;
	}
	return NULL;
}

/*
 * bna_rx_rss_rit_set()
 * Sets the Q ids for the specified msi-x vectors in the RIT.
 * Maximum rit size supported is 64, which should be the max size of the
 * vectors array.
 */
void
bna_rx_rss_rit_set(struct bna_rx *rx, unsigned int *vectors, int nvectors)
{
	int i;
	struct bna_rxp *rxp;
	struct bna_rxq *q0 = NULL, *q1 = NULL;
	struct bna_rxf *rxf;

	/* Build the RIT contents for this RX */
	rxf = &rx->rxf;
	for (i = 0; i < nvectors; i++) {
		rxp = bna_rx_get_rxp(rx, vectors[i]);

		GET_RXQS(rxp, q0, q1);
		rxf->rit_segment->rit[i].large_rxq_id = q0->rxq_id;
		rxf->rit_segment->rit[i].small_rxq_id = (q1 ? q1->rxq_id : 0);
	}

	rxf->rit_segment->rit_size = nvectors;

	/* Subsequent call to enable/reconfig RSS will update the RIT in h/w */
}

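/*
 * For example (illustrative), a 4-vector RSS configuration makes the loop
 * above fill rit[0..3], each entry naming the large (and optional small)
 * RxQ id of the RxP bound to that MSI-X vector, and sets rit_size to 4.
 */
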
void
bna_rx_coalescing_timeo_set(struct bna_rx *rx, int coalescing_timeo)
{
	struct bna_rxp *rxp;
	struct list_head *qe;

	list_for_each(qe, &rx->rxp_q) {
		rxp = (struct bna_rxp *)qe;
		rxp->cq.ccb->rx_coalescing_timeo = coalescing_timeo;
		bna_ib_coalescing_timeo_set(rxp->cq.ib, coalescing_timeo);
	}
}

void
bna_rx_dim_reconfig(struct bna *bna, u32 vector[][BNA_BIAS_T_MAX])
{
	int i, j;

	for (i = 0; i < BNA_LOAD_T_MAX; i++)
		for (j = 0; j < BNA_BIAS_T_MAX; j++)
			bna->rx_mod.dim_vector[i][j] = vector[i][j];
}

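/*
 * dim_vector[][] is indexed by [load][bias]: a packet-rate load class
 * (BNA_LOAD_T_*) and a small-vs-large packet bias (BNA_BIAS_T_*). Each cell
 * holds the Rx coalescing timeout to apply; bna_rx_dim_update() below does
 * the classification.
 */
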
void
bna_rx_dim_update(struct bna_ccb *ccb)
{
	struct bna *bna = ccb->cq->rx->bna;
	u32 load, bias;
	u32 pkt_rt, small_rt, large_rt;
	u8 coalescing_timeo;

	if ((ccb->pkt_rate.small_pkt_cnt == 0) &&
		(ccb->pkt_rate.large_pkt_cnt == 0))
		return;

	/* Arrive at preconfigured coalescing timeo value based on pkt rate */

	small_rt = ccb->pkt_rate.small_pkt_cnt;
	large_rt = ccb->pkt_rate.large_pkt_cnt;

	pkt_rt = small_rt + large_rt;

	if (pkt_rt < BNA_PKT_RATE_10K)
		load = BNA_LOAD_T_LOW_4;
	else if (pkt_rt < BNA_PKT_RATE_20K)
		load = BNA_LOAD_T_LOW_3;
	else if (pkt_rt < BNA_PKT_RATE_30K)
		load = BNA_LOAD_T_LOW_2;
	else if (pkt_rt < BNA_PKT_RATE_40K)
		load = BNA_LOAD_T_LOW_1;
	else if (pkt_rt < BNA_PKT_RATE_50K)
		load = BNA_LOAD_T_HIGH_1;
	else if (pkt_rt < BNA_PKT_RATE_60K)
		load = BNA_LOAD_T_HIGH_2;
	else if (pkt_rt < BNA_PKT_RATE_80K)
		load = BNA_LOAD_T_HIGH_3;
	else
		load = BNA_LOAD_T_HIGH_4;

	if (small_rt > (large_rt << 1))
		bias = BNA_BIAS_T_SMALL;
	else
		bias = BNA_BIAS_T_LARGE;

	ccb->pkt_rate.small_pkt_cnt = 0;
	ccb->pkt_rate.large_pkt_cnt = 0;

	coalescing_timeo = bna->rx_mod.dim_vector[load][bias];
	ccb->rx_coalescing_timeo = coalescing_timeo;

	/* Set it to IB */
	bna_ib_coalescing_timeo_set(ccb->cq->ib, coalescing_timeo);
}

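/*
 * Worked example (illustrative): an interval with 15000 small and 9000
 * large packets gives pkt_rt = 24000, which classifies as BNA_LOAD_T_LOW_2;
 * since small_rt (15000) is not more than twice large_rt (18000), the
 * large-packet bias column of dim_vector[][] is used for the lookup.
 */
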
/* Tx */
enum bna_cb_status
bna_tx_prio_set(struct bna_tx *tx, int prio,
		void (*cbfn)(struct bnad *, struct bna_tx *,
				enum bna_cb_status))
{
	if (tx->flags & BNA_TX_F_PRIO_LOCK)
		return BNA_CB_FAIL;
	else {
		tx->prio_change_cbfn = cbfn;
		bna_tx_prio_changed(tx, prio);
	}

	return BNA_CB_SUCCESS;
}

void
bna_tx_coalescing_timeo_set(struct bna_tx *tx, int coalescing_timeo)
{
	struct bna_txq *txq;
	struct list_head *qe;

	list_for_each(qe, &tx->txq_q) {
		txq = (struct bna_txq *)qe;
		bna_ib_coalescing_timeo_set(txq->ib, coalescing_timeo);
	}
}

struct bna_ritseg_pool_cfg {
	u32	pool_size;
	u32	pool_entry_size;
};
init_ritseg_pool(ritseg_pool_cfg);

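/*
 * ritseg_pool_cfg[] (populated by the init_ritseg_pool() macro) drives
 * bna_rit_mod_init() below: each of its BFI_RIT_SEG_TOTAL_POOLS entries
 * describes one free pool of pool_size segments of pool_entry_size RIT
 * entries each, carved consecutively out of the flat rit_mod->rit array.
 */
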
void
bna_ucam_mod_init(struct bna_ucam_mod *ucam_mod, struct bna *bna,
		struct bna_res_info *res_info)
{
	int i;

	ucam_mod->ucmac = (struct bna_mac *)
		res_info[BNA_RES_MEM_T_UCMAC_ARRAY].res_u.mem_info.mdl[0].kva;

	INIT_LIST_HEAD(&ucam_mod->free_q);
	for (i = 0; i < BFI_MAX_UCMAC; i++) {
		bfa_q_qe_init(&ucam_mod->ucmac[i].qe);
		list_add_tail(&ucam_mod->ucmac[i].qe, &ucam_mod->free_q);
	}

	ucam_mod->bna = bna;
}

void
bna_ucam_mod_uninit(struct bna_ucam_mod *ucam_mod)
{
	struct list_head *qe;
	int i = 0;

	list_for_each(qe, &ucam_mod->free_q)
		i++;

	ucam_mod->bna = NULL;
}

void
bna_mcam_mod_init(struct bna_mcam_mod *mcam_mod, struct bna *bna,
		struct bna_res_info *res_info)
{
	int i;

	mcam_mod->mcmac = (struct bna_mac *)
		res_info[BNA_RES_MEM_T_MCMAC_ARRAY].res_u.mem_info.mdl[0].kva;

	INIT_LIST_HEAD(&mcam_mod->free_q);
	for (i = 0; i < BFI_MAX_MCMAC; i++) {
		bfa_q_qe_init(&mcam_mod->mcmac[i].qe);
		list_add_tail(&mcam_mod->mcmac[i].qe, &mcam_mod->free_q);
	}

	mcam_mod->bna = bna;
}

void
bna_mcam_mod_uninit(struct bna_mcam_mod *mcam_mod)
{
	struct list_head *qe;
	int i = 0;

	list_for_each(qe, &mcam_mod->free_q)
		i++;

	mcam_mod->bna = NULL;
}

void
bna_rit_mod_init(struct bna_rit_mod *rit_mod,
		struct bna_res_info *res_info)
{
	int i;
	int j;
	int count;
	int offset;

	rit_mod->rit = (struct bna_rit_entry *)
		res_info[BNA_RES_MEM_T_RIT_ENTRY].res_u.mem_info.mdl[0].kva;
	rit_mod->rit_segment = (struct bna_rit_segment *)
		res_info[BNA_RES_MEM_T_RIT_SEGMENT].res_u.mem_info.mdl[0].kva;

	count = 0;
	offset = 0;
	for (i = 0; i < BFI_RIT_SEG_TOTAL_POOLS; i++) {
		INIT_LIST_HEAD(&rit_mod->rit_seg_pool[i]);
		for (j = 0; j < ritseg_pool_cfg[i].pool_size; j++) {
			bfa_q_qe_init(&rit_mod->rit_segment[count].qe);
			rit_mod->rit_segment[count].max_rit_size =
					ritseg_pool_cfg[i].pool_entry_size;
			rit_mod->rit_segment[count].rit_offset = offset;
			rit_mod->rit_segment[count].rit =
					&rit_mod->rit[offset];
			list_add_tail(&rit_mod->rit_segment[count].qe,
				&rit_mod->rit_seg_pool[i]);
			count++;
			offset += ritseg_pool_cfg[i].pool_entry_size;
		}
	}
}

void
bna_rit_mod_uninit(struct bna_rit_mod *rit_mod)
{
	struct bna_rit_segment *rit_segment;
	struct list_head *qe;
	int i;
	int j;

	for (i = 0; i < BFI_RIT_SEG_TOTAL_POOLS; i++) {
		j = 0;
		list_for_each(qe, &rit_mod->rit_seg_pool[i]) {
			rit_segment = (struct bna_rit_segment *)qe;
			j++;
		}
	}
}

/* Called during probe(), before calling bna_init() */
void
bna_res_req(struct bna_res_info *res_info)
{
	bna_adv_res_req(res_info);

	/* DMA memory for retrieving IOC attributes */
	res_info[BNA_RES_MEM_T_ATTR].res_type = BNA_RES_T_MEM;
	res_info[BNA_RES_MEM_T_ATTR].res_u.mem_info.mem_type = BNA_MEM_T_DMA;
	res_info[BNA_RES_MEM_T_ATTR].res_u.mem_info.num = 1;
	res_info[BNA_RES_MEM_T_ATTR].res_u.mem_info.len =
				ALIGN(bfa_ioc_meminfo(), PAGE_SIZE);

	/* DMA memory for index segment of an IB */
	res_info[BNA_RES_MEM_T_IBIDX].res_type = BNA_RES_T_MEM;
	res_info[BNA_RES_MEM_T_IBIDX].res_u.mem_info.mem_type = BNA_MEM_T_DMA;
	res_info[BNA_RES_MEM_T_IBIDX].res_u.mem_info.len =
				BFI_IBIDX_SIZE * BFI_IBIDX_MAX_SEGSIZE;
	res_info[BNA_RES_MEM_T_IBIDX].res_u.mem_info.num = BFI_MAX_IB;

	/* Virtual memory for IB objects - stored by IB module */
	res_info[BNA_RES_MEM_T_IB_ARRAY].res_type = BNA_RES_T_MEM;
	res_info[BNA_RES_MEM_T_IB_ARRAY].res_u.mem_info.mem_type =
								BNA_MEM_T_KVA;
	res_info[BNA_RES_MEM_T_IB_ARRAY].res_u.mem_info.num = 1;
	res_info[BNA_RES_MEM_T_IB_ARRAY].res_u.mem_info.len =
				BFI_MAX_IB * sizeof(struct bna_ib);

	/* Virtual memory for intr objects - stored by IB module */
	res_info[BNA_RES_MEM_T_INTR_ARRAY].res_type = BNA_RES_T_MEM;
	res_info[BNA_RES_MEM_T_INTR_ARRAY].res_u.mem_info.mem_type =
								BNA_MEM_T_KVA;
	res_info[BNA_RES_MEM_T_INTR_ARRAY].res_u.mem_info.num = 1;
	res_info[BNA_RES_MEM_T_INTR_ARRAY].res_u.mem_info.len =
				BFI_MAX_IB * sizeof(struct bna_intr);

	/* Virtual memory for idx_seg objects - stored by IB module */
	res_info[BNA_RES_MEM_T_IDXSEG_ARRAY].res_type = BNA_RES_T_MEM;
	res_info[BNA_RES_MEM_T_IDXSEG_ARRAY].res_u.mem_info.mem_type =
								BNA_MEM_T_KVA;
	res_info[BNA_RES_MEM_T_IDXSEG_ARRAY].res_u.mem_info.num = 1;
	res_info[BNA_RES_MEM_T_IDXSEG_ARRAY].res_u.mem_info.len =
			BFI_IBIDX_TOTAL_SEGS * sizeof(struct bna_ibidx_seg);

	/* Virtual memory for Tx objects - stored by Tx module */
	res_info[BNA_RES_MEM_T_TX_ARRAY].res_type = BNA_RES_T_MEM;
	res_info[BNA_RES_MEM_T_TX_ARRAY].res_u.mem_info.mem_type =
								BNA_MEM_T_KVA;
	res_info[BNA_RES_MEM_T_TX_ARRAY].res_u.mem_info.num = 1;
	res_info[BNA_RES_MEM_T_TX_ARRAY].res_u.mem_info.len =
			BFI_MAX_TXQ * sizeof(struct bna_tx);

	/* Virtual memory for TxQ - stored by Tx module */
	res_info[BNA_RES_MEM_T_TXQ_ARRAY].res_type = BNA_RES_T_MEM;
	res_info[BNA_RES_MEM_T_TXQ_ARRAY].res_u.mem_info.mem_type =
								BNA_MEM_T_KVA;
	res_info[BNA_RES_MEM_T_TXQ_ARRAY].res_u.mem_info.num = 1;
	res_info[BNA_RES_MEM_T_TXQ_ARRAY].res_u.mem_info.len =
			BFI_MAX_TXQ * sizeof(struct bna_txq);

	/* Virtual memory for Rx objects - stored by Rx module */
	res_info[BNA_RES_MEM_T_RX_ARRAY].res_type = BNA_RES_T_MEM;
	res_info[BNA_RES_MEM_T_RX_ARRAY].res_u.mem_info.mem_type =
								BNA_MEM_T_KVA;
	res_info[BNA_RES_MEM_T_RX_ARRAY].res_u.mem_info.num = 1;
	res_info[BNA_RES_MEM_T_RX_ARRAY].res_u.mem_info.len =
			BFI_MAX_RXQ * sizeof(struct bna_rx);

	/* Virtual memory for RxPath - stored by Rx module */
	res_info[BNA_RES_MEM_T_RXP_ARRAY].res_type = BNA_RES_T_MEM;
	res_info[BNA_RES_MEM_T_RXP_ARRAY].res_u.mem_info.mem_type =
								BNA_MEM_T_KVA;
	res_info[BNA_RES_MEM_T_RXP_ARRAY].res_u.mem_info.num = 1;
	res_info[BNA_RES_MEM_T_RXP_ARRAY].res_u.mem_info.len =
			BFI_MAX_RXQ * sizeof(struct bna_rxp);

	/* Virtual memory for RxQ - stored by Rx module */
	res_info[BNA_RES_MEM_T_RXQ_ARRAY].res_type = BNA_RES_T_MEM;
	res_info[BNA_RES_MEM_T_RXQ_ARRAY].res_u.mem_info.mem_type =
								BNA_MEM_T_KVA;
	res_info[BNA_RES_MEM_T_RXQ_ARRAY].res_u.mem_info.num = 1;
	res_info[BNA_RES_MEM_T_RXQ_ARRAY].res_u.mem_info.len =
			BFI_MAX_RXQ * sizeof(struct bna_rxq);

	/* Virtual memory for Unicast MAC address - stored by ucam module */
	res_info[BNA_RES_MEM_T_UCMAC_ARRAY].res_type = BNA_RES_T_MEM;
	res_info[BNA_RES_MEM_T_UCMAC_ARRAY].res_u.mem_info.mem_type =
								BNA_MEM_T_KVA;
	res_info[BNA_RES_MEM_T_UCMAC_ARRAY].res_u.mem_info.num = 1;
	res_info[BNA_RES_MEM_T_UCMAC_ARRAY].res_u.mem_info.len =
			BFI_MAX_UCMAC * sizeof(struct bna_mac);

	/* Virtual memory for Multicast MAC address - stored by mcam module */
	res_info[BNA_RES_MEM_T_MCMAC_ARRAY].res_type = BNA_RES_T_MEM;
	res_info[BNA_RES_MEM_T_MCMAC_ARRAY].res_u.mem_info.mem_type =
								BNA_MEM_T_KVA;
	res_info[BNA_RES_MEM_T_MCMAC_ARRAY].res_u.mem_info.num = 1;
	res_info[BNA_RES_MEM_T_MCMAC_ARRAY].res_u.mem_info.len =
			BFI_MAX_MCMAC * sizeof(struct bna_mac);

	/* Virtual memory for RIT entries */
	res_info[BNA_RES_MEM_T_RIT_ENTRY].res_type = BNA_RES_T_MEM;
	res_info[BNA_RES_MEM_T_RIT_ENTRY].res_u.mem_info.mem_type =
								BNA_MEM_T_KVA;
	res_info[BNA_RES_MEM_T_RIT_ENTRY].res_u.mem_info.num = 1;
	res_info[BNA_RES_MEM_T_RIT_ENTRY].res_u.mem_info.len =
			BFI_MAX_RIT_SIZE * sizeof(struct bna_rit_entry);

	/* Virtual memory for RIT segment table */
	res_info[BNA_RES_MEM_T_RIT_SEGMENT].res_type = BNA_RES_T_MEM;
	res_info[BNA_RES_MEM_T_RIT_SEGMENT].res_u.mem_info.mem_type =
								BNA_MEM_T_KVA;
	res_info[BNA_RES_MEM_T_RIT_SEGMENT].res_u.mem_info.num = 1;
	res_info[BNA_RES_MEM_T_RIT_SEGMENT].res_u.mem_info.len =
			BFI_RIT_TOTAL_SEGS * sizeof(struct bna_rit_segment);

	/* Interrupt resource for mailbox interrupt */
	res_info[BNA_RES_INTR_T_MBOX].res_type = BNA_RES_T_INTR;
	res_info[BNA_RES_INTR_T_MBOX].res_u.intr_info.intr_type =
								BNA_INTR_T_MSIX;
	res_info[BNA_RES_INTR_T_MBOX].res_u.intr_info.num = 1;
}

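/*
 * The res_info[] table filled in above is returned to the driver, which
 * must allocate each resource before handing the same table to bna_init()
 * below; bna_init() only picks up the resulting kva/dma addresses and does
 * no allocation of its own.
 */
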
/* Called during probe() */
void
bna_init(struct bna *bna, struct bnad *bnad, struct bfa_pcidev *pcidev,
		struct bna_res_info *res_info)
{
	bna->bnad = bnad;
	bna->pcidev = *pcidev;

	bna->stats.hw_stats = (struct bfi_ll_stats *)
		res_info[BNA_RES_MEM_T_STATS].res_u.mem_info.mdl[0].kva;
	bna->hw_stats_dma.msb =
		res_info[BNA_RES_MEM_T_STATS].res_u.mem_info.mdl[0].dma.msb;
	bna->hw_stats_dma.lsb =
		res_info[BNA_RES_MEM_T_STATS].res_u.mem_info.mdl[0].dma.lsb;
	bna->stats.sw_stats = (struct bna_sw_stats *)
		res_info[BNA_RES_MEM_T_SWSTATS].res_u.mem_info.mdl[0].kva;

	bna->regs.page_addr = bna->pcidev.pci_bar_kva +
				reg_offset[bna->pcidev.pci_func].page_addr;
	bna->regs.fn_int_status = bna->pcidev.pci_bar_kva +
				reg_offset[bna->pcidev.pci_func].fn_int_status;
	bna->regs.fn_int_mask = bna->pcidev.pci_bar_kva +
				reg_offset[bna->pcidev.pci_func].fn_int_mask;

	if (bna->pcidev.pci_func < 3)
		bna->port_num = 0;
	else
		bna->port_num = 1;

	/* Also initializes diag, cee, sfp, phy_port and mbox_mod */
	bna_device_init(&bna->device, bna, res_info);

	bna_port_init(&bna->port, bna);

	bna_tx_mod_init(&bna->tx_mod, bna, res_info);

	bna_rx_mod_init(&bna->rx_mod, bna, res_info);

	bna_ib_mod_init(&bna->ib_mod, bna, res_info);

	bna_rit_mod_init(&bna->rit_mod, res_info);

	bna_ucam_mod_init(&bna->ucam_mod, bna, res_info);

	bna_mcam_mod_init(&bna->mcam_mod, bna, res_info);

	bna->rxf_default_id = BFI_MAX_RXF;
	bna->rxf_promisc_id = BFI_MAX_RXF;

	/* Mbox q element for posting stat request to f/w */
	bfa_q_qe_init(&bna->mbox_qe.qe);
}

void
bna_uninit(struct bna *bna)
{
	bna_mcam_mod_uninit(&bna->mcam_mod);

	bna_ucam_mod_uninit(&bna->ucam_mod);

	bna_rit_mod_uninit(&bna->rit_mod);

	bna_ib_mod_uninit(&bna->ib_mod);

	bna_rx_mod_uninit(&bna->rx_mod);

	bna_tx_mod_uninit(&bna->tx_mod);

	bna_port_uninit(&bna->port);

	bna_device_uninit(&bna->device);

	bna->bnad = NULL;
}

struct bna_mac *
bna_ucam_mod_mac_get(struct bna_ucam_mod *ucam_mod)
{
	struct list_head *qe;

	if (list_empty(&ucam_mod->free_q))
		return NULL;

	bfa_q_deq(&ucam_mod->free_q, &qe);

	return (struct bna_mac *)qe;
}

void
bna_ucam_mod_mac_put(struct bna_ucam_mod *ucam_mod, struct bna_mac *mac)
{
	list_add_tail(&mac->qe, &ucam_mod->free_q);
}

struct bna_mac *
bna_mcam_mod_mac_get(struct bna_mcam_mod *mcam_mod)
{
	struct list_head *qe;

	if (list_empty(&mcam_mod->free_q))
		return NULL;

	bfa_q_deq(&mcam_mod->free_q, &qe);

	return (struct bna_mac *)qe;
}

void
bna_mcam_mod_mac_put(struct bna_mcam_mod *mcam_mod, struct bna_mac *mac)
{
	list_add_tail(&mac->qe, &mcam_mod->free_q);
}

/*
 * Note: This should be called in the same locking context as the call to
 * bna_rit_mod_seg_get()
 */
int
bna_rit_mod_can_satisfy(struct bna_rit_mod *rit_mod, int seg_size)
{
	int i;

	/* Select the pool for seg_size */
	for (i = 0; i < BFI_RIT_SEG_TOTAL_POOLS; i++) {
		if (seg_size <= ritseg_pool_cfg[i].pool_entry_size)
			break;
	}

	if (i == BFI_RIT_SEG_TOTAL_POOLS)
		return 0;

	if (list_empty(&rit_mod->rit_seg_pool[i]))
		return 0;

	return 1;
}

struct bna_rit_segment *
bna_rit_mod_seg_get(struct bna_rit_mod *rit_mod, int seg_size)
{
	struct bna_rit_segment *seg;
	struct list_head *qe;
	int i;

	/* Select the pool for seg_size */
	for (i = 0; i < BFI_RIT_SEG_TOTAL_POOLS; i++) {
		if (seg_size <= ritseg_pool_cfg[i].pool_entry_size)
			break;
	}

	if (i == BFI_RIT_SEG_TOTAL_POOLS)
		return NULL;

	if (list_empty(&rit_mod->rit_seg_pool[i]))
		return NULL;

	bfa_q_deq(&rit_mod->rit_seg_pool[i], &qe);
	seg = (struct bna_rit_segment *)qe;
	bfa_q_qe_init(&seg->qe);
	seg->rit_size = seg_size;

	return seg;
}

void
bna_rit_mod_seg_put(struct bna_rit_mod *rit_mod,
			struct bna_rit_segment *seg)
{
	int i;

	/* Select the pool for seg->max_rit_size */
	for (i = 0; i < BFI_RIT_SEG_TOTAL_POOLS; i++) {
		if (seg->max_rit_size == ritseg_pool_cfg[i].pool_entry_size)
			break;
	}

	seg->rit_size = 0;
	list_add_tail(&seg->qe, &rit_mod->rit_seg_pool[i]);
}