/* Linux network driver for QLogic BR-series Converged Network Adapter.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License (GPL) Version 2 as
 * published by the Free Software Foundation
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 */
/* Copyright (c) 2005-2014 Brocade Communications Systems, Inc.
 * Copyright (c) 2014-2015 QLogic Corporation
 */

/* MSGQ module source file. */
/*
 * Invoke and clear a command entry's completion callback.
 *
 * The cbfn/cbarg pointers are snapshotted and cleared *before* the call so
 * the entry may be re-posted from within its own callback.  Wrapped in
 * do { } while (0) so the expansion is a single statement and is safe
 * inside un-braced if/else bodies (kernel macro idiom).
 */
#define call_cmdq_ent_cbfn(_cmdq_ent, _status)				\
do {									\
	bfa_msgq_cmdcbfn_t cbfn;					\
	void *cbarg;							\
	cbfn = (_cmdq_ent)->cbfn;					\
	cbarg = (_cmdq_ent)->cbarg;					\
	(_cmdq_ent)->cbfn = NULL;					\
	(_cmdq_ent)->cbarg = NULL;					\
	if (cbfn) {							\
		cbfn(cbarg, (_status));					\
	}								\
} while (0)
/* Forward declarations: these cmdq helpers are used before their bodies. */
static void bfa_msgq_cmdq_dbell(struct bfa_msgq_cmdq *cmdq);
static void bfa_msgq_cmdq_copy_rsp(struct bfa_msgq_cmdq *cmdq);
51 bfa_fsm_state_decl(cmdq
, stopped
, struct bfa_msgq_cmdq
, enum cmdq_event
);
52 bfa_fsm_state_decl(cmdq
, init_wait
, struct bfa_msgq_cmdq
, enum cmdq_event
);
53 bfa_fsm_state_decl(cmdq
, ready
, struct bfa_msgq_cmdq
, enum cmdq_event
);
54 bfa_fsm_state_decl(cmdq
, dbell_wait
, struct bfa_msgq_cmdq
,
58 cmdq_sm_stopped_entry(struct bfa_msgq_cmdq
*cmdq
)
60 struct bfa_msgq_cmd_entry
*cmdq_ent
;
62 cmdq
->producer_index
= 0;
63 cmdq
->consumer_index
= 0;
67 cmdq
->bytes_to_copy
= 0;
68 while (!list_empty(&cmdq
->pending_q
)) {
69 bfa_q_deq(&cmdq
->pending_q
, &cmdq_ent
);
70 bfa_q_qe_init(&cmdq_ent
->qe
);
71 call_cmdq_ent_cbfn(cmdq_ent
, BFA_STATUS_FAILED
);
76 cmdq_sm_stopped(struct bfa_msgq_cmdq
*cmdq
, enum cmdq_event event
)
80 bfa_fsm_set_state(cmdq
, cmdq_sm_init_wait
);
89 cmdq
->flags
|= BFA_MSGQ_CMDQ_F_DB_UPDATE
;
98 cmdq_sm_init_wait_entry(struct bfa_msgq_cmdq
*cmdq
)
100 bfa_wc_down(&cmdq
->msgq
->init_wc
);
104 cmdq_sm_init_wait(struct bfa_msgq_cmdq
*cmdq
, enum cmdq_event event
)
109 bfa_fsm_set_state(cmdq
, cmdq_sm_stopped
);
113 cmdq
->flags
|= BFA_MSGQ_CMDQ_F_DB_UPDATE
;
116 case CMDQ_E_INIT_RESP
:
117 if (cmdq
->flags
& BFA_MSGQ_CMDQ_F_DB_UPDATE
) {
118 cmdq
->flags
&= ~BFA_MSGQ_CMDQ_F_DB_UPDATE
;
119 bfa_fsm_set_state(cmdq
, cmdq_sm_dbell_wait
);
121 bfa_fsm_set_state(cmdq
, cmdq_sm_ready
);
/* Entry action for the ready state: nothing to do. */
static void
cmdq_sm_ready_entry(struct bfa_msgq_cmdq *cmdq)
{
}
135 cmdq_sm_ready(struct bfa_msgq_cmdq
*cmdq
, enum cmdq_event event
)
140 bfa_fsm_set_state(cmdq
, cmdq_sm_stopped
);
144 bfa_fsm_set_state(cmdq
, cmdq_sm_dbell_wait
);
/* Entry action for dbell_wait: ring the producer-index doorbell. */
static void
cmdq_sm_dbell_wait_entry(struct bfa_msgq_cmdq *cmdq)
{
	bfa_msgq_cmdq_dbell(cmdq);
}
159 cmdq_sm_dbell_wait(struct bfa_msgq_cmdq
*cmdq
, enum cmdq_event event
)
164 bfa_fsm_set_state(cmdq
, cmdq_sm_stopped
);
168 cmdq
->flags
|= BFA_MSGQ_CMDQ_F_DB_UPDATE
;
171 case CMDQ_E_DB_READY
:
172 if (cmdq
->flags
& BFA_MSGQ_CMDQ_F_DB_UPDATE
) {
173 cmdq
->flags
&= ~BFA_MSGQ_CMDQ_F_DB_UPDATE
;
174 bfa_fsm_set_state(cmdq
, cmdq_sm_dbell_wait
);
176 bfa_fsm_set_state(cmdq
, cmdq_sm_ready
);
185 bfa_msgq_cmdq_dbell_ready(void *arg
)
187 struct bfa_msgq_cmdq
*cmdq
= (struct bfa_msgq_cmdq
*)arg
;
188 bfa_fsm_send_event(cmdq
, CMDQ_E_DB_READY
);
192 bfa_msgq_cmdq_dbell(struct bfa_msgq_cmdq
*cmdq
)
194 struct bfi_msgq_h2i_db
*dbell
=
195 (struct bfi_msgq_h2i_db
*)(&cmdq
->dbell_mb
.msg
[0]);
197 memset(dbell
, 0, sizeof(struct bfi_msgq_h2i_db
));
198 bfi_h2i_set(dbell
->mh
, BFI_MC_MSGQ
, BFI_MSGQ_H2I_DOORBELL_PI
, 0);
199 dbell
->mh
.mtag
.i2htok
= 0;
200 dbell
->idx
.cmdq_pi
= htons(cmdq
->producer_index
);
202 if (!bfa_nw_ioc_mbox_queue(cmdq
->msgq
->ioc
, &cmdq
->dbell_mb
,
203 bfa_msgq_cmdq_dbell_ready
, cmdq
)) {
204 bfa_msgq_cmdq_dbell_ready(cmdq
);
209 __cmd_copy(struct bfa_msgq_cmdq
*cmdq
, struct bfa_msgq_cmd_entry
*cmd
)
211 size_t len
= cmd
->msg_size
;
216 src
= (u8
*)cmd
->msg_hdr
;
217 dst
= (u8
*)cmdq
->addr
.kva
;
218 dst
+= (cmdq
->producer_index
* BFI_MSGQ_CMD_ENTRY_SIZE
);
221 to_copy
= (len
< BFI_MSGQ_CMD_ENTRY_SIZE
) ?
222 len
: BFI_MSGQ_CMD_ENTRY_SIZE
;
223 memcpy(dst
, src
, to_copy
);
225 src
+= BFI_MSGQ_CMD_ENTRY_SIZE
;
226 BFA_MSGQ_INDX_ADD(cmdq
->producer_index
, 1, cmdq
->depth
);
227 dst
= (u8
*)cmdq
->addr
.kva
;
228 dst
+= (cmdq
->producer_index
* BFI_MSGQ_CMD_ENTRY_SIZE
);
235 bfa_msgq_cmdq_ci_update(struct bfa_msgq_cmdq
*cmdq
, struct bfi_mbmsg
*mb
)
237 struct bfi_msgq_i2h_db
*dbell
= (struct bfi_msgq_i2h_db
*)mb
;
238 struct bfa_msgq_cmd_entry
*cmd
;
241 cmdq
->consumer_index
= ntohs(dbell
->idx
.cmdq_ci
);
243 /* Walk through pending list to see if the command can be posted */
244 while (!list_empty(&cmdq
->pending_q
)) {
246 (struct bfa_msgq_cmd_entry
*)bfa_q_first(&cmdq
->pending_q
);
247 if (ntohs(cmd
->msg_hdr
->num_entries
) <=
248 BFA_MSGQ_FREE_CNT(cmdq
)) {
250 __cmd_copy(cmdq
, cmd
);
252 call_cmdq_ent_cbfn(cmd
, BFA_STATUS_OK
);
259 bfa_fsm_send_event(cmdq
, CMDQ_E_POST
);
263 bfa_msgq_cmdq_copy_next(void *arg
)
265 struct bfa_msgq_cmdq
*cmdq
= (struct bfa_msgq_cmdq
*)arg
;
267 if (cmdq
->bytes_to_copy
)
268 bfa_msgq_cmdq_copy_rsp(cmdq
);
272 bfa_msgq_cmdq_copy_req(struct bfa_msgq_cmdq
*cmdq
, struct bfi_mbmsg
*mb
)
274 struct bfi_msgq_i2h_cmdq_copy_req
*req
=
275 (struct bfi_msgq_i2h_cmdq_copy_req
*)mb
;
278 cmdq
->offset
= ntohs(req
->offset
);
279 cmdq
->bytes_to_copy
= ntohs(req
->len
);
280 bfa_msgq_cmdq_copy_rsp(cmdq
);
284 bfa_msgq_cmdq_copy_rsp(struct bfa_msgq_cmdq
*cmdq
)
286 struct bfi_msgq_h2i_cmdq_copy_rsp
*rsp
=
287 (struct bfi_msgq_h2i_cmdq_copy_rsp
*)&cmdq
->copy_mb
.msg
[0];
289 u8
*addr
= (u8
*)cmdq
->addr
.kva
;
291 memset(rsp
, 0, sizeof(struct bfi_msgq_h2i_cmdq_copy_rsp
));
292 bfi_h2i_set(rsp
->mh
, BFI_MC_MSGQ
, BFI_MSGQ_H2I_CMDQ_COPY_RSP
, 0);
293 rsp
->mh
.mtag
.i2htok
= htons(cmdq
->token
);
294 copied
= (cmdq
->bytes_to_copy
>= BFI_CMD_COPY_SZ
) ? BFI_CMD_COPY_SZ
:
296 addr
+= cmdq
->offset
;
297 memcpy(rsp
->data
, addr
, copied
);
300 cmdq
->offset
+= copied
;
301 cmdq
->bytes_to_copy
-= copied
;
303 if (!bfa_nw_ioc_mbox_queue(cmdq
->msgq
->ioc
, &cmdq
->copy_mb
,
304 bfa_msgq_cmdq_copy_next
, cmdq
)) {
305 bfa_msgq_cmdq_copy_next(cmdq
);
310 bfa_msgq_cmdq_attach(struct bfa_msgq_cmdq
*cmdq
, struct bfa_msgq
*msgq
)
312 cmdq
->depth
= BFA_MSGQ_CMDQ_NUM_ENTRY
;
313 INIT_LIST_HEAD(&cmdq
->pending_q
);
315 bfa_fsm_set_state(cmdq
, cmdq_sm_stopped
);
/* Forward declaration: used by the rspq FSM before its definition. */
static void bfa_msgq_rspq_dbell(struct bfa_msgq_rspq *rspq);
325 RSPQ_E_INIT_RESP
= 5,
329 bfa_fsm_state_decl(rspq
, stopped
, struct bfa_msgq_rspq
, enum rspq_event
);
330 bfa_fsm_state_decl(rspq
, init_wait
, struct bfa_msgq_rspq
,
332 bfa_fsm_state_decl(rspq
, ready
, struct bfa_msgq_rspq
, enum rspq_event
);
333 bfa_fsm_state_decl(rspq
, dbell_wait
, struct bfa_msgq_rspq
,
337 rspq_sm_stopped_entry(struct bfa_msgq_rspq
*rspq
)
339 rspq
->producer_index
= 0;
340 rspq
->consumer_index
= 0;
345 rspq_sm_stopped(struct bfa_msgq_rspq
*rspq
, enum rspq_event event
)
349 bfa_fsm_set_state(rspq
, rspq_sm_init_wait
);
363 rspq_sm_init_wait_entry(struct bfa_msgq_rspq
*rspq
)
365 bfa_wc_down(&rspq
->msgq
->init_wc
);
369 rspq_sm_init_wait(struct bfa_msgq_rspq
*rspq
, enum rspq_event event
)
374 bfa_fsm_set_state(rspq
, rspq_sm_stopped
);
377 case RSPQ_E_INIT_RESP
:
378 bfa_fsm_set_state(rspq
, rspq_sm_ready
);
/* Entry action for the ready state: nothing to do. */
static void
rspq_sm_ready_entry(struct bfa_msgq_rspq *rspq)
{
}
392 rspq_sm_ready(struct bfa_msgq_rspq
*rspq
, enum rspq_event event
)
397 bfa_fsm_set_state(rspq
, rspq_sm_stopped
);
401 bfa_fsm_set_state(rspq
, rspq_sm_dbell_wait
);
410 rspq_sm_dbell_wait_entry(struct bfa_msgq_rspq
*rspq
)
412 if (!bfa_nw_ioc_is_disabled(rspq
->msgq
->ioc
))
413 bfa_msgq_rspq_dbell(rspq
);
417 rspq_sm_dbell_wait(struct bfa_msgq_rspq
*rspq
, enum rspq_event event
)
422 bfa_fsm_set_state(rspq
, rspq_sm_stopped
);
426 rspq
->flags
|= BFA_MSGQ_RSPQ_F_DB_UPDATE
;
429 case RSPQ_E_DB_READY
:
430 if (rspq
->flags
& BFA_MSGQ_RSPQ_F_DB_UPDATE
) {
431 rspq
->flags
&= ~BFA_MSGQ_RSPQ_F_DB_UPDATE
;
432 bfa_fsm_set_state(rspq
, rspq_sm_dbell_wait
);
434 bfa_fsm_set_state(rspq
, rspq_sm_ready
);
443 bfa_msgq_rspq_dbell_ready(void *arg
)
445 struct bfa_msgq_rspq
*rspq
= (struct bfa_msgq_rspq
*)arg
;
446 bfa_fsm_send_event(rspq
, RSPQ_E_DB_READY
);
450 bfa_msgq_rspq_dbell(struct bfa_msgq_rspq
*rspq
)
452 struct bfi_msgq_h2i_db
*dbell
=
453 (struct bfi_msgq_h2i_db
*)(&rspq
->dbell_mb
.msg
[0]);
455 memset(dbell
, 0, sizeof(struct bfi_msgq_h2i_db
));
456 bfi_h2i_set(dbell
->mh
, BFI_MC_MSGQ
, BFI_MSGQ_H2I_DOORBELL_CI
, 0);
457 dbell
->mh
.mtag
.i2htok
= 0;
458 dbell
->idx
.rspq_ci
= htons(rspq
->consumer_index
);
460 if (!bfa_nw_ioc_mbox_queue(rspq
->msgq
->ioc
, &rspq
->dbell_mb
,
461 bfa_msgq_rspq_dbell_ready
, rspq
)) {
462 bfa_msgq_rspq_dbell_ready(rspq
);
467 bfa_msgq_rspq_pi_update(struct bfa_msgq_rspq
*rspq
, struct bfi_mbmsg
*mb
)
469 struct bfi_msgq_i2h_db
*dbell
= (struct bfi_msgq_i2h_db
*)mb
;
470 struct bfi_msgq_mhdr
*msghdr
;
475 rspq
->producer_index
= ntohs(dbell
->idx
.rspq_pi
);
477 while (rspq
->consumer_index
!= rspq
->producer_index
) {
478 rspq_qe
= (u8
*)rspq
->addr
.kva
;
479 rspq_qe
+= (rspq
->consumer_index
* BFI_MSGQ_RSP_ENTRY_SIZE
);
480 msghdr
= (struct bfi_msgq_mhdr
*)rspq_qe
;
482 mc
= msghdr
->msg_class
;
483 num_entries
= ntohs(msghdr
->num_entries
);
485 if ((mc
>= BFI_MC_MAX
) || (rspq
->rsphdlr
[mc
].cbfn
== NULL
))
488 (rspq
->rsphdlr
[mc
].cbfn
)(rspq
->rsphdlr
[mc
].cbarg
, msghdr
);
490 BFA_MSGQ_INDX_ADD(rspq
->consumer_index
, num_entries
,
494 bfa_fsm_send_event(rspq
, RSPQ_E_RESP
);
498 bfa_msgq_rspq_attach(struct bfa_msgq_rspq
*rspq
, struct bfa_msgq
*msgq
)
500 rspq
->depth
= BFA_MSGQ_RSPQ_NUM_ENTRY
;
502 bfa_fsm_set_state(rspq
, rspq_sm_stopped
);
506 bfa_msgq_init_rsp(struct bfa_msgq
*msgq
,
507 struct bfi_mbmsg
*mb
)
509 bfa_fsm_send_event(&msgq
->cmdq
, CMDQ_E_INIT_RESP
);
510 bfa_fsm_send_event(&msgq
->rspq
, RSPQ_E_INIT_RESP
);
514 bfa_msgq_init(void *arg
)
516 struct bfa_msgq
*msgq
= (struct bfa_msgq
*)arg
;
517 struct bfi_msgq_cfg_req
*msgq_cfg
=
518 (struct bfi_msgq_cfg_req
*)&msgq
->init_mb
.msg
[0];
520 memset(msgq_cfg
, 0, sizeof(struct bfi_msgq_cfg_req
));
521 bfi_h2i_set(msgq_cfg
->mh
, BFI_MC_MSGQ
, BFI_MSGQ_H2I_INIT_REQ
, 0);
522 msgq_cfg
->mh
.mtag
.i2htok
= 0;
524 bfa_dma_be_addr_set(msgq_cfg
->cmdq
.addr
, msgq
->cmdq
.addr
.pa
);
525 msgq_cfg
->cmdq
.q_depth
= htons(msgq
->cmdq
.depth
);
526 bfa_dma_be_addr_set(msgq_cfg
->rspq
.addr
, msgq
->rspq
.addr
.pa
);
527 msgq_cfg
->rspq
.q_depth
= htons(msgq
->rspq
.depth
);
529 bfa_nw_ioc_mbox_queue(msgq
->ioc
, &msgq
->init_mb
, NULL
, NULL
);
533 bfa_msgq_isr(void *cbarg
, struct bfi_mbmsg
*msg
)
535 struct bfa_msgq
*msgq
= (struct bfa_msgq
*)cbarg
;
537 switch (msg
->mh
.msg_id
) {
538 case BFI_MSGQ_I2H_INIT_RSP
:
539 bfa_msgq_init_rsp(msgq
, msg
);
542 case BFI_MSGQ_I2H_DOORBELL_PI
:
543 bfa_msgq_rspq_pi_update(&msgq
->rspq
, msg
);
546 case BFI_MSGQ_I2H_DOORBELL_CI
:
547 bfa_msgq_cmdq_ci_update(&msgq
->cmdq
, msg
);
550 case BFI_MSGQ_I2H_CMDQ_COPY_REQ
:
551 bfa_msgq_cmdq_copy_req(&msgq
->cmdq
, msg
);
560 bfa_msgq_notify(void *cbarg
, enum bfa_ioc_event event
)
562 struct bfa_msgq
*msgq
= (struct bfa_msgq
*)cbarg
;
565 case BFA_IOC_E_ENABLED
:
566 bfa_wc_init(&msgq
->init_wc
, bfa_msgq_init
, msgq
);
567 bfa_wc_up(&msgq
->init_wc
);
568 bfa_fsm_send_event(&msgq
->cmdq
, CMDQ_E_START
);
569 bfa_wc_up(&msgq
->init_wc
);
570 bfa_fsm_send_event(&msgq
->rspq
, RSPQ_E_START
);
571 bfa_wc_wait(&msgq
->init_wc
);
574 case BFA_IOC_E_DISABLED
:
575 bfa_fsm_send_event(&msgq
->cmdq
, CMDQ_E_STOP
);
576 bfa_fsm_send_event(&msgq
->rspq
, RSPQ_E_STOP
);
579 case BFA_IOC_E_FAILED
:
580 bfa_fsm_send_event(&msgq
->cmdq
, CMDQ_E_FAIL
);
581 bfa_fsm_send_event(&msgq
->rspq
, RSPQ_E_FAIL
);
590 bfa_msgq_meminfo(void)
592 return roundup(BFA_MSGQ_CMDQ_SIZE
, BFA_DMA_ALIGN_SZ
) +
593 roundup(BFA_MSGQ_RSPQ_SIZE
, BFA_DMA_ALIGN_SZ
);
597 bfa_msgq_memclaim(struct bfa_msgq
*msgq
, u8
*kva
, u64 pa
)
599 msgq
->cmdq
.addr
.kva
= kva
;
600 msgq
->cmdq
.addr
.pa
= pa
;
602 kva
+= roundup(BFA_MSGQ_CMDQ_SIZE
, BFA_DMA_ALIGN_SZ
);
603 pa
+= roundup(BFA_MSGQ_CMDQ_SIZE
, BFA_DMA_ALIGN_SZ
);
605 msgq
->rspq
.addr
.kva
= kva
;
606 msgq
->rspq
.addr
.pa
= pa
;
610 bfa_msgq_attach(struct bfa_msgq
*msgq
, struct bfa_ioc
*ioc
)
614 bfa_msgq_cmdq_attach(&msgq
->cmdq
, msgq
);
615 bfa_msgq_rspq_attach(&msgq
->rspq
, msgq
);
617 bfa_nw_ioc_mbox_regisr(msgq
->ioc
, BFI_MC_MSGQ
, bfa_msgq_isr
, msgq
);
618 bfa_q_qe_init(&msgq
->ioc_notify
);
619 bfa_ioc_notify_init(&msgq
->ioc_notify
, bfa_msgq_notify
, msgq
);
620 bfa_nw_ioc_notify_register(msgq
->ioc
, &msgq
->ioc_notify
);
624 bfa_msgq_regisr(struct bfa_msgq
*msgq
, enum bfi_mclass mc
,
625 bfa_msgq_mcfunc_t cbfn
, void *cbarg
)
627 msgq
->rspq
.rsphdlr
[mc
].cbfn
= cbfn
;
628 msgq
->rspq
.rsphdlr
[mc
].cbarg
= cbarg
;
632 bfa_msgq_cmd_post(struct bfa_msgq
*msgq
, struct bfa_msgq_cmd_entry
*cmd
)
634 if (ntohs(cmd
->msg_hdr
->num_entries
) <=
635 BFA_MSGQ_FREE_CNT(&msgq
->cmdq
)) {
636 __cmd_copy(&msgq
->cmdq
, cmd
);
637 call_cmdq_ent_cbfn(cmd
, BFA_STATUS_OK
);
638 bfa_fsm_send_event(&msgq
->cmdq
, CMDQ_E_POST
);
640 list_add_tail(&cmd
->qe
, &msgq
->cmdq
.pending_q
);
645 bfa_msgq_rsp_copy(struct bfa_msgq
*msgq
, u8
*buf
, size_t buf_len
)
647 struct bfa_msgq_rspq
*rspq
= &msgq
->rspq
;
648 size_t len
= buf_len
;
653 ci
= rspq
->consumer_index
;
654 src
= (u8
*)rspq
->addr
.kva
;
655 src
+= (ci
* BFI_MSGQ_RSP_ENTRY_SIZE
);
659 to_copy
= (len
< BFI_MSGQ_RSP_ENTRY_SIZE
) ?
660 len
: BFI_MSGQ_RSP_ENTRY_SIZE
;
661 memcpy(dst
, src
, to_copy
);
663 dst
+= BFI_MSGQ_RSP_ENTRY_SIZE
;
664 BFA_MSGQ_INDX_ADD(ci
, 1, rspq
->depth
);
665 src
= (u8
*)rspq
->addr
.kva
;
666 src
+= (ci
* BFI_MSGQ_RSP_ENTRY_SIZE
);