/*
 * Copyright (C) 2005 - 2009 ServerEngines
 * All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation. The full GNU General
 * Public License is included in this distribution in the file called COPYING.
 *
 * Contact Information:
 * linux-drivers@serverengines.com
 *
 * ServerEngines
 * 209 N. Fair Oaks Ave
 * Sunnyvale, CA 94085
 */

#include "be.h"
static void be_mcc_notify(struct be_ctrl_info *ctrl)
{
	struct be_queue_info *mccq = &ctrl->mcc_obj.q;
	u32 val = 0;

	val |= mccq->id & DB_MCCQ_RING_ID_MASK;
	val |= 1 << DB_MCCQ_NUM_POSTED_SHIFT;
	iowrite32(val, ctrl->db + DB_MCCQ_OFFSET);
}
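/* Note on the doorbell word built above (inferred only from the masks and
 * shifts used here, not restated from the hardware spec): the MCC queue id
 * occupies the low bits selected by DB_MCCQ_RING_ID_MASK and the count of
 * newly posted WRBs (always 1 in this driver) sits at
 * DB_MCCQ_NUM_POSTED_SHIFT.
 */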
/* To check if valid bit is set, check the entire word as we don't know
 * the endianness of the data (old entry is host endian while a new entry is
 * little endian) */
static inline bool be_mcc_compl_is_new(struct be_mcc_cq_entry *compl)
{
	if (compl->flags != 0) {
		compl->flags = le32_to_cpu(compl->flags);
		BUG_ON((compl->flags & CQE_FLAGS_VALID_MASK) == 0);
		return true;
	} else {
		return false;
	}
}
/* Need to reset the entire word that houses the valid bit */
static inline void be_mcc_compl_use(struct be_mcc_cq_entry *compl)
{
	compl->flags = 0;
}
static int be_mcc_compl_process(struct be_ctrl_info *ctrl,
	struct be_mcc_cq_entry *compl)
{
	u16 compl_status, extd_status;

	/* Just swap the status to host endian; mcc tag is opaquely copied
	 * from mcc_wrb */
	be_dws_le_to_cpu(compl, 4);

	compl_status = (compl->status >> CQE_STATUS_COMPL_SHIFT) &
				CQE_STATUS_COMPL_MASK;
	if (compl_status != MCC_STATUS_SUCCESS) {
		extd_status = (compl->status >> CQE_STATUS_EXTD_SHIFT) &
				CQE_STATUS_EXTD_MASK;
		printk(KERN_WARNING DRV_NAME
			" error in cmd completion: status(compl/extd)=%d/%d\n",
			compl_status, extd_status);
		return -1;
	}
	return 0;
}
/* Link state evt is a string of bytes; no need for endian swapping */
static void be_async_link_state_process(struct be_ctrl_info *ctrl,
		struct be_async_event_link_state *evt)
{
	ctrl->async_cb(ctrl->adapter_ctxt,
		evt->port_link_status == ASYNC_EVENT_LINK_UP ? true : false);
}
static inline bool is_link_state_evt(u32 trailer)
{
	return (((trailer >> ASYNC_TRAILER_EVENT_CODE_SHIFT) &
		ASYNC_TRAILER_EVENT_CODE_MASK) ==
		ASYNC_EVENT_CODE_LINK_STATE);
}
static struct be_mcc_cq_entry *be_mcc_compl_get(struct be_ctrl_info *ctrl)
{
	struct be_queue_info *mcc_cq = &ctrl->mcc_obj.cq;
	struct be_mcc_cq_entry *compl = queue_tail_node(mcc_cq);

	if (be_mcc_compl_is_new(compl)) {
		queue_tail_inc(mcc_cq);
		return compl;
	}
	return NULL;
}
void be_process_mcc(struct be_ctrl_info *ctrl)
{
	struct be_mcc_cq_entry *compl;
	int num = 0;

	spin_lock_bh(&ctrl->mcc_cq_lock);
	while ((compl = be_mcc_compl_get(ctrl))) {
		if (compl->flags & CQE_FLAGS_ASYNC_MASK) {
			/* Interpret flags as an async trailer */
			BUG_ON(!is_link_state_evt(compl->flags));

			/* Interpret compl as an async link evt */
			be_async_link_state_process(ctrl,
				(struct be_async_event_link_state *) compl);
		} else {
			be_mcc_compl_process(ctrl, compl);
			atomic_dec(&ctrl->mcc_obj.q.used);
		}
		be_mcc_compl_use(compl);
		num++;
	}
	if (num)
		be_cq_notify(ctrl, ctrl->mcc_obj.cq.id, true, num);
	spin_unlock_bh(&ctrl->mcc_cq_lock);
}
/* Wait till no more pending mcc requests are present */
static void be_mcc_wait_compl(struct be_ctrl_info *ctrl)
{
#define mcc_timeout		50000 /* 5s timeout */
	int i;

	for (i = 0; i < mcc_timeout; i++) {
		be_process_mcc(ctrl);
		if (atomic_read(&ctrl->mcc_obj.q.used) == 0)
			break;
		udelay(100);
	}
	if (i == mcc_timeout)
		printk(KERN_WARNING DRV_NAME "mcc poll timed out\n");
}
/* Notify MCC requests and wait for completion */
static void be_mcc_notify_wait(struct be_ctrl_info *ctrl)
{
	be_mcc_notify(ctrl);
	be_mcc_wait_compl(ctrl);
}
static int be_mbox_db_ready_wait(void __iomem *db)
{
	int cnt = 0, wait = 5;
	u32 ready;

	do {
		ready = ioread32(db) & MPU_MAILBOX_DB_RDY_MASK;
		if (ready)
			break;

		if (cnt > 200000) {
			printk(KERN_WARNING DRV_NAME
				": mbox_db poll timed out\n");
			return -1;
		}

		if (cnt > 50)
			wait = 200;
		cnt += wait;
		udelay(wait);
	} while (true);

	return 0;
}
/*
 * Insert the mailbox address into the doorbell in two steps
 * Polls on the mbox doorbell till a command completion (or a timeout) occurs
 */
static int be_mbox_db_ring(struct be_ctrl_info *ctrl)
{
	int status = 0;
	u32 val = 0;
	void __iomem *db = ctrl->db + MPU_MAILBOX_DB_OFFSET;
	struct be_dma_mem *mbox_mem = &ctrl->mbox_mem;
	struct be_mcc_mailbox *mbox = mbox_mem->va;
	struct be_mcc_cq_entry *cqe = &mbox->cqe;

	memset(cqe, 0, sizeof(*cqe));

	val &= ~MPU_MAILBOX_DB_RDY_MASK;
	val |= MPU_MAILBOX_DB_HI_MASK;
	/* at bits 2 - 31 place mbox dma addr msb bits 34 - 63 */
	val |= (upper_32_bits(mbox_mem->dma) >> 2) << 2;
	iowrite32(val, db);

	/* wait for ready to be set */
	status = be_mbox_db_ready_wait(db);
	if (status != 0)
		return status;

	val = 0;
	val &= ~MPU_MAILBOX_DB_RDY_MASK;
	val &= ~MPU_MAILBOX_DB_HI_MASK;
	/* at bits 2 - 31 place mbox dma addr lsb bits 4 - 33 */
	val |= (u32)(mbox_mem->dma >> 4) << 2;
	iowrite32(val, db);

	status = be_mbox_db_ready_wait(db);
	if (status != 0)
		return status;

	/* A cq entry has been made now */
	if (be_mcc_compl_is_new(cqe)) {
		status = be_mcc_compl_process(ctrl, &mbox->cqe);
		be_mcc_compl_use(cqe);
		if (status)
			return status;
	} else {
		printk(KERN_WARNING DRV_NAME "invalid mailbox completion\n");
		return -1;
	}
	return 0;
}
static int be_POST_stage_get(struct be_ctrl_info *ctrl, u16 *stage)
{
	u32 sem = ioread32(ctrl->csr + MPU_EP_SEMAPHORE_OFFSET);

	*stage = sem & EP_SEMAPHORE_POST_STAGE_MASK;
	if ((sem >> EP_SEMAPHORE_POST_ERR_SHIFT) & EP_SEMAPHORE_POST_ERR_MASK)
		return -1;
	else
		return 0;
}
static int be_POST_stage_poll(struct be_ctrl_info *ctrl, u16 poll_stage)
{
	u16 stage, cnt, error;
	for (cnt = 0; cnt < 5000; cnt++) {
		error = be_POST_stage_get(ctrl, &stage);
		if (error)
			return -1;

		if (stage == poll_stage)
			break;
		udelay(1000);
	}
	if (stage != poll_stage)
		return -1;
	return 0;
}
int be_cmd_POST(struct be_ctrl_info *ctrl)
{
	u16 stage, error;

	error = be_POST_stage_get(ctrl, &stage);
	if (error)
		goto err;

	if (stage == POST_STAGE_ARMFW_RDY)
		return 0;

	if (stage != POST_STAGE_AWAITING_HOST_RDY)
		goto err;

	/* On awaiting host rdy, reset and again poll on awaiting host rdy */
	iowrite32(POST_STAGE_BE_RESET, ctrl->csr + MPU_EP_SEMAPHORE_OFFSET);
	error = be_POST_stage_poll(ctrl, POST_STAGE_AWAITING_HOST_RDY);
	if (error)
		goto err;

	/* Now kickoff POST and poll on armfw ready */
	iowrite32(POST_STAGE_HOST_RDY, ctrl->csr + MPU_EP_SEMAPHORE_OFFSET);
	error = be_POST_stage_poll(ctrl, POST_STAGE_ARMFW_RDY);
	if (error)
		goto err;

	return 0;
err:
	printk(KERN_WARNING DRV_NAME ": ERROR, stage=%d\n", stage);
	return -1;
}
static inline void *embedded_payload(struct be_mcc_wrb *wrb)
{
	return wrb->payload.embedded_payload;
}
static inline struct be_sge *nonembedded_sgl(struct be_mcc_wrb *wrb)
{
	return &wrb->payload.sgl[0];
}
/* Don't touch the hdr after it's prepared */
static void be_wrb_hdr_prepare(struct be_mcc_wrb *wrb, int payload_len,
				bool embedded, u8 sge_cnt)
{
	if (embedded)
		wrb->embedded |= MCC_WRB_EMBEDDED_MASK;
	else
		wrb->embedded |= (sge_cnt & MCC_WRB_SGE_CNT_MASK) <<
				MCC_WRB_SGE_CNT_SHIFT;
	wrb->payload_length = payload_len;
	be_dws_cpu_to_le(wrb, 20);
}
/* Don't touch the hdr after it's prepared */
static void be_cmd_hdr_prepare(struct be_cmd_req_hdr *req_hdr,
				u8 subsystem, u8 opcode, int cmd_len)
{
	req_hdr->opcode = opcode;
	req_hdr->subsystem = subsystem;
	req_hdr->request_length = cpu_to_le32(cmd_len - sizeof(*req_hdr));
}
static void be_cmd_page_addrs_prepare(struct phys_addr *pages, u32 max_pages,
			struct be_dma_mem *mem)
{
	int i, buf_pages = min(PAGES_4K_SPANNED(mem->va, mem->size), max_pages);
	u64 dma = (u64)mem->dma;

	for (i = 0; i < buf_pages; i++) {
		pages[i].lo = cpu_to_le32(dma & 0xFFFFFFFF);
		pages[i].hi = cpu_to_le32(upper_32_bits(dma));
		dma += PAGE_SIZE_4K;
	}
}
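/* Illustrative example for be_cmd_page_addrs_prepare() (addresses made up):
 * a 16K ring at dma = 0x10000000 spans four 4K pages, so with max_pages >= 4
 * the function fills pages[0..3] with 0x10000000, 0x10001000, 0x10002000 and
 * 0x10003000, each split into the little-endian lo/hi halves the firmware
 * expects.
 */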
/* Converts interrupt delay in microseconds to multiplier value */
static u32 eq_delay_to_mult(u32 usec_delay)
{
#define MAX_INTR_RATE			651042
	const u32 round = 10;
	u32 multiplier;

	if (usec_delay == 0)
		multiplier = 0;
	else {
		u32 interrupt_rate = 1000000 / usec_delay;
		/* Max delay, corresponding to the lowest interrupt rate */
		if (interrupt_rate == 0)
			multiplier = 1023;
		else {
			multiplier = (MAX_INTR_RATE - interrupt_rate) * round;
			multiplier /= interrupt_rate;
			/* Round the multiplier to the closest value. */
			multiplier = (multiplier + round / 2) / round;
			multiplier = min(multiplier, (u32)1023);
		}
	}
	return multiplier;
}
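/* Worked example for eq_delay_to_mult() (numbers only, no new behaviour):
 * usec_delay = 96 gives interrupt_rate = 1000000 / 96 = 10416, so
 * multiplier = (651042 - 10416) * 10 / 10416 = 615, rounded to
 * (615 + 5) / 10 = 62, well under the 1023 cap. A delay too large for even
 * one interrupt per second falls into the interrupt_rate == 0 branch and
 * uses the maximum value of 1023.
 */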
static inline struct be_mcc_wrb *wrb_from_mbox(struct be_dma_mem *mbox_mem)
{
	return &((struct be_mcc_mailbox *)(mbox_mem->va))->wrb;
}
static inline struct be_mcc_wrb *wrb_from_mcc(struct be_queue_info *mccq)
{
	struct be_mcc_wrb *wrb = NULL;
	if (atomic_read(&mccq->used) < mccq->len) {
		wrb = queue_head_node(mccq);
		queue_head_inc(mccq);
		atomic_inc(&mccq->used);
		memset(wrb, 0, sizeof(*wrb));
	}
	return wrb;
}
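/* Note on wrb_from_mcc(): it returns NULL once mccq->used reaches mccq->len,
 * i.e. when the MCC queue is full. The counter is incremented here and
 * decremented again in be_process_mcc() after the corresponding completion
 * has been consumed.
 */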
int be_cmd_eq_create(struct be_ctrl_info *ctrl,
		struct be_queue_info *eq, int eq_delay)
{
	struct be_mcc_wrb *wrb = wrb_from_mbox(&ctrl->mbox_mem);
	struct be_cmd_req_eq_create *req = embedded_payload(wrb);
	struct be_cmd_resp_eq_create *resp = embedded_payload(wrb);
	struct be_dma_mem *q_mem = &eq->dma_mem;
	int status;

	spin_lock(&ctrl->mbox_lock);
	memset(wrb, 0, sizeof(*wrb));

	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);

	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
		OPCODE_COMMON_EQ_CREATE, sizeof(*req));

	req->num_pages = cpu_to_le16(PAGES_4K_SPANNED(q_mem->va, q_mem->size));

	AMAP_SET_BITS(struct amap_eq_context, func, req->context,
			ctrl->pci_func);
	AMAP_SET_BITS(struct amap_eq_context, valid, req->context, 1);
	AMAP_SET_BITS(struct amap_eq_context, size, req->context, 0);
	AMAP_SET_BITS(struct amap_eq_context, count, req->context,
			__ilog2_u32(eq->len / 256));
	AMAP_SET_BITS(struct amap_eq_context, delaymult, req->context,
			eq_delay_to_mult(eq_delay));
	be_dws_cpu_to_le(req->context, sizeof(req->context));

	be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);

	status = be_mbox_db_ring(ctrl);
	if (!status) {
		eq->id = le16_to_cpu(resp->eq_id);
		eq->created = true;
	}
	spin_unlock(&ctrl->mbox_lock);
	return status;
}
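/* A note on the AMAP_SET_BITS() calls above and in the queue-create routines
 * below: the macro writes the given value into the named bit-field of a
 * byte-stream context structure (the amap_* definitions in the driver
 * headers carry the field offsets and widths); the whole context is then
 * converted to the little-endian layout the firmware expects in one pass by
 * be_dws_cpu_to_le().
 */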
int be_cmd_mac_addr_query(struct be_ctrl_info *ctrl, u8 *mac_addr,
			u8 type, bool permanent, u32 if_handle)
{
	struct be_mcc_wrb *wrb = wrb_from_mbox(&ctrl->mbox_mem);
	struct be_cmd_req_mac_query *req = embedded_payload(wrb);
	struct be_cmd_resp_mac_query *resp = embedded_payload(wrb);
	int status;

	spin_lock(&ctrl->mbox_lock);
	memset(wrb, 0, sizeof(*wrb));

	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);

	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
		OPCODE_COMMON_NTWK_MAC_QUERY, sizeof(*req));

	req->type = type;
	if (permanent) {
		req->permanent = 1;
	} else {
		req->if_id = cpu_to_le16((u16)if_handle);
		req->permanent = 0;
	}

	status = be_mbox_db_ring(ctrl);
	if (!status)
		memcpy(mac_addr, resp->mac.addr, ETH_ALEN);

	spin_unlock(&ctrl->mbox_lock);
	return status;
}
int be_cmd_pmac_add(struct be_ctrl_info *ctrl, u8 *mac_addr,
		u32 if_id, u32 *pmac_id)
{
	struct be_mcc_wrb *wrb = wrb_from_mbox(&ctrl->mbox_mem);
	struct be_cmd_req_pmac_add *req = embedded_payload(wrb);
	int status;

	spin_lock(&ctrl->mbox_lock);
	memset(wrb, 0, sizeof(*wrb));

	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);

	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
		OPCODE_COMMON_NTWK_PMAC_ADD, sizeof(*req));

	req->if_id = cpu_to_le32(if_id);
	memcpy(req->mac_address, mac_addr, ETH_ALEN);

	status = be_mbox_db_ring(ctrl);
	if (!status) {
		struct be_cmd_resp_pmac_add *resp = embedded_payload(wrb);
		*pmac_id = le32_to_cpu(resp->pmac_id);
	}

	spin_unlock(&ctrl->mbox_lock);
	return status;
}
int be_cmd_pmac_del(struct be_ctrl_info *ctrl, u32 if_id, u32 pmac_id)
{
	struct be_mcc_wrb *wrb = wrb_from_mbox(&ctrl->mbox_mem);
	struct be_cmd_req_pmac_del *req = embedded_payload(wrb);
	int status;

	spin_lock(&ctrl->mbox_lock);
	memset(wrb, 0, sizeof(*wrb));

	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);

	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
		OPCODE_COMMON_NTWK_PMAC_DEL, sizeof(*req));

	req->if_id = cpu_to_le32(if_id);
	req->pmac_id = cpu_to_le32(pmac_id);

	status = be_mbox_db_ring(ctrl);
	spin_unlock(&ctrl->mbox_lock);

	return status;
}
int be_cmd_cq_create(struct be_ctrl_info *ctrl,
		struct be_queue_info *cq, struct be_queue_info *eq,
		bool sol_evts, bool no_delay, int coalesce_wm)
{
	struct be_mcc_wrb *wrb = wrb_from_mbox(&ctrl->mbox_mem);
	struct be_cmd_req_cq_create *req = embedded_payload(wrb);
	struct be_cmd_resp_cq_create *resp = embedded_payload(wrb);
	struct be_dma_mem *q_mem = &cq->dma_mem;
	void *ctxt = &req->context;
	int status;

	spin_lock(&ctrl->mbox_lock);
	memset(wrb, 0, sizeof(*wrb));

	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);

	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
		OPCODE_COMMON_CQ_CREATE, sizeof(*req));

	req->num_pages = cpu_to_le16(PAGES_4K_SPANNED(q_mem->va, q_mem->size));

	AMAP_SET_BITS(struct amap_cq_context, coalescwm, ctxt, coalesce_wm);
	AMAP_SET_BITS(struct amap_cq_context, nodelay, ctxt, no_delay);
	AMAP_SET_BITS(struct amap_cq_context, count, ctxt,
			__ilog2_u32(cq->len / 256));
	AMAP_SET_BITS(struct amap_cq_context, valid, ctxt, 1);
	AMAP_SET_BITS(struct amap_cq_context, solevent, ctxt, sol_evts);
	AMAP_SET_BITS(struct amap_cq_context, eventable, ctxt, 1);
	AMAP_SET_BITS(struct amap_cq_context, eqid, ctxt, eq->id);
	AMAP_SET_BITS(struct amap_cq_context, armed, ctxt, 1);
	AMAP_SET_BITS(struct amap_cq_context, func, ctxt, ctrl->pci_func);
	be_dws_cpu_to_le(ctxt, sizeof(req->context));

	be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);

	status = be_mbox_db_ring(ctrl);
	if (!status) {
		cq->id = le16_to_cpu(resp->cq_id);
		cq->created = true;
	}
	spin_unlock(&ctrl->mbox_lock);

	return status;
}
static u32 be_encoded_q_len(int q_len)
{
	u32 len_encoded = fls(q_len); /* log2(len) + 1 */
	if (len_encoded == 16)
		len_encoded = 0;
	return len_encoded;
}
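/* Example (values only): a queue of 1024 entries encodes as fls(1024) = 11;
 * the special case maps an encoding of 16 (a 32K - 64K entry queue) to 0,
 * which is presumably how the firmware represents its largest ring size.
 */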
int be_cmd_mccq_create(struct be_ctrl_info *ctrl,
			struct be_queue_info *mccq,
			struct be_queue_info *cq)
{
	struct be_mcc_wrb *wrb = wrb_from_mbox(&ctrl->mbox_mem);
	struct be_cmd_req_mcc_create *req = embedded_payload(wrb);
	struct be_dma_mem *q_mem = &mccq->dma_mem;
	void *ctxt = &req->context;
	int status;

	spin_lock(&ctrl->mbox_lock);
	memset(wrb, 0, sizeof(*wrb));

	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);

	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
			OPCODE_COMMON_MCC_CREATE, sizeof(*req));

	req->num_pages = PAGES_4K_SPANNED(q_mem->va, q_mem->size);

	AMAP_SET_BITS(struct amap_mcc_context, fid, ctxt, ctrl->pci_func);
	AMAP_SET_BITS(struct amap_mcc_context, valid, ctxt, 1);
	AMAP_SET_BITS(struct amap_mcc_context, ring_size, ctxt,
		be_encoded_q_len(mccq->len));
	AMAP_SET_BITS(struct amap_mcc_context, cq_id, ctxt, cq->id);

	be_dws_cpu_to_le(ctxt, sizeof(req->context));

	be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);

	status = be_mbox_db_ring(ctrl);
	if (!status) {
		struct be_cmd_resp_mcc_create *resp = embedded_payload(wrb);
		mccq->id = le16_to_cpu(resp->id);
		mccq->created = true;
	}
	spin_unlock(&ctrl->mbox_lock);

	return status;
}
int be_cmd_txq_create(struct be_ctrl_info *ctrl,
			struct be_queue_info *txq,
			struct be_queue_info *cq)
{
	struct be_mcc_wrb *wrb = wrb_from_mbox(&ctrl->mbox_mem);
	struct be_cmd_req_eth_tx_create *req = embedded_payload(wrb);
	struct be_dma_mem *q_mem = &txq->dma_mem;
	void *ctxt = &req->context;
	u32 len_encoded;
	int status;

	spin_lock(&ctrl->mbox_lock);
	memset(wrb, 0, sizeof(*wrb));

	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);

	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH, OPCODE_ETH_TX_CREATE,
		sizeof(*req));

	req->num_pages = PAGES_4K_SPANNED(q_mem->va, q_mem->size);
	req->ulp_num = BE_ULP1_NUM;
	req->type = BE_ETH_TX_RING_TYPE_STANDARD;

	len_encoded = fls(txq->len); /* log2(len) + 1 */
	if (len_encoded == 16)
		len_encoded = 0;
	AMAP_SET_BITS(struct amap_tx_context, tx_ring_size, ctxt, len_encoded);
	AMAP_SET_BITS(struct amap_tx_context, pci_func_id, ctxt,
			ctrl->pci_func);
	AMAP_SET_BITS(struct amap_tx_context, ctx_valid, ctxt, 1);
	AMAP_SET_BITS(struct amap_tx_context, cq_id_send, ctxt, cq->id);

	be_dws_cpu_to_le(ctxt, sizeof(req->context));

	be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);

	status = be_mbox_db_ring(ctrl);
	if (!status) {
		struct be_cmd_resp_eth_tx_create *resp = embedded_payload(wrb);
		txq->id = le16_to_cpu(resp->cid);
		txq->created = true;
	}
	spin_unlock(&ctrl->mbox_lock);

	return status;
}
int be_cmd_rxq_create(struct be_ctrl_info *ctrl,
		struct be_queue_info *rxq, u16 cq_id, u16 frag_size,
		u16 max_frame_size, u32 if_id, u32 rss)
{
	struct be_mcc_wrb *wrb = wrb_from_mbox(&ctrl->mbox_mem);
	struct be_cmd_req_eth_rx_create *req = embedded_payload(wrb);
	struct be_dma_mem *q_mem = &rxq->dma_mem;
	int status;

	spin_lock(&ctrl->mbox_lock);
	memset(wrb, 0, sizeof(*wrb));

	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);

	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH, OPCODE_ETH_RX_CREATE,
		sizeof(*req));

	req->cq_id = cpu_to_le16(cq_id);
	req->frag_size = fls(frag_size) - 1;
	req->num_pages = 2;
	be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);
	req->interface_id = cpu_to_le32(if_id);
	req->max_frame_size = cpu_to_le16(max_frame_size);
	req->rss_queue = cpu_to_le32(rss);

	status = be_mbox_db_ring(ctrl);
	if (!status) {
		struct be_cmd_resp_eth_rx_create *resp = embedded_payload(wrb);
		rxq->id = le16_to_cpu(resp->id);
		rxq->created = true;
	}
	spin_unlock(&ctrl->mbox_lock);

	return status;
}
/* Generic destroyer function for all types of queues */
int be_cmd_q_destroy(struct be_ctrl_info *ctrl, struct be_queue_info *q,
		int queue_type)
{
	struct be_mcc_wrb *wrb = wrb_from_mbox(&ctrl->mbox_mem);
	struct be_cmd_req_q_destroy *req = embedded_payload(wrb);
	u8 subsys = 0, opcode = 0;
	int status;

	spin_lock(&ctrl->mbox_lock);

	memset(wrb, 0, sizeof(*wrb));
	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);

	switch (queue_type) {
	case QTYPE_EQ:
		subsys = CMD_SUBSYSTEM_COMMON;
		opcode = OPCODE_COMMON_EQ_DESTROY;
		break;
	case QTYPE_CQ:
		subsys = CMD_SUBSYSTEM_COMMON;
		opcode = OPCODE_COMMON_CQ_DESTROY;
		break;
	case QTYPE_TXQ:
		subsys = CMD_SUBSYSTEM_ETH;
		opcode = OPCODE_ETH_TX_DESTROY;
		break;
	case QTYPE_RXQ:
		subsys = CMD_SUBSYSTEM_ETH;
		opcode = OPCODE_ETH_RX_DESTROY;
		break;
	case QTYPE_MCCQ:
		subsys = CMD_SUBSYSTEM_COMMON;
		opcode = OPCODE_COMMON_MCC_DESTROY;
		break;
	default:
		printk(KERN_WARNING DRV_NAME ":bad Q type in Q destroy cmd\n");
		status = -1;
		goto err;
	}
	be_cmd_hdr_prepare(&req->hdr, subsys, opcode, sizeof(*req));
	req->id = cpu_to_le16(q->id);

	status = be_mbox_db_ring(ctrl);
err:
	spin_unlock(&ctrl->mbox_lock);

	return status;
}
/* Create an rx filtering policy configuration on an i/f */
int be_cmd_if_create(struct be_ctrl_info *ctrl, u32 flags, u8 *mac,
		bool pmac_invalid, u32 *if_handle, u32 *pmac_id)
{
	struct be_mcc_wrb *wrb = wrb_from_mbox(&ctrl->mbox_mem);
	struct be_cmd_req_if_create *req = embedded_payload(wrb);
	int status;

	spin_lock(&ctrl->mbox_lock);
	memset(wrb, 0, sizeof(*wrb));

	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);

	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
		OPCODE_COMMON_NTWK_INTERFACE_CREATE, sizeof(*req));

	req->capability_flags = cpu_to_le32(flags);
	req->enable_flags = cpu_to_le32(flags);
	if (!pmac_invalid)
		memcpy(req->mac_addr, mac, ETH_ALEN);

	status = be_mbox_db_ring(ctrl);
	if (!status) {
		struct be_cmd_resp_if_create *resp = embedded_payload(wrb);
		*if_handle = le32_to_cpu(resp->interface_id);
		if (!pmac_invalid)
			*pmac_id = le32_to_cpu(resp->pmac_id);
	}

	spin_unlock(&ctrl->mbox_lock);
	return status;
}
int be_cmd_if_destroy(struct be_ctrl_info *ctrl, u32 interface_id)
{
	struct be_mcc_wrb *wrb = wrb_from_mbox(&ctrl->mbox_mem);
	struct be_cmd_req_if_destroy *req = embedded_payload(wrb);
	int status;

	spin_lock(&ctrl->mbox_lock);
	memset(wrb, 0, sizeof(*wrb));

	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);

	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
		OPCODE_COMMON_NTWK_INTERFACE_DESTROY, sizeof(*req));

	req->interface_id = cpu_to_le32(interface_id);
	status = be_mbox_db_ring(ctrl);

	spin_unlock(&ctrl->mbox_lock);

	return status;
}
/* Get stats is a non embedded command: the request is not embedded inside
 * WRB but is a separate dma memory block
 */
int be_cmd_get_stats(struct be_ctrl_info *ctrl, struct be_dma_mem *nonemb_cmd)
{
	struct be_mcc_wrb *wrb = wrb_from_mbox(&ctrl->mbox_mem);
	struct be_cmd_req_get_stats *req = nonemb_cmd->va;
	struct be_sge *sge = nonembedded_sgl(wrb);
	int status;

	spin_lock(&ctrl->mbox_lock);
	memset(wrb, 0, sizeof(*wrb));

	memset(req, 0, sizeof(*req));

	be_wrb_hdr_prepare(wrb, sizeof(*req), false, 1);

	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH,
		OPCODE_ETH_GET_STATISTICS, sizeof(*req));
	sge->pa_hi = cpu_to_le32(upper_32_bits(nonemb_cmd->dma));
	sge->pa_lo = cpu_to_le32(nonemb_cmd->dma & 0xFFFFFFFF);
	sge->len = cpu_to_le32(nonemb_cmd->size);

	status = be_mbox_db_ring(ctrl);
	if (!status) {
		struct be_cmd_resp_get_stats *resp = nonemb_cmd->va;
		be_dws_le_to_cpu(&resp->hw_stats, sizeof(resp->hw_stats));
	}

	spin_unlock(&ctrl->mbox_lock);
	return status;
}
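/* Unlike the embedded commands elsewhere in this file, the request here
 * lives in its own DMA buffer (nonemb_cmd) and the WRB only carries a
 * scatter-gather entry (pa_hi/pa_lo/len) pointing at it - hence
 * be_wrb_hdr_prepare() is called with embedded == false and an sge count
 * of 1.
 */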
int be_cmd_link_status_query(struct be_ctrl_info *ctrl,
			bool *link_up)
{
	struct be_mcc_wrb *wrb = wrb_from_mbox(&ctrl->mbox_mem);
	struct be_cmd_req_link_status *req = embedded_payload(wrb);
	int status;

	spin_lock(&ctrl->mbox_lock);

	*link_up = false;
	memset(wrb, 0, sizeof(*wrb));

	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);

	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
		OPCODE_COMMON_NTWK_LINK_STATUS_QUERY, sizeof(*req));

	status = be_mbox_db_ring(ctrl);
	if (!status) {
		struct be_cmd_resp_link_status *resp = embedded_payload(wrb);
		if (resp->mac_speed != PHY_LINK_SPEED_ZERO)
			*link_up = true;
	}

	spin_unlock(&ctrl->mbox_lock);
	return status;
}
int be_cmd_get_fw_ver(struct be_ctrl_info *ctrl, char *fw_ver)
{
	struct be_mcc_wrb *wrb = wrb_from_mbox(&ctrl->mbox_mem);
	struct be_cmd_req_get_fw_version *req = embedded_payload(wrb);
	int status;

	spin_lock(&ctrl->mbox_lock);
	memset(wrb, 0, sizeof(*wrb));

	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);

	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
		OPCODE_COMMON_GET_FW_VERSION, sizeof(*req));

	status = be_mbox_db_ring(ctrl);
	if (!status) {
		struct be_cmd_resp_get_fw_version *resp = embedded_payload(wrb);
		strncpy(fw_ver, resp->firmware_version_string, FW_VER_LEN);
	}

	spin_unlock(&ctrl->mbox_lock);
	return status;
}
/* set the EQ delay interval of an EQ to specified value */
int be_cmd_modify_eqd(struct be_ctrl_info *ctrl, u32 eq_id, u32 eqd)
{
	struct be_mcc_wrb *wrb = wrb_from_mbox(&ctrl->mbox_mem);
	struct be_cmd_req_modify_eq_delay *req = embedded_payload(wrb);
	int status;

	spin_lock(&ctrl->mbox_lock);
	memset(wrb, 0, sizeof(*wrb));

	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);

	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
		OPCODE_COMMON_MODIFY_EQ_DELAY, sizeof(*req));

	req->num_eq = cpu_to_le32(1);
	req->delay[0].eq_id = cpu_to_le32(eq_id);
	req->delay[0].phase = 0;
	req->delay[0].delay_multiplier = cpu_to_le32(eqd);

	status = be_mbox_db_ring(ctrl);

	spin_unlock(&ctrl->mbox_lock);
	return status;
}
int be_cmd_vlan_config(struct be_ctrl_info *ctrl, u32 if_id, u16 *vtag_array,
			u32 num, bool untagged, bool promiscuous)
{
	struct be_mcc_wrb *wrb = wrb_from_mbox(&ctrl->mbox_mem);
	struct be_cmd_req_vlan_config *req = embedded_payload(wrb);
	int status;

	spin_lock(&ctrl->mbox_lock);
	memset(wrb, 0, sizeof(*wrb));

	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);

	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
		OPCODE_COMMON_NTWK_VLAN_CONFIG, sizeof(*req));

	req->interface_id = if_id;
	req->promiscuous = promiscuous;
	req->untagged = untagged;
	req->num_vlan = num;
	if (!promiscuous) {
		memcpy(req->normal_vlan, vtag_array,
			req->num_vlan * sizeof(vtag_array[0]));
	}

	status = be_mbox_db_ring(ctrl);

	spin_unlock(&ctrl->mbox_lock);
	return status;
}
/* Use MCC for this command as it may be called in BH context */
int be_cmd_promiscuous_config(struct be_ctrl_info *ctrl, u8 port_num, bool en)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_promiscuous_config *req;

	spin_lock_bh(&ctrl->mcc_lock);

	wrb = wrb_from_mcc(&ctrl->mcc_obj.q);
	BUG_ON(!wrb);

	req = embedded_payload(wrb);

	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);

	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH,
		OPCODE_ETH_PROMISCUOUS, sizeof(*req));

	if (port_num)
		req->port1_promiscuous = en;
	else
		req->port0_promiscuous = en;

	be_mcc_notify_wait(ctrl);

	spin_unlock_bh(&ctrl->mcc_lock);
	return 0;
}
/*
 * Use MCC for this command as it may be called in BH context
 * (mc == NULL) => multicast promiscuous
 */
int be_cmd_multicast_set(struct be_ctrl_info *ctrl, u32 if_id,
			struct dev_mc_list *mc_list, u32 mc_count)
{
#define BE_MAX_MC		32 /* set mcast promisc if > 32 */
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_mcast_mac_config *req;

	spin_lock_bh(&ctrl->mcc_lock);

	wrb = wrb_from_mcc(&ctrl->mcc_obj.q);
	BUG_ON(!wrb);

	req = embedded_payload(wrb);

	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);

	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
		OPCODE_COMMON_NTWK_MULTICAST_SET, sizeof(*req));

	req->interface_id = if_id;
	if (mc_list && mc_count <= BE_MAX_MC) {
		int i;
		struct dev_mc_list *mc;

		req->num_mac = cpu_to_le16(mc_count);

		for (mc = mc_list, i = 0; mc; mc = mc->next, i++)
			memcpy(req->mac[i].byte, mc->dmi_addr, ETH_ALEN);
	} else {
		req->promiscuous = 1;
	}

	be_mcc_notify_wait(ctrl);

	spin_unlock_bh(&ctrl->mcc_lock);

	return 0;
}
int be_cmd_set_flow_control(struct be_ctrl_info *ctrl, u32 tx_fc, u32 rx_fc)
{
	struct be_mcc_wrb *wrb = wrb_from_mbox(&ctrl->mbox_mem);
	struct be_cmd_req_set_flow_control *req = embedded_payload(wrb);
	int status;

	spin_lock(&ctrl->mbox_lock);

	memset(wrb, 0, sizeof(*wrb));

	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);

	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
		OPCODE_COMMON_SET_FLOW_CONTROL, sizeof(*req));

	req->tx_flow_control = cpu_to_le16((u16)tx_fc);
	req->rx_flow_control = cpu_to_le16((u16)rx_fc);

	status = be_mbox_db_ring(ctrl);

	spin_unlock(&ctrl->mbox_lock);
	return status;
}
int be_cmd_get_flow_control(struct be_ctrl_info *ctrl, u32 *tx_fc, u32 *rx_fc)
{
	struct be_mcc_wrb *wrb = wrb_from_mbox(&ctrl->mbox_mem);
	struct be_cmd_req_get_flow_control *req = embedded_payload(wrb);
	int status;

	spin_lock(&ctrl->mbox_lock);

	memset(wrb, 0, sizeof(*wrb));

	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);

	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
		OPCODE_COMMON_GET_FLOW_CONTROL, sizeof(*req));

	status = be_mbox_db_ring(ctrl);
	if (!status) {
		struct be_cmd_resp_get_flow_control *resp =
						embedded_payload(wrb);
		*tx_fc = le16_to_cpu(resp->tx_flow_control);
		*rx_fc = le16_to_cpu(resp->rx_flow_control);
	}

	spin_unlock(&ctrl->mbox_lock);
	return status;
}
int be_cmd_query_fw_cfg(struct be_ctrl_info *ctrl, u32 *port_num)
{
	struct be_mcc_wrb *wrb = wrb_from_mbox(&ctrl->mbox_mem);
	struct be_cmd_req_query_fw_cfg *req = embedded_payload(wrb);
	int status;

	spin_lock(&ctrl->mbox_lock);

	memset(wrb, 0, sizeof(*wrb));

	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);

	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
		OPCODE_COMMON_QUERY_FIRMWARE_CONFIG, sizeof(*req));

	status = be_mbox_db_ring(ctrl);
	if (!status) {
		struct be_cmd_resp_query_fw_cfg *resp = embedded_payload(wrb);
		*port_num = le32_to_cpu(resp->phys_port);
	}

	spin_unlock(&ctrl->mbox_lock);
	return status;
}