2 * Copyright (C) 2005 - 2010 ServerEngines
5 * This program is free software; you can redistribute it and/or
6 * modify it under the terms of the GNU General Public License version 2
7 * as published by the Free Software Foundation. The full GNU General
8 * Public License is included in this distribution in the file called COPYING.
10 * Contact Information:
11 * linux-drivers@serverengines.com
14 * 209 N. Fair Oaks Ave
21 static void be_mcc_notify(struct be_adapter
*adapter
)
23 struct be_queue_info
*mccq
= &adapter
->mcc_obj
.q
;
26 if (adapter
->eeh_err
) {
27 dev_info(&adapter
->pdev
->dev
,
28 "Error in Card Detected! Cannot issue commands\n");
32 val
|= mccq
->id
& DB_MCCQ_RING_ID_MASK
;
33 val
|= 1 << DB_MCCQ_NUM_POSTED_SHIFT
;
36 iowrite32(val
, adapter
->db
+ DB_MCCQ_OFFSET
);
39 /* To check if valid bit is set, check the entire word as we don't know
40 * the endianness of the data (old entry is host endian while a new entry is
42 static inline bool be_mcc_compl_is_new(struct be_mcc_compl
*compl)
44 if (compl->flags
!= 0) {
45 compl->flags
= le32_to_cpu(compl->flags
);
46 BUG_ON((compl->flags
& CQE_FLAGS_VALID_MASK
) == 0);
53 /* Need to reset the entire word that houses the valid bit */
54 static inline void be_mcc_compl_use(struct be_mcc_compl
*compl)
59 static int be_mcc_compl_process(struct be_adapter
*adapter
,
60 struct be_mcc_compl
*compl)
62 u16 compl_status
, extd_status
;
64 /* Just swap the status to host endian; mcc tag is opaquely copied
66 be_dws_le_to_cpu(compl, 4);
68 compl_status
= (compl->status
>> CQE_STATUS_COMPL_SHIFT
) &
69 CQE_STATUS_COMPL_MASK
;
71 if ((compl->tag0
== OPCODE_COMMON_WRITE_FLASHROM
) &&
72 (compl->tag1
== CMD_SUBSYSTEM_COMMON
)) {
73 adapter
->flash_status
= compl_status
;
74 complete(&adapter
->flash_compl
);
77 if (compl_status
== MCC_STATUS_SUCCESS
) {
78 if (compl->tag0
== OPCODE_ETH_GET_STATISTICS
) {
79 struct be_cmd_resp_get_stats
*resp
=
80 adapter
->stats_cmd
.va
;
81 be_dws_le_to_cpu(&resp
->hw_stats
,
82 sizeof(resp
->hw_stats
));
83 netdev_stats_update(adapter
);
84 adapter
->stats_cmd_sent
= false;
86 } else if ((compl_status
!= MCC_STATUS_NOT_SUPPORTED
) &&
87 (compl->tag0
!= OPCODE_COMMON_NTWK_MAC_QUERY
)) {
88 extd_status
= (compl->status
>> CQE_STATUS_EXTD_SHIFT
) &
90 dev_warn(&adapter
->pdev
->dev
,
91 "Error in cmd completion - opcode %d, compl %d, extd %d\n",
92 compl->tag0
, compl_status
, extd_status
);
97 /* Link state evt is a string of bytes; no need for endian swapping */
98 static void be_async_link_state_process(struct be_adapter
*adapter
,
99 struct be_async_event_link_state
*evt
)
101 be_link_status_update(adapter
,
102 evt
->port_link_status
== ASYNC_EVENT_LINK_UP
);
105 /* Grp5 CoS Priority evt */
106 static void be_async_grp5_cos_priority_process(struct be_adapter
*adapter
,
107 struct be_async_event_grp5_cos_priority
*evt
)
110 adapter
->vlan_prio_bmap
= evt
->available_priority_bmap
;
111 adapter
->recommended_prio
&= ~VLAN_PRIO_MASK
;
112 adapter
->recommended_prio
=
113 evt
->reco_default_priority
<< VLAN_PRIO_SHIFT
;
117 /* Grp5 QOS Speed evt */
118 static void be_async_grp5_qos_speed_process(struct be_adapter
*adapter
,
119 struct be_async_event_grp5_qos_link_speed
*evt
)
121 if (evt
->physical_port
== adapter
->port_num
) {
122 /* qos_link_speed is in units of 10 Mbps */
123 adapter
->link_speed
= evt
->qos_link_speed
* 10;
127 static void be_async_grp5_evt_process(struct be_adapter
*adapter
,
128 u32 trailer
, struct be_mcc_compl
*evt
)
132 event_type
= (trailer
>> ASYNC_TRAILER_EVENT_TYPE_SHIFT
) &
133 ASYNC_TRAILER_EVENT_TYPE_MASK
;
135 switch (event_type
) {
136 case ASYNC_EVENT_COS_PRIORITY
:
137 be_async_grp5_cos_priority_process(adapter
,
138 (struct be_async_event_grp5_cos_priority
*)evt
);
140 case ASYNC_EVENT_QOS_SPEED
:
141 be_async_grp5_qos_speed_process(adapter
,
142 (struct be_async_event_grp5_qos_link_speed
*)evt
);
145 dev_warn(&adapter
->pdev
->dev
, "Unknown grp5 event!\n");
150 static inline bool is_link_state_evt(u32 trailer
)
152 return ((trailer
>> ASYNC_TRAILER_EVENT_CODE_SHIFT
) &
153 ASYNC_TRAILER_EVENT_CODE_MASK
) ==
154 ASYNC_EVENT_CODE_LINK_STATE
;
157 static inline bool is_grp5_evt(u32 trailer
)
159 return (((trailer
>> ASYNC_TRAILER_EVENT_CODE_SHIFT
) &
160 ASYNC_TRAILER_EVENT_CODE_MASK
) ==
161 ASYNC_EVENT_CODE_GRP_5
);
164 static struct be_mcc_compl
*be_mcc_compl_get(struct be_adapter
*adapter
)
166 struct be_queue_info
*mcc_cq
= &adapter
->mcc_obj
.cq
;
167 struct be_mcc_compl
*compl = queue_tail_node(mcc_cq
);
169 if (be_mcc_compl_is_new(compl)) {
170 queue_tail_inc(mcc_cq
);
176 void be_async_mcc_enable(struct be_adapter
*adapter
)
178 spin_lock_bh(&adapter
->mcc_cq_lock
);
180 be_cq_notify(adapter
, adapter
->mcc_obj
.cq
.id
, true, 0);
181 adapter
->mcc_obj
.rearm_cq
= true;
183 spin_unlock_bh(&adapter
->mcc_cq_lock
);
186 void be_async_mcc_disable(struct be_adapter
*adapter
)
188 adapter
->mcc_obj
.rearm_cq
= false;
191 int be_process_mcc(struct be_adapter
*adapter
, int *status
)
193 struct be_mcc_compl
*compl;
195 struct be_mcc_obj
*mcc_obj
= &adapter
->mcc_obj
;
197 spin_lock_bh(&adapter
->mcc_cq_lock
);
198 while ((compl = be_mcc_compl_get(adapter
))) {
199 if (compl->flags
& CQE_FLAGS_ASYNC_MASK
) {
200 /* Interpret flags as an async trailer */
201 if (is_link_state_evt(compl->flags
))
202 be_async_link_state_process(adapter
,
203 (struct be_async_event_link_state
*) compl);
204 else if (is_grp5_evt(compl->flags
))
205 be_async_grp5_evt_process(adapter
,
206 compl->flags
, compl);
207 } else if (compl->flags
& CQE_FLAGS_COMPLETED_MASK
) {
208 *status
= be_mcc_compl_process(adapter
, compl);
209 atomic_dec(&mcc_obj
->q
.used
);
211 be_mcc_compl_use(compl);
215 spin_unlock_bh(&adapter
->mcc_cq_lock
);
219 /* Wait till no more pending mcc requests are present */
220 static int be_mcc_wait_compl(struct be_adapter
*adapter
)
222 #define mcc_timeout 120000 /* 12s timeout */
223 int i
, num
, status
= 0;
224 struct be_mcc_obj
*mcc_obj
= &adapter
->mcc_obj
;
226 if (adapter
->eeh_err
)
229 for (i
= 0; i
< mcc_timeout
; i
++) {
230 num
= be_process_mcc(adapter
, &status
);
232 be_cq_notify(adapter
, mcc_obj
->cq
.id
,
233 mcc_obj
->rearm_cq
, num
);
235 if (atomic_read(&mcc_obj
->q
.used
) == 0)
239 if (i
== mcc_timeout
) {
240 dev_err(&adapter
->pdev
->dev
, "mccq poll timed out\n");
/* Notify MCC requests and wait for completion */
static int be_mcc_notify_wait(struct be_adapter *adapter)
{
	be_mcc_notify(adapter);
	return be_mcc_wait_compl(adapter);
}
253 static int be_mbox_db_ready_wait(struct be_adapter
*adapter
, void __iomem
*db
)
258 if (adapter
->eeh_err
) {
259 dev_err(&adapter
->pdev
->dev
,
260 "Error detected in card.Cannot issue commands\n");
265 ready
= ioread32(db
);
266 if (ready
== 0xffffffff) {
267 dev_err(&adapter
->pdev
->dev
,
268 "pci slot disconnected\n");
272 ready
&= MPU_MAILBOX_DB_RDY_MASK
;
277 dev_err(&adapter
->pdev
->dev
, "mbox poll timed out\n");
278 be_detect_dump_ue(adapter
);
282 set_current_state(TASK_INTERRUPTIBLE
);
283 schedule_timeout(msecs_to_jiffies(1));
291 * Insert the mailbox address into the doorbell in two steps
292 * Polls on the mbox doorbell till a command completion (or a timeout) occurs
294 static int be_mbox_notify_wait(struct be_adapter
*adapter
)
298 void __iomem
*db
= adapter
->db
+ MPU_MAILBOX_DB_OFFSET
;
299 struct be_dma_mem
*mbox_mem
= &adapter
->mbox_mem
;
300 struct be_mcc_mailbox
*mbox
= mbox_mem
->va
;
301 struct be_mcc_compl
*compl = &mbox
->compl;
303 /* wait for ready to be set */
304 status
= be_mbox_db_ready_wait(adapter
, db
);
308 val
|= MPU_MAILBOX_DB_HI_MASK
;
309 /* at bits 2 - 31 place mbox dma addr msb bits 34 - 63 */
310 val
|= (upper_32_bits(mbox_mem
->dma
) >> 2) << 2;
313 /* wait for ready to be set */
314 status
= be_mbox_db_ready_wait(adapter
, db
);
319 /* at bits 2 - 31 place mbox dma addr lsb bits 4 - 33 */
320 val
|= (u32
)(mbox_mem
->dma
>> 4) << 2;
323 status
= be_mbox_db_ready_wait(adapter
, db
);
327 /* A cq entry has been made now */
328 if (be_mcc_compl_is_new(compl)) {
329 status
= be_mcc_compl_process(adapter
, &mbox
->compl);
330 be_mcc_compl_use(compl);
334 dev_err(&adapter
->pdev
->dev
, "invalid mailbox completion\n");
340 static int be_POST_stage_get(struct be_adapter
*adapter
, u16
*stage
)
344 if (lancer_chip(adapter
))
345 sem
= ioread32(adapter
->db
+ MPU_EP_SEMAPHORE_IF_TYPE2_OFFSET
);
347 sem
= ioread32(adapter
->csr
+ MPU_EP_SEMAPHORE_OFFSET
);
349 *stage
= sem
& EP_SEMAPHORE_POST_STAGE_MASK
;
350 if ((sem
>> EP_SEMAPHORE_POST_ERR_SHIFT
) & EP_SEMAPHORE_POST_ERR_MASK
)
356 int be_cmd_POST(struct be_adapter
*adapter
)
359 int status
, timeout
= 0;
362 status
= be_POST_stage_get(adapter
, &stage
);
364 dev_err(&adapter
->pdev
->dev
, "POST error; stage=0x%x\n",
367 } else if (stage
!= POST_STAGE_ARMFW_RDY
) {
368 set_current_state(TASK_INTERRUPTIBLE
);
369 schedule_timeout(2 * HZ
);
374 } while (timeout
< 40);
376 dev_err(&adapter
->pdev
->dev
, "POST timeout; stage=0x%x\n", stage
);
380 static inline void *embedded_payload(struct be_mcc_wrb
*wrb
)
382 return wrb
->payload
.embedded_payload
;
385 static inline struct be_sge
*nonembedded_sgl(struct be_mcc_wrb
*wrb
)
387 return &wrb
->payload
.sgl
[0];
390 /* Don't touch the hdr after it's prepared */
391 static void be_wrb_hdr_prepare(struct be_mcc_wrb
*wrb
, int payload_len
,
392 bool embedded
, u8 sge_cnt
, u32 opcode
)
395 wrb
->embedded
|= MCC_WRB_EMBEDDED_MASK
;
397 wrb
->embedded
|= (sge_cnt
& MCC_WRB_SGE_CNT_MASK
) <<
398 MCC_WRB_SGE_CNT_SHIFT
;
399 wrb
->payload_length
= payload_len
;
401 be_dws_cpu_to_le(wrb
, 8);
404 /* Don't touch the hdr after it's prepared */
405 static void be_cmd_hdr_prepare(struct be_cmd_req_hdr
*req_hdr
,
406 u8 subsystem
, u8 opcode
, int cmd_len
)
408 req_hdr
->opcode
= opcode
;
409 req_hdr
->subsystem
= subsystem
;
410 req_hdr
->request_length
= cpu_to_le32(cmd_len
- sizeof(*req_hdr
));
411 req_hdr
->version
= 0;
414 static void be_cmd_page_addrs_prepare(struct phys_addr
*pages
, u32 max_pages
,
415 struct be_dma_mem
*mem
)
417 int i
, buf_pages
= min(PAGES_4K_SPANNED(mem
->va
, mem
->size
), max_pages
);
418 u64 dma
= (u64
)mem
->dma
;
420 for (i
= 0; i
< buf_pages
; i
++) {
421 pages
[i
].lo
= cpu_to_le32(dma
& 0xFFFFFFFF);
422 pages
[i
].hi
= cpu_to_le32(upper_32_bits(dma
));
427 /* Converts interrupt delay in microseconds to multiplier value */
428 static u32
eq_delay_to_mult(u32 usec_delay
)
430 #define MAX_INTR_RATE 651042
431 const u32 round
= 10;
437 u32 interrupt_rate
= 1000000 / usec_delay
;
438 /* Max delay, corresponding to the lowest interrupt rate */
439 if (interrupt_rate
== 0)
442 multiplier
= (MAX_INTR_RATE
- interrupt_rate
) * round
;
443 multiplier
/= interrupt_rate
;
444 /* Round the multiplier to the closest value.*/
445 multiplier
= (multiplier
+ round
/2) / round
;
446 multiplier
= min(multiplier
, (u32
)1023);
452 static inline struct be_mcc_wrb
*wrb_from_mbox(struct be_adapter
*adapter
)
454 struct be_dma_mem
*mbox_mem
= &adapter
->mbox_mem
;
455 struct be_mcc_wrb
*wrb
456 = &((struct be_mcc_mailbox
*)(mbox_mem
->va
))->wrb
;
457 memset(wrb
, 0, sizeof(*wrb
));
461 static struct be_mcc_wrb
*wrb_from_mccq(struct be_adapter
*adapter
)
463 struct be_queue_info
*mccq
= &adapter
->mcc_obj
.q
;
464 struct be_mcc_wrb
*wrb
;
466 if (atomic_read(&mccq
->used
) >= mccq
->len
) {
467 dev_err(&adapter
->pdev
->dev
, "Out of MCCQ wrbs\n");
471 wrb
= queue_head_node(mccq
);
472 queue_head_inc(mccq
);
473 atomic_inc(&mccq
->used
);
474 memset(wrb
, 0, sizeof(*wrb
));
478 /* Tell fw we're about to start firing cmds by writing a
479 * special pattern across the wrb hdr; uses mbox
481 int be_cmd_fw_init(struct be_adapter
*adapter
)
486 if (mutex_lock_interruptible(&adapter
->mbox_lock
))
489 wrb
= (u8
*)wrb_from_mbox(adapter
);
499 status
= be_mbox_notify_wait(adapter
);
501 mutex_unlock(&adapter
->mbox_lock
);
505 /* Tell fw we're done with firing cmds by writing a
506 * special pattern across the wrb hdr; uses mbox
508 int be_cmd_fw_clean(struct be_adapter
*adapter
)
513 if (adapter
->eeh_err
)
516 if (mutex_lock_interruptible(&adapter
->mbox_lock
))
519 wrb
= (u8
*)wrb_from_mbox(adapter
);
529 status
= be_mbox_notify_wait(adapter
);
531 mutex_unlock(&adapter
->mbox_lock
);
534 int be_cmd_eq_create(struct be_adapter
*adapter
,
535 struct be_queue_info
*eq
, int eq_delay
)
537 struct be_mcc_wrb
*wrb
;
538 struct be_cmd_req_eq_create
*req
;
539 struct be_dma_mem
*q_mem
= &eq
->dma_mem
;
542 if (mutex_lock_interruptible(&adapter
->mbox_lock
))
545 wrb
= wrb_from_mbox(adapter
);
546 req
= embedded_payload(wrb
);
548 be_wrb_hdr_prepare(wrb
, sizeof(*req
), true, 0, OPCODE_COMMON_EQ_CREATE
);
550 be_cmd_hdr_prepare(&req
->hdr
, CMD_SUBSYSTEM_COMMON
,
551 OPCODE_COMMON_EQ_CREATE
, sizeof(*req
));
553 req
->num_pages
= cpu_to_le16(PAGES_4K_SPANNED(q_mem
->va
, q_mem
->size
));
555 AMAP_SET_BITS(struct amap_eq_context
, valid
, req
->context
, 1);
557 AMAP_SET_BITS(struct amap_eq_context
, size
, req
->context
, 0);
558 AMAP_SET_BITS(struct amap_eq_context
, count
, req
->context
,
559 __ilog2_u32(eq
->len
/256));
560 AMAP_SET_BITS(struct amap_eq_context
, delaymult
, req
->context
,
561 eq_delay_to_mult(eq_delay
));
562 be_dws_cpu_to_le(req
->context
, sizeof(req
->context
));
564 be_cmd_page_addrs_prepare(req
->pages
, ARRAY_SIZE(req
->pages
), q_mem
);
566 status
= be_mbox_notify_wait(adapter
);
568 struct be_cmd_resp_eq_create
*resp
= embedded_payload(wrb
);
569 eq
->id
= le16_to_cpu(resp
->eq_id
);
573 mutex_unlock(&adapter
->mbox_lock
);
578 int be_cmd_mac_addr_query(struct be_adapter
*adapter
, u8
*mac_addr
,
579 u8 type
, bool permanent
, u32 if_handle
)
581 struct be_mcc_wrb
*wrb
;
582 struct be_cmd_req_mac_query
*req
;
585 if (mutex_lock_interruptible(&adapter
->mbox_lock
))
588 wrb
= wrb_from_mbox(adapter
);
589 req
= embedded_payload(wrb
);
591 be_wrb_hdr_prepare(wrb
, sizeof(*req
), true, 0,
592 OPCODE_COMMON_NTWK_MAC_QUERY
);
594 be_cmd_hdr_prepare(&req
->hdr
, CMD_SUBSYSTEM_COMMON
,
595 OPCODE_COMMON_NTWK_MAC_QUERY
, sizeof(*req
));
601 req
->if_id
= cpu_to_le16((u16
) if_handle
);
605 status
= be_mbox_notify_wait(adapter
);
607 struct be_cmd_resp_mac_query
*resp
= embedded_payload(wrb
);
608 memcpy(mac_addr
, resp
->mac
.addr
, ETH_ALEN
);
611 mutex_unlock(&adapter
->mbox_lock
);
615 /* Uses synchronous MCCQ */
616 int be_cmd_pmac_add(struct be_adapter
*adapter
, u8
*mac_addr
,
617 u32 if_id
, u32
*pmac_id
, u32 domain
)
619 struct be_mcc_wrb
*wrb
;
620 struct be_cmd_req_pmac_add
*req
;
623 spin_lock_bh(&adapter
->mcc_lock
);
625 wrb
= wrb_from_mccq(adapter
);
630 req
= embedded_payload(wrb
);
632 be_wrb_hdr_prepare(wrb
, sizeof(*req
), true, 0,
633 OPCODE_COMMON_NTWK_PMAC_ADD
);
635 be_cmd_hdr_prepare(&req
->hdr
, CMD_SUBSYSTEM_COMMON
,
636 OPCODE_COMMON_NTWK_PMAC_ADD
, sizeof(*req
));
638 req
->hdr
.domain
= domain
;
639 req
->if_id
= cpu_to_le32(if_id
);
640 memcpy(req
->mac_address
, mac_addr
, ETH_ALEN
);
642 status
= be_mcc_notify_wait(adapter
);
644 struct be_cmd_resp_pmac_add
*resp
= embedded_payload(wrb
);
645 *pmac_id
= le32_to_cpu(resp
->pmac_id
);
649 spin_unlock_bh(&adapter
->mcc_lock
);
653 /* Uses synchronous MCCQ */
654 int be_cmd_pmac_del(struct be_adapter
*adapter
, u32 if_id
, u32 pmac_id
, u32 dom
)
656 struct be_mcc_wrb
*wrb
;
657 struct be_cmd_req_pmac_del
*req
;
660 spin_lock_bh(&adapter
->mcc_lock
);
662 wrb
= wrb_from_mccq(adapter
);
667 req
= embedded_payload(wrb
);
669 be_wrb_hdr_prepare(wrb
, sizeof(*req
), true, 0,
670 OPCODE_COMMON_NTWK_PMAC_DEL
);
672 be_cmd_hdr_prepare(&req
->hdr
, CMD_SUBSYSTEM_COMMON
,
673 OPCODE_COMMON_NTWK_PMAC_DEL
, sizeof(*req
));
675 req
->hdr
.domain
= dom
;
676 req
->if_id
= cpu_to_le32(if_id
);
677 req
->pmac_id
= cpu_to_le32(pmac_id
);
679 status
= be_mcc_notify_wait(adapter
);
682 spin_unlock_bh(&adapter
->mcc_lock
);
687 int be_cmd_cq_create(struct be_adapter
*adapter
,
688 struct be_queue_info
*cq
, struct be_queue_info
*eq
,
689 bool sol_evts
, bool no_delay
, int coalesce_wm
)
691 struct be_mcc_wrb
*wrb
;
692 struct be_cmd_req_cq_create
*req
;
693 struct be_dma_mem
*q_mem
= &cq
->dma_mem
;
697 if (mutex_lock_interruptible(&adapter
->mbox_lock
))
700 wrb
= wrb_from_mbox(adapter
);
701 req
= embedded_payload(wrb
);
702 ctxt
= &req
->context
;
704 be_wrb_hdr_prepare(wrb
, sizeof(*req
), true, 0,
705 OPCODE_COMMON_CQ_CREATE
);
707 be_cmd_hdr_prepare(&req
->hdr
, CMD_SUBSYSTEM_COMMON
,
708 OPCODE_COMMON_CQ_CREATE
, sizeof(*req
));
710 req
->num_pages
= cpu_to_le16(PAGES_4K_SPANNED(q_mem
->va
, q_mem
->size
));
711 if (lancer_chip(adapter
)) {
712 req
->hdr
.version
= 1;
713 req
->page_size
= 1; /* 1 for 4K */
714 AMAP_SET_BITS(struct amap_cq_context_lancer
, coalescwm
, ctxt
,
716 AMAP_SET_BITS(struct amap_cq_context_lancer
, nodelay
, ctxt
,
718 AMAP_SET_BITS(struct amap_cq_context_lancer
, count
, ctxt
,
719 __ilog2_u32(cq
->len
/256));
720 AMAP_SET_BITS(struct amap_cq_context_lancer
, valid
, ctxt
, 1);
721 AMAP_SET_BITS(struct amap_cq_context_lancer
, eventable
,
723 AMAP_SET_BITS(struct amap_cq_context_lancer
, eqid
,
725 AMAP_SET_BITS(struct amap_cq_context_lancer
, armed
, ctxt
, 1);
727 AMAP_SET_BITS(struct amap_cq_context_be
, coalescwm
, ctxt
,
729 AMAP_SET_BITS(struct amap_cq_context_be
, nodelay
,
731 AMAP_SET_BITS(struct amap_cq_context_be
, count
, ctxt
,
732 __ilog2_u32(cq
->len
/256));
733 AMAP_SET_BITS(struct amap_cq_context_be
, valid
, ctxt
, 1);
734 AMAP_SET_BITS(struct amap_cq_context_be
, solevent
,
736 AMAP_SET_BITS(struct amap_cq_context_be
, eventable
, ctxt
, 1);
737 AMAP_SET_BITS(struct amap_cq_context_be
, eqid
, ctxt
, eq
->id
);
738 AMAP_SET_BITS(struct amap_cq_context_be
, armed
, ctxt
, 1);
741 be_dws_cpu_to_le(ctxt
, sizeof(req
->context
));
743 be_cmd_page_addrs_prepare(req
->pages
, ARRAY_SIZE(req
->pages
), q_mem
);
745 status
= be_mbox_notify_wait(adapter
);
747 struct be_cmd_resp_cq_create
*resp
= embedded_payload(wrb
);
748 cq
->id
= le16_to_cpu(resp
->cq_id
);
752 mutex_unlock(&adapter
->mbox_lock
);
757 static u32
be_encoded_q_len(int q_len
)
759 u32 len_encoded
= fls(q_len
); /* log2(len) + 1 */
760 if (len_encoded
== 16)
765 int be_cmd_mccq_create(struct be_adapter
*adapter
,
766 struct be_queue_info
*mccq
,
767 struct be_queue_info
*cq
)
769 struct be_mcc_wrb
*wrb
;
770 struct be_cmd_req_mcc_create
*req
;
771 struct be_dma_mem
*q_mem
= &mccq
->dma_mem
;
775 if (mutex_lock_interruptible(&adapter
->mbox_lock
))
778 wrb
= wrb_from_mbox(adapter
);
779 req
= embedded_payload(wrb
);
780 ctxt
= &req
->context
;
782 be_wrb_hdr_prepare(wrb
, sizeof(*req
), true, 0,
783 OPCODE_COMMON_MCC_CREATE_EXT
);
785 be_cmd_hdr_prepare(&req
->hdr
, CMD_SUBSYSTEM_COMMON
,
786 OPCODE_COMMON_MCC_CREATE_EXT
, sizeof(*req
));
788 req
->num_pages
= cpu_to_le16(PAGES_4K_SPANNED(q_mem
->va
, q_mem
->size
));
789 if (lancer_chip(adapter
)) {
790 req
->hdr
.version
= 1;
791 req
->cq_id
= cpu_to_le16(cq
->id
);
793 AMAP_SET_BITS(struct amap_mcc_context_lancer
, ring_size
, ctxt
,
794 be_encoded_q_len(mccq
->len
));
795 AMAP_SET_BITS(struct amap_mcc_context_lancer
, valid
, ctxt
, 1);
796 AMAP_SET_BITS(struct amap_mcc_context_lancer
, async_cq_id
,
798 AMAP_SET_BITS(struct amap_mcc_context_lancer
, async_cq_valid
,
802 AMAP_SET_BITS(struct amap_mcc_context_be
, valid
, ctxt
, 1);
803 AMAP_SET_BITS(struct amap_mcc_context_be
, ring_size
, ctxt
,
804 be_encoded_q_len(mccq
->len
));
805 AMAP_SET_BITS(struct amap_mcc_context_be
, cq_id
, ctxt
, cq
->id
);
808 /* Subscribe to Link State and Group 5 Events(bits 1 and 5 set) */
809 req
->async_event_bitmap
[0] = cpu_to_le32(0x00000022);
810 be_dws_cpu_to_le(ctxt
, sizeof(req
->context
));
812 be_cmd_page_addrs_prepare(req
->pages
, ARRAY_SIZE(req
->pages
), q_mem
);
814 status
= be_mbox_notify_wait(adapter
);
816 struct be_cmd_resp_mcc_create
*resp
= embedded_payload(wrb
);
817 mccq
->id
= le16_to_cpu(resp
->id
);
818 mccq
->created
= true;
820 mutex_unlock(&adapter
->mbox_lock
);
825 int be_cmd_txq_create(struct be_adapter
*adapter
,
826 struct be_queue_info
*txq
,
827 struct be_queue_info
*cq
)
829 struct be_mcc_wrb
*wrb
;
830 struct be_cmd_req_eth_tx_create
*req
;
831 struct be_dma_mem
*q_mem
= &txq
->dma_mem
;
835 if (mutex_lock_interruptible(&adapter
->mbox_lock
))
838 wrb
= wrb_from_mbox(adapter
);
839 req
= embedded_payload(wrb
);
840 ctxt
= &req
->context
;
842 be_wrb_hdr_prepare(wrb
, sizeof(*req
), true, 0,
843 OPCODE_ETH_TX_CREATE
);
845 be_cmd_hdr_prepare(&req
->hdr
, CMD_SUBSYSTEM_ETH
, OPCODE_ETH_TX_CREATE
,
848 req
->num_pages
= PAGES_4K_SPANNED(q_mem
->va
, q_mem
->size
);
849 req
->ulp_num
= BE_ULP1_NUM
;
850 req
->type
= BE_ETH_TX_RING_TYPE_STANDARD
;
852 AMAP_SET_BITS(struct amap_tx_context
, tx_ring_size
, ctxt
,
853 be_encoded_q_len(txq
->len
));
854 AMAP_SET_BITS(struct amap_tx_context
, ctx_valid
, ctxt
, 1);
855 AMAP_SET_BITS(struct amap_tx_context
, cq_id_send
, ctxt
, cq
->id
);
857 be_dws_cpu_to_le(ctxt
, sizeof(req
->context
));
859 be_cmd_page_addrs_prepare(req
->pages
, ARRAY_SIZE(req
->pages
), q_mem
);
861 status
= be_mbox_notify_wait(adapter
);
863 struct be_cmd_resp_eth_tx_create
*resp
= embedded_payload(wrb
);
864 txq
->id
= le16_to_cpu(resp
->cid
);
868 mutex_unlock(&adapter
->mbox_lock
);
874 int be_cmd_rxq_create(struct be_adapter
*adapter
,
875 struct be_queue_info
*rxq
, u16 cq_id
, u16 frag_size
,
876 u16 max_frame_size
, u32 if_id
, u32 rss
, u8
*rss_id
)
878 struct be_mcc_wrb
*wrb
;
879 struct be_cmd_req_eth_rx_create
*req
;
880 struct be_dma_mem
*q_mem
= &rxq
->dma_mem
;
883 if (mutex_lock_interruptible(&adapter
->mbox_lock
))
886 wrb
= wrb_from_mbox(adapter
);
887 req
= embedded_payload(wrb
);
889 be_wrb_hdr_prepare(wrb
, sizeof(*req
), true, 0,
890 OPCODE_ETH_RX_CREATE
);
892 be_cmd_hdr_prepare(&req
->hdr
, CMD_SUBSYSTEM_ETH
, OPCODE_ETH_RX_CREATE
,
895 req
->cq_id
= cpu_to_le16(cq_id
);
896 req
->frag_size
= fls(frag_size
) - 1;
898 be_cmd_page_addrs_prepare(req
->pages
, ARRAY_SIZE(req
->pages
), q_mem
);
899 req
->interface_id
= cpu_to_le32(if_id
);
900 req
->max_frame_size
= cpu_to_le16(max_frame_size
);
901 req
->rss_queue
= cpu_to_le32(rss
);
903 status
= be_mbox_notify_wait(adapter
);
905 struct be_cmd_resp_eth_rx_create
*resp
= embedded_payload(wrb
);
906 rxq
->id
= le16_to_cpu(resp
->id
);
908 *rss_id
= resp
->rss_id
;
911 mutex_unlock(&adapter
->mbox_lock
);
916 /* Generic destroyer function for all types of queues
919 int be_cmd_q_destroy(struct be_adapter
*adapter
, struct be_queue_info
*q
,
922 struct be_mcc_wrb
*wrb
;
923 struct be_cmd_req_q_destroy
*req
;
924 u8 subsys
= 0, opcode
= 0;
927 if (adapter
->eeh_err
)
930 if (mutex_lock_interruptible(&adapter
->mbox_lock
))
933 wrb
= wrb_from_mbox(adapter
);
934 req
= embedded_payload(wrb
);
936 switch (queue_type
) {
938 subsys
= CMD_SUBSYSTEM_COMMON
;
939 opcode
= OPCODE_COMMON_EQ_DESTROY
;
942 subsys
= CMD_SUBSYSTEM_COMMON
;
943 opcode
= OPCODE_COMMON_CQ_DESTROY
;
946 subsys
= CMD_SUBSYSTEM_ETH
;
947 opcode
= OPCODE_ETH_TX_DESTROY
;
950 subsys
= CMD_SUBSYSTEM_ETH
;
951 opcode
= OPCODE_ETH_RX_DESTROY
;
954 subsys
= CMD_SUBSYSTEM_COMMON
;
955 opcode
= OPCODE_COMMON_MCC_DESTROY
;
961 be_wrb_hdr_prepare(wrb
, sizeof(*req
), true, 0, opcode
);
963 be_cmd_hdr_prepare(&req
->hdr
, subsys
, opcode
, sizeof(*req
));
964 req
->id
= cpu_to_le16(q
->id
);
966 status
= be_mbox_notify_wait(adapter
);
968 mutex_unlock(&adapter
->mbox_lock
);
973 /* Create an rx filtering policy configuration on an i/f
976 int be_cmd_if_create(struct be_adapter
*adapter
, u32 cap_flags
, u32 en_flags
,
977 u8
*mac
, bool pmac_invalid
, u32
*if_handle
, u32
*pmac_id
,
980 struct be_mcc_wrb
*wrb
;
981 struct be_cmd_req_if_create
*req
;
984 if (mutex_lock_interruptible(&adapter
->mbox_lock
))
987 wrb
= wrb_from_mbox(adapter
);
988 req
= embedded_payload(wrb
);
990 be_wrb_hdr_prepare(wrb
, sizeof(*req
), true, 0,
991 OPCODE_COMMON_NTWK_INTERFACE_CREATE
);
993 be_cmd_hdr_prepare(&req
->hdr
, CMD_SUBSYSTEM_COMMON
,
994 OPCODE_COMMON_NTWK_INTERFACE_CREATE
, sizeof(*req
));
996 req
->hdr
.domain
= domain
;
997 req
->capability_flags
= cpu_to_le32(cap_flags
);
998 req
->enable_flags
= cpu_to_le32(en_flags
);
999 req
->pmac_invalid
= pmac_invalid
;
1001 memcpy(req
->mac_addr
, mac
, ETH_ALEN
);
1003 status
= be_mbox_notify_wait(adapter
);
1005 struct be_cmd_resp_if_create
*resp
= embedded_payload(wrb
);
1006 *if_handle
= le32_to_cpu(resp
->interface_id
);
1008 *pmac_id
= le32_to_cpu(resp
->pmac_id
);
1011 mutex_unlock(&adapter
->mbox_lock
);
1016 int be_cmd_if_destroy(struct be_adapter
*adapter
, u32 interface_id
, u32 domain
)
1018 struct be_mcc_wrb
*wrb
;
1019 struct be_cmd_req_if_destroy
*req
;
1022 if (adapter
->eeh_err
)
1025 if (mutex_lock_interruptible(&adapter
->mbox_lock
))
1028 wrb
= wrb_from_mbox(adapter
);
1029 req
= embedded_payload(wrb
);
1031 be_wrb_hdr_prepare(wrb
, sizeof(*req
), true, 0,
1032 OPCODE_COMMON_NTWK_INTERFACE_DESTROY
);
1034 be_cmd_hdr_prepare(&req
->hdr
, CMD_SUBSYSTEM_COMMON
,
1035 OPCODE_COMMON_NTWK_INTERFACE_DESTROY
, sizeof(*req
));
1037 req
->hdr
.domain
= domain
;
1038 req
->interface_id
= cpu_to_le32(interface_id
);
1040 status
= be_mbox_notify_wait(adapter
);
1042 mutex_unlock(&adapter
->mbox_lock
);
1047 /* Get stats is a non embedded command: the request is not embedded inside
1048 * WRB but is a separate dma memory block
1049 * Uses asynchronous MCC
1051 int be_cmd_get_stats(struct be_adapter
*adapter
, struct be_dma_mem
*nonemb_cmd
)
1053 struct be_mcc_wrb
*wrb
;
1054 struct be_cmd_req_get_stats
*req
;
1058 spin_lock_bh(&adapter
->mcc_lock
);
1060 wrb
= wrb_from_mccq(adapter
);
1065 req
= nonemb_cmd
->va
;
1066 sge
= nonembedded_sgl(wrb
);
1068 be_wrb_hdr_prepare(wrb
, sizeof(*req
), false, 1,
1069 OPCODE_ETH_GET_STATISTICS
);
1071 be_cmd_hdr_prepare(&req
->hdr
, CMD_SUBSYSTEM_ETH
,
1072 OPCODE_ETH_GET_STATISTICS
, sizeof(*req
));
1073 sge
->pa_hi
= cpu_to_le32(upper_32_bits(nonemb_cmd
->dma
));
1074 sge
->pa_lo
= cpu_to_le32(nonemb_cmd
->dma
& 0xFFFFFFFF);
1075 sge
->len
= cpu_to_le32(nonemb_cmd
->size
);
1077 be_mcc_notify(adapter
);
1078 adapter
->stats_cmd_sent
= true;
1081 spin_unlock_bh(&adapter
->mcc_lock
);
1085 /* Uses synchronous mcc */
1086 int be_cmd_link_status_query(struct be_adapter
*adapter
,
1087 bool *link_up
, u8
*mac_speed
, u16
*link_speed
)
1089 struct be_mcc_wrb
*wrb
;
1090 struct be_cmd_req_link_status
*req
;
1093 spin_lock_bh(&adapter
->mcc_lock
);
1095 wrb
= wrb_from_mccq(adapter
);
1100 req
= embedded_payload(wrb
);
1104 be_wrb_hdr_prepare(wrb
, sizeof(*req
), true, 0,
1105 OPCODE_COMMON_NTWK_LINK_STATUS_QUERY
);
1107 be_cmd_hdr_prepare(&req
->hdr
, CMD_SUBSYSTEM_COMMON
,
1108 OPCODE_COMMON_NTWK_LINK_STATUS_QUERY
, sizeof(*req
));
1110 status
= be_mcc_notify_wait(adapter
);
1112 struct be_cmd_resp_link_status
*resp
= embedded_payload(wrb
);
1113 if (resp
->mac_speed
!= PHY_LINK_SPEED_ZERO
) {
1115 *link_speed
= le16_to_cpu(resp
->link_speed
);
1116 *mac_speed
= resp
->mac_speed
;
1121 spin_unlock_bh(&adapter
->mcc_lock
);
1126 int be_cmd_get_fw_ver(struct be_adapter
*adapter
, char *fw_ver
)
1128 struct be_mcc_wrb
*wrb
;
1129 struct be_cmd_req_get_fw_version
*req
;
1132 if (mutex_lock_interruptible(&adapter
->mbox_lock
))
1135 wrb
= wrb_from_mbox(adapter
);
1136 req
= embedded_payload(wrb
);
1138 be_wrb_hdr_prepare(wrb
, sizeof(*req
), true, 0,
1139 OPCODE_COMMON_GET_FW_VERSION
);
1141 be_cmd_hdr_prepare(&req
->hdr
, CMD_SUBSYSTEM_COMMON
,
1142 OPCODE_COMMON_GET_FW_VERSION
, sizeof(*req
));
1144 status
= be_mbox_notify_wait(adapter
);
1146 struct be_cmd_resp_get_fw_version
*resp
= embedded_payload(wrb
);
1147 strncpy(fw_ver
, resp
->firmware_version_string
, FW_VER_LEN
);
1150 mutex_unlock(&adapter
->mbox_lock
);
1154 /* set the EQ delay interval of an EQ to specified value
1157 int be_cmd_modify_eqd(struct be_adapter
*adapter
, u32 eq_id
, u32 eqd
)
1159 struct be_mcc_wrb
*wrb
;
1160 struct be_cmd_req_modify_eq_delay
*req
;
1163 spin_lock_bh(&adapter
->mcc_lock
);
1165 wrb
= wrb_from_mccq(adapter
);
1170 req
= embedded_payload(wrb
);
1172 be_wrb_hdr_prepare(wrb
, sizeof(*req
), true, 0,
1173 OPCODE_COMMON_MODIFY_EQ_DELAY
);
1175 be_cmd_hdr_prepare(&req
->hdr
, CMD_SUBSYSTEM_COMMON
,
1176 OPCODE_COMMON_MODIFY_EQ_DELAY
, sizeof(*req
));
1178 req
->num_eq
= cpu_to_le32(1);
1179 req
->delay
[0].eq_id
= cpu_to_le32(eq_id
);
1180 req
->delay
[0].phase
= 0;
1181 req
->delay
[0].delay_multiplier
= cpu_to_le32(eqd
);
1183 be_mcc_notify(adapter
);
1186 spin_unlock_bh(&adapter
->mcc_lock
);
1190 /* Uses sycnhronous mcc */
1191 int be_cmd_vlan_config(struct be_adapter
*adapter
, u32 if_id
, u16
*vtag_array
,
1192 u32 num
, bool untagged
, bool promiscuous
)
1194 struct be_mcc_wrb
*wrb
;
1195 struct be_cmd_req_vlan_config
*req
;
1198 spin_lock_bh(&adapter
->mcc_lock
);
1200 wrb
= wrb_from_mccq(adapter
);
1205 req
= embedded_payload(wrb
);
1207 be_wrb_hdr_prepare(wrb
, sizeof(*req
), true, 0,
1208 OPCODE_COMMON_NTWK_VLAN_CONFIG
);
1210 be_cmd_hdr_prepare(&req
->hdr
, CMD_SUBSYSTEM_COMMON
,
1211 OPCODE_COMMON_NTWK_VLAN_CONFIG
, sizeof(*req
));
1213 req
->interface_id
= if_id
;
1214 req
->promiscuous
= promiscuous
;
1215 req
->untagged
= untagged
;
1216 req
->num_vlan
= num
;
1218 memcpy(req
->normal_vlan
, vtag_array
,
1219 req
->num_vlan
* sizeof(vtag_array
[0]));
1222 status
= be_mcc_notify_wait(adapter
);
1225 spin_unlock_bh(&adapter
->mcc_lock
);
1229 /* Uses MCC for this command as it may be called in BH context
1230 * Uses synchronous mcc
1232 int be_cmd_promiscuous_config(struct be_adapter
*adapter
, u8 port_num
, bool en
)
1234 struct be_mcc_wrb
*wrb
;
1235 struct be_cmd_req_promiscuous_config
*req
;
1238 spin_lock_bh(&adapter
->mcc_lock
);
1240 wrb
= wrb_from_mccq(adapter
);
1245 req
= embedded_payload(wrb
);
1247 be_wrb_hdr_prepare(wrb
, sizeof(*req
), true, 0, OPCODE_ETH_PROMISCUOUS
);
1249 be_cmd_hdr_prepare(&req
->hdr
, CMD_SUBSYSTEM_ETH
,
1250 OPCODE_ETH_PROMISCUOUS
, sizeof(*req
));
1252 /* In FW versions X.102.149/X.101.487 and later,
1253 * the port setting associated only with the
1254 * issuing pci function will take effect
1257 req
->port1_promiscuous
= en
;
1259 req
->port0_promiscuous
= en
;
1261 status
= be_mcc_notify_wait(adapter
);
1264 spin_unlock_bh(&adapter
->mcc_lock
);
1269 * Uses MCC for this command as it may be called in BH context
1270 * (mc == NULL) => multicast promiscous
1272 int be_cmd_multicast_set(struct be_adapter
*adapter
, u32 if_id
,
1273 struct net_device
*netdev
, struct be_dma_mem
*mem
)
1275 struct be_mcc_wrb
*wrb
;
1276 struct be_cmd_req_mcast_mac_config
*req
= mem
->va
;
1280 spin_lock_bh(&adapter
->mcc_lock
);
1282 wrb
= wrb_from_mccq(adapter
);
1287 sge
= nonembedded_sgl(wrb
);
1288 memset(req
, 0, sizeof(*req
));
1290 be_wrb_hdr_prepare(wrb
, sizeof(*req
), false, 1,
1291 OPCODE_COMMON_NTWK_MULTICAST_SET
);
1292 sge
->pa_hi
= cpu_to_le32(upper_32_bits(mem
->dma
));
1293 sge
->pa_lo
= cpu_to_le32(mem
->dma
& 0xFFFFFFFF);
1294 sge
->len
= cpu_to_le32(mem
->size
);
1296 be_cmd_hdr_prepare(&req
->hdr
, CMD_SUBSYSTEM_COMMON
,
1297 OPCODE_COMMON_NTWK_MULTICAST_SET
, sizeof(*req
));
1299 req
->interface_id
= if_id
;
1302 struct netdev_hw_addr
*ha
;
1304 req
->num_mac
= cpu_to_le16(netdev_mc_count(netdev
));
1307 netdev_for_each_mc_addr(ha
, netdev
)
1308 memcpy(req
->mac
[i
++].byte
, ha
->addr
, ETH_ALEN
);
1310 req
->promiscuous
= 1;
1313 status
= be_mcc_notify_wait(adapter
);
1316 spin_unlock_bh(&adapter
->mcc_lock
);
1320 /* Uses synchrounous mcc */
1321 int be_cmd_set_flow_control(struct be_adapter
*adapter
, u32 tx_fc
, u32 rx_fc
)
1323 struct be_mcc_wrb
*wrb
;
1324 struct be_cmd_req_set_flow_control
*req
;
1327 spin_lock_bh(&adapter
->mcc_lock
);
1329 wrb
= wrb_from_mccq(adapter
);
1334 req
= embedded_payload(wrb
);
1336 be_wrb_hdr_prepare(wrb
, sizeof(*req
), true, 0,
1337 OPCODE_COMMON_SET_FLOW_CONTROL
);
1339 be_cmd_hdr_prepare(&req
->hdr
, CMD_SUBSYSTEM_COMMON
,
1340 OPCODE_COMMON_SET_FLOW_CONTROL
, sizeof(*req
));
1342 req
->tx_flow_control
= cpu_to_le16((u16
)tx_fc
);
1343 req
->rx_flow_control
= cpu_to_le16((u16
)rx_fc
);
1345 status
= be_mcc_notify_wait(adapter
);
1348 spin_unlock_bh(&adapter
->mcc_lock
);
1353 int be_cmd_get_flow_control(struct be_adapter
*adapter
, u32
*tx_fc
, u32
*rx_fc
)
1355 struct be_mcc_wrb
*wrb
;
1356 struct be_cmd_req_get_flow_control
*req
;
1359 spin_lock_bh(&adapter
->mcc_lock
);
1361 wrb
= wrb_from_mccq(adapter
);
1366 req
= embedded_payload(wrb
);
1368 be_wrb_hdr_prepare(wrb
, sizeof(*req
), true, 0,
1369 OPCODE_COMMON_GET_FLOW_CONTROL
);
1371 be_cmd_hdr_prepare(&req
->hdr
, CMD_SUBSYSTEM_COMMON
,
1372 OPCODE_COMMON_GET_FLOW_CONTROL
, sizeof(*req
));
1374 status
= be_mcc_notify_wait(adapter
);
1376 struct be_cmd_resp_get_flow_control
*resp
=
1377 embedded_payload(wrb
);
1378 *tx_fc
= le16_to_cpu(resp
->tx_flow_control
);
1379 *rx_fc
= le16_to_cpu(resp
->rx_flow_control
);
1383 spin_unlock_bh(&adapter
->mcc_lock
);
1388 int be_cmd_query_fw_cfg(struct be_adapter
*adapter
, u32
*port_num
,
1389 u32
*mode
, u32
*caps
)
1391 struct be_mcc_wrb
*wrb
;
1392 struct be_cmd_req_query_fw_cfg
*req
;
1395 if (mutex_lock_interruptible(&adapter
->mbox_lock
))
1398 wrb
= wrb_from_mbox(adapter
);
1399 req
= embedded_payload(wrb
);
1401 be_wrb_hdr_prepare(wrb
, sizeof(*req
), true, 0,
1402 OPCODE_COMMON_QUERY_FIRMWARE_CONFIG
);
1404 be_cmd_hdr_prepare(&req
->hdr
, CMD_SUBSYSTEM_COMMON
,
1405 OPCODE_COMMON_QUERY_FIRMWARE_CONFIG
, sizeof(*req
));
1407 status
= be_mbox_notify_wait(adapter
);
1409 struct be_cmd_resp_query_fw_cfg
*resp
= embedded_payload(wrb
);
1410 *port_num
= le32_to_cpu(resp
->phys_port
);
1411 *mode
= le32_to_cpu(resp
->function_mode
);
1412 *caps
= le32_to_cpu(resp
->function_caps
);
1415 mutex_unlock(&adapter
->mbox_lock
);
1420 int be_cmd_reset_function(struct be_adapter
*adapter
)
1422 struct be_mcc_wrb
*wrb
;
1423 struct be_cmd_req_hdr
*req
;
1426 if (mutex_lock_interruptible(&adapter
->mbox_lock
))
1429 wrb
= wrb_from_mbox(adapter
);
1430 req
= embedded_payload(wrb
);
1432 be_wrb_hdr_prepare(wrb
, sizeof(*req
), true, 0,
1433 OPCODE_COMMON_FUNCTION_RESET
);
1435 be_cmd_hdr_prepare(req
, CMD_SUBSYSTEM_COMMON
,
1436 OPCODE_COMMON_FUNCTION_RESET
, sizeof(*req
));
1438 status
= be_mbox_notify_wait(adapter
);
1440 mutex_unlock(&adapter
->mbox_lock
);
1444 int be_cmd_rss_config(struct be_adapter
*adapter
, u8
*rsstable
, u16 table_size
)
1446 struct be_mcc_wrb
*wrb
;
1447 struct be_cmd_req_rss_config
*req
;
1451 if (mutex_lock_interruptible(&adapter
->mbox_lock
))
1454 wrb
= wrb_from_mbox(adapter
);
1455 req
= embedded_payload(wrb
);
1457 be_wrb_hdr_prepare(wrb
, sizeof(*req
), true, 0,
1458 OPCODE_ETH_RSS_CONFIG
);
1460 be_cmd_hdr_prepare(&req
->hdr
, CMD_SUBSYSTEM_ETH
,
1461 OPCODE_ETH_RSS_CONFIG
, sizeof(*req
));
1463 req
->if_id
= cpu_to_le32(adapter
->if_handle
);
1464 req
->enable_rss
= cpu_to_le16(RSS_ENABLE_TCP_IPV4
| RSS_ENABLE_IPV4
);
1465 req
->cpu_table_size_log2
= cpu_to_le16(fls(table_size
) - 1);
1466 memcpy(req
->cpu_table
, rsstable
, table_size
);
1467 memcpy(req
->hash
, myhash
, sizeof(myhash
));
1468 be_dws_cpu_to_le(req
->hash
, sizeof(req
->hash
));
1470 status
= be_mbox_notify_wait(adapter
);
1472 mutex_unlock(&adapter
->mbox_lock
);
1477 int be_cmd_set_beacon_state(struct be_adapter
*adapter
, u8 port_num
,
1478 u8 bcn
, u8 sts
, u8 state
)
1480 struct be_mcc_wrb
*wrb
;
1481 struct be_cmd_req_enable_disable_beacon
*req
;
1484 spin_lock_bh(&adapter
->mcc_lock
);
1486 wrb
= wrb_from_mccq(adapter
);
1491 req
= embedded_payload(wrb
);
1493 be_wrb_hdr_prepare(wrb
, sizeof(*req
), true, 0,
1494 OPCODE_COMMON_ENABLE_DISABLE_BEACON
);
1496 be_cmd_hdr_prepare(&req
->hdr
, CMD_SUBSYSTEM_COMMON
,
1497 OPCODE_COMMON_ENABLE_DISABLE_BEACON
, sizeof(*req
));
1499 req
->port_num
= port_num
;
1500 req
->beacon_state
= state
;
1501 req
->beacon_duration
= bcn
;
1502 req
->status_duration
= sts
;
1504 status
= be_mcc_notify_wait(adapter
);
1507 spin_unlock_bh(&adapter
->mcc_lock
);
1512 int be_cmd_get_beacon_state(struct be_adapter
*adapter
, u8 port_num
, u32
*state
)
1514 struct be_mcc_wrb
*wrb
;
1515 struct be_cmd_req_get_beacon_state
*req
;
1518 spin_lock_bh(&adapter
->mcc_lock
);
1520 wrb
= wrb_from_mccq(adapter
);
1525 req
= embedded_payload(wrb
);
1527 be_wrb_hdr_prepare(wrb
, sizeof(*req
), true, 0,
1528 OPCODE_COMMON_GET_BEACON_STATE
);
1530 be_cmd_hdr_prepare(&req
->hdr
, CMD_SUBSYSTEM_COMMON
,
1531 OPCODE_COMMON_GET_BEACON_STATE
, sizeof(*req
));
1533 req
->port_num
= port_num
;
1535 status
= be_mcc_notify_wait(adapter
);
1537 struct be_cmd_resp_get_beacon_state
*resp
=
1538 embedded_payload(wrb
);
1539 *state
= resp
->beacon_state
;
1543 spin_unlock_bh(&adapter
->mcc_lock
);
1547 int be_cmd_write_flashrom(struct be_adapter
*adapter
, struct be_dma_mem
*cmd
,
1548 u32 flash_type
, u32 flash_opcode
, u32 buf_size
)
1550 struct be_mcc_wrb
*wrb
;
1551 struct be_cmd_write_flashrom
*req
;
1555 spin_lock_bh(&adapter
->mcc_lock
);
1556 adapter
->flash_status
= 0;
1558 wrb
= wrb_from_mccq(adapter
);
1564 sge
= nonembedded_sgl(wrb
);
1566 be_wrb_hdr_prepare(wrb
, cmd
->size
, false, 1,
1567 OPCODE_COMMON_WRITE_FLASHROM
);
1568 wrb
->tag1
= CMD_SUBSYSTEM_COMMON
;
1570 be_cmd_hdr_prepare(&req
->hdr
, CMD_SUBSYSTEM_COMMON
,
1571 OPCODE_COMMON_WRITE_FLASHROM
, cmd
->size
);
1572 sge
->pa_hi
= cpu_to_le32(upper_32_bits(cmd
->dma
));
1573 sge
->pa_lo
= cpu_to_le32(cmd
->dma
& 0xFFFFFFFF);
1574 sge
->len
= cpu_to_le32(cmd
->size
);
1576 req
->params
.op_type
= cpu_to_le32(flash_type
);
1577 req
->params
.op_code
= cpu_to_le32(flash_opcode
);
1578 req
->params
.data_buf_size
= cpu_to_le32(buf_size
);
1580 be_mcc_notify(adapter
);
1581 spin_unlock_bh(&adapter
->mcc_lock
);
1583 if (!wait_for_completion_timeout(&adapter
->flash_compl
,
1584 msecs_to_jiffies(12000)))
1587 status
= adapter
->flash_status
;
1592 spin_unlock_bh(&adapter
->mcc_lock
);
1596 int be_cmd_get_flash_crc(struct be_adapter
*adapter
, u8
*flashed_crc
,
1599 struct be_mcc_wrb
*wrb
;
1600 struct be_cmd_write_flashrom
*req
;
1603 spin_lock_bh(&adapter
->mcc_lock
);
1605 wrb
= wrb_from_mccq(adapter
);
1610 req
= embedded_payload(wrb
);
1612 be_wrb_hdr_prepare(wrb
, sizeof(*req
)+4, true, 0,
1613 OPCODE_COMMON_READ_FLASHROM
);
1615 be_cmd_hdr_prepare(&req
->hdr
, CMD_SUBSYSTEM_COMMON
,
1616 OPCODE_COMMON_READ_FLASHROM
, sizeof(*req
)+4);
1618 req
->params
.op_type
= cpu_to_le32(IMG_TYPE_REDBOOT
);
1619 req
->params
.op_code
= cpu_to_le32(FLASHROM_OPER_REPORT
);
1620 req
->params
.offset
= cpu_to_le32(offset
);
1621 req
->params
.data_buf_size
= cpu_to_le32(0x4);
1623 status
= be_mcc_notify_wait(adapter
);
1625 memcpy(flashed_crc
, req
->params
.data_buf
, 4);
1628 spin_unlock_bh(&adapter
->mcc_lock
);
1632 int be_cmd_enable_magic_wol(struct be_adapter
*adapter
, u8
*mac
,
1633 struct be_dma_mem
*nonemb_cmd
)
1635 struct be_mcc_wrb
*wrb
;
1636 struct be_cmd_req_acpi_wol_magic_config
*req
;
1640 spin_lock_bh(&adapter
->mcc_lock
);
1642 wrb
= wrb_from_mccq(adapter
);
1647 req
= nonemb_cmd
->va
;
1648 sge
= nonembedded_sgl(wrb
);
1650 be_wrb_hdr_prepare(wrb
, sizeof(*req
), false, 1,
1651 OPCODE_ETH_ACPI_WOL_MAGIC_CONFIG
);
1653 be_cmd_hdr_prepare(&req
->hdr
, CMD_SUBSYSTEM_ETH
,
1654 OPCODE_ETH_ACPI_WOL_MAGIC_CONFIG
, sizeof(*req
));
1655 memcpy(req
->magic_mac
, mac
, ETH_ALEN
);
1657 sge
->pa_hi
= cpu_to_le32(upper_32_bits(nonemb_cmd
->dma
));
1658 sge
->pa_lo
= cpu_to_le32(nonemb_cmd
->dma
& 0xFFFFFFFF);
1659 sge
->len
= cpu_to_le32(nonemb_cmd
->size
);
1661 status
= be_mcc_notify_wait(adapter
);
1664 spin_unlock_bh(&adapter
->mcc_lock
);
1668 int be_cmd_set_loopback(struct be_adapter
*adapter
, u8 port_num
,
1669 u8 loopback_type
, u8 enable
)
1671 struct be_mcc_wrb
*wrb
;
1672 struct be_cmd_req_set_lmode
*req
;
1675 spin_lock_bh(&adapter
->mcc_lock
);
1677 wrb
= wrb_from_mccq(adapter
);
1683 req
= embedded_payload(wrb
);
1685 be_wrb_hdr_prepare(wrb
, sizeof(*req
), true, 0,
1686 OPCODE_LOWLEVEL_SET_LOOPBACK_MODE
);
1688 be_cmd_hdr_prepare(&req
->hdr
, CMD_SUBSYSTEM_LOWLEVEL
,
1689 OPCODE_LOWLEVEL_SET_LOOPBACK_MODE
,
1692 req
->src_port
= port_num
;
1693 req
->dest_port
= port_num
;
1694 req
->loopback_type
= loopback_type
;
1695 req
->loopback_state
= enable
;
1697 status
= be_mcc_notify_wait(adapter
);
1699 spin_unlock_bh(&adapter
->mcc_lock
);
1703 int be_cmd_loopback_test(struct be_adapter
*adapter
, u32 port_num
,
1704 u32 loopback_type
, u32 pkt_size
, u32 num_pkts
, u64 pattern
)
1706 struct be_mcc_wrb
*wrb
;
1707 struct be_cmd_req_loopback_test
*req
;
1710 spin_lock_bh(&adapter
->mcc_lock
);
1712 wrb
= wrb_from_mccq(adapter
);
1718 req
= embedded_payload(wrb
);
1720 be_wrb_hdr_prepare(wrb
, sizeof(*req
), true, 0,
1721 OPCODE_LOWLEVEL_LOOPBACK_TEST
);
1723 be_cmd_hdr_prepare(&req
->hdr
, CMD_SUBSYSTEM_LOWLEVEL
,
1724 OPCODE_LOWLEVEL_LOOPBACK_TEST
, sizeof(*req
));
1725 req
->hdr
.timeout
= cpu_to_le32(4);
1727 req
->pattern
= cpu_to_le64(pattern
);
1728 req
->src_port
= cpu_to_le32(port_num
);
1729 req
->dest_port
= cpu_to_le32(port_num
);
1730 req
->pkt_size
= cpu_to_le32(pkt_size
);
1731 req
->num_pkts
= cpu_to_le32(num_pkts
);
1732 req
->loopback_type
= cpu_to_le32(loopback_type
);
1734 status
= be_mcc_notify_wait(adapter
);
1736 struct be_cmd_resp_loopback_test
*resp
= embedded_payload(wrb
);
1737 status
= le32_to_cpu(resp
->status
);
1741 spin_unlock_bh(&adapter
->mcc_lock
);
1745 int be_cmd_ddr_dma_test(struct be_adapter
*adapter
, u64 pattern
,
1746 u32 byte_cnt
, struct be_dma_mem
*cmd
)
1748 struct be_mcc_wrb
*wrb
;
1749 struct be_cmd_req_ddrdma_test
*req
;
1754 spin_lock_bh(&adapter
->mcc_lock
);
1756 wrb
= wrb_from_mccq(adapter
);
1762 sge
= nonembedded_sgl(wrb
);
1763 be_wrb_hdr_prepare(wrb
, cmd
->size
, false, 1,
1764 OPCODE_LOWLEVEL_HOST_DDR_DMA
);
1765 be_cmd_hdr_prepare(&req
->hdr
, CMD_SUBSYSTEM_LOWLEVEL
,
1766 OPCODE_LOWLEVEL_HOST_DDR_DMA
, cmd
->size
);
1768 sge
->pa_hi
= cpu_to_le32(upper_32_bits(cmd
->dma
));
1769 sge
->pa_lo
= cpu_to_le32(cmd
->dma
& 0xFFFFFFFF);
1770 sge
->len
= cpu_to_le32(cmd
->size
);
1772 req
->pattern
= cpu_to_le64(pattern
);
1773 req
->byte_count
= cpu_to_le32(byte_cnt
);
1774 for (i
= 0; i
< byte_cnt
; i
++) {
1775 req
->snd_buff
[i
] = (u8
)(pattern
>> (j
*8));
1781 status
= be_mcc_notify_wait(adapter
);
1784 struct be_cmd_resp_ddrdma_test
*resp
;
1786 if ((memcmp(resp
->rcv_buff
, req
->snd_buff
, byte_cnt
) != 0) ||
1793 spin_unlock_bh(&adapter
->mcc_lock
);
1797 int be_cmd_get_seeprom_data(struct be_adapter
*adapter
,
1798 struct be_dma_mem
*nonemb_cmd
)
1800 struct be_mcc_wrb
*wrb
;
1801 struct be_cmd_req_seeprom_read
*req
;
1805 spin_lock_bh(&adapter
->mcc_lock
);
1807 wrb
= wrb_from_mccq(adapter
);
1812 req
= nonemb_cmd
->va
;
1813 sge
= nonembedded_sgl(wrb
);
1815 be_wrb_hdr_prepare(wrb
, sizeof(*req
), false, 1,
1816 OPCODE_COMMON_SEEPROM_READ
);
1818 be_cmd_hdr_prepare(&req
->hdr
, CMD_SUBSYSTEM_COMMON
,
1819 OPCODE_COMMON_SEEPROM_READ
, sizeof(*req
));
1821 sge
->pa_hi
= cpu_to_le32(upper_32_bits(nonemb_cmd
->dma
));
1822 sge
->pa_lo
= cpu_to_le32(nonemb_cmd
->dma
& 0xFFFFFFFF);
1823 sge
->len
= cpu_to_le32(nonemb_cmd
->size
);
1825 status
= be_mcc_notify_wait(adapter
);
1828 spin_unlock_bh(&adapter
->mcc_lock
);
1832 int be_cmd_get_phy_info(struct be_adapter
*adapter
, struct be_dma_mem
*cmd
)
1834 struct be_mcc_wrb
*wrb
;
1835 struct be_cmd_req_get_phy_info
*req
;
1839 spin_lock_bh(&adapter
->mcc_lock
);
1841 wrb
= wrb_from_mccq(adapter
);
1848 sge
= nonembedded_sgl(wrb
);
1850 be_wrb_hdr_prepare(wrb
, sizeof(*req
), false, 1,
1851 OPCODE_COMMON_GET_PHY_DETAILS
);
1853 be_cmd_hdr_prepare(&req
->hdr
, CMD_SUBSYSTEM_COMMON
,
1854 OPCODE_COMMON_GET_PHY_DETAILS
,
1857 sge
->pa_hi
= cpu_to_le32(upper_32_bits(cmd
->dma
));
1858 sge
->pa_lo
= cpu_to_le32(cmd
->dma
& 0xFFFFFFFF);
1859 sge
->len
= cpu_to_le32(cmd
->size
);
1861 status
= be_mcc_notify_wait(adapter
);
1863 spin_unlock_bh(&adapter
->mcc_lock
);
1867 int be_cmd_set_qos(struct be_adapter
*adapter
, u32 bps
, u32 domain
)
1869 struct be_mcc_wrb
*wrb
;
1870 struct be_cmd_req_set_qos
*req
;
1873 spin_lock_bh(&adapter
->mcc_lock
);
1875 wrb
= wrb_from_mccq(adapter
);
1881 req
= embedded_payload(wrb
);
1883 be_wrb_hdr_prepare(wrb
, sizeof(*req
), true, 0,
1884 OPCODE_COMMON_SET_QOS
);
1886 be_cmd_hdr_prepare(&req
->hdr
, CMD_SUBSYSTEM_COMMON
,
1887 OPCODE_COMMON_SET_QOS
, sizeof(*req
));
1889 req
->hdr
.domain
= domain
;
1890 req
->valid_bits
= cpu_to_le32(BE_QOS_BITS_NIC
);
1891 req
->max_bps_nic
= cpu_to_le32(bps
);
1893 status
= be_mcc_notify_wait(adapter
);
1896 spin_unlock_bh(&adapter
->mcc_lock
);