/*
 * Copyright (C) 2005 - 2013 Emulex
 * All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation. The full GNU General
 * Public License is included in this distribution in the file called COPYING.
 *
 * Contact Information:
 * linux-drivers@emulex.com
 *
 * Emulex
 * 3333 Susan Street
 * Costa Mesa, CA 92626
 */
18 #include <linux/module.h>
22 static struct be_cmd_priv_map cmd_priv_map
[] = {
24 OPCODE_ETH_ACPI_WOL_MAGIC_CONFIG
,
26 BE_PRIV_LNKMGMT
| BE_PRIV_VHADM
|
27 BE_PRIV_DEVCFG
| BE_PRIV_DEVSEC
30 OPCODE_COMMON_GET_FLOW_CONTROL
,
32 BE_PRIV_LNKQUERY
| BE_PRIV_VHADM
|
33 BE_PRIV_DEVCFG
| BE_PRIV_DEVSEC
36 OPCODE_COMMON_SET_FLOW_CONTROL
,
38 BE_PRIV_LNKMGMT
| BE_PRIV_VHADM
|
39 BE_PRIV_DEVCFG
| BE_PRIV_DEVSEC
42 OPCODE_ETH_GET_PPORT_STATS
,
44 BE_PRIV_LNKMGMT
| BE_PRIV_VHADM
|
45 BE_PRIV_DEVCFG
| BE_PRIV_DEVSEC
48 OPCODE_COMMON_GET_PHY_DETAILS
,
50 BE_PRIV_LNKMGMT
| BE_PRIV_VHADM
|
51 BE_PRIV_DEVCFG
| BE_PRIV_DEVSEC
55 static bool be_cmd_allowed(struct be_adapter
*adapter
, u8 opcode
,
59 int num_entries
= sizeof(cmd_priv_map
)/sizeof(struct be_cmd_priv_map
);
60 u32 cmd_privileges
= adapter
->cmd_privileges
;
62 for (i
= 0; i
< num_entries
; i
++)
63 if (opcode
== cmd_priv_map
[i
].opcode
&&
64 subsystem
== cmd_priv_map
[i
].subsystem
)
65 if (!(cmd_privileges
& cmd_priv_map
[i
].priv_mask
))
71 static inline void *embedded_payload(struct be_mcc_wrb
*wrb
)
73 return wrb
->payload
.embedded_payload
;
76 static void be_mcc_notify(struct be_adapter
*adapter
)
78 struct be_queue_info
*mccq
= &adapter
->mcc_obj
.q
;
81 if (be_error(adapter
))
84 val
|= mccq
->id
& DB_MCCQ_RING_ID_MASK
;
85 val
|= 1 << DB_MCCQ_NUM_POSTED_SHIFT
;
88 iowrite32(val
, adapter
->db
+ DB_MCCQ_OFFSET
);
91 /* To check if valid bit is set, check the entire word as we don't know
92 * the endianness of the data (old entry is host endian while a new entry is
94 static inline bool be_mcc_compl_is_new(struct be_mcc_compl
*compl)
98 if (compl->flags
!= 0) {
99 flags
= le32_to_cpu(compl->flags
);
100 if (flags
& CQE_FLAGS_VALID_MASK
) {
101 compl->flags
= flags
;
108 /* Need to reset the entire word that houses the valid bit */
109 static inline void be_mcc_compl_use(struct be_mcc_compl
*compl)
114 static struct be_cmd_resp_hdr
*be_decode_resp_hdr(u32 tag0
, u32 tag1
)
119 addr
= ((addr
<< 16) << 16) | tag0
;
123 static int be_mcc_compl_process(struct be_adapter
*adapter
,
124 struct be_mcc_compl
*compl)
126 u16 compl_status
, extd_status
;
127 struct be_cmd_resp_hdr
*resp_hdr
;
128 u8 opcode
= 0, subsystem
= 0;
130 /* Just swap the status to host endian; mcc tag is opaquely copied
132 be_dws_le_to_cpu(compl, 4);
134 compl_status
= (compl->status
>> CQE_STATUS_COMPL_SHIFT
) &
135 CQE_STATUS_COMPL_MASK
;
137 resp_hdr
= be_decode_resp_hdr(compl->tag0
, compl->tag1
);
140 opcode
= resp_hdr
->opcode
;
141 subsystem
= resp_hdr
->subsystem
;
144 if (((opcode
== OPCODE_COMMON_WRITE_FLASHROM
) ||
145 (opcode
== OPCODE_COMMON_WRITE_OBJECT
)) &&
146 (subsystem
== CMD_SUBSYSTEM_COMMON
)) {
147 adapter
->flash_status
= compl_status
;
148 complete(&adapter
->flash_compl
);
151 if (compl_status
== MCC_STATUS_SUCCESS
) {
152 if (((opcode
== OPCODE_ETH_GET_STATISTICS
) ||
153 (opcode
== OPCODE_ETH_GET_PPORT_STATS
)) &&
154 (subsystem
== CMD_SUBSYSTEM_ETH
)) {
155 be_parse_stats(adapter
);
156 adapter
->stats_cmd_sent
= false;
158 if (opcode
== OPCODE_COMMON_GET_CNTL_ADDITIONAL_ATTRIBUTES
&&
159 subsystem
== CMD_SUBSYSTEM_COMMON
) {
160 struct be_cmd_resp_get_cntl_addnl_attribs
*resp
=
162 adapter
->drv_stats
.be_on_die_temperature
=
163 resp
->on_die_temperature
;
166 if (opcode
== OPCODE_COMMON_GET_CNTL_ADDITIONAL_ATTRIBUTES
)
167 adapter
->be_get_temp_freq
= 0;
169 if (compl_status
== MCC_STATUS_NOT_SUPPORTED
||
170 compl_status
== MCC_STATUS_ILLEGAL_REQUEST
)
173 if (compl_status
== MCC_STATUS_UNAUTHORIZED_REQUEST
) {
174 dev_warn(&adapter
->pdev
->dev
,
175 "VF is not privileged to issue opcode %d-%d\n",
178 extd_status
= (compl->status
>> CQE_STATUS_EXTD_SHIFT
) &
179 CQE_STATUS_EXTD_MASK
;
180 dev_err(&adapter
->pdev
->dev
,
181 "opcode %d-%d failed:status %d-%d\n",
182 opcode
, subsystem
, compl_status
, extd_status
);
184 if (extd_status
== MCC_ADDL_STS_INSUFFICIENT_RESOURCES
)
192 /* Link state evt is a string of bytes; no need for endian swapping */
193 static void be_async_link_state_process(struct be_adapter
*adapter
,
194 struct be_async_event_link_state
*evt
)
196 /* When link status changes, link speed must be re-queried from FW */
197 adapter
->phy
.link_speed
= -1;
199 /* Ignore physical link event */
200 if (lancer_chip(adapter
) &&
201 !(evt
->port_link_status
& LOGICAL_LINK_STATUS_MASK
))
204 /* For the initial link status do not rely on the ASYNC event as
205 * it may not be received in some cases.
207 if (adapter
->flags
& BE_FLAGS_LINK_STATUS_INIT
)
208 be_link_status_update(adapter
, evt
->port_link_status
);
211 /* Grp5 CoS Priority evt */
212 static void be_async_grp5_cos_priority_process(struct be_adapter
*adapter
,
213 struct be_async_event_grp5_cos_priority
*evt
)
216 adapter
->vlan_prio_bmap
= evt
->available_priority_bmap
;
217 adapter
->recommended_prio
&= ~VLAN_PRIO_MASK
;
218 adapter
->recommended_prio
=
219 evt
->reco_default_priority
<< VLAN_PRIO_SHIFT
;
223 /* Grp5 QOS Speed evt: qos_link_speed is in units of 10 Mbps */
224 static void be_async_grp5_qos_speed_process(struct be_adapter
*adapter
,
225 struct be_async_event_grp5_qos_link_speed
*evt
)
227 if (adapter
->phy
.link_speed
>= 0 &&
228 evt
->physical_port
== adapter
->port_num
)
229 adapter
->phy
.link_speed
= le16_to_cpu(evt
->qos_link_speed
) * 10;
233 static void be_async_grp5_pvid_state_process(struct be_adapter
*adapter
,
234 struct be_async_event_grp5_pvid_state
*evt
)
237 adapter
->pvid
= le16_to_cpu(evt
->tag
) & VLAN_VID_MASK
;
242 static void be_async_grp5_evt_process(struct be_adapter
*adapter
,
243 u32 trailer
, struct be_mcc_compl
*evt
)
247 event_type
= (trailer
>> ASYNC_TRAILER_EVENT_TYPE_SHIFT
) &
248 ASYNC_TRAILER_EVENT_TYPE_MASK
;
250 switch (event_type
) {
251 case ASYNC_EVENT_COS_PRIORITY
:
252 be_async_grp5_cos_priority_process(adapter
,
253 (struct be_async_event_grp5_cos_priority
*)evt
);
255 case ASYNC_EVENT_QOS_SPEED
:
256 be_async_grp5_qos_speed_process(adapter
,
257 (struct be_async_event_grp5_qos_link_speed
*)evt
);
259 case ASYNC_EVENT_PVID_STATE
:
260 be_async_grp5_pvid_state_process(adapter
,
261 (struct be_async_event_grp5_pvid_state
*)evt
);
264 dev_warn(&adapter
->pdev
->dev
, "Unknown grp5 event 0x%x!\n",
270 static void be_async_dbg_evt_process(struct be_adapter
*adapter
,
271 u32 trailer
, struct be_mcc_compl
*cmp
)
274 struct be_async_event_qnq
*evt
= (struct be_async_event_qnq
*) cmp
;
276 event_type
= (trailer
>> ASYNC_TRAILER_EVENT_TYPE_SHIFT
) &
277 ASYNC_TRAILER_EVENT_TYPE_MASK
;
279 switch (event_type
) {
280 case ASYNC_DEBUG_EVENT_TYPE_QNQ
:
282 adapter
->qnq_vid
= le16_to_cpu(evt
->vlan_tag
);
283 adapter
->flags
|= BE_FLAGS_QNQ_ASYNC_EVT_RCVD
;
286 dev_warn(&adapter
->pdev
->dev
, "Unknown debug event 0x%x!\n",
292 static inline bool is_link_state_evt(u32 trailer
)
294 return ((trailer
>> ASYNC_TRAILER_EVENT_CODE_SHIFT
) &
295 ASYNC_TRAILER_EVENT_CODE_MASK
) ==
296 ASYNC_EVENT_CODE_LINK_STATE
;
299 static inline bool is_grp5_evt(u32 trailer
)
301 return (((trailer
>> ASYNC_TRAILER_EVENT_CODE_SHIFT
) &
302 ASYNC_TRAILER_EVENT_CODE_MASK
) ==
303 ASYNC_EVENT_CODE_GRP_5
);
306 static inline bool is_dbg_evt(u32 trailer
)
308 return (((trailer
>> ASYNC_TRAILER_EVENT_CODE_SHIFT
) &
309 ASYNC_TRAILER_EVENT_CODE_MASK
) ==
310 ASYNC_EVENT_CODE_QNQ
);
313 static struct be_mcc_compl
*be_mcc_compl_get(struct be_adapter
*adapter
)
315 struct be_queue_info
*mcc_cq
= &adapter
->mcc_obj
.cq
;
316 struct be_mcc_compl
*compl = queue_tail_node(mcc_cq
);
318 if (be_mcc_compl_is_new(compl)) {
319 queue_tail_inc(mcc_cq
);
325 void be_async_mcc_enable(struct be_adapter
*adapter
)
327 spin_lock_bh(&adapter
->mcc_cq_lock
);
329 be_cq_notify(adapter
, adapter
->mcc_obj
.cq
.id
, true, 0);
330 adapter
->mcc_obj
.rearm_cq
= true;
332 spin_unlock_bh(&adapter
->mcc_cq_lock
);
335 void be_async_mcc_disable(struct be_adapter
*adapter
)
337 spin_lock_bh(&adapter
->mcc_cq_lock
);
339 adapter
->mcc_obj
.rearm_cq
= false;
340 be_cq_notify(adapter
, adapter
->mcc_obj
.cq
.id
, false, 0);
342 spin_unlock_bh(&adapter
->mcc_cq_lock
);
345 int be_process_mcc(struct be_adapter
*adapter
)
347 struct be_mcc_compl
*compl;
348 int num
= 0, status
= 0;
349 struct be_mcc_obj
*mcc_obj
= &adapter
->mcc_obj
;
351 spin_lock(&adapter
->mcc_cq_lock
);
352 while ((compl = be_mcc_compl_get(adapter
))) {
353 if (compl->flags
& CQE_FLAGS_ASYNC_MASK
) {
354 /* Interpret flags as an async trailer */
355 if (is_link_state_evt(compl->flags
))
356 be_async_link_state_process(adapter
,
357 (struct be_async_event_link_state
*) compl);
358 else if (is_grp5_evt(compl->flags
))
359 be_async_grp5_evt_process(adapter
,
360 compl->flags
, compl);
361 else if (is_dbg_evt(compl->flags
))
362 be_async_dbg_evt_process(adapter
,
363 compl->flags
, compl);
364 } else if (compl->flags
& CQE_FLAGS_COMPLETED_MASK
) {
365 status
= be_mcc_compl_process(adapter
, compl);
366 atomic_dec(&mcc_obj
->q
.used
);
368 be_mcc_compl_use(compl);
373 be_cq_notify(adapter
, mcc_obj
->cq
.id
, mcc_obj
->rearm_cq
, num
);
375 spin_unlock(&adapter
->mcc_cq_lock
);
379 /* Wait till no more pending mcc requests are present */
380 static int be_mcc_wait_compl(struct be_adapter
*adapter
)
382 #define mcc_timeout 120000 /* 12s timeout */
384 struct be_mcc_obj
*mcc_obj
= &adapter
->mcc_obj
;
386 for (i
= 0; i
< mcc_timeout
; i
++) {
387 if (be_error(adapter
))
391 status
= be_process_mcc(adapter
);
394 if (atomic_read(&mcc_obj
->q
.used
) == 0)
398 if (i
== mcc_timeout
) {
399 dev_err(&adapter
->pdev
->dev
, "FW not responding\n");
400 adapter
->fw_timeout
= true;
406 /* Notify MCC requests and wait for completion */
407 static int be_mcc_notify_wait(struct be_adapter
*adapter
)
410 struct be_mcc_wrb
*wrb
;
411 struct be_mcc_obj
*mcc_obj
= &adapter
->mcc_obj
;
412 u16 index
= mcc_obj
->q
.head
;
413 struct be_cmd_resp_hdr
*resp
;
415 index_dec(&index
, mcc_obj
->q
.len
);
416 wrb
= queue_index_node(&mcc_obj
->q
, index
);
418 resp
= be_decode_resp_hdr(wrb
->tag0
, wrb
->tag1
);
420 be_mcc_notify(adapter
);
422 status
= be_mcc_wait_compl(adapter
);
426 status
= resp
->status
;
431 static int be_mbox_db_ready_wait(struct be_adapter
*adapter
, void __iomem
*db
)
437 if (be_error(adapter
))
440 ready
= ioread32(db
);
441 if (ready
== 0xffffffff)
444 ready
&= MPU_MAILBOX_DB_RDY_MASK
;
449 dev_err(&adapter
->pdev
->dev
, "FW not responding\n");
450 adapter
->fw_timeout
= true;
451 be_detect_error(adapter
);
463 * Insert the mailbox address into the doorbell in two steps
464 * Polls on the mbox doorbell till a command completion (or a timeout) occurs
466 static int be_mbox_notify_wait(struct be_adapter
*adapter
)
470 void __iomem
*db
= adapter
->db
+ MPU_MAILBOX_DB_OFFSET
;
471 struct be_dma_mem
*mbox_mem
= &adapter
->mbox_mem
;
472 struct be_mcc_mailbox
*mbox
= mbox_mem
->va
;
473 struct be_mcc_compl
*compl = &mbox
->compl;
475 /* wait for ready to be set */
476 status
= be_mbox_db_ready_wait(adapter
, db
);
480 val
|= MPU_MAILBOX_DB_HI_MASK
;
481 /* at bits 2 - 31 place mbox dma addr msb bits 34 - 63 */
482 val
|= (upper_32_bits(mbox_mem
->dma
) >> 2) << 2;
485 /* wait for ready to be set */
486 status
= be_mbox_db_ready_wait(adapter
, db
);
491 /* at bits 2 - 31 place mbox dma addr lsb bits 4 - 33 */
492 val
|= (u32
)(mbox_mem
->dma
>> 4) << 2;
495 status
= be_mbox_db_ready_wait(adapter
, db
);
499 /* A cq entry has been made now */
500 if (be_mcc_compl_is_new(compl)) {
501 status
= be_mcc_compl_process(adapter
, &mbox
->compl);
502 be_mcc_compl_use(compl);
506 dev_err(&adapter
->pdev
->dev
, "invalid mailbox completion\n");
512 static u16
be_POST_stage_get(struct be_adapter
*adapter
)
516 if (BEx_chip(adapter
))
517 sem
= ioread32(adapter
->csr
+ SLIPORT_SEMAPHORE_OFFSET_BEx
);
519 pci_read_config_dword(adapter
->pdev
,
520 SLIPORT_SEMAPHORE_OFFSET_SH
, &sem
);
522 return sem
& POST_STAGE_MASK
;
525 static int lancer_wait_ready(struct be_adapter
*adapter
)
527 #define SLIPORT_READY_TIMEOUT 30
531 for (i
= 0; i
< SLIPORT_READY_TIMEOUT
; i
++) {
532 sliport_status
= ioread32(adapter
->db
+ SLIPORT_STATUS_OFFSET
);
533 if (sliport_status
& SLIPORT_STATUS_RDY_MASK
)
539 if (i
== SLIPORT_READY_TIMEOUT
)
545 static bool lancer_provisioning_error(struct be_adapter
*adapter
)
547 u32 sliport_status
= 0, sliport_err1
= 0, sliport_err2
= 0;
548 sliport_status
= ioread32(adapter
->db
+ SLIPORT_STATUS_OFFSET
);
549 if (sliport_status
& SLIPORT_STATUS_ERR_MASK
) {
550 sliport_err1
= ioread32(adapter
->db
+
551 SLIPORT_ERROR1_OFFSET
);
552 sliport_err2
= ioread32(adapter
->db
+
553 SLIPORT_ERROR2_OFFSET
);
555 if (sliport_err1
== SLIPORT_ERROR_NO_RESOURCE1
&&
556 sliport_err2
== SLIPORT_ERROR_NO_RESOURCE2
)
562 int lancer_test_and_set_rdy_state(struct be_adapter
*adapter
)
565 u32 sliport_status
, err
, reset_needed
;
568 resource_error
= lancer_provisioning_error(adapter
);
572 status
= lancer_wait_ready(adapter
);
574 sliport_status
= ioread32(adapter
->db
+ SLIPORT_STATUS_OFFSET
);
575 err
= sliport_status
& SLIPORT_STATUS_ERR_MASK
;
576 reset_needed
= sliport_status
& SLIPORT_STATUS_RN_MASK
;
577 if (err
&& reset_needed
) {
578 iowrite32(SLI_PORT_CONTROL_IP_MASK
,
579 adapter
->db
+ SLIPORT_CONTROL_OFFSET
);
581 /* check adapter has corrected the error */
582 status
= lancer_wait_ready(adapter
);
583 sliport_status
= ioread32(adapter
->db
+
584 SLIPORT_STATUS_OFFSET
);
585 sliport_status
&= (SLIPORT_STATUS_ERR_MASK
|
586 SLIPORT_STATUS_RN_MASK
);
587 if (status
|| sliport_status
)
589 } else if (err
|| reset_needed
) {
593 /* Stop error recovery if error is not recoverable.
594 * No resource error is temporary errors and will go away
595 * when PF provisions resources.
597 resource_error
= lancer_provisioning_error(adapter
);
604 int be_fw_wait_ready(struct be_adapter
*adapter
)
607 int status
, timeout
= 0;
608 struct device
*dev
= &adapter
->pdev
->dev
;
610 if (lancer_chip(adapter
)) {
611 status
= lancer_wait_ready(adapter
);
616 stage
= be_POST_stage_get(adapter
);
617 if (stage
== POST_STAGE_ARMFW_RDY
)
620 dev_info(dev
, "Waiting for POST, %ds elapsed\n",
622 if (msleep_interruptible(2000)) {
623 dev_err(dev
, "Waiting for POST aborted\n");
627 } while (timeout
< 60);
629 dev_err(dev
, "POST timeout; stage=0x%x\n", stage
);
634 static inline struct be_sge
*nonembedded_sgl(struct be_mcc_wrb
*wrb
)
636 return &wrb
->payload
.sgl
[0];
639 static inline void fill_wrb_tags(struct be_mcc_wrb
*wrb
,
642 wrb
->tag0
= addr
& 0xFFFFFFFF;
643 wrb
->tag1
= upper_32_bits(addr
);
646 /* Don't touch the hdr after it's prepared */
647 /* mem will be NULL for embedded commands */
648 static void be_wrb_cmd_hdr_prepare(struct be_cmd_req_hdr
*req_hdr
,
649 u8 subsystem
, u8 opcode
, int cmd_len
,
650 struct be_mcc_wrb
*wrb
, struct be_dma_mem
*mem
)
654 req_hdr
->opcode
= opcode
;
655 req_hdr
->subsystem
= subsystem
;
656 req_hdr
->request_length
= cpu_to_le32(cmd_len
- sizeof(*req_hdr
));
657 req_hdr
->version
= 0;
658 fill_wrb_tags(wrb
, (ulong
) req_hdr
);
659 wrb
->payload_length
= cmd_len
;
661 wrb
->embedded
|= (1 & MCC_WRB_SGE_CNT_MASK
) <<
662 MCC_WRB_SGE_CNT_SHIFT
;
663 sge
= nonembedded_sgl(wrb
);
664 sge
->pa_hi
= cpu_to_le32(upper_32_bits(mem
->dma
));
665 sge
->pa_lo
= cpu_to_le32(mem
->dma
& 0xFFFFFFFF);
666 sge
->len
= cpu_to_le32(mem
->size
);
668 wrb
->embedded
|= MCC_WRB_EMBEDDED_MASK
;
669 be_dws_cpu_to_le(wrb
, 8);
672 static void be_cmd_page_addrs_prepare(struct phys_addr
*pages
, u32 max_pages
,
673 struct be_dma_mem
*mem
)
675 int i
, buf_pages
= min(PAGES_4K_SPANNED(mem
->va
, mem
->size
), max_pages
);
676 u64 dma
= (u64
)mem
->dma
;
678 for (i
= 0; i
< buf_pages
; i
++) {
679 pages
[i
].lo
= cpu_to_le32(dma
& 0xFFFFFFFF);
680 pages
[i
].hi
= cpu_to_le32(upper_32_bits(dma
));
685 static inline struct be_mcc_wrb
*wrb_from_mbox(struct be_adapter
*adapter
)
687 struct be_dma_mem
*mbox_mem
= &adapter
->mbox_mem
;
688 struct be_mcc_wrb
*wrb
689 = &((struct be_mcc_mailbox
*)(mbox_mem
->va
))->wrb
;
690 memset(wrb
, 0, sizeof(*wrb
));
694 static struct be_mcc_wrb
*wrb_from_mccq(struct be_adapter
*adapter
)
696 struct be_queue_info
*mccq
= &adapter
->mcc_obj
.q
;
697 struct be_mcc_wrb
*wrb
;
702 if (atomic_read(&mccq
->used
) >= mccq
->len
)
705 wrb
= queue_head_node(mccq
);
706 queue_head_inc(mccq
);
707 atomic_inc(&mccq
->used
);
708 memset(wrb
, 0, sizeof(*wrb
));
712 static bool use_mcc(struct be_adapter
*adapter
)
714 return adapter
->mcc_obj
.q
.created
;
717 /* Must be used only in process context */
718 static int be_cmd_lock(struct be_adapter
*adapter
)
720 if (use_mcc(adapter
)) {
721 spin_lock_bh(&adapter
->mcc_lock
);
724 return mutex_lock_interruptible(&adapter
->mbox_lock
);
728 /* Must be used only in process context */
729 static void be_cmd_unlock(struct be_adapter
*adapter
)
731 if (use_mcc(adapter
))
732 spin_unlock_bh(&adapter
->mcc_lock
);
734 return mutex_unlock(&adapter
->mbox_lock
);
737 static struct be_mcc_wrb
*be_cmd_copy(struct be_adapter
*adapter
,
738 struct be_mcc_wrb
*wrb
)
740 struct be_mcc_wrb
*dest_wrb
;
742 if (use_mcc(adapter
)) {
743 dest_wrb
= wrb_from_mccq(adapter
);
747 dest_wrb
= wrb_from_mbox(adapter
);
750 memcpy(dest_wrb
, wrb
, sizeof(*wrb
));
751 if (wrb
->embedded
& cpu_to_le32(MCC_WRB_EMBEDDED_MASK
))
752 fill_wrb_tags(dest_wrb
, (ulong
) embedded_payload(wrb
));
757 /* Must be used only in process context */
758 static int be_cmd_notify_wait(struct be_adapter
*adapter
,
759 struct be_mcc_wrb
*wrb
)
761 struct be_mcc_wrb
*dest_wrb
;
764 status
= be_cmd_lock(adapter
);
768 dest_wrb
= be_cmd_copy(adapter
, wrb
);
772 if (use_mcc(adapter
))
773 status
= be_mcc_notify_wait(adapter
);
775 status
= be_mbox_notify_wait(adapter
);
778 memcpy(wrb
, dest_wrb
, sizeof(*wrb
));
780 be_cmd_unlock(adapter
);
784 /* Tell fw we're about to start firing cmds by writing a
785 * special pattern across the wrb hdr; uses mbox
787 int be_cmd_fw_init(struct be_adapter
*adapter
)
792 if (lancer_chip(adapter
))
795 if (mutex_lock_interruptible(&adapter
->mbox_lock
))
798 wrb
= (u8
*)wrb_from_mbox(adapter
);
808 status
= be_mbox_notify_wait(adapter
);
810 mutex_unlock(&adapter
->mbox_lock
);
814 /* Tell fw we're done with firing cmds by writing a
815 * special pattern across the wrb hdr; uses mbox
817 int be_cmd_fw_clean(struct be_adapter
*adapter
)
822 if (lancer_chip(adapter
))
825 if (mutex_lock_interruptible(&adapter
->mbox_lock
))
828 wrb
= (u8
*)wrb_from_mbox(adapter
);
838 status
= be_mbox_notify_wait(adapter
);
840 mutex_unlock(&adapter
->mbox_lock
);
844 int be_cmd_eq_create(struct be_adapter
*adapter
, struct be_eq_obj
*eqo
)
846 struct be_mcc_wrb
*wrb
;
847 struct be_cmd_req_eq_create
*req
;
848 struct be_dma_mem
*q_mem
= &eqo
->q
.dma_mem
;
851 if (mutex_lock_interruptible(&adapter
->mbox_lock
))
854 wrb
= wrb_from_mbox(adapter
);
855 req
= embedded_payload(wrb
);
857 be_wrb_cmd_hdr_prepare(&req
->hdr
, CMD_SUBSYSTEM_COMMON
,
858 OPCODE_COMMON_EQ_CREATE
, sizeof(*req
), wrb
, NULL
);
860 /* Support for EQ_CREATEv2 available only SH-R onwards */
861 if (!(BEx_chip(adapter
) || lancer_chip(adapter
)))
864 req
->hdr
.version
= ver
;
865 req
->num_pages
= cpu_to_le16(PAGES_4K_SPANNED(q_mem
->va
, q_mem
->size
));
867 AMAP_SET_BITS(struct amap_eq_context
, valid
, req
->context
, 1);
869 AMAP_SET_BITS(struct amap_eq_context
, size
, req
->context
, 0);
870 AMAP_SET_BITS(struct amap_eq_context
, count
, req
->context
,
871 __ilog2_u32(eqo
->q
.len
/ 256));
872 be_dws_cpu_to_le(req
->context
, sizeof(req
->context
));
874 be_cmd_page_addrs_prepare(req
->pages
, ARRAY_SIZE(req
->pages
), q_mem
);
876 status
= be_mbox_notify_wait(adapter
);
878 struct be_cmd_resp_eq_create
*resp
= embedded_payload(wrb
);
879 eqo
->q
.id
= le16_to_cpu(resp
->eq_id
);
881 (ver
== 2) ? le16_to_cpu(resp
->msix_idx
) : eqo
->idx
;
882 eqo
->q
.created
= true;
885 mutex_unlock(&adapter
->mbox_lock
);
890 int be_cmd_mac_addr_query(struct be_adapter
*adapter
, u8
*mac_addr
,
891 bool permanent
, u32 if_handle
, u32 pmac_id
)
893 struct be_mcc_wrb
*wrb
;
894 struct be_cmd_req_mac_query
*req
;
897 spin_lock_bh(&adapter
->mcc_lock
);
899 wrb
= wrb_from_mccq(adapter
);
904 req
= embedded_payload(wrb
);
906 be_wrb_cmd_hdr_prepare(&req
->hdr
, CMD_SUBSYSTEM_COMMON
,
907 OPCODE_COMMON_NTWK_MAC_QUERY
, sizeof(*req
), wrb
, NULL
);
908 req
->type
= MAC_ADDRESS_TYPE_NETWORK
;
912 req
->if_id
= cpu_to_le16((u16
) if_handle
);
913 req
->pmac_id
= cpu_to_le32(pmac_id
);
917 status
= be_mcc_notify_wait(adapter
);
919 struct be_cmd_resp_mac_query
*resp
= embedded_payload(wrb
);
920 memcpy(mac_addr
, resp
->mac
.addr
, ETH_ALEN
);
924 spin_unlock_bh(&adapter
->mcc_lock
);
928 /* Uses synchronous MCCQ */
929 int be_cmd_pmac_add(struct be_adapter
*adapter
, u8
*mac_addr
,
930 u32 if_id
, u32
*pmac_id
, u32 domain
)
932 struct be_mcc_wrb
*wrb
;
933 struct be_cmd_req_pmac_add
*req
;
936 spin_lock_bh(&adapter
->mcc_lock
);
938 wrb
= wrb_from_mccq(adapter
);
943 req
= embedded_payload(wrb
);
945 be_wrb_cmd_hdr_prepare(&req
->hdr
, CMD_SUBSYSTEM_COMMON
,
946 OPCODE_COMMON_NTWK_PMAC_ADD
, sizeof(*req
), wrb
, NULL
);
948 req
->hdr
.domain
= domain
;
949 req
->if_id
= cpu_to_le32(if_id
);
950 memcpy(req
->mac_address
, mac_addr
, ETH_ALEN
);
952 status
= be_mcc_notify_wait(adapter
);
954 struct be_cmd_resp_pmac_add
*resp
= embedded_payload(wrb
);
955 *pmac_id
= le32_to_cpu(resp
->pmac_id
);
959 spin_unlock_bh(&adapter
->mcc_lock
);
961 if (status
== MCC_STATUS_UNAUTHORIZED_REQUEST
)
967 /* Uses synchronous MCCQ */
968 int be_cmd_pmac_del(struct be_adapter
*adapter
, u32 if_id
, int pmac_id
, u32 dom
)
970 struct be_mcc_wrb
*wrb
;
971 struct be_cmd_req_pmac_del
*req
;
977 spin_lock_bh(&adapter
->mcc_lock
);
979 wrb
= wrb_from_mccq(adapter
);
984 req
= embedded_payload(wrb
);
986 be_wrb_cmd_hdr_prepare(&req
->hdr
, CMD_SUBSYSTEM_COMMON
,
987 OPCODE_COMMON_NTWK_PMAC_DEL
, sizeof(*req
), wrb
, NULL
);
989 req
->hdr
.domain
= dom
;
990 req
->if_id
= cpu_to_le32(if_id
);
991 req
->pmac_id
= cpu_to_le32(pmac_id
);
993 status
= be_mcc_notify_wait(adapter
);
996 spin_unlock_bh(&adapter
->mcc_lock
);
1001 int be_cmd_cq_create(struct be_adapter
*adapter
, struct be_queue_info
*cq
,
1002 struct be_queue_info
*eq
, bool no_delay
, int coalesce_wm
)
1004 struct be_mcc_wrb
*wrb
;
1005 struct be_cmd_req_cq_create
*req
;
1006 struct be_dma_mem
*q_mem
= &cq
->dma_mem
;
1010 if (mutex_lock_interruptible(&adapter
->mbox_lock
))
1013 wrb
= wrb_from_mbox(adapter
);
1014 req
= embedded_payload(wrb
);
1015 ctxt
= &req
->context
;
1017 be_wrb_cmd_hdr_prepare(&req
->hdr
, CMD_SUBSYSTEM_COMMON
,
1018 OPCODE_COMMON_CQ_CREATE
, sizeof(*req
), wrb
, NULL
);
1020 req
->num_pages
= cpu_to_le16(PAGES_4K_SPANNED(q_mem
->va
, q_mem
->size
));
1022 if (BEx_chip(adapter
)) {
1023 AMAP_SET_BITS(struct amap_cq_context_be
, coalescwm
, ctxt
,
1025 AMAP_SET_BITS(struct amap_cq_context_be
, nodelay
,
1027 AMAP_SET_BITS(struct amap_cq_context_be
, count
, ctxt
,
1028 __ilog2_u32(cq
->len
/256));
1029 AMAP_SET_BITS(struct amap_cq_context_be
, valid
, ctxt
, 1);
1030 AMAP_SET_BITS(struct amap_cq_context_be
, eventable
, ctxt
, 1);
1031 AMAP_SET_BITS(struct amap_cq_context_be
, eqid
, ctxt
, eq
->id
);
1033 req
->hdr
.version
= 2;
1034 req
->page_size
= 1; /* 1 for 4K */
1035 AMAP_SET_BITS(struct amap_cq_context_v2
, nodelay
, ctxt
,
1037 AMAP_SET_BITS(struct amap_cq_context_v2
, count
, ctxt
,
1038 __ilog2_u32(cq
->len
/256));
1039 AMAP_SET_BITS(struct amap_cq_context_v2
, valid
, ctxt
, 1);
1040 AMAP_SET_BITS(struct amap_cq_context_v2
, eventable
,
1042 AMAP_SET_BITS(struct amap_cq_context_v2
, eqid
,
1046 be_dws_cpu_to_le(ctxt
, sizeof(req
->context
));
1048 be_cmd_page_addrs_prepare(req
->pages
, ARRAY_SIZE(req
->pages
), q_mem
);
1050 status
= be_mbox_notify_wait(adapter
);
1052 struct be_cmd_resp_cq_create
*resp
= embedded_payload(wrb
);
1053 cq
->id
= le16_to_cpu(resp
->cq_id
);
1057 mutex_unlock(&adapter
->mbox_lock
);
1062 static u32
be_encoded_q_len(int q_len
)
1064 u32 len_encoded
= fls(q_len
); /* log2(len) + 1 */
1065 if (len_encoded
== 16)
1070 static int be_cmd_mccq_ext_create(struct be_adapter
*adapter
,
1071 struct be_queue_info
*mccq
,
1072 struct be_queue_info
*cq
)
1074 struct be_mcc_wrb
*wrb
;
1075 struct be_cmd_req_mcc_ext_create
*req
;
1076 struct be_dma_mem
*q_mem
= &mccq
->dma_mem
;
1080 if (mutex_lock_interruptible(&adapter
->mbox_lock
))
1083 wrb
= wrb_from_mbox(adapter
);
1084 req
= embedded_payload(wrb
);
1085 ctxt
= &req
->context
;
1087 be_wrb_cmd_hdr_prepare(&req
->hdr
, CMD_SUBSYSTEM_COMMON
,
1088 OPCODE_COMMON_MCC_CREATE_EXT
, sizeof(*req
), wrb
, NULL
);
1090 req
->num_pages
= cpu_to_le16(PAGES_4K_SPANNED(q_mem
->va
, q_mem
->size
));
1091 if (lancer_chip(adapter
)) {
1092 req
->hdr
.version
= 1;
1093 req
->cq_id
= cpu_to_le16(cq
->id
);
1095 AMAP_SET_BITS(struct amap_mcc_context_lancer
, ring_size
, ctxt
,
1096 be_encoded_q_len(mccq
->len
));
1097 AMAP_SET_BITS(struct amap_mcc_context_lancer
, valid
, ctxt
, 1);
1098 AMAP_SET_BITS(struct amap_mcc_context_lancer
, async_cq_id
,
1100 AMAP_SET_BITS(struct amap_mcc_context_lancer
, async_cq_valid
,
1104 AMAP_SET_BITS(struct amap_mcc_context_be
, valid
, ctxt
, 1);
1105 AMAP_SET_BITS(struct amap_mcc_context_be
, ring_size
, ctxt
,
1106 be_encoded_q_len(mccq
->len
));
1107 AMAP_SET_BITS(struct amap_mcc_context_be
, cq_id
, ctxt
, cq
->id
);
1110 /* Subscribe to Link State and Group 5 Events(bits 1 and 5 set) */
1111 req
->async_event_bitmap
[0] = cpu_to_le32(0x00000022);
1112 req
->async_event_bitmap
[0] |= cpu_to_le32(1 << ASYNC_EVENT_CODE_QNQ
);
1113 be_dws_cpu_to_le(ctxt
, sizeof(req
->context
));
1115 be_cmd_page_addrs_prepare(req
->pages
, ARRAY_SIZE(req
->pages
), q_mem
);
1117 status
= be_mbox_notify_wait(adapter
);
1119 struct be_cmd_resp_mcc_create
*resp
= embedded_payload(wrb
);
1120 mccq
->id
= le16_to_cpu(resp
->id
);
1121 mccq
->created
= true;
1123 mutex_unlock(&adapter
->mbox_lock
);
1128 static int be_cmd_mccq_org_create(struct be_adapter
*adapter
,
1129 struct be_queue_info
*mccq
,
1130 struct be_queue_info
*cq
)
1132 struct be_mcc_wrb
*wrb
;
1133 struct be_cmd_req_mcc_create
*req
;
1134 struct be_dma_mem
*q_mem
= &mccq
->dma_mem
;
1138 if (mutex_lock_interruptible(&adapter
->mbox_lock
))
1141 wrb
= wrb_from_mbox(adapter
);
1142 req
= embedded_payload(wrb
);
1143 ctxt
= &req
->context
;
1145 be_wrb_cmd_hdr_prepare(&req
->hdr
, CMD_SUBSYSTEM_COMMON
,
1146 OPCODE_COMMON_MCC_CREATE
, sizeof(*req
), wrb
, NULL
);
1148 req
->num_pages
= cpu_to_le16(PAGES_4K_SPANNED(q_mem
->va
, q_mem
->size
));
1150 AMAP_SET_BITS(struct amap_mcc_context_be
, valid
, ctxt
, 1);
1151 AMAP_SET_BITS(struct amap_mcc_context_be
, ring_size
, ctxt
,
1152 be_encoded_q_len(mccq
->len
));
1153 AMAP_SET_BITS(struct amap_mcc_context_be
, cq_id
, ctxt
, cq
->id
);
1155 be_dws_cpu_to_le(ctxt
, sizeof(req
->context
));
1157 be_cmd_page_addrs_prepare(req
->pages
, ARRAY_SIZE(req
->pages
), q_mem
);
1159 status
= be_mbox_notify_wait(adapter
);
1161 struct be_cmd_resp_mcc_create
*resp
= embedded_payload(wrb
);
1162 mccq
->id
= le16_to_cpu(resp
->id
);
1163 mccq
->created
= true;
1166 mutex_unlock(&adapter
->mbox_lock
);
1170 int be_cmd_mccq_create(struct be_adapter
*adapter
,
1171 struct be_queue_info
*mccq
,
1172 struct be_queue_info
*cq
)
1176 status
= be_cmd_mccq_ext_create(adapter
, mccq
, cq
);
1177 if (status
&& !lancer_chip(adapter
)) {
1178 dev_warn(&adapter
->pdev
->dev
, "Upgrade to F/W ver 2.102.235.0 "
1179 "or newer to avoid conflicting priorities between NIC "
1180 "and FCoE traffic");
1181 status
= be_cmd_mccq_org_create(adapter
, mccq
, cq
);
1186 int be_cmd_txq_create(struct be_adapter
*adapter
, struct be_tx_obj
*txo
)
1188 struct be_mcc_wrb wrb
= {0};
1189 struct be_cmd_req_eth_tx_create
*req
;
1190 struct be_queue_info
*txq
= &txo
->q
;
1191 struct be_queue_info
*cq
= &txo
->cq
;
1192 struct be_dma_mem
*q_mem
= &txq
->dma_mem
;
1193 int status
, ver
= 0;
1195 req
= embedded_payload(&wrb
);
1196 be_wrb_cmd_hdr_prepare(&req
->hdr
, CMD_SUBSYSTEM_ETH
,
1197 OPCODE_ETH_TX_CREATE
, sizeof(*req
), &wrb
, NULL
);
1199 if (lancer_chip(adapter
)) {
1200 req
->hdr
.version
= 1;
1201 } else if (BEx_chip(adapter
)) {
1202 if (adapter
->function_caps
& BE_FUNCTION_CAPS_SUPER_NIC
)
1203 req
->hdr
.version
= 2;
1204 } else { /* For SH */
1205 req
->hdr
.version
= 2;
1208 if (req
->hdr
.version
> 0)
1209 req
->if_id
= cpu_to_le16(adapter
->if_handle
);
1210 req
->num_pages
= PAGES_4K_SPANNED(q_mem
->va
, q_mem
->size
);
1211 req
->ulp_num
= BE_ULP1_NUM
;
1212 req
->type
= BE_ETH_TX_RING_TYPE_STANDARD
;
1213 req
->cq_id
= cpu_to_le16(cq
->id
);
1214 req
->queue_size
= be_encoded_q_len(txq
->len
);
1215 be_cmd_page_addrs_prepare(req
->pages
, ARRAY_SIZE(req
->pages
), q_mem
);
1216 ver
= req
->hdr
.version
;
1218 status
= be_cmd_notify_wait(adapter
, &wrb
);
1220 struct be_cmd_resp_eth_tx_create
*resp
= embedded_payload(&wrb
);
1221 txq
->id
= le16_to_cpu(resp
->cid
);
1223 txo
->db_offset
= le32_to_cpu(resp
->db_offset
);
1225 txo
->db_offset
= DB_TXULP1_OFFSET
;
1226 txq
->created
= true;
1233 int be_cmd_rxq_create(struct be_adapter
*adapter
,
1234 struct be_queue_info
*rxq
, u16 cq_id
, u16 frag_size
,
1235 u32 if_id
, u32 rss
, u8
*rss_id
)
1237 struct be_mcc_wrb
*wrb
;
1238 struct be_cmd_req_eth_rx_create
*req
;
1239 struct be_dma_mem
*q_mem
= &rxq
->dma_mem
;
1242 spin_lock_bh(&adapter
->mcc_lock
);
1244 wrb
= wrb_from_mccq(adapter
);
1249 req
= embedded_payload(wrb
);
1251 be_wrb_cmd_hdr_prepare(&req
->hdr
, CMD_SUBSYSTEM_ETH
,
1252 OPCODE_ETH_RX_CREATE
, sizeof(*req
), wrb
, NULL
);
1254 req
->cq_id
= cpu_to_le16(cq_id
);
1255 req
->frag_size
= fls(frag_size
) - 1;
1257 be_cmd_page_addrs_prepare(req
->pages
, ARRAY_SIZE(req
->pages
), q_mem
);
1258 req
->interface_id
= cpu_to_le32(if_id
);
1259 req
->max_frame_size
= cpu_to_le16(BE_MAX_JUMBO_FRAME_SIZE
);
1260 req
->rss_queue
= cpu_to_le32(rss
);
1262 status
= be_mcc_notify_wait(adapter
);
1264 struct be_cmd_resp_eth_rx_create
*resp
= embedded_payload(wrb
);
1265 rxq
->id
= le16_to_cpu(resp
->id
);
1266 rxq
->created
= true;
1267 *rss_id
= resp
->rss_id
;
1271 spin_unlock_bh(&adapter
->mcc_lock
);
1275 /* Generic destroyer function for all types of queues
1278 int be_cmd_q_destroy(struct be_adapter
*adapter
, struct be_queue_info
*q
,
1281 struct be_mcc_wrb
*wrb
;
1282 struct be_cmd_req_q_destroy
*req
;
1283 u8 subsys
= 0, opcode
= 0;
1286 if (mutex_lock_interruptible(&adapter
->mbox_lock
))
1289 wrb
= wrb_from_mbox(adapter
);
1290 req
= embedded_payload(wrb
);
1292 switch (queue_type
) {
1294 subsys
= CMD_SUBSYSTEM_COMMON
;
1295 opcode
= OPCODE_COMMON_EQ_DESTROY
;
1298 subsys
= CMD_SUBSYSTEM_COMMON
;
1299 opcode
= OPCODE_COMMON_CQ_DESTROY
;
1302 subsys
= CMD_SUBSYSTEM_ETH
;
1303 opcode
= OPCODE_ETH_TX_DESTROY
;
1306 subsys
= CMD_SUBSYSTEM_ETH
;
1307 opcode
= OPCODE_ETH_RX_DESTROY
;
1310 subsys
= CMD_SUBSYSTEM_COMMON
;
1311 opcode
= OPCODE_COMMON_MCC_DESTROY
;
1317 be_wrb_cmd_hdr_prepare(&req
->hdr
, subsys
, opcode
, sizeof(*req
), wrb
,
1319 req
->id
= cpu_to_le16(q
->id
);
1321 status
= be_mbox_notify_wait(adapter
);
1324 mutex_unlock(&adapter
->mbox_lock
);
1329 int be_cmd_rxq_destroy(struct be_adapter
*adapter
, struct be_queue_info
*q
)
1331 struct be_mcc_wrb
*wrb
;
1332 struct be_cmd_req_q_destroy
*req
;
1335 spin_lock_bh(&adapter
->mcc_lock
);
1337 wrb
= wrb_from_mccq(adapter
);
1342 req
= embedded_payload(wrb
);
1344 be_wrb_cmd_hdr_prepare(&req
->hdr
, CMD_SUBSYSTEM_ETH
,
1345 OPCODE_ETH_RX_DESTROY
, sizeof(*req
), wrb
, NULL
);
1346 req
->id
= cpu_to_le16(q
->id
);
1348 status
= be_mcc_notify_wait(adapter
);
1352 spin_unlock_bh(&adapter
->mcc_lock
);
1356 /* Create an rx filtering policy configuration on an i/f
1357 * Will use MBOX only if MCCQ has not been created.
1359 int be_cmd_if_create(struct be_adapter
*adapter
, u32 cap_flags
, u32 en_flags
,
1360 u32
*if_handle
, u32 domain
)
1362 struct be_mcc_wrb wrb
= {0};
1363 struct be_cmd_req_if_create
*req
;
1366 req
= embedded_payload(&wrb
);
1367 be_wrb_cmd_hdr_prepare(&req
->hdr
, CMD_SUBSYSTEM_COMMON
,
1368 OPCODE_COMMON_NTWK_INTERFACE_CREATE
, sizeof(*req
), &wrb
, NULL
);
1369 req
->hdr
.domain
= domain
;
1370 req
->capability_flags
= cpu_to_le32(cap_flags
);
1371 req
->enable_flags
= cpu_to_le32(en_flags
);
1372 req
->pmac_invalid
= true;
1374 status
= be_cmd_notify_wait(adapter
, &wrb
);
1376 struct be_cmd_resp_if_create
*resp
= embedded_payload(&wrb
);
1377 *if_handle
= le32_to_cpu(resp
->interface_id
);
1379 /* Hack to retrieve VF's pmac-id on BE3 */
1380 if (BE3_chip(adapter
) && !be_physfn(adapter
))
1381 adapter
->pmac_id
[0] = le32_to_cpu(resp
->pmac_id
);
1387 int be_cmd_if_destroy(struct be_adapter
*adapter
, int interface_id
, u32 domain
)
1389 struct be_mcc_wrb
*wrb
;
1390 struct be_cmd_req_if_destroy
*req
;
1393 if (interface_id
== -1)
1396 spin_lock_bh(&adapter
->mcc_lock
);
1398 wrb
= wrb_from_mccq(adapter
);
1403 req
= embedded_payload(wrb
);
1405 be_wrb_cmd_hdr_prepare(&req
->hdr
, CMD_SUBSYSTEM_COMMON
,
1406 OPCODE_COMMON_NTWK_INTERFACE_DESTROY
, sizeof(*req
), wrb
, NULL
);
1407 req
->hdr
.domain
= domain
;
1408 req
->interface_id
= cpu_to_le32(interface_id
);
1410 status
= be_mcc_notify_wait(adapter
);
1412 spin_unlock_bh(&adapter
->mcc_lock
);
1416 /* Get stats is a non embedded command: the request is not embedded inside
1417 * WRB but is a separate dma memory block
1418 * Uses asynchronous MCC
1420 int be_cmd_get_stats(struct be_adapter
*adapter
, struct be_dma_mem
*nonemb_cmd
)
1422 struct be_mcc_wrb
*wrb
;
1423 struct be_cmd_req_hdr
*hdr
;
1426 spin_lock_bh(&adapter
->mcc_lock
);
1428 wrb
= wrb_from_mccq(adapter
);
1433 hdr
= nonemb_cmd
->va
;
1435 be_wrb_cmd_hdr_prepare(hdr
, CMD_SUBSYSTEM_ETH
,
1436 OPCODE_ETH_GET_STATISTICS
, nonemb_cmd
->size
, wrb
, nonemb_cmd
);
1438 /* version 1 of the cmd is not supported only by BE2 */
1439 if (BE2_chip(adapter
))
1441 if (BE3_chip(adapter
) || lancer_chip(adapter
))
1446 be_mcc_notify(adapter
);
1447 adapter
->stats_cmd_sent
= true;
1450 spin_unlock_bh(&adapter
->mcc_lock
);
1455 int lancer_cmd_get_pport_stats(struct be_adapter
*adapter
,
1456 struct be_dma_mem
*nonemb_cmd
)
1459 struct be_mcc_wrb
*wrb
;
1460 struct lancer_cmd_req_pport_stats
*req
;
1463 if (!be_cmd_allowed(adapter
, OPCODE_ETH_GET_PPORT_STATS
,
1467 spin_lock_bh(&adapter
->mcc_lock
);
1469 wrb
= wrb_from_mccq(adapter
);
1474 req
= nonemb_cmd
->va
;
1476 be_wrb_cmd_hdr_prepare(&req
->hdr
, CMD_SUBSYSTEM_ETH
,
1477 OPCODE_ETH_GET_PPORT_STATS
, nonemb_cmd
->size
, wrb
,
1480 req
->cmd_params
.params
.pport_num
= cpu_to_le16(adapter
->hba_port_num
);
1481 req
->cmd_params
.params
.reset_stats
= 0;
1483 be_mcc_notify(adapter
);
1484 adapter
->stats_cmd_sent
= true;
1487 spin_unlock_bh(&adapter
->mcc_lock
);
1491 static int be_mac_to_link_speed(int mac_speed
)
1493 switch (mac_speed
) {
1494 case PHY_LINK_SPEED_ZERO
:
1496 case PHY_LINK_SPEED_10MBPS
:
1498 case PHY_LINK_SPEED_100MBPS
:
1500 case PHY_LINK_SPEED_1GBPS
:
1502 case PHY_LINK_SPEED_10GBPS
:
1504 case PHY_LINK_SPEED_20GBPS
:
1506 case PHY_LINK_SPEED_25GBPS
:
1508 case PHY_LINK_SPEED_40GBPS
:
1514 /* Uses synchronous mcc
1515 * Returns link_speed in Mbps
1517 int be_cmd_link_status_query(struct be_adapter
*adapter
, u16
*link_speed
,
1518 u8
*link_status
, u32 dom
)
1520 struct be_mcc_wrb
*wrb
;
1521 struct be_cmd_req_link_status
*req
;
1524 spin_lock_bh(&adapter
->mcc_lock
);
1527 *link_status
= LINK_DOWN
;
1529 wrb
= wrb_from_mccq(adapter
);
1534 req
= embedded_payload(wrb
);
1536 be_wrb_cmd_hdr_prepare(&req
->hdr
, CMD_SUBSYSTEM_COMMON
,
1537 OPCODE_COMMON_NTWK_LINK_STATUS_QUERY
, sizeof(*req
), wrb
, NULL
);
1539 /* version 1 of the cmd is not supported only by BE2 */
1540 if (!BE2_chip(adapter
))
1541 req
->hdr
.version
= 1;
1543 req
->hdr
.domain
= dom
;
1545 status
= be_mcc_notify_wait(adapter
);
1547 struct be_cmd_resp_link_status
*resp
= embedded_payload(wrb
);
1549 *link_speed
= resp
->link_speed
?
1550 le16_to_cpu(resp
->link_speed
) * 10 :
1551 be_mac_to_link_speed(resp
->mac_speed
);
1553 if (!resp
->logical_link_status
)
1557 *link_status
= resp
->logical_link_status
;
1561 spin_unlock_bh(&adapter
->mcc_lock
);
1565 /* Uses synchronous mcc */
1566 int be_cmd_get_die_temperature(struct be_adapter
*adapter
)
1568 struct be_mcc_wrb
*wrb
;
1569 struct be_cmd_req_get_cntl_addnl_attribs
*req
;
1572 spin_lock_bh(&adapter
->mcc_lock
);
1574 wrb
= wrb_from_mccq(adapter
);
1579 req
= embedded_payload(wrb
);
1581 be_wrb_cmd_hdr_prepare(&req
->hdr
, CMD_SUBSYSTEM_COMMON
,
1582 OPCODE_COMMON_GET_CNTL_ADDITIONAL_ATTRIBUTES
, sizeof(*req
),
1585 be_mcc_notify(adapter
);
1588 spin_unlock_bh(&adapter
->mcc_lock
);
1592 /* Uses synchronous mcc */
1593 int be_cmd_get_reg_len(struct be_adapter
*adapter
, u32
*log_size
)
1595 struct be_mcc_wrb
*wrb
;
1596 struct be_cmd_req_get_fat
*req
;
1599 spin_lock_bh(&adapter
->mcc_lock
);
1601 wrb
= wrb_from_mccq(adapter
);
1606 req
= embedded_payload(wrb
);
1608 be_wrb_cmd_hdr_prepare(&req
->hdr
, CMD_SUBSYSTEM_COMMON
,
1609 OPCODE_COMMON_MANAGE_FAT
, sizeof(*req
), wrb
, NULL
);
1610 req
->fat_operation
= cpu_to_le32(QUERY_FAT
);
1611 status
= be_mcc_notify_wait(adapter
);
1613 struct be_cmd_resp_get_fat
*resp
= embedded_payload(wrb
);
1614 if (log_size
&& resp
->log_size
)
1615 *log_size
= le32_to_cpu(resp
->log_size
) -
1619 spin_unlock_bh(&adapter
->mcc_lock
);
1623 void be_cmd_get_regs(struct be_adapter
*adapter
, u32 buf_len
, void *buf
)
1625 struct be_dma_mem get_fat_cmd
;
1626 struct be_mcc_wrb
*wrb
;
1627 struct be_cmd_req_get_fat
*req
;
1628 u32 offset
= 0, total_size
, buf_size
,
1629 log_offset
= sizeof(u32
), payload_len
;
1635 total_size
= buf_len
;
1637 get_fat_cmd
.size
= sizeof(struct be_cmd_req_get_fat
) + 60*1024;
1638 get_fat_cmd
.va
= pci_alloc_consistent(adapter
->pdev
,
1641 if (!get_fat_cmd
.va
) {
1643 dev_err(&adapter
->pdev
->dev
,
1644 "Memory allocation failure while retrieving FAT data\n");
1648 spin_lock_bh(&adapter
->mcc_lock
);
1650 while (total_size
) {
1651 buf_size
= min(total_size
, (u32
)60*1024);
1652 total_size
-= buf_size
;
1654 wrb
= wrb_from_mccq(adapter
);
1659 req
= get_fat_cmd
.va
;
1661 payload_len
= sizeof(struct be_cmd_req_get_fat
) + buf_size
;
1662 be_wrb_cmd_hdr_prepare(&req
->hdr
, CMD_SUBSYSTEM_COMMON
,
1663 OPCODE_COMMON_MANAGE_FAT
, payload_len
, wrb
,
1666 req
->fat_operation
= cpu_to_le32(RETRIEVE_FAT
);
1667 req
->read_log_offset
= cpu_to_le32(log_offset
);
1668 req
->read_log_length
= cpu_to_le32(buf_size
);
1669 req
->data_buffer_size
= cpu_to_le32(buf_size
);
1671 status
= be_mcc_notify_wait(adapter
);
1673 struct be_cmd_resp_get_fat
*resp
= get_fat_cmd
.va
;
1674 memcpy(buf
+ offset
,
1676 le32_to_cpu(resp
->read_log_length
));
1678 dev_err(&adapter
->pdev
->dev
, "FAT Table Retrieve error\n");
1682 log_offset
+= buf_size
;
1685 pci_free_consistent(adapter
->pdev
, get_fat_cmd
.size
,
1688 spin_unlock_bh(&adapter
->mcc_lock
);
1691 /* Uses synchronous mcc */
1692 int be_cmd_get_fw_ver(struct be_adapter
*adapter
, char *fw_ver
,
1695 struct be_mcc_wrb
*wrb
;
1696 struct be_cmd_req_get_fw_version
*req
;
1699 spin_lock_bh(&adapter
->mcc_lock
);
1701 wrb
= wrb_from_mccq(adapter
);
1707 req
= embedded_payload(wrb
);
1709 be_wrb_cmd_hdr_prepare(&req
->hdr
, CMD_SUBSYSTEM_COMMON
,
1710 OPCODE_COMMON_GET_FW_VERSION
, sizeof(*req
), wrb
, NULL
);
1711 status
= be_mcc_notify_wait(adapter
);
1713 struct be_cmd_resp_get_fw_version
*resp
= embedded_payload(wrb
);
1714 strcpy(fw_ver
, resp
->firmware_version_string
);
1716 strcpy(fw_on_flash
, resp
->fw_on_flash_version_string
);
1719 spin_unlock_bh(&adapter
->mcc_lock
);
1723 /* set the EQ delay interval of an EQ to specified value
1726 int be_cmd_modify_eqd(struct be_adapter
*adapter
, struct be_set_eqd
*set_eqd
,
1729 struct be_mcc_wrb
*wrb
;
1730 struct be_cmd_req_modify_eq_delay
*req
;
1733 spin_lock_bh(&adapter
->mcc_lock
);
1735 wrb
= wrb_from_mccq(adapter
);
1740 req
= embedded_payload(wrb
);
1742 be_wrb_cmd_hdr_prepare(&req
->hdr
, CMD_SUBSYSTEM_COMMON
,
1743 OPCODE_COMMON_MODIFY_EQ_DELAY
, sizeof(*req
), wrb
, NULL
);
1745 req
->num_eq
= cpu_to_le32(num
);
1746 for (i
= 0; i
< num
; i
++) {
1747 req
->set_eqd
[i
].eq_id
= cpu_to_le32(set_eqd
[i
].eq_id
);
1748 req
->set_eqd
[i
].phase
= 0;
1749 req
->set_eqd
[i
].delay_multiplier
=
1750 cpu_to_le32(set_eqd
[i
].delay_multiplier
);
1753 be_mcc_notify(adapter
);
1755 spin_unlock_bh(&adapter
->mcc_lock
);
1759 /* Uses sycnhronous mcc */
1760 int be_cmd_vlan_config(struct be_adapter
*adapter
, u32 if_id
, u16
*vtag_array
,
1761 u32 num
, bool untagged
, bool promiscuous
)
1763 struct be_mcc_wrb
*wrb
;
1764 struct be_cmd_req_vlan_config
*req
;
1767 spin_lock_bh(&adapter
->mcc_lock
);
1769 wrb
= wrb_from_mccq(adapter
);
1774 req
= embedded_payload(wrb
);
1776 be_wrb_cmd_hdr_prepare(&req
->hdr
, CMD_SUBSYSTEM_COMMON
,
1777 OPCODE_COMMON_NTWK_VLAN_CONFIG
, sizeof(*req
), wrb
, NULL
);
1779 req
->interface_id
= if_id
;
1780 req
->promiscuous
= promiscuous
;
1781 req
->untagged
= untagged
;
1782 req
->num_vlan
= num
;
1784 memcpy(req
->normal_vlan
, vtag_array
,
1785 req
->num_vlan
* sizeof(vtag_array
[0]));
1788 status
= be_mcc_notify_wait(adapter
);
1791 spin_unlock_bh(&adapter
->mcc_lock
);
1795 int be_cmd_rx_filter(struct be_adapter
*adapter
, u32 flags
, u32 value
)
1797 struct be_mcc_wrb
*wrb
;
1798 struct be_dma_mem
*mem
= &adapter
->rx_filter
;
1799 struct be_cmd_req_rx_filter
*req
= mem
->va
;
1802 spin_lock_bh(&adapter
->mcc_lock
);
1804 wrb
= wrb_from_mccq(adapter
);
1809 memset(req
, 0, sizeof(*req
));
1810 be_wrb_cmd_hdr_prepare(&req
->hdr
, CMD_SUBSYSTEM_COMMON
,
1811 OPCODE_COMMON_NTWK_RX_FILTER
, sizeof(*req
),
1814 req
->if_id
= cpu_to_le32(adapter
->if_handle
);
1815 if (flags
& IFF_PROMISC
) {
1816 req
->if_flags_mask
= cpu_to_le32(BE_IF_FLAGS_PROMISCUOUS
|
1817 BE_IF_FLAGS_VLAN_PROMISCUOUS
|
1818 BE_IF_FLAGS_MCAST_PROMISCUOUS
);
1820 req
->if_flags
= cpu_to_le32(BE_IF_FLAGS_PROMISCUOUS
|
1821 BE_IF_FLAGS_VLAN_PROMISCUOUS
|
1822 BE_IF_FLAGS_MCAST_PROMISCUOUS
);
1823 } else if (flags
& IFF_ALLMULTI
) {
1824 req
->if_flags_mask
= req
->if_flags
=
1825 cpu_to_le32(BE_IF_FLAGS_MCAST_PROMISCUOUS
);
1826 } else if (flags
& BE_FLAGS_VLAN_PROMISC
) {
1827 req
->if_flags_mask
= cpu_to_le32(BE_IF_FLAGS_VLAN_PROMISCUOUS
);
1831 cpu_to_le32(BE_IF_FLAGS_VLAN_PROMISCUOUS
);
1833 struct netdev_hw_addr
*ha
;
1836 req
->if_flags_mask
= req
->if_flags
=
1837 cpu_to_le32(BE_IF_FLAGS_MULTICAST
);
1839 /* Reset mcast promisc mode if already set by setting mask
1840 * and not setting flags field
1842 req
->if_flags_mask
|=
1843 cpu_to_le32(BE_IF_FLAGS_MCAST_PROMISCUOUS
&
1844 be_if_cap_flags(adapter
));
1845 req
->mcast_num
= cpu_to_le32(netdev_mc_count(adapter
->netdev
));
1846 netdev_for_each_mc_addr(ha
, adapter
->netdev
)
1847 memcpy(req
->mcast_mac
[i
++].byte
, ha
->addr
, ETH_ALEN
);
1850 status
= be_mcc_notify_wait(adapter
);
1852 spin_unlock_bh(&adapter
->mcc_lock
);
1856 /* Uses synchrounous mcc */
1857 int be_cmd_set_flow_control(struct be_adapter
*adapter
, u32 tx_fc
, u32 rx_fc
)
1859 struct be_mcc_wrb
*wrb
;
1860 struct be_cmd_req_set_flow_control
*req
;
1863 if (!be_cmd_allowed(adapter
, OPCODE_COMMON_SET_FLOW_CONTROL
,
1864 CMD_SUBSYSTEM_COMMON
))
1867 spin_lock_bh(&adapter
->mcc_lock
);
1869 wrb
= wrb_from_mccq(adapter
);
1874 req
= embedded_payload(wrb
);
1876 be_wrb_cmd_hdr_prepare(&req
->hdr
, CMD_SUBSYSTEM_COMMON
,
1877 OPCODE_COMMON_SET_FLOW_CONTROL
, sizeof(*req
), wrb
, NULL
);
1879 req
->tx_flow_control
= cpu_to_le16((u16
)tx_fc
);
1880 req
->rx_flow_control
= cpu_to_le16((u16
)rx_fc
);
1882 status
= be_mcc_notify_wait(adapter
);
1885 spin_unlock_bh(&adapter
->mcc_lock
);
1890 int be_cmd_get_flow_control(struct be_adapter
*adapter
, u32
*tx_fc
, u32
*rx_fc
)
1892 struct be_mcc_wrb
*wrb
;
1893 struct be_cmd_req_get_flow_control
*req
;
1896 if (!be_cmd_allowed(adapter
, OPCODE_COMMON_GET_FLOW_CONTROL
,
1897 CMD_SUBSYSTEM_COMMON
))
1900 spin_lock_bh(&adapter
->mcc_lock
);
1902 wrb
= wrb_from_mccq(adapter
);
1907 req
= embedded_payload(wrb
);
1909 be_wrb_cmd_hdr_prepare(&req
->hdr
, CMD_SUBSYSTEM_COMMON
,
1910 OPCODE_COMMON_GET_FLOW_CONTROL
, sizeof(*req
), wrb
, NULL
);
1912 status
= be_mcc_notify_wait(adapter
);
1914 struct be_cmd_resp_get_flow_control
*resp
=
1915 embedded_payload(wrb
);
1916 *tx_fc
= le16_to_cpu(resp
->tx_flow_control
);
1917 *rx_fc
= le16_to_cpu(resp
->rx_flow_control
);
1921 spin_unlock_bh(&adapter
->mcc_lock
);
1926 int be_cmd_query_fw_cfg(struct be_adapter
*adapter
, u32
*port_num
,
1927 u32
*mode
, u32
*caps
, u16
*asic_rev
)
1929 struct be_mcc_wrb
*wrb
;
1930 struct be_cmd_req_query_fw_cfg
*req
;
1933 if (mutex_lock_interruptible(&adapter
->mbox_lock
))
1936 wrb
= wrb_from_mbox(adapter
);
1937 req
= embedded_payload(wrb
);
1939 be_wrb_cmd_hdr_prepare(&req
->hdr
, CMD_SUBSYSTEM_COMMON
,
1940 OPCODE_COMMON_QUERY_FIRMWARE_CONFIG
, sizeof(*req
), wrb
, NULL
);
1942 status
= be_mbox_notify_wait(adapter
);
1944 struct be_cmd_resp_query_fw_cfg
*resp
= embedded_payload(wrb
);
1945 *port_num
= le32_to_cpu(resp
->phys_port
);
1946 *mode
= le32_to_cpu(resp
->function_mode
);
1947 *caps
= le32_to_cpu(resp
->function_caps
);
1948 *asic_rev
= le32_to_cpu(resp
->asic_revision
) & 0xFF;
1951 mutex_unlock(&adapter
->mbox_lock
);
1956 int be_cmd_reset_function(struct be_adapter
*adapter
)
1958 struct be_mcc_wrb
*wrb
;
1959 struct be_cmd_req_hdr
*req
;
1962 if (lancer_chip(adapter
)) {
1963 status
= lancer_wait_ready(adapter
);
1965 iowrite32(SLI_PORT_CONTROL_IP_MASK
,
1966 adapter
->db
+ SLIPORT_CONTROL_OFFSET
);
1967 status
= lancer_test_and_set_rdy_state(adapter
);
1970 dev_err(&adapter
->pdev
->dev
,
1971 "Adapter in non recoverable error\n");
1976 if (mutex_lock_interruptible(&adapter
->mbox_lock
))
1979 wrb
= wrb_from_mbox(adapter
);
1980 req
= embedded_payload(wrb
);
1982 be_wrb_cmd_hdr_prepare(req
, CMD_SUBSYSTEM_COMMON
,
1983 OPCODE_COMMON_FUNCTION_RESET
, sizeof(*req
), wrb
, NULL
);
1985 status
= be_mbox_notify_wait(adapter
);
1987 mutex_unlock(&adapter
->mbox_lock
);
1991 int be_cmd_rss_config(struct be_adapter
*adapter
, u8
*rsstable
,
1992 u32 rss_hash_opts
, u16 table_size
)
1994 struct be_mcc_wrb
*wrb
;
1995 struct be_cmd_req_rss_config
*req
;
1996 u32 myhash
[10] = {0x15d43fa5, 0x2534685a, 0x5f87693a, 0x5668494e,
1997 0x33cf6a53, 0x383334c6, 0x76ac4257, 0x59b242b2,
1998 0x3ea83c02, 0x4a110304};
2001 if (mutex_lock_interruptible(&adapter
->mbox_lock
))
2004 wrb
= wrb_from_mbox(adapter
);
2005 req
= embedded_payload(wrb
);
2007 be_wrb_cmd_hdr_prepare(&req
->hdr
, CMD_SUBSYSTEM_ETH
,
2008 OPCODE_ETH_RSS_CONFIG
, sizeof(*req
), wrb
, NULL
);
2010 req
->if_id
= cpu_to_le32(adapter
->if_handle
);
2011 req
->enable_rss
= cpu_to_le16(rss_hash_opts
);
2012 req
->cpu_table_size_log2
= cpu_to_le16(fls(table_size
) - 1);
2014 if (lancer_chip(adapter
) || skyhawk_chip(adapter
))
2015 req
->hdr
.version
= 1;
2017 memcpy(req
->cpu_table
, rsstable
, table_size
);
2018 memcpy(req
->hash
, myhash
, sizeof(myhash
));
2019 be_dws_cpu_to_le(req
->hash
, sizeof(req
->hash
));
2021 status
= be_mbox_notify_wait(adapter
);
2023 mutex_unlock(&adapter
->mbox_lock
);
2028 int be_cmd_set_beacon_state(struct be_adapter
*adapter
, u8 port_num
,
2029 u8 bcn
, u8 sts
, u8 state
)
2031 struct be_mcc_wrb
*wrb
;
2032 struct be_cmd_req_enable_disable_beacon
*req
;
2035 spin_lock_bh(&adapter
->mcc_lock
);
2037 wrb
= wrb_from_mccq(adapter
);
2042 req
= embedded_payload(wrb
);
2044 be_wrb_cmd_hdr_prepare(&req
->hdr
, CMD_SUBSYSTEM_COMMON
,
2045 OPCODE_COMMON_ENABLE_DISABLE_BEACON
, sizeof(*req
), wrb
, NULL
);
2047 req
->port_num
= port_num
;
2048 req
->beacon_state
= state
;
2049 req
->beacon_duration
= bcn
;
2050 req
->status_duration
= sts
;
2052 status
= be_mcc_notify_wait(adapter
);
2055 spin_unlock_bh(&adapter
->mcc_lock
);
2060 int be_cmd_get_beacon_state(struct be_adapter
*adapter
, u8 port_num
, u32
*state
)
2062 struct be_mcc_wrb
*wrb
;
2063 struct be_cmd_req_get_beacon_state
*req
;
2066 spin_lock_bh(&adapter
->mcc_lock
);
2068 wrb
= wrb_from_mccq(adapter
);
2073 req
= embedded_payload(wrb
);
2075 be_wrb_cmd_hdr_prepare(&req
->hdr
, CMD_SUBSYSTEM_COMMON
,
2076 OPCODE_COMMON_GET_BEACON_STATE
, sizeof(*req
), wrb
, NULL
);
2078 req
->port_num
= port_num
;
2080 status
= be_mcc_notify_wait(adapter
);
2082 struct be_cmd_resp_get_beacon_state
*resp
=
2083 embedded_payload(wrb
);
2084 *state
= resp
->beacon_state
;
2088 spin_unlock_bh(&adapter
->mcc_lock
);
2092 int lancer_cmd_write_object(struct be_adapter
*adapter
, struct be_dma_mem
*cmd
,
2093 u32 data_size
, u32 data_offset
,
2094 const char *obj_name
, u32
*data_written
,
2095 u8
*change_status
, u8
*addn_status
)
2097 struct be_mcc_wrb
*wrb
;
2098 struct lancer_cmd_req_write_object
*req
;
2099 struct lancer_cmd_resp_write_object
*resp
;
2103 spin_lock_bh(&adapter
->mcc_lock
);
2104 adapter
->flash_status
= 0;
2106 wrb
= wrb_from_mccq(adapter
);
2112 req
= embedded_payload(wrb
);
2114 be_wrb_cmd_hdr_prepare(&req
->hdr
, CMD_SUBSYSTEM_COMMON
,
2115 OPCODE_COMMON_WRITE_OBJECT
,
2116 sizeof(struct lancer_cmd_req_write_object
), wrb
,
2119 ctxt
= &req
->context
;
2120 AMAP_SET_BITS(struct amap_lancer_write_obj_context
,
2121 write_length
, ctxt
, data_size
);
2124 AMAP_SET_BITS(struct amap_lancer_write_obj_context
,
2127 AMAP_SET_BITS(struct amap_lancer_write_obj_context
,
2130 be_dws_cpu_to_le(ctxt
, sizeof(req
->context
));
2131 req
->write_offset
= cpu_to_le32(data_offset
);
2132 strcpy(req
->object_name
, obj_name
);
2133 req
->descriptor_count
= cpu_to_le32(1);
2134 req
->buf_len
= cpu_to_le32(data_size
);
2135 req
->addr_low
= cpu_to_le32((cmd
->dma
+
2136 sizeof(struct lancer_cmd_req_write_object
))
2138 req
->addr_high
= cpu_to_le32(upper_32_bits(cmd
->dma
+
2139 sizeof(struct lancer_cmd_req_write_object
)));
2141 be_mcc_notify(adapter
);
2142 spin_unlock_bh(&adapter
->mcc_lock
);
2144 if (!wait_for_completion_timeout(&adapter
->flash_compl
,
2145 msecs_to_jiffies(60000)))
2148 status
= adapter
->flash_status
;
2150 resp
= embedded_payload(wrb
);
2152 *data_written
= le32_to_cpu(resp
->actual_write_len
);
2153 *change_status
= resp
->change_status
;
2155 *addn_status
= resp
->additional_status
;
2161 spin_unlock_bh(&adapter
->mcc_lock
);
2165 int lancer_cmd_read_object(struct be_adapter
*adapter
, struct be_dma_mem
*cmd
,
2166 u32 data_size
, u32 data_offset
, const char *obj_name
,
2167 u32
*data_read
, u32
*eof
, u8
*addn_status
)
2169 struct be_mcc_wrb
*wrb
;
2170 struct lancer_cmd_req_read_object
*req
;
2171 struct lancer_cmd_resp_read_object
*resp
;
2174 spin_lock_bh(&adapter
->mcc_lock
);
2176 wrb
= wrb_from_mccq(adapter
);
2182 req
= embedded_payload(wrb
);
2184 be_wrb_cmd_hdr_prepare(&req
->hdr
, CMD_SUBSYSTEM_COMMON
,
2185 OPCODE_COMMON_READ_OBJECT
,
2186 sizeof(struct lancer_cmd_req_read_object
), wrb
,
2189 req
->desired_read_len
= cpu_to_le32(data_size
);
2190 req
->read_offset
= cpu_to_le32(data_offset
);
2191 strcpy(req
->object_name
, obj_name
);
2192 req
->descriptor_count
= cpu_to_le32(1);
2193 req
->buf_len
= cpu_to_le32(data_size
);
2194 req
->addr_low
= cpu_to_le32((cmd
->dma
& 0xFFFFFFFF));
2195 req
->addr_high
= cpu_to_le32(upper_32_bits(cmd
->dma
));
2197 status
= be_mcc_notify_wait(adapter
);
2199 resp
= embedded_payload(wrb
);
2201 *data_read
= le32_to_cpu(resp
->actual_read_len
);
2202 *eof
= le32_to_cpu(resp
->eof
);
2204 *addn_status
= resp
->additional_status
;
2208 spin_unlock_bh(&adapter
->mcc_lock
);
2212 int be_cmd_write_flashrom(struct be_adapter
*adapter
, struct be_dma_mem
*cmd
,
2213 u32 flash_type
, u32 flash_opcode
, u32 buf_size
)
2215 struct be_mcc_wrb
*wrb
;
2216 struct be_cmd_write_flashrom
*req
;
2219 spin_lock_bh(&adapter
->mcc_lock
);
2220 adapter
->flash_status
= 0;
2222 wrb
= wrb_from_mccq(adapter
);
2229 be_wrb_cmd_hdr_prepare(&req
->hdr
, CMD_SUBSYSTEM_COMMON
,
2230 OPCODE_COMMON_WRITE_FLASHROM
, cmd
->size
, wrb
, cmd
);
2232 req
->params
.op_type
= cpu_to_le32(flash_type
);
2233 req
->params
.op_code
= cpu_to_le32(flash_opcode
);
2234 req
->params
.data_buf_size
= cpu_to_le32(buf_size
);
2236 be_mcc_notify(adapter
);
2237 spin_unlock_bh(&adapter
->mcc_lock
);
2239 if (!wait_for_completion_timeout(&adapter
->flash_compl
,
2240 msecs_to_jiffies(40000)))
2243 status
= adapter
->flash_status
;
2248 spin_unlock_bh(&adapter
->mcc_lock
);
2252 int be_cmd_get_flash_crc(struct be_adapter
*adapter
, u8
*flashed_crc
,
2255 struct be_mcc_wrb
*wrb
;
2256 struct be_cmd_read_flash_crc
*req
;
2259 spin_lock_bh(&adapter
->mcc_lock
);
2261 wrb
= wrb_from_mccq(adapter
);
2266 req
= embedded_payload(wrb
);
2268 be_wrb_cmd_hdr_prepare(&req
->hdr
, CMD_SUBSYSTEM_COMMON
,
2269 OPCODE_COMMON_READ_FLASHROM
, sizeof(*req
),
2272 req
->params
.op_type
= cpu_to_le32(OPTYPE_REDBOOT
);
2273 req
->params
.op_code
= cpu_to_le32(FLASHROM_OPER_REPORT
);
2274 req
->params
.offset
= cpu_to_le32(offset
);
2275 req
->params
.data_buf_size
= cpu_to_le32(0x4);
2277 status
= be_mcc_notify_wait(adapter
);
2279 memcpy(flashed_crc
, req
->crc
, 4);
2282 spin_unlock_bh(&adapter
->mcc_lock
);
2286 int be_cmd_enable_magic_wol(struct be_adapter
*adapter
, u8
*mac
,
2287 struct be_dma_mem
*nonemb_cmd
)
2289 struct be_mcc_wrb
*wrb
;
2290 struct be_cmd_req_acpi_wol_magic_config
*req
;
2293 spin_lock_bh(&adapter
->mcc_lock
);
2295 wrb
= wrb_from_mccq(adapter
);
2300 req
= nonemb_cmd
->va
;
2302 be_wrb_cmd_hdr_prepare(&req
->hdr
, CMD_SUBSYSTEM_ETH
,
2303 OPCODE_ETH_ACPI_WOL_MAGIC_CONFIG
, sizeof(*req
), wrb
,
2305 memcpy(req
->magic_mac
, mac
, ETH_ALEN
);
2307 status
= be_mcc_notify_wait(adapter
);
2310 spin_unlock_bh(&adapter
->mcc_lock
);
2314 int be_cmd_set_loopback(struct be_adapter
*adapter
, u8 port_num
,
2315 u8 loopback_type
, u8 enable
)
2317 struct be_mcc_wrb
*wrb
;
2318 struct be_cmd_req_set_lmode
*req
;
2321 spin_lock_bh(&adapter
->mcc_lock
);
2323 wrb
= wrb_from_mccq(adapter
);
2329 req
= embedded_payload(wrb
);
2331 be_wrb_cmd_hdr_prepare(&req
->hdr
, CMD_SUBSYSTEM_LOWLEVEL
,
2332 OPCODE_LOWLEVEL_SET_LOOPBACK_MODE
, sizeof(*req
), wrb
,
2335 req
->src_port
= port_num
;
2336 req
->dest_port
= port_num
;
2337 req
->loopback_type
= loopback_type
;
2338 req
->loopback_state
= enable
;
2340 status
= be_mcc_notify_wait(adapter
);
2342 spin_unlock_bh(&adapter
->mcc_lock
);
2346 int be_cmd_loopback_test(struct be_adapter
*adapter
, u32 port_num
,
2347 u32 loopback_type
, u32 pkt_size
, u32 num_pkts
, u64 pattern
)
2349 struct be_mcc_wrb
*wrb
;
2350 struct be_cmd_req_loopback_test
*req
;
2353 spin_lock_bh(&adapter
->mcc_lock
);
2355 wrb
= wrb_from_mccq(adapter
);
2361 req
= embedded_payload(wrb
);
2363 be_wrb_cmd_hdr_prepare(&req
->hdr
, CMD_SUBSYSTEM_LOWLEVEL
,
2364 OPCODE_LOWLEVEL_LOOPBACK_TEST
, sizeof(*req
), wrb
, NULL
);
2365 req
->hdr
.timeout
= cpu_to_le32(4);
2367 req
->pattern
= cpu_to_le64(pattern
);
2368 req
->src_port
= cpu_to_le32(port_num
);
2369 req
->dest_port
= cpu_to_le32(port_num
);
2370 req
->pkt_size
= cpu_to_le32(pkt_size
);
2371 req
->num_pkts
= cpu_to_le32(num_pkts
);
2372 req
->loopback_type
= cpu_to_le32(loopback_type
);
2374 status
= be_mcc_notify_wait(adapter
);
2376 struct be_cmd_resp_loopback_test
*resp
= embedded_payload(wrb
);
2377 status
= le32_to_cpu(resp
->status
);
2381 spin_unlock_bh(&adapter
->mcc_lock
);
2385 int be_cmd_ddr_dma_test(struct be_adapter
*adapter
, u64 pattern
,
2386 u32 byte_cnt
, struct be_dma_mem
*cmd
)
2388 struct be_mcc_wrb
*wrb
;
2389 struct be_cmd_req_ddrdma_test
*req
;
2393 spin_lock_bh(&adapter
->mcc_lock
);
2395 wrb
= wrb_from_mccq(adapter
);
2401 be_wrb_cmd_hdr_prepare(&req
->hdr
, CMD_SUBSYSTEM_LOWLEVEL
,
2402 OPCODE_LOWLEVEL_HOST_DDR_DMA
, cmd
->size
, wrb
, cmd
);
2404 req
->pattern
= cpu_to_le64(pattern
);
2405 req
->byte_count
= cpu_to_le32(byte_cnt
);
2406 for (i
= 0; i
< byte_cnt
; i
++) {
2407 req
->snd_buff
[i
] = (u8
)(pattern
>> (j
*8));
2413 status
= be_mcc_notify_wait(adapter
);
2416 struct be_cmd_resp_ddrdma_test
*resp
;
2418 if ((memcmp(resp
->rcv_buff
, req
->snd_buff
, byte_cnt
) != 0) ||
2425 spin_unlock_bh(&adapter
->mcc_lock
);
2429 int be_cmd_get_seeprom_data(struct be_adapter
*adapter
,
2430 struct be_dma_mem
*nonemb_cmd
)
2432 struct be_mcc_wrb
*wrb
;
2433 struct be_cmd_req_seeprom_read
*req
;
2436 spin_lock_bh(&adapter
->mcc_lock
);
2438 wrb
= wrb_from_mccq(adapter
);
2443 req
= nonemb_cmd
->va
;
2445 be_wrb_cmd_hdr_prepare(&req
->hdr
, CMD_SUBSYSTEM_COMMON
,
2446 OPCODE_COMMON_SEEPROM_READ
, sizeof(*req
), wrb
,
2449 status
= be_mcc_notify_wait(adapter
);
2452 spin_unlock_bh(&adapter
->mcc_lock
);
2456 int be_cmd_get_phy_info(struct be_adapter
*adapter
)
2458 struct be_mcc_wrb
*wrb
;
2459 struct be_cmd_req_get_phy_info
*req
;
2460 struct be_dma_mem cmd
;
2463 if (!be_cmd_allowed(adapter
, OPCODE_COMMON_GET_PHY_DETAILS
,
2464 CMD_SUBSYSTEM_COMMON
))
2467 spin_lock_bh(&adapter
->mcc_lock
);
2469 wrb
= wrb_from_mccq(adapter
);
2474 cmd
.size
= sizeof(struct be_cmd_req_get_phy_info
);
2475 cmd
.va
= pci_alloc_consistent(adapter
->pdev
, cmd
.size
,
2478 dev_err(&adapter
->pdev
->dev
, "Memory alloc failure\n");
2485 be_wrb_cmd_hdr_prepare(&req
->hdr
, CMD_SUBSYSTEM_COMMON
,
2486 OPCODE_COMMON_GET_PHY_DETAILS
, sizeof(*req
),
2489 status
= be_mcc_notify_wait(adapter
);
2491 struct be_phy_info
*resp_phy_info
=
2492 cmd
.va
+ sizeof(struct be_cmd_req_hdr
);
2493 adapter
->phy
.phy_type
= le16_to_cpu(resp_phy_info
->phy_type
);
2494 adapter
->phy
.interface_type
=
2495 le16_to_cpu(resp_phy_info
->interface_type
);
2496 adapter
->phy
.auto_speeds_supported
=
2497 le16_to_cpu(resp_phy_info
->auto_speeds_supported
);
2498 adapter
->phy
.fixed_speeds_supported
=
2499 le16_to_cpu(resp_phy_info
->fixed_speeds_supported
);
2500 adapter
->phy
.misc_params
=
2501 le32_to_cpu(resp_phy_info
->misc_params
);
2503 if (BE2_chip(adapter
)) {
2504 adapter
->phy
.fixed_speeds_supported
=
2505 BE_SUPPORTED_SPEED_10GBPS
|
2506 BE_SUPPORTED_SPEED_1GBPS
;
2509 pci_free_consistent(adapter
->pdev
, cmd
.size
,
2512 spin_unlock_bh(&adapter
->mcc_lock
);
2516 int be_cmd_set_qos(struct be_adapter
*adapter
, u32 bps
, u32 domain
)
2518 struct be_mcc_wrb
*wrb
;
2519 struct be_cmd_req_set_qos
*req
;
2522 spin_lock_bh(&adapter
->mcc_lock
);
2524 wrb
= wrb_from_mccq(adapter
);
2530 req
= embedded_payload(wrb
);
2532 be_wrb_cmd_hdr_prepare(&req
->hdr
, CMD_SUBSYSTEM_COMMON
,
2533 OPCODE_COMMON_SET_QOS
, sizeof(*req
), wrb
, NULL
);
2535 req
->hdr
.domain
= domain
;
2536 req
->valid_bits
= cpu_to_le32(BE_QOS_BITS_NIC
);
2537 req
->max_bps_nic
= cpu_to_le32(bps
);
2539 status
= be_mcc_notify_wait(adapter
);
2542 spin_unlock_bh(&adapter
->mcc_lock
);
2546 int be_cmd_get_cntl_attributes(struct be_adapter
*adapter
)
2548 struct be_mcc_wrb
*wrb
;
2549 struct be_cmd_req_cntl_attribs
*req
;
2550 struct be_cmd_resp_cntl_attribs
*resp
;
2552 int payload_len
= max(sizeof(*req
), sizeof(*resp
));
2553 struct mgmt_controller_attrib
*attribs
;
2554 struct be_dma_mem attribs_cmd
;
2556 if (mutex_lock_interruptible(&adapter
->mbox_lock
))
2559 memset(&attribs_cmd
, 0, sizeof(struct be_dma_mem
));
2560 attribs_cmd
.size
= sizeof(struct be_cmd_resp_cntl_attribs
);
2561 attribs_cmd
.va
= pci_alloc_consistent(adapter
->pdev
, attribs_cmd
.size
,
2563 if (!attribs_cmd
.va
) {
2564 dev_err(&adapter
->pdev
->dev
,
2565 "Memory allocation failure\n");
2570 wrb
= wrb_from_mbox(adapter
);
2575 req
= attribs_cmd
.va
;
2577 be_wrb_cmd_hdr_prepare(&req
->hdr
, CMD_SUBSYSTEM_COMMON
,
2578 OPCODE_COMMON_GET_CNTL_ATTRIBUTES
, payload_len
, wrb
,
2581 status
= be_mbox_notify_wait(adapter
);
2583 attribs
= attribs_cmd
.va
+ sizeof(struct be_cmd_resp_hdr
);
2584 adapter
->hba_port_num
= attribs
->hba_attribs
.phy_port
;
2588 mutex_unlock(&adapter
->mbox_lock
);
2590 pci_free_consistent(adapter
->pdev
, attribs_cmd
.size
,
2591 attribs_cmd
.va
, attribs_cmd
.dma
);
2596 int be_cmd_req_native_mode(struct be_adapter
*adapter
)
2598 struct be_mcc_wrb
*wrb
;
2599 struct be_cmd_req_set_func_cap
*req
;
2602 if (mutex_lock_interruptible(&adapter
->mbox_lock
))
2605 wrb
= wrb_from_mbox(adapter
);
2611 req
= embedded_payload(wrb
);
2613 be_wrb_cmd_hdr_prepare(&req
->hdr
, CMD_SUBSYSTEM_COMMON
,
2614 OPCODE_COMMON_SET_DRIVER_FUNCTION_CAP
, sizeof(*req
), wrb
, NULL
);
2616 req
->valid_cap_flags
= cpu_to_le32(CAPABILITY_SW_TIMESTAMPS
|
2617 CAPABILITY_BE3_NATIVE_ERX_API
);
2618 req
->cap_flags
= cpu_to_le32(CAPABILITY_BE3_NATIVE_ERX_API
);
2620 status
= be_mbox_notify_wait(adapter
);
2622 struct be_cmd_resp_set_func_cap
*resp
= embedded_payload(wrb
);
2623 adapter
->be3_native
= le32_to_cpu(resp
->cap_flags
) &
2624 CAPABILITY_BE3_NATIVE_ERX_API
;
2625 if (!adapter
->be3_native
)
2626 dev_warn(&adapter
->pdev
->dev
,
2627 "adapter not in advanced mode\n");
2630 mutex_unlock(&adapter
->mbox_lock
);
2634 /* Get privilege(s) for a function */
2635 int be_cmd_get_fn_privileges(struct be_adapter
*adapter
, u32
*privilege
,
2638 struct be_mcc_wrb
*wrb
;
2639 struct be_cmd_req_get_fn_privileges
*req
;
2642 spin_lock_bh(&adapter
->mcc_lock
);
2644 wrb
= wrb_from_mccq(adapter
);
2650 req
= embedded_payload(wrb
);
2652 be_wrb_cmd_hdr_prepare(&req
->hdr
, CMD_SUBSYSTEM_COMMON
,
2653 OPCODE_COMMON_GET_FN_PRIVILEGES
, sizeof(*req
),
2656 req
->hdr
.domain
= domain
;
2658 status
= be_mcc_notify_wait(adapter
);
2660 struct be_cmd_resp_get_fn_privileges
*resp
=
2661 embedded_payload(wrb
);
2662 *privilege
= le32_to_cpu(resp
->privilege_mask
);
2666 spin_unlock_bh(&adapter
->mcc_lock
);
2670 /* Set privilege(s) for a function */
2671 int be_cmd_set_fn_privileges(struct be_adapter
*adapter
, u32 privileges
,
2674 struct be_mcc_wrb
*wrb
;
2675 struct be_cmd_req_set_fn_privileges
*req
;
2678 spin_lock_bh(&adapter
->mcc_lock
);
2680 wrb
= wrb_from_mccq(adapter
);
2686 req
= embedded_payload(wrb
);
2687 be_wrb_cmd_hdr_prepare(&req
->hdr
, CMD_SUBSYSTEM_COMMON
,
2688 OPCODE_COMMON_SET_FN_PRIVILEGES
, sizeof(*req
),
2690 req
->hdr
.domain
= domain
;
2691 if (lancer_chip(adapter
))
2692 req
->privileges_lancer
= cpu_to_le32(privileges
);
2694 req
->privileges
= cpu_to_le32(privileges
);
2696 status
= be_mcc_notify_wait(adapter
);
2698 spin_unlock_bh(&adapter
->mcc_lock
);
2702 /* pmac_id_valid: true => pmac_id is supplied and MAC address is requested.
2703 * pmac_id_valid: false => pmac_id or MAC address is requested.
2704 * If pmac_id is returned, pmac_id_valid is returned as true
2706 int be_cmd_get_mac_from_list(struct be_adapter
*adapter
, u8
*mac
,
2707 bool *pmac_id_valid
, u32
*pmac_id
, u8 domain
)
2709 struct be_mcc_wrb
*wrb
;
2710 struct be_cmd_req_get_mac_list
*req
;
2713 struct be_dma_mem get_mac_list_cmd
;
2716 memset(&get_mac_list_cmd
, 0, sizeof(struct be_dma_mem
));
2717 get_mac_list_cmd
.size
= sizeof(struct be_cmd_resp_get_mac_list
);
2718 get_mac_list_cmd
.va
= pci_alloc_consistent(adapter
->pdev
,
2719 get_mac_list_cmd
.size
,
2720 &get_mac_list_cmd
.dma
);
2722 if (!get_mac_list_cmd
.va
) {
2723 dev_err(&adapter
->pdev
->dev
,
2724 "Memory allocation failure during GET_MAC_LIST\n");
2728 spin_lock_bh(&adapter
->mcc_lock
);
2730 wrb
= wrb_from_mccq(adapter
);
2736 req
= get_mac_list_cmd
.va
;
2738 be_wrb_cmd_hdr_prepare(&req
->hdr
, CMD_SUBSYSTEM_COMMON
,
2739 OPCODE_COMMON_GET_MAC_LIST
,
2740 get_mac_list_cmd
.size
, wrb
, &get_mac_list_cmd
);
2741 req
->hdr
.domain
= domain
;
2742 req
->mac_type
= MAC_ADDRESS_TYPE_NETWORK
;
2743 if (*pmac_id_valid
) {
2744 req
->mac_id
= cpu_to_le32(*pmac_id
);
2745 req
->iface_id
= cpu_to_le16(adapter
->if_handle
);
2746 req
->perm_override
= 0;
2748 req
->perm_override
= 1;
2751 status
= be_mcc_notify_wait(adapter
);
2753 struct be_cmd_resp_get_mac_list
*resp
=
2754 get_mac_list_cmd
.va
;
2756 if (*pmac_id_valid
) {
2757 memcpy(mac
, resp
->macid_macaddr
.mac_addr_id
.macaddr
,
2762 mac_count
= resp
->true_mac_count
+ resp
->pseudo_mac_count
;
2763 /* Mac list returned could contain one or more active mac_ids
2764 * or one or more true or pseudo permanant mac addresses.
2765 * If an active mac_id is present, return first active mac_id
2768 for (i
= 0; i
< mac_count
; i
++) {
2769 struct get_list_macaddr
*mac_entry
;
2773 mac_entry
= &resp
->macaddr_list
[i
];
2774 mac_addr_size
= le16_to_cpu(mac_entry
->mac_addr_size
);
2775 /* mac_id is a 32 bit value and mac_addr size
2778 if (mac_addr_size
== sizeof(u32
)) {
2779 *pmac_id_valid
= true;
2780 mac_id
= mac_entry
->mac_addr_id
.s_mac_id
.mac_id
;
2781 *pmac_id
= le32_to_cpu(mac_id
);
2785 /* If no active mac_id found, return first mac addr */
2786 *pmac_id_valid
= false;
2787 memcpy(mac
, resp
->macaddr_list
[0].mac_addr_id
.macaddr
,
2792 spin_unlock_bh(&adapter
->mcc_lock
);
2793 pci_free_consistent(adapter
->pdev
, get_mac_list_cmd
.size
,
2794 get_mac_list_cmd
.va
, get_mac_list_cmd
.dma
);
2798 int be_cmd_get_active_mac(struct be_adapter
*adapter
, u32 curr_pmac_id
, u8
*mac
)
2802 if (BEx_chip(adapter
))
2803 return be_cmd_mac_addr_query(adapter
, mac
, false,
2804 adapter
->if_handle
, curr_pmac_id
);
2806 /* Fetch the MAC address using pmac_id */
2807 return be_cmd_get_mac_from_list(adapter
, mac
, &active
,
2811 int be_cmd_get_perm_mac(struct be_adapter
*adapter
, u8
*mac
)
2814 bool pmac_valid
= false;
2816 memset(mac
, 0, ETH_ALEN
);
2818 if (BEx_chip(adapter
)) {
2819 if (be_physfn(adapter
))
2820 status
= be_cmd_mac_addr_query(adapter
, mac
, true, 0,
2823 status
= be_cmd_mac_addr_query(adapter
, mac
, false,
2824 adapter
->if_handle
, 0);
2826 status
= be_cmd_get_mac_from_list(adapter
, mac
, &pmac_valid
,
2833 /* Uses synchronous MCCQ */
2834 int be_cmd_set_mac_list(struct be_adapter
*adapter
, u8
*mac_array
,
2835 u8 mac_count
, u32 domain
)
2837 struct be_mcc_wrb
*wrb
;
2838 struct be_cmd_req_set_mac_list
*req
;
2840 struct be_dma_mem cmd
;
2842 memset(&cmd
, 0, sizeof(struct be_dma_mem
));
2843 cmd
.size
= sizeof(struct be_cmd_req_set_mac_list
);
2844 cmd
.va
= dma_alloc_coherent(&adapter
->pdev
->dev
, cmd
.size
,
2845 &cmd
.dma
, GFP_KERNEL
);
2849 spin_lock_bh(&adapter
->mcc_lock
);
2851 wrb
= wrb_from_mccq(adapter
);
2858 be_wrb_cmd_hdr_prepare(&req
->hdr
, CMD_SUBSYSTEM_COMMON
,
2859 OPCODE_COMMON_SET_MAC_LIST
, sizeof(*req
),
2862 req
->hdr
.domain
= domain
;
2863 req
->mac_count
= mac_count
;
2865 memcpy(req
->mac
, mac_array
, ETH_ALEN
*mac_count
);
2867 status
= be_mcc_notify_wait(adapter
);
2870 dma_free_coherent(&adapter
->pdev
->dev
, cmd
.size
,
2872 spin_unlock_bh(&adapter
->mcc_lock
);
2876 /* Wrapper to delete any active MACs and provision the new mac.
2877 * Changes to MAC_LIST are allowed iff none of the MAC addresses in the
2878 * current list are active.
2880 int be_cmd_set_mac(struct be_adapter
*adapter
, u8
*mac
, int if_id
, u32 dom
)
2882 bool active_mac
= false;
2883 u8 old_mac
[ETH_ALEN
];
2887 status
= be_cmd_get_mac_from_list(adapter
, old_mac
, &active_mac
,
2889 if (!status
&& active_mac
)
2890 be_cmd_pmac_del(adapter
, if_id
, pmac_id
, dom
);
2892 return be_cmd_set_mac_list(adapter
, mac
, mac
? 1 : 0, dom
);
2895 int be_cmd_set_hsw_config(struct be_adapter
*adapter
, u16 pvid
,
2896 u32 domain
, u16 intf_id
, u16 hsw_mode
)
2898 struct be_mcc_wrb
*wrb
;
2899 struct be_cmd_req_set_hsw_config
*req
;
2903 spin_lock_bh(&adapter
->mcc_lock
);
2905 wrb
= wrb_from_mccq(adapter
);
2911 req
= embedded_payload(wrb
);
2912 ctxt
= &req
->context
;
2914 be_wrb_cmd_hdr_prepare(&req
->hdr
, CMD_SUBSYSTEM_COMMON
,
2915 OPCODE_COMMON_SET_HSW_CONFIG
, sizeof(*req
), wrb
, NULL
);
2917 req
->hdr
.domain
= domain
;
2918 AMAP_SET_BITS(struct amap_set_hsw_context
, interface_id
, ctxt
, intf_id
);
2920 AMAP_SET_BITS(struct amap_set_hsw_context
, pvid_valid
, ctxt
, 1);
2921 AMAP_SET_BITS(struct amap_set_hsw_context
, pvid
, ctxt
, pvid
);
2923 if (!BEx_chip(adapter
) && hsw_mode
) {
2924 AMAP_SET_BITS(struct amap_set_hsw_context
, interface_id
,
2925 ctxt
, adapter
->hba_port_num
);
2926 AMAP_SET_BITS(struct amap_set_hsw_context
, pport
, ctxt
, 1);
2927 AMAP_SET_BITS(struct amap_set_hsw_context
, port_fwd_type
,
2931 be_dws_cpu_to_le(req
->context
, sizeof(req
->context
));
2932 status
= be_mcc_notify_wait(adapter
);
2935 spin_unlock_bh(&adapter
->mcc_lock
);
2939 /* Get Hyper switch config */
2940 int be_cmd_get_hsw_config(struct be_adapter
*adapter
, u16
*pvid
,
2941 u32 domain
, u16 intf_id
, u8
*mode
)
2943 struct be_mcc_wrb
*wrb
;
2944 struct be_cmd_req_get_hsw_config
*req
;
2949 spin_lock_bh(&adapter
->mcc_lock
);
2951 wrb
= wrb_from_mccq(adapter
);
2957 req
= embedded_payload(wrb
);
2958 ctxt
= &req
->context
;
2960 be_wrb_cmd_hdr_prepare(&req
->hdr
, CMD_SUBSYSTEM_COMMON
,
2961 OPCODE_COMMON_GET_HSW_CONFIG
, sizeof(*req
), wrb
, NULL
);
2963 req
->hdr
.domain
= domain
;
2964 AMAP_SET_BITS(struct amap_get_hsw_req_context
, interface_id
,
2966 AMAP_SET_BITS(struct amap_get_hsw_req_context
, pvid_valid
, ctxt
, 1);
2968 if (!BEx_chip(adapter
)) {
2969 AMAP_SET_BITS(struct amap_get_hsw_req_context
, interface_id
,
2970 ctxt
, adapter
->hba_port_num
);
2971 AMAP_SET_BITS(struct amap_get_hsw_req_context
, pport
, ctxt
, 1);
2973 be_dws_cpu_to_le(req
->context
, sizeof(req
->context
));
2975 status
= be_mcc_notify_wait(adapter
);
2977 struct be_cmd_resp_get_hsw_config
*resp
=
2978 embedded_payload(wrb
);
2979 be_dws_le_to_cpu(&resp
->context
,
2980 sizeof(resp
->context
));
2981 vid
= AMAP_GET_BITS(struct amap_get_hsw_resp_context
,
2982 pvid
, &resp
->context
);
2984 *pvid
= le16_to_cpu(vid
);
2986 *mode
= AMAP_GET_BITS(struct amap_get_hsw_resp_context
,
2987 port_fwd_type
, &resp
->context
);
2991 spin_unlock_bh(&adapter
->mcc_lock
);
2995 int be_cmd_get_acpi_wol_cap(struct be_adapter
*adapter
)
2997 struct be_mcc_wrb
*wrb
;
2998 struct be_cmd_req_acpi_wol_magic_config_v1
*req
;
3000 int payload_len
= sizeof(*req
);
3001 struct be_dma_mem cmd
;
3003 if (!be_cmd_allowed(adapter
, OPCODE_ETH_ACPI_WOL_MAGIC_CONFIG
,
3007 if (mutex_lock_interruptible(&adapter
->mbox_lock
))
3010 memset(&cmd
, 0, sizeof(struct be_dma_mem
));
3011 cmd
.size
= sizeof(struct be_cmd_resp_acpi_wol_magic_config_v1
);
3012 cmd
.va
= pci_alloc_consistent(adapter
->pdev
, cmd
.size
,
3015 dev_err(&adapter
->pdev
->dev
,
3016 "Memory allocation failure\n");
3021 wrb
= wrb_from_mbox(adapter
);
3029 be_wrb_cmd_hdr_prepare(&req
->hdr
, CMD_SUBSYSTEM_ETH
,
3030 OPCODE_ETH_ACPI_WOL_MAGIC_CONFIG
,
3031 payload_len
, wrb
, &cmd
);
3033 req
->hdr
.version
= 1;
3034 req
->query_options
= BE_GET_WOL_CAP
;
3036 status
= be_mbox_notify_wait(adapter
);
3038 struct be_cmd_resp_acpi_wol_magic_config_v1
*resp
;
3039 resp
= (struct be_cmd_resp_acpi_wol_magic_config_v1
*) cmd
.va
;
3041 /* the command could succeed misleadingly on old f/w
3042 * which is not aware of the V1 version. fake an error. */
3043 if (resp
->hdr
.response_length
< payload_len
) {
3047 adapter
->wol_cap
= resp
->wol_settings
;
3050 mutex_unlock(&adapter
->mbox_lock
);
3052 pci_free_consistent(adapter
->pdev
, cmd
.size
, cmd
.va
, cmd
.dma
);
3056 int be_cmd_get_ext_fat_capabilites(struct be_adapter
*adapter
,
3057 struct be_dma_mem
*cmd
)
3059 struct be_mcc_wrb
*wrb
;
3060 struct be_cmd_req_get_ext_fat_caps
*req
;
3063 if (mutex_lock_interruptible(&adapter
->mbox_lock
))
3066 wrb
= wrb_from_mbox(adapter
);
3073 be_wrb_cmd_hdr_prepare(&req
->hdr
, CMD_SUBSYSTEM_COMMON
,
3074 OPCODE_COMMON_GET_EXT_FAT_CAPABILITES
,
3075 cmd
->size
, wrb
, cmd
);
3076 req
->parameter_type
= cpu_to_le32(1);
3078 status
= be_mbox_notify_wait(adapter
);
3080 mutex_unlock(&adapter
->mbox_lock
);
3084 int be_cmd_set_ext_fat_capabilites(struct be_adapter
*adapter
,
3085 struct be_dma_mem
*cmd
,
3086 struct be_fat_conf_params
*configs
)
3088 struct be_mcc_wrb
*wrb
;
3089 struct be_cmd_req_set_ext_fat_caps
*req
;
3092 spin_lock_bh(&adapter
->mcc_lock
);
3094 wrb
= wrb_from_mccq(adapter
);
3101 memcpy(&req
->set_params
, configs
, sizeof(struct be_fat_conf_params
));
3102 be_wrb_cmd_hdr_prepare(&req
->hdr
, CMD_SUBSYSTEM_COMMON
,
3103 OPCODE_COMMON_SET_EXT_FAT_CAPABILITES
,
3104 cmd
->size
, wrb
, cmd
);
3106 status
= be_mcc_notify_wait(adapter
);
3108 spin_unlock_bh(&adapter
->mcc_lock
);
3112 int be_cmd_query_port_name(struct be_adapter
*adapter
, u8
*port_name
)
3114 struct be_mcc_wrb
*wrb
;
3115 struct be_cmd_req_get_port_name
*req
;
3118 if (!lancer_chip(adapter
)) {
3119 *port_name
= adapter
->hba_port_num
+ '0';
3123 spin_lock_bh(&adapter
->mcc_lock
);
3125 wrb
= wrb_from_mccq(adapter
);
3131 req
= embedded_payload(wrb
);
3133 be_wrb_cmd_hdr_prepare(&req
->hdr
, CMD_SUBSYSTEM_COMMON
,
3134 OPCODE_COMMON_GET_PORT_NAME
, sizeof(*req
), wrb
,
3136 req
->hdr
.version
= 1;
3138 status
= be_mcc_notify_wait(adapter
);
3140 struct be_cmd_resp_get_port_name
*resp
= embedded_payload(wrb
);
3141 *port_name
= resp
->port_name
[adapter
->hba_port_num
];
3143 *port_name
= adapter
->hba_port_num
+ '0';
3146 spin_unlock_bh(&adapter
->mcc_lock
);
3150 static struct be_nic_res_desc
*be_get_nic_desc(u8
*buf
, u32 desc_count
)
3152 struct be_res_desc_hdr
*hdr
= (struct be_res_desc_hdr
*)buf
;
3155 for (i
= 0; i
< desc_count
; i
++) {
3156 if (hdr
->desc_type
== NIC_RESOURCE_DESC_TYPE_V0
||
3157 hdr
->desc_type
== NIC_RESOURCE_DESC_TYPE_V1
)
3158 return (struct be_nic_res_desc
*)hdr
;
3160 hdr
->desc_len
= hdr
->desc_len
? : RESOURCE_DESC_SIZE_V0
;
3161 hdr
= (void *)hdr
+ hdr
->desc_len
;
3166 static struct be_pcie_res_desc
*be_get_pcie_desc(u8 devfn
, u8
*buf
,
3169 struct be_res_desc_hdr
*hdr
= (struct be_res_desc_hdr
*)buf
;
3170 struct be_pcie_res_desc
*pcie
;
3173 for (i
= 0; i
< desc_count
; i
++) {
3174 if ((hdr
->desc_type
== PCIE_RESOURCE_DESC_TYPE_V0
||
3175 hdr
->desc_type
== PCIE_RESOURCE_DESC_TYPE_V1
)) {
3176 pcie
= (struct be_pcie_res_desc
*)hdr
;
3177 if (pcie
->pf_num
== devfn
)
3181 hdr
->desc_len
= hdr
->desc_len
? : RESOURCE_DESC_SIZE_V0
;
3182 hdr
= (void *)hdr
+ hdr
->desc_len
;
3187 static void be_copy_nic_desc(struct be_resources
*res
,
3188 struct be_nic_res_desc
*desc
)
3190 res
->max_uc_mac
= le16_to_cpu(desc
->unicast_mac_count
);
3191 res
->max_vlans
= le16_to_cpu(desc
->vlan_count
);
3192 res
->max_mcast_mac
= le16_to_cpu(desc
->mcast_mac_count
);
3193 res
->max_tx_qs
= le16_to_cpu(desc
->txq_count
);
3194 res
->max_rss_qs
= le16_to_cpu(desc
->rssq_count
);
3195 res
->max_rx_qs
= le16_to_cpu(desc
->rq_count
);
3196 res
->max_evt_qs
= le16_to_cpu(desc
->eq_count
);
3197 /* Clear flags that driver is not interested in */
3198 res
->if_cap_flags
= le32_to_cpu(desc
->cap_flags
) &
3199 BE_IF_CAP_FLAGS_WANT
;
3200 /* Need 1 RXQ as the default RXQ */
3201 if (res
->max_rss_qs
&& res
->max_rss_qs
== res
->max_rx_qs
)
3202 res
->max_rss_qs
-= 1;
3206 int be_cmd_get_func_config(struct be_adapter
*adapter
, struct be_resources
*res
)
3208 struct be_mcc_wrb
*wrb
;
3209 struct be_cmd_req_get_func_config
*req
;
3211 struct be_dma_mem cmd
;
3213 if (mutex_lock_interruptible(&adapter
->mbox_lock
))
3216 memset(&cmd
, 0, sizeof(struct be_dma_mem
));
3217 cmd
.size
= sizeof(struct be_cmd_resp_get_func_config
);
3218 cmd
.va
= pci_alloc_consistent(adapter
->pdev
, cmd
.size
,
3221 dev_err(&adapter
->pdev
->dev
, "Memory alloc failure\n");
3226 wrb
= wrb_from_mbox(adapter
);
3234 be_wrb_cmd_hdr_prepare(&req
->hdr
, CMD_SUBSYSTEM_COMMON
,
3235 OPCODE_COMMON_GET_FUNC_CONFIG
,
3236 cmd
.size
, wrb
, &cmd
);
3238 if (skyhawk_chip(adapter
))
3239 req
->hdr
.version
= 1;
3241 status
= be_mbox_notify_wait(adapter
);
3243 struct be_cmd_resp_get_func_config
*resp
= cmd
.va
;
3244 u32 desc_count
= le32_to_cpu(resp
->desc_count
);
3245 struct be_nic_res_desc
*desc
;
3247 desc
= be_get_nic_desc(resp
->func_param
, desc_count
);
3253 adapter
->pf_number
= desc
->pf_num
;
3254 be_copy_nic_desc(res
, desc
);
3257 mutex_unlock(&adapter
->mbox_lock
);
3259 pci_free_consistent(adapter
->pdev
, cmd
.size
, cmd
.va
, cmd
.dma
);
3264 static int be_cmd_get_profile_config_mbox(struct be_adapter
*adapter
,
3265 u8 domain
, struct be_dma_mem
*cmd
)
3267 struct be_mcc_wrb
*wrb
;
3268 struct be_cmd_req_get_profile_config
*req
;
3271 if (mutex_lock_interruptible(&adapter
->mbox_lock
))
3273 wrb
= wrb_from_mbox(adapter
);
3276 be_wrb_cmd_hdr_prepare(&req
->hdr
, CMD_SUBSYSTEM_COMMON
,
3277 OPCODE_COMMON_GET_PROFILE_CONFIG
,
3278 cmd
->size
, wrb
, cmd
);
3280 req
->type
= ACTIVE_PROFILE_TYPE
;
3281 req
->hdr
.domain
= domain
;
3282 if (!lancer_chip(adapter
))
3283 req
->hdr
.version
= 1;
3285 status
= be_mbox_notify_wait(adapter
);
3287 mutex_unlock(&adapter
->mbox_lock
);
3292 static int be_cmd_get_profile_config_mccq(struct be_adapter
*adapter
,
3293 u8 domain
, struct be_dma_mem
*cmd
)
3295 struct be_mcc_wrb
*wrb
;
3296 struct be_cmd_req_get_profile_config
*req
;
3299 spin_lock_bh(&adapter
->mcc_lock
);
3301 wrb
= wrb_from_mccq(adapter
);
3308 be_wrb_cmd_hdr_prepare(&req
->hdr
, CMD_SUBSYSTEM_COMMON
,
3309 OPCODE_COMMON_GET_PROFILE_CONFIG
,
3310 cmd
->size
, wrb
, cmd
);
3312 req
->type
= ACTIVE_PROFILE_TYPE
;
3313 req
->hdr
.domain
= domain
;
3314 if (!lancer_chip(adapter
))
3315 req
->hdr
.version
= 1;
3317 status
= be_mcc_notify_wait(adapter
);
3320 spin_unlock_bh(&adapter
->mcc_lock
);
3324 /* Uses sync mcc, if MCCQ is already created otherwise mbox */
3325 int be_cmd_get_profile_config(struct be_adapter
*adapter
,
3326 struct be_resources
*res
, u8 domain
)
3328 struct be_cmd_resp_get_profile_config
*resp
;
3329 struct be_pcie_res_desc
*pcie
;
3330 struct be_nic_res_desc
*nic
;
3331 struct be_queue_info
*mccq
= &adapter
->mcc_obj
.q
;
3332 struct be_dma_mem cmd
;
3336 memset(&cmd
, 0, sizeof(struct be_dma_mem
));
3337 cmd
.size
= sizeof(struct be_cmd_resp_get_profile_config
);
3338 cmd
.va
= pci_alloc_consistent(adapter
->pdev
, cmd
.size
, &cmd
.dma
);
3343 status
= be_cmd_get_profile_config_mbox(adapter
, domain
, &cmd
);
3345 status
= be_cmd_get_profile_config_mccq(adapter
, domain
, &cmd
);
3350 desc_count
= le32_to_cpu(resp
->desc_count
);
3352 pcie
= be_get_pcie_desc(adapter
->pdev
->devfn
, resp
->func_param
,
3355 res
->max_vfs
= le16_to_cpu(pcie
->num_vfs
);
3357 nic
= be_get_nic_desc(resp
->func_param
, desc_count
);
3359 be_copy_nic_desc(res
, nic
);
3363 pci_free_consistent(adapter
->pdev
, cmd
.size
, cmd
.va
, cmd
.dma
);
3367 /* Currently only Lancer uses this command and it supports version 0 only
3370 int be_cmd_set_profile_config(struct be_adapter
*adapter
, u32 bps
,
3373 struct be_mcc_wrb
*wrb
;
3374 struct be_cmd_req_set_profile_config
*req
;
3377 spin_lock_bh(&adapter
->mcc_lock
);
3379 wrb
= wrb_from_mccq(adapter
);
3385 req
= embedded_payload(wrb
);
3387 be_wrb_cmd_hdr_prepare(&req
->hdr
, CMD_SUBSYSTEM_COMMON
,
3388 OPCODE_COMMON_SET_PROFILE_CONFIG
, sizeof(*req
),
3390 req
->hdr
.domain
= domain
;
3391 req
->desc_count
= cpu_to_le32(1);
3392 req
->nic_desc
.hdr
.desc_type
= NIC_RESOURCE_DESC_TYPE_V0
;
3393 req
->nic_desc
.hdr
.desc_len
= RESOURCE_DESC_SIZE_V0
;
3394 req
->nic_desc
.flags
= (1 << QUN
) | (1 << IMM
) | (1 << NOSV
);
3395 req
->nic_desc
.pf_num
= adapter
->pf_number
;
3396 req
->nic_desc
.vf_num
= domain
;
3398 /* Mark fields invalid */
3399 req
->nic_desc
.unicast_mac_count
= 0xFFFF;
3400 req
->nic_desc
.mcc_count
= 0xFFFF;
3401 req
->nic_desc
.vlan_count
= 0xFFFF;
3402 req
->nic_desc
.mcast_mac_count
= 0xFFFF;
3403 req
->nic_desc
.txq_count
= 0xFFFF;
3404 req
->nic_desc
.rq_count
= 0xFFFF;
3405 req
->nic_desc
.rssq_count
= 0xFFFF;
3406 req
->nic_desc
.lro_count
= 0xFFFF;
3407 req
->nic_desc
.cq_count
= 0xFFFF;
3408 req
->nic_desc
.toe_conn_count
= 0xFFFF;
3409 req
->nic_desc
.eq_count
= 0xFFFF;
3410 req
->nic_desc
.link_param
= 0xFF;
3411 req
->nic_desc
.bw_min
= 0xFFFFFFFF;
3412 req
->nic_desc
.acpi_params
= 0xFF;
3413 req
->nic_desc
.wol_param
= 0x0F;
3416 req
->nic_desc
.bw_min
= cpu_to_le32(bps
);
3417 req
->nic_desc
.bw_max
= cpu_to_le32(bps
);
3418 status
= be_mcc_notify_wait(adapter
);
3420 spin_unlock_bh(&adapter
->mcc_lock
);
3424 int be_cmd_get_if_id(struct be_adapter
*adapter
, struct be_vf_cfg
*vf_cfg
,
3427 struct be_mcc_wrb
*wrb
;
3428 struct be_cmd_req_get_iface_list
*req
;
3429 struct be_cmd_resp_get_iface_list
*resp
;
3432 spin_lock_bh(&adapter
->mcc_lock
);
3434 wrb
= wrb_from_mccq(adapter
);
3439 req
= embedded_payload(wrb
);
3441 be_wrb_cmd_hdr_prepare(&req
->hdr
, CMD_SUBSYSTEM_COMMON
,
3442 OPCODE_COMMON_GET_IFACE_LIST
, sizeof(*resp
),
3444 req
->hdr
.domain
= vf_num
+ 1;
3446 status
= be_mcc_notify_wait(adapter
);
3448 resp
= (struct be_cmd_resp_get_iface_list
*)req
;
3449 vf_cfg
->if_handle
= le32_to_cpu(resp
->if_desc
.if_id
);
3453 spin_unlock_bh(&adapter
->mcc_lock
);
3457 static int lancer_wait_idle(struct be_adapter
*adapter
)
3459 #define SLIPORT_IDLE_TIMEOUT 30
3463 for (i
= 0; i
< SLIPORT_IDLE_TIMEOUT
; i
++) {
3464 reg_val
= ioread32(adapter
->db
+ PHYSDEV_CONTROL_OFFSET
);
3465 if ((reg_val
& PHYSDEV_CONTROL_INP_MASK
) == 0)
3471 if (i
== SLIPORT_IDLE_TIMEOUT
)
3477 int lancer_physdev_ctrl(struct be_adapter
*adapter
, u32 mask
)
3481 status
= lancer_wait_idle(adapter
);
3485 iowrite32(mask
, adapter
->db
+ PHYSDEV_CONTROL_OFFSET
);
3490 /* Routine to check whether dump image is present or not */
3491 bool dump_present(struct be_adapter
*adapter
)
3493 u32 sliport_status
= 0;
3495 sliport_status
= ioread32(adapter
->db
+ SLIPORT_STATUS_OFFSET
);
3496 return !!(sliport_status
& SLIPORT_STATUS_DIP_MASK
);
3499 int lancer_initiate_dump(struct be_adapter
*adapter
)
3503 /* give firmware reset and diagnostic dump */
3504 status
= lancer_physdev_ctrl(adapter
, PHYSDEV_CONTROL_FW_RESET_MASK
|
3505 PHYSDEV_CONTROL_DD_MASK
);
3507 dev_err(&adapter
->pdev
->dev
, "Firmware reset failed\n");
3511 status
= lancer_wait_idle(adapter
);
3515 if (!dump_present(adapter
)) {
3516 dev_err(&adapter
->pdev
->dev
, "Dump image not present\n");
3524 int be_cmd_enable_vf(struct be_adapter
*adapter
, u8 domain
)
3526 struct be_mcc_wrb
*wrb
;
3527 struct be_cmd_enable_disable_vf
*req
;
3530 if (BEx_chip(adapter
))
3533 spin_lock_bh(&adapter
->mcc_lock
);
3535 wrb
= wrb_from_mccq(adapter
);
3541 req
= embedded_payload(wrb
);
3543 be_wrb_cmd_hdr_prepare(&req
->hdr
, CMD_SUBSYSTEM_COMMON
,
3544 OPCODE_COMMON_ENABLE_DISABLE_VF
, sizeof(*req
),
3547 req
->hdr
.domain
= domain
;
3549 status
= be_mcc_notify_wait(adapter
);
3551 spin_unlock_bh(&adapter
->mcc_lock
);
3555 int be_cmd_intr_set(struct be_adapter
*adapter
, bool intr_enable
)
3557 struct be_mcc_wrb
*wrb
;
3558 struct be_cmd_req_intr_set
*req
;
3561 if (mutex_lock_interruptible(&adapter
->mbox_lock
))
3564 wrb
= wrb_from_mbox(adapter
);
3566 req
= embedded_payload(wrb
);
3568 be_wrb_cmd_hdr_prepare(&req
->hdr
, CMD_SUBSYSTEM_COMMON
,
3569 OPCODE_COMMON_SET_INTERRUPT_ENABLE
, sizeof(*req
),
3572 req
->intr_enabled
= intr_enable
;
3574 status
= be_mbox_notify_wait(adapter
);
3576 mutex_unlock(&adapter
->mbox_lock
);
3580 int be_roce_mcc_cmd(void *netdev_handle
, void *wrb_payload
,
3581 int wrb_payload_size
, u16
*cmd_status
, u16
*ext_status
)
3583 struct be_adapter
*adapter
= netdev_priv(netdev_handle
);
3584 struct be_mcc_wrb
*wrb
;
3585 struct be_cmd_req_hdr
*hdr
= (struct be_cmd_req_hdr
*) wrb_payload
;
3586 struct be_cmd_req_hdr
*req
;
3587 struct be_cmd_resp_hdr
*resp
;
3590 spin_lock_bh(&adapter
->mcc_lock
);
3592 wrb
= wrb_from_mccq(adapter
);
3597 req
= embedded_payload(wrb
);
3598 resp
= embedded_payload(wrb
);
3600 be_wrb_cmd_hdr_prepare(req
, hdr
->subsystem
,
3601 hdr
->opcode
, wrb_payload_size
, wrb
, NULL
);
3602 memcpy(req
, wrb_payload
, wrb_payload_size
);
3603 be_dws_cpu_to_le(req
, wrb_payload_size
);
3605 status
= be_mcc_notify_wait(adapter
);
3607 *cmd_status
= (status
& 0xffff);
3610 memcpy(wrb_payload
, resp
, sizeof(*resp
) + resp
->response_length
);
3611 be_dws_le_to_cpu(wrb_payload
, sizeof(*resp
) + resp
->response_length
);
3613 spin_unlock_bh(&adapter
->mcc_lock
);
3616 EXPORT_SYMBOL(be_roce_mcc_cmd
);