2 * Copyright (C) 2005 - 2014 Emulex
5 * This program is free software; you can redistribute it and/or
6 * modify it under the terms of the GNU General Public License version 2
7 * as published by the Free Software Foundation. The full GNU General
8 * Public License is included in this distribution in the file called COPYING.
10 * Contact Information:
11 * linux-drivers@emulex.com
15 * Costa Mesa, CA 92626
18 #include <linux/module.h>
/* Human-readable descriptions of the per-port SFP status codes reported in
 * a PORT_MISCONFIG async event; indexed by the status byte.
 */
static char *be_port_misconfig_evt_desc[] = {
	"A valid SFP module detected",
	"Optics faulted/ incorrectly installed/ not installed.",
	"Optics of two types installed.",
	"Incompatible optics.",
	"Unknown port SFP status"
};
/* Suggested remedies matching be_port_misconfig_evt_desc[], same indexing.
 * Index 0 (valid SFP) and the last index (unknown status) have no remedy.
 */
static char *be_port_misconfig_remedy_desc[] = {
	"",
	"Reseat optics. If issue not resolved, replace",
	"Remove one optic or install matching pair of optics",
	"Replace with compatible optics for card to function",
	""
};
38 static struct be_cmd_priv_map cmd_priv_map
[] = {
40 OPCODE_ETH_ACPI_WOL_MAGIC_CONFIG
,
42 BE_PRIV_LNKMGMT
| BE_PRIV_VHADM
|
43 BE_PRIV_DEVCFG
| BE_PRIV_DEVSEC
46 OPCODE_COMMON_GET_FLOW_CONTROL
,
48 BE_PRIV_LNKQUERY
| BE_PRIV_VHADM
|
49 BE_PRIV_DEVCFG
| BE_PRIV_DEVSEC
52 OPCODE_COMMON_SET_FLOW_CONTROL
,
54 BE_PRIV_LNKMGMT
| BE_PRIV_VHADM
|
55 BE_PRIV_DEVCFG
| BE_PRIV_DEVSEC
58 OPCODE_ETH_GET_PPORT_STATS
,
60 BE_PRIV_LNKMGMT
| BE_PRIV_VHADM
|
61 BE_PRIV_DEVCFG
| BE_PRIV_DEVSEC
64 OPCODE_COMMON_GET_PHY_DETAILS
,
66 BE_PRIV_LNKMGMT
| BE_PRIV_VHADM
|
67 BE_PRIV_DEVCFG
| BE_PRIV_DEVSEC
71 static bool be_cmd_allowed(struct be_adapter
*adapter
, u8 opcode
, u8 subsystem
)
74 int num_entries
= sizeof(cmd_priv_map
)/sizeof(struct be_cmd_priv_map
);
75 u32 cmd_privileges
= adapter
->cmd_privileges
;
77 for (i
= 0; i
< num_entries
; i
++)
78 if (opcode
== cmd_priv_map
[i
].opcode
&&
79 subsystem
== cmd_priv_map
[i
].subsystem
)
80 if (!(cmd_privileges
& cmd_priv_map
[i
].priv_mask
))
86 static inline void *embedded_payload(struct be_mcc_wrb
*wrb
)
88 return wrb
->payload
.embedded_payload
;
91 static void be_mcc_notify(struct be_adapter
*adapter
)
93 struct be_queue_info
*mccq
= &adapter
->mcc_obj
.q
;
96 if (be_error(adapter
))
99 val
|= mccq
->id
& DB_MCCQ_RING_ID_MASK
;
100 val
|= 1 << DB_MCCQ_NUM_POSTED_SHIFT
;
103 iowrite32(val
, adapter
->db
+ DB_MCCQ_OFFSET
);
106 /* To check if valid bit is set, check the entire word as we don't know
107 * the endianness of the data (old entry is host endian while a new entry is
109 static inline bool be_mcc_compl_is_new(struct be_mcc_compl
*compl)
113 if (compl->flags
!= 0) {
114 flags
= le32_to_cpu(compl->flags
);
115 if (flags
& CQE_FLAGS_VALID_MASK
) {
116 compl->flags
= flags
;
123 /* Need to reset the entire word that houses the valid bit */
124 static inline void be_mcc_compl_use(struct be_mcc_compl
*compl)
129 static struct be_cmd_resp_hdr
*be_decode_resp_hdr(u32 tag0
, u32 tag1
)
134 addr
= ((addr
<< 16) << 16) | tag0
;
138 static bool be_skip_err_log(u8 opcode
, u16 base_status
, u16 addl_status
)
140 if (base_status
== MCC_STATUS_NOT_SUPPORTED
||
141 base_status
== MCC_STATUS_ILLEGAL_REQUEST
||
142 addl_status
== MCC_ADDL_STATUS_TOO_MANY_INTERFACES
||
143 addl_status
== MCC_ADDL_STATUS_INSUFFICIENT_VLANS
||
144 (opcode
== OPCODE_COMMON_WRITE_FLASHROM
&&
145 (base_status
== MCC_STATUS_ILLEGAL_FIELD
||
146 addl_status
== MCC_ADDL_STATUS_FLASH_IMAGE_CRC_MISMATCH
)))
152 /* Place holder for all the async MCC cmds wherein the caller is not in a busy
153 * loop (has not issued be_mcc_notify_wait())
155 static void be_async_cmd_process(struct be_adapter
*adapter
,
156 struct be_mcc_compl
*compl,
157 struct be_cmd_resp_hdr
*resp_hdr
)
159 enum mcc_base_status base_status
= base_status(compl->status
);
160 u8 opcode
= 0, subsystem
= 0;
163 opcode
= resp_hdr
->opcode
;
164 subsystem
= resp_hdr
->subsystem
;
167 if (opcode
== OPCODE_LOWLEVEL_LOOPBACK_TEST
&&
168 subsystem
== CMD_SUBSYSTEM_LOWLEVEL
) {
169 complete(&adapter
->et_cmd_compl
);
173 if ((opcode
== OPCODE_COMMON_WRITE_FLASHROM
||
174 opcode
== OPCODE_COMMON_WRITE_OBJECT
) &&
175 subsystem
== CMD_SUBSYSTEM_COMMON
) {
176 adapter
->flash_status
= compl->status
;
177 complete(&adapter
->et_cmd_compl
);
181 if ((opcode
== OPCODE_ETH_GET_STATISTICS
||
182 opcode
== OPCODE_ETH_GET_PPORT_STATS
) &&
183 subsystem
== CMD_SUBSYSTEM_ETH
&&
184 base_status
== MCC_STATUS_SUCCESS
) {
185 be_parse_stats(adapter
);
186 adapter
->stats_cmd_sent
= false;
190 if (opcode
== OPCODE_COMMON_GET_CNTL_ADDITIONAL_ATTRIBUTES
&&
191 subsystem
== CMD_SUBSYSTEM_COMMON
) {
192 if (base_status
== MCC_STATUS_SUCCESS
) {
193 struct be_cmd_resp_get_cntl_addnl_attribs
*resp
=
195 adapter
->drv_stats
.be_on_die_temperature
=
196 resp
->on_die_temperature
;
198 adapter
->be_get_temp_freq
= 0;
204 static int be_mcc_compl_process(struct be_adapter
*adapter
,
205 struct be_mcc_compl
*compl)
207 enum mcc_base_status base_status
;
208 enum mcc_addl_status addl_status
;
209 struct be_cmd_resp_hdr
*resp_hdr
;
210 u8 opcode
= 0, subsystem
= 0;
212 /* Just swap the status to host endian; mcc tag is opaquely copied
214 be_dws_le_to_cpu(compl, 4);
216 base_status
= base_status(compl->status
);
217 addl_status
= addl_status(compl->status
);
219 resp_hdr
= be_decode_resp_hdr(compl->tag0
, compl->tag1
);
221 opcode
= resp_hdr
->opcode
;
222 subsystem
= resp_hdr
->subsystem
;
225 be_async_cmd_process(adapter
, compl, resp_hdr
);
227 if (base_status
!= MCC_STATUS_SUCCESS
&&
228 !be_skip_err_log(opcode
, base_status
, addl_status
)) {
229 if (base_status
== MCC_STATUS_UNAUTHORIZED_REQUEST
) {
230 dev_warn(&adapter
->pdev
->dev
,
231 "VF is not privileged to issue opcode %d-%d\n",
234 dev_err(&adapter
->pdev
->dev
,
235 "opcode %d-%d failed:status %d-%d\n",
236 opcode
, subsystem
, base_status
, addl_status
);
239 return compl->status
;
242 /* Link state evt is a string of bytes; no need for endian swapping */
243 static void be_async_link_state_process(struct be_adapter
*adapter
,
244 struct be_mcc_compl
*compl)
246 struct be_async_event_link_state
*evt
=
247 (struct be_async_event_link_state
*)compl;
249 /* When link status changes, link speed must be re-queried from FW */
250 adapter
->phy
.link_speed
= -1;
252 /* On BEx the FW does not send a separate link status
253 * notification for physical and logical link.
254 * On other chips just process the logical link
255 * status notification
257 if (!BEx_chip(adapter
) &&
258 !(evt
->port_link_status
& LOGICAL_LINK_STATUS_MASK
))
261 /* For the initial link status do not rely on the ASYNC event as
262 * it may not be received in some cases.
264 if (adapter
->flags
& BE_FLAGS_LINK_STATUS_INIT
)
265 be_link_status_update(adapter
,
266 evt
->port_link_status
& LINK_STATUS_MASK
);
269 static void be_async_port_misconfig_event_process(struct be_adapter
*adapter
,
270 struct be_mcc_compl
*compl)
272 struct be_async_event_misconfig_port
*evt
=
273 (struct be_async_event_misconfig_port
*)compl;
274 u32 sfp_mismatch_evt
= le32_to_cpu(evt
->event_data_word1
);
275 struct device
*dev
= &adapter
->pdev
->dev
;
276 u8 port_misconfig_evt
;
279 ((sfp_mismatch_evt
>> (adapter
->hba_port_num
* 8)) & 0xff);
281 /* Log an error message that would allow a user to determine
282 * whether the SFPs have an issue
284 dev_info(dev
, "Port %c: %s %s", adapter
->port_name
,
285 be_port_misconfig_evt_desc
[port_misconfig_evt
],
286 be_port_misconfig_remedy_desc
[port_misconfig_evt
]);
288 if (port_misconfig_evt
== INCOMPATIBLE_SFP
)
289 adapter
->flags
|= BE_FLAGS_EVT_INCOMPATIBLE_SFP
;
292 /* Grp5 CoS Priority evt */
293 static void be_async_grp5_cos_priority_process(struct be_adapter
*adapter
,
294 struct be_mcc_compl
*compl)
296 struct be_async_event_grp5_cos_priority
*evt
=
297 (struct be_async_event_grp5_cos_priority
*)compl;
300 adapter
->vlan_prio_bmap
= evt
->available_priority_bmap
;
301 adapter
->recommended_prio
&= ~VLAN_PRIO_MASK
;
302 adapter
->recommended_prio
=
303 evt
->reco_default_priority
<< VLAN_PRIO_SHIFT
;
307 /* Grp5 QOS Speed evt: qos_link_speed is in units of 10 Mbps */
308 static void be_async_grp5_qos_speed_process(struct be_adapter
*adapter
,
309 struct be_mcc_compl
*compl)
311 struct be_async_event_grp5_qos_link_speed
*evt
=
312 (struct be_async_event_grp5_qos_link_speed
*)compl;
314 if (adapter
->phy
.link_speed
>= 0 &&
315 evt
->physical_port
== adapter
->port_num
)
316 adapter
->phy
.link_speed
= le16_to_cpu(evt
->qos_link_speed
) * 10;
320 static void be_async_grp5_pvid_state_process(struct be_adapter
*adapter
,
321 struct be_mcc_compl
*compl)
323 struct be_async_event_grp5_pvid_state
*evt
=
324 (struct be_async_event_grp5_pvid_state
*)compl;
327 adapter
->pvid
= le16_to_cpu(evt
->tag
) & VLAN_VID_MASK
;
328 dev_info(&adapter
->pdev
->dev
, "LPVID: %d\n", adapter
->pvid
);
334 static void be_async_grp5_evt_process(struct be_adapter
*adapter
,
335 struct be_mcc_compl
*compl)
337 u8 event_type
= (compl->flags
>> ASYNC_EVENT_TYPE_SHIFT
) &
338 ASYNC_EVENT_TYPE_MASK
;
340 switch (event_type
) {
341 case ASYNC_EVENT_COS_PRIORITY
:
342 be_async_grp5_cos_priority_process(adapter
, compl);
344 case ASYNC_EVENT_QOS_SPEED
:
345 be_async_grp5_qos_speed_process(adapter
, compl);
347 case ASYNC_EVENT_PVID_STATE
:
348 be_async_grp5_pvid_state_process(adapter
, compl);
355 static void be_async_dbg_evt_process(struct be_adapter
*adapter
,
356 struct be_mcc_compl
*cmp
)
359 struct be_async_event_qnq
*evt
= (struct be_async_event_qnq
*)cmp
;
361 event_type
= (cmp
->flags
>> ASYNC_EVENT_TYPE_SHIFT
) &
362 ASYNC_EVENT_TYPE_MASK
;
364 switch (event_type
) {
365 case ASYNC_DEBUG_EVENT_TYPE_QNQ
:
367 adapter
->qnq_vid
= le16_to_cpu(evt
->vlan_tag
);
368 adapter
->flags
|= BE_FLAGS_QNQ_ASYNC_EVT_RCVD
;
371 dev_warn(&adapter
->pdev
->dev
, "Unknown debug event 0x%x!\n",
377 static void be_async_sliport_evt_process(struct be_adapter
*adapter
,
378 struct be_mcc_compl
*cmp
)
380 u8 event_type
= (cmp
->flags
>> ASYNC_EVENT_TYPE_SHIFT
) &
381 ASYNC_EVENT_TYPE_MASK
;
383 if (event_type
== ASYNC_EVENT_PORT_MISCONFIG
)
384 be_async_port_misconfig_event_process(adapter
, cmp
);
387 static inline bool is_link_state_evt(u32 flags
)
389 return ((flags
>> ASYNC_EVENT_CODE_SHIFT
) & ASYNC_EVENT_CODE_MASK
) ==
390 ASYNC_EVENT_CODE_LINK_STATE
;
393 static inline bool is_grp5_evt(u32 flags
)
395 return ((flags
>> ASYNC_EVENT_CODE_SHIFT
) & ASYNC_EVENT_CODE_MASK
) ==
396 ASYNC_EVENT_CODE_GRP_5
;
399 static inline bool is_dbg_evt(u32 flags
)
401 return ((flags
>> ASYNC_EVENT_CODE_SHIFT
) & ASYNC_EVENT_CODE_MASK
) ==
402 ASYNC_EVENT_CODE_QNQ
;
405 static inline bool is_sliport_evt(u32 flags
)
407 return ((flags
>> ASYNC_EVENT_CODE_SHIFT
) & ASYNC_EVENT_CODE_MASK
) ==
408 ASYNC_EVENT_CODE_SLIPORT
;
411 static void be_mcc_event_process(struct be_adapter
*adapter
,
412 struct be_mcc_compl
*compl)
414 if (is_link_state_evt(compl->flags
))
415 be_async_link_state_process(adapter
, compl);
416 else if (is_grp5_evt(compl->flags
))
417 be_async_grp5_evt_process(adapter
, compl);
418 else if (is_dbg_evt(compl->flags
))
419 be_async_dbg_evt_process(adapter
, compl);
420 else if (is_sliport_evt(compl->flags
))
421 be_async_sliport_evt_process(adapter
, compl);
424 static struct be_mcc_compl
*be_mcc_compl_get(struct be_adapter
*adapter
)
426 struct be_queue_info
*mcc_cq
= &adapter
->mcc_obj
.cq
;
427 struct be_mcc_compl
*compl = queue_tail_node(mcc_cq
);
429 if (be_mcc_compl_is_new(compl)) {
430 queue_tail_inc(mcc_cq
);
436 void be_async_mcc_enable(struct be_adapter
*adapter
)
438 spin_lock_bh(&adapter
->mcc_cq_lock
);
440 be_cq_notify(adapter
, adapter
->mcc_obj
.cq
.id
, true, 0);
441 adapter
->mcc_obj
.rearm_cq
= true;
443 spin_unlock_bh(&adapter
->mcc_cq_lock
);
446 void be_async_mcc_disable(struct be_adapter
*adapter
)
448 spin_lock_bh(&adapter
->mcc_cq_lock
);
450 adapter
->mcc_obj
.rearm_cq
= false;
451 be_cq_notify(adapter
, adapter
->mcc_obj
.cq
.id
, false, 0);
453 spin_unlock_bh(&adapter
->mcc_cq_lock
);
456 int be_process_mcc(struct be_adapter
*adapter
)
458 struct be_mcc_compl
*compl;
459 int num
= 0, status
= 0;
460 struct be_mcc_obj
*mcc_obj
= &adapter
->mcc_obj
;
462 spin_lock(&adapter
->mcc_cq_lock
);
464 while ((compl = be_mcc_compl_get(adapter
))) {
465 if (compl->flags
& CQE_FLAGS_ASYNC_MASK
) {
466 be_mcc_event_process(adapter
, compl);
467 } else if (compl->flags
& CQE_FLAGS_COMPLETED_MASK
) {
468 status
= be_mcc_compl_process(adapter
, compl);
469 atomic_dec(&mcc_obj
->q
.used
);
471 be_mcc_compl_use(compl);
476 be_cq_notify(adapter
, mcc_obj
->cq
.id
, mcc_obj
->rearm_cq
, num
);
478 spin_unlock(&adapter
->mcc_cq_lock
);
482 /* Wait till no more pending mcc requests are present */
483 static int be_mcc_wait_compl(struct be_adapter
*adapter
)
485 #define mcc_timeout 120000 /* 12s timeout */
487 struct be_mcc_obj
*mcc_obj
= &adapter
->mcc_obj
;
489 for (i
= 0; i
< mcc_timeout
; i
++) {
490 if (be_error(adapter
))
494 status
= be_process_mcc(adapter
);
497 if (atomic_read(&mcc_obj
->q
.used
) == 0)
501 if (i
== mcc_timeout
) {
502 dev_err(&adapter
->pdev
->dev
, "FW not responding\n");
503 adapter
->fw_timeout
= true;
509 /* Notify MCC requests and wait for completion */
510 static int be_mcc_notify_wait(struct be_adapter
*adapter
)
513 struct be_mcc_wrb
*wrb
;
514 struct be_mcc_obj
*mcc_obj
= &adapter
->mcc_obj
;
515 u16 index
= mcc_obj
->q
.head
;
516 struct be_cmd_resp_hdr
*resp
;
518 index_dec(&index
, mcc_obj
->q
.len
);
519 wrb
= queue_index_node(&mcc_obj
->q
, index
);
521 resp
= be_decode_resp_hdr(wrb
->tag0
, wrb
->tag1
);
523 be_mcc_notify(adapter
);
525 status
= be_mcc_wait_compl(adapter
);
529 status
= (resp
->base_status
|
530 ((resp
->addl_status
& CQE_ADDL_STATUS_MASK
) <<
531 CQE_ADDL_STATUS_SHIFT
));
536 static int be_mbox_db_ready_wait(struct be_adapter
*adapter
, void __iomem
*db
)
542 if (be_error(adapter
))
545 ready
= ioread32(db
);
546 if (ready
== 0xffffffff)
549 ready
&= MPU_MAILBOX_DB_RDY_MASK
;
554 dev_err(&adapter
->pdev
->dev
, "FW not responding\n");
555 adapter
->fw_timeout
= true;
556 be_detect_error(adapter
);
568 * Insert the mailbox address into the doorbell in two steps
569 * Polls on the mbox doorbell till a command completion (or a timeout) occurs
571 static int be_mbox_notify_wait(struct be_adapter
*adapter
)
575 void __iomem
*db
= adapter
->db
+ MPU_MAILBOX_DB_OFFSET
;
576 struct be_dma_mem
*mbox_mem
= &adapter
->mbox_mem
;
577 struct be_mcc_mailbox
*mbox
= mbox_mem
->va
;
578 struct be_mcc_compl
*compl = &mbox
->compl;
580 /* wait for ready to be set */
581 status
= be_mbox_db_ready_wait(adapter
, db
);
585 val
|= MPU_MAILBOX_DB_HI_MASK
;
586 /* at bits 2 - 31 place mbox dma addr msb bits 34 - 63 */
587 val
|= (upper_32_bits(mbox_mem
->dma
) >> 2) << 2;
590 /* wait for ready to be set */
591 status
= be_mbox_db_ready_wait(adapter
, db
);
596 /* at bits 2 - 31 place mbox dma addr lsb bits 4 - 33 */
597 val
|= (u32
)(mbox_mem
->dma
>> 4) << 2;
600 status
= be_mbox_db_ready_wait(adapter
, db
);
604 /* A cq entry has been made now */
605 if (be_mcc_compl_is_new(compl)) {
606 status
= be_mcc_compl_process(adapter
, &mbox
->compl);
607 be_mcc_compl_use(compl);
611 dev_err(&adapter
->pdev
->dev
, "invalid mailbox completion\n");
617 static u16
be_POST_stage_get(struct be_adapter
*adapter
)
621 if (BEx_chip(adapter
))
622 sem
= ioread32(adapter
->csr
+ SLIPORT_SEMAPHORE_OFFSET_BEx
);
624 pci_read_config_dword(adapter
->pdev
,
625 SLIPORT_SEMAPHORE_OFFSET_SH
, &sem
);
627 return sem
& POST_STAGE_MASK
;
630 static int lancer_wait_ready(struct be_adapter
*adapter
)
632 #define SLIPORT_READY_TIMEOUT 30
636 for (i
= 0; i
< SLIPORT_READY_TIMEOUT
; i
++) {
637 sliport_status
= ioread32(adapter
->db
+ SLIPORT_STATUS_OFFSET
);
638 if (sliport_status
& SLIPORT_STATUS_RDY_MASK
)
641 if (sliport_status
& SLIPORT_STATUS_ERR_MASK
&&
642 !(sliport_status
& SLIPORT_STATUS_RN_MASK
))
648 return sliport_status
? : -1;
651 int be_fw_wait_ready(struct be_adapter
*adapter
)
654 int status
, timeout
= 0;
655 struct device
*dev
= &adapter
->pdev
->dev
;
657 if (lancer_chip(adapter
)) {
658 status
= lancer_wait_ready(adapter
);
667 /* There's no means to poll POST state on BE2/3 VFs */
668 if (BEx_chip(adapter
) && be_virtfn(adapter
))
671 stage
= be_POST_stage_get(adapter
);
672 if (stage
== POST_STAGE_ARMFW_RDY
)
675 dev_info(dev
, "Waiting for POST, %ds elapsed\n", timeout
);
676 if (msleep_interruptible(2000)) {
677 dev_err(dev
, "Waiting for POST aborted\n");
681 } while (timeout
< 60);
684 dev_err(dev
, "POST timeout; stage=%#x\n", stage
);
688 static inline struct be_sge
*nonembedded_sgl(struct be_mcc_wrb
*wrb
)
690 return &wrb
->payload
.sgl
[0];
693 static inline void fill_wrb_tags(struct be_mcc_wrb
*wrb
, unsigned long addr
)
695 wrb
->tag0
= addr
& 0xFFFFFFFF;
696 wrb
->tag1
= upper_32_bits(addr
);
699 /* Don't touch the hdr after it's prepared */
700 /* mem will be NULL for embedded commands */
701 static void be_wrb_cmd_hdr_prepare(struct be_cmd_req_hdr
*req_hdr
,
702 u8 subsystem
, u8 opcode
, int cmd_len
,
703 struct be_mcc_wrb
*wrb
,
704 struct be_dma_mem
*mem
)
708 req_hdr
->opcode
= opcode
;
709 req_hdr
->subsystem
= subsystem
;
710 req_hdr
->request_length
= cpu_to_le32(cmd_len
- sizeof(*req_hdr
));
711 req_hdr
->version
= 0;
712 fill_wrb_tags(wrb
, (ulong
) req_hdr
);
713 wrb
->payload_length
= cmd_len
;
715 wrb
->embedded
|= (1 & MCC_WRB_SGE_CNT_MASK
) <<
716 MCC_WRB_SGE_CNT_SHIFT
;
717 sge
= nonembedded_sgl(wrb
);
718 sge
->pa_hi
= cpu_to_le32(upper_32_bits(mem
->dma
));
719 sge
->pa_lo
= cpu_to_le32(mem
->dma
& 0xFFFFFFFF);
720 sge
->len
= cpu_to_le32(mem
->size
);
722 wrb
->embedded
|= MCC_WRB_EMBEDDED_MASK
;
723 be_dws_cpu_to_le(wrb
, 8);
726 static void be_cmd_page_addrs_prepare(struct phys_addr
*pages
, u32 max_pages
,
727 struct be_dma_mem
*mem
)
729 int i
, buf_pages
= min(PAGES_4K_SPANNED(mem
->va
, mem
->size
), max_pages
);
730 u64 dma
= (u64
)mem
->dma
;
732 for (i
= 0; i
< buf_pages
; i
++) {
733 pages
[i
].lo
= cpu_to_le32(dma
& 0xFFFFFFFF);
734 pages
[i
].hi
= cpu_to_le32(upper_32_bits(dma
));
739 static inline struct be_mcc_wrb
*wrb_from_mbox(struct be_adapter
*adapter
)
741 struct be_dma_mem
*mbox_mem
= &adapter
->mbox_mem
;
742 struct be_mcc_wrb
*wrb
743 = &((struct be_mcc_mailbox
*)(mbox_mem
->va
))->wrb
;
744 memset(wrb
, 0, sizeof(*wrb
));
748 static struct be_mcc_wrb
*wrb_from_mccq(struct be_adapter
*adapter
)
750 struct be_queue_info
*mccq
= &adapter
->mcc_obj
.q
;
751 struct be_mcc_wrb
*wrb
;
756 if (atomic_read(&mccq
->used
) >= mccq
->len
)
759 wrb
= queue_head_node(mccq
);
760 queue_head_inc(mccq
);
761 atomic_inc(&mccq
->used
);
762 memset(wrb
, 0, sizeof(*wrb
));
766 static bool use_mcc(struct be_adapter
*adapter
)
768 return adapter
->mcc_obj
.q
.created
;
771 /* Must be used only in process context */
772 static int be_cmd_lock(struct be_adapter
*adapter
)
774 if (use_mcc(adapter
)) {
775 spin_lock_bh(&adapter
->mcc_lock
);
778 return mutex_lock_interruptible(&adapter
->mbox_lock
);
782 /* Must be used only in process context */
783 static void be_cmd_unlock(struct be_adapter
*adapter
)
785 if (use_mcc(adapter
))
786 spin_unlock_bh(&adapter
->mcc_lock
);
788 return mutex_unlock(&adapter
->mbox_lock
);
791 static struct be_mcc_wrb
*be_cmd_copy(struct be_adapter
*adapter
,
792 struct be_mcc_wrb
*wrb
)
794 struct be_mcc_wrb
*dest_wrb
;
796 if (use_mcc(adapter
)) {
797 dest_wrb
= wrb_from_mccq(adapter
);
801 dest_wrb
= wrb_from_mbox(adapter
);
804 memcpy(dest_wrb
, wrb
, sizeof(*wrb
));
805 if (wrb
->embedded
& cpu_to_le32(MCC_WRB_EMBEDDED_MASK
))
806 fill_wrb_tags(dest_wrb
, (ulong
) embedded_payload(wrb
));
811 /* Must be used only in process context */
812 static int be_cmd_notify_wait(struct be_adapter
*adapter
,
813 struct be_mcc_wrb
*wrb
)
815 struct be_mcc_wrb
*dest_wrb
;
818 status
= be_cmd_lock(adapter
);
822 dest_wrb
= be_cmd_copy(adapter
, wrb
);
826 if (use_mcc(adapter
))
827 status
= be_mcc_notify_wait(adapter
);
829 status
= be_mbox_notify_wait(adapter
);
832 memcpy(wrb
, dest_wrb
, sizeof(*wrb
));
834 be_cmd_unlock(adapter
);
838 /* Tell fw we're about to start firing cmds by writing a
839 * special pattern across the wrb hdr; uses mbox
841 int be_cmd_fw_init(struct be_adapter
*adapter
)
846 if (lancer_chip(adapter
))
849 if (mutex_lock_interruptible(&adapter
->mbox_lock
))
852 wrb
= (u8
*)wrb_from_mbox(adapter
);
862 status
= be_mbox_notify_wait(adapter
);
864 mutex_unlock(&adapter
->mbox_lock
);
868 /* Tell fw we're done with firing cmds by writing a
869 * special pattern across the wrb hdr; uses mbox
871 int be_cmd_fw_clean(struct be_adapter
*adapter
)
876 if (lancer_chip(adapter
))
879 if (mutex_lock_interruptible(&adapter
->mbox_lock
))
882 wrb
= (u8
*)wrb_from_mbox(adapter
);
892 status
= be_mbox_notify_wait(adapter
);
894 mutex_unlock(&adapter
->mbox_lock
);
898 int be_cmd_eq_create(struct be_adapter
*adapter
, struct be_eq_obj
*eqo
)
900 struct be_mcc_wrb
*wrb
;
901 struct be_cmd_req_eq_create
*req
;
902 struct be_dma_mem
*q_mem
= &eqo
->q
.dma_mem
;
905 if (mutex_lock_interruptible(&adapter
->mbox_lock
))
908 wrb
= wrb_from_mbox(adapter
);
909 req
= embedded_payload(wrb
);
911 be_wrb_cmd_hdr_prepare(&req
->hdr
, CMD_SUBSYSTEM_COMMON
,
912 OPCODE_COMMON_EQ_CREATE
, sizeof(*req
), wrb
,
915 /* Support for EQ_CREATEv2 available only SH-R onwards */
916 if (!(BEx_chip(adapter
) || lancer_chip(adapter
)))
919 req
->hdr
.version
= ver
;
920 req
->num_pages
= cpu_to_le16(PAGES_4K_SPANNED(q_mem
->va
, q_mem
->size
));
922 AMAP_SET_BITS(struct amap_eq_context
, valid
, req
->context
, 1);
924 AMAP_SET_BITS(struct amap_eq_context
, size
, req
->context
, 0);
925 AMAP_SET_BITS(struct amap_eq_context
, count
, req
->context
,
926 __ilog2_u32(eqo
->q
.len
/ 256));
927 be_dws_cpu_to_le(req
->context
, sizeof(req
->context
));
929 be_cmd_page_addrs_prepare(req
->pages
, ARRAY_SIZE(req
->pages
), q_mem
);
931 status
= be_mbox_notify_wait(adapter
);
933 struct be_cmd_resp_eq_create
*resp
= embedded_payload(wrb
);
935 eqo
->q
.id
= le16_to_cpu(resp
->eq_id
);
937 (ver
== 2) ? le16_to_cpu(resp
->msix_idx
) : eqo
->idx
;
938 eqo
->q
.created
= true;
941 mutex_unlock(&adapter
->mbox_lock
);
946 int be_cmd_mac_addr_query(struct be_adapter
*adapter
, u8
*mac_addr
,
947 bool permanent
, u32 if_handle
, u32 pmac_id
)
949 struct be_mcc_wrb
*wrb
;
950 struct be_cmd_req_mac_query
*req
;
953 spin_lock_bh(&adapter
->mcc_lock
);
955 wrb
= wrb_from_mccq(adapter
);
960 req
= embedded_payload(wrb
);
962 be_wrb_cmd_hdr_prepare(&req
->hdr
, CMD_SUBSYSTEM_COMMON
,
963 OPCODE_COMMON_NTWK_MAC_QUERY
, sizeof(*req
), wrb
,
965 req
->type
= MAC_ADDRESS_TYPE_NETWORK
;
969 req
->if_id
= cpu_to_le16((u16
)if_handle
);
970 req
->pmac_id
= cpu_to_le32(pmac_id
);
974 status
= be_mcc_notify_wait(adapter
);
976 struct be_cmd_resp_mac_query
*resp
= embedded_payload(wrb
);
978 memcpy(mac_addr
, resp
->mac
.addr
, ETH_ALEN
);
982 spin_unlock_bh(&adapter
->mcc_lock
);
986 /* Uses synchronous MCCQ */
987 int be_cmd_pmac_add(struct be_adapter
*adapter
, u8
*mac_addr
,
988 u32 if_id
, u32
*pmac_id
, u32 domain
)
990 struct be_mcc_wrb
*wrb
;
991 struct be_cmd_req_pmac_add
*req
;
994 spin_lock_bh(&adapter
->mcc_lock
);
996 wrb
= wrb_from_mccq(adapter
);
1001 req
= embedded_payload(wrb
);
1003 be_wrb_cmd_hdr_prepare(&req
->hdr
, CMD_SUBSYSTEM_COMMON
,
1004 OPCODE_COMMON_NTWK_PMAC_ADD
, sizeof(*req
), wrb
,
1007 req
->hdr
.domain
= domain
;
1008 req
->if_id
= cpu_to_le32(if_id
);
1009 memcpy(req
->mac_address
, mac_addr
, ETH_ALEN
);
1011 status
= be_mcc_notify_wait(adapter
);
1013 struct be_cmd_resp_pmac_add
*resp
= embedded_payload(wrb
);
1015 *pmac_id
= le32_to_cpu(resp
->pmac_id
);
1019 spin_unlock_bh(&adapter
->mcc_lock
);
1021 if (status
== MCC_STATUS_UNAUTHORIZED_REQUEST
)
1027 /* Uses synchronous MCCQ */
1028 int be_cmd_pmac_del(struct be_adapter
*adapter
, u32 if_id
, int pmac_id
, u32 dom
)
1030 struct be_mcc_wrb
*wrb
;
1031 struct be_cmd_req_pmac_del
*req
;
1037 spin_lock_bh(&adapter
->mcc_lock
);
1039 wrb
= wrb_from_mccq(adapter
);
1044 req
= embedded_payload(wrb
);
1046 be_wrb_cmd_hdr_prepare(&req
->hdr
, CMD_SUBSYSTEM_COMMON
,
1047 OPCODE_COMMON_NTWK_PMAC_DEL
, sizeof(*req
),
1050 req
->hdr
.domain
= dom
;
1051 req
->if_id
= cpu_to_le32(if_id
);
1052 req
->pmac_id
= cpu_to_le32(pmac_id
);
1054 status
= be_mcc_notify_wait(adapter
);
1057 spin_unlock_bh(&adapter
->mcc_lock
);
1062 int be_cmd_cq_create(struct be_adapter
*adapter
, struct be_queue_info
*cq
,
1063 struct be_queue_info
*eq
, bool no_delay
, int coalesce_wm
)
1065 struct be_mcc_wrb
*wrb
;
1066 struct be_cmd_req_cq_create
*req
;
1067 struct be_dma_mem
*q_mem
= &cq
->dma_mem
;
1071 if (mutex_lock_interruptible(&adapter
->mbox_lock
))
1074 wrb
= wrb_from_mbox(adapter
);
1075 req
= embedded_payload(wrb
);
1076 ctxt
= &req
->context
;
1078 be_wrb_cmd_hdr_prepare(&req
->hdr
, CMD_SUBSYSTEM_COMMON
,
1079 OPCODE_COMMON_CQ_CREATE
, sizeof(*req
), wrb
,
1082 req
->num_pages
= cpu_to_le16(PAGES_4K_SPANNED(q_mem
->va
, q_mem
->size
));
1084 if (BEx_chip(adapter
)) {
1085 AMAP_SET_BITS(struct amap_cq_context_be
, coalescwm
, ctxt
,
1087 AMAP_SET_BITS(struct amap_cq_context_be
, nodelay
,
1089 AMAP_SET_BITS(struct amap_cq_context_be
, count
, ctxt
,
1090 __ilog2_u32(cq
->len
/ 256));
1091 AMAP_SET_BITS(struct amap_cq_context_be
, valid
, ctxt
, 1);
1092 AMAP_SET_BITS(struct amap_cq_context_be
, eventable
, ctxt
, 1);
1093 AMAP_SET_BITS(struct amap_cq_context_be
, eqid
, ctxt
, eq
->id
);
1095 req
->hdr
.version
= 2;
1096 req
->page_size
= 1; /* 1 for 4K */
1098 /* coalesce-wm field in this cmd is not relevant to Lancer.
1099 * Lancer uses COMMON_MODIFY_CQ to set this field
1101 if (!lancer_chip(adapter
))
1102 AMAP_SET_BITS(struct amap_cq_context_v2
, coalescwm
,
1104 AMAP_SET_BITS(struct amap_cq_context_v2
, nodelay
, ctxt
,
1106 AMAP_SET_BITS(struct amap_cq_context_v2
, count
, ctxt
,
1107 __ilog2_u32(cq
->len
/ 256));
1108 AMAP_SET_BITS(struct amap_cq_context_v2
, valid
, ctxt
, 1);
1109 AMAP_SET_BITS(struct amap_cq_context_v2
, eventable
, ctxt
, 1);
1110 AMAP_SET_BITS(struct amap_cq_context_v2
, eqid
, ctxt
, eq
->id
);
1113 be_dws_cpu_to_le(ctxt
, sizeof(req
->context
));
1115 be_cmd_page_addrs_prepare(req
->pages
, ARRAY_SIZE(req
->pages
), q_mem
);
1117 status
= be_mbox_notify_wait(adapter
);
1119 struct be_cmd_resp_cq_create
*resp
= embedded_payload(wrb
);
1121 cq
->id
= le16_to_cpu(resp
->cq_id
);
1125 mutex_unlock(&adapter
->mbox_lock
);
1130 static u32
be_encoded_q_len(int q_len
)
1132 u32 len_encoded
= fls(q_len
); /* log2(len) + 1 */
1134 if (len_encoded
== 16)
1139 static int be_cmd_mccq_ext_create(struct be_adapter
*adapter
,
1140 struct be_queue_info
*mccq
,
1141 struct be_queue_info
*cq
)
1143 struct be_mcc_wrb
*wrb
;
1144 struct be_cmd_req_mcc_ext_create
*req
;
1145 struct be_dma_mem
*q_mem
= &mccq
->dma_mem
;
1149 if (mutex_lock_interruptible(&adapter
->mbox_lock
))
1152 wrb
= wrb_from_mbox(adapter
);
1153 req
= embedded_payload(wrb
);
1154 ctxt
= &req
->context
;
1156 be_wrb_cmd_hdr_prepare(&req
->hdr
, CMD_SUBSYSTEM_COMMON
,
1157 OPCODE_COMMON_MCC_CREATE_EXT
, sizeof(*req
), wrb
,
1160 req
->num_pages
= cpu_to_le16(PAGES_4K_SPANNED(q_mem
->va
, q_mem
->size
));
1161 if (BEx_chip(adapter
)) {
1162 AMAP_SET_BITS(struct amap_mcc_context_be
, valid
, ctxt
, 1);
1163 AMAP_SET_BITS(struct amap_mcc_context_be
, ring_size
, ctxt
,
1164 be_encoded_q_len(mccq
->len
));
1165 AMAP_SET_BITS(struct amap_mcc_context_be
, cq_id
, ctxt
, cq
->id
);
1167 req
->hdr
.version
= 1;
1168 req
->cq_id
= cpu_to_le16(cq
->id
);
1170 AMAP_SET_BITS(struct amap_mcc_context_v1
, ring_size
, ctxt
,
1171 be_encoded_q_len(mccq
->len
));
1172 AMAP_SET_BITS(struct amap_mcc_context_v1
, valid
, ctxt
, 1);
1173 AMAP_SET_BITS(struct amap_mcc_context_v1
, async_cq_id
,
1175 AMAP_SET_BITS(struct amap_mcc_context_v1
, async_cq_valid
,
1179 /* Subscribe to Link State, Sliport Event and Group 5 Events
1180 * (bits 1, 5 and 17 set)
1182 req
->async_event_bitmap
[0] =
1183 cpu_to_le32(BIT(ASYNC_EVENT_CODE_LINK_STATE
) |
1184 BIT(ASYNC_EVENT_CODE_GRP_5
) |
1185 BIT(ASYNC_EVENT_CODE_QNQ
) |
1186 BIT(ASYNC_EVENT_CODE_SLIPORT
));
1188 be_dws_cpu_to_le(ctxt
, sizeof(req
->context
));
1190 be_cmd_page_addrs_prepare(req
->pages
, ARRAY_SIZE(req
->pages
), q_mem
);
1192 status
= be_mbox_notify_wait(adapter
);
1194 struct be_cmd_resp_mcc_create
*resp
= embedded_payload(wrb
);
1196 mccq
->id
= le16_to_cpu(resp
->id
);
1197 mccq
->created
= true;
1199 mutex_unlock(&adapter
->mbox_lock
);
1204 static int be_cmd_mccq_org_create(struct be_adapter
*adapter
,
1205 struct be_queue_info
*mccq
,
1206 struct be_queue_info
*cq
)
1208 struct be_mcc_wrb
*wrb
;
1209 struct be_cmd_req_mcc_create
*req
;
1210 struct be_dma_mem
*q_mem
= &mccq
->dma_mem
;
1214 if (mutex_lock_interruptible(&adapter
->mbox_lock
))
1217 wrb
= wrb_from_mbox(adapter
);
1218 req
= embedded_payload(wrb
);
1219 ctxt
= &req
->context
;
1221 be_wrb_cmd_hdr_prepare(&req
->hdr
, CMD_SUBSYSTEM_COMMON
,
1222 OPCODE_COMMON_MCC_CREATE
, sizeof(*req
), wrb
,
1225 req
->num_pages
= cpu_to_le16(PAGES_4K_SPANNED(q_mem
->va
, q_mem
->size
));
1227 AMAP_SET_BITS(struct amap_mcc_context_be
, valid
, ctxt
, 1);
1228 AMAP_SET_BITS(struct amap_mcc_context_be
, ring_size
, ctxt
,
1229 be_encoded_q_len(mccq
->len
));
1230 AMAP_SET_BITS(struct amap_mcc_context_be
, cq_id
, ctxt
, cq
->id
);
1232 be_dws_cpu_to_le(ctxt
, sizeof(req
->context
));
1234 be_cmd_page_addrs_prepare(req
->pages
, ARRAY_SIZE(req
->pages
), q_mem
);
1236 status
= be_mbox_notify_wait(adapter
);
1238 struct be_cmd_resp_mcc_create
*resp
= embedded_payload(wrb
);
1240 mccq
->id
= le16_to_cpu(resp
->id
);
1241 mccq
->created
= true;
1244 mutex_unlock(&adapter
->mbox_lock
);
1248 int be_cmd_mccq_create(struct be_adapter
*adapter
,
1249 struct be_queue_info
*mccq
, struct be_queue_info
*cq
)
1253 status
= be_cmd_mccq_ext_create(adapter
, mccq
, cq
);
1254 if (status
&& BEx_chip(adapter
)) {
1255 dev_warn(&adapter
->pdev
->dev
, "Upgrade to F/W ver 2.102.235.0 "
1256 "or newer to avoid conflicting priorities between NIC "
1257 "and FCoE traffic");
1258 status
= be_cmd_mccq_org_create(adapter
, mccq
, cq
);
1263 int be_cmd_txq_create(struct be_adapter
*adapter
, struct be_tx_obj
*txo
)
1265 struct be_mcc_wrb wrb
= {0};
1266 struct be_cmd_req_eth_tx_create
*req
;
1267 struct be_queue_info
*txq
= &txo
->q
;
1268 struct be_queue_info
*cq
= &txo
->cq
;
1269 struct be_dma_mem
*q_mem
= &txq
->dma_mem
;
1270 int status
, ver
= 0;
1272 req
= embedded_payload(&wrb
);
1273 be_wrb_cmd_hdr_prepare(&req
->hdr
, CMD_SUBSYSTEM_ETH
,
1274 OPCODE_ETH_TX_CREATE
, sizeof(*req
), &wrb
, NULL
);
1276 if (lancer_chip(adapter
)) {
1277 req
->hdr
.version
= 1;
1278 } else if (BEx_chip(adapter
)) {
1279 if (adapter
->function_caps
& BE_FUNCTION_CAPS_SUPER_NIC
)
1280 req
->hdr
.version
= 2;
1281 } else { /* For SH */
1282 req
->hdr
.version
= 2;
1285 if (req
->hdr
.version
> 0)
1286 req
->if_id
= cpu_to_le16(adapter
->if_handle
);
1287 req
->num_pages
= PAGES_4K_SPANNED(q_mem
->va
, q_mem
->size
);
1288 req
->ulp_num
= BE_ULP1_NUM
;
1289 req
->type
= BE_ETH_TX_RING_TYPE_STANDARD
;
1290 req
->cq_id
= cpu_to_le16(cq
->id
);
1291 req
->queue_size
= be_encoded_q_len(txq
->len
);
1292 be_cmd_page_addrs_prepare(req
->pages
, ARRAY_SIZE(req
->pages
), q_mem
);
1293 ver
= req
->hdr
.version
;
1295 status
= be_cmd_notify_wait(adapter
, &wrb
);
1297 struct be_cmd_resp_eth_tx_create
*resp
= embedded_payload(&wrb
);
1299 txq
->id
= le16_to_cpu(resp
->cid
);
1301 txo
->db_offset
= le32_to_cpu(resp
->db_offset
);
1303 txo
->db_offset
= DB_TXULP1_OFFSET
;
1304 txq
->created
= true;
1311 int be_cmd_rxq_create(struct be_adapter
*adapter
,
1312 struct be_queue_info
*rxq
, u16 cq_id
, u16 frag_size
,
1313 u32 if_id
, u32 rss
, u8
*rss_id
)
1315 struct be_mcc_wrb
*wrb
;
1316 struct be_cmd_req_eth_rx_create
*req
;
1317 struct be_dma_mem
*q_mem
= &rxq
->dma_mem
;
1320 spin_lock_bh(&adapter
->mcc_lock
);
1322 wrb
= wrb_from_mccq(adapter
);
1327 req
= embedded_payload(wrb
);
1329 be_wrb_cmd_hdr_prepare(&req
->hdr
, CMD_SUBSYSTEM_ETH
,
1330 OPCODE_ETH_RX_CREATE
, sizeof(*req
), wrb
, NULL
);
1332 req
->cq_id
= cpu_to_le16(cq_id
);
1333 req
->frag_size
= fls(frag_size
) - 1;
1335 be_cmd_page_addrs_prepare(req
->pages
, ARRAY_SIZE(req
->pages
), q_mem
);
1336 req
->interface_id
= cpu_to_le32(if_id
);
1337 req
->max_frame_size
= cpu_to_le16(BE_MAX_JUMBO_FRAME_SIZE
);
1338 req
->rss_queue
= cpu_to_le32(rss
);
1340 status
= be_mcc_notify_wait(adapter
);
1342 struct be_cmd_resp_eth_rx_create
*resp
= embedded_payload(wrb
);
1344 rxq
->id
= le16_to_cpu(resp
->id
);
1345 rxq
->created
= true;
1346 *rss_id
= resp
->rss_id
;
1350 spin_unlock_bh(&adapter
->mcc_lock
);
1354 /* Generic destroyer function for all types of queues
1357 int be_cmd_q_destroy(struct be_adapter
*adapter
, struct be_queue_info
*q
,
1360 struct be_mcc_wrb
*wrb
;
1361 struct be_cmd_req_q_destroy
*req
;
1362 u8 subsys
= 0, opcode
= 0;
1365 if (mutex_lock_interruptible(&adapter
->mbox_lock
))
1368 wrb
= wrb_from_mbox(adapter
);
1369 req
= embedded_payload(wrb
);
1371 switch (queue_type
) {
1373 subsys
= CMD_SUBSYSTEM_COMMON
;
1374 opcode
= OPCODE_COMMON_EQ_DESTROY
;
1377 subsys
= CMD_SUBSYSTEM_COMMON
;
1378 opcode
= OPCODE_COMMON_CQ_DESTROY
;
1381 subsys
= CMD_SUBSYSTEM_ETH
;
1382 opcode
= OPCODE_ETH_TX_DESTROY
;
1385 subsys
= CMD_SUBSYSTEM_ETH
;
1386 opcode
= OPCODE_ETH_RX_DESTROY
;
1389 subsys
= CMD_SUBSYSTEM_COMMON
;
1390 opcode
= OPCODE_COMMON_MCC_DESTROY
;
1396 be_wrb_cmd_hdr_prepare(&req
->hdr
, subsys
, opcode
, sizeof(*req
), wrb
,
1398 req
->id
= cpu_to_le16(q
->id
);
1400 status
= be_mbox_notify_wait(adapter
);
1403 mutex_unlock(&adapter
->mbox_lock
);
/* Destroy an RX queue by issuing OPCODE_ETH_RX_DESTROY over the MCC queue.
 * Serializes on adapter->mcc_lock (BH-disabled spinlock) around WRB
 * allocation and the synchronous notify/wait.
 * NOTE(review): extraction dropped the !wrb error path, the err label and
 * the final return — confirm against the complete source.
 */
1408 int be_cmd_rxq_destroy(struct be_adapter
*adapter
, struct be_queue_info
*q
)
1410 struct be_mcc_wrb
*wrb
;
1411 struct be_cmd_req_q_destroy
*req
;
1414 spin_lock_bh(&adapter
->mcc_lock
);
1416 wrb
= wrb_from_mccq(adapter
);
/* Request is embedded in the WRB payload (no separate DMA buffer). */
1421 req
= embedded_payload(wrb
);
1423 be_wrb_cmd_hdr_prepare(&req
->hdr
, CMD_SUBSYSTEM_ETH
,
1424 OPCODE_ETH_RX_DESTROY
, sizeof(*req
), wrb
, NULL
);
/* Queue id goes out little-endian per firmware ABI. */
1425 req
->id
= cpu_to_le16(q
->id
);
1427 status
= be_mcc_notify_wait(adapter
);
1431 spin_unlock_bh(&adapter
->mcc_lock
);
1435 /* Create an rx filtering policy configuration on an i/f
1436 * Will use MBOX only if MCCQ has not been created.
1438 int be_cmd_if_create(struct be_adapter
*adapter
, u32 cap_flags
, u32 en_flags
,
1439 u32
*if_handle
, u32 domain
)
1441 struct be_mcc_wrb wrb
= {0};
1442 struct be_cmd_req_if_create
*req
;
1445 req
= embedded_payload(&wrb
);
1446 be_wrb_cmd_hdr_prepare(&req
->hdr
, CMD_SUBSYSTEM_COMMON
,
1447 OPCODE_COMMON_NTWK_INTERFACE_CREATE
,
1448 sizeof(*req
), &wrb
, NULL
);
1449 req
->hdr
.domain
= domain
;
1450 req
->capability_flags
= cpu_to_le32(cap_flags
);
1451 req
->enable_flags
= cpu_to_le32(en_flags
);
1452 req
->pmac_invalid
= true;
1454 status
= be_cmd_notify_wait(adapter
, &wrb
);
1456 struct be_cmd_resp_if_create
*resp
= embedded_payload(&wrb
);
1458 *if_handle
= le32_to_cpu(resp
->interface_id
);
1460 /* Hack to retrieve VF's pmac-id on BE3 */
1461 if (BE3_chip(adapter
) && !be_physfn(adapter
))
1462 adapter
->pmac_id
[0] = le32_to_cpu(resp
->pmac_id
);
/* Tear down a network interface (rx filtering policy) on the adapter.
 * interface_id == -1 is treated as "no interface" and skipped early.
 * Uses the MCC queue under adapter->mcc_lock; domain selects the
 * PF/VF privilege domain in the command header.
 * NOTE(review): extraction dropped the early-return body, the !wrb error
 * path and the final return — confirm against the complete source.
 */
1468 int be_cmd_if_destroy(struct be_adapter
*adapter
, int interface_id
, u32 domain
)
1470 struct be_mcc_wrb
*wrb
;
1471 struct be_cmd_req_if_destroy
*req
;
/* Nothing to destroy if the handle was never created. */
1474 if (interface_id
== -1)
1477 spin_lock_bh(&adapter
->mcc_lock
);
1479 wrb
= wrb_from_mccq(adapter
);
1484 req
= embedded_payload(wrb
);
1486 be_wrb_cmd_hdr_prepare(&req
->hdr
, CMD_SUBSYSTEM_COMMON
,
1487 OPCODE_COMMON_NTWK_INTERFACE_DESTROY
,
1488 sizeof(*req
), wrb
, NULL
);
1489 req
->hdr
.domain
= domain
;
1490 req
->interface_id
= cpu_to_le32(interface_id
);
1492 status
= be_mcc_notify_wait(adapter
);
1494 spin_unlock_bh(&adapter
->mcc_lock
);
1498 /* Get stats is a non embedded command: the request is not embedded inside
1499 * WRB but is a separate dma memory block
1500 * Uses asynchronous MCC
1502 int be_cmd_get_stats(struct be_adapter
*adapter
, struct be_dma_mem
*nonemb_cmd
)
1504 struct be_mcc_wrb
*wrb
;
1505 struct be_cmd_req_hdr
*hdr
;
1508 spin_lock_bh(&adapter
->mcc_lock
);
1510 wrb
= wrb_from_mccq(adapter
);
1515 hdr
= nonemb_cmd
->va
;
1517 be_wrb_cmd_hdr_prepare(hdr
, CMD_SUBSYSTEM_ETH
,
1518 OPCODE_ETH_GET_STATISTICS
, nonemb_cmd
->size
, wrb
,
1521 /* version 1 of the cmd is not supported only by BE2 */
1522 if (BE2_chip(adapter
))
1524 if (BE3_chip(adapter
) || lancer_chip(adapter
))
1529 be_mcc_notify(adapter
);
1530 adapter
->stats_cmd_sent
= true;
1533 spin_unlock_bh(&adapter
->mcc_lock
);
1538 int lancer_cmd_get_pport_stats(struct be_adapter
*adapter
,
1539 struct be_dma_mem
*nonemb_cmd
)
1541 struct be_mcc_wrb
*wrb
;
1542 struct lancer_cmd_req_pport_stats
*req
;
1545 if (!be_cmd_allowed(adapter
, OPCODE_ETH_GET_PPORT_STATS
,
1549 spin_lock_bh(&adapter
->mcc_lock
);
1551 wrb
= wrb_from_mccq(adapter
);
1556 req
= nonemb_cmd
->va
;
1558 be_wrb_cmd_hdr_prepare(&req
->hdr
, CMD_SUBSYSTEM_ETH
,
1559 OPCODE_ETH_GET_PPORT_STATS
, nonemb_cmd
->size
,
1562 req
->cmd_params
.params
.pport_num
= cpu_to_le16(adapter
->hba_port_num
);
1563 req
->cmd_params
.params
.reset_stats
= 0;
1565 be_mcc_notify(adapter
);
1566 adapter
->stats_cmd_sent
= true;
1569 spin_unlock_bh(&adapter
->mcc_lock
);
/* Map a PHY_LINK_SPEED_* MAC-speed enum value to a link speed value.
 * NOTE(review): extraction dropped the per-case return statements and
 * any default case of this switch — confirm the returned units (Mbps,
 * per the caller's comment at be_cmd_link_status_query) against the
 * complete source.
 */
1573 static int be_mac_to_link_speed(int mac_speed
)
1575 switch (mac_speed
) {
1576 case PHY_LINK_SPEED_ZERO
:
1578 case PHY_LINK_SPEED_10MBPS
:
1580 case PHY_LINK_SPEED_100MBPS
:
1582 case PHY_LINK_SPEED_1GBPS
:
1584 case PHY_LINK_SPEED_10GBPS
:
1586 case PHY_LINK_SPEED_20GBPS
:
1588 case PHY_LINK_SPEED_25GBPS
:
1590 case PHY_LINK_SPEED_40GBPS
:
1596 /* Uses synchronous mcc
1597 * Returns link_speed in Mbps
1599 int be_cmd_link_status_query(struct be_adapter
*adapter
, u16
*link_speed
,
1600 u8
*link_status
, u32 dom
)
1602 struct be_mcc_wrb
*wrb
;
1603 struct be_cmd_req_link_status
*req
;
1606 spin_lock_bh(&adapter
->mcc_lock
);
1609 *link_status
= LINK_DOWN
;
1611 wrb
= wrb_from_mccq(adapter
);
1616 req
= embedded_payload(wrb
);
1618 be_wrb_cmd_hdr_prepare(&req
->hdr
, CMD_SUBSYSTEM_COMMON
,
1619 OPCODE_COMMON_NTWK_LINK_STATUS_QUERY
,
1620 sizeof(*req
), wrb
, NULL
);
1622 /* version 1 of the cmd is not supported only by BE2 */
1623 if (!BE2_chip(adapter
))
1624 req
->hdr
.version
= 1;
1626 req
->hdr
.domain
= dom
;
1628 status
= be_mcc_notify_wait(adapter
);
1630 struct be_cmd_resp_link_status
*resp
= embedded_payload(wrb
);
1633 *link_speed
= resp
->link_speed
?
1634 le16_to_cpu(resp
->link_speed
) * 10 :
1635 be_mac_to_link_speed(resp
->mac_speed
);
1637 if (!resp
->logical_link_status
)
1641 *link_status
= resp
->logical_link_status
;
1645 spin_unlock_bh(&adapter
->mcc_lock
);
1649 /* Uses synchronous mcc */
/* Request the controller's additional attributes (die temperature).
 * Fires the MCC command asynchronously with be_mcc_notify() (no wait
 * here, unlike the notify_wait-based siblings); the completion is
 * handled elsewhere.
 * NOTE(review): extraction dropped the !wrb error path and the return —
 * confirm against the complete source.
 */
1650 int be_cmd_get_die_temperature(struct be_adapter
*adapter
)
1652 struct be_mcc_wrb
*wrb
;
1653 struct be_cmd_req_get_cntl_addnl_attribs
*req
;
1656 spin_lock_bh(&adapter
->mcc_lock
);
1658 wrb
= wrb_from_mccq(adapter
);
1663 req
= embedded_payload(wrb
);
1665 be_wrb_cmd_hdr_prepare(&req
->hdr
, CMD_SUBSYSTEM_COMMON
,
1666 OPCODE_COMMON_GET_CNTL_ADDITIONAL_ATTRIBUTES
,
1667 sizeof(*req
), wrb
, NULL
);
/* Async notify: do not wait for completion under the lock. */
1669 be_mcc_notify(adapter
);
1672 spin_unlock_bh(&adapter
->mcc_lock
);
1676 /* Uses synchronous mcc */
1677 int be_cmd_get_reg_len(struct be_adapter
*adapter
, u32
*log_size
)
1679 struct be_mcc_wrb
*wrb
;
1680 struct be_cmd_req_get_fat
*req
;
1683 spin_lock_bh(&adapter
->mcc_lock
);
1685 wrb
= wrb_from_mccq(adapter
);
1690 req
= embedded_payload(wrb
);
1692 be_wrb_cmd_hdr_prepare(&req
->hdr
, CMD_SUBSYSTEM_COMMON
,
1693 OPCODE_COMMON_MANAGE_FAT
, sizeof(*req
), wrb
,
1695 req
->fat_operation
= cpu_to_le32(QUERY_FAT
);
1696 status
= be_mcc_notify_wait(adapter
);
1698 struct be_cmd_resp_get_fat
*resp
= embedded_payload(wrb
);
1700 if (log_size
&& resp
->log_size
)
1701 *log_size
= le32_to_cpu(resp
->log_size
) -
1705 spin_unlock_bh(&adapter
->mcc_lock
);
1709 int be_cmd_get_regs(struct be_adapter
*adapter
, u32 buf_len
, void *buf
)
1711 struct be_dma_mem get_fat_cmd
;
1712 struct be_mcc_wrb
*wrb
;
1713 struct be_cmd_req_get_fat
*req
;
1714 u32 offset
= 0, total_size
, buf_size
,
1715 log_offset
= sizeof(u32
), payload_len
;
1721 total_size
= buf_len
;
1723 get_fat_cmd
.size
= sizeof(struct be_cmd_req_get_fat
) + 60*1024;
1724 get_fat_cmd
.va
= pci_alloc_consistent(adapter
->pdev
,
1727 if (!get_fat_cmd
.va
) {
1728 dev_err(&adapter
->pdev
->dev
,
1729 "Memory allocation failure while reading FAT data\n");
1733 spin_lock_bh(&adapter
->mcc_lock
);
1735 while (total_size
) {
1736 buf_size
= min(total_size
, (u32
)60*1024);
1737 total_size
-= buf_size
;
1739 wrb
= wrb_from_mccq(adapter
);
1744 req
= get_fat_cmd
.va
;
1746 payload_len
= sizeof(struct be_cmd_req_get_fat
) + buf_size
;
1747 be_wrb_cmd_hdr_prepare(&req
->hdr
, CMD_SUBSYSTEM_COMMON
,
1748 OPCODE_COMMON_MANAGE_FAT
, payload_len
,
1751 req
->fat_operation
= cpu_to_le32(RETRIEVE_FAT
);
1752 req
->read_log_offset
= cpu_to_le32(log_offset
);
1753 req
->read_log_length
= cpu_to_le32(buf_size
);
1754 req
->data_buffer_size
= cpu_to_le32(buf_size
);
1756 status
= be_mcc_notify_wait(adapter
);
1758 struct be_cmd_resp_get_fat
*resp
= get_fat_cmd
.va
;
1760 memcpy(buf
+ offset
,
1762 le32_to_cpu(resp
->read_log_length
));
1764 dev_err(&adapter
->pdev
->dev
, "FAT Table Retrieve error\n");
1768 log_offset
+= buf_size
;
1771 pci_free_consistent(adapter
->pdev
, get_fat_cmd
.size
,
1772 get_fat_cmd
.va
, get_fat_cmd
.dma
);
1773 spin_unlock_bh(&adapter
->mcc_lock
);
1777 /* Uses synchronous mcc */
1778 int be_cmd_get_fw_ver(struct be_adapter
*adapter
)
1780 struct be_mcc_wrb
*wrb
;
1781 struct be_cmd_req_get_fw_version
*req
;
1784 spin_lock_bh(&adapter
->mcc_lock
);
1786 wrb
= wrb_from_mccq(adapter
);
1792 req
= embedded_payload(wrb
);
1794 be_wrb_cmd_hdr_prepare(&req
->hdr
, CMD_SUBSYSTEM_COMMON
,
1795 OPCODE_COMMON_GET_FW_VERSION
, sizeof(*req
), wrb
,
1797 status
= be_mcc_notify_wait(adapter
);
1799 struct be_cmd_resp_get_fw_version
*resp
= embedded_payload(wrb
);
1801 strlcpy(adapter
->fw_ver
, resp
->firmware_version_string
,
1802 sizeof(adapter
->fw_ver
));
1803 strlcpy(adapter
->fw_on_flash
, resp
->fw_on_flash_version_string
,
1804 sizeof(adapter
->fw_on_flash
));
1807 spin_unlock_bh(&adapter
->mcc_lock
);
1811 /* set the EQ delay interval of an EQ to specified value
1814 static int __be_cmd_modify_eqd(struct be_adapter
*adapter
,
1815 struct be_set_eqd
*set_eqd
, int num
)
1817 struct be_mcc_wrb
*wrb
;
1818 struct be_cmd_req_modify_eq_delay
*req
;
1821 spin_lock_bh(&adapter
->mcc_lock
);
1823 wrb
= wrb_from_mccq(adapter
);
1828 req
= embedded_payload(wrb
);
1830 be_wrb_cmd_hdr_prepare(&req
->hdr
, CMD_SUBSYSTEM_COMMON
,
1831 OPCODE_COMMON_MODIFY_EQ_DELAY
, sizeof(*req
), wrb
,
1834 req
->num_eq
= cpu_to_le32(num
);
1835 for (i
= 0; i
< num
; i
++) {
1836 req
->set_eqd
[i
].eq_id
= cpu_to_le32(set_eqd
[i
].eq_id
);
1837 req
->set_eqd
[i
].phase
= 0;
1838 req
->set_eqd
[i
].delay_multiplier
=
1839 cpu_to_le32(set_eqd
[i
].delay_multiplier
);
1842 be_mcc_notify(adapter
);
1844 spin_unlock_bh(&adapter
->mcc_lock
);
/* Public wrapper around __be_cmd_modify_eqd(): updates EQ delay values
 * in batches of at most 8 entries per firmware command.
 * NOTE(review): extraction dropped the surrounding loop, the num_eqs/i
 * declarations and the return — the min(num, 8) batching visible here
 * implies iteration over set_eqd[]; confirm against the complete source.
 */
1848 int be_cmd_modify_eqd(struct be_adapter
*adapter
, struct be_set_eqd
*set_eqd
,
/* Firmware accepts at most 8 EQ-delay entries per command. */
1854 num_eqs
= min(num
, 8);
1855 __be_cmd_modify_eqd(adapter
, &set_eqd
[i
], num_eqs
);
1863 /* Uses sycnhronous mcc */
1864 int be_cmd_vlan_config(struct be_adapter
*adapter
, u32 if_id
, u16
*vtag_array
,
1865 u32 num
, u32 domain
)
1867 struct be_mcc_wrb
*wrb
;
1868 struct be_cmd_req_vlan_config
*req
;
1871 spin_lock_bh(&adapter
->mcc_lock
);
1873 wrb
= wrb_from_mccq(adapter
);
1878 req
= embedded_payload(wrb
);
1880 be_wrb_cmd_hdr_prepare(&req
->hdr
, CMD_SUBSYSTEM_COMMON
,
1881 OPCODE_COMMON_NTWK_VLAN_CONFIG
, sizeof(*req
),
1883 req
->hdr
.domain
= domain
;
1885 req
->interface_id
= if_id
;
1886 req
->untagged
= BE_IF_FLAGS_UNTAGGED
& be_if_cap_flags(adapter
) ? 1 : 0;
1887 req
->num_vlan
= num
;
1888 memcpy(req
->normal_vlan
, vtag_array
,
1889 req
->num_vlan
* sizeof(vtag_array
[0]));
1891 status
= be_mcc_notify_wait(adapter
);
1893 spin_unlock_bh(&adapter
->mcc_lock
);
1897 static int __be_cmd_rx_filter(struct be_adapter
*adapter
, u32 flags
, u32 value
)
1899 struct be_mcc_wrb
*wrb
;
1900 struct be_dma_mem
*mem
= &adapter
->rx_filter
;
1901 struct be_cmd_req_rx_filter
*req
= mem
->va
;
1904 spin_lock_bh(&adapter
->mcc_lock
);
1906 wrb
= wrb_from_mccq(adapter
);
1911 memset(req
, 0, sizeof(*req
));
1912 be_wrb_cmd_hdr_prepare(&req
->hdr
, CMD_SUBSYSTEM_COMMON
,
1913 OPCODE_COMMON_NTWK_RX_FILTER
, sizeof(*req
),
1916 req
->if_id
= cpu_to_le32(adapter
->if_handle
);
1917 req
->if_flags_mask
= cpu_to_le32(flags
);
1918 req
->if_flags
= (value
== ON
) ? req
->if_flags_mask
: 0;
1920 if (flags
& BE_IF_FLAGS_MULTICAST
) {
1921 struct netdev_hw_addr
*ha
;
1924 /* Reset mcast promisc mode if already set by setting mask
1925 * and not setting flags field
1927 req
->if_flags_mask
|=
1928 cpu_to_le32(BE_IF_FLAGS_MCAST_PROMISCUOUS
&
1929 be_if_cap_flags(adapter
));
1930 req
->mcast_num
= cpu_to_le32(netdev_mc_count(adapter
->netdev
));
1931 netdev_for_each_mc_addr(ha
, adapter
->netdev
)
1932 memcpy(req
->mcast_mac
[i
++].byte
, ha
->addr
, ETH_ALEN
);
1935 status
= be_mcc_notify_wait(adapter
);
1937 spin_unlock_bh(&adapter
->mcc_lock
);
/* Validate requested RX-filter flags against the interface's advertised
 * capabilities, warn about (and strip) unsupported bits, then delegate
 * to __be_cmd_rx_filter() to program the firmware.
 */
1941 int be_cmd_rx_filter(struct be_adapter
*adapter
, u32 flags
, u32 value
)
1943 struct device
*dev
= &adapter
->pdev
->dev
;
/* Warn if the caller asked for flags the interface cannot support. */
1945 if ((flags
& be_if_cap_flags(adapter
)) != flags
) {
1946 dev_warn(dev
, "Cannot set rx filter flags 0x%x\n", flags
);
1947 dev_warn(dev
, "Interface is capable of 0x%x flags only\n",
1948 be_if_cap_flags(adapter
));
/* Silently drop the unsupported bits rather than failing the call. */
1950 flags
&= be_if_cap_flags(adapter
);
1952 return __be_cmd_rx_filter(adapter
, flags
, value
);
1955 /* Uses synchrounous mcc */
1956 int be_cmd_set_flow_control(struct be_adapter
*adapter
, u32 tx_fc
, u32 rx_fc
)
1958 struct be_mcc_wrb
*wrb
;
1959 struct be_cmd_req_set_flow_control
*req
;
1962 if (!be_cmd_allowed(adapter
, OPCODE_COMMON_SET_FLOW_CONTROL
,
1963 CMD_SUBSYSTEM_COMMON
))
1966 spin_lock_bh(&adapter
->mcc_lock
);
1968 wrb
= wrb_from_mccq(adapter
);
1973 req
= embedded_payload(wrb
);
1975 be_wrb_cmd_hdr_prepare(&req
->hdr
, CMD_SUBSYSTEM_COMMON
,
1976 OPCODE_COMMON_SET_FLOW_CONTROL
, sizeof(*req
),
1979 req
->hdr
.version
= 1;
1980 req
->tx_flow_control
= cpu_to_le16((u16
)tx_fc
);
1981 req
->rx_flow_control
= cpu_to_le16((u16
)rx_fc
);
1983 status
= be_mcc_notify_wait(adapter
);
1986 spin_unlock_bh(&adapter
->mcc_lock
);
1988 if (base_status(status
) == MCC_STATUS_FEATURE_NOT_SUPPORTED
)
1995 int be_cmd_get_flow_control(struct be_adapter
*adapter
, u32
*tx_fc
, u32
*rx_fc
)
1997 struct be_mcc_wrb
*wrb
;
1998 struct be_cmd_req_get_flow_control
*req
;
2001 if (!be_cmd_allowed(adapter
, OPCODE_COMMON_GET_FLOW_CONTROL
,
2002 CMD_SUBSYSTEM_COMMON
))
2005 spin_lock_bh(&adapter
->mcc_lock
);
2007 wrb
= wrb_from_mccq(adapter
);
2012 req
= embedded_payload(wrb
);
2014 be_wrb_cmd_hdr_prepare(&req
->hdr
, CMD_SUBSYSTEM_COMMON
,
2015 OPCODE_COMMON_GET_FLOW_CONTROL
, sizeof(*req
),
2018 status
= be_mcc_notify_wait(adapter
);
2020 struct be_cmd_resp_get_flow_control
*resp
=
2021 embedded_payload(wrb
);
2023 *tx_fc
= le16_to_cpu(resp
->tx_flow_control
);
2024 *rx_fc
= le16_to_cpu(resp
->rx_flow_control
);
2028 spin_unlock_bh(&adapter
->mcc_lock
);
2033 int be_cmd_query_fw_cfg(struct be_adapter
*adapter
)
2035 struct be_mcc_wrb
*wrb
;
2036 struct be_cmd_req_query_fw_cfg
*req
;
2039 if (mutex_lock_interruptible(&adapter
->mbox_lock
))
2042 wrb
= wrb_from_mbox(adapter
);
2043 req
= embedded_payload(wrb
);
2045 be_wrb_cmd_hdr_prepare(&req
->hdr
, CMD_SUBSYSTEM_COMMON
,
2046 OPCODE_COMMON_QUERY_FIRMWARE_CONFIG
,
2047 sizeof(*req
), wrb
, NULL
);
2049 status
= be_mbox_notify_wait(adapter
);
2051 struct be_cmd_resp_query_fw_cfg
*resp
= embedded_payload(wrb
);
2053 adapter
->port_num
= le32_to_cpu(resp
->phys_port
);
2054 adapter
->function_mode
= le32_to_cpu(resp
->function_mode
);
2055 adapter
->function_caps
= le32_to_cpu(resp
->function_caps
);
2056 adapter
->asic_rev
= le32_to_cpu(resp
->asic_revision
) & 0xFF;
2057 dev_info(&adapter
->pdev
->dev
,
2058 "FW config: function_mode=0x%x, function_caps=0x%x\n",
2059 adapter
->function_mode
, adapter
->function_caps
);
2062 mutex_unlock(&adapter
->mbox_lock
);
2067 int be_cmd_reset_function(struct be_adapter
*adapter
)
2069 struct be_mcc_wrb
*wrb
;
2070 struct be_cmd_req_hdr
*req
;
2073 if (lancer_chip(adapter
)) {
2074 iowrite32(SLI_PORT_CONTROL_IP_MASK
,
2075 adapter
->db
+ SLIPORT_CONTROL_OFFSET
);
2076 status
= lancer_wait_ready(adapter
);
2078 dev_err(&adapter
->pdev
->dev
,
2079 "Adapter in non recoverable error\n");
2083 if (mutex_lock_interruptible(&adapter
->mbox_lock
))
2086 wrb
= wrb_from_mbox(adapter
);
2087 req
= embedded_payload(wrb
);
2089 be_wrb_cmd_hdr_prepare(req
, CMD_SUBSYSTEM_COMMON
,
2090 OPCODE_COMMON_FUNCTION_RESET
, sizeof(*req
), wrb
,
2093 status
= be_mbox_notify_wait(adapter
);
2095 mutex_unlock(&adapter
->mbox_lock
);
2099 int be_cmd_rss_config(struct be_adapter
*adapter
, u8
*rsstable
,
2100 u32 rss_hash_opts
, u16 table_size
, const u8
*rss_hkey
)
2102 struct be_mcc_wrb
*wrb
;
2103 struct be_cmd_req_rss_config
*req
;
2106 if (!(be_if_cap_flags(adapter
) & BE_IF_FLAGS_RSS
))
2109 spin_lock_bh(&adapter
->mcc_lock
);
2111 wrb
= wrb_from_mccq(adapter
);
2116 req
= embedded_payload(wrb
);
2118 be_wrb_cmd_hdr_prepare(&req
->hdr
, CMD_SUBSYSTEM_ETH
,
2119 OPCODE_ETH_RSS_CONFIG
, sizeof(*req
), wrb
, NULL
);
2121 req
->if_id
= cpu_to_le32(adapter
->if_handle
);
2122 req
->enable_rss
= cpu_to_le16(rss_hash_opts
);
2123 req
->cpu_table_size_log2
= cpu_to_le16(fls(table_size
) - 1);
2125 if (!BEx_chip(adapter
))
2126 req
->hdr
.version
= 1;
2128 memcpy(req
->cpu_table
, rsstable
, table_size
);
2129 memcpy(req
->hash
, rss_hkey
, RSS_HASH_KEY_LEN
);
2130 be_dws_cpu_to_le(req
->hash
, sizeof(req
->hash
));
2132 status
= be_mcc_notify_wait(adapter
);
2134 spin_unlock_bh(&adapter
->mcc_lock
);
2139 int be_cmd_set_beacon_state(struct be_adapter
*adapter
, u8 port_num
,
2140 u8 bcn
, u8 sts
, u8 state
)
2142 struct be_mcc_wrb
*wrb
;
2143 struct be_cmd_req_enable_disable_beacon
*req
;
2146 spin_lock_bh(&adapter
->mcc_lock
);
2148 wrb
= wrb_from_mccq(adapter
);
2153 req
= embedded_payload(wrb
);
2155 be_wrb_cmd_hdr_prepare(&req
->hdr
, CMD_SUBSYSTEM_COMMON
,
2156 OPCODE_COMMON_ENABLE_DISABLE_BEACON
,
2157 sizeof(*req
), wrb
, NULL
);
2159 req
->port_num
= port_num
;
2160 req
->beacon_state
= state
;
2161 req
->beacon_duration
= bcn
;
2162 req
->status_duration
= sts
;
2164 status
= be_mcc_notify_wait(adapter
);
2167 spin_unlock_bh(&adapter
->mcc_lock
);
2172 int be_cmd_get_beacon_state(struct be_adapter
*adapter
, u8 port_num
, u32
*state
)
2174 struct be_mcc_wrb
*wrb
;
2175 struct be_cmd_req_get_beacon_state
*req
;
2178 spin_lock_bh(&adapter
->mcc_lock
);
2180 wrb
= wrb_from_mccq(adapter
);
2185 req
= embedded_payload(wrb
);
2187 be_wrb_cmd_hdr_prepare(&req
->hdr
, CMD_SUBSYSTEM_COMMON
,
2188 OPCODE_COMMON_GET_BEACON_STATE
, sizeof(*req
),
2191 req
->port_num
= port_num
;
2193 status
= be_mcc_notify_wait(adapter
);
2195 struct be_cmd_resp_get_beacon_state
*resp
=
2196 embedded_payload(wrb
);
2198 *state
= resp
->beacon_state
;
2202 spin_unlock_bh(&adapter
->mcc_lock
);
2207 int be_cmd_read_port_transceiver_data(struct be_adapter
*adapter
,
2208 u8 page_num
, u8
*data
)
2210 struct be_dma_mem cmd
;
2211 struct be_mcc_wrb
*wrb
;
2212 struct be_cmd_req_port_type
*req
;
2215 if (page_num
> TR_PAGE_A2
)
2218 cmd
.size
= sizeof(struct be_cmd_resp_port_type
);
2219 cmd
.va
= pci_alloc_consistent(adapter
->pdev
, cmd
.size
, &cmd
.dma
);
2221 dev_err(&adapter
->pdev
->dev
, "Memory allocation failed\n");
2224 memset(cmd
.va
, 0, cmd
.size
);
2226 spin_lock_bh(&adapter
->mcc_lock
);
2228 wrb
= wrb_from_mccq(adapter
);
2235 be_wrb_cmd_hdr_prepare(&req
->hdr
, CMD_SUBSYSTEM_COMMON
,
2236 OPCODE_COMMON_READ_TRANSRECV_DATA
,
2237 cmd
.size
, wrb
, &cmd
);
2239 req
->port
= cpu_to_le32(adapter
->hba_port_num
);
2240 req
->page_num
= cpu_to_le32(page_num
);
2241 status
= be_mcc_notify_wait(adapter
);
2243 struct be_cmd_resp_port_type
*resp
= cmd
.va
;
2245 memcpy(data
, resp
->page_data
, PAGE_DATA_LEN
);
2248 spin_unlock_bh(&adapter
->mcc_lock
);
2249 pci_free_consistent(adapter
->pdev
, cmd
.size
, cmd
.va
, cmd
.dma
);
2253 int lancer_cmd_write_object(struct be_adapter
*adapter
, struct be_dma_mem
*cmd
,
2254 u32 data_size
, u32 data_offset
,
2255 const char *obj_name
, u32
*data_written
,
2256 u8
*change_status
, u8
*addn_status
)
2258 struct be_mcc_wrb
*wrb
;
2259 struct lancer_cmd_req_write_object
*req
;
2260 struct lancer_cmd_resp_write_object
*resp
;
2264 spin_lock_bh(&adapter
->mcc_lock
);
2265 adapter
->flash_status
= 0;
2267 wrb
= wrb_from_mccq(adapter
);
2273 req
= embedded_payload(wrb
);
2275 be_wrb_cmd_hdr_prepare(&req
->hdr
, CMD_SUBSYSTEM_COMMON
,
2276 OPCODE_COMMON_WRITE_OBJECT
,
2277 sizeof(struct lancer_cmd_req_write_object
), wrb
,
2280 ctxt
= &req
->context
;
2281 AMAP_SET_BITS(struct amap_lancer_write_obj_context
,
2282 write_length
, ctxt
, data_size
);
2285 AMAP_SET_BITS(struct amap_lancer_write_obj_context
,
2288 AMAP_SET_BITS(struct amap_lancer_write_obj_context
,
2291 be_dws_cpu_to_le(ctxt
, sizeof(req
->context
));
2292 req
->write_offset
= cpu_to_le32(data_offset
);
2293 strlcpy(req
->object_name
, obj_name
, sizeof(req
->object_name
));
2294 req
->descriptor_count
= cpu_to_le32(1);
2295 req
->buf_len
= cpu_to_le32(data_size
);
2296 req
->addr_low
= cpu_to_le32((cmd
->dma
+
2297 sizeof(struct lancer_cmd_req_write_object
))
2299 req
->addr_high
= cpu_to_le32(upper_32_bits(cmd
->dma
+
2300 sizeof(struct lancer_cmd_req_write_object
)));
2302 be_mcc_notify(adapter
);
2303 spin_unlock_bh(&adapter
->mcc_lock
);
2305 if (!wait_for_completion_timeout(&adapter
->et_cmd_compl
,
2306 msecs_to_jiffies(60000)))
2307 status
= -ETIMEDOUT
;
2309 status
= adapter
->flash_status
;
2311 resp
= embedded_payload(wrb
);
2313 *data_written
= le32_to_cpu(resp
->actual_write_len
);
2314 *change_status
= resp
->change_status
;
2316 *addn_status
= resp
->additional_status
;
2322 spin_unlock_bh(&adapter
->mcc_lock
);
2326 int be_cmd_query_cable_type(struct be_adapter
*adapter
)
2328 u8 page_data
[PAGE_DATA_LEN
];
2331 status
= be_cmd_read_port_transceiver_data(adapter
, TR_PAGE_A0
,
2334 switch (adapter
->phy
.interface_type
) {
2336 adapter
->phy
.cable_type
=
2337 page_data
[QSFP_PLUS_CABLE_TYPE_OFFSET
];
2339 case PHY_TYPE_SFP_PLUS_10GB
:
2340 adapter
->phy
.cable_type
=
2341 page_data
[SFP_PLUS_CABLE_TYPE_OFFSET
];
2344 adapter
->phy
.cable_type
= 0;
/* Read SFP EEPROM page A0 and cache the module's vendor name and part
 * number into adapter->phy. Copies are bounded with strlcpy so the
 * destination strings stay NUL-terminated.
 * NOTE(review): the PN copy is bounded by SFP_VENDOR_NAME_LEN (not a
 * PN-specific length) — presumably the two fields share a size; also the
 * status check between the read and the copies was dropped by extraction.
 * Confirm both against the complete source.
 */
2351 int be_cmd_query_sfp_info(struct be_adapter
*adapter
)
2353 u8 page_data
[PAGE_DATA_LEN
];
2356 status
= be_cmd_read_port_transceiver_data(adapter
, TR_PAGE_A0
,
2359 strlcpy(adapter
->phy
.vendor_name
, page_data
+
2360 SFP_VENDOR_NAME_OFFSET
, SFP_VENDOR_NAME_LEN
- 1);
2361 strlcpy(adapter
->phy
.vendor_pn
,
2362 page_data
+ SFP_VENDOR_PN_OFFSET
,
2363 SFP_VENDOR_NAME_LEN
- 1);
/* Delete a named flash object on Lancer chips via
 * OPCODE_COMMON_DELETE_OBJECT. The object name is copied with a bounded
 * strlcpy into the fixed-size request field. Runs under mcc_lock with a
 * synchronous notify/wait.
 * NOTE(review): extraction dropped the !wrb error path and the final
 * return — confirm against the complete source.
 */
2369 int lancer_cmd_delete_object(struct be_adapter
*adapter
, const char *obj_name
)
2371 struct lancer_cmd_req_delete_object
*req
;
2372 struct be_mcc_wrb
*wrb
;
2375 spin_lock_bh(&adapter
->mcc_lock
);
2377 wrb
= wrb_from_mccq(adapter
);
2383 req
= embedded_payload(wrb
);
2385 be_wrb_cmd_hdr_prepare(&req
->hdr
, CMD_SUBSYSTEM_COMMON
,
2386 OPCODE_COMMON_DELETE_OBJECT
,
2387 sizeof(*req
), wrb
, NULL
);
/* Bounded copy keeps object_name NUL-terminated. */
2389 strlcpy(req
->object_name
, obj_name
, sizeof(req
->object_name
));
2391 status
= be_mcc_notify_wait(adapter
);
2393 spin_unlock_bh(&adapter
->mcc_lock
);
/* Read up to data_size bytes of a named flash object on Lancer chips
 * (OPCODE_COMMON_READ_OBJECT) into the caller-provided DMA buffer *cmd,
 * starting at data_offset. On success reports the actual bytes read,
 * an end-of-file indication and the firmware's additional status via
 * the out-parameters.
 * NOTE(review): unlike lancer_cmd_delete_object/write_object, the object
 * name is copied with an UNBOUNDED strcpy — if obj_name can exceed
 * sizeof(req->object_name) this overruns the request; should likely be
 * strlcpy for consistency. Flagged only (error paths are missing from
 * this view, so a safe in-place fix cannot be verified here).
 */
2397 int lancer_cmd_read_object(struct be_adapter
*adapter
, struct be_dma_mem
*cmd
,
2398 u32 data_size
, u32 data_offset
, const char *obj_name
,
2399 u32
*data_read
, u32
*eof
, u8
*addn_status
)
2401 struct be_mcc_wrb
*wrb
;
2402 struct lancer_cmd_req_read_object
*req
;
2403 struct lancer_cmd_resp_read_object
*resp
;
2406 spin_lock_bh(&adapter
->mcc_lock
);
2408 wrb
= wrb_from_mccq(adapter
);
2414 req
= embedded_payload(wrb
);
2416 be_wrb_cmd_hdr_prepare(&req
->hdr
, CMD_SUBSYSTEM_COMMON
,
2417 OPCODE_COMMON_READ_OBJECT
,
2418 sizeof(struct lancer_cmd_req_read_object
), wrb
,
2421 req
->desired_read_len
= cpu_to_le32(data_size
);
2422 req
->read_offset
= cpu_to_le32(data_offset
);
/* NOTE(review): unbounded copy — see header comment. */
2423 strcpy(req
->object_name
, obj_name
);
2424 req
->descriptor_count
= cpu_to_le32(1);
2425 req
->buf_len
= cpu_to_le32(data_size
);
/* Split the 64-bit DMA address into low/high 32-bit halves. */
2426 req
->addr_low
= cpu_to_le32((cmd
->dma
& 0xFFFFFFFF));
2427 req
->addr_high
= cpu_to_le32(upper_32_bits(cmd
->dma
));
2429 status
= be_mcc_notify_wait(adapter
);
2431 resp
= embedded_payload(wrb
);
2433 *data_read
= le32_to_cpu(resp
->actual_read_len
);
2434 *eof
= le32_to_cpu(resp
->eof
);
2436 *addn_status
= resp
->additional_status
;
2440 spin_unlock_bh(&adapter
->mcc_lock
);
2444 int be_cmd_write_flashrom(struct be_adapter
*adapter
, struct be_dma_mem
*cmd
,
2445 u32 flash_type
, u32 flash_opcode
, u32 img_offset
,
2448 struct be_mcc_wrb
*wrb
;
2449 struct be_cmd_write_flashrom
*req
;
2452 spin_lock_bh(&adapter
->mcc_lock
);
2453 adapter
->flash_status
= 0;
2455 wrb
= wrb_from_mccq(adapter
);
2462 be_wrb_cmd_hdr_prepare(&req
->hdr
, CMD_SUBSYSTEM_COMMON
,
2463 OPCODE_COMMON_WRITE_FLASHROM
, cmd
->size
, wrb
,
2466 req
->params
.op_type
= cpu_to_le32(flash_type
);
2467 if (flash_type
== OPTYPE_OFFSET_SPECIFIED
)
2468 req
->params
.offset
= cpu_to_le32(img_offset
);
2470 req
->params
.op_code
= cpu_to_le32(flash_opcode
);
2471 req
->params
.data_buf_size
= cpu_to_le32(buf_size
);
2473 be_mcc_notify(adapter
);
2474 spin_unlock_bh(&adapter
->mcc_lock
);
2476 if (!wait_for_completion_timeout(&adapter
->et_cmd_compl
,
2477 msecs_to_jiffies(40000)))
2478 status
= -ETIMEDOUT
;
2480 status
= adapter
->flash_status
;
2485 spin_unlock_bh(&adapter
->mcc_lock
);
2489 int be_cmd_get_flash_crc(struct be_adapter
*adapter
, u8
*flashed_crc
,
2490 u16 img_optype
, u32 img_offset
, u32 crc_offset
)
2492 struct be_cmd_read_flash_crc
*req
;
2493 struct be_mcc_wrb
*wrb
;
2496 spin_lock_bh(&adapter
->mcc_lock
);
2498 wrb
= wrb_from_mccq(adapter
);
2503 req
= embedded_payload(wrb
);
2505 be_wrb_cmd_hdr_prepare(&req
->hdr
, CMD_SUBSYSTEM_COMMON
,
2506 OPCODE_COMMON_READ_FLASHROM
, sizeof(*req
),
2509 req
->params
.op_type
= cpu_to_le32(img_optype
);
2510 if (img_optype
== OPTYPE_OFFSET_SPECIFIED
)
2511 req
->params
.offset
= cpu_to_le32(img_offset
+ crc_offset
);
2513 req
->params
.offset
= cpu_to_le32(crc_offset
);
2515 req
->params
.op_code
= cpu_to_le32(FLASHROM_OPER_REPORT
);
2516 req
->params
.data_buf_size
= cpu_to_le32(0x4);
2518 status
= be_mcc_notify_wait(adapter
);
2520 memcpy(flashed_crc
, req
->crc
, 4);
2523 spin_unlock_bh(&adapter
->mcc_lock
);
/* Program the magic-packet Wake-on-LAN MAC address via
 * OPCODE_ETH_ACPI_WOL_MAGIC_CONFIG. This is a non-embedded command: the
 * request lives in the caller-supplied DMA buffer (nonemb_cmd->va), not
 * in the WRB payload.
 * NOTE(review): extraction dropped the !wrb error path and the final
 * return — confirm against the complete source.
 */
2527 int be_cmd_enable_magic_wol(struct be_adapter
*adapter
, u8
*mac
,
2528 struct be_dma_mem
*nonemb_cmd
)
2530 struct be_mcc_wrb
*wrb
;
2531 struct be_cmd_req_acpi_wol_magic_config
*req
;
2534 spin_lock_bh(&adapter
->mcc_lock
);
2536 wrb
= wrb_from_mccq(adapter
);
/* Non-embedded: request is in the external DMA buffer. */
2541 req
= nonemb_cmd
->va
;
2543 be_wrb_cmd_hdr_prepare(&req
->hdr
, CMD_SUBSYSTEM_ETH
,
2544 OPCODE_ETH_ACPI_WOL_MAGIC_CONFIG
, sizeof(*req
),
2546 memcpy(req
->magic_mac
, mac
, ETH_ALEN
);
2548 status
= be_mcc_notify_wait(adapter
);
2551 spin_unlock_bh(&adapter
->mcc_lock
);
/* Enable or disable a loopback mode on a port for diagnostics
 * (OPCODE_LOWLEVEL_SET_LOOPBACK_MODE, CMD_SUBSYSTEM_LOWLEVEL).
 * Source and destination ports are both set to port_num, i.e. the port
 * loops back to itself.
 * NOTE(review): extraction dropped the !wrb error path and the final
 * return — confirm against the complete source.
 */
2555 int be_cmd_set_loopback(struct be_adapter
*adapter
, u8 port_num
,
2556 u8 loopback_type
, u8 enable
)
2558 struct be_mcc_wrb
*wrb
;
2559 struct be_cmd_req_set_lmode
*req
;
2562 spin_lock_bh(&adapter
->mcc_lock
);
2564 wrb
= wrb_from_mccq(adapter
);
2570 req
= embedded_payload(wrb
);
2572 be_wrb_cmd_hdr_prepare(&req
->hdr
, CMD_SUBSYSTEM_LOWLEVEL
,
2573 OPCODE_LOWLEVEL_SET_LOOPBACK_MODE
, sizeof(*req
),
/* Port loops back onto itself: src == dest. */
2576 req
->src_port
= port_num
;
2577 req
->dest_port
= port_num
;
2578 req
->loopback_type
= loopback_type
;
2579 req
->loopback_state
= enable
;
2581 status
= be_mcc_notify_wait(adapter
);
2583 spin_unlock_bh(&adapter
->mcc_lock
);
2587 int be_cmd_loopback_test(struct be_adapter
*adapter
, u32 port_num
,
2588 u32 loopback_type
, u32 pkt_size
, u32 num_pkts
,
2591 struct be_mcc_wrb
*wrb
;
2592 struct be_cmd_req_loopback_test
*req
;
2593 struct be_cmd_resp_loopback_test
*resp
;
2596 spin_lock_bh(&adapter
->mcc_lock
);
2598 wrb
= wrb_from_mccq(adapter
);
2604 req
= embedded_payload(wrb
);
2606 be_wrb_cmd_hdr_prepare(&req
->hdr
, CMD_SUBSYSTEM_LOWLEVEL
,
2607 OPCODE_LOWLEVEL_LOOPBACK_TEST
, sizeof(*req
), wrb
,
2610 req
->hdr
.timeout
= cpu_to_le32(15);
2611 req
->pattern
= cpu_to_le64(pattern
);
2612 req
->src_port
= cpu_to_le32(port_num
);
2613 req
->dest_port
= cpu_to_le32(port_num
);
2614 req
->pkt_size
= cpu_to_le32(pkt_size
);
2615 req
->num_pkts
= cpu_to_le32(num_pkts
);
2616 req
->loopback_type
= cpu_to_le32(loopback_type
);
2618 be_mcc_notify(adapter
);
2620 spin_unlock_bh(&adapter
->mcc_lock
);
2622 wait_for_completion(&adapter
->et_cmd_compl
);
2623 resp
= embedded_payload(wrb
);
2624 status
= le32_to_cpu(resp
->status
);
2628 spin_unlock_bh(&adapter
->mcc_lock
);
2632 int be_cmd_ddr_dma_test(struct be_adapter
*adapter
, u64 pattern
,
2633 u32 byte_cnt
, struct be_dma_mem
*cmd
)
2635 struct be_mcc_wrb
*wrb
;
2636 struct be_cmd_req_ddrdma_test
*req
;
2640 spin_lock_bh(&adapter
->mcc_lock
);
2642 wrb
= wrb_from_mccq(adapter
);
2648 be_wrb_cmd_hdr_prepare(&req
->hdr
, CMD_SUBSYSTEM_LOWLEVEL
,
2649 OPCODE_LOWLEVEL_HOST_DDR_DMA
, cmd
->size
, wrb
,
2652 req
->pattern
= cpu_to_le64(pattern
);
2653 req
->byte_count
= cpu_to_le32(byte_cnt
);
2654 for (i
= 0; i
< byte_cnt
; i
++) {
2655 req
->snd_buff
[i
] = (u8
)(pattern
>> (j
*8));
2661 status
= be_mcc_notify_wait(adapter
);
2664 struct be_cmd_resp_ddrdma_test
*resp
;
2667 if ((memcmp(resp
->rcv_buff
, req
->snd_buff
, byte_cnt
) != 0) ||
2674 spin_unlock_bh(&adapter
->mcc_lock
);
/* Read the adapter's SEEPROM contents (OPCODE_COMMON_SEEPROM_READ).
 * Non-embedded command: the request/response lives in the caller's DMA
 * buffer (nonemb_cmd->va). Synchronous MCC under mcc_lock.
 * NOTE(review): extraction dropped the !wrb error path and the final
 * return — confirm against the complete source.
 */
2678 int be_cmd_get_seeprom_data(struct be_adapter
*adapter
,
2679 struct be_dma_mem
*nonemb_cmd
)
2681 struct be_mcc_wrb
*wrb
;
2682 struct be_cmd_req_seeprom_read
*req
;
2685 spin_lock_bh(&adapter
->mcc_lock
);
2687 wrb
= wrb_from_mccq(adapter
);
/* Non-embedded: request is in the external DMA buffer. */
2692 req
= nonemb_cmd
->va
;
2694 be_wrb_cmd_hdr_prepare(&req
->hdr
, CMD_SUBSYSTEM_COMMON
,
2695 OPCODE_COMMON_SEEPROM_READ
, sizeof(*req
), wrb
,
2698 status
= be_mcc_notify_wait(adapter
);
2701 spin_unlock_bh(&adapter
->mcc_lock
);
2705 int be_cmd_get_phy_info(struct be_adapter
*adapter
)
2707 struct be_mcc_wrb
*wrb
;
2708 struct be_cmd_req_get_phy_info
*req
;
2709 struct be_dma_mem cmd
;
2712 if (!be_cmd_allowed(adapter
, OPCODE_COMMON_GET_PHY_DETAILS
,
2713 CMD_SUBSYSTEM_COMMON
))
2716 spin_lock_bh(&adapter
->mcc_lock
);
2718 wrb
= wrb_from_mccq(adapter
);
2723 cmd
.size
= sizeof(struct be_cmd_req_get_phy_info
);
2724 cmd
.va
= pci_alloc_consistent(adapter
->pdev
, cmd
.size
, &cmd
.dma
);
2726 dev_err(&adapter
->pdev
->dev
, "Memory alloc failure\n");
2733 be_wrb_cmd_hdr_prepare(&req
->hdr
, CMD_SUBSYSTEM_COMMON
,
2734 OPCODE_COMMON_GET_PHY_DETAILS
, sizeof(*req
),
2737 status
= be_mcc_notify_wait(adapter
);
2739 struct be_phy_info
*resp_phy_info
=
2740 cmd
.va
+ sizeof(struct be_cmd_req_hdr
);
2742 adapter
->phy
.phy_type
= le16_to_cpu(resp_phy_info
->phy_type
);
2743 adapter
->phy
.interface_type
=
2744 le16_to_cpu(resp_phy_info
->interface_type
);
2745 adapter
->phy
.auto_speeds_supported
=
2746 le16_to_cpu(resp_phy_info
->auto_speeds_supported
);
2747 adapter
->phy
.fixed_speeds_supported
=
2748 le16_to_cpu(resp_phy_info
->fixed_speeds_supported
);
2749 adapter
->phy
.misc_params
=
2750 le32_to_cpu(resp_phy_info
->misc_params
);
2752 if (BE2_chip(adapter
)) {
2753 adapter
->phy
.fixed_speeds_supported
=
2754 BE_SUPPORTED_SPEED_10GBPS
|
2755 BE_SUPPORTED_SPEED_1GBPS
;
2758 pci_free_consistent(adapter
->pdev
, cmd
.size
, cmd
.va
, cmd
.dma
);
2760 spin_unlock_bh(&adapter
->mcc_lock
);
/* Set the NIC QoS maximum bandwidth (bps) for the given privilege
 * domain via OPCODE_COMMON_SET_QOS. Only the NIC-rate bits are marked
 * valid (BE_QOS_BITS_NIC). Synchronous MCC under mcc_lock.
 * NOTE(review): extraction dropped the !wrb error path and the final
 * return — confirm against the complete source.
 */
2764 static int be_cmd_set_qos(struct be_adapter
*adapter
, u32 bps
, u32 domain
)
2766 struct be_mcc_wrb
*wrb
;
2767 struct be_cmd_req_set_qos
*req
;
2770 spin_lock_bh(&adapter
->mcc_lock
);
2772 wrb
= wrb_from_mccq(adapter
);
2778 req
= embedded_payload(wrb
);
2780 be_wrb_cmd_hdr_prepare(&req
->hdr
, CMD_SUBSYSTEM_COMMON
,
2781 OPCODE_COMMON_SET_QOS
, sizeof(*req
), wrb
, NULL
);
2783 req
->hdr
.domain
= domain
;
/* Only the NIC max-bps field of the QoS request is marked valid. */
2784 req
->valid_bits
= cpu_to_le32(BE_QOS_BITS_NIC
);
2785 req
->max_bps_nic
= cpu_to_le32(bps
);
2787 status
= be_mcc_notify_wait(adapter
);
2790 spin_unlock_bh(&adapter
->mcc_lock
);
2794 int be_cmd_get_cntl_attributes(struct be_adapter
*adapter
)
2796 struct be_mcc_wrb
*wrb
;
2797 struct be_cmd_req_cntl_attribs
*req
;
2798 struct be_cmd_resp_cntl_attribs
*resp
;
2800 int payload_len
= max(sizeof(*req
), sizeof(*resp
));
2801 struct mgmt_controller_attrib
*attribs
;
2802 struct be_dma_mem attribs_cmd
;
2804 if (mutex_lock_interruptible(&adapter
->mbox_lock
))
2807 memset(&attribs_cmd
, 0, sizeof(struct be_dma_mem
));
2808 attribs_cmd
.size
= sizeof(struct be_cmd_resp_cntl_attribs
);
2809 attribs_cmd
.va
= pci_alloc_consistent(adapter
->pdev
, attribs_cmd
.size
,
2811 if (!attribs_cmd
.va
) {
2812 dev_err(&adapter
->pdev
->dev
, "Memory allocation failure\n");
2817 wrb
= wrb_from_mbox(adapter
);
2822 req
= attribs_cmd
.va
;
2824 be_wrb_cmd_hdr_prepare(&req
->hdr
, CMD_SUBSYSTEM_COMMON
,
2825 OPCODE_COMMON_GET_CNTL_ATTRIBUTES
, payload_len
,
2828 status
= be_mbox_notify_wait(adapter
);
2830 attribs
= attribs_cmd
.va
+ sizeof(struct be_cmd_resp_hdr
);
2831 adapter
->hba_port_num
= attribs
->hba_attribs
.phy_port
;
2835 mutex_unlock(&adapter
->mbox_lock
);
2837 pci_free_consistent(adapter
->pdev
, attribs_cmd
.size
,
2838 attribs_cmd
.va
, attribs_cmd
.dma
);
2843 int be_cmd_req_native_mode(struct be_adapter
*adapter
)
2845 struct be_mcc_wrb
*wrb
;
2846 struct be_cmd_req_set_func_cap
*req
;
2849 if (mutex_lock_interruptible(&adapter
->mbox_lock
))
2852 wrb
= wrb_from_mbox(adapter
);
2858 req
= embedded_payload(wrb
);
2860 be_wrb_cmd_hdr_prepare(&req
->hdr
, CMD_SUBSYSTEM_COMMON
,
2861 OPCODE_COMMON_SET_DRIVER_FUNCTION_CAP
,
2862 sizeof(*req
), wrb
, NULL
);
2864 req
->valid_cap_flags
= cpu_to_le32(CAPABILITY_SW_TIMESTAMPS
|
2865 CAPABILITY_BE3_NATIVE_ERX_API
);
2866 req
->cap_flags
= cpu_to_le32(CAPABILITY_BE3_NATIVE_ERX_API
);
2868 status
= be_mbox_notify_wait(adapter
);
2870 struct be_cmd_resp_set_func_cap
*resp
= embedded_payload(wrb
);
2872 adapter
->be3_native
= le32_to_cpu(resp
->cap_flags
) &
2873 CAPABILITY_BE3_NATIVE_ERX_API
;
2874 if (!adapter
->be3_native
)
2875 dev_warn(&adapter
->pdev
->dev
,
2876 "adapter not in advanced mode\n");
2879 mutex_unlock(&adapter
->mbox_lock
);
2883 /* Get privilege(s) for a function */
2884 int be_cmd_get_fn_privileges(struct be_adapter
*adapter
, u32
*privilege
,
2887 struct be_mcc_wrb
*wrb
;
2888 struct be_cmd_req_get_fn_privileges
*req
;
2891 spin_lock_bh(&adapter
->mcc_lock
);
2893 wrb
= wrb_from_mccq(adapter
);
2899 req
= embedded_payload(wrb
);
2901 be_wrb_cmd_hdr_prepare(&req
->hdr
, CMD_SUBSYSTEM_COMMON
,
2902 OPCODE_COMMON_GET_FN_PRIVILEGES
, sizeof(*req
),
2905 req
->hdr
.domain
= domain
;
2907 status
= be_mcc_notify_wait(adapter
);
2909 struct be_cmd_resp_get_fn_privileges
*resp
=
2910 embedded_payload(wrb
);
2912 *privilege
= le32_to_cpu(resp
->privilege_mask
);
2914 /* In UMC mode FW does not return right privileges.
2915 * Override with correct privilege equivalent to PF.
2917 if (BEx_chip(adapter
) && be_is_mc(adapter
) &&
2919 *privilege
= MAX_PRIVILEGES
;
2923 spin_unlock_bh(&adapter
->mcc_lock
);
2927 /* Set privilege(s) for a function */
2928 int be_cmd_set_fn_privileges(struct be_adapter
*adapter
, u32 privileges
,
2931 struct be_mcc_wrb
*wrb
;
2932 struct be_cmd_req_set_fn_privileges
*req
;
2935 spin_lock_bh(&adapter
->mcc_lock
);
2937 wrb
= wrb_from_mccq(adapter
);
2943 req
= embedded_payload(wrb
);
2944 be_wrb_cmd_hdr_prepare(&req
->hdr
, CMD_SUBSYSTEM_COMMON
,
2945 OPCODE_COMMON_SET_FN_PRIVILEGES
, sizeof(*req
),
2947 req
->hdr
.domain
= domain
;
2948 if (lancer_chip(adapter
))
2949 req
->privileges_lancer
= cpu_to_le32(privileges
);
2951 req
->privileges
= cpu_to_le32(privileges
);
2953 status
= be_mcc_notify_wait(adapter
);
2955 spin_unlock_bh(&adapter
->mcc_lock
);
2959 /* pmac_id_valid: true => pmac_id is supplied and MAC address is requested.
2960 * pmac_id_valid: false => pmac_id or MAC address is requested.
2961 * If pmac_id is returned, pmac_id_valid is returned as true
2963 int be_cmd_get_mac_from_list(struct be_adapter
*adapter
, u8
*mac
,
2964 bool *pmac_id_valid
, u32
*pmac_id
, u32 if_handle
,
2967 struct be_mcc_wrb
*wrb
;
2968 struct be_cmd_req_get_mac_list
*req
;
2971 struct be_dma_mem get_mac_list_cmd
;
2974 memset(&get_mac_list_cmd
, 0, sizeof(struct be_dma_mem
));
2975 get_mac_list_cmd
.size
= sizeof(struct be_cmd_resp_get_mac_list
);
2976 get_mac_list_cmd
.va
= pci_alloc_consistent(adapter
->pdev
,
2977 get_mac_list_cmd
.size
,
2978 &get_mac_list_cmd
.dma
);
2980 if (!get_mac_list_cmd
.va
) {
2981 dev_err(&adapter
->pdev
->dev
,
2982 "Memory allocation failure during GET_MAC_LIST\n");
2986 spin_lock_bh(&adapter
->mcc_lock
);
2988 wrb
= wrb_from_mccq(adapter
);
2994 req
= get_mac_list_cmd
.va
;
2996 be_wrb_cmd_hdr_prepare(&req
->hdr
, CMD_SUBSYSTEM_COMMON
,
2997 OPCODE_COMMON_GET_MAC_LIST
,
2998 get_mac_list_cmd
.size
, wrb
, &get_mac_list_cmd
);
2999 req
->hdr
.domain
= domain
;
3000 req
->mac_type
= MAC_ADDRESS_TYPE_NETWORK
;
3001 if (*pmac_id_valid
) {
3002 req
->mac_id
= cpu_to_le32(*pmac_id
);
3003 req
->iface_id
= cpu_to_le16(if_handle
);
3004 req
->perm_override
= 0;
3006 req
->perm_override
= 1;
3009 status
= be_mcc_notify_wait(adapter
);
3011 struct be_cmd_resp_get_mac_list
*resp
=
3012 get_mac_list_cmd
.va
;
3014 if (*pmac_id_valid
) {
3015 memcpy(mac
, resp
->macid_macaddr
.mac_addr_id
.macaddr
,
3020 mac_count
= resp
->true_mac_count
+ resp
->pseudo_mac_count
;
3021 /* Mac list returned could contain one or more active mac_ids
3022 * or one or more true or pseudo permanent mac addresses.
3023 * If an active mac_id is present, return first active mac_id
3026 for (i
= 0; i
< mac_count
; i
++) {
3027 struct get_list_macaddr
*mac_entry
;
3031 mac_entry
= &resp
->macaddr_list
[i
];
3032 mac_addr_size
= le16_to_cpu(mac_entry
->mac_addr_size
);
3033 /* mac_id is a 32 bit value and mac_addr size
3036 if (mac_addr_size
== sizeof(u32
)) {
3037 *pmac_id_valid
= true;
3038 mac_id
= mac_entry
->mac_addr_id
.s_mac_id
.mac_id
;
3039 *pmac_id
= le32_to_cpu(mac_id
);
3043 /* If no active mac_id found, return first mac addr */
3044 *pmac_id_valid
= false;
3045 memcpy(mac
, resp
->macaddr_list
[0].mac_addr_id
.macaddr
,
3050 spin_unlock_bh(&adapter
->mcc_lock
);
3051 pci_free_consistent(adapter
->pdev
, get_mac_list_cmd
.size
,
3052 get_mac_list_cmd
.va
, get_mac_list_cmd
.dma
);
3056 int be_cmd_get_active_mac(struct be_adapter
*adapter
, u32 curr_pmac_id
,
3057 u8
*mac
, u32 if_handle
, bool active
, u32 domain
)
3060 be_cmd_get_mac_from_list(adapter
, mac
, &active
, &curr_pmac_id
,
3062 if (BEx_chip(adapter
))
3063 return be_cmd_mac_addr_query(adapter
, mac
, false,
3064 if_handle
, curr_pmac_id
);
3066 /* Fetch the MAC address using pmac_id */
3067 return be_cmd_get_mac_from_list(adapter
, mac
, &active
,
3072 int be_cmd_get_perm_mac(struct be_adapter
*adapter
, u8
*mac
)
3075 bool pmac_valid
= false;
3079 if (BEx_chip(adapter
)) {
3080 if (be_physfn(adapter
))
3081 status
= be_cmd_mac_addr_query(adapter
, mac
, true, 0,
3084 status
= be_cmd_mac_addr_query(adapter
, mac
, false,
3085 adapter
->if_handle
, 0);
3087 status
= be_cmd_get_mac_from_list(adapter
, mac
, &pmac_valid
,
3088 NULL
, adapter
->if_handle
, 0);
3094 /* Uses synchronous MCCQ */
3095 int be_cmd_set_mac_list(struct be_adapter
*adapter
, u8
*mac_array
,
3096 u8 mac_count
, u32 domain
)
3098 struct be_mcc_wrb
*wrb
;
3099 struct be_cmd_req_set_mac_list
*req
;
3101 struct be_dma_mem cmd
;
3103 memset(&cmd
, 0, sizeof(struct be_dma_mem
));
3104 cmd
.size
= sizeof(struct be_cmd_req_set_mac_list
);
3105 cmd
.va
= dma_alloc_coherent(&adapter
->pdev
->dev
, cmd
.size
,
3106 &cmd
.dma
, GFP_KERNEL
);
3110 spin_lock_bh(&adapter
->mcc_lock
);
3112 wrb
= wrb_from_mccq(adapter
);
3119 be_wrb_cmd_hdr_prepare(&req
->hdr
, CMD_SUBSYSTEM_COMMON
,
3120 OPCODE_COMMON_SET_MAC_LIST
, sizeof(*req
),
3123 req
->hdr
.domain
= domain
;
3124 req
->mac_count
= mac_count
;
3126 memcpy(req
->mac
, mac_array
, ETH_ALEN
*mac_count
);
3128 status
= be_mcc_notify_wait(adapter
);
3131 dma_free_coherent(&adapter
->pdev
->dev
, cmd
.size
, cmd
.va
, cmd
.dma
);
3132 spin_unlock_bh(&adapter
->mcc_lock
);
3136 /* Wrapper to delete any active MACs and provision the new mac.
3137 * Changes to MAC_LIST are allowed iff none of the MAC addresses in the
3138 * current list are active.
3140 int be_cmd_set_mac(struct be_adapter
*adapter
, u8
*mac
, int if_id
, u32 dom
)
3142 bool active_mac
= false;
3143 u8 old_mac
[ETH_ALEN
];
3147 status
= be_cmd_get_mac_from_list(adapter
, old_mac
, &active_mac
,
3148 &pmac_id
, if_id
, dom
);
3150 if (!status
&& active_mac
)
3151 be_cmd_pmac_del(adapter
, if_id
, pmac_id
, dom
);
3153 return be_cmd_set_mac_list(adapter
, mac
, mac
? 1 : 0, dom
);
3156 int be_cmd_set_hsw_config(struct be_adapter
*adapter
, u16 pvid
,
3157 u32 domain
, u16 intf_id
, u16 hsw_mode
, u8 spoofchk
)
3159 struct be_mcc_wrb
*wrb
;
3160 struct be_cmd_req_set_hsw_config
*req
;
3164 spin_lock_bh(&adapter
->mcc_lock
);
3166 wrb
= wrb_from_mccq(adapter
);
3172 req
= embedded_payload(wrb
);
3173 ctxt
= &req
->context
;
3175 be_wrb_cmd_hdr_prepare(&req
->hdr
, CMD_SUBSYSTEM_COMMON
,
3176 OPCODE_COMMON_SET_HSW_CONFIG
, sizeof(*req
), wrb
,
3179 req
->hdr
.domain
= domain
;
3180 AMAP_SET_BITS(struct amap_set_hsw_context
, interface_id
, ctxt
, intf_id
);
3182 AMAP_SET_BITS(struct amap_set_hsw_context
, pvid_valid
, ctxt
, 1);
3183 AMAP_SET_BITS(struct amap_set_hsw_context
, pvid
, ctxt
, pvid
);
3185 if (!BEx_chip(adapter
) && hsw_mode
) {
3186 AMAP_SET_BITS(struct amap_set_hsw_context
, interface_id
,
3187 ctxt
, adapter
->hba_port_num
);
3188 AMAP_SET_BITS(struct amap_set_hsw_context
, pport
, ctxt
, 1);
3189 AMAP_SET_BITS(struct amap_set_hsw_context
, port_fwd_type
,
3193 /* Enable/disable both mac and vlan spoof checking */
3194 if (!BEx_chip(adapter
) && spoofchk
) {
3195 AMAP_SET_BITS(struct amap_set_hsw_context
, mac_spoofchk
,
3197 AMAP_SET_BITS(struct amap_set_hsw_context
, vlan_spoofchk
,
3201 be_dws_cpu_to_le(req
->context
, sizeof(req
->context
));
3202 status
= be_mcc_notify_wait(adapter
);
3205 spin_unlock_bh(&adapter
->mcc_lock
);
3209 /* Get Hyper switch config */
3210 int be_cmd_get_hsw_config(struct be_adapter
*adapter
, u16
*pvid
,
3211 u32 domain
, u16 intf_id
, u8
*mode
, bool *spoofchk
)
3213 struct be_mcc_wrb
*wrb
;
3214 struct be_cmd_req_get_hsw_config
*req
;
3219 spin_lock_bh(&adapter
->mcc_lock
);
3221 wrb
= wrb_from_mccq(adapter
);
3227 req
= embedded_payload(wrb
);
3228 ctxt
= &req
->context
;
3230 be_wrb_cmd_hdr_prepare(&req
->hdr
, CMD_SUBSYSTEM_COMMON
,
3231 OPCODE_COMMON_GET_HSW_CONFIG
, sizeof(*req
), wrb
,
3234 req
->hdr
.domain
= domain
;
3235 AMAP_SET_BITS(struct amap_get_hsw_req_context
, interface_id
,
3237 AMAP_SET_BITS(struct amap_get_hsw_req_context
, pvid_valid
, ctxt
, 1);
3239 if (!BEx_chip(adapter
) && mode
) {
3240 AMAP_SET_BITS(struct amap_get_hsw_req_context
, interface_id
,
3241 ctxt
, adapter
->hba_port_num
);
3242 AMAP_SET_BITS(struct amap_get_hsw_req_context
, pport
, ctxt
, 1);
3244 be_dws_cpu_to_le(req
->context
, sizeof(req
->context
));
3246 status
= be_mcc_notify_wait(adapter
);
3248 struct be_cmd_resp_get_hsw_config
*resp
=
3249 embedded_payload(wrb
);
3251 be_dws_le_to_cpu(&resp
->context
, sizeof(resp
->context
));
3252 vid
= AMAP_GET_BITS(struct amap_get_hsw_resp_context
,
3253 pvid
, &resp
->context
);
3255 *pvid
= le16_to_cpu(vid
);
3257 *mode
= AMAP_GET_BITS(struct amap_get_hsw_resp_context
,
3258 port_fwd_type
, &resp
->context
);
3261 AMAP_GET_BITS(struct amap_get_hsw_resp_context
,
3262 spoofchk
, &resp
->context
);
3266 spin_unlock_bh(&adapter
->mcc_lock
);
3270 static bool be_is_wol_excluded(struct be_adapter
*adapter
)
3272 struct pci_dev
*pdev
= adapter
->pdev
;
3274 if (!be_physfn(adapter
))
3277 switch (pdev
->subsystem_device
) {
3278 case OC_SUBSYS_DEVICE_ID1
:
3279 case OC_SUBSYS_DEVICE_ID2
:
3280 case OC_SUBSYS_DEVICE_ID3
:
3281 case OC_SUBSYS_DEVICE_ID4
:
3288 int be_cmd_get_acpi_wol_cap(struct be_adapter
*adapter
)
3290 struct be_mcc_wrb
*wrb
;
3291 struct be_cmd_req_acpi_wol_magic_config_v1
*req
;
3293 struct be_dma_mem cmd
;
3295 if (!be_cmd_allowed(adapter
, OPCODE_ETH_ACPI_WOL_MAGIC_CONFIG
,
3299 if (be_is_wol_excluded(adapter
))
3302 if (mutex_lock_interruptible(&adapter
->mbox_lock
))
3305 memset(&cmd
, 0, sizeof(struct be_dma_mem
));
3306 cmd
.size
= sizeof(struct be_cmd_resp_acpi_wol_magic_config_v1
);
3307 cmd
.va
= pci_alloc_consistent(adapter
->pdev
, cmd
.size
, &cmd
.dma
);
3309 dev_err(&adapter
->pdev
->dev
, "Memory allocation failure\n");
3314 wrb
= wrb_from_mbox(adapter
);
3322 be_wrb_cmd_hdr_prepare(&req
->hdr
, CMD_SUBSYSTEM_ETH
,
3323 OPCODE_ETH_ACPI_WOL_MAGIC_CONFIG
,
3324 sizeof(*req
), wrb
, &cmd
);
3326 req
->hdr
.version
= 1;
3327 req
->query_options
= BE_GET_WOL_CAP
;
3329 status
= be_mbox_notify_wait(adapter
);
3331 struct be_cmd_resp_acpi_wol_magic_config_v1
*resp
;
3333 resp
= (struct be_cmd_resp_acpi_wol_magic_config_v1
*)cmd
.va
;
3335 adapter
->wol_cap
= resp
->wol_settings
;
3336 if (adapter
->wol_cap
& BE_WOL_CAP
)
3337 adapter
->wol_en
= true;
3340 mutex_unlock(&adapter
->mbox_lock
);
3342 pci_free_consistent(adapter
->pdev
, cmd
.size
, cmd
.va
, cmd
.dma
);
3347 int be_cmd_set_fw_log_level(struct be_adapter
*adapter
, u32 level
)
3349 struct be_dma_mem extfat_cmd
;
3350 struct be_fat_conf_params
*cfgs
;
3354 memset(&extfat_cmd
, 0, sizeof(struct be_dma_mem
));
3355 extfat_cmd
.size
= sizeof(struct be_cmd_resp_get_ext_fat_caps
);
3356 extfat_cmd
.va
= pci_alloc_consistent(adapter
->pdev
, extfat_cmd
.size
,
3361 status
= be_cmd_get_ext_fat_capabilites(adapter
, &extfat_cmd
);
3365 cfgs
= (struct be_fat_conf_params
*)
3366 (extfat_cmd
.va
+ sizeof(struct be_cmd_resp_hdr
));
3367 for (i
= 0; i
< le32_to_cpu(cfgs
->num_modules
); i
++) {
3368 u32 num_modes
= le32_to_cpu(cfgs
->module
[i
].num_modes
);
3370 for (j
= 0; j
< num_modes
; j
++) {
3371 if (cfgs
->module
[i
].trace_lvl
[j
].mode
== MODE_UART
)
3372 cfgs
->module
[i
].trace_lvl
[j
].dbg_lvl
=
3377 status
= be_cmd_set_ext_fat_capabilites(adapter
, &extfat_cmd
, cfgs
);
3379 pci_free_consistent(adapter
->pdev
, extfat_cmd
.size
, extfat_cmd
.va
,
3384 int be_cmd_get_fw_log_level(struct be_adapter
*adapter
)
3386 struct be_dma_mem extfat_cmd
;
3387 struct be_fat_conf_params
*cfgs
;
3391 memset(&extfat_cmd
, 0, sizeof(struct be_dma_mem
));
3392 extfat_cmd
.size
= sizeof(struct be_cmd_resp_get_ext_fat_caps
);
3393 extfat_cmd
.va
= pci_alloc_consistent(adapter
->pdev
, extfat_cmd
.size
,
3396 if (!extfat_cmd
.va
) {
3397 dev_err(&adapter
->pdev
->dev
, "%s: Memory allocation failure\n",
3402 status
= be_cmd_get_ext_fat_capabilites(adapter
, &extfat_cmd
);
3404 cfgs
= (struct be_fat_conf_params
*)(extfat_cmd
.va
+
3405 sizeof(struct be_cmd_resp_hdr
));
3407 for (j
= 0; j
< le32_to_cpu(cfgs
->module
[0].num_modes
); j
++) {
3408 if (cfgs
->module
[0].trace_lvl
[j
].mode
== MODE_UART
)
3409 level
= cfgs
->module
[0].trace_lvl
[j
].dbg_lvl
;
3412 pci_free_consistent(adapter
->pdev
, extfat_cmd
.size
, extfat_cmd
.va
,
3418 int be_cmd_get_ext_fat_capabilites(struct be_adapter
*adapter
,
3419 struct be_dma_mem
*cmd
)
3421 struct be_mcc_wrb
*wrb
;
3422 struct be_cmd_req_get_ext_fat_caps
*req
;
3425 if (mutex_lock_interruptible(&adapter
->mbox_lock
))
3428 wrb
= wrb_from_mbox(adapter
);
3435 be_wrb_cmd_hdr_prepare(&req
->hdr
, CMD_SUBSYSTEM_COMMON
,
3436 OPCODE_COMMON_GET_EXT_FAT_CAPABILITES
,
3437 cmd
->size
, wrb
, cmd
);
3438 req
->parameter_type
= cpu_to_le32(1);
3440 status
= be_mbox_notify_wait(adapter
);
3442 mutex_unlock(&adapter
->mbox_lock
);
3446 int be_cmd_set_ext_fat_capabilites(struct be_adapter
*adapter
,
3447 struct be_dma_mem
*cmd
,
3448 struct be_fat_conf_params
*configs
)
3450 struct be_mcc_wrb
*wrb
;
3451 struct be_cmd_req_set_ext_fat_caps
*req
;
3454 spin_lock_bh(&adapter
->mcc_lock
);
3456 wrb
= wrb_from_mccq(adapter
);
3463 memcpy(&req
->set_params
, configs
, sizeof(struct be_fat_conf_params
));
3464 be_wrb_cmd_hdr_prepare(&req
->hdr
, CMD_SUBSYSTEM_COMMON
,
3465 OPCODE_COMMON_SET_EXT_FAT_CAPABILITES
,
3466 cmd
->size
, wrb
, cmd
);
3468 status
= be_mcc_notify_wait(adapter
);
3470 spin_unlock_bh(&adapter
->mcc_lock
);
3474 int be_cmd_query_port_name(struct be_adapter
*adapter
)
3476 struct be_cmd_req_get_port_name
*req
;
3477 struct be_mcc_wrb
*wrb
;
3480 if (mutex_lock_interruptible(&adapter
->mbox_lock
))
3483 wrb
= wrb_from_mbox(adapter
);
3484 req
= embedded_payload(wrb
);
3486 be_wrb_cmd_hdr_prepare(&req
->hdr
, CMD_SUBSYSTEM_COMMON
,
3487 OPCODE_COMMON_GET_PORT_NAME
, sizeof(*req
), wrb
,
3489 if (!BEx_chip(adapter
))
3490 req
->hdr
.version
= 1;
3492 status
= be_mbox_notify_wait(adapter
);
3494 struct be_cmd_resp_get_port_name
*resp
= embedded_payload(wrb
);
3496 adapter
->port_name
= resp
->port_name
[adapter
->hba_port_num
];
3498 adapter
->port_name
= adapter
->hba_port_num
+ '0';
3501 mutex_unlock(&adapter
->mbox_lock
);
3505 /* Descriptor type */
3511 static struct be_nic_res_desc
*be_get_nic_desc(u8
*buf
, u32 desc_count
,
3514 struct be_res_desc_hdr
*hdr
= (struct be_res_desc_hdr
*)buf
;
3515 struct be_nic_res_desc
*nic
;
3518 for (i
= 0; i
< desc_count
; i
++) {
3519 if (hdr
->desc_type
== NIC_RESOURCE_DESC_TYPE_V0
||
3520 hdr
->desc_type
== NIC_RESOURCE_DESC_TYPE_V1
) {
3521 nic
= (struct be_nic_res_desc
*)hdr
;
3522 if (desc_type
== FUNC_DESC
||
3523 (desc_type
== VFT_DESC
&&
3524 nic
->flags
& (1 << VFT_SHIFT
)))
3528 hdr
->desc_len
= hdr
->desc_len
? : RESOURCE_DESC_SIZE_V0
;
3529 hdr
= (void *)hdr
+ hdr
->desc_len
;
3534 static struct be_nic_res_desc
*be_get_vft_desc(u8
*buf
, u32 desc_count
)
3536 return be_get_nic_desc(buf
, desc_count
, VFT_DESC
);
3539 static struct be_nic_res_desc
*be_get_func_nic_desc(u8
*buf
, u32 desc_count
)
3541 return be_get_nic_desc(buf
, desc_count
, FUNC_DESC
);
3544 static struct be_pcie_res_desc
*be_get_pcie_desc(u8 devfn
, u8
*buf
,
3547 struct be_res_desc_hdr
*hdr
= (struct be_res_desc_hdr
*)buf
;
3548 struct be_pcie_res_desc
*pcie
;
3551 for (i
= 0; i
< desc_count
; i
++) {
3552 if ((hdr
->desc_type
== PCIE_RESOURCE_DESC_TYPE_V0
||
3553 hdr
->desc_type
== PCIE_RESOURCE_DESC_TYPE_V1
)) {
3554 pcie
= (struct be_pcie_res_desc
*)hdr
;
3555 if (pcie
->pf_num
== devfn
)
3559 hdr
->desc_len
= hdr
->desc_len
? : RESOURCE_DESC_SIZE_V0
;
3560 hdr
= (void *)hdr
+ hdr
->desc_len
;
3565 static struct be_port_res_desc
*be_get_port_desc(u8
*buf
, u32 desc_count
)
3567 struct be_res_desc_hdr
*hdr
= (struct be_res_desc_hdr
*)buf
;
3570 for (i
= 0; i
< desc_count
; i
++) {
3571 if (hdr
->desc_type
== PORT_RESOURCE_DESC_TYPE_V1
)
3572 return (struct be_port_res_desc
*)hdr
;
3574 hdr
->desc_len
= hdr
->desc_len
? : RESOURCE_DESC_SIZE_V0
;
3575 hdr
= (void *)hdr
+ hdr
->desc_len
;
3580 static void be_copy_nic_desc(struct be_resources
*res
,
3581 struct be_nic_res_desc
*desc
)
3583 res
->max_uc_mac
= le16_to_cpu(desc
->unicast_mac_count
);
3584 res
->max_vlans
= le16_to_cpu(desc
->vlan_count
);
3585 res
->max_mcast_mac
= le16_to_cpu(desc
->mcast_mac_count
);
3586 res
->max_tx_qs
= le16_to_cpu(desc
->txq_count
);
3587 res
->max_rss_qs
= le16_to_cpu(desc
->rssq_count
);
3588 res
->max_rx_qs
= le16_to_cpu(desc
->rq_count
);
3589 res
->max_evt_qs
= le16_to_cpu(desc
->eq_count
);
3590 res
->max_cq_count
= le16_to_cpu(desc
->cq_count
);
3591 res
->max_iface_count
= le16_to_cpu(desc
->iface_count
);
3592 res
->max_mcc_count
= le16_to_cpu(desc
->mcc_count
);
3593 /* Clear flags that driver is not interested in */
3594 res
->if_cap_flags
= le32_to_cpu(desc
->cap_flags
) &
3595 BE_IF_CAP_FLAGS_WANT
;
3599 int be_cmd_get_func_config(struct be_adapter
*adapter
, struct be_resources
*res
)
3601 struct be_mcc_wrb
*wrb
;
3602 struct be_cmd_req_get_func_config
*req
;
3604 struct be_dma_mem cmd
;
3606 if (mutex_lock_interruptible(&adapter
->mbox_lock
))
3609 memset(&cmd
, 0, sizeof(struct be_dma_mem
));
3610 cmd
.size
= sizeof(struct be_cmd_resp_get_func_config
);
3611 cmd
.va
= pci_alloc_consistent(adapter
->pdev
, cmd
.size
, &cmd
.dma
);
3613 dev_err(&adapter
->pdev
->dev
, "Memory alloc failure\n");
3618 wrb
= wrb_from_mbox(adapter
);
3626 be_wrb_cmd_hdr_prepare(&req
->hdr
, CMD_SUBSYSTEM_COMMON
,
3627 OPCODE_COMMON_GET_FUNC_CONFIG
,
3628 cmd
.size
, wrb
, &cmd
);
3630 if (skyhawk_chip(adapter
))
3631 req
->hdr
.version
= 1;
3633 status
= be_mbox_notify_wait(adapter
);
3635 struct be_cmd_resp_get_func_config
*resp
= cmd
.va
;
3636 u32 desc_count
= le32_to_cpu(resp
->desc_count
);
3637 struct be_nic_res_desc
*desc
;
3639 desc
= be_get_func_nic_desc(resp
->func_param
, desc_count
);
3645 adapter
->pf_number
= desc
->pf_num
;
3646 be_copy_nic_desc(res
, desc
);
3649 mutex_unlock(&adapter
->mbox_lock
);
3651 pci_free_consistent(adapter
->pdev
, cmd
.size
, cmd
.va
, cmd
.dma
);
3655 /* Will use MBOX only if MCCQ has not been created */
3656 int be_cmd_get_profile_config(struct be_adapter
*adapter
,
3657 struct be_resources
*res
, u8 query
, u8 domain
)
3659 struct be_cmd_resp_get_profile_config
*resp
;
3660 struct be_cmd_req_get_profile_config
*req
;
3661 struct be_nic_res_desc
*vf_res
;
3662 struct be_pcie_res_desc
*pcie
;
3663 struct be_port_res_desc
*port
;
3664 struct be_nic_res_desc
*nic
;
3665 struct be_mcc_wrb wrb
= {0};
3666 struct be_dma_mem cmd
;
3670 memset(&cmd
, 0, sizeof(struct be_dma_mem
));
3671 cmd
.size
= sizeof(struct be_cmd_resp_get_profile_config
);
3672 cmd
.va
= pci_alloc_consistent(adapter
->pdev
, cmd
.size
, &cmd
.dma
);
3677 be_wrb_cmd_hdr_prepare(&req
->hdr
, CMD_SUBSYSTEM_COMMON
,
3678 OPCODE_COMMON_GET_PROFILE_CONFIG
,
3679 cmd
.size
, &wrb
, &cmd
);
3681 req
->hdr
.domain
= domain
;
3682 if (!lancer_chip(adapter
))
3683 req
->hdr
.version
= 1;
3684 req
->type
= ACTIVE_PROFILE_TYPE
;
3686 /* When QUERY_MODIFIABLE_FIELDS_TYPE bit is set, cmd returns the
3687 * descriptors with all bits set to "1" for the fields which can be
3688 * modified using SET_PROFILE_CONFIG cmd.
3690 if (query
== RESOURCE_MODIFIABLE
)
3691 req
->type
|= QUERY_MODIFIABLE_FIELDS_TYPE
;
3693 status
= be_cmd_notify_wait(adapter
, &wrb
);
3698 desc_count
= le16_to_cpu(resp
->desc_count
);
3700 pcie
= be_get_pcie_desc(adapter
->pdev
->devfn
, resp
->func_param
,
3703 res
->max_vfs
= le16_to_cpu(pcie
->num_vfs
);
3705 port
= be_get_port_desc(resp
->func_param
, desc_count
);
3707 adapter
->mc_type
= port
->mc_type
;
3709 nic
= be_get_func_nic_desc(resp
->func_param
, desc_count
);
3711 be_copy_nic_desc(res
, nic
);
3713 vf_res
= be_get_vft_desc(resp
->func_param
, desc_count
);
3715 res
->vf_if_cap_flags
= vf_res
->cap_flags
;
3718 pci_free_consistent(adapter
->pdev
, cmd
.size
, cmd
.va
, cmd
.dma
);
3722 /* Will use MBOX only if MCCQ has not been created */
3723 static int be_cmd_set_profile_config(struct be_adapter
*adapter
, void *desc
,
3724 int size
, int count
, u8 version
, u8 domain
)
3726 struct be_cmd_req_set_profile_config
*req
;
3727 struct be_mcc_wrb wrb
= {0};
3728 struct be_dma_mem cmd
;
3731 memset(&cmd
, 0, sizeof(struct be_dma_mem
));
3732 cmd
.size
= sizeof(struct be_cmd_req_set_profile_config
);
3733 cmd
.va
= pci_alloc_consistent(adapter
->pdev
, cmd
.size
, &cmd
.dma
);
3738 be_wrb_cmd_hdr_prepare(&req
->hdr
, CMD_SUBSYSTEM_COMMON
,
3739 OPCODE_COMMON_SET_PROFILE_CONFIG
, cmd
.size
,
3741 req
->hdr
.version
= version
;
3742 req
->hdr
.domain
= domain
;
3743 req
->desc_count
= cpu_to_le32(count
);
3744 memcpy(req
->desc
, desc
, size
);
3746 status
= be_cmd_notify_wait(adapter
, &wrb
);
3749 pci_free_consistent(adapter
->pdev
, cmd
.size
, cmd
.va
, cmd
.dma
);
3753 /* Mark all fields invalid */
3754 static void be_reset_nic_desc(struct be_nic_res_desc
*nic
)
3756 memset(nic
, 0, sizeof(*nic
));
3757 nic
->unicast_mac_count
= 0xFFFF;
3758 nic
->mcc_count
= 0xFFFF;
3759 nic
->vlan_count
= 0xFFFF;
3760 nic
->mcast_mac_count
= 0xFFFF;
3761 nic
->txq_count
= 0xFFFF;
3762 nic
->rq_count
= 0xFFFF;
3763 nic
->rssq_count
= 0xFFFF;
3764 nic
->lro_count
= 0xFFFF;
3765 nic
->cq_count
= 0xFFFF;
3766 nic
->toe_conn_count
= 0xFFFF;
3767 nic
->eq_count
= 0xFFFF;
3768 nic
->iface_count
= 0xFFFF;
3769 nic
->link_param
= 0xFF;
3770 nic
->channel_id_param
= cpu_to_le16(0xF000);
3771 nic
->acpi_params
= 0xFF;
3772 nic
->wol_param
= 0x0F;
3773 nic
->tunnel_iface_count
= 0xFFFF;
3774 nic
->direct_tenant_iface_count
= 0xFFFF;
3775 nic
->bw_min
= 0xFFFFFFFF;
3776 nic
->bw_max
= 0xFFFFFFFF;
3779 /* Mark all fields invalid */
3780 static void be_reset_pcie_desc(struct be_pcie_res_desc
*pcie
)
3782 memset(pcie
, 0, sizeof(*pcie
));
3783 pcie
->sriov_state
= 0xFF;
3784 pcie
->pf_state
= 0xFF;
3785 pcie
->pf_type
= 0xFF;
3786 pcie
->num_vfs
= 0xFFFF;
3789 int be_cmd_config_qos(struct be_adapter
*adapter
, u32 max_rate
, u16 link_speed
,
3792 struct be_nic_res_desc nic_desc
;
3796 if (BE3_chip(adapter
))
3797 return be_cmd_set_qos(adapter
, max_rate
/ 10, domain
);
3799 be_reset_nic_desc(&nic_desc
);
3800 nic_desc
.pf_num
= adapter
->pf_number
;
3801 nic_desc
.vf_num
= domain
;
3802 nic_desc
.bw_min
= 0;
3803 if (lancer_chip(adapter
)) {
3804 nic_desc
.hdr
.desc_type
= NIC_RESOURCE_DESC_TYPE_V0
;
3805 nic_desc
.hdr
.desc_len
= RESOURCE_DESC_SIZE_V0
;
3806 nic_desc
.flags
= (1 << QUN_SHIFT
) | (1 << IMM_SHIFT
) |
3808 nic_desc
.bw_max
= cpu_to_le32(max_rate
/ 10);
3811 nic_desc
.hdr
.desc_type
= NIC_RESOURCE_DESC_TYPE_V1
;
3812 nic_desc
.hdr
.desc_len
= RESOURCE_DESC_SIZE_V1
;
3813 nic_desc
.flags
= (1 << IMM_SHIFT
) | (1 << NOSV_SHIFT
);
3814 bw_percent
= max_rate
? (max_rate
* 100) / link_speed
: 100;
3815 nic_desc
.bw_max
= cpu_to_le32(bw_percent
);
3818 return be_cmd_set_profile_config(adapter
, &nic_desc
,
3819 nic_desc
.hdr
.desc_len
,
3820 1, version
, domain
);
3823 static void be_fill_vf_res_template(struct be_adapter
*adapter
,
3824 struct be_resources pool_res
,
3825 u16 num_vfs
, u16 num_vf_qs
,
3826 struct be_nic_res_desc
*nic_vft
)
3828 u32 vf_if_cap_flags
= pool_res
.vf_if_cap_flags
;
3829 struct be_resources res_mod
= {0};
3831 /* Resource with fields set to all '1's by GET_PROFILE_CONFIG cmd,
3832 * which are modifiable using SET_PROFILE_CONFIG cmd.
3834 be_cmd_get_profile_config(adapter
, &res_mod
, RESOURCE_MODIFIABLE
, 0);
3836 /* If RSS IFACE capability flags are modifiable for a VF, set the
3837 * capability flag as valid and set RSS and DEFQ_RSS IFACE flags if
3838 * more than 1 RSSQ is available for a VF.
3839 * Otherwise, provision only 1 queue pair for VF.
3841 if (res_mod
.vf_if_cap_flags
& BE_IF_FLAGS_RSS
) {
3842 nic_vft
->flags
|= BIT(IF_CAPS_FLAGS_VALID_SHIFT
);
3843 if (num_vf_qs
> 1) {
3844 vf_if_cap_flags
|= BE_IF_FLAGS_RSS
;
3845 if (pool_res
.if_cap_flags
& BE_IF_FLAGS_DEFQ_RSS
)
3846 vf_if_cap_flags
|= BE_IF_FLAGS_DEFQ_RSS
;
3848 vf_if_cap_flags
&= ~(BE_IF_FLAGS_RSS
|
3849 BE_IF_FLAGS_DEFQ_RSS
);
3852 nic_vft
->cap_flags
= cpu_to_le32(vf_if_cap_flags
);
3857 nic_vft
->rq_count
= cpu_to_le16(num_vf_qs
);
3858 nic_vft
->txq_count
= cpu_to_le16(num_vf_qs
);
3859 nic_vft
->rssq_count
= cpu_to_le16(num_vf_qs
);
3860 nic_vft
->cq_count
= cpu_to_le16(pool_res
.max_cq_count
/
3863 /* Distribute unicast MACs, VLANs, IFACE count and MCCQ count equally
3864 * among the PF and it's VFs, if the fields are changeable
3866 if (res_mod
.max_uc_mac
== FIELD_MODIFIABLE
)
3867 nic_vft
->unicast_mac_count
= cpu_to_le16(pool_res
.max_uc_mac
/
3870 if (res_mod
.max_vlans
== FIELD_MODIFIABLE
)
3871 nic_vft
->vlan_count
= cpu_to_le16(pool_res
.max_vlans
/
3874 if (res_mod
.max_iface_count
== FIELD_MODIFIABLE
)
3875 nic_vft
->iface_count
= cpu_to_le16(pool_res
.max_iface_count
/
3878 if (res_mod
.max_mcc_count
== FIELD_MODIFIABLE
)
3879 nic_vft
->mcc_count
= cpu_to_le16(pool_res
.max_mcc_count
/
3883 int be_cmd_set_sriov_config(struct be_adapter
*adapter
,
3884 struct be_resources pool_res
, u16 num_vfs
,
3888 struct be_pcie_res_desc pcie
;
3889 struct be_nic_res_desc nic_vft
;
3892 /* PF PCIE descriptor */
3893 be_reset_pcie_desc(&desc
.pcie
);
3894 desc
.pcie
.hdr
.desc_type
= PCIE_RESOURCE_DESC_TYPE_V1
;
3895 desc
.pcie
.hdr
.desc_len
= RESOURCE_DESC_SIZE_V1
;
3896 desc
.pcie
.flags
= BIT(IMM_SHIFT
) | BIT(NOSV_SHIFT
);
3897 desc
.pcie
.pf_num
= adapter
->pdev
->devfn
;
3898 desc
.pcie
.sriov_state
= num_vfs
? 1 : 0;
3899 desc
.pcie
.num_vfs
= cpu_to_le16(num_vfs
);
3901 /* VF NIC Template descriptor */
3902 be_reset_nic_desc(&desc
.nic_vft
);
3903 desc
.nic_vft
.hdr
.desc_type
= NIC_RESOURCE_DESC_TYPE_V1
;
3904 desc
.nic_vft
.hdr
.desc_len
= RESOURCE_DESC_SIZE_V1
;
3905 desc
.nic_vft
.flags
= BIT(VFT_SHIFT
) | BIT(IMM_SHIFT
) | BIT(NOSV_SHIFT
);
3906 desc
.nic_vft
.pf_num
= adapter
->pdev
->devfn
;
3907 desc
.nic_vft
.vf_num
= 0;
3909 be_fill_vf_res_template(adapter
, pool_res
, num_vfs
, num_vf_qs
,
3912 return be_cmd_set_profile_config(adapter
, &desc
,
3913 2 * RESOURCE_DESC_SIZE_V1
, 2, 1, 0);
3916 int be_cmd_manage_iface(struct be_adapter
*adapter
, u32 iface
, u8 op
)
3918 struct be_mcc_wrb
*wrb
;
3919 struct be_cmd_req_manage_iface_filters
*req
;
3922 if (iface
== 0xFFFFFFFF)
3925 spin_lock_bh(&adapter
->mcc_lock
);
3927 wrb
= wrb_from_mccq(adapter
);
3932 req
= embedded_payload(wrb
);
3934 be_wrb_cmd_hdr_prepare(&req
->hdr
, CMD_SUBSYSTEM_COMMON
,
3935 OPCODE_COMMON_MANAGE_IFACE_FILTERS
, sizeof(*req
),
3938 req
->target_iface_id
= cpu_to_le32(iface
);
3940 status
= be_mcc_notify_wait(adapter
);
3942 spin_unlock_bh(&adapter
->mcc_lock
);
3946 int be_cmd_set_vxlan_port(struct be_adapter
*adapter
, __be16 port
)
3948 struct be_port_res_desc port_desc
;
3950 memset(&port_desc
, 0, sizeof(port_desc
));
3951 port_desc
.hdr
.desc_type
= PORT_RESOURCE_DESC_TYPE_V1
;
3952 port_desc
.hdr
.desc_len
= RESOURCE_DESC_SIZE_V1
;
3953 port_desc
.flags
= (1 << IMM_SHIFT
) | (1 << NOSV_SHIFT
);
3954 port_desc
.link_num
= adapter
->hba_port_num
;
3956 port_desc
.nv_flags
= NV_TYPE_VXLAN
| (1 << SOCVID_SHIFT
) |
3958 port_desc
.nv_port
= swab16(port
);
3960 port_desc
.nv_flags
= NV_TYPE_DISABLED
;
3961 port_desc
.nv_port
= 0;
3964 return be_cmd_set_profile_config(adapter
, &port_desc
,
3965 RESOURCE_DESC_SIZE_V1
, 1, 1, 0);
3968 int be_cmd_get_if_id(struct be_adapter
*adapter
, struct be_vf_cfg
*vf_cfg
,
3971 struct be_mcc_wrb
*wrb
;
3972 struct be_cmd_req_get_iface_list
*req
;
3973 struct be_cmd_resp_get_iface_list
*resp
;
3976 spin_lock_bh(&adapter
->mcc_lock
);
3978 wrb
= wrb_from_mccq(adapter
);
3983 req
= embedded_payload(wrb
);
3985 be_wrb_cmd_hdr_prepare(&req
->hdr
, CMD_SUBSYSTEM_COMMON
,
3986 OPCODE_COMMON_GET_IFACE_LIST
, sizeof(*resp
),
3988 req
->hdr
.domain
= vf_num
+ 1;
3990 status
= be_mcc_notify_wait(adapter
);
3992 resp
= (struct be_cmd_resp_get_iface_list
*)req
;
3993 vf_cfg
->if_handle
= le32_to_cpu(resp
->if_desc
.if_id
);
3997 spin_unlock_bh(&adapter
->mcc_lock
);
4001 static int lancer_wait_idle(struct be_adapter
*adapter
)
4003 #define SLIPORT_IDLE_TIMEOUT 30
4007 for (i
= 0; i
< SLIPORT_IDLE_TIMEOUT
; i
++) {
4008 reg_val
= ioread32(adapter
->db
+ PHYSDEV_CONTROL_OFFSET
);
4009 if ((reg_val
& PHYSDEV_CONTROL_INP_MASK
) == 0)
4015 if (i
== SLIPORT_IDLE_TIMEOUT
)
4021 int lancer_physdev_ctrl(struct be_adapter
*adapter
, u32 mask
)
4025 status
= lancer_wait_idle(adapter
);
4029 iowrite32(mask
, adapter
->db
+ PHYSDEV_CONTROL_OFFSET
);
4034 /* Routine to check whether dump image is present or not */
4035 bool dump_present(struct be_adapter
*adapter
)
4037 u32 sliport_status
= 0;
4039 sliport_status
= ioread32(adapter
->db
+ SLIPORT_STATUS_OFFSET
);
4040 return !!(sliport_status
& SLIPORT_STATUS_DIP_MASK
);
4043 int lancer_initiate_dump(struct be_adapter
*adapter
)
4045 struct device
*dev
= &adapter
->pdev
->dev
;
4048 if (dump_present(adapter
)) {
4049 dev_info(dev
, "Previous dump not cleared, not forcing dump\n");
4053 /* give firmware reset and diagnostic dump */
4054 status
= lancer_physdev_ctrl(adapter
, PHYSDEV_CONTROL_FW_RESET_MASK
|
4055 PHYSDEV_CONTROL_DD_MASK
);
4057 dev_err(dev
, "FW reset failed\n");
4061 status
= lancer_wait_idle(adapter
);
4065 if (!dump_present(adapter
)) {
4066 dev_err(dev
, "FW dump not generated\n");
4073 int lancer_delete_dump(struct be_adapter
*adapter
)
4077 status
= lancer_cmd_delete_object(adapter
, LANCER_FW_DUMP_FILE
);
4078 return be_cmd_status(status
);
4082 int be_cmd_enable_vf(struct be_adapter
*adapter
, u8 domain
)
4084 struct be_mcc_wrb
*wrb
;
4085 struct be_cmd_enable_disable_vf
*req
;
4088 if (BEx_chip(adapter
))
4091 spin_lock_bh(&adapter
->mcc_lock
);
4093 wrb
= wrb_from_mccq(adapter
);
4099 req
= embedded_payload(wrb
);
4101 be_wrb_cmd_hdr_prepare(&req
->hdr
, CMD_SUBSYSTEM_COMMON
,
4102 OPCODE_COMMON_ENABLE_DISABLE_VF
, sizeof(*req
),
4105 req
->hdr
.domain
= domain
;
4107 status
= be_mcc_notify_wait(adapter
);
4109 spin_unlock_bh(&adapter
->mcc_lock
);
4113 int be_cmd_intr_set(struct be_adapter
*adapter
, bool intr_enable
)
4115 struct be_mcc_wrb
*wrb
;
4116 struct be_cmd_req_intr_set
*req
;
4119 if (mutex_lock_interruptible(&adapter
->mbox_lock
))
4122 wrb
= wrb_from_mbox(adapter
);
4124 req
= embedded_payload(wrb
);
4126 be_wrb_cmd_hdr_prepare(&req
->hdr
, CMD_SUBSYSTEM_COMMON
,
4127 OPCODE_COMMON_SET_INTERRUPT_ENABLE
, sizeof(*req
),
4130 req
->intr_enabled
= intr_enable
;
4132 status
= be_mbox_notify_wait(adapter
);
4134 mutex_unlock(&adapter
->mbox_lock
);
4139 int be_cmd_get_active_profile(struct be_adapter
*adapter
, u16
*profile_id
)
4141 struct be_cmd_req_get_active_profile
*req
;
4142 struct be_mcc_wrb
*wrb
;
4145 if (mutex_lock_interruptible(&adapter
->mbox_lock
))
4148 wrb
= wrb_from_mbox(adapter
);
4154 req
= embedded_payload(wrb
);
4156 be_wrb_cmd_hdr_prepare(&req
->hdr
, CMD_SUBSYSTEM_COMMON
,
4157 OPCODE_COMMON_GET_ACTIVE_PROFILE
, sizeof(*req
),
4160 status
= be_mbox_notify_wait(adapter
);
4162 struct be_cmd_resp_get_active_profile
*resp
=
4163 embedded_payload(wrb
);
4165 *profile_id
= le16_to_cpu(resp
->active_profile_id
);
4169 mutex_unlock(&adapter
->mbox_lock
);
4173 int be_cmd_set_logical_link_config(struct be_adapter
*adapter
,
4174 int link_state
, u8 domain
)
4176 struct be_mcc_wrb
*wrb
;
4177 struct be_cmd_req_set_ll_link
*req
;
4180 if (BEx_chip(adapter
) || lancer_chip(adapter
))
4183 spin_lock_bh(&adapter
->mcc_lock
);
4185 wrb
= wrb_from_mccq(adapter
);
4191 req
= embedded_payload(wrb
);
4193 be_wrb_cmd_hdr_prepare(&req
->hdr
, CMD_SUBSYSTEM_COMMON
,
4194 OPCODE_COMMON_SET_LOGICAL_LINK_CONFIG
,
4195 sizeof(*req
), wrb
, NULL
);
4197 req
->hdr
.version
= 1;
4198 req
->hdr
.domain
= domain
;
4200 if (link_state
== IFLA_VF_LINK_STATE_ENABLE
)
4201 req
->link_config
|= 1;
4203 if (link_state
== IFLA_VF_LINK_STATE_AUTO
)
4204 req
->link_config
|= 1 << PLINK_TRACK_SHIFT
;
4206 status
= be_mcc_notify_wait(adapter
);
4208 spin_unlock_bh(&adapter
->mcc_lock
);
4212 int be_roce_mcc_cmd(void *netdev_handle
, void *wrb_payload
,
4213 int wrb_payload_size
, u16
*cmd_status
, u16
*ext_status
)
4215 struct be_adapter
*adapter
= netdev_priv(netdev_handle
);
4216 struct be_mcc_wrb
*wrb
;
4217 struct be_cmd_req_hdr
*hdr
= (struct be_cmd_req_hdr
*)wrb_payload
;
4218 struct be_cmd_req_hdr
*req
;
4219 struct be_cmd_resp_hdr
*resp
;
4222 spin_lock_bh(&adapter
->mcc_lock
);
4224 wrb
= wrb_from_mccq(adapter
);
4229 req
= embedded_payload(wrb
);
4230 resp
= embedded_payload(wrb
);
4232 be_wrb_cmd_hdr_prepare(req
, hdr
->subsystem
,
4233 hdr
->opcode
, wrb_payload_size
, wrb
, NULL
);
4234 memcpy(req
, wrb_payload
, wrb_payload_size
);
4235 be_dws_cpu_to_le(req
, wrb_payload_size
);
4237 status
= be_mcc_notify_wait(adapter
);
4239 *cmd_status
= (status
& 0xffff);
4242 memcpy(wrb_payload
, resp
, sizeof(*resp
) + resp
->response_length
);
4243 be_dws_le_to_cpu(wrb_payload
, sizeof(*resp
) + resp
->response_length
);
4245 spin_unlock_bh(&adapter
->mcc_lock
);
4248 EXPORT_SYMBOL(be_roce_mcc_cmd
);