/*
 * QLogic qlcnic NIC Driver
 * Copyright (c) 2009-2010 QLogic Corporation
 *
 * See LICENSE.qlcnic for copyright and licensing details.
 */
10 static const struct qlcnic_mailbox_metadata qlcnic_mbx_tbl
[] = {
11 {QLCNIC_CMD_CREATE_RX_CTX
, 4, 1},
12 {QLCNIC_CMD_DESTROY_RX_CTX
, 2, 1},
13 {QLCNIC_CMD_CREATE_TX_CTX
, 4, 1},
14 {QLCNIC_CMD_DESTROY_TX_CTX
, 2, 1},
15 {QLCNIC_CMD_INTRPT_TEST
, 4, 1},
16 {QLCNIC_CMD_SET_MTU
, 4, 1},
17 {QLCNIC_CMD_READ_PHY
, 4, 2},
18 {QLCNIC_CMD_WRITE_PHY
, 5, 1},
19 {QLCNIC_CMD_READ_HW_REG
, 4, 1},
20 {QLCNIC_CMD_GET_FLOW_CTL
, 4, 2},
21 {QLCNIC_CMD_SET_FLOW_CTL
, 4, 1},
22 {QLCNIC_CMD_READ_MAX_MTU
, 4, 2},
23 {QLCNIC_CMD_READ_MAX_LRO
, 4, 2},
24 {QLCNIC_CMD_MAC_ADDRESS
, 4, 3},
25 {QLCNIC_CMD_GET_PCI_INFO
, 4, 1},
26 {QLCNIC_CMD_GET_NIC_INFO
, 4, 1},
27 {QLCNIC_CMD_SET_NIC_INFO
, 4, 1},
28 {QLCNIC_CMD_GET_ESWITCH_CAPABILITY
, 4, 3},
29 {QLCNIC_CMD_TOGGLE_ESWITCH
, 4, 1},
30 {QLCNIC_CMD_GET_ESWITCH_STATUS
, 4, 3},
31 {QLCNIC_CMD_SET_PORTMIRRORING
, 4, 1},
32 {QLCNIC_CMD_CONFIGURE_ESWITCH
, 4, 1},
33 {QLCNIC_CMD_GET_MAC_STATS
, 4, 1},
34 {QLCNIC_CMD_GET_ESWITCH_PORT_CONFIG
, 4, 3},
35 {QLCNIC_CMD_GET_ESWITCH_STATS
, 5, 1},
36 {QLCNIC_CMD_CONFIG_PORT
, 4, 1},
37 {QLCNIC_CMD_TEMP_SIZE
, 4, 4},
38 {QLCNIC_CMD_GET_TEMP_HDR
, 4, 1},
39 {QLCNIC_CMD_SET_DRV_VER
, 4, 1},
42 static inline u32
qlcnic_get_cmd_signature(struct qlcnic_hardware_context
*ahw
)
44 return (ahw
->pci_func
& 0xff) | ((ahw
->fw_hal_version
& 0xff) << 8) |
48 /* Allocate mailbox registers */
49 int qlcnic_82xx_alloc_mbx_args(struct qlcnic_cmd_args
*mbx
,
50 struct qlcnic_adapter
*adapter
, u32 type
)
53 const struct qlcnic_mailbox_metadata
*mbx_tbl
;
55 mbx_tbl
= qlcnic_mbx_tbl
;
56 size
= ARRAY_SIZE(qlcnic_mbx_tbl
);
57 for (i
= 0; i
< size
; i
++) {
58 if (type
== mbx_tbl
[i
].cmd
) {
59 mbx
->req
.num
= mbx_tbl
[i
].in_args
;
60 mbx
->rsp
.num
= mbx_tbl
[i
].out_args
;
61 mbx
->req
.arg
= kcalloc(mbx
->req
.num
,
62 sizeof(u32
), GFP_ATOMIC
);
65 mbx
->rsp
.arg
= kcalloc(mbx
->rsp
.num
,
66 sizeof(u32
), GFP_ATOMIC
);
72 memset(mbx
->req
.arg
, 0, sizeof(u32
) * mbx
->req
.num
);
73 memset(mbx
->rsp
.arg
, 0, sizeof(u32
) * mbx
->rsp
.num
);
74 mbx
->req
.arg
[0] = type
;
81 /* Free up mailbox registers */
82 void qlcnic_free_mbx_args(struct qlcnic_cmd_args
*cmd
)
90 static int qlcnic_is_valid_nic_func(struct qlcnic_adapter
*adapter
, u8 pci_func
)
94 for (i
= 0; i
< adapter
->ahw
->act_pci_func
; i
++) {
95 if (adapter
->npars
[i
].pci_func
== pci_func
)
103 qlcnic_poll_rsp(struct qlcnic_adapter
*adapter
)
109 /* give atleast 1ms for firmware to respond */
112 if (++timeout
> QLCNIC_OS_CRB_RETRY_COUNT
)
113 return QLCNIC_CDRP_RSP_TIMEOUT
;
115 rsp
= QLCRD32(adapter
, QLCNIC_CDRP_CRB_OFFSET
);
116 } while (!QLCNIC_CDRP_IS_RSP(rsp
));
121 int qlcnic_82xx_issue_cmd(struct qlcnic_adapter
*adapter
,
122 struct qlcnic_cmd_args
*cmd
)
127 struct pci_dev
*pdev
= adapter
->pdev
;
128 struct qlcnic_hardware_context
*ahw
= adapter
->ahw
;
130 signature
= qlcnic_get_cmd_signature(ahw
);
132 /* Acquire semaphore before accessing CRB */
133 if (qlcnic_api_lock(adapter
)) {
134 cmd
->rsp
.arg
[0] = QLCNIC_RCODE_TIMEOUT
;
135 return cmd
->rsp
.arg
[0];
138 QLCWR32(adapter
, QLCNIC_SIGN_CRB_OFFSET
, signature
);
139 for (i
= 1; i
< QLCNIC_CDRP_MAX_ARGS
; i
++)
140 QLCWR32(adapter
, QLCNIC_CDRP_ARG(i
), cmd
->req
.arg
[i
]);
141 QLCWR32(adapter
, QLCNIC_CDRP_CRB_OFFSET
,
142 QLCNIC_CDRP_FORM_CMD(cmd
->req
.arg
[0]));
143 rsp
= qlcnic_poll_rsp(adapter
);
145 if (rsp
== QLCNIC_CDRP_RSP_TIMEOUT
) {
146 dev_err(&pdev
->dev
, "card response timeout.\n");
147 cmd
->rsp
.arg
[0] = QLCNIC_RCODE_TIMEOUT
;
148 } else if (rsp
== QLCNIC_CDRP_RSP_FAIL
) {
149 cmd
->rsp
.arg
[0] = QLCRD32(adapter
, QLCNIC_CDRP_ARG(1));
150 dev_err(&pdev
->dev
, "failed card response code:0x%x\n",
152 } else if (rsp
== QLCNIC_CDRP_RSP_OK
)
153 cmd
->rsp
.arg
[0] = QLCNIC_RCODE_SUCCESS
;
155 for (i
= 1; i
< cmd
->rsp
.num
; i
++)
156 cmd
->rsp
.arg
[i
] = QLCRD32(adapter
, QLCNIC_CDRP_ARG(i
));
158 /* Release semaphore */
159 qlcnic_api_unlock(adapter
);
160 return cmd
->rsp
.arg
[0];
164 qlcnic_fw_cmd_set_mtu(struct qlcnic_adapter
*adapter
, int mtu
)
167 struct qlcnic_cmd_args cmd
;
168 struct qlcnic_recv_context
*recv_ctx
= adapter
->recv_ctx
;
170 if (recv_ctx
->state
!= QLCNIC_HOST_CTX_STATE_ACTIVE
)
172 qlcnic_alloc_mbx_args(&cmd
, adapter
, QLCNIC_CMD_SET_MTU
);
173 cmd
.req
.arg
[1] = recv_ctx
->context_id
;
174 cmd
.req
.arg
[2] = mtu
;
176 err
= qlcnic_issue_cmd(adapter
, &cmd
);
178 dev_err(&adapter
->pdev
->dev
, "Failed to set mtu\n");
181 qlcnic_free_mbx_args(&cmd
);
185 int qlcnic_82xx_fw_cmd_create_rx_ctx(struct qlcnic_adapter
*adapter
)
188 struct qlcnic_hostrq_rx_ctx
*prq
;
189 struct qlcnic_cardrsp_rx_ctx
*prsp
;
190 struct qlcnic_hostrq_rds_ring
*prq_rds
;
191 struct qlcnic_hostrq_sds_ring
*prq_sds
;
192 struct qlcnic_cardrsp_rds_ring
*prsp_rds
;
193 struct qlcnic_cardrsp_sds_ring
*prsp_sds
;
194 struct qlcnic_host_rds_ring
*rds_ring
;
195 struct qlcnic_host_sds_ring
*sds_ring
;
196 struct qlcnic_cmd_args cmd
;
198 dma_addr_t hostrq_phys_addr
, cardrsp_phys_addr
;
201 u8 i
, nrds_rings
, nsds_rings
;
203 size_t rq_size
, rsp_size
;
204 u32 cap
, reg
, val
, reg2
;
207 struct qlcnic_recv_context
*recv_ctx
= adapter
->recv_ctx
;
209 nrds_rings
= adapter
->max_rds_rings
;
210 nsds_rings
= adapter
->max_sds_rings
;
213 SIZEOF_HOSTRQ_RX(struct qlcnic_hostrq_rx_ctx
, nrds_rings
,
216 SIZEOF_CARDRSP_RX(struct qlcnic_cardrsp_rx_ctx
, nrds_rings
,
219 addr
= dma_alloc_coherent(&adapter
->pdev
->dev
, rq_size
,
220 &hostrq_phys_addr
, GFP_KERNEL
);
225 addr
= dma_alloc_coherent(&adapter
->pdev
->dev
, rsp_size
,
226 &cardrsp_phys_addr
, GFP_KERNEL
);
233 prq
->host_rsp_dma_addr
= cpu_to_le64(cardrsp_phys_addr
);
235 cap
= (QLCNIC_CAP0_LEGACY_CONTEXT
| QLCNIC_CAP0_LEGACY_MN
236 | QLCNIC_CAP0_VALIDOFF
);
237 cap
|= (QLCNIC_CAP0_JUMBO_CONTIGUOUS
| QLCNIC_CAP0_LRO_CONTIGUOUS
);
239 temp_u16
= offsetof(struct qlcnic_hostrq_rx_ctx
, msix_handler
);
240 prq
->valid_field_offset
= cpu_to_le16(temp_u16
);
241 prq
->txrx_sds_binding
= nsds_rings
- 1;
243 prq
->capabilities
[0] = cpu_to_le32(cap
);
244 prq
->host_int_crb_mode
=
245 cpu_to_le32(QLCNIC_HOST_INT_CRB_MODE_SHARED
);
246 prq
->host_rds_crb_mode
=
247 cpu_to_le32(QLCNIC_HOST_RDS_CRB_MODE_UNIQUE
);
249 prq
->num_rds_rings
= cpu_to_le16(nrds_rings
);
250 prq
->num_sds_rings
= cpu_to_le16(nsds_rings
);
251 prq
->rds_ring_offset
= 0;
253 val
= le32_to_cpu(prq
->rds_ring_offset
) +
254 (sizeof(struct qlcnic_hostrq_rds_ring
) * nrds_rings
);
255 prq
->sds_ring_offset
= cpu_to_le32(val
);
257 prq_rds
= (struct qlcnic_hostrq_rds_ring
*)(prq
->data
+
258 le32_to_cpu(prq
->rds_ring_offset
));
260 for (i
= 0; i
< nrds_rings
; i
++) {
262 rds_ring
= &recv_ctx
->rds_rings
[i
];
263 rds_ring
->producer
= 0;
265 prq_rds
[i
].host_phys_addr
= cpu_to_le64(rds_ring
->phys_addr
);
266 prq_rds
[i
].ring_size
= cpu_to_le32(rds_ring
->num_desc
);
267 prq_rds
[i
].ring_kind
= cpu_to_le32(i
);
268 prq_rds
[i
].buff_size
= cpu_to_le64(rds_ring
->dma_size
);
271 prq_sds
= (struct qlcnic_hostrq_sds_ring
*)(prq
->data
+
272 le32_to_cpu(prq
->sds_ring_offset
));
274 for (i
= 0; i
< nsds_rings
; i
++) {
276 sds_ring
= &recv_ctx
->sds_rings
[i
];
277 sds_ring
->consumer
= 0;
278 memset(sds_ring
->desc_head
, 0, STATUS_DESC_RINGSIZE(sds_ring
));
280 prq_sds
[i
].host_phys_addr
= cpu_to_le64(sds_ring
->phys_addr
);
281 prq_sds
[i
].ring_size
= cpu_to_le32(sds_ring
->num_desc
);
282 prq_sds
[i
].msi_index
= cpu_to_le16(i
);
285 phys_addr
= hostrq_phys_addr
;
286 qlcnic_alloc_mbx_args(&cmd
, adapter
, QLCNIC_CMD_CREATE_RX_CTX
);
287 cmd
.req
.arg
[1] = MSD(phys_addr
);
288 cmd
.req
.arg
[2] = LSD(phys_addr
);
289 cmd
.req
.arg
[3] = rq_size
;
290 err
= qlcnic_issue_cmd(adapter
, &cmd
);
292 dev_err(&adapter
->pdev
->dev
,
293 "Failed to create rx ctx in firmware%d\n", err
);
297 prsp_rds
= ((struct qlcnic_cardrsp_rds_ring
*)
298 &prsp
->data
[le32_to_cpu(prsp
->rds_ring_offset
)]);
300 for (i
= 0; i
< le16_to_cpu(prsp
->num_rds_rings
); i
++) {
301 rds_ring
= &recv_ctx
->rds_rings
[i
];
303 reg
= le32_to_cpu(prsp_rds
[i
].host_producer_crb
);
304 rds_ring
->crb_rcv_producer
= adapter
->ahw
->pci_base0
+ reg
;
307 prsp_sds
= ((struct qlcnic_cardrsp_sds_ring
*)
308 &prsp
->data
[le32_to_cpu(prsp
->sds_ring_offset
)]);
310 for (i
= 0; i
< le16_to_cpu(prsp
->num_sds_rings
); i
++) {
311 sds_ring
= &recv_ctx
->sds_rings
[i
];
313 reg
= le32_to_cpu(prsp_sds
[i
].host_consumer_crb
);
314 reg2
= le32_to_cpu(prsp_sds
[i
].interrupt_crb
);
316 sds_ring
->crb_sts_consumer
= adapter
->ahw
->pci_base0
+ reg
;
317 sds_ring
->crb_intr_mask
= adapter
->ahw
->pci_base0
+ reg2
;
320 recv_ctx
->state
= le32_to_cpu(prsp
->host_ctx_state
);
321 recv_ctx
->context_id
= le16_to_cpu(prsp
->context_id
);
322 recv_ctx
->virt_port
= prsp
->virt_port
;
325 dma_free_coherent(&adapter
->pdev
->dev
, rsp_size
, prsp
,
327 qlcnic_free_mbx_args(&cmd
);
329 dma_free_coherent(&adapter
->pdev
->dev
, rq_size
, prq
, hostrq_phys_addr
);
334 qlcnic_fw_cmd_destroy_rx_ctx(struct qlcnic_adapter
*adapter
)
337 struct qlcnic_cmd_args cmd
;
338 struct qlcnic_recv_context
*recv_ctx
= adapter
->recv_ctx
;
340 qlcnic_alloc_mbx_args(&cmd
, adapter
, QLCNIC_CMD_DESTROY_RX_CTX
);
341 cmd
.req
.arg
[1] = recv_ctx
->context_id
;
342 err
= qlcnic_issue_cmd(adapter
, &cmd
);
344 dev_err(&adapter
->pdev
->dev
,
345 "Failed to destroy rx ctx in firmware\n");
347 recv_ctx
->state
= QLCNIC_HOST_CTX_STATE_FREED
;
348 qlcnic_free_mbx_args(&cmd
);
351 int qlcnic_82xx_fw_cmd_create_tx_ctx(struct qlcnic_adapter
*adapter
,
352 struct qlcnic_host_tx_ring
*tx_ring
,
355 struct qlcnic_hostrq_tx_ctx
*prq
;
356 struct qlcnic_hostrq_cds_ring
*prq_cds
;
357 struct qlcnic_cardrsp_tx_ctx
*prsp
;
358 void *rq_addr
, *rsp_addr
;
359 size_t rq_size
, rsp_size
;
361 struct qlcnic_cmd_args cmd
;
364 dma_addr_t rq_phys_addr
, rsp_phys_addr
;
366 /* reset host resources */
367 tx_ring
->producer
= 0;
368 tx_ring
->sw_consumer
= 0;
369 *(tx_ring
->hw_consumer
) = 0;
371 rq_size
= SIZEOF_HOSTRQ_TX(struct qlcnic_hostrq_tx_ctx
);
372 rq_addr
= dma_alloc_coherent(&adapter
->pdev
->dev
, rq_size
,
373 &rq_phys_addr
, GFP_KERNEL
);
377 rsp_size
= SIZEOF_CARDRSP_TX(struct qlcnic_cardrsp_tx_ctx
);
378 rsp_addr
= dma_alloc_coherent(&adapter
->pdev
->dev
, rsp_size
,
379 &rsp_phys_addr
, GFP_KERNEL
);
385 memset(rq_addr
, 0, rq_size
);
388 memset(rsp_addr
, 0, rsp_size
);
391 prq
->host_rsp_dma_addr
= cpu_to_le64(rsp_phys_addr
);
393 temp
= (QLCNIC_CAP0_LEGACY_CONTEXT
| QLCNIC_CAP0_LEGACY_MN
|
395 prq
->capabilities
[0] = cpu_to_le32(temp
);
397 prq
->host_int_crb_mode
=
398 cpu_to_le32(QLCNIC_HOST_INT_CRB_MODE_SHARED
);
401 prq
->interrupt_ctl
= 0;
402 prq
->cmd_cons_dma_addr
= cpu_to_le64(tx_ring
->hw_cons_phys_addr
);
404 prq_cds
= &prq
->cds_ring
;
406 prq_cds
->host_phys_addr
= cpu_to_le64(tx_ring
->phys_addr
);
407 prq_cds
->ring_size
= cpu_to_le32(tx_ring
->num_desc
);
409 phys_addr
= rq_phys_addr
;
411 qlcnic_alloc_mbx_args(&cmd
, adapter
, QLCNIC_CMD_CREATE_TX_CTX
);
412 cmd
.req
.arg
[1] = MSD(phys_addr
);
413 cmd
.req
.arg
[2] = LSD(phys_addr
);
414 cmd
.req
.arg
[3] = rq_size
;
415 err
= qlcnic_issue_cmd(adapter
, &cmd
);
417 if (err
== QLCNIC_RCODE_SUCCESS
) {
418 temp
= le32_to_cpu(prsp
->cds_ring
.host_producer_crb
);
419 tx_ring
->crb_cmd_producer
= adapter
->ahw
->pci_base0
+ temp
;
420 tx_ring
->ctx_id
= le16_to_cpu(prsp
->context_id
);
422 dev_err(&adapter
->pdev
->dev
,
423 "Failed to create tx ctx in firmware%d\n", err
);
427 dma_free_coherent(&adapter
->pdev
->dev
, rsp_size
, rsp_addr
,
431 dma_free_coherent(&adapter
->pdev
->dev
, rq_size
, rq_addr
, rq_phys_addr
);
432 qlcnic_free_mbx_args(&cmd
);
438 qlcnic_fw_cmd_destroy_tx_ctx(struct qlcnic_adapter
*adapter
,
439 struct qlcnic_host_tx_ring
*tx_ring
)
441 struct qlcnic_cmd_args cmd
;
443 qlcnic_alloc_mbx_args(&cmd
, adapter
, QLCNIC_CMD_DESTROY_TX_CTX
);
444 cmd
.req
.arg
[1] = tx_ring
->ctx_id
;
445 if (qlcnic_issue_cmd(adapter
, &cmd
))
446 dev_err(&adapter
->pdev
->dev
,
447 "Failed to destroy tx ctx in firmware\n");
448 qlcnic_free_mbx_args(&cmd
);
452 qlcnic_fw_cmd_set_port(struct qlcnic_adapter
*adapter
, u32 config
)
455 struct qlcnic_cmd_args cmd
;
457 qlcnic_alloc_mbx_args(&cmd
, adapter
, QLCNIC_CMD_CONFIG_PORT
);
458 cmd
.req
.arg
[1] = config
;
459 err
= qlcnic_issue_cmd(adapter
, &cmd
);
460 qlcnic_free_mbx_args(&cmd
);
464 int qlcnic_alloc_hw_resources(struct qlcnic_adapter
*adapter
)
468 struct qlcnic_recv_context
*recv_ctx
;
469 struct qlcnic_host_rds_ring
*rds_ring
;
470 struct qlcnic_host_sds_ring
*sds_ring
;
471 struct qlcnic_host_tx_ring
*tx_ring
;
474 struct pci_dev
*pdev
= adapter
->pdev
;
476 recv_ctx
= adapter
->recv_ctx
;
478 for (ring
= 0; ring
< adapter
->max_drv_tx_rings
; ring
++) {
479 tx_ring
= &adapter
->tx_ring
[ring
];
480 ptr
= (__le32
*)dma_alloc_coherent(&pdev
->dev
, sizeof(u32
),
481 &tx_ring
->hw_cons_phys_addr
,
485 dev_err(&pdev
->dev
, "failed to allocate tx consumer\n");
488 tx_ring
->hw_consumer
= ptr
;
490 addr
= dma_alloc_coherent(&pdev
->dev
, TX_DESC_RINGSIZE(tx_ring
),
496 "failed to allocate tx desc ring\n");
501 tx_ring
->desc_head
= addr
;
504 for (ring
= 0; ring
< adapter
->max_rds_rings
; ring
++) {
505 rds_ring
= &recv_ctx
->rds_rings
[ring
];
506 addr
= dma_alloc_coherent(&adapter
->pdev
->dev
,
507 RCV_DESC_RINGSIZE(rds_ring
),
508 &rds_ring
->phys_addr
, GFP_KERNEL
);
511 "failed to allocate rds ring [%d]\n", ring
);
515 rds_ring
->desc_head
= addr
;
519 for (ring
= 0; ring
< adapter
->max_sds_rings
; ring
++) {
520 sds_ring
= &recv_ctx
->sds_rings
[ring
];
522 addr
= dma_alloc_coherent(&adapter
->pdev
->dev
,
523 STATUS_DESC_RINGSIZE(sds_ring
),
524 &sds_ring
->phys_addr
, GFP_KERNEL
);
527 "failed to allocate sds ring [%d]\n", ring
);
531 sds_ring
->desc_head
= addr
;
537 qlcnic_free_hw_resources(adapter
);
541 int qlcnic_fw_create_ctx(struct qlcnic_adapter
*dev
)
545 if (dev
->flags
& QLCNIC_NEED_FLR
) {
546 pci_reset_function(dev
->pdev
);
547 dev
->flags
&= ~QLCNIC_NEED_FLR
;
550 err
= qlcnic_fw_cmd_create_rx_ctx(dev
);
554 for (ring
= 0; ring
< dev
->max_drv_tx_rings
; ring
++) {
555 err
= qlcnic_fw_cmd_create_tx_ctx(dev
,
559 qlcnic_fw_cmd_destroy_rx_ctx(dev
);
563 for (i
= 0; i
< ring
; i
++)
564 qlcnic_fw_cmd_destroy_tx_ctx(dev
,
571 set_bit(__QLCNIC_FW_ATTACHED
, &dev
->state
);
575 void qlcnic_fw_destroy_ctx(struct qlcnic_adapter
*adapter
)
579 if (test_and_clear_bit(__QLCNIC_FW_ATTACHED
, &adapter
->state
)) {
580 qlcnic_fw_cmd_destroy_rx_ctx(adapter
);
581 for (ring
= 0; ring
< adapter
->max_drv_tx_rings
; ring
++)
582 qlcnic_fw_cmd_destroy_tx_ctx(adapter
,
583 &adapter
->tx_ring
[ring
]);
584 /* Allow dma queues to drain after context reset */
589 void qlcnic_free_hw_resources(struct qlcnic_adapter
*adapter
)
591 struct qlcnic_recv_context
*recv_ctx
;
592 struct qlcnic_host_rds_ring
*rds_ring
;
593 struct qlcnic_host_sds_ring
*sds_ring
;
594 struct qlcnic_host_tx_ring
*tx_ring
;
597 recv_ctx
= adapter
->recv_ctx
;
599 for (ring
= 0; ring
< adapter
->max_drv_tx_rings
; ring
++) {
600 tx_ring
= &adapter
->tx_ring
[ring
];
601 if (tx_ring
->hw_consumer
!= NULL
) {
602 dma_free_coherent(&adapter
->pdev
->dev
, sizeof(u32
),
603 tx_ring
->hw_consumer
,
604 tx_ring
->hw_cons_phys_addr
);
606 tx_ring
->hw_consumer
= NULL
;
609 if (tx_ring
->desc_head
!= NULL
) {
610 dma_free_coherent(&adapter
->pdev
->dev
,
611 TX_DESC_RINGSIZE(tx_ring
),
614 tx_ring
->desc_head
= NULL
;
618 for (ring
= 0; ring
< adapter
->max_rds_rings
; ring
++) {
619 rds_ring
= &recv_ctx
->rds_rings
[ring
];
621 if (rds_ring
->desc_head
!= NULL
) {
622 dma_free_coherent(&adapter
->pdev
->dev
,
623 RCV_DESC_RINGSIZE(rds_ring
),
625 rds_ring
->phys_addr
);
626 rds_ring
->desc_head
= NULL
;
630 for (ring
= 0; ring
< adapter
->max_sds_rings
; ring
++) {
631 sds_ring
= &recv_ctx
->sds_rings
[ring
];
633 if (sds_ring
->desc_head
!= NULL
) {
634 dma_free_coherent(&adapter
->pdev
->dev
,
635 STATUS_DESC_RINGSIZE(sds_ring
),
637 sds_ring
->phys_addr
);
638 sds_ring
->desc_head
= NULL
;
644 int qlcnic_82xx_get_mac_address(struct qlcnic_adapter
*adapter
, u8
*mac
)
647 struct qlcnic_cmd_args cmd
;
648 u32 mac_low
, mac_high
;
650 qlcnic_alloc_mbx_args(&cmd
, adapter
, QLCNIC_CMD_MAC_ADDRESS
);
651 cmd
.req
.arg
[1] = adapter
->ahw
->pci_func
| BIT_8
;
652 err
= qlcnic_issue_cmd(adapter
, &cmd
);
654 if (err
== QLCNIC_RCODE_SUCCESS
) {
655 mac_low
= cmd
.rsp
.arg
[1];
656 mac_high
= cmd
.rsp
.arg
[2];
658 for (i
= 0; i
< 2; i
++)
659 mac
[i
] = (u8
) (mac_high
>> ((1 - i
) * 8));
660 for (i
= 2; i
< 6; i
++)
661 mac
[i
] = (u8
) (mac_low
>> ((5 - i
) * 8));
663 dev_err(&adapter
->pdev
->dev
,
664 "Failed to get mac address%d\n", err
);
667 qlcnic_free_mbx_args(&cmd
);
671 /* Get info of a NIC partition */
672 int qlcnic_82xx_get_nic_info(struct qlcnic_adapter
*adapter
,
673 struct qlcnic_info
*npar_info
, u8 func_id
)
676 dma_addr_t nic_dma_t
;
677 const struct qlcnic_info_le
*nic_info
;
679 struct qlcnic_cmd_args cmd
;
680 size_t nic_size
= sizeof(struct qlcnic_info_le
);
682 nic_info_addr
= dma_alloc_coherent(&adapter
->pdev
->dev
, nic_size
,
683 &nic_dma_t
, GFP_KERNEL
);
686 memset(nic_info_addr
, 0, nic_size
);
688 nic_info
= nic_info_addr
;
690 qlcnic_alloc_mbx_args(&cmd
, adapter
, QLCNIC_CMD_GET_NIC_INFO
);
691 cmd
.req
.arg
[1] = MSD(nic_dma_t
);
692 cmd
.req
.arg
[2] = LSD(nic_dma_t
);
693 cmd
.req
.arg
[3] = (func_id
<< 16 | nic_size
);
694 err
= qlcnic_issue_cmd(adapter
, &cmd
);
695 if (err
!= QLCNIC_RCODE_SUCCESS
) {
696 dev_err(&adapter
->pdev
->dev
,
697 "Failed to get nic info%d\n", err
);
700 npar_info
->pci_func
= le16_to_cpu(nic_info
->pci_func
);
701 npar_info
->op_mode
= le16_to_cpu(nic_info
->op_mode
);
702 npar_info
->min_tx_bw
= le16_to_cpu(nic_info
->min_tx_bw
);
703 npar_info
->max_tx_bw
= le16_to_cpu(nic_info
->max_tx_bw
);
704 npar_info
->phys_port
= le16_to_cpu(nic_info
->phys_port
);
705 npar_info
->switch_mode
= le16_to_cpu(nic_info
->switch_mode
);
706 npar_info
->max_tx_ques
= le16_to_cpu(nic_info
->max_tx_ques
);
707 npar_info
->max_rx_ques
= le16_to_cpu(nic_info
->max_rx_ques
);
708 npar_info
->capabilities
= le32_to_cpu(nic_info
->capabilities
);
709 npar_info
->max_mtu
= le16_to_cpu(nic_info
->max_mtu
);
712 dma_free_coherent(&adapter
->pdev
->dev
, nic_size
, nic_info_addr
,
714 qlcnic_free_mbx_args(&cmd
);
719 /* Configure a NIC partition */
720 int qlcnic_82xx_set_nic_info(struct qlcnic_adapter
*adapter
,
721 struct qlcnic_info
*nic
)
724 dma_addr_t nic_dma_t
;
726 struct qlcnic_cmd_args cmd
;
727 struct qlcnic_info_le
*nic_info
;
728 size_t nic_size
= sizeof(struct qlcnic_info_le
);
730 if (adapter
->ahw
->op_mode
!= QLCNIC_MGMT_FUNC
)
733 nic_info_addr
= dma_alloc_coherent(&adapter
->pdev
->dev
, nic_size
,
734 &nic_dma_t
, GFP_KERNEL
);
738 memset(nic_info_addr
, 0, nic_size
);
739 nic_info
= nic_info_addr
;
741 nic_info
->pci_func
= cpu_to_le16(nic
->pci_func
);
742 nic_info
->op_mode
= cpu_to_le16(nic
->op_mode
);
743 nic_info
->phys_port
= cpu_to_le16(nic
->phys_port
);
744 nic_info
->switch_mode
= cpu_to_le16(nic
->switch_mode
);
745 nic_info
->capabilities
= cpu_to_le32(nic
->capabilities
);
746 nic_info
->max_mac_filters
= nic
->max_mac_filters
;
747 nic_info
->max_tx_ques
= cpu_to_le16(nic
->max_tx_ques
);
748 nic_info
->max_rx_ques
= cpu_to_le16(nic
->max_rx_ques
);
749 nic_info
->min_tx_bw
= cpu_to_le16(nic
->min_tx_bw
);
750 nic_info
->max_tx_bw
= cpu_to_le16(nic
->max_tx_bw
);
752 qlcnic_alloc_mbx_args(&cmd
, adapter
, QLCNIC_CMD_SET_NIC_INFO
);
753 cmd
.req
.arg
[1] = MSD(nic_dma_t
);
754 cmd
.req
.arg
[2] = LSD(nic_dma_t
);
755 cmd
.req
.arg
[3] = ((nic
->pci_func
<< 16) | nic_size
);
756 err
= qlcnic_issue_cmd(adapter
, &cmd
);
758 if (err
!= QLCNIC_RCODE_SUCCESS
) {
759 dev_err(&adapter
->pdev
->dev
,
760 "Failed to set nic info%d\n", err
);
764 dma_free_coherent(&adapter
->pdev
->dev
, nic_size
, nic_info_addr
,
766 qlcnic_free_mbx_args(&cmd
);
771 /* Get PCI Info of a partition */
772 int qlcnic_82xx_get_pci_info(struct qlcnic_adapter
*adapter
,
773 struct qlcnic_pci_info
*pci_info
)
776 struct qlcnic_cmd_args cmd
;
777 dma_addr_t pci_info_dma_t
;
778 struct qlcnic_pci_info_le
*npar
;
780 size_t npar_size
= sizeof(struct qlcnic_pci_info_le
);
781 size_t pci_size
= npar_size
* QLCNIC_MAX_PCI_FUNC
;
783 pci_info_addr
= dma_alloc_coherent(&adapter
->pdev
->dev
, pci_size
,
784 &pci_info_dma_t
, GFP_KERNEL
);
787 memset(pci_info_addr
, 0, pci_size
);
789 npar
= pci_info_addr
;
790 qlcnic_alloc_mbx_args(&cmd
, adapter
, QLCNIC_CMD_GET_PCI_INFO
);
791 cmd
.req
.arg
[1] = MSD(pci_info_dma_t
);
792 cmd
.req
.arg
[2] = LSD(pci_info_dma_t
);
793 cmd
.req
.arg
[3] = pci_size
;
794 err
= qlcnic_issue_cmd(adapter
, &cmd
);
796 adapter
->ahw
->act_pci_func
= 0;
797 if (err
== QLCNIC_RCODE_SUCCESS
) {
798 for (i
= 0; i
< QLCNIC_MAX_PCI_FUNC
; i
++, npar
++, pci_info
++) {
799 pci_info
->id
= le16_to_cpu(npar
->id
);
800 pci_info
->active
= le16_to_cpu(npar
->active
);
801 pci_info
->type
= le16_to_cpu(npar
->type
);
802 if (pci_info
->type
== QLCNIC_TYPE_NIC
)
803 adapter
->ahw
->act_pci_func
++;
804 pci_info
->default_port
=
805 le16_to_cpu(npar
->default_port
);
806 pci_info
->tx_min_bw
=
807 le16_to_cpu(npar
->tx_min_bw
);
808 pci_info
->tx_max_bw
=
809 le16_to_cpu(npar
->tx_max_bw
);
810 memcpy(pci_info
->mac
, npar
->mac
, ETH_ALEN
);
813 dev_err(&adapter
->pdev
->dev
,
814 "Failed to get PCI Info%d\n", err
);
818 dma_free_coherent(&adapter
->pdev
->dev
, pci_size
, pci_info_addr
,
820 qlcnic_free_mbx_args(&cmd
);
825 /* Configure eSwitch for port mirroring */
826 int qlcnic_config_port_mirroring(struct qlcnic_adapter
*adapter
, u8 id
,
827 u8 enable_mirroring
, u8 pci_func
)
831 struct qlcnic_cmd_args cmd
;
833 if (adapter
->ahw
->op_mode
!= QLCNIC_MGMT_FUNC
||
834 !(adapter
->eswitch
[id
].flags
& QLCNIC_SWITCH_ENABLE
))
837 arg1
= id
| (enable_mirroring
? BIT_4
: 0);
838 arg1
|= pci_func
<< 8;
840 qlcnic_alloc_mbx_args(&cmd
, adapter
, QLCNIC_CMD_SET_PORTMIRRORING
);
841 cmd
.req
.arg
[1] = arg1
;
842 err
= qlcnic_issue_cmd(adapter
, &cmd
);
844 if (err
!= QLCNIC_RCODE_SUCCESS
)
845 dev_err(&adapter
->pdev
->dev
,
846 "Failed to configure port mirroring%d on eswitch:%d\n",
849 dev_info(&adapter
->pdev
->dev
,
850 "Configured eSwitch %d for port mirroring:%d\n",
852 qlcnic_free_mbx_args(&cmd
);
857 int qlcnic_get_port_stats(struct qlcnic_adapter
*adapter
, const u8 func
,
858 const u8 rx_tx
, struct __qlcnic_esw_statistics
*esw_stats
) {
860 size_t stats_size
= sizeof(struct qlcnic_esw_stats_le
);
861 struct qlcnic_esw_stats_le
*stats
;
862 dma_addr_t stats_dma_t
;
865 struct qlcnic_cmd_args cmd
;
868 if (esw_stats
== NULL
)
871 if ((adapter
->ahw
->op_mode
!= QLCNIC_MGMT_FUNC
) &&
872 (func
!= adapter
->ahw
->pci_func
)) {
873 dev_err(&adapter
->pdev
->dev
,
874 "Not privilege to query stats for func=%d", func
);
878 stats_addr
= dma_alloc_coherent(&adapter
->pdev
->dev
, stats_size
,
879 &stats_dma_t
, GFP_KERNEL
);
881 dev_err(&adapter
->pdev
->dev
, "Unable to allocate memory\n");
884 memset(stats_addr
, 0, stats_size
);
886 arg1
= func
| QLCNIC_STATS_VERSION
<< 8 | QLCNIC_STATS_PORT
<< 12;
887 arg1
|= rx_tx
<< 15 | stats_size
<< 16;
889 qlcnic_alloc_mbx_args(&cmd
, adapter
, QLCNIC_CMD_GET_ESWITCH_STATS
);
890 cmd
.req
.arg
[1] = arg1
;
891 cmd
.req
.arg
[2] = MSD(stats_dma_t
);
892 cmd
.req
.arg
[3] = LSD(stats_dma_t
);
893 err
= qlcnic_issue_cmd(adapter
, &cmd
);
897 esw_stats
->context_id
= le16_to_cpu(stats
->context_id
);
898 esw_stats
->version
= le16_to_cpu(stats
->version
);
899 esw_stats
->size
= le16_to_cpu(stats
->size
);
900 esw_stats
->multicast_frames
=
901 le64_to_cpu(stats
->multicast_frames
);
902 esw_stats
->broadcast_frames
=
903 le64_to_cpu(stats
->broadcast_frames
);
904 esw_stats
->unicast_frames
= le64_to_cpu(stats
->unicast_frames
);
905 esw_stats
->dropped_frames
= le64_to_cpu(stats
->dropped_frames
);
906 esw_stats
->local_frames
= le64_to_cpu(stats
->local_frames
);
907 esw_stats
->errors
= le64_to_cpu(stats
->errors
);
908 esw_stats
->numbytes
= le64_to_cpu(stats
->numbytes
);
911 dma_free_coherent(&adapter
->pdev
->dev
, stats_size
, stats_addr
,
913 qlcnic_free_mbx_args(&cmd
);
918 /* This routine will retrieve the MAC statistics from firmware */
919 int qlcnic_get_mac_stats(struct qlcnic_adapter
*adapter
,
920 struct qlcnic_mac_statistics
*mac_stats
)
922 struct qlcnic_mac_statistics_le
*stats
;
923 struct qlcnic_cmd_args cmd
;
924 size_t stats_size
= sizeof(struct qlcnic_mac_statistics_le
);
925 dma_addr_t stats_dma_t
;
929 if (mac_stats
== NULL
)
932 stats_addr
= dma_alloc_coherent(&adapter
->pdev
->dev
, stats_size
,
933 &stats_dma_t
, GFP_KERNEL
);
935 dev_err(&adapter
->pdev
->dev
,
936 "%s: Unable to allocate memory.\n", __func__
);
939 memset(stats_addr
, 0, stats_size
);
940 qlcnic_alloc_mbx_args(&cmd
, adapter
, QLCNIC_CMD_GET_MAC_STATS
);
941 cmd
.req
.arg
[1] = stats_size
<< 16;
942 cmd
.req
.arg
[2] = MSD(stats_dma_t
);
943 cmd
.req
.arg
[3] = LSD(stats_dma_t
);
944 err
= qlcnic_issue_cmd(adapter
, &cmd
);
947 mac_stats
->mac_tx_frames
= le64_to_cpu(stats
->mac_tx_frames
);
948 mac_stats
->mac_tx_bytes
= le64_to_cpu(stats
->mac_tx_bytes
);
949 mac_stats
->mac_tx_mcast_pkts
=
950 le64_to_cpu(stats
->mac_tx_mcast_pkts
);
951 mac_stats
->mac_tx_bcast_pkts
=
952 le64_to_cpu(stats
->mac_tx_bcast_pkts
);
953 mac_stats
->mac_rx_frames
= le64_to_cpu(stats
->mac_rx_frames
);
954 mac_stats
->mac_rx_bytes
= le64_to_cpu(stats
->mac_rx_bytes
);
955 mac_stats
->mac_rx_mcast_pkts
=
956 le64_to_cpu(stats
->mac_rx_mcast_pkts
);
957 mac_stats
->mac_rx_length_error
=
958 le64_to_cpu(stats
->mac_rx_length_error
);
959 mac_stats
->mac_rx_length_small
=
960 le64_to_cpu(stats
->mac_rx_length_small
);
961 mac_stats
->mac_rx_length_large
=
962 le64_to_cpu(stats
->mac_rx_length_large
);
963 mac_stats
->mac_rx_jabber
= le64_to_cpu(stats
->mac_rx_jabber
);
964 mac_stats
->mac_rx_dropped
= le64_to_cpu(stats
->mac_rx_dropped
);
965 mac_stats
->mac_rx_crc_error
= le64_to_cpu(stats
->mac_rx_crc_error
);
967 dev_err(&adapter
->pdev
->dev
,
968 "%s: Get mac stats failed, err=%d.\n", __func__
, err
);
971 dma_free_coherent(&adapter
->pdev
->dev
, stats_size
, stats_addr
,
974 qlcnic_free_mbx_args(&cmd
);
979 int qlcnic_get_eswitch_stats(struct qlcnic_adapter
*adapter
, const u8 eswitch
,
980 const u8 rx_tx
, struct __qlcnic_esw_statistics
*esw_stats
) {
982 struct __qlcnic_esw_statistics port_stats
;
986 if (esw_stats
== NULL
)
988 if (adapter
->ahw
->op_mode
!= QLCNIC_MGMT_FUNC
)
990 if (adapter
->npars
== NULL
)
993 memset(esw_stats
, 0, sizeof(u64
));
994 esw_stats
->unicast_frames
= QLCNIC_STATS_NOT_AVAIL
;
995 esw_stats
->multicast_frames
= QLCNIC_STATS_NOT_AVAIL
;
996 esw_stats
->broadcast_frames
= QLCNIC_STATS_NOT_AVAIL
;
997 esw_stats
->dropped_frames
= QLCNIC_STATS_NOT_AVAIL
;
998 esw_stats
->errors
= QLCNIC_STATS_NOT_AVAIL
;
999 esw_stats
->local_frames
= QLCNIC_STATS_NOT_AVAIL
;
1000 esw_stats
->numbytes
= QLCNIC_STATS_NOT_AVAIL
;
1001 esw_stats
->context_id
= eswitch
;
1003 for (i
= 0; i
< adapter
->ahw
->act_pci_func
; i
++) {
1004 if (adapter
->npars
[i
].phy_port
!= eswitch
)
1007 memset(&port_stats
, 0, sizeof(struct __qlcnic_esw_statistics
));
1008 if (qlcnic_get_port_stats(adapter
, adapter
->npars
[i
].pci_func
,
1009 rx_tx
, &port_stats
))
1012 esw_stats
->size
= port_stats
.size
;
1013 esw_stats
->version
= port_stats
.version
;
1014 QLCNIC_ADD_ESW_STATS(esw_stats
->unicast_frames
,
1015 port_stats
.unicast_frames
);
1016 QLCNIC_ADD_ESW_STATS(esw_stats
->multicast_frames
,
1017 port_stats
.multicast_frames
);
1018 QLCNIC_ADD_ESW_STATS(esw_stats
->broadcast_frames
,
1019 port_stats
.broadcast_frames
);
1020 QLCNIC_ADD_ESW_STATS(esw_stats
->dropped_frames
,
1021 port_stats
.dropped_frames
);
1022 QLCNIC_ADD_ESW_STATS(esw_stats
->errors
,
1024 QLCNIC_ADD_ESW_STATS(esw_stats
->local_frames
,
1025 port_stats
.local_frames
);
1026 QLCNIC_ADD_ESW_STATS(esw_stats
->numbytes
,
1027 port_stats
.numbytes
);
1033 int qlcnic_clear_esw_stats(struct qlcnic_adapter
*adapter
, const u8 func_esw
,
1034 const u8 port
, const u8 rx_tx
)
1038 struct qlcnic_cmd_args cmd
;
1040 if (adapter
->ahw
->op_mode
!= QLCNIC_MGMT_FUNC
)
1043 if (func_esw
== QLCNIC_STATS_PORT
) {
1044 if (port
>= QLCNIC_MAX_PCI_FUNC
)
1046 } else if (func_esw
== QLCNIC_STATS_ESWITCH
) {
1047 if (port
>= QLCNIC_NIU_MAX_XG_PORTS
)
1053 if (rx_tx
> QLCNIC_QUERY_TX_COUNTER
)
1056 arg1
= port
| QLCNIC_STATS_VERSION
<< 8 | func_esw
<< 12;
1057 arg1
|= BIT_14
| rx_tx
<< 15;
1059 qlcnic_alloc_mbx_args(&cmd
, adapter
, QLCNIC_CMD_GET_ESWITCH_STATS
);
1060 cmd
.req
.arg
[1] = arg1
;
1061 err
= qlcnic_issue_cmd(adapter
, &cmd
);
1062 qlcnic_free_mbx_args(&cmd
);
1066 dev_err(&adapter
->pdev
->dev
,
1067 "Invalid args func_esw %d port %d rx_ctx %d\n",
1068 func_esw
, port
, rx_tx
);
1073 __qlcnic_get_eswitch_port_config(struct qlcnic_adapter
*adapter
,
1074 u32
*arg1
, u32
*arg2
)
1077 struct qlcnic_cmd_args cmd
;
1079 pci_func
= (*arg1
>> 8);
1081 qlcnic_alloc_mbx_args(&cmd
, adapter
,
1082 QLCNIC_CMD_GET_ESWITCH_PORT_CONFIG
);
1083 cmd
.req
.arg
[1] = *arg1
;
1084 err
= qlcnic_issue_cmd(adapter
, &cmd
);
1085 *arg1
= cmd
.rsp
.arg
[1];
1086 *arg2
= cmd
.rsp
.arg
[2];
1087 qlcnic_free_mbx_args(&cmd
);
1089 if (err
== QLCNIC_RCODE_SUCCESS
)
1090 dev_info(&adapter
->pdev
->dev
,
1091 "eSwitch port config for pci func %d\n", pci_func
);
1093 dev_err(&adapter
->pdev
->dev
,
1094 "Failed to get eswitch port config for pci func %d\n",
1098 /* Configure eSwitch port
1099 op_mode = 0 for setting default port behavior
1100 op_mode = 1 for setting vlan id
1101 op_mode = 2 for deleting vlan id
1102 op_type = 0 for vlan_id
1103 op_type = 1 for port vlan_id
1105 int qlcnic_config_switch_port(struct qlcnic_adapter
*adapter
,
1106 struct qlcnic_esw_func_cfg
*esw_cfg
)
1108 int err
= -EIO
, index
;
1110 struct qlcnic_cmd_args cmd
;
1113 if (adapter
->ahw
->op_mode
!= QLCNIC_MGMT_FUNC
)
1115 pci_func
= esw_cfg
->pci_func
;
1116 index
= qlcnic_is_valid_nic_func(adapter
, pci_func
);
1119 arg1
= (adapter
->npars
[index
].phy_port
& BIT_0
);
1120 arg1
|= (pci_func
<< 8);
1122 if (__qlcnic_get_eswitch_port_config(adapter
, &arg1
, &arg2
))
1124 arg1
&= ~(0x0ff << 8);
1125 arg1
|= (pci_func
<< 8);
1126 arg1
&= ~(BIT_2
| BIT_3
);
1127 switch (esw_cfg
->op_mode
) {
1128 case QLCNIC_PORT_DEFAULTS
:
1129 arg1
|= (BIT_4
| BIT_6
| BIT_7
);
1130 arg2
|= (BIT_0
| BIT_1
);
1131 if (adapter
->ahw
->capabilities
& QLCNIC_FW_CAPABILITY_TSO
)
1132 arg2
|= (BIT_2
| BIT_3
);
1133 if (!(esw_cfg
->discard_tagged
))
1135 if (!(esw_cfg
->promisc_mode
))
1137 if (!(esw_cfg
->mac_override
))
1139 if (!(esw_cfg
->mac_anti_spoof
))
1141 if (!(esw_cfg
->offload_flags
& BIT_0
))
1142 arg2
&= ~(BIT_1
| BIT_2
| BIT_3
);
1143 if (!(esw_cfg
->offload_flags
& BIT_1
))
1145 if (!(esw_cfg
->offload_flags
& BIT_2
))
1148 case QLCNIC_ADD_VLAN
:
1149 arg1
|= (BIT_2
| BIT_5
);
1150 arg1
|= (esw_cfg
->vlan_id
<< 16);
1152 case QLCNIC_DEL_VLAN
:
1153 arg1
|= (BIT_3
| BIT_5
);
1154 arg1
&= ~(0x0ffff << 16);
1160 qlcnic_alloc_mbx_args(&cmd
, adapter
, QLCNIC_CMD_CONFIGURE_ESWITCH
);
1161 cmd
.req
.arg
[1] = arg1
;
1162 cmd
.req
.arg
[2] = arg2
;
1163 err
= qlcnic_issue_cmd(adapter
, &cmd
);
1164 qlcnic_free_mbx_args(&cmd
);
1166 if (err
!= QLCNIC_RCODE_SUCCESS
)
1167 dev_err(&adapter
->pdev
->dev
,
1168 "Failed to configure eswitch pci func %d\n", pci_func
);
1170 dev_info(&adapter
->pdev
->dev
,
1171 "Configured eSwitch for pci func %d\n", pci_func
);
1177 qlcnic_get_eswitch_port_config(struct qlcnic_adapter
*adapter
,
1178 struct qlcnic_esw_func_cfg
*esw_cfg
)
1184 if (adapter
->ahw
->op_mode
== QLCNIC_MGMT_FUNC
) {
1185 index
= qlcnic_is_valid_nic_func(adapter
, esw_cfg
->pci_func
);
1188 phy_port
= adapter
->npars
[index
].phy_port
;
1190 phy_port
= adapter
->ahw
->physical_port
;
1193 arg1
|= (esw_cfg
->pci_func
<< 8);
1194 if (__qlcnic_get_eswitch_port_config(adapter
, &arg1
, &arg2
))
1197 esw_cfg
->discard_tagged
= !!(arg1
& BIT_4
);
1198 esw_cfg
->host_vlan_tag
= !!(arg1
& BIT_5
);
1199 esw_cfg
->promisc_mode
= !!(arg1
& BIT_6
);
1200 esw_cfg
->mac_override
= !!(arg1
& BIT_7
);
1201 esw_cfg
->vlan_id
= LSW(arg1
>> 16);
1202 esw_cfg
->mac_anti_spoof
= (arg2
& 0x1);
1203 esw_cfg
->offload_flags
= ((arg2
>> 1) & 0x7);