qlcnic: firmware context (rx/tx ring and eSwitch) management routines
[deliverable/linux.git] / drivers / net / ethernet / qlogic / qlcnic / qlcnic_ctx.c
1 /*
2 * QLogic qlcnic NIC Driver
3 * Copyright (c) 2009-2010 QLogic Corporation
4 *
5 * See LICENSE.qlcnic for copyright and licensing details.
6 */
7
8 #include "qlcnic.h"
9
/*
 * Mailbox command descriptor table for 82xx adapters.
 * Each entry is {command code, request argument count, response argument
 * count}; qlcnic_82xx_alloc_mbx_args() looks the command up here to size
 * the req.arg[] and rsp.arg[] arrays.
 */
static const struct qlcnic_mailbox_metadata qlcnic_mbx_tbl[] = {
	{QLCNIC_CMD_CREATE_RX_CTX, 4, 1},
	{QLCNIC_CMD_DESTROY_RX_CTX, 2, 1},
	{QLCNIC_CMD_CREATE_TX_CTX, 4, 1},
	{QLCNIC_CMD_DESTROY_TX_CTX, 2, 1},
	{QLCNIC_CMD_INTRPT_TEST, 4, 1},
	{QLCNIC_CMD_SET_MTU, 4, 1},
	{QLCNIC_CMD_READ_PHY, 4, 2},
	{QLCNIC_CMD_WRITE_PHY, 5, 1},
	{QLCNIC_CMD_READ_HW_REG, 4, 1},
	{QLCNIC_CMD_GET_FLOW_CTL, 4, 2},
	{QLCNIC_CMD_SET_FLOW_CTL, 4, 1},
	{QLCNIC_CMD_READ_MAX_MTU, 4, 2},
	{QLCNIC_CMD_READ_MAX_LRO, 4, 2},
	{QLCNIC_CMD_MAC_ADDRESS, 4, 3},
	{QLCNIC_CMD_GET_PCI_INFO, 4, 1},
	{QLCNIC_CMD_GET_NIC_INFO, 4, 1},
	{QLCNIC_CMD_SET_NIC_INFO, 4, 1},
	{QLCNIC_CMD_GET_ESWITCH_CAPABILITY, 4, 3},
	{QLCNIC_CMD_TOGGLE_ESWITCH, 4, 1},
	{QLCNIC_CMD_GET_ESWITCH_STATUS, 4, 3},
	{QLCNIC_CMD_SET_PORTMIRRORING, 4, 1},
	{QLCNIC_CMD_CONFIGURE_ESWITCH, 4, 1},
	{QLCNIC_CMD_GET_MAC_STATS, 4, 1},
	{QLCNIC_CMD_GET_ESWITCH_PORT_CONFIG, 4, 3},
	{QLCNIC_CMD_GET_ESWITCH_STATS, 5, 1},
	{QLCNIC_CMD_CONFIG_PORT, 4, 1},
	{QLCNIC_CMD_TEMP_SIZE, 4, 4},
	{QLCNIC_CMD_GET_TEMP_HDR, 4, 1},
	{QLCNIC_CMD_SET_DRV_VER, 4, 1},
};
41
42 static inline u32 qlcnic_get_cmd_signature(struct qlcnic_hardware_context *ahw)
43 {
44 return (ahw->pci_func & 0xff) | ((ahw->fw_hal_version & 0xff) << 8) |
45 (0xcafe << 16);
46 }
47
48 /* Allocate mailbox registers */
49 int qlcnic_82xx_alloc_mbx_args(struct qlcnic_cmd_args *mbx,
50 struct qlcnic_adapter *adapter, u32 type)
51 {
52 int i, size;
53 const struct qlcnic_mailbox_metadata *mbx_tbl;
54
55 mbx_tbl = qlcnic_mbx_tbl;
56 size = ARRAY_SIZE(qlcnic_mbx_tbl);
57 for (i = 0; i < size; i++) {
58 if (type == mbx_tbl[i].cmd) {
59 mbx->req.num = mbx_tbl[i].in_args;
60 mbx->rsp.num = mbx_tbl[i].out_args;
61 mbx->req.arg = kcalloc(mbx->req.num,
62 sizeof(u32), GFP_ATOMIC);
63 if (!mbx->req.arg)
64 return -ENOMEM;
65 mbx->rsp.arg = kcalloc(mbx->rsp.num,
66 sizeof(u32), GFP_ATOMIC);
67 if (!mbx->rsp.arg) {
68 kfree(mbx->req.arg);
69 mbx->req.arg = NULL;
70 return -ENOMEM;
71 }
72 memset(mbx->req.arg, 0, sizeof(u32) * mbx->req.num);
73 memset(mbx->rsp.arg, 0, sizeof(u32) * mbx->rsp.num);
74 mbx->req.arg[0] = type;
75 break;
76 }
77 }
78 return 0;
79 }
80
81 /* Free up mailbox registers */
82 void qlcnic_free_mbx_args(struct qlcnic_cmd_args *cmd)
83 {
84 kfree(cmd->req.arg);
85 cmd->req.arg = NULL;
86 kfree(cmd->rsp.arg);
87 cmd->rsp.arg = NULL;
88 }
89
90 static int qlcnic_is_valid_nic_func(struct qlcnic_adapter *adapter, u8 pci_func)
91 {
92 int i;
93
94 for (i = 0; i < adapter->ahw->act_pci_func; i++) {
95 if (adapter->npars[i].pci_func == pci_func)
96 return i;
97 }
98
99 return -1;
100 }
101
102 static u32
103 qlcnic_poll_rsp(struct qlcnic_adapter *adapter)
104 {
105 u32 rsp;
106 int timeout = 0;
107
108 do {
109 /* give atleast 1ms for firmware to respond */
110 mdelay(1);
111
112 if (++timeout > QLCNIC_OS_CRB_RETRY_COUNT)
113 return QLCNIC_CDRP_RSP_TIMEOUT;
114
115 rsp = QLCRD32(adapter, QLCNIC_CDRP_CRB_OFFSET);
116 } while (!QLCNIC_CDRP_IS_RSP(rsp));
117
118 return rsp;
119 }
120
/*
 * qlcnic_82xx_issue_cmd() - synchronously issue one CDRP mailbox command.
 * @adapter: adapter the command is issued on
 * @cmd:     mailbox arguments; req.arg[0] holds the command code,
 *           req.arg[1..] the inputs; rsp.arg[] receives the outputs
 *
 * Serializes CRB access with the inter-driver API lock, writes the
 * signature word and request arguments, kicks the firmware via the CDRP
 * register, polls for completion, then copies the response registers
 * back into @cmd.
 *
 * Returns cmd->rsp.arg[0]: QLCNIC_RCODE_SUCCESS, QLCNIC_RCODE_TIMEOUT,
 * or the failure code reported by the card.
 */
int qlcnic_82xx_issue_cmd(struct qlcnic_adapter *adapter,
			  struct qlcnic_cmd_args *cmd)
{
	int i;
	u32 rsp;
	u32 signature;
	struct pci_dev *pdev = adapter->pdev;
	struct qlcnic_hardware_context *ahw = adapter->ahw;

	signature = qlcnic_get_cmd_signature(ahw);

	/* Acquire semaphore before accessing CRB */
	if (qlcnic_api_lock(adapter)) {
		cmd->rsp.arg[0] = QLCNIC_RCODE_TIMEOUT;
		return cmd->rsp.arg[0];
	}

	QLCWR32(adapter, QLCNIC_SIGN_CRB_OFFSET, signature);
	/* NOTE(review): writes QLCNIC_CDRP_MAX_ARGS - 1 argument registers
	 * regardless of cmd->req.num; if req.num < QLCNIC_CDRP_MAX_ARGS
	 * this reads past the allocated req.arg[] array — confirm every
	 * 82xx command allocates at least that many request args.
	 */
	for (i = 1; i < QLCNIC_CDRP_MAX_ARGS; i++)
		QLCWR32(adapter, QLCNIC_CDRP_ARG(i), cmd->req.arg[i]);
	/* Writing the formed command word triggers firmware execution. */
	QLCWR32(adapter, QLCNIC_CDRP_CRB_OFFSET,
		QLCNIC_CDRP_FORM_CMD(cmd->req.arg[0]));
	rsp = qlcnic_poll_rsp(adapter);

	if (rsp == QLCNIC_CDRP_RSP_TIMEOUT) {
		dev_err(&pdev->dev, "card response timeout.\n");
		cmd->rsp.arg[0] = QLCNIC_RCODE_TIMEOUT;
	} else if (rsp == QLCNIC_CDRP_RSP_FAIL) {
		/* On failure the card puts its error code in ARG(1). */
		cmd->rsp.arg[0] = QLCRD32(adapter, QLCNIC_CDRP_ARG(1));
		dev_err(&pdev->dev, "failed card response code:0x%x\n",
			cmd->rsp.arg[0]);
	} else if (rsp == QLCNIC_CDRP_RSP_OK)
		cmd->rsp.arg[0] = QLCNIC_RCODE_SUCCESS;

	/* Response registers are read back even on timeout/failure. */
	for (i = 1; i < cmd->rsp.num; i++)
		cmd->rsp.arg[i] = QLCRD32(adapter, QLCNIC_CDRP_ARG(i));

	/* Release semaphore */
	qlcnic_api_unlock(adapter);
	return cmd->rsp.arg[0];
}
162
163 int
164 qlcnic_fw_cmd_set_mtu(struct qlcnic_adapter *adapter, int mtu)
165 {
166 int err = 0;
167 struct qlcnic_cmd_args cmd;
168 struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx;
169
170 if (recv_ctx->state != QLCNIC_HOST_CTX_STATE_ACTIVE)
171 return err;
172 qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_SET_MTU);
173 cmd.req.arg[1] = recv_ctx->context_id;
174 cmd.req.arg[2] = mtu;
175
176 err = qlcnic_issue_cmd(adapter, &cmd);
177 if (err) {
178 dev_err(&adapter->pdev->dev, "Failed to set mtu\n");
179 err = -EIO;
180 }
181 qlcnic_free_mbx_args(&cmd);
182 return err;
183 }
184
/*
 * qlcnic_82xx_fw_cmd_create_rx_ctx() - build and submit the firmware
 * request that creates the receive context (all rds and sds rings).
 *
 * Allocates two DMA-coherent buffers: a host request (prq) describing
 * every rds/sds ring, and a card response (prsp) the firmware fills with
 * the CRB offsets the host must use for producers/consumers/interrupt
 * masks.  On success those offsets are cached in recv_ctx and the rings.
 *
 * Returns 0 on success or a negative errno / firmware error code.
 */
int qlcnic_82xx_fw_cmd_create_rx_ctx(struct qlcnic_adapter *adapter)
{
	void *addr;
	struct qlcnic_hostrq_rx_ctx *prq;
	struct qlcnic_cardrsp_rx_ctx *prsp;
	struct qlcnic_hostrq_rds_ring *prq_rds;
	struct qlcnic_hostrq_sds_ring *prq_sds;
	struct qlcnic_cardrsp_rds_ring *prsp_rds;
	struct qlcnic_cardrsp_sds_ring *prsp_sds;
	struct qlcnic_host_rds_ring *rds_ring;
	struct qlcnic_host_sds_ring *sds_ring;
	struct qlcnic_cmd_args cmd;

	dma_addr_t hostrq_phys_addr, cardrsp_phys_addr;
	u64 phys_addr;

	u8 i, nrds_rings, nsds_rings;
	u16 temp_u16;
	size_t rq_size, rsp_size;
	u32 cap, reg, val, reg2;
	int err;

	struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx;

	nrds_rings = adapter->max_rds_rings;
	nsds_rings = adapter->max_sds_rings;

	/* Request/response sizes include one trailing descriptor per ring. */
	rq_size =
		SIZEOF_HOSTRQ_RX(struct qlcnic_hostrq_rx_ctx, nrds_rings,
						nsds_rings);
	rsp_size =
		SIZEOF_CARDRSP_RX(struct qlcnic_cardrsp_rx_ctx, nrds_rings,
						nsds_rings);

	addr = dma_alloc_coherent(&adapter->pdev->dev, rq_size,
			&hostrq_phys_addr, GFP_KERNEL);
	if (addr == NULL)
		return -ENOMEM;
	/* NOTE(review): unlike the tx-context path, the request buffer is
	 * not zeroed here — confirm every field the firmware inspects is
	 * explicitly assigned below.
	 */
	prq = addr;

	addr = dma_alloc_coherent(&adapter->pdev->dev, rsp_size,
			&cardrsp_phys_addr, GFP_KERNEL);
	if (addr == NULL) {
		err = -ENOMEM;
		goto out_free_rq;
	}
	prsp = addr;

	/* Tell the firmware where to write its response. */
	prq->host_rsp_dma_addr = cpu_to_le64(cardrsp_phys_addr);

	cap = (QLCNIC_CAP0_LEGACY_CONTEXT | QLCNIC_CAP0_LEGACY_MN
						| QLCNIC_CAP0_VALIDOFF);
	cap |= (QLCNIC_CAP0_JUMBO_CONTIGUOUS | QLCNIC_CAP0_LRO_CONTIGUOUS);

	/* VALIDOFF capability: fields past this offset are valid. */
	temp_u16 = offsetof(struct qlcnic_hostrq_rx_ctx, msix_handler);
	prq->valid_field_offset = cpu_to_le16(temp_u16);
	prq->txrx_sds_binding = nsds_rings - 1;

	prq->capabilities[0] = cpu_to_le32(cap);
	prq->host_int_crb_mode =
		cpu_to_le32(QLCNIC_HOST_INT_CRB_MODE_SHARED);
	prq->host_rds_crb_mode =
		cpu_to_le32(QLCNIC_HOST_RDS_CRB_MODE_UNIQUE);

	prq->num_rds_rings = cpu_to_le16(nrds_rings);
	prq->num_sds_rings = cpu_to_le16(nsds_rings);
	prq->rds_ring_offset = 0;

	/* sds descriptors follow the rds descriptors in prq->data[]. */
	val = le32_to_cpu(prq->rds_ring_offset) +
		(sizeof(struct qlcnic_hostrq_rds_ring) * nrds_rings);
	prq->sds_ring_offset = cpu_to_le32(val);

	prq_rds = (struct qlcnic_hostrq_rds_ring *)(prq->data +
			le32_to_cpu(prq->rds_ring_offset));

	for (i = 0; i < nrds_rings; i++) {

		rds_ring = &recv_ctx->rds_rings[i];
		rds_ring->producer = 0;

		prq_rds[i].host_phys_addr = cpu_to_le64(rds_ring->phys_addr);
		prq_rds[i].ring_size = cpu_to_le32(rds_ring->num_desc);
		prq_rds[i].ring_kind = cpu_to_le32(i);
		prq_rds[i].buff_size = cpu_to_le64(rds_ring->dma_size);
	}

	prq_sds = (struct qlcnic_hostrq_sds_ring *)(prq->data +
			le32_to_cpu(prq->sds_ring_offset));

	for (i = 0; i < nsds_rings; i++) {

		sds_ring = &recv_ctx->sds_rings[i];
		sds_ring->consumer = 0;
		/* Clear the status ring so stale ownership bits are gone. */
		memset(sds_ring->desc_head, 0, STATUS_DESC_RINGSIZE(sds_ring));

		prq_sds[i].host_phys_addr = cpu_to_le64(sds_ring->phys_addr);
		prq_sds[i].ring_size = cpu_to_le32(sds_ring->num_desc);
		prq_sds[i].msi_index = cpu_to_le16(i);
	}

	phys_addr = hostrq_phys_addr;
	qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_CREATE_RX_CTX);
	cmd.req.arg[1] = MSD(phys_addr);
	cmd.req.arg[2] = LSD(phys_addr);
	cmd.req.arg[3] = rq_size;
	err = qlcnic_issue_cmd(adapter, &cmd);
	if (err) {
		dev_err(&adapter->pdev->dev,
			"Failed to create rx ctx in firmware%d\n", err);
		goto out_free_rsp;
	}

	/* Cache the per-ring CRB addresses the firmware handed back. */
	prsp_rds = ((struct qlcnic_cardrsp_rds_ring *)
			 &prsp->data[le32_to_cpu(prsp->rds_ring_offset)]);

	for (i = 0; i < le16_to_cpu(prsp->num_rds_rings); i++) {
		rds_ring = &recv_ctx->rds_rings[i];

		reg = le32_to_cpu(prsp_rds[i].host_producer_crb);
		rds_ring->crb_rcv_producer = adapter->ahw->pci_base0 + reg;
	}

	prsp_sds = ((struct qlcnic_cardrsp_sds_ring *)
			&prsp->data[le32_to_cpu(prsp->sds_ring_offset)]);

	for (i = 0; i < le16_to_cpu(prsp->num_sds_rings); i++) {
		sds_ring = &recv_ctx->sds_rings[i];

		reg = le32_to_cpu(prsp_sds[i].host_consumer_crb);
		reg2 = le32_to_cpu(prsp_sds[i].interrupt_crb);

		sds_ring->crb_sts_consumer = adapter->ahw->pci_base0 + reg;
		sds_ring->crb_intr_mask = adapter->ahw->pci_base0 + reg2;
	}

	recv_ctx->state = le32_to_cpu(prsp->host_ctx_state);
	recv_ctx->context_id = le16_to_cpu(prsp->context_id);
	recv_ctx->virt_port = prsp->virt_port;

out_free_rsp:
	dma_free_coherent(&adapter->pdev->dev, rsp_size, prsp,
			  cardrsp_phys_addr);
	qlcnic_free_mbx_args(&cmd);
out_free_rq:
	dma_free_coherent(&adapter->pdev->dev, rq_size, prq, hostrq_phys_addr);
	return err;
}
332
333 static void
334 qlcnic_fw_cmd_destroy_rx_ctx(struct qlcnic_adapter *adapter)
335 {
336 int err;
337 struct qlcnic_cmd_args cmd;
338 struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx;
339
340 qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_DESTROY_RX_CTX);
341 cmd.req.arg[1] = recv_ctx->context_id;
342 err = qlcnic_issue_cmd(adapter, &cmd);
343 if (err)
344 dev_err(&adapter->pdev->dev,
345 "Failed to destroy rx ctx in firmware\n");
346
347 recv_ctx->state = QLCNIC_HOST_CTX_STATE_FREED;
348 qlcnic_free_mbx_args(&cmd);
349 }
350
351 int qlcnic_82xx_fw_cmd_create_tx_ctx(struct qlcnic_adapter *adapter,
352 struct qlcnic_host_tx_ring *tx_ring,
353 int ring)
354 {
355 struct qlcnic_hostrq_tx_ctx *prq;
356 struct qlcnic_hostrq_cds_ring *prq_cds;
357 struct qlcnic_cardrsp_tx_ctx *prsp;
358 void *rq_addr, *rsp_addr;
359 size_t rq_size, rsp_size;
360 u32 temp;
361 struct qlcnic_cmd_args cmd;
362 int err;
363 u64 phys_addr;
364 dma_addr_t rq_phys_addr, rsp_phys_addr;
365
366 /* reset host resources */
367 tx_ring->producer = 0;
368 tx_ring->sw_consumer = 0;
369 *(tx_ring->hw_consumer) = 0;
370
371 rq_size = SIZEOF_HOSTRQ_TX(struct qlcnic_hostrq_tx_ctx);
372 rq_addr = dma_alloc_coherent(&adapter->pdev->dev, rq_size,
373 &rq_phys_addr, GFP_KERNEL);
374 if (!rq_addr)
375 return -ENOMEM;
376
377 rsp_size = SIZEOF_CARDRSP_TX(struct qlcnic_cardrsp_tx_ctx);
378 rsp_addr = dma_alloc_coherent(&adapter->pdev->dev, rsp_size,
379 &rsp_phys_addr, GFP_KERNEL);
380 if (!rsp_addr) {
381 err = -ENOMEM;
382 goto out_free_rq;
383 }
384
385 memset(rq_addr, 0, rq_size);
386 prq = rq_addr;
387
388 memset(rsp_addr, 0, rsp_size);
389 prsp = rsp_addr;
390
391 prq->host_rsp_dma_addr = cpu_to_le64(rsp_phys_addr);
392
393 temp = (QLCNIC_CAP0_LEGACY_CONTEXT | QLCNIC_CAP0_LEGACY_MN |
394 QLCNIC_CAP0_LSO);
395 prq->capabilities[0] = cpu_to_le32(temp);
396
397 prq->host_int_crb_mode =
398 cpu_to_le32(QLCNIC_HOST_INT_CRB_MODE_SHARED);
399 prq->msi_index = 0;
400
401 prq->interrupt_ctl = 0;
402 prq->cmd_cons_dma_addr = cpu_to_le64(tx_ring->hw_cons_phys_addr);
403
404 prq_cds = &prq->cds_ring;
405
406 prq_cds->host_phys_addr = cpu_to_le64(tx_ring->phys_addr);
407 prq_cds->ring_size = cpu_to_le32(tx_ring->num_desc);
408
409 phys_addr = rq_phys_addr;
410
411 qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_CREATE_TX_CTX);
412 cmd.req.arg[1] = MSD(phys_addr);
413 cmd.req.arg[2] = LSD(phys_addr);
414 cmd.req.arg[3] = rq_size;
415 err = qlcnic_issue_cmd(adapter, &cmd);
416
417 if (err == QLCNIC_RCODE_SUCCESS) {
418 temp = le32_to_cpu(prsp->cds_ring.host_producer_crb);
419 tx_ring->crb_cmd_producer = adapter->ahw->pci_base0 + temp;
420 tx_ring->ctx_id = le16_to_cpu(prsp->context_id);
421 } else {
422 dev_err(&adapter->pdev->dev,
423 "Failed to create tx ctx in firmware%d\n", err);
424 err = -EIO;
425 }
426
427 dma_free_coherent(&adapter->pdev->dev, rsp_size, rsp_addr,
428 rsp_phys_addr);
429
430 out_free_rq:
431 dma_free_coherent(&adapter->pdev->dev, rq_size, rq_addr, rq_phys_addr);
432 qlcnic_free_mbx_args(&cmd);
433
434 return err;
435 }
436
437 static void
438 qlcnic_fw_cmd_destroy_tx_ctx(struct qlcnic_adapter *adapter,
439 struct qlcnic_host_tx_ring *tx_ring)
440 {
441 struct qlcnic_cmd_args cmd;
442
443 qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_DESTROY_TX_CTX);
444 cmd.req.arg[1] = tx_ring->ctx_id;
445 if (qlcnic_issue_cmd(adapter, &cmd))
446 dev_err(&adapter->pdev->dev,
447 "Failed to destroy tx ctx in firmware\n");
448 qlcnic_free_mbx_args(&cmd);
449 }
450
451 int
452 qlcnic_fw_cmd_set_port(struct qlcnic_adapter *adapter, u32 config)
453 {
454 int err;
455 struct qlcnic_cmd_args cmd;
456
457 qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_CONFIG_PORT);
458 cmd.req.arg[1] = config;
459 err = qlcnic_issue_cmd(adapter, &cmd);
460 qlcnic_free_mbx_args(&cmd);
461 return err;
462 }
463
464 int qlcnic_alloc_hw_resources(struct qlcnic_adapter *adapter)
465 {
466 void *addr;
467 int err, ring;
468 struct qlcnic_recv_context *recv_ctx;
469 struct qlcnic_host_rds_ring *rds_ring;
470 struct qlcnic_host_sds_ring *sds_ring;
471 struct qlcnic_host_tx_ring *tx_ring;
472 __le32 *ptr;
473
474 struct pci_dev *pdev = adapter->pdev;
475
476 recv_ctx = adapter->recv_ctx;
477
478 for (ring = 0; ring < adapter->max_drv_tx_rings; ring++) {
479 tx_ring = &adapter->tx_ring[ring];
480 ptr = (__le32 *)dma_alloc_coherent(&pdev->dev, sizeof(u32),
481 &tx_ring->hw_cons_phys_addr,
482 GFP_KERNEL);
483
484 if (ptr == NULL) {
485 dev_err(&pdev->dev, "failed to allocate tx consumer\n");
486 return -ENOMEM;
487 }
488 tx_ring->hw_consumer = ptr;
489 /* cmd desc ring */
490 addr = dma_alloc_coherent(&pdev->dev, TX_DESC_RINGSIZE(tx_ring),
491 &tx_ring->phys_addr,
492 GFP_KERNEL);
493
494 if (addr == NULL) {
495 dev_err(&pdev->dev,
496 "failed to allocate tx desc ring\n");
497 err = -ENOMEM;
498 goto err_out_free;
499 }
500
501 tx_ring->desc_head = addr;
502 }
503
504 for (ring = 0; ring < adapter->max_rds_rings; ring++) {
505 rds_ring = &recv_ctx->rds_rings[ring];
506 addr = dma_alloc_coherent(&adapter->pdev->dev,
507 RCV_DESC_RINGSIZE(rds_ring),
508 &rds_ring->phys_addr, GFP_KERNEL);
509 if (addr == NULL) {
510 dev_err(&pdev->dev,
511 "failed to allocate rds ring [%d]\n", ring);
512 err = -ENOMEM;
513 goto err_out_free;
514 }
515 rds_ring->desc_head = addr;
516
517 }
518
519 for (ring = 0; ring < adapter->max_sds_rings; ring++) {
520 sds_ring = &recv_ctx->sds_rings[ring];
521
522 addr = dma_alloc_coherent(&adapter->pdev->dev,
523 STATUS_DESC_RINGSIZE(sds_ring),
524 &sds_ring->phys_addr, GFP_KERNEL);
525 if (addr == NULL) {
526 dev_err(&pdev->dev,
527 "failed to allocate sds ring [%d]\n", ring);
528 err = -ENOMEM;
529 goto err_out_free;
530 }
531 sds_ring->desc_head = addr;
532 }
533
534 return 0;
535
536 err_out_free:
537 qlcnic_free_hw_resources(adapter);
538 return err;
539 }
540
541 int qlcnic_fw_create_ctx(struct qlcnic_adapter *dev)
542 {
543 int i, err, ring;
544
545 if (dev->flags & QLCNIC_NEED_FLR) {
546 pci_reset_function(dev->pdev);
547 dev->flags &= ~QLCNIC_NEED_FLR;
548 }
549
550 err = qlcnic_fw_cmd_create_rx_ctx(dev);
551 if (err)
552 return err;
553
554 for (ring = 0; ring < dev->max_drv_tx_rings; ring++) {
555 err = qlcnic_fw_cmd_create_tx_ctx(dev,
556 &dev->tx_ring[ring],
557 ring);
558 if (err) {
559 qlcnic_fw_cmd_destroy_rx_ctx(dev);
560 if (ring == 0)
561 return err;
562
563 for (i = 0; i < ring; i++)
564 qlcnic_fw_cmd_destroy_tx_ctx(dev,
565 &dev->tx_ring[i]);
566
567 return err;
568 }
569 }
570
571 set_bit(__QLCNIC_FW_ATTACHED, &dev->state);
572 return 0;
573 }
574
575 void qlcnic_fw_destroy_ctx(struct qlcnic_adapter *adapter)
576 {
577 int ring;
578
579 if (test_and_clear_bit(__QLCNIC_FW_ATTACHED, &adapter->state)) {
580 qlcnic_fw_cmd_destroy_rx_ctx(adapter);
581 for (ring = 0; ring < adapter->max_drv_tx_rings; ring++)
582 qlcnic_fw_cmd_destroy_tx_ctx(adapter,
583 &adapter->tx_ring[ring]);
584 /* Allow dma queues to drain after context reset */
585 mdelay(20);
586 }
587 }
588
589 void qlcnic_free_hw_resources(struct qlcnic_adapter *adapter)
590 {
591 struct qlcnic_recv_context *recv_ctx;
592 struct qlcnic_host_rds_ring *rds_ring;
593 struct qlcnic_host_sds_ring *sds_ring;
594 struct qlcnic_host_tx_ring *tx_ring;
595 int ring;
596
597 recv_ctx = adapter->recv_ctx;
598
599 for (ring = 0; ring < adapter->max_drv_tx_rings; ring++) {
600 tx_ring = &adapter->tx_ring[ring];
601 if (tx_ring->hw_consumer != NULL) {
602 dma_free_coherent(&adapter->pdev->dev, sizeof(u32),
603 tx_ring->hw_consumer,
604 tx_ring->hw_cons_phys_addr);
605
606 tx_ring->hw_consumer = NULL;
607 }
608
609 if (tx_ring->desc_head != NULL) {
610 dma_free_coherent(&adapter->pdev->dev,
611 TX_DESC_RINGSIZE(tx_ring),
612 tx_ring->desc_head,
613 tx_ring->phys_addr);
614 tx_ring->desc_head = NULL;
615 }
616 }
617
618 for (ring = 0; ring < adapter->max_rds_rings; ring++) {
619 rds_ring = &recv_ctx->rds_rings[ring];
620
621 if (rds_ring->desc_head != NULL) {
622 dma_free_coherent(&adapter->pdev->dev,
623 RCV_DESC_RINGSIZE(rds_ring),
624 rds_ring->desc_head,
625 rds_ring->phys_addr);
626 rds_ring->desc_head = NULL;
627 }
628 }
629
630 for (ring = 0; ring < adapter->max_sds_rings; ring++) {
631 sds_ring = &recv_ctx->sds_rings[ring];
632
633 if (sds_ring->desc_head != NULL) {
634 dma_free_coherent(&adapter->pdev->dev,
635 STATUS_DESC_RINGSIZE(sds_ring),
636 sds_ring->desc_head,
637 sds_ring->phys_addr);
638 sds_ring->desc_head = NULL;
639 }
640 }
641 }
642
643
644 int qlcnic_82xx_get_mac_address(struct qlcnic_adapter *adapter, u8 *mac)
645 {
646 int err, i;
647 struct qlcnic_cmd_args cmd;
648 u32 mac_low, mac_high;
649
650 qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_MAC_ADDRESS);
651 cmd.req.arg[1] = adapter->ahw->pci_func | BIT_8;
652 err = qlcnic_issue_cmd(adapter, &cmd);
653
654 if (err == QLCNIC_RCODE_SUCCESS) {
655 mac_low = cmd.rsp.arg[1];
656 mac_high = cmd.rsp.arg[2];
657
658 for (i = 0; i < 2; i++)
659 mac[i] = (u8) (mac_high >> ((1 - i) * 8));
660 for (i = 2; i < 6; i++)
661 mac[i] = (u8) (mac_low >> ((5 - i) * 8));
662 } else {
663 dev_err(&adapter->pdev->dev,
664 "Failed to get mac address%d\n", err);
665 err = -EIO;
666 }
667 qlcnic_free_mbx_args(&cmd);
668 return err;
669 }
670
/* Get info of a NIC partition */
/*
 * qlcnic_82xx_get_nic_info() - fetch partition configuration for
 * @func_id from firmware into @npar_info.
 *
 * The firmware writes a little-endian struct qlcnic_info_le into a
 * DMA-coherent buffer; the fields are byte-swapped into the host-order
 * @npar_info on success.  Returns 0, -ENOMEM, or -EIO.
 */
int qlcnic_82xx_get_nic_info(struct qlcnic_adapter *adapter,
			     struct qlcnic_info *npar_info, u8 func_id)
{
	int err;
	dma_addr_t nic_dma_t;
	const struct qlcnic_info_le *nic_info;
	void *nic_info_addr;
	struct qlcnic_cmd_args cmd;
	size_t nic_size = sizeof(struct qlcnic_info_le);

	nic_info_addr = dma_alloc_coherent(&adapter->pdev->dev, nic_size,
					   &nic_dma_t, GFP_KERNEL);
	if (!nic_info_addr)
		return -ENOMEM;
	/* Zero the buffer: dma_alloc_coherent() contents are undefined. */
	memset(nic_info_addr, 0, nic_size);

	nic_info = nic_info_addr;

	qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_GET_NIC_INFO);
	cmd.req.arg[1] = MSD(nic_dma_t);
	cmd.req.arg[2] = LSD(nic_dma_t);
	/* arg3: target function id in the high 16 bits, buffer size low. */
	cmd.req.arg[3] = (func_id << 16 | nic_size);
	err = qlcnic_issue_cmd(adapter, &cmd);
	if (err != QLCNIC_RCODE_SUCCESS) {
		dev_err(&adapter->pdev->dev,
			"Failed to get nic info%d\n", err);
		err = -EIO;
	} else {
		npar_info->pci_func = le16_to_cpu(nic_info->pci_func);
		npar_info->op_mode = le16_to_cpu(nic_info->op_mode);
		npar_info->min_tx_bw = le16_to_cpu(nic_info->min_tx_bw);
		npar_info->max_tx_bw = le16_to_cpu(nic_info->max_tx_bw);
		npar_info->phys_port = le16_to_cpu(nic_info->phys_port);
		npar_info->switch_mode = le16_to_cpu(nic_info->switch_mode);
		npar_info->max_tx_ques = le16_to_cpu(nic_info->max_tx_ques);
		npar_info->max_rx_ques = le16_to_cpu(nic_info->max_rx_ques);
		npar_info->capabilities = le32_to_cpu(nic_info->capabilities);
		npar_info->max_mtu = le16_to_cpu(nic_info->max_mtu);
	}

	dma_free_coherent(&adapter->pdev->dev, nic_size, nic_info_addr,
			  nic_dma_t);
	qlcnic_free_mbx_args(&cmd);

	return err;
}
718
719 /* Configure a NIC partition */
720 int qlcnic_82xx_set_nic_info(struct qlcnic_adapter *adapter,
721 struct qlcnic_info *nic)
722 {
723 int err = -EIO;
724 dma_addr_t nic_dma_t;
725 void *nic_info_addr;
726 struct qlcnic_cmd_args cmd;
727 struct qlcnic_info_le *nic_info;
728 size_t nic_size = sizeof(struct qlcnic_info_le);
729
730 if (adapter->ahw->op_mode != QLCNIC_MGMT_FUNC)
731 return err;
732
733 nic_info_addr = dma_alloc_coherent(&adapter->pdev->dev, nic_size,
734 &nic_dma_t, GFP_KERNEL);
735 if (!nic_info_addr)
736 return -ENOMEM;
737
738 memset(nic_info_addr, 0, nic_size);
739 nic_info = nic_info_addr;
740
741 nic_info->pci_func = cpu_to_le16(nic->pci_func);
742 nic_info->op_mode = cpu_to_le16(nic->op_mode);
743 nic_info->phys_port = cpu_to_le16(nic->phys_port);
744 nic_info->switch_mode = cpu_to_le16(nic->switch_mode);
745 nic_info->capabilities = cpu_to_le32(nic->capabilities);
746 nic_info->max_mac_filters = nic->max_mac_filters;
747 nic_info->max_tx_ques = cpu_to_le16(nic->max_tx_ques);
748 nic_info->max_rx_ques = cpu_to_le16(nic->max_rx_ques);
749 nic_info->min_tx_bw = cpu_to_le16(nic->min_tx_bw);
750 nic_info->max_tx_bw = cpu_to_le16(nic->max_tx_bw);
751
752 qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_SET_NIC_INFO);
753 cmd.req.arg[1] = MSD(nic_dma_t);
754 cmd.req.arg[2] = LSD(nic_dma_t);
755 cmd.req.arg[3] = ((nic->pci_func << 16) | nic_size);
756 err = qlcnic_issue_cmd(adapter, &cmd);
757
758 if (err != QLCNIC_RCODE_SUCCESS) {
759 dev_err(&adapter->pdev->dev,
760 "Failed to set nic info%d\n", err);
761 err = -EIO;
762 }
763
764 dma_free_coherent(&adapter->pdev->dev, nic_size, nic_info_addr,
765 nic_dma_t);
766 qlcnic_free_mbx_args(&cmd);
767
768 return err;
769 }
770
/* Get PCI Info of a partition */
/*
 * qlcnic_82xx_get_pci_info() - read per-function PCI information for all
 * QLCNIC_MAX_PCI_FUNC functions into the @pci_info array (which must
 * have at least that many elements), recomputing ahw->act_pci_func as
 * the number of NIC-type functions found.  Returns 0, -ENOMEM, or -EIO.
 */
int qlcnic_82xx_get_pci_info(struct qlcnic_adapter *adapter,
			     struct qlcnic_pci_info *pci_info)
{
	int err = 0, i;
	struct qlcnic_cmd_args cmd;
	dma_addr_t pci_info_dma_t;
	struct qlcnic_pci_info_le *npar;
	void *pci_info_addr;
	size_t npar_size = sizeof(struct qlcnic_pci_info_le);
	size_t pci_size = npar_size * QLCNIC_MAX_PCI_FUNC;

	pci_info_addr = dma_alloc_coherent(&adapter->pdev->dev, pci_size,
					   &pci_info_dma_t, GFP_KERNEL);
	if (!pci_info_addr)
		return -ENOMEM;
	memset(pci_info_addr, 0, pci_size);

	npar = pci_info_addr;
	qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_GET_PCI_INFO);
	cmd.req.arg[1] = MSD(pci_info_dma_t);
	cmd.req.arg[2] = LSD(pci_info_dma_t);
	cmd.req.arg[3] = pci_size;
	err = qlcnic_issue_cmd(adapter, &cmd);

	adapter->ahw->act_pci_func = 0;
	if (err == QLCNIC_RCODE_SUCCESS) {
		/* npar walks the firmware buffer while pci_info walks the
		 * caller's array in lockstep, one entry per function.
		 */
		for (i = 0; i < QLCNIC_MAX_PCI_FUNC; i++, npar++, pci_info++) {
			pci_info->id = le16_to_cpu(npar->id);
			pci_info->active = le16_to_cpu(npar->active);
			pci_info->type = le16_to_cpu(npar->type);
			if (pci_info->type == QLCNIC_TYPE_NIC)
				adapter->ahw->act_pci_func++;
			pci_info->default_port =
				le16_to_cpu(npar->default_port);
			pci_info->tx_min_bw =
				le16_to_cpu(npar->tx_min_bw);
			pci_info->tx_max_bw =
				le16_to_cpu(npar->tx_max_bw);
			memcpy(pci_info->mac, npar->mac, ETH_ALEN);
		}
	} else {
		dev_err(&adapter->pdev->dev,
			"Failed to get PCI Info%d\n", err);
		err = -EIO;
	}

	dma_free_coherent(&adapter->pdev->dev, pci_size, pci_info_addr,
			  pci_info_dma_t);
	qlcnic_free_mbx_args(&cmd);

	return err;
}
824
825 /* Configure eSwitch for port mirroring */
826 int qlcnic_config_port_mirroring(struct qlcnic_adapter *adapter, u8 id,
827 u8 enable_mirroring, u8 pci_func)
828 {
829 int err = -EIO;
830 u32 arg1;
831 struct qlcnic_cmd_args cmd;
832
833 if (adapter->ahw->op_mode != QLCNIC_MGMT_FUNC ||
834 !(adapter->eswitch[id].flags & QLCNIC_SWITCH_ENABLE))
835 return err;
836
837 arg1 = id | (enable_mirroring ? BIT_4 : 0);
838 arg1 |= pci_func << 8;
839
840 qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_SET_PORTMIRRORING);
841 cmd.req.arg[1] = arg1;
842 err = qlcnic_issue_cmd(adapter, &cmd);
843
844 if (err != QLCNIC_RCODE_SUCCESS)
845 dev_err(&adapter->pdev->dev,
846 "Failed to configure port mirroring%d on eswitch:%d\n",
847 pci_func, id);
848 else
849 dev_info(&adapter->pdev->dev,
850 "Configured eSwitch %d for port mirroring:%d\n",
851 id, pci_func);
852 qlcnic_free_mbx_args(&cmd);
853
854 return err;
855 }
856
/*
 * qlcnic_get_port_stats() - fetch per-port eSwitch statistics for
 * function @func from firmware into @esw_stats.
 * @rx_tx selects the rx or tx counter set.
 *
 * Non-management functions may only query their own function.  The
 * firmware writes a little-endian stats block into a DMA buffer which
 * is byte-swapped into @esw_stats on success.
 */
int qlcnic_get_port_stats(struct qlcnic_adapter *adapter, const u8 func,
		const u8 rx_tx, struct __qlcnic_esw_statistics *esw_stats) {

	size_t stats_size = sizeof(struct qlcnic_esw_stats_le);
	struct qlcnic_esw_stats_le *stats;
	dma_addr_t stats_dma_t;
	void *stats_addr;
	u32 arg1;
	struct qlcnic_cmd_args cmd;
	int err;

	if (esw_stats == NULL)
		return -ENOMEM;

	/* Non-management functions may only query themselves. */
	if ((adapter->ahw->op_mode != QLCNIC_MGMT_FUNC) &&
	    (func != adapter->ahw->pci_func)) {
		dev_err(&adapter->pdev->dev,
			"Not privilege to query stats for func=%d", func);
		return -EIO;
	}

	stats_addr = dma_alloc_coherent(&adapter->pdev->dev, stats_size,
					&stats_dma_t, GFP_KERNEL);
	if (!stats_addr) {
		dev_err(&adapter->pdev->dev, "Unable to allocate memory\n");
		return -ENOMEM;
	}
	memset(stats_addr, 0, stats_size);

	/* arg1 packing: function (bits 0-7), stats version (8-11),
	 * port-query selector (12-14), rx/tx (bit 15), buffer size (16+).
	 */
	arg1 = func | QLCNIC_STATS_VERSION << 8 | QLCNIC_STATS_PORT << 12;
	arg1 |= rx_tx << 15 | stats_size << 16;

	qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_GET_ESWITCH_STATS);
	cmd.req.arg[1] = arg1;
	cmd.req.arg[2] = MSD(stats_dma_t);
	cmd.req.arg[3] = LSD(stats_dma_t);
	err = qlcnic_issue_cmd(adapter, &cmd);

	if (!err) {
		stats = stats_addr;
		esw_stats->context_id = le16_to_cpu(stats->context_id);
		esw_stats->version = le16_to_cpu(stats->version);
		esw_stats->size = le16_to_cpu(stats->size);
		esw_stats->multicast_frames =
			le64_to_cpu(stats->multicast_frames);
		esw_stats->broadcast_frames =
			le64_to_cpu(stats->broadcast_frames);
		esw_stats->unicast_frames = le64_to_cpu(stats->unicast_frames);
		esw_stats->dropped_frames = le64_to_cpu(stats->dropped_frames);
		esw_stats->local_frames = le64_to_cpu(stats->local_frames);
		esw_stats->errors = le64_to_cpu(stats->errors);
		esw_stats->numbytes = le64_to_cpu(stats->numbytes);
	}

	dma_free_coherent(&adapter->pdev->dev, stats_size, stats_addr,
			  stats_dma_t);
	qlcnic_free_mbx_args(&cmd);

	return err;
}
917
918 /* This routine will retrieve the MAC statistics from firmware */
919 int qlcnic_get_mac_stats(struct qlcnic_adapter *adapter,
920 struct qlcnic_mac_statistics *mac_stats)
921 {
922 struct qlcnic_mac_statistics_le *stats;
923 struct qlcnic_cmd_args cmd;
924 size_t stats_size = sizeof(struct qlcnic_mac_statistics_le);
925 dma_addr_t stats_dma_t;
926 void *stats_addr;
927 int err;
928
929 if (mac_stats == NULL)
930 return -ENOMEM;
931
932 stats_addr = dma_alloc_coherent(&adapter->pdev->dev, stats_size,
933 &stats_dma_t, GFP_KERNEL);
934 if (!stats_addr) {
935 dev_err(&adapter->pdev->dev,
936 "%s: Unable to allocate memory.\n", __func__);
937 return -ENOMEM;
938 }
939 memset(stats_addr, 0, stats_size);
940 qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_GET_MAC_STATS);
941 cmd.req.arg[1] = stats_size << 16;
942 cmd.req.arg[2] = MSD(stats_dma_t);
943 cmd.req.arg[3] = LSD(stats_dma_t);
944 err = qlcnic_issue_cmd(adapter, &cmd);
945 if (!err) {
946 stats = stats_addr;
947 mac_stats->mac_tx_frames = le64_to_cpu(stats->mac_tx_frames);
948 mac_stats->mac_tx_bytes = le64_to_cpu(stats->mac_tx_bytes);
949 mac_stats->mac_tx_mcast_pkts =
950 le64_to_cpu(stats->mac_tx_mcast_pkts);
951 mac_stats->mac_tx_bcast_pkts =
952 le64_to_cpu(stats->mac_tx_bcast_pkts);
953 mac_stats->mac_rx_frames = le64_to_cpu(stats->mac_rx_frames);
954 mac_stats->mac_rx_bytes = le64_to_cpu(stats->mac_rx_bytes);
955 mac_stats->mac_rx_mcast_pkts =
956 le64_to_cpu(stats->mac_rx_mcast_pkts);
957 mac_stats->mac_rx_length_error =
958 le64_to_cpu(stats->mac_rx_length_error);
959 mac_stats->mac_rx_length_small =
960 le64_to_cpu(stats->mac_rx_length_small);
961 mac_stats->mac_rx_length_large =
962 le64_to_cpu(stats->mac_rx_length_large);
963 mac_stats->mac_rx_jabber = le64_to_cpu(stats->mac_rx_jabber);
964 mac_stats->mac_rx_dropped = le64_to_cpu(stats->mac_rx_dropped);
965 mac_stats->mac_rx_crc_error = le64_to_cpu(stats->mac_rx_crc_error);
966 } else {
967 dev_err(&adapter->pdev->dev,
968 "%s: Get mac stats failed, err=%d.\n", __func__, err);
969 }
970
971 dma_free_coherent(&adapter->pdev->dev, stats_size, stats_addr,
972 stats_dma_t);
973
974 qlcnic_free_mbx_args(&cmd);
975
976 return err;
977 }
978
979 int qlcnic_get_eswitch_stats(struct qlcnic_adapter *adapter, const u8 eswitch,
980 const u8 rx_tx, struct __qlcnic_esw_statistics *esw_stats) {
981
982 struct __qlcnic_esw_statistics port_stats;
983 u8 i;
984 int ret = -EIO;
985
986 if (esw_stats == NULL)
987 return -ENOMEM;
988 if (adapter->ahw->op_mode != QLCNIC_MGMT_FUNC)
989 return -EIO;
990 if (adapter->npars == NULL)
991 return -EIO;
992
993 memset(esw_stats, 0, sizeof(u64));
994 esw_stats->unicast_frames = QLCNIC_STATS_NOT_AVAIL;
995 esw_stats->multicast_frames = QLCNIC_STATS_NOT_AVAIL;
996 esw_stats->broadcast_frames = QLCNIC_STATS_NOT_AVAIL;
997 esw_stats->dropped_frames = QLCNIC_STATS_NOT_AVAIL;
998 esw_stats->errors = QLCNIC_STATS_NOT_AVAIL;
999 esw_stats->local_frames = QLCNIC_STATS_NOT_AVAIL;
1000 esw_stats->numbytes = QLCNIC_STATS_NOT_AVAIL;
1001 esw_stats->context_id = eswitch;
1002
1003 for (i = 0; i < adapter->ahw->act_pci_func; i++) {
1004 if (adapter->npars[i].phy_port != eswitch)
1005 continue;
1006
1007 memset(&port_stats, 0, sizeof(struct __qlcnic_esw_statistics));
1008 if (qlcnic_get_port_stats(adapter, adapter->npars[i].pci_func,
1009 rx_tx, &port_stats))
1010 continue;
1011
1012 esw_stats->size = port_stats.size;
1013 esw_stats->version = port_stats.version;
1014 QLCNIC_ADD_ESW_STATS(esw_stats->unicast_frames,
1015 port_stats.unicast_frames);
1016 QLCNIC_ADD_ESW_STATS(esw_stats->multicast_frames,
1017 port_stats.multicast_frames);
1018 QLCNIC_ADD_ESW_STATS(esw_stats->broadcast_frames,
1019 port_stats.broadcast_frames);
1020 QLCNIC_ADD_ESW_STATS(esw_stats->dropped_frames,
1021 port_stats.dropped_frames);
1022 QLCNIC_ADD_ESW_STATS(esw_stats->errors,
1023 port_stats.errors);
1024 QLCNIC_ADD_ESW_STATS(esw_stats->local_frames,
1025 port_stats.local_frames);
1026 QLCNIC_ADD_ESW_STATS(esw_stats->numbytes,
1027 port_stats.numbytes);
1028 ret = 0;
1029 }
1030 return ret;
1031 }
1032
1033 int qlcnic_clear_esw_stats(struct qlcnic_adapter *adapter, const u8 func_esw,
1034 const u8 port, const u8 rx_tx)
1035 {
1036 int err;
1037 u32 arg1;
1038 struct qlcnic_cmd_args cmd;
1039
1040 if (adapter->ahw->op_mode != QLCNIC_MGMT_FUNC)
1041 return -EIO;
1042
1043 if (func_esw == QLCNIC_STATS_PORT) {
1044 if (port >= QLCNIC_MAX_PCI_FUNC)
1045 goto err_ret;
1046 } else if (func_esw == QLCNIC_STATS_ESWITCH) {
1047 if (port >= QLCNIC_NIU_MAX_XG_PORTS)
1048 goto err_ret;
1049 } else {
1050 goto err_ret;
1051 }
1052
1053 if (rx_tx > QLCNIC_QUERY_TX_COUNTER)
1054 goto err_ret;
1055
1056 arg1 = port | QLCNIC_STATS_VERSION << 8 | func_esw << 12;
1057 arg1 |= BIT_14 | rx_tx << 15;
1058
1059 qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_GET_ESWITCH_STATS);
1060 cmd.req.arg[1] = arg1;
1061 err = qlcnic_issue_cmd(adapter, &cmd);
1062 qlcnic_free_mbx_args(&cmd);
1063 return err;
1064
1065 err_ret:
1066 dev_err(&adapter->pdev->dev,
1067 "Invalid args func_esw %d port %d rx_ctx %d\n",
1068 func_esw, port, rx_tx);
1069 return -EIO;
1070 }
1071
1072 static int
1073 __qlcnic_get_eswitch_port_config(struct qlcnic_adapter *adapter,
1074 u32 *arg1, u32 *arg2)
1075 {
1076 int err = -EIO;
1077 struct qlcnic_cmd_args cmd;
1078 u8 pci_func;
1079 pci_func = (*arg1 >> 8);
1080
1081 qlcnic_alloc_mbx_args(&cmd, adapter,
1082 QLCNIC_CMD_GET_ESWITCH_PORT_CONFIG);
1083 cmd.req.arg[1] = *arg1;
1084 err = qlcnic_issue_cmd(adapter, &cmd);
1085 *arg1 = cmd.rsp.arg[1];
1086 *arg2 = cmd.rsp.arg[2];
1087 qlcnic_free_mbx_args(&cmd);
1088
1089 if (err == QLCNIC_RCODE_SUCCESS)
1090 dev_info(&adapter->pdev->dev,
1091 "eSwitch port config for pci func %d\n", pci_func);
1092 else
1093 dev_err(&adapter->pdev->dev,
1094 "Failed to get eswitch port config for pci func %d\n",
1095 pci_func);
1096 return err;
1097 }
/* Configure eSwitch port
 * op_mode = 0 for setting default port behavior
 * op_mode = 1 for setting vlan id
 * op_mode = 2 for deleting vlan id
 * op_type = 0 for vlan_id
 * op_type = 1 for port vlan_id
 */
1105 int qlcnic_config_switch_port(struct qlcnic_adapter *adapter,
1106 struct qlcnic_esw_func_cfg *esw_cfg)
1107 {
1108 int err = -EIO, index;
1109 u32 arg1, arg2 = 0;
1110 struct qlcnic_cmd_args cmd;
1111 u8 pci_func;
1112
1113 if (adapter->ahw->op_mode != QLCNIC_MGMT_FUNC)
1114 return err;
1115 pci_func = esw_cfg->pci_func;
1116 index = qlcnic_is_valid_nic_func(adapter, pci_func);
1117 if (index < 0)
1118 return err;
1119 arg1 = (adapter->npars[index].phy_port & BIT_0);
1120 arg1 |= (pci_func << 8);
1121
1122 if (__qlcnic_get_eswitch_port_config(adapter, &arg1, &arg2))
1123 return err;
1124 arg1 &= ~(0x0ff << 8);
1125 arg1 |= (pci_func << 8);
1126 arg1 &= ~(BIT_2 | BIT_3);
1127 switch (esw_cfg->op_mode) {
1128 case QLCNIC_PORT_DEFAULTS:
1129 arg1 |= (BIT_4 | BIT_6 | BIT_7);
1130 arg2 |= (BIT_0 | BIT_1);
1131 if (adapter->ahw->capabilities & QLCNIC_FW_CAPABILITY_TSO)
1132 arg2 |= (BIT_2 | BIT_3);
1133 if (!(esw_cfg->discard_tagged))
1134 arg1 &= ~BIT_4;
1135 if (!(esw_cfg->promisc_mode))
1136 arg1 &= ~BIT_6;
1137 if (!(esw_cfg->mac_override))
1138 arg1 &= ~BIT_7;
1139 if (!(esw_cfg->mac_anti_spoof))
1140 arg2 &= ~BIT_0;
1141 if (!(esw_cfg->offload_flags & BIT_0))
1142 arg2 &= ~(BIT_1 | BIT_2 | BIT_3);
1143 if (!(esw_cfg->offload_flags & BIT_1))
1144 arg2 &= ~BIT_2;
1145 if (!(esw_cfg->offload_flags & BIT_2))
1146 arg2 &= ~BIT_3;
1147 break;
1148 case QLCNIC_ADD_VLAN:
1149 arg1 |= (BIT_2 | BIT_5);
1150 arg1 |= (esw_cfg->vlan_id << 16);
1151 break;
1152 case QLCNIC_DEL_VLAN:
1153 arg1 |= (BIT_3 | BIT_5);
1154 arg1 &= ~(0x0ffff << 16);
1155 break;
1156 default:
1157 return err;
1158 }
1159
1160 qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_CONFIGURE_ESWITCH);
1161 cmd.req.arg[1] = arg1;
1162 cmd.req.arg[2] = arg2;
1163 err = qlcnic_issue_cmd(adapter, &cmd);
1164 qlcnic_free_mbx_args(&cmd);
1165
1166 if (err != QLCNIC_RCODE_SUCCESS)
1167 dev_err(&adapter->pdev->dev,
1168 "Failed to configure eswitch pci func %d\n", pci_func);
1169 else
1170 dev_info(&adapter->pdev->dev,
1171 "Configured eSwitch for pci func %d\n", pci_func);
1172
1173 return err;
1174 }
1175
1176 int
1177 qlcnic_get_eswitch_port_config(struct qlcnic_adapter *adapter,
1178 struct qlcnic_esw_func_cfg *esw_cfg)
1179 {
1180 u32 arg1, arg2;
1181 int index;
1182 u8 phy_port;
1183
1184 if (adapter->ahw->op_mode == QLCNIC_MGMT_FUNC) {
1185 index = qlcnic_is_valid_nic_func(adapter, esw_cfg->pci_func);
1186 if (index < 0)
1187 return -EIO;
1188 phy_port = adapter->npars[index].phy_port;
1189 } else {
1190 phy_port = adapter->ahw->physical_port;
1191 }
1192 arg1 = phy_port;
1193 arg1 |= (esw_cfg->pci_func << 8);
1194 if (__qlcnic_get_eswitch_port_config(adapter, &arg1, &arg2))
1195 return -EIO;
1196
1197 esw_cfg->discard_tagged = !!(arg1 & BIT_4);
1198 esw_cfg->host_vlan_tag = !!(arg1 & BIT_5);
1199 esw_cfg->promisc_mode = !!(arg1 & BIT_6);
1200 esw_cfg->mac_override = !!(arg1 & BIT_7);
1201 esw_cfg->vlan_id = LSW(arg1 >> 16);
1202 esw_cfg->mac_anti_spoof = (arg2 & 0x1);
1203 esw_cfg->offload_flags = ((arg2 >> 1) & 0x7);
1204
1205 return 0;
1206 }
This page took 0.0712 seconds and 5 git commands to generate.