netxen: remove netxen_nic_phan_reg.h
[deliverable/linux.git] drivers/net/netxen/netxen_nic_ctx.c
/*
 * Copyright (C) 2003 - 2009 NetXen, Inc.
 * All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston,
 * MA 02111-1307, USA.
 *
 * The full GNU General Public License is included in this distribution
 * in the file called LICENSE.
 *
 * Contact Information:
 * info@netxen.com
 * NetXen Inc,
 * 18922 Forge Drive
 * Cupertino, CA 95014-0701
 *
 */

#include "netxen_nic_hw.h"
#include "netxen_nic.h"

#define NXHAL_VERSION	1
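/*
 * netxen_api_lock / netxen_api_unlock
 *
 * Serialize firmware command and context CRB accesses by acquiring and
 * releasing PCIe hardware semaphore 5.  Reading the LOCK register
 * returns 1 once the semaphore is owned; reading the UNLOCK register
 * drops it.  The lock attempt gives up after NX_OS_CRB_RETRY_COUNT
 * tries, spaced 1 ms apart.
 */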
static int
netxen_api_lock(struct netxen_adapter *adapter)
{
        u32 done = 0, timeout = 0;

        for (;;) {
                /* Acquire PCIE HW semaphore5 */
                done = NXRD32(adapter, NETXEN_PCIE_REG(PCIE_SEM5_LOCK));

                if (done == 1)
                        break;

                if (++timeout >= NX_OS_CRB_RETRY_COUNT) {
                        printk(KERN_ERR "%s: lock timeout.\n", __func__);
                        return -1;
                }

                msleep(1);
        }

#if 0
        NXWR32(adapter,
                NETXEN_API_LOCK_ID, NX_OS_API_LOCK_DRIVER);
#endif
        return 0;
}

static int
netxen_api_unlock(struct netxen_adapter *adapter)
{
        /* Release PCIE HW semaphore5 */
        NXRD32(adapter, NETXEN_PCIE_REG(PCIE_SEM5_UNLOCK));
        return 0;
}
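/*
 * Poll the CDRP CRB register until the firmware posts a response
 * (NX_CDRP_IS_RSP), or return NX_CDRP_RSP_TIMEOUT once
 * NX_OS_CRB_RETRY_COUNT milliseconds have elapsed.
 */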
static u32
netxen_poll_rsp(struct netxen_adapter *adapter)
{
        u32 rsp = NX_CDRP_RSP_OK;
        int timeout = 0;

        do {
                /* give at least 1ms for firmware to respond */
                msleep(1);

                if (++timeout > NX_OS_CRB_RETRY_COUNT)
                        return NX_CDRP_RSP_TIMEOUT;

                rsp = NXRD32(adapter, NX_CDRP_CRB_OFFSET);
        } while (!NX_CDRP_IS_RSP(rsp));

        return rsp;
}
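/*
 * Issue a single CDRP command to the firmware: take the API lock,
 * write the signature (built from the PCI function and HAL version)
 * and the three argument registers, kick off the command, then poll
 * for the card's response.  On failure the return code is read back
 * from the ARG1 register.
 */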
static u32
netxen_issue_cmd(struct netxen_adapter *adapter,
        u32 pci_fn, u32 version, u32 arg1, u32 arg2, u32 arg3, u32 cmd)
{
        u32 rsp;
        u32 signature = 0;
        u32 rcode = NX_RCODE_SUCCESS;

        signature = NX_CDRP_SIGNATURE_MAKE(pci_fn, version);

        /* Acquire semaphore before accessing CRB */
        if (netxen_api_lock(adapter))
                return NX_RCODE_TIMEOUT;

        NXWR32(adapter, NX_SIGN_CRB_OFFSET, signature);

        NXWR32(adapter, NX_ARG1_CRB_OFFSET, arg1);

        NXWR32(adapter, NX_ARG2_CRB_OFFSET, arg2);

        NXWR32(adapter, NX_ARG3_CRB_OFFSET, arg3);

        NXWR32(adapter, NX_CDRP_CRB_OFFSET, NX_CDRP_FORM_CMD(cmd));

        rsp = netxen_poll_rsp(adapter);

        if (rsp == NX_CDRP_RSP_TIMEOUT) {
                printk(KERN_ERR "%s: card response timeout.\n",
                                netxen_nic_driver_name);

                rcode = NX_RCODE_TIMEOUT;
        } else if (rsp == NX_CDRP_RSP_FAIL) {
                rcode = NXRD32(adapter, NX_ARG1_CRB_OFFSET);

                printk(KERN_ERR "%s: failed card response code: 0x%x\n",
                                netxen_nic_driver_name, rcode);
        }

        /* Release semaphore */
        netxen_api_unlock(adapter);

        return rcode;
}
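/*
 * Tell the firmware the new MTU for the receive context.  This is a
 * no-op unless the context is in the ACTIVE state.
 */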
int
nx_fw_cmd_set_mtu(struct netxen_adapter *adapter, int mtu)
{
        u32 rcode = NX_RCODE_SUCCESS;
        struct netxen_recv_context *recv_ctx = &adapter->recv_ctx;

        if (recv_ctx->state == NX_HOST_CTX_STATE_ACTIVE)
                rcode = netxen_issue_cmd(adapter,
                                adapter->ahw.pci_func,
                                NXHAL_VERSION,
                                recv_ctx->context_id,
                                mtu,
                                0,
                                NX_CDRP_CMD_SET_MTU);

        if (rcode != NX_RCODE_SUCCESS)
                return -EIO;

        return 0;
}
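/*
 * Create the receive context in firmware (NX_CDRP_CMD_CREATE_RX_CTX).
 * A host request block describing every RDS (receive descriptor) and
 * SDS (status descriptor) ring is built in DMA-coherent memory and its
 * physical address handed to the card; the card's response supplies
 * the per-ring producer/consumer and interrupt-mask CRB offsets, plus
 * the context id and state recorded in adapter->recv_ctx.
 */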
static int
nx_fw_cmd_create_rx_ctx(struct netxen_adapter *adapter)
{
        void *addr;
        nx_hostrq_rx_ctx_t *prq;
        nx_cardrsp_rx_ctx_t *prsp;
        nx_hostrq_rds_ring_t *prq_rds;
        nx_hostrq_sds_ring_t *prq_sds;
        nx_cardrsp_rds_ring_t *prsp_rds;
        nx_cardrsp_sds_ring_t *prsp_sds;
        struct nx_host_rds_ring *rds_ring;
        struct nx_host_sds_ring *sds_ring;

        dma_addr_t hostrq_phys_addr, cardrsp_phys_addr;
        u64 phys_addr;

        int i, nrds_rings, nsds_rings;
        size_t rq_size, rsp_size;
        u32 cap, reg, val;

        int err;

        struct netxen_recv_context *recv_ctx = &adapter->recv_ctx;

        nrds_rings = adapter->max_rds_rings;
        nsds_rings = adapter->max_sds_rings;

        rq_size =
                SIZEOF_HOSTRQ_RX(nx_hostrq_rx_ctx_t, nrds_rings, nsds_rings);
        rsp_size =
                SIZEOF_CARDRSP_RX(nx_cardrsp_rx_ctx_t, nrds_rings, nsds_rings);

        addr = pci_alloc_consistent(adapter->pdev,
                        rq_size, &hostrq_phys_addr);
        if (addr == NULL)
                return -ENOMEM;
        prq = (nx_hostrq_rx_ctx_t *)addr;

        addr = pci_alloc_consistent(adapter->pdev,
                        rsp_size, &cardrsp_phys_addr);
        if (addr == NULL) {
                err = -ENOMEM;
                goto out_free_rq;
        }
        prsp = (nx_cardrsp_rx_ctx_t *)addr;

        prq->host_rsp_dma_addr = cpu_to_le64(cardrsp_phys_addr);

        cap = (NX_CAP0_LEGACY_CONTEXT | NX_CAP0_LEGACY_MN);
        cap |= (NX_CAP0_JUMBO_CONTIGUOUS | NX_CAP0_LRO_CONTIGUOUS);

        prq->capabilities[0] = cpu_to_le32(cap);
        prq->host_int_crb_mode =
                cpu_to_le32(NX_HOST_INT_CRB_MODE_SHARED);
        prq->host_rds_crb_mode =
                cpu_to_le32(NX_HOST_RDS_CRB_MODE_UNIQUE);

        prq->num_rds_rings = cpu_to_le16(nrds_rings);
        prq->num_sds_rings = cpu_to_le16(nsds_rings);
        prq->rds_ring_offset = cpu_to_le32(0);

        val = le32_to_cpu(prq->rds_ring_offset) +
                (sizeof(nx_hostrq_rds_ring_t) * nrds_rings);
        prq->sds_ring_offset = cpu_to_le32(val);

        prq_rds = (nx_hostrq_rds_ring_t *)(prq->data +
                        le32_to_cpu(prq->rds_ring_offset));

        for (i = 0; i < nrds_rings; i++) {

                rds_ring = &recv_ctx->rds_rings[i];

                prq_rds[i].host_phys_addr = cpu_to_le64(rds_ring->phys_addr);
                prq_rds[i].ring_size = cpu_to_le32(rds_ring->num_desc);
                prq_rds[i].ring_kind = cpu_to_le32(i);
                prq_rds[i].buff_size = cpu_to_le64(rds_ring->dma_size);
        }

        prq_sds = (nx_hostrq_sds_ring_t *)(prq->data +
                        le32_to_cpu(prq->sds_ring_offset));

        for (i = 0; i < nsds_rings; i++) {

                sds_ring = &recv_ctx->sds_rings[i];

                prq_sds[i].host_phys_addr = cpu_to_le64(sds_ring->phys_addr);
                prq_sds[i].ring_size = cpu_to_le32(sds_ring->num_desc);
                prq_sds[i].msi_index = cpu_to_le16(i);
        }

        phys_addr = hostrq_phys_addr;
        err = netxen_issue_cmd(adapter,
                        adapter->ahw.pci_func,
                        NXHAL_VERSION,
                        (u32)(phys_addr >> 32),
                        (u32)(phys_addr & 0xffffffff),
                        rq_size,
                        NX_CDRP_CMD_CREATE_RX_CTX);
        if (err) {
                printk(KERN_WARNING
                        "Failed to create rx ctx in firmware, err %d\n", err);
                goto out_free_rsp;
        }

        prsp_rds = ((nx_cardrsp_rds_ring_t *)
                        &prsp->data[le32_to_cpu(prsp->rds_ring_offset)]);

        for (i = 0; i < le16_to_cpu(prsp->num_rds_rings); i++) {
                rds_ring = &recv_ctx->rds_rings[i];

                reg = le32_to_cpu(prsp_rds[i].host_producer_crb);
                rds_ring->crb_rcv_producer = NETXEN_NIC_REG(reg - 0x200);
        }

        prsp_sds = ((nx_cardrsp_sds_ring_t *)
                        &prsp->data[le32_to_cpu(prsp->sds_ring_offset)]);

        for (i = 0; i < le16_to_cpu(prsp->num_sds_rings); i++) {
                sds_ring = &recv_ctx->sds_rings[i];

                reg = le32_to_cpu(prsp_sds[i].host_consumer_crb);
                sds_ring->crb_sts_consumer = NETXEN_NIC_REG(reg - 0x200);

                reg = le32_to_cpu(prsp_sds[i].interrupt_crb);
                sds_ring->crb_intr_mask = NETXEN_NIC_REG(reg - 0x200);
        }

        recv_ctx->state = le32_to_cpu(prsp->host_ctx_state);
        recv_ctx->context_id = le16_to_cpu(prsp->context_id);
        recv_ctx->virt_port = prsp->virt_port;

out_free_rsp:
        pci_free_consistent(adapter->pdev, rsp_size, prsp, cardrsp_phys_addr);
out_free_rq:
        pci_free_consistent(adapter->pdev, rq_size, prq, hostrq_phys_addr);
        return err;
}
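/*
 * Ask the firmware to tear down the receive context created above.
 */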
static void
nx_fw_cmd_destroy_rx_ctx(struct netxen_adapter *adapter)
{
        struct netxen_recv_context *recv_ctx = &adapter->recv_ctx;

        if (netxen_issue_cmd(adapter,
                        adapter->ahw.pci_func,
                        NXHAL_VERSION,
                        recv_ctx->context_id,
                        NX_DESTROY_CTX_RESET,
                        0,
                        NX_CDRP_CMD_DESTROY_RX_CTX)) {

                printk(KERN_WARNING
                        "%s: Failed to destroy rx ctx in firmware\n",
                        netxen_nic_driver_name);
        }
}
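/*
 * Create the transmit context in firmware (NX_CDRP_CMD_CREATE_TX_CTX).
 * The host request describes the single command (CDS) ring plus the
 * dummy DMA buffer and the command-consumer index address; on success
 * the command producer CRB offset and the tx context id are saved in
 * the adapter.
 */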
static int
nx_fw_cmd_create_tx_ctx(struct netxen_adapter *adapter)
{
        nx_hostrq_tx_ctx_t *prq;
        nx_hostrq_cds_ring_t *prq_cds;
        nx_cardrsp_tx_ctx_t *prsp;
        void *rq_addr, *rsp_addr;
        size_t rq_size, rsp_size;
        u32 temp;
        int err = 0;
        u64 offset, phys_addr;
        dma_addr_t rq_phys_addr, rsp_phys_addr;
        struct nx_host_tx_ring *tx_ring = adapter->tx_ring;
        struct netxen_recv_context *recv_ctx = &adapter->recv_ctx;

        rq_size = SIZEOF_HOSTRQ_TX(nx_hostrq_tx_ctx_t);
        rq_addr = pci_alloc_consistent(adapter->pdev,
                rq_size, &rq_phys_addr);
        if (!rq_addr)
                return -ENOMEM;

        rsp_size = SIZEOF_CARDRSP_TX(nx_cardrsp_tx_ctx_t);
        rsp_addr = pci_alloc_consistent(adapter->pdev,
                rsp_size, &rsp_phys_addr);
        if (!rsp_addr) {
                err = -ENOMEM;
                goto out_free_rq;
        }

        memset(rq_addr, 0, rq_size);
        prq = (nx_hostrq_tx_ctx_t *)rq_addr;

        memset(rsp_addr, 0, rsp_size);
        prsp = (nx_cardrsp_tx_ctx_t *)rsp_addr;

        prq->host_rsp_dma_addr = cpu_to_le64(rsp_phys_addr);

        temp = (NX_CAP0_LEGACY_CONTEXT | NX_CAP0_LEGACY_MN | NX_CAP0_LSO);
        prq->capabilities[0] = cpu_to_le32(temp);

        prq->host_int_crb_mode =
                cpu_to_le32(NX_HOST_INT_CRB_MODE_SHARED);

        prq->interrupt_ctl = 0;
        prq->msi_index = 0;

        prq->dummy_dma_addr = cpu_to_le64(adapter->dummy_dma.phys_addr);

        offset = recv_ctx->phys_addr + sizeof(struct netxen_ring_ctx);
        prq->cmd_cons_dma_addr = cpu_to_le64(offset);

        prq_cds = &prq->cds_ring;

        prq_cds->host_phys_addr = cpu_to_le64(tx_ring->phys_addr);
        prq_cds->ring_size = cpu_to_le32(tx_ring->num_desc);

        phys_addr = rq_phys_addr;
        err = netxen_issue_cmd(adapter,
                        adapter->ahw.pci_func,
                        NXHAL_VERSION,
                        (u32)(phys_addr >> 32),
                        ((u32)phys_addr & 0xffffffff),
                        rq_size,
                        NX_CDRP_CMD_CREATE_TX_CTX);

        if (err == NX_RCODE_SUCCESS) {
                temp = le32_to_cpu(prsp->cds_ring.host_producer_crb);
                tx_ring->crb_cmd_producer = NETXEN_NIC_REG(temp - 0x200);
#if 0
                adapter->tx_state =
                        le32_to_cpu(prsp->host_ctx_state);
#endif
                adapter->tx_context_id =
                        le16_to_cpu(prsp->context_id);
        } else {
                printk(KERN_WARNING
                        "Failed to create tx ctx in firmware, err %d\n", err);
                err = -EIO;
        }

        pci_free_consistent(adapter->pdev, rsp_size, rsp_addr, rsp_phys_addr);

out_free_rq:
        pci_free_consistent(adapter->pdev, rq_size, rq_addr, rq_phys_addr);

        return err;
}
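/*
 * Ask the firmware to tear down the transmit context.
 */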
static void
nx_fw_cmd_destroy_tx_ctx(struct netxen_adapter *adapter)
{
        if (netxen_issue_cmd(adapter,
                        adapter->ahw.pci_func,
                        NXHAL_VERSION,
                        adapter->tx_context_id,
                        NX_DESTROY_CTX_RESET,
                        0,
                        NX_CDRP_CMD_DESTROY_TX_CTX)) {

                printk(KERN_WARNING
                        "%s: Failed to destroy tx ctx in firmware\n",
                        netxen_nic_driver_name);
        }
}
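/*
 * Per-PCI-function CRB registers used by the legacy context setup to
 * hand the ring context address and signature to the chip.  Each row
 * holds {addr_lo, signature, addr_hi}, as selected by the macros below.
 */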
static u64 ctx_addr_sig_regs[][3] = {
        {NETXEN_NIC_REG(0x188), NETXEN_NIC_REG(0x18c), NETXEN_NIC_REG(0x1c0)},
        {NETXEN_NIC_REG(0x190), NETXEN_NIC_REG(0x194), NETXEN_NIC_REG(0x1c4)},
        {NETXEN_NIC_REG(0x198), NETXEN_NIC_REG(0x19c), NETXEN_NIC_REG(0x1c8)},
        {NETXEN_NIC_REG(0x1a0), NETXEN_NIC_REG(0x1a4), NETXEN_NIC_REG(0x1cc)}
};

#define CRB_CTX_ADDR_REG_LO(FUNC_ID)	(ctx_addr_sig_regs[FUNC_ID][0])
#define CRB_CTX_ADDR_REG_HI(FUNC_ID)	(ctx_addr_sig_regs[FUNC_ID][2])
#define CRB_CTX_SIGNATURE_REG(FUNC_ID)	(ctx_addr_sig_regs[FUNC_ID][1])

#define lower32(x)	((u32)((x) & 0xffffffff))
#define upper32(x)	((u32)(((u64)(x) >> 32) & 0xffffffff))
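/*
 * Default per-port CRB offsets: receive producer registers (normal,
 * jumbo and LRO rings), status-ring consumer registers and software
 * interrupt masks.  Firmware-managed contexts later overwrite the
 * consumer and interrupt-mask offsets with values returned by the card.
 */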
static struct netxen_recv_crb recv_crb_registers[] = {
        /* Instance 0 */
        {
                /* crb_rcv_producer: */
                {
                        NETXEN_NIC_REG(0x100),
                        /* Jumbo frames */
                        NETXEN_NIC_REG(0x110),
                        /* LRO */
                        NETXEN_NIC_REG(0x120)
                },
                /* crb_sts_consumer: */
                {
                        NETXEN_NIC_REG(0x138),
                        NETXEN_NIC_REG_2(0x000),
                        NETXEN_NIC_REG_2(0x004),
                        NETXEN_NIC_REG_2(0x008),
                },
                /* sw_int_mask */
                {
                        CRB_SW_INT_MASK_0,
                        NETXEN_NIC_REG_2(0x044),
                        NETXEN_NIC_REG_2(0x048),
                        NETXEN_NIC_REG_2(0x04c),
                },
        },
        /* Instance 1 */
        {
                /* crb_rcv_producer: */
                {
                        NETXEN_NIC_REG(0x144),
                        /* Jumbo frames */
                        NETXEN_NIC_REG(0x154),
                        /* LRO */
                        NETXEN_NIC_REG(0x164)
                },
                /* crb_sts_consumer: */
                {
                        NETXEN_NIC_REG(0x17c),
                        NETXEN_NIC_REG_2(0x020),
                        NETXEN_NIC_REG_2(0x024),
                        NETXEN_NIC_REG_2(0x028),
                },
                /* sw_int_mask */
                {
                        CRB_SW_INT_MASK_1,
                        NETXEN_NIC_REG_2(0x064),
                        NETXEN_NIC_REG_2(0x068),
                        NETXEN_NIC_REG_2(0x06c),
                },
        },
        /* Instance 2 */
        {
                /* crb_rcv_producer: */
                {
                        NETXEN_NIC_REG(0x1d8),
                        /* Jumbo frames */
                        NETXEN_NIC_REG(0x1f8),
                        /* LRO */
                        NETXEN_NIC_REG(0x208)
                },
                /* crb_sts_consumer: */
                {
                        NETXEN_NIC_REG(0x220),
                        NETXEN_NIC_REG_2(0x03c),
                        NETXEN_NIC_REG_2(0x03c),
                        NETXEN_NIC_REG_2(0x03c),
                },
                /* sw_int_mask */
                {
                        CRB_SW_INT_MASK_2,
                        NETXEN_NIC_REG_2(0x03c),
                        NETXEN_NIC_REG_2(0x03c),
                        NETXEN_NIC_REG_2(0x03c),
                },
        },
        /* Instance 3 */
        {
                /* crb_rcv_producer: */
                {
                        NETXEN_NIC_REG(0x22c),
                        /* Jumbo frames */
                        NETXEN_NIC_REG(0x23c),
                        /* LRO */
                        NETXEN_NIC_REG(0x24c)
                },
                /* crb_sts_consumer: */
                {
                        NETXEN_NIC_REG(0x264),
                        NETXEN_NIC_REG_2(0x03c),
                        NETXEN_NIC_REG_2(0x03c),
                        NETXEN_NIC_REG_2(0x03c),
                },
                /* sw_int_mask */
                {
                        CRB_SW_INT_MASK_3,
                        NETXEN_NIC_REG_2(0x03c),
                        NETXEN_NIC_REG_2(0x03c),
                        NETXEN_NIC_REG_2(0x03c),
                },
        },
};
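/*
 * Legacy context setup: fill in the host ring context structure with
 * the tx, rds and sds ring addresses and sizes, then publish its
 * physical address and a signature through the per-function CRB
 * registers so the chip can pick it up.
 */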
static int
netxen_init_old_ctx(struct netxen_adapter *adapter)
{
        struct netxen_recv_context *recv_ctx;
        struct nx_host_rds_ring *rds_ring;
        struct nx_host_sds_ring *sds_ring;
        struct nx_host_tx_ring *tx_ring;
        int ring;
        int port = adapter->portnum;
        struct netxen_ring_ctx *hwctx;
        u32 signature;

        tx_ring = adapter->tx_ring;
        recv_ctx = &adapter->recv_ctx;
        hwctx = recv_ctx->hwctx;

        hwctx->cmd_ring_addr = cpu_to_le64(tx_ring->phys_addr);
        hwctx->cmd_ring_size = cpu_to_le32(tx_ring->num_desc);

        for (ring = 0; ring < adapter->max_rds_rings; ring++) {
                rds_ring = &recv_ctx->rds_rings[ring];

                hwctx->rcv_rings[ring].addr =
                        cpu_to_le64(rds_ring->phys_addr);
                hwctx->rcv_rings[ring].size =
                        cpu_to_le32(rds_ring->num_desc);
        }

        for (ring = 0; ring < adapter->max_sds_rings; ring++) {
                sds_ring = &recv_ctx->sds_rings[ring];

                if (ring == 0) {
                        hwctx->sts_ring_addr = cpu_to_le64(sds_ring->phys_addr);
                        hwctx->sts_ring_size = cpu_to_le32(sds_ring->num_desc);
                }
                hwctx->sts_rings[ring].addr = cpu_to_le64(sds_ring->phys_addr);
                hwctx->sts_rings[ring].size = cpu_to_le32(sds_ring->num_desc);
                hwctx->sts_rings[ring].msi_index = cpu_to_le16(ring);
        }
        hwctx->sts_ring_count = cpu_to_le32(adapter->max_sds_rings);

        signature = (adapter->max_sds_rings > 1) ?
                NETXEN_CTX_SIGNATURE_V2 : NETXEN_CTX_SIGNATURE;

        NXWR32(adapter, CRB_CTX_ADDR_REG_LO(port),
                        lower32(recv_ctx->phys_addr));
        NXWR32(adapter, CRB_CTX_ADDR_REG_HI(port),
                        upper32(recv_ctx->phys_addr));
        NXWR32(adapter, CRB_CTX_SIGNATURE_REG(port),
                        signature | port);
        return 0;
}
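/*
 * Allocate all DMA-coherent hardware resources: the ring context block
 * (plus the tx consumer index that follows it), the tx descriptor ring
 * and every rds/sds descriptor ring.  On P2 hardware the legacy context
 * is then programmed directly; on newer revisions the rx and tx
 * contexts are created through firmware commands.  On failure the
 * resources allocated so far are freed.
 */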
int netxen_alloc_hw_resources(struct netxen_adapter *adapter)
{
        void *addr;
        int err = 0;
        int ring;
        struct netxen_recv_context *recv_ctx;
        struct nx_host_rds_ring *rds_ring;
        struct nx_host_sds_ring *sds_ring;
        struct nx_host_tx_ring *tx_ring;

        struct pci_dev *pdev = adapter->pdev;
        struct net_device *netdev = adapter->netdev;
        int port = adapter->portnum;

        recv_ctx = &adapter->recv_ctx;
        tx_ring = adapter->tx_ring;

        addr = pci_alloc_consistent(pdev,
                        sizeof(struct netxen_ring_ctx) + sizeof(uint32_t),
                        &recv_ctx->phys_addr);
        if (addr == NULL) {
                dev_err(&pdev->dev, "failed to allocate hw context\n");
                return -ENOMEM;
        }

        memset(addr, 0, sizeof(struct netxen_ring_ctx));
        recv_ctx->hwctx = (struct netxen_ring_ctx *)addr;
        recv_ctx->hwctx->ctx_id = cpu_to_le32(port);
        recv_ctx->hwctx->cmd_consumer_offset =
                cpu_to_le64(recv_ctx->phys_addr +
                        sizeof(struct netxen_ring_ctx));
        tx_ring->hw_consumer =
                (__le32 *)(((char *)addr) + sizeof(struct netxen_ring_ctx));

        /* cmd desc ring */
        addr = pci_alloc_consistent(pdev, TX_DESC_RINGSIZE(tx_ring),
                        &tx_ring->phys_addr);

        if (addr == NULL) {
                dev_err(&pdev->dev, "%s: failed to allocate tx desc ring\n",
                                netdev->name);
                return -ENOMEM;
        }

        tx_ring->desc_head = (struct cmd_desc_type0 *)addr;

        for (ring = 0; ring < adapter->max_rds_rings; ring++) {
                rds_ring = &recv_ctx->rds_rings[ring];
                addr = pci_alloc_consistent(adapter->pdev,
                                RCV_DESC_RINGSIZE(rds_ring),
                                &rds_ring->phys_addr);
                if (addr == NULL) {
                        dev_err(&pdev->dev,
                                "%s: failed to allocate rds ring [%d]\n",
                                netdev->name, ring);
                        err = -ENOMEM;
                        goto err_out_free;
                }
                rds_ring->desc_head = (struct rcv_desc *)addr;

                if (NX_IS_REVISION_P2(adapter->ahw.revision_id))
                        rds_ring->crb_rcv_producer =
                                recv_crb_registers[port].crb_rcv_producer[ring];
        }

        for (ring = 0; ring < adapter->max_sds_rings; ring++) {
                sds_ring = &recv_ctx->sds_rings[ring];

                addr = pci_alloc_consistent(adapter->pdev,
                                STATUS_DESC_RINGSIZE(sds_ring),
                                &sds_ring->phys_addr);
                if (addr == NULL) {
                        dev_err(&pdev->dev,
                                "%s: failed to allocate sds ring [%d]\n",
                                netdev->name, ring);
                        err = -ENOMEM;
                        goto err_out_free;
                }
                sds_ring->desc_head = (struct status_desc *)addr;

                sds_ring->crb_sts_consumer =
                        recv_crb_registers[port].crb_sts_consumer[ring];

                sds_ring->crb_intr_mask =
                        recv_crb_registers[port].sw_int_mask[ring];
        }

        if (!NX_IS_REVISION_P2(adapter->ahw.revision_id)) {
                err = nx_fw_cmd_create_rx_ctx(adapter);
                if (err)
                        goto err_out_free;
                err = nx_fw_cmd_create_tx_ctx(adapter);
                if (err)
                        goto err_out_free;
        } else {
                err = netxen_init_old_ctx(adapter);
                if (err)
                        goto err_out_free;
        }

        return 0;

err_out_free:
        netxen_free_hw_resources(adapter);
        return err;
}
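/*
 * Undo netxen_alloc_hw_resources(): destroy the firmware contexts (or,
 * on P2, write the D3 reset signature), wait briefly for DMA queues to
 * drain, then free the context block and all descriptor rings.
 */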
void netxen_free_hw_resources(struct netxen_adapter *adapter)
{
        struct netxen_recv_context *recv_ctx;
        struct nx_host_rds_ring *rds_ring;
        struct nx_host_sds_ring *sds_ring;
        struct nx_host_tx_ring *tx_ring;
        int ring;

        int port = adapter->portnum;

        if (!NX_IS_REVISION_P2(adapter->ahw.revision_id)) {
                nx_fw_cmd_destroy_rx_ctx(adapter);
                nx_fw_cmd_destroy_tx_ctx(adapter);
        } else {
                netxen_api_lock(adapter);
                NXWR32(adapter, CRB_CTX_SIGNATURE_REG(port),
                                NETXEN_CTX_D3_RESET | port);
                netxen_api_unlock(adapter);
        }

        /* Allow dma queues to drain after context reset */
        msleep(20);

        recv_ctx = &adapter->recv_ctx;

        if (recv_ctx->hwctx != NULL) {
                pci_free_consistent(adapter->pdev,
                                sizeof(struct netxen_ring_ctx) +
                                sizeof(uint32_t),
                                recv_ctx->hwctx,
                                recv_ctx->phys_addr);
                recv_ctx->hwctx = NULL;
        }

        tx_ring = adapter->tx_ring;
        if (tx_ring->desc_head != NULL) {
                pci_free_consistent(adapter->pdev,
                                TX_DESC_RINGSIZE(tx_ring),
                                tx_ring->desc_head, tx_ring->phys_addr);
                tx_ring->desc_head = NULL;
        }

        for (ring = 0; ring < adapter->max_rds_rings; ring++) {
                rds_ring = &recv_ctx->rds_rings[ring];

                if (rds_ring->desc_head != NULL) {
                        pci_free_consistent(adapter->pdev,
                                        RCV_DESC_RINGSIZE(rds_ring),
                                        rds_ring->desc_head,
                                        rds_ring->phys_addr);
                        rds_ring->desc_head = NULL;
                }
        }

        for (ring = 0; ring < adapter->max_sds_rings; ring++) {
                sds_ring = &recv_ctx->sds_rings[ring];

                if (sds_ring->desc_head != NULL) {
                        pci_free_consistent(adapter->pdev,
                                        STATUS_DESC_RINGSIZE(sds_ring),
                                        sds_ring->desc_head,
                                        sds_ring->phys_addr);
                        sds_ring->desc_head = NULL;
                }
        }
}