drivers/net/netxen/netxen_nic_init.c
1 /*
2 * Copyright (C) 2003 - 2009 NetXen, Inc.
3 * Copyright (C) 2009 - QLogic Corporation.
4 * All rights reserved.
5 *
6 * This program is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU General Public License
8 * as published by the Free Software Foundation; either version 2
9 * of the License, or (at your option) any later version.
10 *
11 * This program is distributed in the hope that it will be useful, but
12 * WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, write to the Free Software
18 * Foundation, Inc., 59 Temple Place - Suite 330, Boston,
19 * MA 02111-1307, USA.
20 *
21 * The full GNU General Public License is included in this distribution
22 * in the file called LICENSE.
23 *
24 */
25
26 #include <linux/netdevice.h>
27 #include <linux/delay.h>
28 #include "netxen_nic.h"
29 #include "netxen_nic_hw.h"
30
31 struct crb_addr_pair {
32 u32 addr;
33 u32 data;
34 };
35
36 #define NETXEN_MAX_CRB_XFORM 60
37 static unsigned int crb_addr_xform[NETXEN_MAX_CRB_XFORM];
38 #define NETXEN_ADDR_ERROR (0xffffffff)
39
40 #define crb_addr_transform(name) \
41 crb_addr_xform[NETXEN_HW_PX_MAP_CRB_##name] = \
42 NETXEN_HW_CRB_HUB_AGT_ADR_##name << 20
43
44 #define NETXEN_NIC_XDMA_RESET 0x8000ff
45
46 static void
47 netxen_post_rx_buffers_nodb(struct netxen_adapter *adapter,
48 struct nx_host_rds_ring *rds_ring);
49 static int netxen_p3_has_mn(struct netxen_adapter *adapter);
50
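/*
 * Build the CRB address translation table: record, for every CRB hub
 * agent, the PCI window base that netxen_decode_crb_addr() uses to
 * turn internal Phantom CRB addresses into PCI CRB offsets.
 */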
51 static void crb_addr_transform_setup(void)
52 {
53 crb_addr_transform(XDMA);
54 crb_addr_transform(TIMR);
55 crb_addr_transform(SRE);
56 crb_addr_transform(SQN3);
57 crb_addr_transform(SQN2);
58 crb_addr_transform(SQN1);
59 crb_addr_transform(SQN0);
60 crb_addr_transform(SQS3);
61 crb_addr_transform(SQS2);
62 crb_addr_transform(SQS1);
63 crb_addr_transform(SQS0);
64 crb_addr_transform(RPMX7);
65 crb_addr_transform(RPMX6);
66 crb_addr_transform(RPMX5);
67 crb_addr_transform(RPMX4);
68 crb_addr_transform(RPMX3);
69 crb_addr_transform(RPMX2);
70 crb_addr_transform(RPMX1);
71 crb_addr_transform(RPMX0);
72 crb_addr_transform(ROMUSB);
73 crb_addr_transform(SN);
74 crb_addr_transform(QMN);
75 crb_addr_transform(QMS);
76 crb_addr_transform(PGNI);
77 crb_addr_transform(PGND);
78 crb_addr_transform(PGN3);
79 crb_addr_transform(PGN2);
80 crb_addr_transform(PGN1);
81 crb_addr_transform(PGN0);
82 crb_addr_transform(PGSI);
83 crb_addr_transform(PGSD);
84 crb_addr_transform(PGS3);
85 crb_addr_transform(PGS2);
86 crb_addr_transform(PGS1);
87 crb_addr_transform(PGS0);
88 crb_addr_transform(PS);
89 crb_addr_transform(PH);
90 crb_addr_transform(NIU);
91 crb_addr_transform(I2Q);
92 crb_addr_transform(EG);
93 crb_addr_transform(MN);
94 crb_addr_transform(MS);
95 crb_addr_transform(CAS2);
96 crb_addr_transform(CAS1);
97 crb_addr_transform(CAS0);
98 crb_addr_transform(CAM);
99 crb_addr_transform(C2C1);
100 crb_addr_transform(C2C0);
101 crb_addr_transform(SMB);
102 crb_addr_transform(OCM0);
103 crb_addr_transform(I2C0);
104 }
105
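/*
 * Unmap and free any receive buffers still owned by the driver on all
 * RDS rings; used when the receive side is being torn down.
 */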
106 void netxen_release_rx_buffers(struct netxen_adapter *adapter)
107 {
108 struct netxen_recv_context *recv_ctx;
109 struct nx_host_rds_ring *rds_ring;
110 struct netxen_rx_buffer *rx_buf;
111 int i, ring;
112
113 recv_ctx = &adapter->recv_ctx;
114 for (ring = 0; ring < adapter->max_rds_rings; ring++) {
115 rds_ring = &recv_ctx->rds_rings[ring];
116 for (i = 0; i < rds_ring->num_desc; ++i) {
117 rx_buf = &(rds_ring->rx_buf_arr[i]);
118 if (rx_buf->state == NETXEN_BUFFER_FREE)
119 continue;
120 pci_unmap_single(adapter->pdev,
121 rx_buf->dma,
122 rds_ring->dma_size,
123 PCI_DMA_FROMDEVICE);
124 if (rx_buf->skb != NULL)
125 dev_kfree_skb_any(rx_buf->skb);
126 }
127 }
128 }
129
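/*
 * Walk the tx ring, unmapping any DMA mappings and freeing the skbs of
 * commands that the firmware never completed.
 */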
130 void netxen_release_tx_buffers(struct netxen_adapter *adapter)
131 {
132 struct netxen_cmd_buffer *cmd_buf;
133 struct netxen_skb_frag *buffrag;
134 int i, j;
135 struct nx_host_tx_ring *tx_ring = adapter->tx_ring;
136
137 cmd_buf = tx_ring->cmd_buf_arr;
138 for (i = 0; i < tx_ring->num_desc; i++) {
139 buffrag = cmd_buf->frag_array;
140 if (buffrag->dma) {
141 pci_unmap_single(adapter->pdev, buffrag->dma,
142 buffrag->length, PCI_DMA_TODEVICE);
143 buffrag->dma = 0ULL;
144 }
145 for (j = 0; j < cmd_buf->frag_count; j++) {
146 buffrag++;
147 if (buffrag->dma) {
148 pci_unmap_page(adapter->pdev, buffrag->dma,
149 buffrag->length,
150 PCI_DMA_TODEVICE);
151 buffrag->dma = 0ULL;
152 }
153 }
154 if (cmd_buf->skb) {
155 dev_kfree_skb_any(cmd_buf->skb);
156 cmd_buf->skb = NULL;
157 }
158 cmd_buf++;
159 }
160 }
161
162 void netxen_free_sw_resources(struct netxen_adapter *adapter)
163 {
164 struct netxen_recv_context *recv_ctx;
165 struct nx_host_rds_ring *rds_ring;
166 struct nx_host_tx_ring *tx_ring;
167 int ring;
168
169 recv_ctx = &adapter->recv_ctx;
170
171 if (recv_ctx->rds_rings == NULL)
172 goto skip_rds;
173
174 for (ring = 0; ring < adapter->max_rds_rings; ring++) {
175 rds_ring = &recv_ctx->rds_rings[ring];
176 vfree(rds_ring->rx_buf_arr);
177 rds_ring->rx_buf_arr = NULL;
178 }
179 kfree(recv_ctx->rds_rings);
180
181 skip_rds:
182 if (adapter->tx_ring == NULL)
183 return;
184
185 tx_ring = adapter->tx_ring;
186 vfree(tx_ring->cmd_buf_arr);
187 }
188
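/*
 * Allocate the host-side bookkeeping for the data path: the tx command
 * buffer array, one rx buffer array and free list per RDS ring, and
 * the per-SDS-ring free lists used while processing completions.
 */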
189 int netxen_alloc_sw_resources(struct netxen_adapter *adapter)
190 {
191 struct netxen_recv_context *recv_ctx;
192 struct nx_host_rds_ring *rds_ring;
193 struct nx_host_sds_ring *sds_ring;
194 struct nx_host_tx_ring *tx_ring;
195 struct netxen_rx_buffer *rx_buf;
196 int ring, i, size;
197
198 struct netxen_cmd_buffer *cmd_buf_arr;
199 struct net_device *netdev = adapter->netdev;
200 struct pci_dev *pdev = adapter->pdev;
201
202 size = sizeof(struct nx_host_tx_ring);
203 tx_ring = kzalloc(size, GFP_KERNEL);
204 if (tx_ring == NULL) {
205 dev_err(&pdev->dev, "%s: failed to allocate tx ring struct\n",
206 netdev->name);
207 return -ENOMEM;
208 }
209 adapter->tx_ring = tx_ring;
210
211 tx_ring->num_desc = adapter->num_txd;
212 tx_ring->txq = netdev_get_tx_queue(netdev, 0);
213
214 cmd_buf_arr = vmalloc(TX_BUFF_RINGSIZE(tx_ring));
215 if (cmd_buf_arr == NULL) {
216 dev_err(&pdev->dev, "%s: failed to allocate cmd buffer ring\n",
217 netdev->name);
218 return -ENOMEM;
219 }
220 memset(cmd_buf_arr, 0, TX_BUFF_RINGSIZE(tx_ring));
221 tx_ring->cmd_buf_arr = cmd_buf_arr;
222
223 recv_ctx = &adapter->recv_ctx;
224
225 size = adapter->max_rds_rings * sizeof (struct nx_host_rds_ring);
226 rds_ring = kzalloc(size, GFP_KERNEL);
227 if (rds_ring == NULL) {
228 dev_err(&pdev->dev, "%s: failed to allocate rds ring struct\n",
229 netdev->name);
230 return -ENOMEM;
231 }
232 recv_ctx->rds_rings = rds_ring;
233
234 for (ring = 0; ring < adapter->max_rds_rings; ring++) {
235 rds_ring = &recv_ctx->rds_rings[ring];
236 switch (ring) {
237 case RCV_RING_NORMAL:
238 rds_ring->num_desc = adapter->num_rxd;
239 if (adapter->ahw.cut_through) {
240 rds_ring->dma_size =
241 NX_CT_DEFAULT_RX_BUF_LEN;
242 rds_ring->skb_size =
243 NX_CT_DEFAULT_RX_BUF_LEN;
244 } else {
245 if (NX_IS_REVISION_P3(adapter->ahw.revision_id))
246 rds_ring->dma_size =
247 NX_P3_RX_BUF_MAX_LEN;
248 else
249 rds_ring->dma_size =
250 NX_P2_RX_BUF_MAX_LEN;
251 rds_ring->skb_size =
252 rds_ring->dma_size + NET_IP_ALIGN;
253 }
254 break;
255
256 case RCV_RING_JUMBO:
257 rds_ring->num_desc = adapter->num_jumbo_rxd;
258 if (NX_IS_REVISION_P3(adapter->ahw.revision_id))
259 rds_ring->dma_size =
260 NX_P3_RX_JUMBO_BUF_MAX_LEN;
261 else
262 rds_ring->dma_size =
263 NX_P2_RX_JUMBO_BUF_MAX_LEN;
264
265 if (adapter->capabilities & NX_CAP0_HW_LRO)
266 rds_ring->dma_size += NX_LRO_BUFFER_EXTRA;
267
268 rds_ring->skb_size =
269 rds_ring->dma_size + NET_IP_ALIGN;
270 break;
271
272 case RCV_RING_LRO:
273 rds_ring->num_desc = adapter->num_lro_rxd;
274 rds_ring->dma_size = NX_RX_LRO_BUFFER_LENGTH;
275 rds_ring->skb_size = rds_ring->dma_size + NET_IP_ALIGN;
276 break;
277
278 }
279 rds_ring->rx_buf_arr = (struct netxen_rx_buffer *)
280 vmalloc(RCV_BUFF_RINGSIZE(rds_ring));
281 if (rds_ring->rx_buf_arr == NULL) {
282 printk(KERN_ERR "%s: Failed to allocate "
283 "rx buffer ring %d\n",
284 netdev->name, ring);
285 /* free whatever was already allocated */
286 goto err_out;
287 }
288 memset(rds_ring->rx_buf_arr, 0, RCV_BUFF_RINGSIZE(rds_ring));
289 INIT_LIST_HEAD(&rds_ring->free_list);
290 /*
291 * Now go through all of them, set reference handles
292 * and put them in the queues.
293 */
294 rx_buf = rds_ring->rx_buf_arr;
295 for (i = 0; i < rds_ring->num_desc; i++) {
296 list_add_tail(&rx_buf->list,
297 &rds_ring->free_list);
298 rx_buf->ref_handle = i;
299 rx_buf->state = NETXEN_BUFFER_FREE;
300 rx_buf++;
301 }
302 spin_lock_init(&rds_ring->lock);
303 }
304
305 for (ring = 0; ring < adapter->max_sds_rings; ring++) {
306 sds_ring = &recv_ctx->sds_rings[ring];
307 sds_ring->irq = adapter->msix_entries[ring].vector;
308 sds_ring->adapter = adapter;
309 sds_ring->num_desc = adapter->num_rxd;
310
311 for (i = 0; i < NUM_RCV_DESC_RINGS; i++)
312 INIT_LIST_HEAD(&sds_ring->free_list[i]);
313 }
314
315 return 0;
316
317 err_out:
318 netxen_free_sw_resources(adapter);
319 return -ENOMEM;
320 }
321
322 /*
323 * netxen_decode_crb_addr() - utility to translate from internal Phantom CRB
324 * address to external PCI CRB address.
325 */
326 static u32 netxen_decode_crb_addr(u32 addr)
327 {
328 int i;
329 u32 base_addr, offset, pci_base;
330
331 crb_addr_transform_setup();
332
333 pci_base = NETXEN_ADDR_ERROR;
334 base_addr = addr & 0xfff00000;
335 offset = addr & 0x000fffff;
336
337 for (i = 0; i < NETXEN_MAX_CRB_XFORM; i++) {
338 if (crb_addr_xform[i] == base_addr) {
339 pci_base = i << 20;
340 break;
341 }
342 }
343 if (pci_base == NETXEN_ADDR_ERROR)
344 return pci_base;
345 else
346 return (pci_base + offset);
347 }
348
349 #define NETXEN_MAX_ROM_WAIT_USEC 100
350
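/*
 * Poll the ROM glue status register until the pending ROM instruction
 * completes, waiting at most NETXEN_MAX_ROM_WAIT_USEC microseconds.
 */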
351 static int netxen_wait_rom_done(struct netxen_adapter *adapter)
352 {
353 long timeout = 0;
354 long done = 0;
355
356 cond_resched();
357
358 while (done == 0) {
359 done = NXRD32(adapter, NETXEN_ROMUSB_GLB_STATUS);
360 done &= 2;
361 if (++timeout >= NETXEN_MAX_ROM_WAIT_USEC) {
362 dev_err(&adapter->pdev->dev,
363 "Timeout reached waiting for rom done\n");
364 return -EIO;
365 }
366 udelay(1);
367 }
368 return 0;
369 }
370
371 static int do_rom_fast_read(struct netxen_adapter *adapter,
372 int addr, int *valp)
373 {
374 NXWR32(adapter, NETXEN_ROMUSB_ROM_ADDRESS, addr);
375 NXWR32(adapter, NETXEN_ROMUSB_ROM_DUMMY_BYTE_CNT, 0);
376 NXWR32(adapter, NETXEN_ROMUSB_ROM_ABYTE_CNT, 3);
377 NXWR32(adapter, NETXEN_ROMUSB_ROM_INSTR_OPCODE, 0xb);
378 if (netxen_wait_rom_done(adapter)) {
379 printk(KERN_ERR "Error waiting for rom done\n");
380 return -EIO;
381 }
382 /* reset abyte_cnt and dummy_byte_cnt */
383 NXWR32(adapter, NETXEN_ROMUSB_ROM_ABYTE_CNT, 0);
384 udelay(10);
385 NXWR32(adapter, NETXEN_ROMUSB_ROM_DUMMY_BYTE_CNT, 0);
386
387 *valp = NXRD32(adapter, NETXEN_ROMUSB_ROM_RDATA);
388 return 0;
389 }
390
391 static int do_rom_fast_read_words(struct netxen_adapter *adapter, int addr,
392 u8 *bytes, size_t size)
393 {
394 int addridx;
395 int ret = 0;
396
397 for (addridx = addr; addridx < (addr + size); addridx += 4) {
398 int v;
399 ret = do_rom_fast_read(adapter, addridx, &v);
400 if (ret != 0)
401 break;
402 *(__le32 *)bytes = cpu_to_le32(v);
403 bytes += 4;
404 }
405
406 return ret;
407 }
408
409 int
410 netxen_rom_fast_read_words(struct netxen_adapter *adapter, int addr,
411 u8 *bytes, size_t size)
412 {
413 int ret;
414
415 ret = netxen_rom_lock(adapter);
416 if (ret < 0)
417 return ret;
418
419 ret = do_rom_fast_read_words(adapter, addr, bytes, size);
420
421 netxen_rom_unlock(adapter);
422 return ret;
423 }
424
425 int netxen_rom_fast_read(struct netxen_adapter *adapter, int addr, int *valp)
426 {
427 int ret;
428
429 if (netxen_rom_lock(adapter) != 0)
430 return -EIO;
431
432 ret = do_rom_fast_read(adapter, addr, valp);
433 netxen_rom_unlock(adapter);
434 return ret;
435 }
436
437 #define NETXEN_BOARDTYPE 0x4008
438 #define NETXEN_BOARDNUM 0x400c
439 #define NETXEN_CHIPNUM 0x4010
440
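/*
 * Issue a global software reset and then replay the CRB init
 * (address, data) pairs stored in flash, skipping registers that must
 * not be touched while the PCI interface is in use.
 */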
441 int netxen_pinit_from_rom(struct netxen_adapter *adapter)
442 {
443 int addr, val;
444 int i, n, init_delay = 0;
445 struct crb_addr_pair *buf;
446 unsigned offset;
447 u32 off;
448
449 /* resetall */
450 netxen_rom_lock(adapter);
451 NXWR32(adapter, NETXEN_ROMUSB_GLB_SW_RESET, 0xffffffff);
452 netxen_rom_unlock(adapter);
453
454 if (NX_IS_REVISION_P3(adapter->ahw.revision_id)) {
455 if (netxen_rom_fast_read(adapter, 0, &n) != 0 ||
456 (n != 0xcafecafe) ||
457 netxen_rom_fast_read(adapter, 4, &n) != 0) {
458 printk(KERN_ERR "%s: ERROR Reading crb_init area: "
459 "n: %08x\n", netxen_nic_driver_name, n);
460 return -EIO;
461 }
462 offset = n & 0xffffU;
463 n = (n >> 16) & 0xffffU;
464 } else {
465 if (netxen_rom_fast_read(adapter, 0, &n) != 0 ||
466 !(n & 0x80000000)) {
467 printk(KERN_ERR "%s: ERROR Reading crb_init area: "
468 "n: %08x\n", netxen_nic_driver_name, n);
469 return -EIO;
470 }
471 offset = 1;
472 n &= ~0x80000000;
473 }
474
475 if (n >= 1024) {
476 printk(KERN_ERR "%s: n=0x%x Error! NetXen card flash not"
477 " initialized.\n", __func__, n);
478 return -EIO;
479 }
480
481 buf = kcalloc(n, sizeof(struct crb_addr_pair), GFP_KERNEL);
482 if (buf == NULL) {
483 printk(KERN_ERR "%s: netxen_pinit_from_rom: Unable to allocate memory\n",
484 netxen_nic_driver_name);
485 return -ENOMEM;
486 }
487
488 for (i = 0; i < n; i++) {
489 if (netxen_rom_fast_read(adapter, 8*i + 4*offset, &val) != 0 ||
490 netxen_rom_fast_read(adapter, 8*i + 4*offset + 4, &addr) != 0) {
491 kfree(buf);
492 return -EIO;
493 }
494
495 buf[i].addr = addr;
496 buf[i].data = val;
497
498 }
499
500 for (i = 0; i < n; i++) {
501
502 off = netxen_decode_crb_addr(buf[i].addr);
503 if (off == NETXEN_ADDR_ERROR) {
504 printk(KERN_ERR"CRB init value out of range %x\n",
505 buf[i].addr);
506 continue;
507 }
508 off += NETXEN_PCI_CRBSPACE;
509
510 if (off & 1)
511 continue;
512
513 /* skipping cold reboot MAGIC */
514 if (off == NETXEN_CAM_RAM(0x1fc))
515 continue;
516
517 if (NX_IS_REVISION_P3(adapter->ahw.revision_id)) {
518 if (off == (NETXEN_CRB_I2C0 + 0x1c))
519 continue;
520 /* do not reset PCI */
521 if (off == (ROMUSB_GLB + 0xbc))
522 continue;
523 if (off == (ROMUSB_GLB + 0xa8))
524 continue;
525 if (off == (ROMUSB_GLB + 0xc8)) /* core clock */
526 continue;
527 if (off == (ROMUSB_GLB + 0x24)) /* MN clock */
528 continue;
529 if (off == (ROMUSB_GLB + 0x1c)) /* MS clock */
530 continue;
531 if (off == (NETXEN_CRB_PEG_NET_1 + 0x18) &&
532 !NX_IS_REVISION_P3P(adapter->ahw.revision_id))
533 buf[i].data = 0x1020;
534 /* skip the function enable register */
535 if (off == NETXEN_PCIE_REG(PCIE_SETUP_FUNCTION))
536 continue;
537 if (off == NETXEN_PCIE_REG(PCIE_SETUP_FUNCTION2))
538 continue;
539 if ((off & 0x0ff00000) == NETXEN_CRB_SMB)
540 continue;
541 }
542
543 init_delay = 1;
544 /* After writing this register, HW needs time for CRB */
545 /* to quiet down (else crb_window returns 0xffffffff) */
546 if (off == NETXEN_ROMUSB_GLB_SW_RESET) {
547 init_delay = 1000;
548 if (NX_IS_REVISION_P2(adapter->ahw.revision_id)) {
549 /* hold xdma in reset also */
550 buf[i].data = NETXEN_NIC_XDMA_RESET;
552 }
553 }
554
555 NXWR32(adapter, off, buf[i].data);
556
557 msleep(init_delay);
558 }
559 kfree(buf);
560
561 /* disable_peg_cache_all */
562
563 /* unreset_net_cache */
564 if (NX_IS_REVISION_P2(adapter->ahw.revision_id)) {
565 val = NXRD32(adapter, NETXEN_ROMUSB_GLB_SW_RESET);
566 NXWR32(adapter, NETXEN_ROMUSB_GLB_SW_RESET, (val & 0xffffff0f));
567 }
568
569 /* p2dn replyCount */
570 NXWR32(adapter, NETXEN_CRB_PEG_NET_D + 0xec, 0x1e);
571 /* disable_peg_cache 0 */
572 NXWR32(adapter, NETXEN_CRB_PEG_NET_D + 0x4c, 8);
573 /* disable_peg_cache 1 */
574 NXWR32(adapter, NETXEN_CRB_PEG_NET_I + 0x4c, 8);
575
576 /* peg_clr_all */
577
578 /* peg_clr 0 */
579 NXWR32(adapter, NETXEN_CRB_PEG_NET_0 + 0x8, 0);
580 NXWR32(adapter, NETXEN_CRB_PEG_NET_0 + 0xc, 0);
581 /* peg_clr 1 */
582 NXWR32(adapter, NETXEN_CRB_PEG_NET_1 + 0x8, 0);
583 NXWR32(adapter, NETXEN_CRB_PEG_NET_1 + 0xc, 0);
584 /* peg_clr 2 */
585 NXWR32(adapter, NETXEN_CRB_PEG_NET_2 + 0x8, 0);
586 NXWR32(adapter, NETXEN_CRB_PEG_NET_2 + 0xc, 0);
587 /* peg_clr 3 */
588 NXWR32(adapter, NETXEN_CRB_PEG_NET_3 + 0x8, 0);
589 NXWR32(adapter, NETXEN_CRB_PEG_NET_3 + 0xc, 0);
590 return 0;
591 }
592
593 static struct uni_table_desc *nx_get_table_desc(const u8 *unirom, int section)
594 {
595 uint32_t i;
596 struct uni_table_desc *directory = (struct uni_table_desc *) &unirom[0];
597 __le32 entries = cpu_to_le32(directory->num_entries);
598
599 for (i = 0; i < entries; i++) {
600
601 __le32 offs = cpu_to_le32(directory->findex) +
602 (i * cpu_to_le32(directory->entry_size));
603 __le32 tab_type = cpu_to_le32(*((u32 *)&unirom[offs] + 8));
604
605 if (tab_type == section)
606 return (struct uni_table_desc *) &unirom[offs];
607 }
608
609 return NULL;
610 }
611
612 static int
613 nx_set_product_offs(struct netxen_adapter *adapter)
614 {
615 struct uni_table_desc *ptab_descr;
616 const u8 *unirom = adapter->fw->data;
617 uint32_t i;
618 __le32 entries;
619
620 ptab_descr = nx_get_table_desc(unirom, NX_UNI_DIR_SECT_PRODUCT_TBL);
621 if (ptab_descr == NULL)
622 return -1;
623
624 entries = cpu_to_le32(ptab_descr->num_entries);
625
626 for (i = 0; i < entries; i++) {
627
628 __le32 flags, file_chiprev, offs;
629 u8 chiprev = adapter->ahw.revision_id;
630 int mn_present = netxen_p3_has_mn(adapter);
631 uint32_t flagbit;
632
633 offs = cpu_to_le32(ptab_descr->findex) +
634 (i * cpu_to_le32(ptab_descr->entry_size));
635 flags = cpu_to_le32(*((int *)&unirom[offs] + NX_UNI_FLAGS_OFF));
636 file_chiprev = cpu_to_le32(*((int *)&unirom[offs] +
637 NX_UNI_CHIP_REV_OFF));
638
639 flagbit = mn_present ? 1 : 2;
640
641 if ((chiprev == file_chiprev) &&
642 ((1ULL << flagbit) & flags)) {
643 adapter->file_prd_off = offs;
644 return 0;
645 }
646 }
647
648 return -1;
649 }
650
651
652 static struct uni_data_desc *nx_get_data_desc(struct netxen_adapter *adapter,
653 u32 section, u32 idx_offset)
654 {
655 const u8 *unirom = adapter->fw->data;
656 int idx = cpu_to_le32(*((int *)&unirom[adapter->file_prd_off] +
657 idx_offset));
658 struct uni_table_desc *tab_desc;
659 __le32 offs;
660
661 tab_desc = nx_get_table_desc(unirom, section);
662
663 if (tab_desc == NULL)
664 return NULL;
665
666 offs = cpu_to_le32(tab_desc->findex) +
667 (cpu_to_le32(tab_desc->entry_size) * idx);
668
669 return (struct uni_data_desc *)&unirom[offs];
670 }
671
672 static u8 *
673 nx_get_bootld_offs(struct netxen_adapter *adapter)
674 {
675 u32 offs = NETXEN_BOOTLD_START;
676
677 if (adapter->fw_type == NX_UNIFIED_ROMIMAGE)
678 offs = cpu_to_le32((nx_get_data_desc(adapter,
679 NX_UNI_DIR_SECT_BOOTLD,
680 NX_UNI_BOOTLD_IDX_OFF))->findex);
681
682 return (u8 *)&adapter->fw->data[offs];
683 }
684
685 static u8 *
686 nx_get_fw_offs(struct netxen_adapter *adapter)
687 {
688 u32 offs = NETXEN_IMAGE_START;
689
690 if (adapter->fw_type == NX_UNIFIED_ROMIMAGE)
691 offs = cpu_to_le32((nx_get_data_desc(adapter,
692 NX_UNI_DIR_SECT_FW,
693 NX_UNI_FIRMWARE_IDX_OFF))->findex);
694
695 return (u8 *)&adapter->fw->data[offs];
696 }
697
698 static __le32
699 nx_get_fw_size(struct netxen_adapter *adapter)
700 {
701 if (adapter->fw_type == NX_UNIFIED_ROMIMAGE)
702 return cpu_to_le32((nx_get_data_desc(adapter,
703 NX_UNI_DIR_SECT_FW,
704 NX_UNI_FIRMWARE_IDX_OFF))->size);
705 else
706 return cpu_to_le32(
707 *(u32 *)&adapter->fw->data[NX_FW_SIZE_OFFSET]);
708 }
709
710 static __le32
711 nx_get_fw_version(struct netxen_adapter *adapter)
712 {
713 struct uni_data_desc *fw_data_desc;
714 const struct firmware *fw = adapter->fw;
715 __le32 major, minor, sub;
716 const u8 *ver_str;
717 int i, ret = 0;
718
719 if (adapter->fw_type == NX_UNIFIED_ROMIMAGE) {
720
721 fw_data_desc = nx_get_data_desc(adapter,
722 NX_UNI_DIR_SECT_FW, NX_UNI_FIRMWARE_IDX_OFF);
723 ver_str = fw->data + cpu_to_le32(fw_data_desc->findex) +
724 cpu_to_le32(fw_data_desc->size) - 17;
725
726 for (i = 0; i < 12; i++) {
727 if (!strncmp(&ver_str[i], "REV=", 4)) {
728 ret = sscanf(&ver_str[i+4], "%u.%u.%u ",
729 &major, &minor, &sub);
730 break;
731 }
732 }
733
734 if (ret != 3)
735 return 0;
736
737 return major + (minor << 8) + (sub << 16);
738
739 } else
740 return cpu_to_le32(*(u32 *)&fw->data[NX_FW_VERSION_OFFSET]);
741 }
742
743 static __le32
744 nx_get_bios_version(struct netxen_adapter *adapter)
745 {
746 const struct firmware *fw = adapter->fw;
747 __le32 bios_ver, prd_off = adapter->file_prd_off;
748
749 if (adapter->fw_type == NX_UNIFIED_ROMIMAGE) {
750 bios_ver = cpu_to_le32(*((u32 *) (&fw->data[prd_off])
751 + NX_UNI_BIOS_VERSION_OFF));
752 return (bios_ver << 24) + ((bios_ver >> 8) & 0xff00) +
753 (bios_ver >> 24);
754 } else
755 return cpu_to_le32(*(u32 *)&fw->data[NX_BIOS_VERSION_OFFSET]);
756
757 }
758
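/*
 * Decide whether firmware must be reloaded: watch the peg alive
 * counter (heartbeat) and, if a firmware file is cached, compare its
 * version and type against what is currently running.
 */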
759 int
760 netxen_need_fw_reset(struct netxen_adapter *adapter)
761 {
762 u32 count, old_count;
763 u32 val, version, major, minor, build;
764 int i, timeout;
765 u8 fw_type;
766
767 /* NX2031 firmware doesn't support heartbeat */
768 if (NX_IS_REVISION_P2(adapter->ahw.revision_id))
769 return 1;
770
771 /* last attempt had failed */
772 if (NXRD32(adapter, CRB_CMDPEG_STATE) == PHAN_INITIALIZE_FAILED)
773 return 1;
774
775 old_count = count = NXRD32(adapter, NETXEN_PEG_ALIVE_COUNTER);
776
777 for (i = 0; i < 10; i++) {
778
779 timeout = msleep_interruptible(200);
780 if (timeout) {
781 NXWR32(adapter, CRB_CMDPEG_STATE,
782 PHAN_INITIALIZE_FAILED);
783 return -EINTR;
784 }
785
786 count = NXRD32(adapter, NETXEN_PEG_ALIVE_COUNTER);
787 if (count != old_count)
788 break;
789 }
790
791 /* firmware is dead */
792 if (count == old_count)
793 return 1;
794
795 /* check if we have got newer or different file firmware */
796 if (adapter->fw) {
797
798 val = nx_get_fw_version(adapter);
799
800 version = NETXEN_DECODE_VERSION(val);
801
802 major = NXRD32(adapter, NETXEN_FW_VERSION_MAJOR);
803 minor = NXRD32(adapter, NETXEN_FW_VERSION_MINOR);
804 build = NXRD32(adapter, NETXEN_FW_VERSION_SUB);
805
806 if (version > NETXEN_VERSION_CODE(major, minor, build))
807 return 1;
808
809 if (version == NETXEN_VERSION_CODE(major, minor, build) &&
810 adapter->fw_type != NX_UNIFIED_ROMIMAGE) {
811
812 val = NXRD32(adapter, NETXEN_MIU_MN_CONTROL);
813 fw_type = (val & 0x4) ?
814 NX_P3_CT_ROMIMAGE : NX_P3_MN_ROMIMAGE;
815
816 if (adapter->fw_type != fw_type)
817 return 1;
818 }
819 }
820
821 return 0;
822 }
823
824 static char *fw_name[] = {
825 "nxromimg.bin", "nx3fwct.bin", "nx3fwmn.bin", "phanfw.bin", "flash",
826 };
827
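/*
 * Copy the bootloader and firmware images into adapter memory, either
 * from the firmware file obtained via request_firmware() or, failing
 * that, from the on-board flash, then release the pegs from reset.
 */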
828 int
829 netxen_load_firmware(struct netxen_adapter *adapter)
830 {
831 u64 *ptr64;
832 u32 i, flashaddr, size;
833 const struct firmware *fw = adapter->fw;
834 struct pci_dev *pdev = adapter->pdev;
835
836 dev_info(&pdev->dev, "loading firmware from %s\n",
837 fw_name[adapter->fw_type]);
838
839 if (NX_IS_REVISION_P2(adapter->ahw.revision_id))
840 NXWR32(adapter, NETXEN_ROMUSB_GLB_CAS_RST, 1);
841
842 if (fw) {
843 __le64 data;
844
845 size = (NETXEN_IMAGE_START - NETXEN_BOOTLD_START) / 8;
846
847 ptr64 = (u64 *)nx_get_bootld_offs(adapter);
848 flashaddr = NETXEN_BOOTLD_START;
849
850 for (i = 0; i < size; i++) {
851 data = cpu_to_le64(ptr64[i]);
852
853 if (adapter->pci_mem_write(adapter, flashaddr, data))
854 return -EIO;
855
856 flashaddr += 8;
857 }
858
859 size = (__force u32)nx_get_fw_size(adapter) / 8;
860
861 ptr64 = (u64 *)nx_get_fw_offs(adapter);
862 flashaddr = NETXEN_IMAGE_START;
863
864 for (i = 0; i < size; i++) {
865 data = cpu_to_le64(ptr64[i]);
866
867 if (adapter->pci_mem_write(adapter,
868 flashaddr, data))
869 return -EIO;
870
871 flashaddr += 8;
872 }
873 } else {
874 u64 data;
875 u32 hi, lo;
876
877 size = (NETXEN_IMAGE_START - NETXEN_BOOTLD_START) / 8;
878 flashaddr = NETXEN_BOOTLD_START;
879
880 for (i = 0; i < size; i++) {
881 if (netxen_rom_fast_read(adapter,
882 flashaddr, (int *)&lo) != 0)
883 return -EIO;
884 if (netxen_rom_fast_read(adapter,
885 flashaddr + 4, (int *)&hi) != 0)
886 return -EIO;
887
888 /* hi, lo are already in host endian byteorder */
889 data = (((u64)hi << 32) | lo);
890
891 if (adapter->pci_mem_write(adapter,
892 flashaddr, data))
893 return -EIO;
894
895 flashaddr += 8;
896 }
897 }
898 msleep(1);
899
900 if (NX_IS_REVISION_P3P(adapter->ahw.revision_id)) {
901 NXWR32(adapter, NETXEN_CRB_PEG_NET_0 + 0x18, 0x1020);
902 NXWR32(adapter, NETXEN_ROMUSB_GLB_SW_RESET, 0x80001e);
903 } else if (NX_IS_REVISION_P3(adapter->ahw.revision_id))
904 NXWR32(adapter, NETXEN_ROMUSB_GLB_SW_RESET, 0x80001d);
905 else {
906 NXWR32(adapter, NETXEN_ROMUSB_GLB_CHIP_CLK_CTRL, 0x3fff);
907 NXWR32(adapter, NETXEN_ROMUSB_GLB_CAS_RST, 0);
908 }
909
910 return 0;
911 }
912
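/*
 * Sanity-check a firmware image: verify its magic and minimum size,
 * make sure the version is supported and not older than the flashed
 * firmware, and that its BIOS version matches the flash.
 */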
913 static int
914 netxen_validate_firmware(struct netxen_adapter *adapter)
915 {
916 __le32 val;
917 u32 ver, min_ver, bios, min_size;
918 struct pci_dev *pdev = adapter->pdev;
919 const struct firmware *fw = adapter->fw;
920 u8 fw_type = adapter->fw_type;
921
922 if (fw_type == NX_UNIFIED_ROMIMAGE) {
923 if (nx_set_product_offs(adapter))
924 return -EINVAL;
925
926 min_size = NX_UNI_FW_MIN_SIZE;
927 } else {
928 val = cpu_to_le32(*(u32 *)&fw->data[NX_FW_MAGIC_OFFSET]);
929 if ((__force u32)val != NETXEN_BDINFO_MAGIC)
930 return -EINVAL;
931
932 min_size = NX_FW_MIN_SIZE;
933 }
934
935 if (fw->size < min_size)
936 return -EINVAL;
937
938 val = nx_get_fw_version(adapter);
939
940 if (NX_IS_REVISION_P3(adapter->ahw.revision_id))
941 min_ver = NETXEN_VERSION_CODE(4, 0, 216);
942 else
943 min_ver = NETXEN_VERSION_CODE(3, 4, 216);
944
945 ver = NETXEN_DECODE_VERSION(val);
946
947 if ((_major(ver) > _NETXEN_NIC_LINUX_MAJOR) || (ver < min_ver)) {
948 dev_err(&pdev->dev,
949 "%s: firmware version %d.%d.%d unsupported\n",
950 fw_name[fw_type], _major(ver), _minor(ver), _build(ver));
951 return -EINVAL;
952 }
953
954 val = nx_get_bios_version(adapter);
955 netxen_rom_fast_read(adapter, NX_BIOS_VERSION_OFFSET, (int *)&bios);
956 if ((__force u32)val != bios) {
957 dev_err(&pdev->dev, "%s: firmware bios is incompatible\n",
958 fw_name[fw_type]);
959 return -EINVAL;
960 }
961
962 /* check if flashed firmware is newer */
963 if (netxen_rom_fast_read(adapter,
964 NX_FW_VERSION_OFFSET, (int *)&val))
965 return -EIO;
966 val = NETXEN_DECODE_VERSION(val);
967 if (val > ver) {
968 dev_info(&pdev->dev, "%s: firmware is older than flash\n",
969 fw_name[fw_type]);
970 return -EINVAL;
971 }
972
973 NXWR32(adapter, NETXEN_CAM_RAM(0x1fc), NETXEN_BDINFO_MAGIC);
974 return 0;
975 }
976
977 static void
978 nx_get_next_fwtype(struct netxen_adapter *adapter)
979 {
980 u8 fw_type;
981
982 switch (adapter->fw_type) {
983 case NX_UNKNOWN_ROMIMAGE:
984 fw_type = NX_UNIFIED_ROMIMAGE;
985 break;
986
987 case NX_UNIFIED_ROMIMAGE:
988 if (NX_IS_REVISION_P3P(adapter->ahw.revision_id))
989 fw_type = NX_FLASH_ROMIMAGE;
990 else if (NX_IS_REVISION_P2(adapter->ahw.revision_id))
991 fw_type = NX_P2_MN_ROMIMAGE;
992 else if (netxen_p3_has_mn(adapter))
993 fw_type = NX_P3_MN_ROMIMAGE;
994 else
995 fw_type = NX_P3_CT_ROMIMAGE;
996 break;
997
998 case NX_P3_MN_ROMIMAGE:
999 fw_type = NX_P3_CT_ROMIMAGE;
1000 break;
1001
1002 case NX_P2_MN_ROMIMAGE:
1003 case NX_P3_CT_ROMIMAGE:
1004 default:
1005 fw_type = NX_FLASH_ROMIMAGE;
1006 break;
1007 }
1008
1009 adapter->fw_type = fw_type;
1010 }
1011
1012 static int
1013 netxen_p3_has_mn(struct netxen_adapter *adapter)
1014 {
1015 u32 capability, flashed_ver;
1016 capability = 0;
1017
1018 netxen_rom_fast_read(adapter,
1019 NX_FW_VERSION_OFFSET, (int *)&flashed_ver);
1020 flashed_ver = NETXEN_DECODE_VERSION(flashed_ver);
1021
1022 if (flashed_ver >= NETXEN_VERSION_CODE(4, 0, 220)) {
1023
1024 capability = NXRD32(adapter, NX_PEG_TUNE_CAPABILITY);
1025 if (capability & NX_PEG_TUNE_MN_PRESENT)
1026 return 1;
1027 }
1028 return 0;
1029 }
1030
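/*
 * Try firmware images in order of preference (unified image first,
 * then the chip-specific MN/CT images) and fall back to the flash
 * image if none of them validates.
 */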
1031 void netxen_request_firmware(struct netxen_adapter *adapter)
1032 {
1033 struct pci_dev *pdev = adapter->pdev;
1034 int rc = 0;
1035
1036 adapter->fw_type = NX_UNKNOWN_ROMIMAGE;
1037
1038 next:
1039 nx_get_next_fwtype(adapter);
1040
1041 if (adapter->fw_type == NX_FLASH_ROMIMAGE) {
1042 adapter->fw = NULL;
1043 } else {
1044 rc = request_firmware(&adapter->fw,
1045 fw_name[adapter->fw_type], &pdev->dev);
1046 if (rc != 0)
1047 goto next;
1048
1049 rc = netxen_validate_firmware(adapter);
1050 if (rc != 0) {
1051 release_firmware(adapter->fw);
1052 msleep(1);
1053 goto next;
1054 }
1055 }
1056 }
1057
1058
1059 void
1060 netxen_release_firmware(struct netxen_adapter *adapter)
1061 {
1062 if (adapter->fw)
1063 release_firmware(adapter->fw);
1064 adapter->fw = NULL;
1065 }
1066
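/*
 * Allocate a small coherent DMA area that NX2031 (P2) firmware needs
 * and program its bus address into the CRB; later chips skip this.
 */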
1067 int netxen_init_dummy_dma(struct netxen_adapter *adapter)
1068 {
1069 u64 addr;
1070 u32 hi, lo;
1071
1072 if (!NX_IS_REVISION_P2(adapter->ahw.revision_id))
1073 return 0;
1074
1075 adapter->dummy_dma.addr = pci_alloc_consistent(adapter->pdev,
1076 NETXEN_HOST_DUMMY_DMA_SIZE,
1077 &adapter->dummy_dma.phys_addr);
1078 if (adapter->dummy_dma.addr == NULL) {
1079 dev_err(&adapter->pdev->dev,
1080 "ERROR: Could not allocate dummy DMA memory\n");
1081 return -ENOMEM;
1082 }
1083
1084 addr = (uint64_t) adapter->dummy_dma.phys_addr;
1085 hi = (addr >> 32) & 0xffffffff;
1086 lo = addr & 0xffffffff;
1087
1088 NXWR32(adapter, CRB_HOST_DUMMY_BUF_ADDR_HI, hi);
1089 NXWR32(adapter, CRB_HOST_DUMMY_BUF_ADDR_LO, lo);
1090
1091 return 0;
1092 }
1093
1094 /*
1095 * NetXen DMA watchdog control:
1096 *
1097 * Bit 0 : enabled => R/O: 1 watchdog active, 0 inactive
1098 * Bit 1 : disable_request => 1 req disable dma watchdog
1099 * Bit 2 : enable_request => 1 req enable dma watchdog
1100 * Bit 3-31 : unused
1101 */
1102 void netxen_free_dummy_dma(struct netxen_adapter *adapter)
1103 {
1104 int i = 100;
1105 u32 ctrl;
1106
1107 if (!NX_IS_REVISION_P2(adapter->ahw.revision_id))
1108 return;
1109
1110 if (!adapter->dummy_dma.addr)
1111 return;
1112
1113 ctrl = NXRD32(adapter, NETXEN_DMA_WATCHDOG_CTRL);
1114 if ((ctrl & 0x1) != 0) {
1115 NXWR32(adapter, NETXEN_DMA_WATCHDOG_CTRL, (ctrl | 0x2));
1116
1117 while ((ctrl & 0x1) != 0) {
1118
1119 msleep(50);
1120
1121 ctrl = NXRD32(adapter, NETXEN_DMA_WATCHDOG_CTRL);
1122
1123 if (--i == 0)
1124 break;
1125 }
1126 }
1127
1128 if (i) {
1129 pci_free_consistent(adapter->pdev,
1130 NETXEN_HOST_DUMMY_DMA_SIZE,
1131 adapter->dummy_dma.addr,
1132 adapter->dummy_dma.phys_addr);
1133 adapter->dummy_dma.addr = NULL;
1134 } else
1135 dev_err(&adapter->pdev->dev, "dma_watchdog_shutdown failed\n");
1136 }
1137
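/*
 * Wait up to 30 seconds (60 x 500 ms) for the command peg to report
 * that firmware initialization completed; mark the state as failed on
 * timeout.
 */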
1138 int netxen_phantom_init(struct netxen_adapter *adapter, int pegtune_val)
1139 {
1140 u32 val = 0;
1141 int retries = 60;
1142
1143 if (pegtune_val)
1144 return 0;
1145
1146 do {
1147 val = NXRD32(adapter, CRB_CMDPEG_STATE);
1148
1149 switch (val) {
1150 case PHAN_INITIALIZE_COMPLETE:
1151 case PHAN_INITIALIZE_ACK:
1152 return 0;
1153 case PHAN_INITIALIZE_FAILED:
1154 goto out_err;
1155 default:
1156 break;
1157 }
1158
1159 msleep(500);
1160
1161 } while (--retries);
1162
1163 NXWR32(adapter, CRB_CMDPEG_STATE, PHAN_INITIALIZE_FAILED);
1164
1165 out_err:
1166 dev_warn(&adapter->pdev->dev, "firmware init failed\n");
1167 return -EIO;
1168 }
1169
1170 static int
1171 netxen_receive_peg_ready(struct netxen_adapter *adapter)
1172 {
1173 u32 val = 0;
1174 int retries = 2000;
1175
1176 do {
1177 val = NXRD32(adapter, CRB_RCVPEG_STATE);
1178
1179 if (val == PHAN_PEG_RCV_INITIALIZED)
1180 return 0;
1181
1182 msleep(10);
1183
1184 } while (--retries);
1185
1186 if (!retries) {
1187 printk(KERN_ERR "Receive Peg initialization not "
1188 "complete, state: 0x%x.\n", val);
1189 return -EIO;
1190 }
1191
1192 return 0;
1193 }
1194
1195 int netxen_init_firmware(struct netxen_adapter *adapter)
1196 {
1197 int err;
1198
1199 err = netxen_receive_peg_ready(adapter);
1200 if (err)
1201 return err;
1202
1203 NXWR32(adapter, CRB_NIC_CAPABILITIES_HOST, INTR_SCHEME_PERPORT);
1204 NXWR32(adapter, CRB_NIC_MSI_MODE_HOST, MSI_MODE_MULTIFUNC);
1205 NXWR32(adapter, CRB_MPORT_MODE, MPORT_MULTI_FUNCTION_MODE);
1206 NXWR32(adapter, CRB_CMDPEG_STATE, PHAN_INITIALIZE_ACK);
1207
1208 return err;
1209 }
1210
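/*
 * Decode a link event message from firmware (speed, duplex, cable and
 * module info) and propagate the new link state to the netdev layer.
 */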
1211 static void
1212 netxen_handle_linkevent(struct netxen_adapter *adapter, nx_fw_msg_t *msg)
1213 {
1214 u32 cable_OUI;
1215 u16 cable_len;
1216 u16 link_speed;
1217 u8 link_status, module, duplex, autoneg;
1218 struct net_device *netdev = adapter->netdev;
1219
1220 adapter->has_link_events = 1;
1221
1222 cable_OUI = msg->body[1] & 0xffffffff;
1223 cable_len = (msg->body[1] >> 32) & 0xffff;
1224 link_speed = (msg->body[1] >> 48) & 0xffff;
1225
1226 link_status = msg->body[2] & 0xff;
1227 duplex = (msg->body[2] >> 16) & 0xff;
1228 autoneg = (msg->body[2] >> 24) & 0xff;
1229
1230 module = (msg->body[2] >> 8) & 0xff;
1231 if (module == LINKEVENT_MODULE_TWINAX_UNSUPPORTED_CABLE) {
1232 printk(KERN_INFO "%s: unsupported cable: OUI 0x%x, length %d\n",
1233 netdev->name, cable_OUI, cable_len);
1234 } else if (module == LINKEVENT_MODULE_TWINAX_UNSUPPORTED_CABLELEN) {
1235 printk(KERN_INFO "%s: unsupported cable length %d\n",
1236 netdev->name, cable_len);
1237 }
1238
1239 netxen_advert_link_change(adapter, link_status);
1240
1241 /* update link parameters */
1242 if (duplex == LINKEVENT_FULL_DUPLEX)
1243 adapter->link_duplex = DUPLEX_FULL;
1244 else
1245 adapter->link_duplex = DUPLEX_HALF;
1246 adapter->module_type = module;
1247 adapter->link_autoneg = autoneg;
1248 adapter->link_speed = link_speed;
1249 }
1250
1251 static void
1252 netxen_handle_fw_message(int desc_cnt, int index,
1253 struct nx_host_sds_ring *sds_ring)
1254 {
1255 nx_fw_msg_t msg;
1256 struct status_desc *desc;
1257 int i = 0, opcode;
1258
1259 while (desc_cnt > 0 && i < 8) {
1260 desc = &sds_ring->desc_head[index];
1261 msg.words[i++] = le64_to_cpu(desc->status_desc_data[0]);
1262 msg.words[i++] = le64_to_cpu(desc->status_desc_data[1]);
1263
1264 index = get_next_index(index, sds_ring->num_desc);
1265 desc_cnt--;
1266 }
1267
1268 opcode = netxen_get_nic_msg_opcode(msg.body[0]);
1269 switch (opcode) {
1270 case NX_NIC_C2H_OPCODE_GET_LINKEVENT_RESPONSE:
1271 netxen_handle_linkevent(sds_ring->adapter, &msg);
1272 break;
1273 default:
1274 break;
1275 }
1276 }
1277
1278 static int
1279 netxen_alloc_rx_skb(struct netxen_adapter *adapter,
1280 struct nx_host_rds_ring *rds_ring,
1281 struct netxen_rx_buffer *buffer)
1282 {
1283 struct sk_buff *skb;
1284 dma_addr_t dma;
1285 struct pci_dev *pdev = adapter->pdev;
1286
1287 buffer->skb = dev_alloc_skb(rds_ring->skb_size);
1288 if (!buffer->skb)
1289 return 1;
1290
1291 skb = buffer->skb;
1292
1293 if (!adapter->ahw.cut_through)
1294 skb_reserve(skb, 2);
1295
1296 dma = pci_map_single(pdev, skb->data,
1297 rds_ring->dma_size, PCI_DMA_FROMDEVICE);
1298
1299 if (pci_dma_mapping_error(pdev, dma)) {
1300 dev_kfree_skb_any(skb);
1301 buffer->skb = NULL;
1302 return 1;
1303 }
1304
1305 buffer->skb = skb;
1306 buffer->dma = dma;
1307 buffer->state = NETXEN_BUFFER_BUSY;
1308
1309 return 0;
1310 }
1311
1312 static struct sk_buff *netxen_process_rxbuf(struct netxen_adapter *adapter,
1313 struct nx_host_rds_ring *rds_ring, u16 index, u16 cksum)
1314 {
1315 struct netxen_rx_buffer *buffer;
1316 struct sk_buff *skb;
1317
1318 buffer = &rds_ring->rx_buf_arr[index];
1319
1320 pci_unmap_single(adapter->pdev, buffer->dma, rds_ring->dma_size,
1321 PCI_DMA_FROMDEVICE);
1322
1323 skb = buffer->skb;
1324 if (!skb)
1325 goto no_skb;
1326
1327 if (likely(adapter->rx_csum && cksum == STATUS_CKSUM_OK)) {
1328 adapter->stats.csummed++;
1329 skb->ip_summed = CHECKSUM_UNNECESSARY;
1330 } else
1331 skb->ip_summed = CHECKSUM_NONE;
1332
1333 skb->dev = adapter->netdev;
1334
1335 buffer->skb = NULL;
1336 no_skb:
1337 buffer->state = NETXEN_BUFFER_FREE;
1338 return skb;
1339 }
1340
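/*
 * Handle one receive completion: reclaim the rx buffer named by the
 * status descriptor, fix up the skb (length, checksum status,
 * protocol) and hand it to the stack via napi_gro_receive().
 */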
1341 static struct netxen_rx_buffer *
1342 netxen_process_rcv(struct netxen_adapter *adapter,
1343 struct nx_host_sds_ring *sds_ring,
1344 int ring, u64 sts_data0)
1345 {
1346 struct net_device *netdev = adapter->netdev;
1347 struct netxen_recv_context *recv_ctx = &adapter->recv_ctx;
1348 struct netxen_rx_buffer *buffer;
1349 struct sk_buff *skb;
1350 struct nx_host_rds_ring *rds_ring;
1351 int index, length, cksum, pkt_offset;
1352
1353 if (unlikely(ring >= adapter->max_rds_rings))
1354 return NULL;
1355
1356 rds_ring = &recv_ctx->rds_rings[ring];
1357
1358 index = netxen_get_sts_refhandle(sts_data0);
1359 if (unlikely(index >= rds_ring->num_desc))
1360 return NULL;
1361
1362 buffer = &rds_ring->rx_buf_arr[index];
1363
1364 length = netxen_get_sts_totallength(sts_data0);
1365 cksum = netxen_get_sts_status(sts_data0);
1366 pkt_offset = netxen_get_sts_pkt_offset(sts_data0);
1367
1368 skb = netxen_process_rxbuf(adapter, rds_ring, index, cksum);
1369 if (!skb)
1370 return buffer;
1371
1372 if (length > rds_ring->skb_size)
1373 skb_put(skb, rds_ring->skb_size);
1374 else
1375 skb_put(skb, length);
1376
1377
1378 if (pkt_offset)
1379 skb_pull(skb, pkt_offset);
1380
1381 skb->truesize = skb->len + sizeof(struct sk_buff);
1382 skb->protocol = eth_type_trans(skb, netdev);
1383
1384 napi_gro_receive(&sds_ring->napi, skb);
1385
1386 adapter->stats.rx_pkts++;
1387 adapter->stats.rxbytes += length;
1388
1389 return buffer;
1390 }
1391
1392 #define TCP_HDR_SIZE 20
1393 #define TCP_TS_OPTION_SIZE 12
1394 #define TCP_TS_HDR_SIZE (TCP_HDR_SIZE + TCP_TS_OPTION_SIZE)
1395
1396 static struct netxen_rx_buffer *
1397 netxen_process_lro(struct netxen_adapter *adapter,
1398 struct nx_host_sds_ring *sds_ring,
1399 int ring, u64 sts_data0, u64 sts_data1)
1400 {
1401 struct net_device *netdev = adapter->netdev;
1402 struct netxen_recv_context *recv_ctx = &adapter->recv_ctx;
1403 struct netxen_rx_buffer *buffer;
1404 struct sk_buff *skb;
1405 struct nx_host_rds_ring *rds_ring;
1406 struct iphdr *iph;
1407 struct tcphdr *th;
1408 bool push, timestamp;
1409 int l2_hdr_offset, l4_hdr_offset;
1410 int index;
1411 u16 lro_length, length, data_offset;
1412 u32 seq_number;
1413
1414 if (unlikely(ring >= adapter->max_rds_rings))
1415 return NULL;
1416
1417 rds_ring = &recv_ctx->rds_rings[ring];
1418
1419 index = netxen_get_lro_sts_refhandle(sts_data0);
1420 if (unlikely(index >= rds_ring->num_desc))
1421 return NULL;
1422
1423 buffer = &rds_ring->rx_buf_arr[index];
1424
1425 timestamp = netxen_get_lro_sts_timestamp(sts_data0);
1426 lro_length = netxen_get_lro_sts_length(sts_data0);
1427 l2_hdr_offset = netxen_get_lro_sts_l2_hdr_offset(sts_data0);
1428 l4_hdr_offset = netxen_get_lro_sts_l4_hdr_offset(sts_data0);
1429 push = netxen_get_lro_sts_push_flag(sts_data0);
1430 seq_number = netxen_get_lro_sts_seq_number(sts_data1);
1431
1432 skb = netxen_process_rxbuf(adapter, rds_ring, index, STATUS_CKSUM_OK);
1433 if (!skb)
1434 return buffer;
1435
1436 if (timestamp)
1437 data_offset = l4_hdr_offset + TCP_TS_HDR_SIZE;
1438 else
1439 data_offset = l4_hdr_offset + TCP_HDR_SIZE;
1440
1441 skb_put(skb, lro_length + data_offset);
1442
1443 skb->truesize = skb->len + sizeof(struct sk_buff) + skb_headroom(skb);
1444
1445 skb_pull(skb, l2_hdr_offset);
1446 skb->protocol = eth_type_trans(skb, netdev);
1447
1448 iph = (struct iphdr *)skb->data;
1449 th = (struct tcphdr *)(skb->data + (iph->ihl << 2));
1450
1451 length = (iph->ihl << 2) + (th->doff << 2) + lro_length;
1452 iph->tot_len = htons(length);
1453 iph->check = 0;
1454 iph->check = ip_fast_csum((unsigned char *)iph, iph->ihl);
1455 th->psh = push;
1456 th->seq = htonl(seq_number);
1457
1458 length = skb->len;
1459
1460 netif_receive_skb(skb);
1461
1462 adapter->stats.lro_pkts++;
1463 adapter->stats.rxbytes += length;
1464
1465 return buffer;
1466 }
1467
1468 #define netxen_merge_rx_buffers(list, head) \
1469 do { list_splice_tail_init(list, head); } while (0)
1470
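/*
 * NAPI poll worker for one status ring: consume up to @max status
 * descriptors, dispatch rx, LRO and firmware-message completions,
 * replenish the rx buffers that were used and advance the consumer.
 */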
1471 int
1472 netxen_process_rcv_ring(struct nx_host_sds_ring *sds_ring, int max)
1473 {
1474 struct netxen_adapter *adapter = sds_ring->adapter;
1475
1476 struct list_head *cur;
1477
1478 struct status_desc *desc;
1479 struct netxen_rx_buffer *rxbuf;
1480
1481 u32 consumer = sds_ring->consumer;
1482
1483 int count = 0;
1484 u64 sts_data0, sts_data1;
1485 int opcode, ring = 0, desc_cnt;
1486
1487 while (count < max) {
1488 desc = &sds_ring->desc_head[consumer];
1489 sts_data0 = le64_to_cpu(desc->status_desc_data[0]);
1490
1491 if (!(sts_data0 & STATUS_OWNER_HOST))
1492 break;
1493
1494 desc_cnt = netxen_get_sts_desc_cnt(sts_data0);
1495
1496 opcode = netxen_get_sts_opcode(sts_data0);
1497
1498 switch (opcode) {
1499 case NETXEN_NIC_RXPKT_DESC:
1500 case NETXEN_OLD_RXPKT_DESC:
1501 case NETXEN_NIC_SYN_OFFLOAD:
1502 ring = netxen_get_sts_type(sts_data0);
1503 rxbuf = netxen_process_rcv(adapter, sds_ring,
1504 ring, sts_data0);
1505 break;
1506 case NETXEN_NIC_LRO_DESC:
1507 ring = netxen_get_lro_sts_type(sts_data0);
1508 sts_data1 = le64_to_cpu(desc->status_desc_data[1]);
1509 rxbuf = netxen_process_lro(adapter, sds_ring,
1510 ring, sts_data0, sts_data1);
1511 break;
1512 case NETXEN_NIC_RESPONSE_DESC:
1513 netxen_handle_fw_message(desc_cnt, consumer, sds_ring);
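/* fall through */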
1514 default:
1515 goto skip;
1516 }
1517
1518 WARN_ON(desc_cnt > 1);
1519
1520 if (rxbuf)
1521 list_add_tail(&rxbuf->list, &sds_ring->free_list[ring]);
1522
1523 skip:
1524 for (; desc_cnt > 0; desc_cnt--) {
1525 desc = &sds_ring->desc_head[consumer];
1526 desc->status_desc_data[0] =
1527 cpu_to_le64(STATUS_OWNER_PHANTOM);
1528 consumer = get_next_index(consumer, sds_ring->num_desc);
1529 }
1530 count++;
1531 }
1532
1533 for (ring = 0; ring < adapter->max_rds_rings; ring++) {
1534 struct nx_host_rds_ring *rds_ring =
1535 &adapter->recv_ctx.rds_rings[ring];
1536
1537 if (!list_empty(&sds_ring->free_list[ring])) {
1538 list_for_each(cur, &sds_ring->free_list[ring]) {
1539 rxbuf = list_entry(cur,
1540 struct netxen_rx_buffer, list);
1541 netxen_alloc_rx_skb(adapter, rds_ring, rxbuf);
1542 }
1543 spin_lock(&rds_ring->lock);
1544 netxen_merge_rx_buffers(&sds_ring->free_list[ring],
1545 &rds_ring->free_list);
1546 spin_unlock(&rds_ring->lock);
1547 }
1548
1549 netxen_post_rx_buffers_nodb(adapter, rds_ring);
1550 }
1551
1552 if (count) {
1553 sds_ring->consumer = consumer;
1554 NXWRIO(adapter, sds_ring->crb_sts_consumer, consumer);
1555 }
1556
1557 return count;
1558 }
1559
1560 /* Process Command status ring */
1561 int netxen_process_cmd_ring(struct netxen_adapter *adapter)
1562 {
1563 u32 sw_consumer, hw_consumer;
1564 int count = 0, i;
1565 struct netxen_cmd_buffer *buffer;
1566 struct pci_dev *pdev = adapter->pdev;
1567 struct net_device *netdev = adapter->netdev;
1568 struct netxen_skb_frag *frag;
1569 int done = 0;
1570 struct nx_host_tx_ring *tx_ring = adapter->tx_ring;
1571
1572 if (!spin_trylock(&adapter->tx_clean_lock))
1573 return 1;
1574
1575 sw_consumer = tx_ring->sw_consumer;
1576 hw_consumer = le32_to_cpu(*(tx_ring->hw_consumer));
1577
1578 while (sw_consumer != hw_consumer) {
1579 buffer = &tx_ring->cmd_buf_arr[sw_consumer];
1580 if (buffer->skb) {
1581 frag = &buffer->frag_array[0];
1582 pci_unmap_single(pdev, frag->dma, frag->length,
1583 PCI_DMA_TODEVICE);
1584 frag->dma = 0ULL;
1585 for (i = 1; i < buffer->frag_count; i++) {
1586 frag++; /* Get the next frag */
1587 pci_unmap_page(pdev, frag->dma, frag->length,
1588 PCI_DMA_TODEVICE);
1589 frag->dma = 0ULL;
1590 }
1591
1592 adapter->stats.xmitfinished++;
1593 dev_kfree_skb_any(buffer->skb);
1594 buffer->skb = NULL;
1595 }
1596
1597 sw_consumer = get_next_index(sw_consumer, tx_ring->num_desc);
1598 if (++count >= MAX_STATUS_HANDLE)
1599 break;
1600 }
1601
1602 if (count && netif_running(netdev)) {
1603 tx_ring->sw_consumer = sw_consumer;
1604
1605 smp_mb();
1606
1607 if (netif_queue_stopped(netdev) && netif_carrier_ok(netdev)) {
1608 __netif_tx_lock(tx_ring->txq, smp_processor_id());
1609 if (netxen_tx_avail(tx_ring) > TX_STOP_THRESH) {
1610 netif_wake_queue(netdev);
1611 adapter->tx_timeo_cnt = 0;
1612 }
1613 __netif_tx_unlock(tx_ring->txq);
1614 }
1615 }
1616 /*
1617 * If everything is freed up to the consumer, then check if the ring is full.
1618 * If the ring is full then check if more needs to be freed and
1619 * schedule the callback again.
1620 *
1621 * This happens when there are 2 CPUs. One could be freeing and the
1622 * other filling it. If the ring is full when we get out of here and
1623 * the card has already interrupted the host then the host can miss the
1624 * interrupt.
1625 *
1626 * There is still a possible race condition and the host could miss an
1627 * interrupt. The card has to take care of this.
1628 */
1629 hw_consumer = le32_to_cpu(*(tx_ring->hw_consumer));
1630 done = (sw_consumer == hw_consumer);
1631 spin_unlock(&adapter->tx_clean_lock);
1632
1633 return (done);
1634 }
1635
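/*
 * Refill an RDS ring from its free list and advance the producer; on
 * NX2031 (P2) also write a doorbell message so firmware notices the
 * new buffers.
 */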
1636 void
1637 netxen_post_rx_buffers(struct netxen_adapter *adapter, u32 ringid,
1638 struct nx_host_rds_ring *rds_ring)
1639 {
1640 struct rcv_desc *pdesc;
1641 struct netxen_rx_buffer *buffer;
1642 int producer, count = 0;
1643 netxen_ctx_msg msg = 0;
1644 struct list_head *head;
1645
1646 producer = rds_ring->producer;
1647
1648 spin_lock(&rds_ring->lock);
1649 head = &rds_ring->free_list;
1650 while (!list_empty(head)) {
1651
1652 buffer = list_entry(head->next, struct netxen_rx_buffer, list);
1653
1654 if (!buffer->skb) {
1655 if (netxen_alloc_rx_skb(adapter, rds_ring, buffer))
1656 break;
1657 }
1658
1659 count++;
1660 list_del(&buffer->list);
1661
1662 /* make a rcv descriptor */
1663 pdesc = &rds_ring->desc_head[producer];
1664 pdesc->addr_buffer = cpu_to_le64(buffer->dma);
1665 pdesc->reference_handle = cpu_to_le16(buffer->ref_handle);
1666 pdesc->buffer_length = cpu_to_le32(rds_ring->dma_size);
1667
1668 producer = get_next_index(producer, rds_ring->num_desc);
1669 }
1670 spin_unlock(&rds_ring->lock);
1671
1672 if (count) {
1673 rds_ring->producer = producer;
1674 NXWRIO(adapter, rds_ring->crb_rcv_producer,
1675 (producer-1) & (rds_ring->num_desc-1));
1676
1677 if (NX_IS_REVISION_P2(adapter->ahw.revision_id)) {
1678 /*
1679 * Write a doorbell msg to tell phanmon of change in
1680 * receive ring producer
1681 * Only for firmware version < 4.0.0
1682 */
1683 netxen_set_msg_peg_id(msg, NETXEN_RCV_PEG_DB_ID);
1684 netxen_set_msg_privid(msg);
1685 netxen_set_msg_count(msg,
1686 ((producer - 1) &
1687 (rds_ring->num_desc - 1)));
1688 netxen_set_msg_ctxid(msg, adapter->portnum);
1689 netxen_set_msg_opcode(msg, NETXEN_RCV_PRODUCER(ringid));
1690 NXWRIO(adapter, DB_NORMALIZE(adapter,
1691 NETXEN_RCV_PRODUCER_OFFSET), msg);
1692 }
1693 }
1694 }
1695
1696 static void
1697 netxen_post_rx_buffers_nodb(struct netxen_adapter *adapter,
1698 struct nx_host_rds_ring *rds_ring)
1699 {
1700 struct rcv_desc *pdesc;
1701 struct netxen_rx_buffer *buffer;
1702 int producer, count = 0;
1703 struct list_head *head;
1704
1705 producer = rds_ring->producer;
1706 if (!spin_trylock(&rds_ring->lock))
1707 return;
1708
1709 head = &rds_ring->free_list;
1710 while (!list_empty(head)) {
1711
1712 buffer = list_entry(head->next, struct netxen_rx_buffer, list);
1713
1714 if (!buffer->skb) {
1715 if (netxen_alloc_rx_skb(adapter, rds_ring, buffer))
1716 break;
1717 }
1718
1719 count++;
1720 list_del(&buffer->list);
1721
1722 /* make a rcv descriptor */
1723 pdesc = &rds_ring->desc_head[producer];
1724 pdesc->reference_handle = cpu_to_le16(buffer->ref_handle);
1725 pdesc->buffer_length = cpu_to_le32(rds_ring->dma_size);
1726 pdesc->addr_buffer = cpu_to_le64(buffer->dma);
1727
1728 producer = get_next_index(producer, rds_ring->num_desc);
1729 }
1730
1731 if (count) {
1732 rds_ring->producer = producer;
1733 NXWRIO(adapter, rds_ring->crb_rcv_producer,
1734 (producer - 1) & (rds_ring->num_desc - 1));
1735 }
1736 spin_unlock(&rds_ring->lock);
1737 }
1738
1739 void netxen_nic_clear_stats(struct netxen_adapter *adapter)
1740 {
1741 memset(&adapter->stats, 0, sizeof(adapter->stats));
1742 return;
1743 }
1744