drivers/net/netxen/netxen_nic_init.c (deliverable/linux.git)
1 /*
2 * Copyright (C) 2003 - 2009 NetXen, Inc.
3 * All rights reserved.
4 *
5 * This program is free software; you can redistribute it and/or
6 * modify it under the terms of the GNU General Public License
7 * as published by the Free Software Foundation; either version 2
8 * of the License, or (at your option) any later version.
9 *
10 * This program is distributed in the hope that it will be useful, but
11 * WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details.
14 *
15 * You should have received a copy of the GNU General Public License
16 * along with this program; if not, write to the Free Software
17 * Foundation, Inc., 59 Temple Place - Suite 330, Boston,
18 * MA 02111-1307, USA.
19 *
20 * The full GNU General Public License is included in this distribution
21 * in the file called LICENSE.
22 *
23 * Contact Information:
24 * info@netxen.com
25 * NetXen Inc,
26 * 18922 Forge Drive
27 * Cupertino, CA 95014-0701
28 *
29 */
30
31 #include <linux/netdevice.h>
32 #include <linux/delay.h>
33 #include "netxen_nic.h"
34 #include "netxen_nic_hw.h"
35 #include "netxen_nic_phan_reg.h"
36
37 struct crb_addr_pair {
38 u32 addr;
39 u32 data;
40 };
41
42 #define NETXEN_MAX_CRB_XFORM 60
43 static unsigned int crb_addr_xform[NETXEN_MAX_CRB_XFORM];
44 #define NETXEN_ADDR_ERROR (0xffffffff)
45
46 #define crb_addr_transform(name) \
47 crb_addr_xform[NETXEN_HW_PX_MAP_CRB_##name] = \
48 NETXEN_HW_CRB_HUB_AGT_ADR_##name << 20
49
50 #define NETXEN_NIC_XDMA_RESET 0x8000ff
51
52 static void
53 netxen_post_rx_buffers_nodb(struct netxen_adapter *adapter,
54 struct nx_host_rds_ring *rds_ring);
55
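/*
 * Build the table that maps each on-chip CRB hub/agent window to its
 * 1MB-aligned offset in PCI CRB space; netxen_decode_crb_addr() below
 * walks this table to translate addresses read from flash.
 */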
56 static void crb_addr_transform_setup(void)
57 {
58 crb_addr_transform(XDMA);
59 crb_addr_transform(TIMR);
60 crb_addr_transform(SRE);
61 crb_addr_transform(SQN3);
62 crb_addr_transform(SQN2);
63 crb_addr_transform(SQN1);
64 crb_addr_transform(SQN0);
65 crb_addr_transform(SQS3);
66 crb_addr_transform(SQS2);
67 crb_addr_transform(SQS1);
68 crb_addr_transform(SQS0);
69 crb_addr_transform(RPMX7);
70 crb_addr_transform(RPMX6);
71 crb_addr_transform(RPMX5);
72 crb_addr_transform(RPMX4);
73 crb_addr_transform(RPMX3);
74 crb_addr_transform(RPMX2);
75 crb_addr_transform(RPMX1);
76 crb_addr_transform(RPMX0);
77 crb_addr_transform(ROMUSB);
78 crb_addr_transform(SN);
79 crb_addr_transform(QMN);
80 crb_addr_transform(QMS);
81 crb_addr_transform(PGNI);
82 crb_addr_transform(PGND);
83 crb_addr_transform(PGN3);
84 crb_addr_transform(PGN2);
85 crb_addr_transform(PGN1);
86 crb_addr_transform(PGN0);
87 crb_addr_transform(PGSI);
88 crb_addr_transform(PGSD);
89 crb_addr_transform(PGS3);
90 crb_addr_transform(PGS2);
91 crb_addr_transform(PGS1);
92 crb_addr_transform(PGS0);
93 crb_addr_transform(PS);
94 crb_addr_transform(PH);
95 crb_addr_transform(NIU);
96 crb_addr_transform(I2Q);
97 crb_addr_transform(EG);
98 crb_addr_transform(MN);
99 crb_addr_transform(MS);
100 crb_addr_transform(CAS2);
101 crb_addr_transform(CAS1);
102 crb_addr_transform(CAS0);
103 crb_addr_transform(CAM);
104 crb_addr_transform(C2C1);
105 crb_addr_transform(C2C0);
106 crb_addr_transform(SMB);
107 crb_addr_transform(OCM0);
108 crb_addr_transform(I2C0);
109 }
110
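/*
 * Wait (roughly two seconds) for the firmware command peg to report
 * PHAN_INITIALIZE_COMPLETE, then advertise the per-port interrupt
 * scheme, multi-function MSI mode and multi-port mode, and ack the
 * handshake.
 */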
111 int netxen_init_firmware(struct netxen_adapter *adapter)
112 {
113 u32 state = 0, loops = 0, err = 0;
114
115 /* Window 1 call */
116 state = adapter->pci_read_normalize(adapter, CRB_CMDPEG_STATE);
117
118 if (state == PHAN_INITIALIZE_ACK)
119 return 0;
120
121 while (state != PHAN_INITIALIZE_COMPLETE && loops < 2000) {
122 msleep(1);
123 /* Window 1 call */
124 state = adapter->pci_read_normalize(adapter, CRB_CMDPEG_STATE);
125
126 loops++;
127 }
128 if (loops >= 2000) {
129 printk(KERN_ERR "Cmd Peg initialization not complete:%x.\n",
130 state);
131 err = -EIO;
132 return err;
133 }
134 /* Window 1 call */
135 adapter->pci_write_normalize(adapter,
136 CRB_NIC_CAPABILITIES_HOST, INTR_SCHEME_PERPORT);
137 adapter->pci_write_normalize(adapter,
138 CRB_NIC_MSI_MODE_HOST, MSI_MODE_MULTIFUNC);
139 adapter->pci_write_normalize(adapter,
140 CRB_MPORT_MODE, MPORT_MULTI_FUNCTION_MODE);
141 adapter->pci_write_normalize(adapter,
142 CRB_CMDPEG_STATE, PHAN_INITIALIZE_ACK);
143
144 return err;
145 }
146
147 void netxen_release_rx_buffers(struct netxen_adapter *adapter)
148 {
149 struct netxen_recv_context *recv_ctx;
150 struct nx_host_rds_ring *rds_ring;
151 struct netxen_rx_buffer *rx_buf;
152 int i, ring;
153
154 recv_ctx = &adapter->recv_ctx;
155 for (ring = 0; ring < adapter->max_rds_rings; ring++) {
156 rds_ring = &recv_ctx->rds_rings[ring];
157 for (i = 0; i < rds_ring->num_desc; ++i) {
158 rx_buf = &(rds_ring->rx_buf_arr[i]);
159 if (rx_buf->state == NETXEN_BUFFER_FREE)
160 continue;
161 pci_unmap_single(adapter->pdev,
162 rx_buf->dma,
163 rds_ring->dma_size,
164 PCI_DMA_FROMDEVICE);
165 if (rx_buf->skb != NULL)
166 dev_kfree_skb_any(rx_buf->skb);
167 }
168 }
169 }
170
171 void netxen_release_tx_buffers(struct netxen_adapter *adapter)
172 {
173 struct netxen_cmd_buffer *cmd_buf;
174 struct netxen_skb_frag *buffrag;
175 int i, j;
176
177 cmd_buf = adapter->cmd_buf_arr;
178 for (i = 0; i < adapter->num_txd; i++) {
179 buffrag = cmd_buf->frag_array;
180 if (buffrag->dma) {
181 pci_unmap_single(adapter->pdev, buffrag->dma,
182 buffrag->length, PCI_DMA_TODEVICE);
183 buffrag->dma = 0ULL;
184 }
185 for (j = 0; j < cmd_buf->frag_count; j++) {
186 buffrag++;
187 if (buffrag->dma) {
188 pci_unmap_page(adapter->pdev, buffrag->dma,
189 buffrag->length,
190 PCI_DMA_TODEVICE);
191 buffrag->dma = 0ULL;
192 }
193 }
194 if (cmd_buf->skb) {
195 dev_kfree_skb_any(cmd_buf->skb);
196 cmd_buf->skb = NULL;
197 }
198 cmd_buf++;
199 }
200 }
201
202 void netxen_free_sw_resources(struct netxen_adapter *adapter)
203 {
204 struct netxen_recv_context *recv_ctx;
205 struct nx_host_rds_ring *rds_ring;
206 int ring;
207
208 recv_ctx = &adapter->recv_ctx;
209 for (ring = 0; ring < adapter->max_rds_rings; ring++) {
210 rds_ring = &recv_ctx->rds_rings[ring];
211 if (rds_ring->rx_buf_arr) {
212 vfree(rds_ring->rx_buf_arr);
213 rds_ring->rx_buf_arr = NULL;
214 }
215 }
216
217 if (adapter->cmd_buf_arr)
218 vfree(adapter->cmd_buf_arr);
219 return;
220 }
221
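/*
 * Allocate the host-side bookkeeping for the rings: the tx command
 * buffer array plus one rx buffer array per RDS ring, with every rx
 * buffer given a reference handle and parked on the ring's free list.
 */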
222 int netxen_alloc_sw_resources(struct netxen_adapter *adapter)
223 {
224 struct netxen_recv_context *recv_ctx;
225 struct nx_host_rds_ring *rds_ring;
226 struct nx_host_sds_ring *sds_ring;
227 struct netxen_rx_buffer *rx_buf;
228 int ring, i, num_rx_bufs;
229
230 struct netxen_cmd_buffer *cmd_buf_arr;
231 struct net_device *netdev = adapter->netdev;
232
233 cmd_buf_arr =
234 (struct netxen_cmd_buffer *)vmalloc(TX_BUFF_RINGSIZE(adapter));
235 if (cmd_buf_arr == NULL) {
236 printk(KERN_ERR "%s: Failed to allocate cmd buffer ring\n",
237 netdev->name);
238 return -ENOMEM;
239 }
240 memset(cmd_buf_arr, 0, TX_BUFF_RINGSIZE(adapter));
241 adapter->cmd_buf_arr = cmd_buf_arr;
242
243 recv_ctx = &adapter->recv_ctx;
244 for (ring = 0; ring < adapter->max_rds_rings; ring++) {
245 rds_ring = &recv_ctx->rds_rings[ring];
246 switch (ring) {
247 case RCV_RING_NORMAL:
248 rds_ring->num_desc = adapter->num_rxd;
249 if (adapter->ahw.cut_through) {
250 rds_ring->dma_size =
251 NX_CT_DEFAULT_RX_BUF_LEN;
252 rds_ring->skb_size =
253 NX_CT_DEFAULT_RX_BUF_LEN;
254 } else {
255 rds_ring->dma_size = RX_DMA_MAP_LEN;
256 rds_ring->skb_size =
257 MAX_RX_BUFFER_LENGTH;
258 }
259 break;
260
261 case RCV_RING_JUMBO:
262 rds_ring->num_desc = adapter->num_jumbo_rxd;
263 if (NX_IS_REVISION_P3(adapter->ahw.revision_id))
264 rds_ring->dma_size =
265 NX_P3_RX_JUMBO_BUF_MAX_LEN;
266 else
267 rds_ring->dma_size =
268 NX_P2_RX_JUMBO_BUF_MAX_LEN;
269 rds_ring->skb_size =
270 rds_ring->dma_size + NET_IP_ALIGN;
271 break;
272
273 case RCV_RING_LRO:
274 rds_ring->num_desc = adapter->num_lro_rxd;
275 rds_ring->dma_size = RX_LRO_DMA_MAP_LEN;
276 rds_ring->skb_size = MAX_RX_LRO_BUFFER_LENGTH;
277 break;
278
279 }
280 rds_ring->rx_buf_arr = (struct netxen_rx_buffer *)
281 vmalloc(RCV_BUFF_RINGSIZE(rds_ring));
282 if (rds_ring->rx_buf_arr == NULL) {
283 printk(KERN_ERR "%s: Failed to allocate "
284 "rx buffer ring %d\n",
285 netdev->name, ring);
286 /* free whatever was already allocated */
287 goto err_out;
288 }
289 memset(rds_ring->rx_buf_arr, 0, RCV_BUFF_RINGSIZE(rds_ring));
290 INIT_LIST_HEAD(&rds_ring->free_list);
291 /*
292 * Now go through all of them, set reference handles
293 * and put them in the queues.
294 */
295 num_rx_bufs = rds_ring->num_desc;
296 rx_buf = rds_ring->rx_buf_arr;
297 for (i = 0; i < num_rx_bufs; i++) {
298 list_add_tail(&rx_buf->list,
299 &rds_ring->free_list);
300 rx_buf->ref_handle = i;
301 rx_buf->state = NETXEN_BUFFER_FREE;
302 rx_buf++;
303 }
304 spin_lock_init(&rds_ring->lock);
305 }
306
307 for (ring = 0; ring < adapter->max_sds_rings; ring++) {
308 sds_ring = &recv_ctx->sds_rings[ring];
309 sds_ring->irq = adapter->msix_entries[ring].vector;
310 sds_ring->clean_tx = (ring == 0);
311 sds_ring->post_rxd = (ring == 0);
312 sds_ring->adapter = adapter;
313 sds_ring->num_desc = adapter->num_rxd;
314
315 for (i = 0; i < NUM_RCV_DESC_RINGS; i++)
316 INIT_LIST_HEAD(&sds_ring->free_list[i]);
317 }
318
319 return 0;
320
321 err_out:
322 netxen_free_sw_resources(adapter);
323 return -ENOMEM;
324 }
325
326 void netxen_initialize_adapter_ops(struct netxen_adapter *adapter)
327 {
328 switch (adapter->ahw.port_type) {
329 case NETXEN_NIC_GBE:
330 adapter->enable_phy_interrupts =
331 netxen_niu_gbe_enable_phy_interrupts;
332 adapter->disable_phy_interrupts =
333 netxen_niu_gbe_disable_phy_interrupts;
334 adapter->macaddr_set = netxen_niu_macaddr_set;
335 adapter->set_mtu = netxen_nic_set_mtu_gb;
336 adapter->set_promisc = netxen_niu_set_promiscuous_mode;
337 adapter->phy_read = netxen_niu_gbe_phy_read;
338 adapter->phy_write = netxen_niu_gbe_phy_write;
339 adapter->init_port = netxen_niu_gbe_init_port;
340 adapter->stop_port = netxen_niu_disable_gbe_port;
341 break;
342
343 case NETXEN_NIC_XGBE:
344 adapter->enable_phy_interrupts =
345 netxen_niu_xgbe_enable_phy_interrupts;
346 adapter->disable_phy_interrupts =
347 netxen_niu_xgbe_disable_phy_interrupts;
348 adapter->macaddr_set = netxen_niu_xg_macaddr_set;
349 adapter->set_mtu = netxen_nic_set_mtu_xgb;
350 adapter->init_port = netxen_niu_xg_init_port;
351 adapter->set_promisc = netxen_niu_xg_set_promiscuous_mode;
352 adapter->stop_port = netxen_niu_disable_xg_port;
353 break;
354
355 default:
356 break;
357 }
358
359 if (NX_IS_REVISION_P3(adapter->ahw.revision_id)) {
360 adapter->set_mtu = nx_fw_cmd_set_mtu;
361 adapter->set_promisc = netxen_p3_nic_set_promisc;
362 }
363 }
364
365 /*
366 * netxen_decode_crb_addr() - utility to translate from internal Phantom CRB
367 * address to external PCI CRB address.
368 */
369 static u32 netxen_decode_crb_addr(u32 addr)
370 {
371 int i;
372 u32 base_addr, offset, pci_base;
373
374 crb_addr_transform_setup();
375
376 pci_base = NETXEN_ADDR_ERROR;
377 base_addr = addr & 0xfff00000;
378 offset = addr & 0x000fffff;
379
380 for (i = 0; i < NETXEN_MAX_CRB_XFORM; i++) {
381 if (crb_addr_xform[i] == base_addr) {
382 pci_base = i << 20;
383 break;
384 }
385 }
386 if (pci_base == NETXEN_ADDR_ERROR)
387 return pci_base;
388 else
389 return (pci_base + offset);
390 }
391
392 static long rom_max_timeout = 100;
393 static long rom_lock_timeout = 10000;
394
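/*
 * Acquire PCIe hardware semaphore 2, which serializes access to the
 * flash ROM interface; a read of the LOCK register returns 1 once the
 * semaphore has been granted.
 */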
395 static int rom_lock(struct netxen_adapter *adapter)
396 {
397 int iter;
398 u32 done = 0;
399 int timeout = 0;
400
401 while (!done) {
402 /* acquire semaphore2 from PCI HW block */
403 netxen_nic_read_w0(adapter, NETXEN_PCIE_REG(PCIE_SEM2_LOCK),
404 &done);
405 if (done == 1)
406 break;
407 if (timeout >= rom_lock_timeout)
408 return -EIO;
409
410 timeout++;
411 /*
412 * Yield CPU
413 */
414 if (!in_atomic())
415 schedule();
416 else {
417 for (iter = 0; iter < 20; iter++)
418 cpu_relax(); /* this is a nop instr on i386 */
419 }
420 }
421 netxen_nic_reg_write(adapter, NETXEN_ROM_LOCK_ID, ROM_LOCK_DRIVER);
422 return 0;
423 }
424
425 static int netxen_wait_rom_done(struct netxen_adapter *adapter)
426 {
427 long timeout = 0;
428 long done = 0;
429
430 cond_resched();
431
432 while (done == 0) {
433 done = netxen_nic_reg_read(adapter, NETXEN_ROMUSB_GLB_STATUS);
434 done &= 2;
435 timeout++;
436 if (timeout >= rom_max_timeout) {
437 printk("Timeout reached waiting for rom done");
438 return -EIO;
439 }
440 }
441 return 0;
442 }
443
444 static void netxen_rom_unlock(struct netxen_adapter *adapter)
445 {
446 u32 val;
447
448 /* release semaphore2 */
449 netxen_nic_read_w0(adapter, NETXEN_PCIE_REG(PCIE_SEM2_UNLOCK), &val);
450
451 }
452
453 static int do_rom_fast_read(struct netxen_adapter *adapter,
454 int addr, int *valp)
455 {
456 netxen_nic_reg_write(adapter, NETXEN_ROMUSB_ROM_ADDRESS, addr);
457 netxen_nic_reg_write(adapter, NETXEN_ROMUSB_ROM_DUMMY_BYTE_CNT, 0);
458 netxen_nic_reg_write(adapter, NETXEN_ROMUSB_ROM_ABYTE_CNT, 3);
459 netxen_nic_reg_write(adapter, NETXEN_ROMUSB_ROM_INSTR_OPCODE, 0xb);
460 if (netxen_wait_rom_done(adapter)) {
461 printk("Error waiting for rom done\n");
462 return -EIO;
463 }
464 /* reset abyte_cnt and dummy_byte_cnt */
465 netxen_nic_reg_write(adapter, NETXEN_ROMUSB_ROM_ABYTE_CNT, 0);
466 udelay(10);
467 netxen_nic_reg_write(adapter, NETXEN_ROMUSB_ROM_DUMMY_BYTE_CNT, 0);
468
469 *valp = netxen_nic_reg_read(adapter, NETXEN_ROMUSB_ROM_RDATA);
470 return 0;
471 }
472
473 static int do_rom_fast_read_words(struct netxen_adapter *adapter, int addr,
474 u8 *bytes, size_t size)
475 {
476 int addridx;
477 int ret = 0;
478
479 for (addridx = addr; addridx < (addr + size); addridx += 4) {
480 int v;
481 ret = do_rom_fast_read(adapter, addridx, &v);
482 if (ret != 0)
483 break;
484 *(__le32 *)bytes = cpu_to_le32(v);
485 bytes += 4;
486 }
487
488 return ret;
489 }
490
491 int
492 netxen_rom_fast_read_words(struct netxen_adapter *adapter, int addr,
493 u8 *bytes, size_t size)
494 {
495 int ret;
496
497 ret = rom_lock(adapter);
498 if (ret < 0)
499 return ret;
500
501 ret = do_rom_fast_read_words(adapter, addr, bytes, size);
502
503 netxen_rom_unlock(adapter);
504 return ret;
505 }
506
507 int netxen_rom_fast_read(struct netxen_adapter *adapter, int addr, int *valp)
508 {
509 int ret;
510
511 if (rom_lock(adapter) != 0)
512 return -EIO;
513
514 ret = do_rom_fast_read(adapter, addr, valp);
515 netxen_rom_unlock(adapter);
516 return ret;
517 }
518
519 #define NETXEN_BOARDTYPE 0x4008
520 #define NETXEN_BOARDNUM 0x400c
521 #define NETXEN_CHIPNUM 0x4010
522
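/*
 * Reset the chip and replay the CRB initialization (address, value)
 * pairs stored in flash, skipping the clock, PCI-setup and SMB
 * registers that must not be clobbered on P3 parts, then clear the
 * protocol engines and disable their caches.
 */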
523 int netxen_pinit_from_rom(struct netxen_adapter *adapter, int verbose)
524 {
525 int addr, val;
526 int i, n, init_delay = 0;
527 struct crb_addr_pair *buf;
528 unsigned offset;
529 u32 off;
530
531 /* resetall */
532 rom_lock(adapter);
533 netxen_crb_writelit_adapter(adapter, NETXEN_ROMUSB_GLB_SW_RESET,
534 0xffffffff);
535 netxen_rom_unlock(adapter);
536
537 if (verbose) {
538 if (netxen_rom_fast_read(adapter, NETXEN_BOARDTYPE, &val) == 0)
539 printk("P2 ROM board type: 0x%08x\n", val);
540 else
541 printk("Could not read board type\n");
542 if (netxen_rom_fast_read(adapter, NETXEN_BOARDNUM, &val) == 0)
543 printk("P2 ROM board num: 0x%08x\n", val);
544 else
545 printk("Could not read board number\n");
546 if (netxen_rom_fast_read(adapter, NETXEN_CHIPNUM, &val) == 0)
547 printk("P2 ROM chip num: 0x%08x\n", val);
548 else
549 printk("Could not read chip number\n");
550 }
551
552 if (NX_IS_REVISION_P3(adapter->ahw.revision_id)) {
553 if (netxen_rom_fast_read(adapter, 0, &n) != 0 ||
554 (n != 0xcafecafe) ||
555 netxen_rom_fast_read(adapter, 4, &n) != 0) {
556 printk(KERN_ERR "%s: ERROR Reading crb_init area: "
557 "n: %08x\n", netxen_nic_driver_name, n);
558 return -EIO;
559 }
560 offset = n & 0xffffU;
561 n = (n >> 16) & 0xffffU;
562 } else {
563 if (netxen_rom_fast_read(adapter, 0, &n) != 0 ||
564 !(n & 0x80000000)) {
565 printk(KERN_ERR "%s: ERROR Reading crb_init area: "
566 "n: %08x\n", netxen_nic_driver_name, n);
567 return -EIO;
568 }
569 offset = 1;
570 n &= ~0x80000000;
571 }
572
573 if (n < 1024) {
574 if (verbose)
575 printk(KERN_DEBUG "%s: %d CRB init values found"
576 " in ROM.\n", netxen_nic_driver_name, n);
577 } else {
578 printk(KERN_ERR "%s:n=0x%x Error! NetXen card flash not"
579 " initialized.\n", __func__, n);
580 return -EIO;
581 }
582
583 buf = kcalloc(n, sizeof(struct crb_addr_pair), GFP_KERNEL);
584 if (buf == NULL) {
585 printk("%s: netxen_pinit_from_rom: Unable to calloc memory.\n",
586 netxen_nic_driver_name);
587 return -ENOMEM;
588 }
589 for (i = 0; i < n; i++) {
590 if (netxen_rom_fast_read(adapter, 8*i + 4*offset, &val) != 0 ||
591 netxen_rom_fast_read(adapter, 8*i + 4*offset + 4, &addr) != 0) {
592 kfree(buf);
593 return -EIO;
594 }
595
596 buf[i].addr = addr;
597 buf[i].data = val;
598
599 if (verbose)
600 printk(KERN_DEBUG "%s: PCI: 0x%08x == 0x%08x\n",
601 netxen_nic_driver_name,
602 (u32)netxen_decode_crb_addr(addr), val);
603 }
604 for (i = 0; i < n; i++) {
605
606 off = netxen_decode_crb_addr(buf[i].addr);
607 if (off == NETXEN_ADDR_ERROR) {
608 printk(KERN_ERR"CRB init value out of range %x\n",
609 buf[i].addr);
610 continue;
611 }
612 off += NETXEN_PCI_CRBSPACE;
613 /* skipping cold reboot MAGIC */
614 if (off == NETXEN_CAM_RAM(0x1fc))
615 continue;
616
617 if (NX_IS_REVISION_P3(adapter->ahw.revision_id)) {
618 /* do not reset PCI */
619 if (off == (ROMUSB_GLB + 0xbc))
620 continue;
621 if (off == (ROMUSB_GLB + 0xa8))
622 continue;
623 if (off == (ROMUSB_GLB + 0xc8)) /* core clock */
624 continue;
625 if (off == (ROMUSB_GLB + 0x24)) /* MN clock */
626 continue;
627 if (off == (ROMUSB_GLB + 0x1c)) /* MS clock */
628 continue;
629 if (off == (NETXEN_CRB_PEG_NET_1 + 0x18))
630 buf[i].data = 0x1020;
631 /* skip the function enable register */
632 if (off == NETXEN_PCIE_REG(PCIE_SETUP_FUNCTION))
633 continue;
634 if (off == NETXEN_PCIE_REG(PCIE_SETUP_FUNCTION2))
635 continue;
636 if ((off & 0x0ff00000) == NETXEN_CRB_SMB)
637 continue;
638 }
639
640 if (off == NETXEN_ADDR_ERROR) {
641 printk(KERN_ERR "%s: Err: Unknown addr: 0x%08x\n",
642 netxen_nic_driver_name, buf[i].addr);
643 continue;
644 }
645
646 init_delay = 1;
647 /* After writing this register, HW needs time for CRB */
648 /* to quiet down (else crb_window returns 0xffffffff) */
649 if (off == NETXEN_ROMUSB_GLB_SW_RESET) {
650 init_delay = 1000;
651 if (NX_IS_REVISION_P2(adapter->ahw.revision_id)) {
652 /* hold xdma in reset also */
653 /* NETXEN_NIC_XDMA_RESET == 0x8000ff */
654 buf[i].data = NETXEN_NIC_XDMA_RESET;
655 }
656 }
657
658 adapter->hw_write_wx(adapter, off, &buf[i].data, 4);
659
660 msleep(init_delay);
661 }
662 kfree(buf);
663
664 /* disable_peg_cache_all */
665
666 /* unreset_net_cache */
667 if (NX_IS_REVISION_P2(adapter->ahw.revision_id)) {
668 adapter->hw_read_wx(adapter,
669 NETXEN_ROMUSB_GLB_SW_RESET, &val, 4);
670 netxen_crb_writelit_adapter(adapter,
671 NETXEN_ROMUSB_GLB_SW_RESET, (val & 0xffffff0f));
672 }
673
674 /* p2dn replyCount */
675 netxen_crb_writelit_adapter(adapter, NETXEN_CRB_PEG_NET_D + 0xec, 0x1e);
676 /* disable_peg_cache 0 */
677 netxen_crb_writelit_adapter(adapter, NETXEN_CRB_PEG_NET_D + 0x4c, 8);
678 /* disable_peg_cache 1 */
679 netxen_crb_writelit_adapter(adapter, NETXEN_CRB_PEG_NET_I + 0x4c, 8);
680
681 /* peg_clr_all */
682
683 /* peg_clr 0 */
684 netxen_crb_writelit_adapter(adapter, NETXEN_CRB_PEG_NET_0 + 0x8, 0);
685 netxen_crb_writelit_adapter(adapter, NETXEN_CRB_PEG_NET_0 + 0xc, 0);
686 /* peg_clr 1 */
687 netxen_crb_writelit_adapter(adapter, NETXEN_CRB_PEG_NET_1 + 0x8, 0);
688 netxen_crb_writelit_adapter(adapter, NETXEN_CRB_PEG_NET_1 + 0xc, 0);
689 /* peg_clr 2 */
690 netxen_crb_writelit_adapter(adapter, NETXEN_CRB_PEG_NET_2 + 0x8, 0);
691 netxen_crb_writelit_adapter(adapter, NETXEN_CRB_PEG_NET_2 + 0xc, 0);
692 /* peg_clr 3 */
693 netxen_crb_writelit_adapter(adapter, NETXEN_CRB_PEG_NET_3 + 0x8, 0);
694 netxen_crb_writelit_adapter(adapter, NETXEN_CRB_PEG_NET_3 + 0xc, 0);
695 return 0;
696 }
697
698 int netxen_initialize_adapter_offload(struct netxen_adapter *adapter)
699 {
700 uint64_t addr;
701 uint32_t hi;
702 uint32_t lo;
703
704 adapter->dummy_dma.addr =
705 pci_alloc_consistent(adapter->pdev,
706 NETXEN_HOST_DUMMY_DMA_SIZE,
707 &adapter->dummy_dma.phys_addr);
708 if (adapter->dummy_dma.addr == NULL) {
709 printk("%s: ERROR: Could not allocate dummy DMA memory\n",
710 __func__);
711 return -ENOMEM;
712 }
713
714 addr = (uint64_t) adapter->dummy_dma.phys_addr;
715 hi = (addr >> 32) & 0xffffffff;
716 lo = addr & 0xffffffff;
717
718 adapter->pci_write_normalize(adapter, CRB_HOST_DUMMY_BUF_ADDR_HI, hi);
719 adapter->pci_write_normalize(adapter, CRB_HOST_DUMMY_BUF_ADDR_LO, lo);
720
721 if (NX_IS_REVISION_P3(adapter->ahw.revision_id)) {
722 uint32_t temp = 0;
723 adapter->hw_write_wx(adapter, CRB_HOST_DUMMY_BUF, &temp, 4);
724 }
725
726 return 0;
727 }
728
729 void netxen_free_adapter_offload(struct netxen_adapter *adapter)
730 {
731 int i = 100;
732
733 if (!adapter->dummy_dma.addr)
734 return;
735
736 if (NX_IS_REVISION_P2(adapter->ahw.revision_id)) {
737 do {
738 if (dma_watchdog_shutdown_request(adapter) == 1)
739 break;
740 msleep(50);
741 if (dma_watchdog_shutdown_poll_result(adapter) == 1)
742 break;
743 } while (--i);
744 }
745
746 if (i) {
747 pci_free_consistent(adapter->pdev,
748 NETXEN_HOST_DUMMY_DMA_SIZE,
749 adapter->dummy_dma.addr,
750 adapter->dummy_dma.phys_addr);
751 adapter->dummy_dma.addr = NULL;
752 } else {
753 printk(KERN_ERR "%s: dma_watchdog_shutdown failed\n",
754 adapter->netdev->name);
755 }
756 }
757
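/*
 * Unless a pegtune value is supplied, poll CRB_CMDPEG_STATE for up to
 * 30 seconds waiting for the firmware to report initialization
 * complete (or already acked).
 */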
758 int netxen_phantom_init(struct netxen_adapter *adapter, int pegtune_val)
759 {
760 u32 val = 0;
761 int retries = 60;
762
763 if (!pegtune_val) {
764 do {
765 val = adapter->pci_read_normalize(adapter,
766 CRB_CMDPEG_STATE);
767
768 if (val == PHAN_INITIALIZE_COMPLETE ||
769 val == PHAN_INITIALIZE_ACK)
770 return 0;
771
772 msleep(500);
773
774 } while (--retries);
775
776 if (!retries) {
777 pegtune_val = adapter->pci_read_normalize(adapter,
778 NETXEN_ROMUSB_GLB_PEGTUNE_DONE);
779 printk(KERN_WARNING "netxen_phantom_init: init failed, "
780 "pegtune_val=%x\n", pegtune_val);
781 return -1;
782 }
783 }
784
785 return 0;
786 }
787
788 int netxen_receive_peg_ready(struct netxen_adapter *adapter)
789 {
790 u32 val = 0;
791 int retries = 2000;
792
793 do {
794 val = adapter->pci_read_normalize(adapter, CRB_RCVPEG_STATE);
795
796 if (val == PHAN_PEG_RCV_INITIALIZED)
797 return 0;
798
799 msleep(10);
800
801 } while (--retries);
802
803 if (!retries) {
804 printk(KERN_ERR "Receive Peg initialization not "
805 "complete, state: 0x%x.\n", val);
806 return -EIO;
807 }
808
809 return 0;
810 }
811
812 static int
813 netxen_alloc_rx_skb(struct netxen_adapter *adapter,
814 struct nx_host_rds_ring *rds_ring,
815 struct netxen_rx_buffer *buffer)
816 {
817 struct sk_buff *skb;
818 dma_addr_t dma;
819 struct pci_dev *pdev = adapter->pdev;
820
821 buffer->skb = dev_alloc_skb(rds_ring->skb_size);
822 if (!buffer->skb)
823 return 1;
824
825 skb = buffer->skb;
826
827 if (!adapter->ahw.cut_through)
828 skb_reserve(skb, 2);
829
830 dma = pci_map_single(pdev, skb->data,
831 rds_ring->dma_size, PCI_DMA_FROMDEVICE);
832
833 if (pci_dma_mapping_error(pdev, dma)) {
834 dev_kfree_skb_any(skb);
835 buffer->skb = NULL;
836 return 1;
837 }
838
839 buffer->skb = skb;
840 buffer->dma = dma;
841 buffer->state = NETXEN_BUFFER_BUSY;
842
843 return 0;
844 }
845
846 static struct sk_buff *netxen_process_rxbuf(struct netxen_adapter *adapter,
847 struct nx_host_rds_ring *rds_ring, u16 index, u16 cksum)
848 {
849 struct netxen_rx_buffer *buffer;
850 struct sk_buff *skb;
851
852 buffer = &rds_ring->rx_buf_arr[index];
853
854 pci_unmap_single(adapter->pdev, buffer->dma, rds_ring->dma_size,
855 PCI_DMA_FROMDEVICE);
856
857 skb = buffer->skb;
858 if (!skb)
859 goto no_skb;
860
861 if (likely(adapter->rx_csum && cksum == STATUS_CKSUM_OK)) {
862 adapter->stats.csummed++;
863 skb->ip_summed = CHECKSUM_UNNECESSARY;
864 } else
865 skb->ip_summed = CHECKSUM_NONE;
866
867 skb->dev = adapter->netdev;
868
869 buffer->skb = NULL;
870 no_skb:
871 buffer->state = NETXEN_BUFFER_FREE;
872 return skb;
873 }
874
875 static struct netxen_rx_buffer *
876 netxen_process_rcv(struct netxen_adapter *adapter,
877 int ring, int index, int length, int cksum, int pkt_offset)
878 {
879 struct net_device *netdev = adapter->netdev;
880 struct netxen_recv_context *recv_ctx = &adapter->recv_ctx;
881 struct netxen_rx_buffer *buffer;
882 struct sk_buff *skb;
883 struct nx_host_rds_ring *rds_ring = &recv_ctx->rds_rings[ring];
884
885 if (unlikely(index >= rds_ring->num_desc))
886 return NULL;
887
888 buffer = &rds_ring->rx_buf_arr[index];
889
890 skb = netxen_process_rxbuf(adapter, rds_ring, index, cksum);
891 if (!skb)
892 return buffer;
893
894 if (length > rds_ring->skb_size)
895 skb_put(skb, rds_ring->skb_size);
896 else
897 skb_put(skb, length);
898
899
900 if (pkt_offset)
901 skb_pull(skb, pkt_offset);
902
903 skb->protocol = eth_type_trans(skb, netdev);
904
905 netif_receive_skb(skb);
906
907 adapter->stats.no_rcv++;
908 adapter->stats.rxbytes += length;
909
910 return buffer;
911 }
912
913 #define netxen_merge_rx_buffers(list, head) \
914 do { list_splice_tail_init(list, head); } while (0)
915
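/*
 * Consume up to 'max' entries from the status descriptor ring, pass
 * the completed rx buffers up the stack, then reallocate skbs for the
 * freed buffers and repost them to their RDS rings.
 */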
916 int
917 netxen_process_rcv_ring(struct nx_host_sds_ring *sds_ring, int max)
918 {
919 struct netxen_adapter *adapter = sds_ring->adapter;
920
921 struct list_head *cur;
922
923 struct status_desc *desc;
924 struct netxen_rx_buffer *rxbuf;
925
926 u32 consumer = sds_ring->consumer;
927
928 int count = 0;
929 u64 sts_data;
930 int opcode, ring, index, length, cksum, pkt_offset;
931
932 while (count < max) {
933 desc = &sds_ring->desc_head[consumer];
934 sts_data = le64_to_cpu(desc->status_desc_data);
935
936 if (!(sts_data & STATUS_OWNER_HOST))
937 break;
938
939 ring = netxen_get_sts_type(sts_data);
940 if (ring > RCV_RING_JUMBO)
941 continue;
942
943 opcode = netxen_get_sts_opcode(sts_data);
944
945 index = netxen_get_sts_refhandle(sts_data);
946 length = netxen_get_sts_totallength(sts_data);
947 cksum = netxen_get_sts_status(sts_data);
948 pkt_offset = netxen_get_sts_pkt_offset(sts_data);
949
950 rxbuf = netxen_process_rcv(adapter, ring, index,
951 length, cksum, pkt_offset);
952
953 if (rxbuf)
954 list_add_tail(&rxbuf->list, &sds_ring->free_list[ring]);
955
956 desc->status_desc_data = cpu_to_le64(STATUS_OWNER_PHANTOM);
957
958 consumer = get_next_index(consumer, sds_ring->num_desc);
959 count++;
960 }
961
962 for (ring = 0; ring < adapter->max_rds_rings; ring++) {
963 struct nx_host_rds_ring *rds_ring =
964 &adapter->recv_ctx.rds_rings[ring];
965
966 if (!list_empty(&sds_ring->free_list[ring])) {
967 list_for_each(cur, &sds_ring->free_list[ring]) {
968 rxbuf = list_entry(cur,
969 struct netxen_rx_buffer, list);
970 netxen_alloc_rx_skb(adapter, rds_ring, rxbuf);
971 }
972 spin_lock(&rds_ring->lock);
973 netxen_merge_rx_buffers(&sds_ring->free_list[ring],
974 &rds_ring->free_list);
975 spin_unlock(&rds_ring->lock);
976 }
977
978 netxen_post_rx_buffers_nodb(adapter, rds_ring);
979 }
980
981 if (count) {
982 sds_ring->consumer = consumer;
983 adapter->pci_write_normalize(adapter,
984 sds_ring->crb_sts_consumer, consumer);
985 }
986
987 return count;
988 }
989
990 /* Process Command status ring */
991 int netxen_process_cmd_ring(struct netxen_adapter *adapter)
992 {
993 u32 last_consumer, consumer;
994 int count = 0, i;
995 struct netxen_cmd_buffer *buffer;
996 struct pci_dev *pdev = adapter->pdev;
997 struct net_device *netdev = adapter->netdev;
998 struct netxen_skb_frag *frag;
999 int done = 0;
1000
1001 if (!spin_trylock(&adapter->tx_clean_lock))
1002 return 1;
1003
1004 last_consumer = adapter->last_cmd_consumer;
1005 barrier(); /* cmd_consumer can change underneath */
1006 consumer = le32_to_cpu(*(adapter->cmd_consumer));
1007
1008 while (last_consumer != consumer) {
1009 buffer = &adapter->cmd_buf_arr[last_consumer];
1010 if (buffer->skb) {
1011 frag = &buffer->frag_array[0];
1012 pci_unmap_single(pdev, frag->dma, frag->length,
1013 PCI_DMA_TODEVICE);
1014 frag->dma = 0ULL;
1015 for (i = 1; i < buffer->frag_count; i++) {
1016 frag++; /* Get the next frag */
1017 pci_unmap_page(pdev, frag->dma, frag->length,
1018 PCI_DMA_TODEVICE);
1019 frag->dma = 0ULL;
1020 }
1021
1022 adapter->stats.xmitfinished++;
1023 dev_kfree_skb_any(buffer->skb);
1024 buffer->skb = NULL;
1025 }
1026
1027 last_consumer = get_next_index(last_consumer,
1028 adapter->num_txd);
1029 if (++count >= MAX_STATUS_HANDLE)
1030 break;
1031 }
1032
1033 if (count) {
1034 adapter->last_cmd_consumer = last_consumer;
1035 smp_mb();
1036 if (netif_queue_stopped(netdev) && netif_running(netdev)) {
1037 netif_tx_lock(netdev);
1038 netif_wake_queue(netdev);
1039 smp_mb();
1040 netif_tx_unlock(netdev);
1041 }
1042 }
1043 /*
1044 * If everything is freed up to consumer then check if the ring is full
1045 * If the ring is full then check if more needs to be freed and
1046 * schedule the call back again.
1047 *
1048 * This happens when there are 2 CPUs. One could be freeing and the
1049 * other filling it. If the ring is full when we get out of here and
1050 * the card has already interrupted the host then the host can miss the
1051 * interrupt.
1052 *
1053 * There is still a possible race condition and the host could miss an
1054 * interrupt. The card has to take care of this.
1055 */
1056 barrier(); /* cmd_consumer can change underneath */
1057 consumer = le32_to_cpu(*(adapter->cmd_consumer));
1058 done = (last_consumer == consumer);
1059 spin_unlock(&adapter->tx_clean_lock);
1060
1061 return (done);
1062 }
1063
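/*
 * Post buffers from the RDS ring's free list to the hardware and
 * advance the producer index; firmware older than 4.0.0 additionally
 * needs a doorbell message to notice the new producer value.
 */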
1064 void
1065 netxen_post_rx_buffers(struct netxen_adapter *adapter, u32 ringid,
1066 struct nx_host_rds_ring *rds_ring)
1067 {
1068 struct rcv_desc *pdesc;
1069 struct netxen_rx_buffer *buffer;
1070 int producer, count = 0;
1071 netxen_ctx_msg msg = 0;
1072 struct list_head *head;
1073
1074 producer = rds_ring->producer;
1075
1076 spin_lock(&rds_ring->lock);
1077 head = &rds_ring->free_list;
1078 while (!list_empty(head)) {
1079
1080 buffer = list_entry(head->next, struct netxen_rx_buffer, list);
1081
1082 if (!buffer->skb) {
1083 if (netxen_alloc_rx_skb(adapter, rds_ring, buffer))
1084 break;
1085 }
1086
1087 count++;
1088 list_del(&buffer->list);
1089
1090 /* make a rcv descriptor */
1091 pdesc = &rds_ring->desc_head[producer];
1092 pdesc->addr_buffer = cpu_to_le64(buffer->dma);
1093 pdesc->reference_handle = cpu_to_le16(buffer->ref_handle);
1094 pdesc->buffer_length = cpu_to_le32(rds_ring->dma_size);
1095
1096 producer = get_next_index(producer, rds_ring->num_desc);
1097 }
1098 spin_unlock(&rds_ring->lock);
1099
1100 if (count) {
1101 rds_ring->producer = producer;
1102 adapter->pci_write_normalize(adapter,
1103 rds_ring->crb_rcv_producer,
1104 (producer-1) & (rds_ring->num_desc-1));
1105
1106 if (adapter->fw_major < 4) {
1107 /*
1108 * Write a doorbell msg to tell phanmon of change in
1109 * receive ring producer
1110 * Only for firmware version < 4.0.0
1111 */
1112 netxen_set_msg_peg_id(msg, NETXEN_RCV_PEG_DB_ID);
1113 netxen_set_msg_privid(msg);
1114 netxen_set_msg_count(msg,
1115 ((producer - 1) &
1116 (rds_ring->num_desc - 1)));
1117 netxen_set_msg_ctxid(msg, adapter->portnum);
1118 netxen_set_msg_opcode(msg, NETXEN_RCV_PRODUCER(ringid));
1119 writel(msg,
1120 DB_NORMALIZE(adapter,
1121 NETXEN_RCV_PRODUCER_OFFSET));
1122 }
1123 }
1124 }
1125
1126 static void
1127 netxen_post_rx_buffers_nodb(struct netxen_adapter *adapter,
1128 struct nx_host_rds_ring *rds_ring)
1129 {
1130 struct rcv_desc *pdesc;
1131 struct netxen_rx_buffer *buffer;
1132 int producer, count = 0;
1133 struct list_head *head;
1134
1135 producer = rds_ring->producer;
1136 if (!spin_trylock(&rds_ring->lock))
1137 return;
1138
1139 head = &rds_ring->free_list;
1140 while (!list_empty(head)) {
1141
1142 buffer = list_entry(head->next, struct netxen_rx_buffer, list);
1143
1144 if (!buffer->skb) {
1145 if (netxen_alloc_rx_skb(adapter, rds_ring, buffer))
1146 break;
1147 }
1148
1149 count++;
1150 list_del(&buffer->list);
1151
1152 /* make a rcv descriptor */
1153 pdesc = &rds_ring->desc_head[producer];
1154 pdesc->reference_handle = cpu_to_le16(buffer->ref_handle);
1155 pdesc->buffer_length = cpu_to_le32(rds_ring->dma_size);
1156 pdesc->addr_buffer = cpu_to_le64(buffer->dma);
1157
1158 producer = get_next_index(producer, rds_ring->num_desc);
1159 }
1160
1161 if (count) {
1162 rds_ring->producer = producer;
1163 adapter->pci_write_normalize(adapter,
1164 rds_ring->crb_rcv_producer,
1165 (producer - 1) & (rds_ring->num_desc - 1));
1166 wmb();
1167 }
1168 spin_unlock(&rds_ring->lock);
1169 }
1170
1171 void netxen_nic_clear_stats(struct netxen_adapter *adapter)
1172 {
1173 memset(&adapter->stats, 0, sizeof(adapter->stats));
1174 return;
1175 }
1176
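For reference, a minimal sketch of how a caller elsewhere in the driver might use the flash helpers above to pull the board type word out of ROM. NETXEN_BOARDTYPE and netxen_rom_fast_read() come from this file; the example function itself is purely illustrative and not part of the driver:

	/* illustrative only -- not part of netxen_nic_init.c */
	static int example_read_board_type(struct netxen_adapter *adapter)
	{
		int board_type;

		/* netxen_rom_fast_read() takes the ROM lock internally and
		 * returns 0 on success */
		if (netxen_rom_fast_read(adapter, NETXEN_BOARDTYPE, &board_type) != 0)
			return -EIO;

		dev_info(&adapter->pdev->dev, "board type 0x%08x\n", board_type);
		return 0;
	}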