/* Agere Systems Inc.
 * 10/100/1000 Base-T Ethernet Driver for the ET1301 and ET131x series MACs
 *
 * Copyright © 2005 Agere Systems Inc.
 * All rights reserved.
 *   http://www.agere.com
 *
 * Copyright (c) 2011 Mark Einon <mark.einon@gmail.com>
 *
 *------------------------------------------------------------------------------
 *
 * SOFTWARE LICENSE
 *
 * This software is provided subject to the following terms and conditions,
 * which you should read carefully before using the software. Using this
 * software indicates your acceptance of these terms and conditions. If you do
 * not agree with these terms and conditions, do not use the software.
 *
 * Copyright © 2005 Agere Systems Inc.
 * All rights reserved.
 *
 * Redistribution and use in source or binary forms, with or without
 * modifications, are permitted provided that the following conditions are met:
 *
 * . Redistributions of source code must retain the above copyright notice, this
 *   list of conditions and the following Disclaimer as comments in the code as
 *   well as in the documentation and/or other materials provided with the
 *   distribution.
 *
 * . Redistributions in binary form must reproduce the above copyright notice,
 *   this list of conditions and the following Disclaimer in the documentation
 *   and/or other materials provided with the distribution.
 *
 * . Neither the name of Agere Systems Inc. nor the names of the contributors
 *   may be used to endorse or promote products derived from this software
 *   without specific prior written permission.
 *
 * Disclaimer
 *
 * THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
 * INCLUDING, BUT NOT LIMITED TO, INFRINGEMENT AND THE IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. ANY
 * USE, MODIFICATION OR DISTRIBUTION OF THIS SOFTWARE IS SOLELY AT THE USERS OWN
 * RISK. IN NO EVENT SHALL AGERE SYSTEMS INC. OR CONTRIBUTORS BE LIABLE FOR ANY
 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
 * ON ANY THEORY OF LIABILITY, INCLUDING, BUT NOT LIMITED TO, CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
 * DAMAGE.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/pci.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>

#include <linux/sched.h>
#include <linux/ptrace.h>
#include <linux/slab.h>
#include <linux/ctype.h>
#include <linux/string.h>
#include <linux/timer.h>
#include <linux/interrupt.h>
#include <linux/in.h>
#include <linux/delay.h>
#include <linux/bitops.h>
#include <linux/io.h>

#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/if_arp.h>
#include <linux/ioport.h>
#include <linux/crc32.h>
#include <linux/random.h>
#include <linux/phy.h>

#include "et131x.h"

MODULE_AUTHOR("Victor Soriano <vjsoriano@agere.com>");
MODULE_AUTHOR("Mark Einon <mark.einon@gmail.com>");
MODULE_LICENSE("Dual BSD/GPL");
MODULE_DESCRIPTION("10/100/1000 Base-T Ethernet Driver for the ET1310 by Agere Systems");

/* EEPROM defines */
#define MAX_NUM_REGISTER_POLLS	1000
#define MAX_NUM_WRITE_RETRIES	2

/* MAC defines */
#define COUNTER_WRAP_16_BIT	0x10000
#define COUNTER_WRAP_12_BIT	0x1000

/* PCI defines */
#define INTERNAL_MEM_SIZE	0x400	/* 1024 of internal memory */
#define INTERNAL_MEM_RX_OFFSET	0x1FF	/* 50% Tx, 50% Rx */

/* ISR defines */
/* For interrupts, normal running is:
 * rxdma_xfr_done, phy_interrupt, mac_stat_interrupt,
 * watchdog_interrupt & txdma_xfer_done
 *
 * In both cases, when flow control is enabled for either Tx or bi-direction,
 * we additionally enable rx_fbr0_low and rx_fbr1_low, so we know when the
 * buffer rings are running low.
 */
#define INT_MASK_DISABLE	0xffffffff

/* NOTE: Masking out MAC_STAT Interrupt for now...
 * #define INT_MASK_ENABLE		0xfff6bf17
 * #define INT_MASK_ENABLE_NO_FLOW	0xfff6bfd7
 */
#define INT_MASK_ENABLE		0xfffebf17
#define INT_MASK_ENABLE_NO_FLOW	0xfffebfd7

/* General defines */
/* Packet and header sizes */
#define NIC_MIN_PACKET_SIZE	60

/* Multicast list size */
#define NIC_MAX_MCAST_LIST	128

/* Supported Filters */
#define ET131X_PACKET_TYPE_DIRECTED		0x0001
#define ET131X_PACKET_TYPE_MULTICAST		0x0002
#define ET131X_PACKET_TYPE_BROADCAST		0x0004
#define ET131X_PACKET_TYPE_PROMISCUOUS		0x0008
#define ET131X_PACKET_TYPE_ALL_MULTICAST	0x0010

/* Tx Timeout */
#define ET131X_TX_TIMEOUT	(1 * HZ)
#define NIC_SEND_HANG_THRESHOLD	0

/* MP_TCB flags */
#define FMP_DEST_MULTI			0x00000001
#define FMP_DEST_BROAD			0x00000002

/* MP_ADAPTER flags */
#define FMP_ADAPTER_INTERRUPT_IN_USE	0x00000008

/* MP_SHARED flags */
#define FMP_ADAPTER_LOWER_POWER		0x00200000

#define FMP_ADAPTER_NON_RECOVER_ERROR	0x00800000
#define FMP_ADAPTER_HARDWARE_ERROR	0x04000000

#define FMP_ADAPTER_FAIL_SEND_MASK	0x3ff00000

/* Some offsets in PCI config space that are actually used. */
#define ET1310_PCI_MAC_ADDRESS		0xA4
#define ET1310_PCI_EEPROM_STATUS	0xB2
#define ET1310_PCI_ACK_NACK		0xC0
#define ET1310_PCI_REPLAY		0xC2
#define ET1310_PCI_L0L1LATENCY		0xCF

/* PCI Product IDs */
#define ET131X_PCI_DEVICE_ID_GIG	0xED00	/* ET1310 1000 Base-T 8 */
#define ET131X_PCI_DEVICE_ID_FAST	0xED01	/* ET1310 100  Base-T */

/* Define order of magnitude converter */
#define NANO_IN_A_MICRO	1000

#define PARM_RX_NUM_BUFS_DEF	4
#define PARM_RX_TIME_INT_DEF	10
#define PARM_RX_MEM_END_DEF	0x2bc
#define PARM_TX_TIME_INT_DEF	40
#define PARM_TX_NUM_BUFS_DEF	4
#define PARM_DMA_CACHE_DEF	0

/* RX defines */
#define FBR_CHUNKS		32
#define MAX_DESC_PER_RING_RX	1024

/* number of RFDs - default and min */
#define RFD_LOW_WATER_MARK	40
#define NIC_DEFAULT_NUM_RFD	1024
#define NUM_FBRS		2

#define NUM_PACKETS_HANDLED	256

#define ALCATEL_MULTICAST_PKT	0x01000000
#define ALCATEL_BROADCAST_PKT	0x02000000

/* typedefs for Free Buffer Descriptors */
struct fbr_desc {
	u32 addr_lo;
	u32 addr_hi;
	u32 word2;	/* Bits 10-31 reserved, 0-9 descriptor */
};
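
/* Illustrative sketch (not part of the driver): given the word2 layout
 * noted above, populating entry i of a free buffer ring might look like
 * the following, where bus_addr_low and bus_addr_high are hypothetical
 * locals holding the buffer's DMA address:
 *
 *	desc->addr_lo = bus_addr_low;
 *	desc->addr_hi = bus_addr_high;
 *	desc->word2 = i & 0x3FF;	// bits 0-9: descriptor number
 */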

/* Packet Status Ring Descriptors
 *
 * Word 0:
 *
 * top 16 bits are from the Alcatel Status Word as enumerated in
 * PE-MCXMAC Data Sheet IPD DS54 0210-1 (also IPD-DS80 0205-2)
 *
 * 0: hp			hash pass
 * 1: ipa			IP checksum assist
 * 2: ipp			IP checksum pass
 * 3: tcpa			TCP checksum assist
 * 4: tcpp			TCP checksum pass
 * 5: wol			WOL Event
 * 6: rxmac_error		RXMAC Error Indicator
 * 7: drop			Drop packet
 * 8: ft			Frame Truncated
 * 9: jp			Jumbo Packet
 * 10: vp			VLAN Packet
 * 11-15: unused
 * 16: asw_prev_pkt_dropped	e.g. IFG too small on previous
 * 17: asw_RX_DV_event		short receive event detected
 * 18: asw_false_carrier_event	bad carrier since last good packet
 * 19: asw_code_err		one or more nibbles signalled as errors
 * 20: asw_CRC_err		CRC error
 * 21: asw_len_chk_err		frame length field incorrect
 * 22: asw_too_long		frame length > 1518 bytes
 * 23: asw_OK			valid CRC + no code error
 * 24: asw_multicast		has a multicast address
 * 25: asw_broadcast		has a broadcast address
 * 26: asw_dribble_nibble	spurious bits after EOP
 * 27: asw_control_frame	is a control frame
 * 28: asw_pause_frame		is a pause frame
 * 29: asw_unsupported_op	unsupported OP code
 * 30: asw_VLAN_tag		VLAN tag detected
 * 31: asw_long_evt		Rx long event
 *
 * Word 1:
 * 0-15: length			length in bytes
 * 16-25: bi			Buffer Index
 * 26-27: ri			Ring Index
 * 28-31: reserved
 */

struct pkt_stat_desc {
	u32 word0;
	u32 word1;
};
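
/* Illustrative sketch (not from the driver): unpacking word1 per the
 * layout documented above; the masks are derived from those bit
 * descriptions rather than from definitions in et131x.h:
 *
 *	len    = psd->word1 & 0xFFFF;		// bits 0-15: length
 *	bindex = (psd->word1 >> 16) & 0x3FF;	// bits 16-25: buffer index
 *	rindex = (psd->word1 >> 26) & 0x3;	// bits 26-27: ring index
 */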

/* Typedefs for the RX DMA status word */

/* rx status word 0 holds part of the status bits of the Rx DMA engine
 * that get copied out to memory by the ET-1310. Word 0 is a 32 bit word
 * which contains the Free Buffer ring 0 and 1 available offset.
 *
 * bit 0-9 FBR1 offset
 * bit 10 Wrap flag for FBR1
 * bit 16-25 FBR0 offset
 * bit 26 Wrap flag for FBR0
 */

/* RXSTAT_WORD1_t structure holds part of the status bits of the Rx DMA engine
 * that get copied out to memory by the ET-1310. Word 3 is a 32 bit word
 * which contains the Packet Status Ring available offset.
 *
 * bit 0-15 reserved
 * bit 16-27 PSRoffset
 * bit 28 PSRwrap
 * bit 29-31 unused
 */

/* struct rx_status_block is a structure representing the status of the Rx
 * DMA engine; it sits in free memory, and is pointed to by 0x101c / 0x1020
 */
struct rx_status_block {
	u32 word0;
	u32 word1;
};
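
/* Sketch (assumed helpers, not in the driver): unpacking the status
 * words per the layouts above. word0 carries the FBR offsets and wrap
 * flags, word1 the PSR offset and wrap flag:
 *
 *	fbr1_offset = stat->word0 & 0x3FF;		// bits 0-9
 *	fbr1_wrap   = (stat->word0 >> 10) & 1;		// bit 10
 *	fbr0_offset = (stat->word0 >> 16) & 0x3FF;	// bits 16-25
 *	fbr0_wrap   = (stat->word0 >> 26) & 1;		// bit 26
 *	psr_offset  = (stat->word1 >> 16) & 0xFFF;	// bits 16-27
 *	psr_wrap    = (stat->word1 >> 28) & 1;		// bit 28
 */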

/* Structure for look-up table holding free buffer ring pointers, addresses
 * and state.
 */
struct fbr_lookup {
	void *virt[MAX_DESC_PER_RING_RX];
	u32 bus_high[MAX_DESC_PER_RING_RX];
	u32 bus_low[MAX_DESC_PER_RING_RX];
	void *ring_virtaddr;
	dma_addr_t ring_physaddr;
	void *mem_virtaddrs[MAX_DESC_PER_RING_RX / FBR_CHUNKS];
	dma_addr_t mem_physaddrs[MAX_DESC_PER_RING_RX / FBR_CHUNKS];
	u32 local_full;
	u32 num_entries;
	dma_addr_t buffsize;
};

/* struct rx_ring is the structure representing the adapter's local
 * reference(s) to the rings
 */
struct rx_ring {
	struct fbr_lookup *fbr[NUM_FBRS];
	void *ps_ring_virtaddr;
	dma_addr_t ps_ring_physaddr;
	u32 local_psr_full;
	u32 psr_num_entries;

	struct rx_status_block *rx_status_block;
	dma_addr_t rx_status_bus;

	/* RECV */
	struct list_head recv_list;
	u32 num_ready_recv;

	u32 num_rfd;

	bool unfinished_receives;
};

/* TX defines */
/* word 2 of the control bits in the Tx Descriptor ring for the ET-1310
 *
 * 0-15: length of packet
 * 16-27: VLAN tag
 * 28: VLAN CFI
 * 29-31: VLAN priority
 *
 * word 3 of the control bits in the Tx Descriptor ring for the ET-1310
 *
 * 0: last packet in the sequence
 * 1: first packet in the sequence
 * 2: interrupt the processor when this pkt sent
 * 3: Control word - no packet data
 * 4: Issue half-duplex backpressure : XON/XOFF
 * 5: send pause frame
 * 6: Tx frame has error
 * 7: append CRC
 * 8: MAC override
 * 9: pad packet
 * 10: Packet is a Huge packet
 * 11: append VLAN tag
 * 12: IP checksum assist
 * 13: TCP checksum assist
 * 14: UDP checksum assist
 */

#define TXDESC_FLAG_LASTPKT	0x0001
#define TXDESC_FLAG_FIRSTPKT	0x0002
#define TXDESC_FLAG_INTPROC	0x0004

/* struct tx_desc represents each descriptor on the ring */
struct tx_desc {
	u32 addr_hi;
	u32 addr_lo;
	u32 len_vlan;	/* control words for how to xmit the */
	u32 flags;	/* data (detailed above) */
};
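
/* Sketch (illustrative, not driver code) of how a single-fragment
 * packet might fill one descriptor, assuming the word layouts above;
 * dma_addr and skb here are hypothetical locals:
 *
 *	desc->addr_hi = upper_32_bits(dma_addr);
 *	desc->addr_lo = lower_32_bits(dma_addr);
 *	desc->len_vlan = skb->len & 0xFFFF;	// word 2, bits 0-15
 *	desc->flags = TXDESC_FLAG_FIRSTPKT |	// word 3: first...
 *		      TXDESC_FLAG_LASTPKT |	// ...and last fragment
 *		      TXDESC_FLAG_INTPROC;	// interrupt on completion
 */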

/* The status of the Tx DMA engine; it sits in free memory, and is pointed to
 * by 0x101c / 0x1020. This is a DMA10 type
 */

/* TCB (Transmit Control Block: Host Side) */
struct tcb {
	struct tcb *next;	/* Next entry in ring */
	u32 flags;		/* Our flags for the packet */
	u32 count;		/* Used to spot stuck/lost packets */
	u32 stale;		/* Used to spot stuck/lost packets */
	struct sk_buff *skb;	/* Network skb we are tied to */
	u32 index;		/* Ring indexes */
	u32 index_start;
};

/* Structure representing our local reference(s) to the ring */
struct tx_ring {
	/* TCB (Transmit Control Block) memory and lists */
	struct tcb *tcb_ring;

	/* List of TCBs that are ready to be used */
	struct tcb *tcb_qhead;
	struct tcb *tcb_qtail;

	/* list of TCBs that are currently being sent. NOTE that access to all
	 * three of these (including used) are controlled via the
	 * TCBSendQLock. This lock should be secured prior to incrementing /
	 * decrementing used, or any queue manipulation on send_head /
	 * tail
	 */
	struct tcb *send_head;
	struct tcb *send_tail;
	int used;

	/* The actual descriptor ring */
	struct tx_desc *tx_desc_ring;
	dma_addr_t tx_desc_ring_pa;

	/* send_idx indicates where we last wrote to in the descriptor ring. */
	u32 send_idx;

	/* The location of the write-back status block */
	u32 *tx_status;
	dma_addr_t tx_status_pa;

	/* Packets since the last IRQ: used for interrupt coalescing */
	int since_irq;
};

/* Do not change these values: if changed, also change the respective
 * TXdma and Rxdma engines
 */
#define NUM_DESC_PER_RING_TX	512	/* TX Do not change these values */
#define NUM_TCB			64

/* These values are all superseded by registry entries to facilitate tuning.
 * Once the desired performance has been achieved, the optimal registry values
 * should be re-populated to these #defines:
 */
#define TX_ERROR_PERIOD		1000

#define LO_MARK_PERCENT_FOR_PSR	15
#define LO_MARK_PERCENT_FOR_RX	15

/* RFD (Receive Frame Descriptor) */
struct rfd {
	struct list_head list_node;
	struct sk_buff *skb;
	u32 len;	/* total size of receive frame */
	u16 bufferindex;
	u8 ringindex;
};

/* Flow Control */
#define FLOW_BOTH	0
#define FLOW_TXONLY	1
#define FLOW_RXONLY	2
#define FLOW_NONE	3

/* Struct to define some device statistics */
struct ce_stats {
	/* MIB II variables
	 *
	 * NOTE: atomic_t types are only guaranteed to store 24-bits; if we
	 * MUST have 32, then we'll need another way to perform atomic
	 * operations
	 */
	u32 unicast_pkts_rcvd;
	atomic_t unicast_pkts_xmtd;
	u32 multicast_pkts_rcvd;
	atomic_t multicast_pkts_xmtd;
	u32 broadcast_pkts_rcvd;
	atomic_t broadcast_pkts_xmtd;
	u32 rcvd_pkts_dropped;

	/* Tx Statistics. */
	u32 tx_underflows;

	u32 tx_collisions;
	u32 tx_excessive_collisions;
	u32 tx_first_collisions;
	u32 tx_late_collisions;
	u32 tx_max_pkt_errs;
	u32 tx_deferred;

	/* Rx Statistics. */
	u32 rx_overflows;

	u32 rx_length_errs;
	u32 rx_align_errs;
	u32 rx_crc_errs;
	u32 rx_code_violations;
	u32 rx_other_errs;

	u32 synchronous_iterations;
	u32 interrupt_status;
};

/* The private adapter structure */
struct et131x_adapter {
	struct net_device *netdev;
	struct pci_dev *pdev;
	struct mii_bus *mii_bus;
	struct phy_device *phydev;
	struct work_struct task;

	/* Flags that indicate current state of the adapter */
	u32 flags;

	/* local link state, to determine if a state change has occurred */
	int link;

	/* Configuration */
	u8 rom_addr[ETH_ALEN];
	u8 addr[ETH_ALEN];
	bool has_eeprom;
	u8 eeprom_data[2];

	/* Spinlocks */
	spinlock_t lock;

	spinlock_t tcb_send_qlock;
	spinlock_t tcb_ready_qlock;
	spinlock_t send_hw_lock;

	spinlock_t rcv_lock;
	spinlock_t rcv_pend_lock;
	spinlock_t fbr_lock;

	spinlock_t phy_lock;

	/* Packet Filter and look ahead size */
	u32 packet_filter;

	/* multicast list */
	u32 multicast_addr_count;
	u8 multicast_list[NIC_MAX_MCAST_LIST][ETH_ALEN];

	/* Pointer to the device's PCI register space */
	struct address_map __iomem *regs;

	/* Registry parameters */
	u8 wanted_flow;		/* Flow we want for 802.3x flow control */
	u32 registry_jumbo_packet;	/* Max supported ethernet packet size */

	/* Derived from the registry: */
	u8 flowcontrol;		/* flow control validated by the far-end */

	/* Minimize init-time */
	struct timer_list error_timer;

	/* variable putting the phy into coma mode when booting up with no
	 * cable plugged in after 5 seconds
	 */
	u8 boot_coma;

	/* Next two used to save power information at power down. This
	 * information will be used during power up to set up parts of Power
	 * Management in JAGCore
	 */
	u16 pdown_speed;
	u8 pdown_duplex;

	/* Tx Memory Variables */
	struct tx_ring tx_ring;

	/* Rx Memory Variables */
	struct rx_ring rx_ring;

	/* Stats */
	struct ce_stats stats;

	struct net_device_stats net_stats;
};
static int eeprom_wait_ready(struct pci_dev *pdev, u32 *status)
{
	u32 reg;
	int i;

	/* 1. Check LBCIF Status Register for bits 6 & 3:2 all equal to 0 and
	 * bits 7,1:0 both equal to 1, at least once after reset.
	 * Subsequent operations need only to check that bits 1:0 are equal
	 * to 1 prior to starting a single byte read/write
	 */

	for (i = 0; i < MAX_NUM_REGISTER_POLLS; i++) {
		/* Read registers grouped in DWORD1 */
		if (pci_read_config_dword(pdev, LBCIF_DWORD1_GROUP, &reg))
			return -EIO;

		/* I2C idle and Phy Queue Avail both true */
		if ((reg & 0x3000) == 0x3000) {
			if (status)
				*status = reg;
			return reg & 0xFF;
		}
	}
	return -ETIMEDOUT;
}

/* eeprom_write - Write a byte to the ET1310's EEPROM
 * @adapter: pointer to our private adapter structure
 * @addr: the address to write
 * @data: the value to write
 *
 * Returns 0 for a successful write, a negative errno otherwise.
 */
static int eeprom_write(struct et131x_adapter *adapter, u32 addr, u8 data)
{
	struct pci_dev *pdev = adapter->pdev;
	int index = 0;
	int retries;
	int err = 0;
	int i2c_wack = 0;
	int writeok = 0;
	u32 status;
	u32 val = 0;

	/* For an EEPROM, an I2C single byte write is defined as a START
	 * condition followed by the device address, EEPROM address, one byte
	 * of data and a STOP condition. The STOP condition will trigger the
	 * EEPROM's internally timed write cycle to the nonvolatile memory.
	 * All inputs are disabled during this write cycle and the EEPROM will
	 * not respond to any access until the internal write is complete.
	 */

	err = eeprom_wait_ready(pdev, NULL);
	if (err)
		return err;

	/* 2. Write to the LBCIF Control Register:  bit 7=1, bit 6=1, bit 3=0,
	 * and bits 1:0 both =0.  Bit 5 should be set according to the
	 * type of EEPROM being accessed (1=two byte addressing, 0=one
	 * byte addressing).
	 */
	if (pci_write_config_byte(pdev, LBCIF_CONTROL_REGISTER,
			LBCIF_CONTROL_LBCIF_ENABLE | LBCIF_CONTROL_I2C_WRITE))
		return -EIO;

	i2c_wack = 1;

	/* Prepare EEPROM address for Step 3 */

	for (retries = 0; retries < MAX_NUM_WRITE_RETRIES; retries++) {
		/* Write the address to the LBCIF Address Register */
		if (pci_write_config_dword(pdev, LBCIF_ADDRESS_REGISTER, addr))
			break;
		/* Write the data to the LBCIF Data Register (the I2C write
		 * will begin).
		 */
		if (pci_write_config_byte(pdev, LBCIF_DATA_REGISTER, data))
			break;
		/* Monitor bit 1:0 of the LBCIF Status Register.  When bits
		 * 1:0 are both equal to 1, the I2C write has completed and the
		 * internal write cycle of the EEPROM is about to start.
		 * (bits 1:0 = 01 is a legal state while waiting from both
		 * equal to 1, but bits 1:0 = 10 is invalid and implies that
		 * something is broken).
		 */
		err = eeprom_wait_ready(pdev, &status);
		if (err < 0)
			return 0;

		/* Check bit 3 of the LBCIF Status Register.  If equal to 1,
		 * an error has occurred. Don't break here if we are revision
		 * 1, this is so we do a blind write for load bug.
		 */
		if ((status & LBCIF_STATUS_GENERAL_ERROR)
			&& adapter->pdev->revision == 0)
			break;

		/* Check bit 2 of the LBCIF Status Register.  If equal to 1 an
		 * ACK error has occurred on the address phase of the write.
		 * This could be due to an actual hardware failure or the
		 * EEPROM may still be in its internal write cycle from a
		 * previous write. This write operation was ignored and must be
		 * repeated later.
		 */
		if (status & LBCIF_STATUS_ACK_ERROR) {
			/* This could be due to an actual hardware failure
			 * or the EEPROM may still be in its internal write
			 * cycle from a previous write. This write operation
			 * was ignored and must be repeated later.
			 */
			udelay(10);
			continue;
		}

		writeok = 1;
		break;
	}

	/* Set bit 6 of the LBCIF Control Register = 0. */
	udelay(10);

	while (i2c_wack) {
		if (pci_write_config_byte(pdev, LBCIF_CONTROL_REGISTER,
					  LBCIF_CONTROL_LBCIF_ENABLE))
			writeok = 0;

		/* Do read until internal ACK_ERROR goes away meaning write
		 * completed
		 */
		do {
			pci_write_config_dword(pdev,
					       LBCIF_ADDRESS_REGISTER,
					       addr);
			do {
				pci_read_config_dword(pdev,
						      LBCIF_DATA_REGISTER,
						      &val);
			} while ((val & 0x00010000) == 0);
		} while (val & 0x00040000);

		if ((val & 0xFF00) != 0xC000 || index == 10000)
			break;
		index++;
	}
	return writeok ? 0 : -EIO;
}

/* eeprom_read - Read a byte from the ET1310's EEPROM
 * @adapter: pointer to our private adapter structure
 * @addr: the address from which to read
 * @pdata: a pointer to a byte in which to store the value of the read
 *
 * Returns 0 for a successful read, a negative errno otherwise.
 */
static int eeprom_read(struct et131x_adapter *adapter, u32 addr, u8 *pdata)
{
	struct pci_dev *pdev = adapter->pdev;
	int err;
	u32 status;

	/* A single byte read is similar to the single byte write, with the
	 * exception of the data flow:
	 */

	err = eeprom_wait_ready(pdev, NULL);
	if (err)
		return err;
	/* Write to the LBCIF Control Register:  bit 7=1, bit 6=0, bit 3=0,
	 * and bits 1:0 both =0.  Bit 5 should be set according to the type
	 * of EEPROM being accessed (1=two byte addressing, 0=one byte
	 * addressing).
	 */
	if (pci_write_config_byte(pdev, LBCIF_CONTROL_REGISTER,
				  LBCIF_CONTROL_LBCIF_ENABLE))
		return -EIO;
	/* Write the address to the LBCIF Address Register (I2C read will
	 * begin).
	 */
	if (pci_write_config_dword(pdev, LBCIF_ADDRESS_REGISTER, addr))
		return -EIO;
	/* Monitor bit 0 of the LBCIF Status Register.  When = 1, I2C read
	 * is complete. (if bit 1 = 1 and bit 0 stays = 0, a hardware failure
	 * has occurred).
	 */
	err = eeprom_wait_ready(pdev, &status);
	if (err < 0)
		return err;
	/* Regardless of error status, read data byte from LBCIF Data
	 * Register.
	 */
	*pdata = err;
	/* Check bit 2 of the LBCIF Status Register.  If = 1,
	 * then an error has occurred.
	 */
	return (status & LBCIF_STATUS_ACK_ERROR) ? -EIO : 0;
}

static int et131x_init_eeprom(struct et131x_adapter *adapter)
{
	struct pci_dev *pdev = adapter->pdev;
	u8 eestatus;

	/* We first need to check the EEPROM Status code located at offset
	 * 0xB2 of config space
	 */
	pci_read_config_byte(pdev, ET1310_PCI_EEPROM_STATUS, &eestatus);

	/* THIS IS A WORKAROUND:
	 * I need to call this function twice to get my card in a
	 * LG M1 Express Dual running. I also tried a msleep before this
	 * function, because I thought there could be some time conditions,
	 * but it didn't work. Calling the whole function twice also works.
	 */
	if (pci_read_config_byte(pdev, ET1310_PCI_EEPROM_STATUS, &eestatus)) {
		dev_err(&pdev->dev,
			"Could not read PCI config space for EEPROM Status\n");
		return -EIO;
	}

	/* Determine if the error(s) we care about are present. If they are
	 * present we need to fail.
	 */
	if (eestatus & 0x4C) {
		int write_failed = 0;

		if (pdev->revision == 0x01) {
			int i;
			static const u8 eedata[4] = { 0xFE, 0x13, 0x10, 0xFF };

			/* Re-write the first 4 bytes if we have an eeprom
			 * present and the revision id is 1, this fixes the
			 * corruption seen with 1310 B Silicon
			 */
			for (i = 0; i < 3; i++)
				if (eeprom_write(adapter, i, eedata[i]) < 0)
					write_failed = 1;
		}
		if (pdev->revision != 0x01 || write_failed) {
			dev_err(&pdev->dev,
				"Fatal EEPROM Status Error - 0x%04x\n",
				eestatus);

			/* This error could mean that there was an error
			 * reading the eeprom or that the eeprom doesn't exist.
			 * We will treat each case the same and not try to
			 * gather additional information that normally would
			 * come from the eeprom, like MAC Address
			 */
			adapter->has_eeprom = 0;
			return -EIO;
		}
	}
	adapter->has_eeprom = 1;

	/* Read the EEPROM for information regarding LED behavior. Refer to
	 * ET1310_phy.c, et131x_xcvr_init(), for its use.
	 */
	eeprom_read(adapter, 0x70, &adapter->eeprom_data[0]);
	eeprom_read(adapter, 0x71, &adapter->eeprom_data[1]);

	if (adapter->eeprom_data[0] != 0xcd)
		/* Disable all optional features */
		adapter->eeprom_data[1] = 0x00;

	return 0;
}

/* et131x_rx_dma_enable - re-start of Rx_DMA on the ET1310.
 * @adapter: pointer to our adapter structure
 */
static void et131x_rx_dma_enable(struct et131x_adapter *adapter)
{
	/* Setup the receive dma configuration register for normal operation */
	u32 csr = ET_RXDMA_CSR_FBR1_ENABLE;

	if (adapter->rx_ring.fbr[1]->buffsize == 4096)
		csr |= ET_RXDMA_CSR_FBR1_SIZE_LO;
	else if (adapter->rx_ring.fbr[1]->buffsize == 8192)
		csr |= ET_RXDMA_CSR_FBR1_SIZE_HI;
	else if (adapter->rx_ring.fbr[1]->buffsize == 16384)
		csr |= ET_RXDMA_CSR_FBR1_SIZE_LO | ET_RXDMA_CSR_FBR1_SIZE_HI;

	csr |= ET_RXDMA_CSR_FBR0_ENABLE;
	if (adapter->rx_ring.fbr[0]->buffsize == 256)
		csr |= ET_RXDMA_CSR_FBR0_SIZE_LO;
	else if (adapter->rx_ring.fbr[0]->buffsize == 512)
		csr |= ET_RXDMA_CSR_FBR0_SIZE_HI;
	else if (adapter->rx_ring.fbr[0]->buffsize == 1024)
		csr |= ET_RXDMA_CSR_FBR0_SIZE_LO | ET_RXDMA_CSR_FBR0_SIZE_HI;
	writel(csr, &adapter->regs->rxdma.csr);

	csr = readl(&adapter->regs->rxdma.csr);
	if (csr & ET_RXDMA_CSR_HALT_STATUS) {
		udelay(5);
		csr = readl(&adapter->regs->rxdma.csr);
		if (csr & ET_RXDMA_CSR_HALT_STATUS) {
			dev_err(&adapter->pdev->dev,
				"RX Dma failed to exit halt state. CSR 0x%08x\n",
				csr);
		}
	}
}

/* et131x_rx_dma_disable - Stop of Rx_DMA on the ET1310
 * @adapter: pointer to our adapter structure
 */
static void et131x_rx_dma_disable(struct et131x_adapter *adapter)
{
	u32 csr;

	/* Setup the receive dma configuration register */
	writel(ET_RXDMA_CSR_HALT | ET_RXDMA_CSR_FBR1_ENABLE,
	       &adapter->regs->rxdma.csr);
	csr = readl(&adapter->regs->rxdma.csr);
	if (!(csr & ET_RXDMA_CSR_HALT_STATUS)) {
		udelay(5);
		csr = readl(&adapter->regs->rxdma.csr);
		if (!(csr & ET_RXDMA_CSR_HALT_STATUS))
			dev_err(&adapter->pdev->dev,
				"RX Dma failed to enter halt state. CSR 0x%08x\n",
				csr);
	}
}

/* et131x_tx_dma_enable - re-start of Tx_DMA on the ET1310.
 * @adapter: pointer to our adapter structure
 *
 * Mainly used after a return to the D0 (full-power) state from a lower state.
 */
static void et131x_tx_dma_enable(struct et131x_adapter *adapter)
{
	/* Setup the transmit dma configuration register for normal
	 * operation
	 */
	writel(ET_TXDMA_SNGL_EPKT | (PARM_DMA_CACHE_DEF << ET_TXDMA_CACHE_SHIFT),
	       &adapter->regs->txdma.csr);
}

static inline void add_10bit(u32 *v, int n)
{
	*v = INDEX10(*v + n) | (*v & ET_DMA10_WRAP);
}

static inline void add_12bit(u32 *v, int n)
{
	*v = INDEX12(*v + n) | (*v & ET_DMA12_WRAP);
}
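
/* Worked example (illustrative, assuming ET_DMA10_WRAP is the bit just
 * above the ten index bits): add_10bit() adds n to the low ten bits of
 * *v modulo 1024 while preserving the existing wrap flag, e.g.
 *
 *	u32 v = 1023;		// index 1023, wrap flag clear
 *	add_10bit(&v, 1);	// index wraps to 0, wrap flag unchanged
 *
 * Toggling the wrap flag on wrap-around is left to the caller;
 * add_12bit() behaves the same way with a 12-bit index.
 */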

/* et1310_config_mac_regs1 - Initialize the first part of MAC regs
 * @adapter: pointer to our adapter structure
 */
static void et1310_config_mac_regs1(struct et131x_adapter *adapter)
{
	struct mac_regs __iomem *macregs = &adapter->regs->mac;
	u32 station1;
	u32 station2;
	u32 ipg;

	/* First we need to reset everything. Write to MAC configuration
	 * register 1 to perform reset.
	 */
	writel(ET_MAC_CFG1_SOFT_RESET | ET_MAC_CFG1_SIM_RESET |
	       ET_MAC_CFG1_RESET_RXMC | ET_MAC_CFG1_RESET_TXMC |
	       ET_MAC_CFG1_RESET_RXFUNC | ET_MAC_CFG1_RESET_TXFUNC,
	       &macregs->cfg1);

	/* Next let's configure the MAC Inter-packet gap register */
	ipg = 0x38005860;	/* IPG1 0x38, IPG2 0x58, B2B 0x60 */
	ipg |= 0x50 << 8;	/* ifg enforce 0x50 */
	writel(ipg, &macregs->ipg);

	/* Next let's configure the MAC Half Duplex register */
	/* BEB trunc 0xA, Ex Defer, Rexmit 0xF, Coll 0x37 */
	writel(0x00A1F037, &macregs->hfdp);

	/* Next let's configure the MAC Interface Control register */
	writel(0, &macregs->if_ctrl);

	/* Let's move on to setting up the mii management configuration */
	writel(ET_MAC_MIIMGMT_CLK_RST, &macregs->mii_mgmt_cfg);

	/* Next let's configure the MAC Station Address register. These
	 * values are read from the EEPROM during initialization and stored
	 * in the adapter structure. We write what is stored in the adapter
	 * structure to the MAC Station Address registers high and low. This
	 * station address is used for generating and checking pause control
	 * packets.
	 */
	station2 = (adapter->addr[1] << ET_MAC_STATION_ADDR2_OC2_SHIFT) |
		   (adapter->addr[0] << ET_MAC_STATION_ADDR2_OC1_SHIFT);
	station1 = (adapter->addr[5] << ET_MAC_STATION_ADDR1_OC6_SHIFT) |
		   (adapter->addr[4] << ET_MAC_STATION_ADDR1_OC5_SHIFT) |
		   (adapter->addr[3] << ET_MAC_STATION_ADDR1_OC4_SHIFT) |
		    adapter->addr[2];
	writel(station1, &macregs->station_addr_1);
	writel(station2, &macregs->station_addr_2);

	/* Max ethernet packet in bytes that will be passed by the mac without
	 * being truncated. Allow the MAC to pass 4 more than our max packet
	 * size. This is 4 for the Ethernet CRC.
	 *
	 * Packets larger than (registry_jumbo_packet) that do not contain a
	 * VLAN ID will be dropped by the Rx function.
	 */
	writel(adapter->registry_jumbo_packet + 4, &macregs->max_fm_len);

	/* clear out MAC config reset */
	writel(0, &macregs->cfg1);
}

/* et1310_config_mac_regs2 - Initialize the second part of MAC regs
 * @adapter: pointer to our adapter structure
 */
static void et1310_config_mac_regs2(struct et131x_adapter *adapter)
{
	int32_t delay = 0;
	struct mac_regs __iomem *mac = &adapter->regs->mac;
	struct phy_device *phydev = adapter->phydev;
	u32 cfg1;
	u32 cfg2;
	u32 ifctrl;
	u32 ctl;

	ctl = readl(&adapter->regs->txmac.ctl);
	cfg1 = readl(&mac->cfg1);
	cfg2 = readl(&mac->cfg2);
	ifctrl = readl(&mac->if_ctrl);

	/* Set up the if mode bits */
	cfg2 &= ~ET_MAC_CFG2_IFMODE_MASK;
	if (phydev && phydev->speed == SPEED_1000) {
		cfg2 |= ET_MAC_CFG2_IFMODE_1000;
		/* Phy mode bit */
		ifctrl &= ~ET_MAC_IFCTRL_PHYMODE;
	} else {
		cfg2 |= ET_MAC_CFG2_IFMODE_100;
		ifctrl |= ET_MAC_IFCTRL_PHYMODE;
	}

	/* We need to enable Rx/Tx */
	cfg1 |= ET_MAC_CFG1_RX_ENABLE | ET_MAC_CFG1_TX_ENABLE |
		ET_MAC_CFG1_TX_FLOW;
	/* Initialize loop back to off */
	cfg1 &= ~(ET_MAC_CFG1_LOOPBACK | ET_MAC_CFG1_RX_FLOW);
	if (adapter->flowcontrol == FLOW_RXONLY ||
	    adapter->flowcontrol == FLOW_BOTH)
		cfg1 |= ET_MAC_CFG1_RX_FLOW;
	writel(cfg1, &mac->cfg1);

	/* Now we need to initialize the MAC Configuration 2 register */
	/* preamble 7, check length, huge frame off, pad crc, crc enable
	 * full duplex off
	 */
	cfg2 |= 0x7 << ET_MAC_CFG2_PREAMBLE_SHIFT;
	cfg2 |= ET_MAC_CFG2_IFMODE_LEN_CHECK;
	cfg2 |= ET_MAC_CFG2_IFMODE_PAD_CRC;
	cfg2 |= ET_MAC_CFG2_IFMODE_CRC_ENABLE;
	cfg2 &= ~ET_MAC_CFG2_IFMODE_HUGE_FRAME;
	cfg2 &= ~ET_MAC_CFG2_IFMODE_FULL_DPLX;

	/* Turn on duplex if needed */
	if (phydev && phydev->duplex == DUPLEX_FULL)
		cfg2 |= ET_MAC_CFG2_IFMODE_FULL_DPLX;

	ifctrl &= ~ET_MAC_IFCTRL_GHDMODE;
	if (phydev && phydev->duplex == DUPLEX_HALF)
		ifctrl |= ET_MAC_IFCTRL_GHDMODE;

	writel(ifctrl, &mac->if_ctrl);
	writel(cfg2, &mac->cfg2);

	do {
		udelay(10);
		delay++;
		cfg1 = readl(&mac->cfg1);
	} while ((cfg1 & ET_MAC_CFG1_WAIT) != ET_MAC_CFG1_WAIT && delay < 100);

	if (delay == 100) {
		dev_warn(&adapter->pdev->dev,
			 "Syncd bits did not respond correctly cfg1 word 0x%08x\n",
			 cfg1);
	}

	/* Enable txmac */
	ctl |= ET_TX_CTRL_TXMAC_ENABLE | ET_TX_CTRL_FC_DISABLE;
	writel(ctl, &adapter->regs->txmac.ctl);

	/* Ready to start the RXDMA/TXDMA engine */
	if (adapter->flags & FMP_ADAPTER_LOWER_POWER) {
		et131x_rx_dma_enable(adapter);
		et131x_tx_dma_enable(adapter);
	}
}

/* et1310_in_phy_coma - check if the device is in phy coma
 * @adapter: pointer to our adapter structure
 *
 * Returns 0 if the device is not in phy coma, 1 if it is in phy coma
 */
static int et1310_in_phy_coma(struct et131x_adapter *adapter)
{
	u32 pmcsr;

	pmcsr = readl(&adapter->regs->global.pm_csr);

	return ET_PM_PHY_SW_COMA & pmcsr ? 1 : 0;
}

static void et1310_setup_device_for_multicast(struct et131x_adapter *adapter)
{
	struct rxmac_regs __iomem *rxmac = &adapter->regs->rxmac;
	u32 hash1 = 0;
	u32 hash2 = 0;
	u32 hash3 = 0;
	u32 hash4 = 0;
	u32 pm_csr;

	/* If ET131X_PACKET_TYPE_MULTICAST is specified, then we provision
	 * the multi-cast LIST. If it is NOT specified (and "ALL" is not
	 * specified), then we should pass NO multi-cast addresses to the
	 * driver.
	 */
	if (adapter->packet_filter & ET131X_PACKET_TYPE_MULTICAST) {
		int i;

		/* Loop through our multicast array and set up the device */
		for (i = 0; i < adapter->multicast_addr_count; i++) {
			u32 result;

			result = ether_crc(6, adapter->multicast_list[i]);

			result = (result & 0x3F800000) >> 23;

			if (result < 32) {
				hash1 |= (1 << result);
			} else if ((31 < result) && (result < 64)) {
				result -= 32;
				hash2 |= (1 << result);
			} else if ((63 < result) && (result < 96)) {
				result -= 64;
				hash3 |= (1 << result);
			} else {
				result -= 96;
				hash4 |= (1 << result);
			}
		}
	}

	/* Write out the new hash to the device */
	pm_csr = readl(&adapter->regs->global.pm_csr);
	if (!et1310_in_phy_coma(adapter)) {
		writel(hash1, &rxmac->multi_hash1);
		writel(hash2, &rxmac->multi_hash2);
		writel(hash3, &rxmac->multi_hash3);
		writel(hash4, &rxmac->multi_hash4);
	}
}
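
/* Worked example (illustrative): ether_crc() yields a 32-bit CRC of the
 * multicast address, and (crc & 0x3F800000) >> 23 keeps bits 23-29, a
 * value of 0-127 that selects one bit across the four 32-bit hash
 * registers. For instance, a CRC of 0x1A800000 gives
 * (0x1A800000 & 0x3F800000) >> 23 = 53; since 32 <= 53 < 64, that sets
 * bit 53 - 32 = 21 of multi_hash2.
 */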

static void et1310_setup_device_for_unicast(struct et131x_adapter *adapter)
{
	struct rxmac_regs __iomem *rxmac = &adapter->regs->rxmac;
	u32 uni_pf1;
	u32 uni_pf2;
	u32 uni_pf3;
	u32 pm_csr;

	/* Set up unicast packet filter reg 3 to be the first two octets of
	 * the MAC address for both addresses
	 *
	 * Set up unicast packet filter reg 2 to be the octets 2 - 5 of the
	 * MAC address for second address
	 *
	 * Set up unicast packet filter reg 1 to be the octets 2 - 5 of the
	 * MAC address for first address
	 */
	uni_pf3 = (adapter->addr[0] << ET_RX_UNI_PF_ADDR2_1_SHIFT) |
		  (adapter->addr[1] << ET_RX_UNI_PF_ADDR2_2_SHIFT) |
		  (adapter->addr[0] << ET_RX_UNI_PF_ADDR1_1_SHIFT) |
		   adapter->addr[1];

	uni_pf2 = (adapter->addr[2] << ET_RX_UNI_PF_ADDR2_3_SHIFT) |
		  (adapter->addr[3] << ET_RX_UNI_PF_ADDR2_4_SHIFT) |
		  (adapter->addr[4] << ET_RX_UNI_PF_ADDR2_5_SHIFT) |
		   adapter->addr[5];

	uni_pf1 = (adapter->addr[2] << ET_RX_UNI_PF_ADDR1_3_SHIFT) |
		  (adapter->addr[3] << ET_RX_UNI_PF_ADDR1_4_SHIFT) |
		  (adapter->addr[4] << ET_RX_UNI_PF_ADDR1_5_SHIFT) |
		   adapter->addr[5];

	pm_csr = readl(&adapter->regs->global.pm_csr);
	if (!et1310_in_phy_coma(adapter)) {
		writel(uni_pf1, &rxmac->uni_pf_addr1);
		writel(uni_pf2, &rxmac->uni_pf_addr2);
		writel(uni_pf3, &rxmac->uni_pf_addr3);
	}
}

static void et1310_config_rxmac_regs(struct et131x_adapter *adapter)
{
	struct rxmac_regs __iomem *rxmac = &adapter->regs->rxmac;
	struct phy_device *phydev = adapter->phydev;
	u32 sa_lo;
	u32 sa_hi = 0;
	u32 pf_ctrl = 0;

	/* Disable the MAC while it is being configured (also disable WOL) */
	writel(0x8, &rxmac->ctrl);

	/* Initialize WOL to disabled. */
	writel(0, &rxmac->crc0);
	writel(0, &rxmac->crc12);
	writel(0, &rxmac->crc34);

	/* We need to set the WOL mask0 - mask4 next. We initialize it to
	 * its default values of 0x00000000 because there are no WOL masks
	 * as of this time.
	 */
	writel(0, &rxmac->mask0_word0);
	writel(0, &rxmac->mask0_word1);
	writel(0, &rxmac->mask0_word2);
	writel(0, &rxmac->mask0_word3);

	writel(0, &rxmac->mask1_word0);
	writel(0, &rxmac->mask1_word1);
	writel(0, &rxmac->mask1_word2);
	writel(0, &rxmac->mask1_word3);

	writel(0, &rxmac->mask2_word0);
	writel(0, &rxmac->mask2_word1);
	writel(0, &rxmac->mask2_word2);
	writel(0, &rxmac->mask2_word3);

	writel(0, &rxmac->mask3_word0);
	writel(0, &rxmac->mask3_word1);
	writel(0, &rxmac->mask3_word2);
	writel(0, &rxmac->mask3_word3);

	writel(0, &rxmac->mask4_word0);
	writel(0, &rxmac->mask4_word1);
	writel(0, &rxmac->mask4_word2);
	writel(0, &rxmac->mask4_word3);

	/* Let's set up the WOL Source Address */
	sa_lo = (adapter->addr[2] << ET_RX_WOL_LO_SA3_SHIFT) |
		(adapter->addr[3] << ET_RX_WOL_LO_SA4_SHIFT) |
		(adapter->addr[4] << ET_RX_WOL_LO_SA5_SHIFT) |
		 adapter->addr[5];
	writel(sa_lo, &rxmac->sa_lo);

	sa_hi = (u32) (adapter->addr[0] << ET_RX_WOL_HI_SA1_SHIFT) |
		       adapter->addr[1];
	writel(sa_hi, &rxmac->sa_hi);

	/* Disable all Packet Filtering */
	writel(0, &rxmac->pf_ctrl);

	/* Let's initialize the Unicast Packet filtering address */
	if (adapter->packet_filter & ET131X_PACKET_TYPE_DIRECTED) {
		et1310_setup_device_for_unicast(adapter);
		pf_ctrl |= ET_RX_PFCTRL_UNICST_FILTER_ENABLE;
	} else {
		writel(0, &rxmac->uni_pf_addr1);
		writel(0, &rxmac->uni_pf_addr2);
		writel(0, &rxmac->uni_pf_addr3);
	}

	/* Let's initialize the Multicast hash */
	if (!(adapter->packet_filter & ET131X_PACKET_TYPE_ALL_MULTICAST)) {
		pf_ctrl |= ET_RX_PFCTRL_MLTCST_FILTER_ENABLE;
		et1310_setup_device_for_multicast(adapter);
	}

	/* Runt packet filtering. Didn't work in version A silicon. */
	pf_ctrl |= (NIC_MIN_PACKET_SIZE + 4) << ET_RX_PFCTRL_MIN_PKT_SZ_SHIFT;
	pf_ctrl |= ET_RX_PFCTRL_FRAG_FILTER_ENABLE;

	if (adapter->registry_jumbo_packet > 8192)
		/* In order to transmit jumbo packets greater than 8k, the
		 * FIFO between RxMAC and RxDMA needs to be reduced in size
		 * to (16k - Jumbo packet size). In order to implement this,
		 * we must use "cut through" mode in the RxMAC, which chops
		 * packets down into segments which are (max_size * 16). In
		 * this case we selected 256 bytes, since this is the size of
		 * the PCI-Express TLP's that the 1310 uses.
		 *
		 * seg_en on, fc_en off, size 0x10
		 */
		writel(0x41, &rxmac->mcif_ctrl_max_seg);
	else
		writel(0, &rxmac->mcif_ctrl_max_seg);

	/* Initialize the MCIF water marks */
	writel(0, &rxmac->mcif_water_mark);

	/* Initialize the MIF control */
	writel(0, &rxmac->mif_ctrl);

	/* Initialize the Space Available Register */
	writel(0, &rxmac->space_avail);

	/* Initialize the mif_ctrl register
	 * bit 3: Receive code error. One or more nibbles were signaled as
	 *	  errors during the reception of the packet. Clear this
	 *	  bit in Gigabit, set it in 100Mbit. This was derived
	 *	  experimentally at UNH.
	 * bit 4: Receive CRC error. The packet's CRC did not match the
	 *	  internally generated CRC.
	 * bit 5: Receive length check error. Indicates that frame length
	 *	  field value in the packet does not match the actual data
	 *	  byte length and is not a type field.
	 * bit 16: Receive frame truncated.
	 * bit 17: Drop packet enable
	 */
	if (phydev && phydev->speed == SPEED_100)
		writel(0x30038, &rxmac->mif_ctrl);
	else
		writel(0x30030, &rxmac->mif_ctrl);

	/* Finally we initialize RxMac to be enabled & WOL disabled. Packet
	 * filter is always enabled since it is where the runt packets are
	 * supposed to be dropped. For version A silicon, runt packet
	 * dropping doesn't work, so it is disabled in the pf_ctrl register,
	 * but we still leave the packet filter on.
	 */
	writel(pf_ctrl, &rxmac->pf_ctrl);
	writel(ET_RX_CTRL_RXMAC_ENABLE | ET_RX_CTRL_WOL_DISABLE, &rxmac->ctrl);
}

static void et1310_config_txmac_regs(struct et131x_adapter *adapter)
{
	struct txmac_regs __iomem *txmac = &adapter->regs->txmac;

	/* We need to update the Control Frame Parameters
	 * cfpt - control frame pause timer set to 64 (0x40)
	 * cfep - control frame extended pause timer set to 0x0
	 */
	if (adapter->flowcontrol == FLOW_NONE)
		writel(0, &txmac->cf_param);
	else
		writel(0x40, &txmac->cf_param);
}

static void et1310_config_macstat_regs(struct et131x_adapter *adapter)
{
	struct macstat_regs __iomem *macstat =
		&adapter->regs->macstat;

	/* Next we need to initialize all the macstat registers to zero on
	 * the device.
	 */
	writel(0, &macstat->txrx_0_64_byte_frames);
	writel(0, &macstat->txrx_65_127_byte_frames);
	writel(0, &macstat->txrx_128_255_byte_frames);
	writel(0, &macstat->txrx_256_511_byte_frames);
	writel(0, &macstat->txrx_512_1023_byte_frames);
	writel(0, &macstat->txrx_1024_1518_byte_frames);
	writel(0, &macstat->txrx_1519_1522_gvln_frames);

	writel(0, &macstat->rx_bytes);
	writel(0, &macstat->rx_packets);
	writel(0, &macstat->rx_fcs_errs);
	writel(0, &macstat->rx_multicast_packets);
	writel(0, &macstat->rx_broadcast_packets);
	writel(0, &macstat->rx_control_frames);
	writel(0, &macstat->rx_pause_frames);
	writel(0, &macstat->rx_unknown_opcodes);
	writel(0, &macstat->rx_align_errs);
	writel(0, &macstat->rx_frame_len_errs);
	writel(0, &macstat->rx_code_errs);
	writel(0, &macstat->rx_carrier_sense_errs);
	writel(0, &macstat->rx_undersize_packets);
	writel(0, &macstat->rx_oversize_packets);
	writel(0, &macstat->rx_fragment_packets);
	writel(0, &macstat->rx_jabbers);
	writel(0, &macstat->rx_drops);

	writel(0, &macstat->tx_bytes);
	writel(0, &macstat->tx_packets);
	writel(0, &macstat->tx_multicast_packets);
	writel(0, &macstat->tx_broadcast_packets);
	writel(0, &macstat->tx_pause_frames);
	writel(0, &macstat->tx_deferred);
	writel(0, &macstat->tx_excessive_deferred);
	writel(0, &macstat->tx_single_collisions);
	writel(0, &macstat->tx_multiple_collisions);
	writel(0, &macstat->tx_late_collisions);
	writel(0, &macstat->tx_excessive_collisions);
	writel(0, &macstat->tx_total_collisions);
	writel(0, &macstat->tx_pause_honored_frames);
	writel(0, &macstat->tx_drops);
	writel(0, &macstat->tx_jabbers);
	writel(0, &macstat->tx_fcs_errs);
	writel(0, &macstat->tx_control_frames);
	writel(0, &macstat->tx_oversize_frames);
	writel(0, &macstat->tx_undersize_frames);
	writel(0, &macstat->tx_fragments);
	writel(0, &macstat->carry_reg1);
	writel(0, &macstat->carry_reg2);

	/* Unmask any counters that we want to track the overflow of.
	 * Initially this will be all counters. It may become clear later
	 * that we do not need to track all counters.
	 */
	writel(0xFFFFBE32, &macstat->carry_reg1_mask);
	writel(0xFFFE7E8B, &macstat->carry_reg2_mask);
}

/* et131x_phy_mii_read - Read from the PHY through the MII Interface on the MAC
 * @adapter: pointer to our private adapter structure
 * @addr: the address of the transceiver
 * @reg: the register to read
 * @value: pointer to a 16-bit value in which the value will be stored
 *
 * Returns 0 on success, errno on failure (as defined in errno.h)
 */
static int et131x_phy_mii_read(struct et131x_adapter *adapter, u8 addr,
			       u8 reg, u16 *value)
{
	struct mac_regs __iomem *mac = &adapter->regs->mac;
	int status = 0;
	u32 delay = 0;
	u32 mii_addr;
	u32 mii_cmd;
	u32 mii_indicator;

	/* Save a local copy of the registers we are dealing with so we can
	 * set them back
	 */
	mii_addr = readl(&mac->mii_mgmt_addr);
	mii_cmd = readl(&mac->mii_mgmt_cmd);

	/* Stop the current operation */
	writel(0, &mac->mii_mgmt_cmd);

	/* Set up the register we need to read from on the correct PHY */
	writel(ET_MAC_MII_ADDR(addr, reg), &mac->mii_mgmt_addr);

	writel(0x1, &mac->mii_mgmt_cmd);

	do {
		udelay(50);
		delay++;
		mii_indicator = readl(&mac->mii_mgmt_indicator);
	} while ((mii_indicator & ET_MAC_MGMT_WAIT) && delay < 50);

	/* If we hit the max delay, we could not read the register */
	if (delay == 50) {
		dev_warn(&adapter->pdev->dev,
			 "reg 0x%08x could not be read\n", reg);
		dev_warn(&adapter->pdev->dev, "status is 0x%08x\n",
			 mii_indicator);

		status = -EIO;
	}

	/* Read the value and return it to the caller; it is only valid if
	 * the read did not time out above
	 */
	*value = readl(&mac->mii_mgmt_stat) & ET_MAC_MIIMGMT_STAT_PHYCRTL_MASK;

	/* Stop the read operation */
	writel(0, &mac->mii_mgmt_cmd);

	/* set the registers we touched back to the state at which we entered
	 * this function
	 */
	writel(mii_addr, &mac->mii_mgmt_addr);
	writel(mii_cmd, &mac->mii_mgmt_cmd);

	return status;
}

static int et131x_mii_read(struct et131x_adapter *adapter, u8 reg, u16 *value)
{
	struct phy_device *phydev = adapter->phydev;

	if (!phydev)
		return -EIO;

	return et131x_phy_mii_read(adapter, phydev->addr, reg, value);
}

/* et131x_mii_write - Write to a PHY reg through the MII interface of the MAC
 * @adapter: pointer to our private adapter structure
 * @reg: the register to write to
 * @value: 16-bit value to write
 *
 * FIXME: one caller in netdev still
 *
 * Return 0 on success, errno on failure (as defined in errno.h)
 */
static int et131x_mii_write(struct et131x_adapter *adapter, u8 reg, u16 value)
{
	struct mac_regs __iomem *mac = &adapter->regs->mac;
	struct phy_device *phydev = adapter->phydev;
	int status = 0;
	u8 addr;
	u32 delay = 0;
	u32 mii_addr;
	u32 mii_cmd;
	u32 mii_indicator;

	if (!phydev)
		return -EIO;

	addr = phydev->addr;

	/* Save a local copy of the registers we are dealing with so we can
	 * set them back
	 */
	mii_addr = readl(&mac->mii_mgmt_addr);
	mii_cmd = readl(&mac->mii_mgmt_cmd);

	/* Stop the current operation */
	writel(0, &mac->mii_mgmt_cmd);

	/* Set up the register we need to write to on the correct PHY */
	writel(ET_MAC_MII_ADDR(addr, reg), &mac->mii_mgmt_addr);

	/* Add the value to write to the registers to the mac */
	writel(value, &mac->mii_mgmt_ctrl);

	do {
		udelay(50);
		delay++;
		mii_indicator = readl(&mac->mii_mgmt_indicator);
	} while ((mii_indicator & ET_MAC_MGMT_BUSY) && delay < 100);

	/* If we hit the max delay, we could not write the register */
	if (delay == 100) {
		u16 tmp;

		dev_warn(&adapter->pdev->dev,
			 "reg 0x%08x could not be written", reg);
		dev_warn(&adapter->pdev->dev, "status is 0x%08x\n",
			 mii_indicator);
		dev_warn(&adapter->pdev->dev, "command is 0x%08x\n",
			 readl(&mac->mii_mgmt_cmd));

		et131x_mii_read(adapter, reg, &tmp);

		status = -EIO;
	}
	/* Stop the write operation */
	writel(0, &mac->mii_mgmt_cmd);

	/* set the registers we touched back to the state at which we entered
	 * this function
	 */
	writel(mii_addr, &mac->mii_mgmt_addr);
	writel(mii_cmd, &mac->mii_mgmt_cmd);

	return status;
}

/* Still used from _mac for BIT_READ */
static void et1310_phy_access_mii_bit(struct et131x_adapter *adapter,
				      u16 action, u16 regnum, u16 bitnum,
				      u8 *value)
{
	u16 reg;
	u16 mask = 1 << bitnum;

	/* Read the requested register */
	et131x_mii_read(adapter, regnum, &reg);

	switch (action) {
	case TRUEPHY_BIT_READ:
		*value = (reg & mask) >> bitnum;
		break;

	case TRUEPHY_BIT_SET:
		et131x_mii_write(adapter, regnum, reg | mask);
		break;

	case TRUEPHY_BIT_CLEAR:
		et131x_mii_write(adapter, regnum, reg & ~mask);
		break;

	default:
		break;
	}
}

static void et1310_config_flow_control(struct et131x_adapter *adapter)
{
	struct phy_device *phydev = adapter->phydev;

	if (phydev->duplex == DUPLEX_HALF) {
		adapter->flowcontrol = FLOW_NONE;
	} else {
		char remote_pause, remote_async_pause;

		et1310_phy_access_mii_bit(adapter,
					  TRUEPHY_BIT_READ, 5, 10,
					  &remote_pause);
		et1310_phy_access_mii_bit(adapter,
					  TRUEPHY_BIT_READ, 5, 11,
					  &remote_async_pause);

		if ((remote_pause == TRUEPHY_BIT_SET) &&
		    (remote_async_pause == TRUEPHY_BIT_SET)) {
			adapter->flowcontrol = adapter->wanted_flow;
		} else if ((remote_pause == TRUEPHY_BIT_SET) &&
			   (remote_async_pause == TRUEPHY_BIT_CLEAR)) {
			if (adapter->wanted_flow == FLOW_BOTH)
				adapter->flowcontrol = FLOW_BOTH;
			else
				adapter->flowcontrol = FLOW_NONE;
		} else if ((remote_pause == TRUEPHY_BIT_CLEAR) &&
			   (remote_async_pause == TRUEPHY_BIT_CLEAR)) {
			adapter->flowcontrol = FLOW_NONE;
		} else {	/* if (remote_pause == TRUEPHY_BIT_CLEAR &&
				 * remote_async_pause == TRUEPHY_BIT_SET)
				 */
			if (adapter->wanted_flow == FLOW_BOTH)
				adapter->flowcontrol = FLOW_RXONLY;
			else
				adapter->flowcontrol = FLOW_NONE;
		}
	}
}

/* et1310_update_macstat_host_counters - Update the local copy of the statistics
 * @adapter: pointer to the adapter structure
 */
static void et1310_update_macstat_host_counters(struct et131x_adapter *adapter)
{
	struct ce_stats *stats = &adapter->stats;
	struct macstat_regs __iomem *macstat =
		&adapter->regs->macstat;

	stats->tx_collisions += readl(&macstat->tx_total_collisions);
	stats->tx_first_collisions += readl(&macstat->tx_single_collisions);
	stats->tx_deferred += readl(&macstat->tx_deferred);
	stats->tx_excessive_collisions +=
		readl(&macstat->tx_multiple_collisions);
	stats->tx_late_collisions += readl(&macstat->tx_late_collisions);
	stats->tx_underflows += readl(&macstat->tx_undersize_frames);
	stats->tx_max_pkt_errs += readl(&macstat->tx_oversize_frames);

	stats->rx_align_errs += readl(&macstat->rx_align_errs);
	stats->rx_crc_errs += readl(&macstat->rx_code_errs);
	stats->rcvd_pkts_dropped += readl(&macstat->rx_drops);
	stats->rx_overflows += readl(&macstat->rx_oversize_packets);
	stats->rx_code_violations += readl(&macstat->rx_fcs_errs);
	stats->rx_length_errs += readl(&macstat->rx_frame_len_errs);
	stats->rx_other_errs += readl(&macstat->rx_fragment_packets);
}

/* et1310_handle_macstat_interrupt
 * @adapter: pointer to the adapter structure
 *
 * One of the MACSTAT counters has wrapped. Update the local copy of
 * the statistics held in the adapter structure, checking the "wrap"
 * bit for each counter.
 */
static void et1310_handle_macstat_interrupt(struct et131x_adapter *adapter)
{
	u32 carry_reg1;
	u32 carry_reg2;

	/* Read the interrupt bits from the register(s). These are Clear On
	 * Write.
	 */
	carry_reg1 = readl(&adapter->regs->macstat.carry_reg1);
	carry_reg2 = readl(&adapter->regs->macstat.carry_reg2);

	writel(carry_reg1, &adapter->regs->macstat.carry_reg1);
	writel(carry_reg2, &adapter->regs->macstat.carry_reg2);

	/* We need to update the host copy of all the MAC_STAT counters.
	 * For each counter, check its overflow bit. If the overflow bit is
	 * set, then increment the host version of the count by one complete
	 * revolution of the counter. This routine is called when the counter
	 * block indicates that one of the counters has wrapped.
	 */
	if (carry_reg1 & (1 << 14))
		adapter->stats.rx_code_violations += COUNTER_WRAP_16_BIT;
	if (carry_reg1 & (1 << 8))
		adapter->stats.rx_align_errs += COUNTER_WRAP_12_BIT;
	if (carry_reg1 & (1 << 7))
		adapter->stats.rx_length_errs += COUNTER_WRAP_16_BIT;
	if (carry_reg1 & (1 << 2))
		adapter->stats.rx_other_errs += COUNTER_WRAP_16_BIT;
	if (carry_reg1 & (1 << 6))
		adapter->stats.rx_crc_errs += COUNTER_WRAP_16_BIT;
	if (carry_reg1 & (1 << 3))
		adapter->stats.rx_overflows += COUNTER_WRAP_16_BIT;
	if (carry_reg1 & (1 << 0))
		adapter->stats.rcvd_pkts_dropped += COUNTER_WRAP_16_BIT;
	if (carry_reg2 & (1 << 16))
		adapter->stats.tx_max_pkt_errs += COUNTER_WRAP_12_BIT;
	if (carry_reg2 & (1 << 15))
		adapter->stats.tx_underflows += COUNTER_WRAP_12_BIT;
	if (carry_reg2 & (1 << 6))
		adapter->stats.tx_first_collisions += COUNTER_WRAP_12_BIT;
	if (carry_reg2 & (1 << 8))
		adapter->stats.tx_deferred += COUNTER_WRAP_12_BIT;
	if (carry_reg2 & (1 << 5))
		adapter->stats.tx_excessive_collisions += COUNTER_WRAP_12_BIT;
	if (carry_reg2 & (1 << 4))
		adapter->stats.tx_late_collisions += COUNTER_WRAP_12_BIT;
	if (carry_reg2 & (1 << 2))
		adapter->stats.tx_collisions += COUNTER_WRAP_12_BIT;
}

static int et131x_mdio_read(struct mii_bus *bus, int phy_addr, int reg)
{
	struct net_device *netdev = bus->priv;
	struct et131x_adapter *adapter = netdev_priv(netdev);
	u16 value;
	int ret;

	ret = et131x_phy_mii_read(adapter, phy_addr, reg, &value);

	if (ret < 0)
		return ret;

	return value;
}

static int et131x_mdio_write(struct mii_bus *bus, int phy_addr,
			     int reg, u16 value)
{
	struct net_device *netdev = bus->priv;
	struct et131x_adapter *adapter = netdev_priv(netdev);

	return et131x_mii_write(adapter, reg, value);
}

static int et131x_mdio_reset(struct mii_bus *bus)
{
	struct net_device *netdev = bus->priv;
	struct et131x_adapter *adapter = netdev_priv(netdev);

	et131x_mii_write(adapter, MII_BMCR, BMCR_RESET);

	return 0;
}

/* et1310_phy_power_down - PHY power control
 * @adapter: device to control
 * @down: true for off/false for back on
 *
 * one hundred, ten, one thousand megs
 * How would you like to have your LAN accessed
 * Can't you see that this code processed
 * Phy power, phy power..
 */
static void et1310_phy_power_down(struct et131x_adapter *adapter, bool down)
{
	u16 data;

	et131x_mii_read(adapter, MII_BMCR, &data);
	data &= ~BMCR_PDOWN;
	if (down)
		data |= BMCR_PDOWN;
	et131x_mii_write(adapter, MII_BMCR, data);
}

/* et131x_xcvr_init - Init the phy if we are setting it into force mode
 * @adapter: pointer to our private adapter structure
 */
static void et131x_xcvr_init(struct et131x_adapter *adapter)
{
	u16 lcr2;

	/* Set the LED behavior such that LED 1 indicates speed (off =
	 * 10Mbits, blink = 100Mbits, on = 1000Mbits) and LED 2 indicates
	 * link and activity (on for link, blink off for activity).
	 *
	 * NOTE: Some customizations have been added here for specific
	 * vendors; The LED behavior is now determined by vendor data in the
	 * EEPROM. However, the above description is the default.
	 */
	if ((adapter->eeprom_data[1] & 0x4) == 0) {
		et131x_mii_read(adapter, PHY_LED_2, &lcr2);

		lcr2 &= (ET_LED2_LED_100TX | ET_LED2_LED_1000T);
		lcr2 |= (LED_VAL_LINKON_ACTIVE << LED_LINK_SHIFT);

		if ((adapter->eeprom_data[1] & 0x8) == 0)
			lcr2 |= (LED_VAL_1000BT_100BTX << LED_TXRX_SHIFT);
		else
			lcr2 |= (LED_VAL_LINKON << LED_TXRX_SHIFT);

		et131x_mii_write(adapter, PHY_LED_2, lcr2);
	}
}
1735
1736 /* et131x_configure_global_regs - configure JAGCore global regs
1737 * @adapter: pointer to our adapter structure
1738 *
1739 * Used to configure the global registers on the JAGCore
1740 */
1741 static void et131x_configure_global_regs(struct et131x_adapter *adapter)
1742 {
1743 struct global_regs __iomem *regs = &adapter->regs->global;
1744
1745 writel(0, &regs->rxq_start_addr);
1746 writel(INTERNAL_MEM_SIZE - 1, &regs->txq_end_addr);
1747
1748 if (adapter->registry_jumbo_packet < 2048) {
1749 /* Tx / RxDMA and Tx/Rx MAC interfaces have a 1k word
1750 * block of RAM that the driver can split between Tx
1751 * and Rx as it desires. Our default is to split it
1752 * 50/50:
1753 */
1754 writel(PARM_RX_MEM_END_DEF, &regs->rxq_end_addr);
1755 writel(PARM_RX_MEM_END_DEF + 1, &regs->txq_start_addr);
1756 } else if (adapter->registry_jumbo_packet < 8192) {
1757 /* For jumbo packets > 2k but < 8k, split 50-50. */
1758 writel(INTERNAL_MEM_RX_OFFSET, &regs->rxq_end_addr);
1759 writel(INTERNAL_MEM_RX_OFFSET + 1, &regs->txq_start_addr);
1760 } else {
1761 /* 9216 is the only packet size greater than 8k that
1762 * is available. The Tx buffer has to be big enough
1763 * for one whole packet on the Tx side. We'll make
1764 * the Tx 9408, and give the rest to Rx
1765 */
1766 writel(0x01b3, &regs->rxq_end_addr);
1767 writel(0x01b4, &regs->txq_start_addr);
1768 }
1769
1770 /* Initialize the loopback register. Disable all loopbacks. */
1771 writel(0, &regs->loopback);
1772
1773 /* MSI Register */
1774 writel(0, &regs->msi_config);
1775
1776 /* By default, disable the watchdog timer. It will be enabled when
1777 * a packet is queued.
1778 */
1779 writel(0, &regs->watchdog_timer);
1780 }
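/* A worked example of the split above, assuming the internal RAM is
 * INTERNAL_MEM_SIZE = 0x400 sixteen-byte words (16 KB total), which is
 * what the "make the Tx 9408" comment implies:
 *
 *	rxq: words 0x000..0x1b3 -> 436 words * 16 bytes = 6976 bytes
 *	txq: words 0x1b4..0x3ff -> 588 words * 16 bytes = 9408 bytes
 *
 * so a single 9216-byte jumbo frame fits in the Tx queue with room to
 * spare.
 */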
1781
1782 /* et131x_config_rx_dma_regs - Start of Rx_DMA init sequence
1783 * @adapter: pointer to our adapter structure
1784 */
1785 static void et131x_config_rx_dma_regs(struct et131x_adapter *adapter)
1786 {
1787 struct rxdma_regs __iomem *rx_dma = &adapter->regs->rxdma;
1788 struct rx_ring *rx_local = &adapter->rx_ring;
1789 struct fbr_desc *fbr_entry;
1790 u32 entry;
1791 u32 psr_num_des;
1792 unsigned long flags;
1793 u8 id;
1794
1795 /* Halt RXDMA to perform the reconfigure. */
1796 et131x_rx_dma_disable(adapter);
1797
1798 /* Load the completion writeback physical address */
1799 writel(upper_32_bits(rx_local->rx_status_bus), &rx_dma->dma_wb_base_hi);
1800 writel(lower_32_bits(rx_local->rx_status_bus), &rx_dma->dma_wb_base_lo);
1801
1802 memset(rx_local->rx_status_block, 0, sizeof(struct rx_status_block));
1803
1804 /* Set the address and parameters of the packet status ring into the
1805 * 1310's registers
1806 */
1807 writel(upper_32_bits(rx_local->ps_ring_physaddr), &rx_dma->psr_base_hi);
1808 writel(lower_32_bits(rx_local->ps_ring_physaddr), &rx_dma->psr_base_lo);
1809 writel(rx_local->psr_num_entries - 1, &rx_dma->psr_num_des);
1810 writel(0, &rx_dma->psr_full_offset);
1811
1812 psr_num_des = readl(&rx_dma->psr_num_des) & ET_RXDMA_PSR_NUM_DES_MASK;
1813 writel((psr_num_des * LO_MARK_PERCENT_FOR_PSR) / 100,
1814 &rx_dma->psr_min_des);
1815
1816 spin_lock_irqsave(&adapter->rcv_lock, flags);
1817
1818 /* These local variables track the PSR in the adapter structure */
1819 rx_local->local_psr_full = 0;
1820
1821 for (id = 0; id < NUM_FBRS; id++) {
1822 u32 __iomem *num_des;
1823 u32 __iomem *full_offset;
1824 u32 __iomem *min_des;
1825 u32 __iomem *base_hi;
1826 u32 __iomem *base_lo;
1827
1828 if (id == 0) {
1829 num_des = &rx_dma->fbr0_num_des;
1830 full_offset = &rx_dma->fbr0_full_offset;
1831 min_des = &rx_dma->fbr0_min_des;
1832 base_hi = &rx_dma->fbr0_base_hi;
1833 base_lo = &rx_dma->fbr0_base_lo;
1834 } else {
1835 num_des = &rx_dma->fbr1_num_des;
1836 full_offset = &rx_dma->fbr1_full_offset;
1837 min_des = &rx_dma->fbr1_min_des;
1838 base_hi = &rx_dma->fbr1_base_hi;
1839 base_lo = &rx_dma->fbr1_base_lo;
1840 }
1841
1842 /* Now's the best time to initialize FBR contents */
1843 fbr_entry =
1844 (struct fbr_desc *) rx_local->fbr[id]->ring_virtaddr;
1845 for (entry = 0;
1846 entry < rx_local->fbr[id]->num_entries; entry++) {
1847 fbr_entry->addr_hi = rx_local->fbr[id]->bus_high[entry];
1848 fbr_entry->addr_lo = rx_local->fbr[id]->bus_low[entry];
1849 fbr_entry->word2 = entry;
1850 fbr_entry++;
1851 }
1852
1853 /* Set the address and parameters of Free buffer ring 1 and 0
1854 * into the 1310's registers
1855 */
1856 writel(upper_32_bits(rx_local->fbr[id]->ring_physaddr),
1857 base_hi);
1858 writel(lower_32_bits(rx_local->fbr[id]->ring_physaddr),
1859 base_lo);
1860 writel(rx_local->fbr[id]->num_entries - 1, num_des);
1861 writel(ET_DMA10_WRAP, full_offset);
1862
1863 /* This variable tracks the free buffer ring 1 full position,
1864 * so it has to match the above.
1865 */
1866 rx_local->fbr[id]->local_full = ET_DMA10_WRAP;
1867 writel(((rx_local->fbr[id]->num_entries *
1868 LO_MARK_PERCENT_FOR_RX) / 100) - 1,
1869 min_des);
1870 }
1871
1872 /* Program the number of packets we will receive before generating an
1873 * interrupt.
1874 * For version B silicon, this value gets updated once autoneg is
1875 	 * complete.
1876 */
1877 writel(PARM_RX_NUM_BUFS_DEF, &rx_dma->num_pkt_done);
1878
1879 /* The "time_done" is not working correctly to coalesce interrupts
1880 * after a given time period, but rather is giving us an interrupt
1881 * regardless of whether we have received packets.
1882 * This value gets updated once autoneg is complete.
1883 */
1884 writel(PARM_RX_TIME_INT_DEF, &rx_dma->max_pkt_time);
1885
1886 spin_unlock_irqrestore(&adapter->rcv_lock, flags);
1887 }
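/* A worked example of the watermark programmed above, hedged on the actual
 * constant: with the default (non-jumbo) rings set up in
 * et131x_rx_dma_memory_alloc(), psr_num_entries is 512 + 512 = 1024, so the
 * register holds 1023, and for a hypothetical LO_MARK_PERCENT_FOR_PSR of 15:
 *
 *	psr_min_des = (1023 * 15) / 100 = 153
 *
 * i.e. the low-water mark sits at roughly 15% of the packet status entries.
 */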
1888
1889 /* et131x_config_tx_dma_regs - Set up the tx dma section of the JAGCore.
1890 * @adapter: pointer to our private adapter structure
1891 *
1892 * Configure the transmit engine with the ring buffers we have created
1893 * and prepare it for use.
1894 */
1895 static void et131x_config_tx_dma_regs(struct et131x_adapter *adapter)
1896 {
1897 struct txdma_regs __iomem *txdma = &adapter->regs->txdma;
1898
1899 /* Load the hardware with the start of the transmit descriptor ring. */
1900 writel(upper_32_bits(adapter->tx_ring.tx_desc_ring_pa),
1901 &txdma->pr_base_hi);
1902 writel(lower_32_bits(adapter->tx_ring.tx_desc_ring_pa),
1903 &txdma->pr_base_lo);
1904
1905 /* Initialise the transmit DMA engine */
1906 writel(NUM_DESC_PER_RING_TX - 1, &txdma->pr_num_des);
1907
1908 /* Load the completion writeback physical address */
1909 writel(upper_32_bits(adapter->tx_ring.tx_status_pa),
1910 &txdma->dma_wb_base_hi);
1911 writel(lower_32_bits(adapter->tx_ring.tx_status_pa),
1912 &txdma->dma_wb_base_lo);
1913
1914 *adapter->tx_ring.tx_status = 0;
1915
1916 writel(0, &txdma->service_request);
1917 adapter->tx_ring.send_idx = 0;
1918 }
1919
1920 /* et131x_adapter_setup - Set the adapter up as per cassini+ documentation
1921 * @adapter: pointer to our private adapter structure
1924 */
1925 static void et131x_adapter_setup(struct et131x_adapter *adapter)
1926 {
1927 /* Configure the JAGCore */
1928 et131x_configure_global_regs(adapter);
1929
1930 et1310_config_mac_regs1(adapter);
1931
1932 /* Configure the MMC registers */
1933 /* All we need to do is initialize the Memory Control Register */
1934 writel(ET_MMC_ENABLE, &adapter->regs->mmc.mmc_ctrl);
1935
1936 et1310_config_rxmac_regs(adapter);
1937 et1310_config_txmac_regs(adapter);
1938
1939 et131x_config_rx_dma_regs(adapter);
1940 et131x_config_tx_dma_regs(adapter);
1941
1942 et1310_config_macstat_regs(adapter);
1943
1944 et1310_phy_power_down(adapter, 0);
1945 et131x_xcvr_init(adapter);
1946 }
1947
1948 /* et131x_soft_reset - Issue a soft reset to the hardware, complete for ET1310
1949 * @adapter: pointer to our private adapter structure
1950 */
1951 static void et131x_soft_reset(struct et131x_adapter *adapter)
1952 {
1953 u32 reg;
1954
1955 /* Disable MAC Core */
1956 reg = ET_MAC_CFG1_SOFT_RESET | ET_MAC_CFG1_SIM_RESET |
1957 ET_MAC_CFG1_RESET_RXMC | ET_MAC_CFG1_RESET_TXMC |
1958 ET_MAC_CFG1_RESET_RXFUNC | ET_MAC_CFG1_RESET_TXFUNC;
1959 writel(reg, &adapter->regs->mac.cfg1);
1960
1961 reg = ET_RESET_ALL;
1962 writel(reg, &adapter->regs->global.sw_reset);
1963
1964 reg = ET_MAC_CFG1_RESET_RXMC | ET_MAC_CFG1_RESET_TXMC |
1965 ET_MAC_CFG1_RESET_RXFUNC | ET_MAC_CFG1_RESET_TXFUNC;
1966 writel(reg, &adapter->regs->mac.cfg1);
1967 writel(0, &adapter->regs->mac.cfg1);
1968 }
1969
1970 /* et131x_enable_interrupts - enable interrupt
1971 * @adapter: et131x device
1972 *
1973 * Enable the appropriate interrupts on the ET131x according to our
1974 * configuration
1975 */
1976 static void et131x_enable_interrupts(struct et131x_adapter *adapter)
1977 {
1978 u32 mask;
1979
1980 /* Enable all global interrupts */
1981 if (adapter->flowcontrol == FLOW_TXONLY ||
1982 adapter->flowcontrol == FLOW_BOTH)
1983 mask = INT_MASK_ENABLE;
1984 else
1985 mask = INT_MASK_ENABLE_NO_FLOW;
1986
1987 writel(mask, &adapter->regs->global.int_mask);
1988 }
1989
1990 /* et131x_disable_interrupts - interrupt disable
1991 * @adapter: et131x device
1992 *
1993 * Block all interrupts from the et131x device at the device itself
1994 */
1995 static void et131x_disable_interrupts(struct et131x_adapter *adapter)
1996 {
1997 /* Disable all global interrupts */
1998 writel(INT_MASK_DISABLE, &adapter->regs->global.int_mask);
1999 }
2000
2001 /* et131x_tx_dma_disable - Stop Tx_DMA on the ET1310
2002 * @adapter: pointer to our adapter structure
2003 */
2004 static void et131x_tx_dma_disable(struct et131x_adapter *adapter)
2005 {
2006 	/* Set up the transmit DMA configuration register */
2007 writel(ET_TXDMA_CSR_HALT | ET_TXDMA_SNGL_EPKT,
2008 &adapter->regs->txdma.csr);
2009 }
2010
2011 /* et131x_enable_txrx - Enable tx/rx queues
2012 * @netdev: device to be enabled
2013 */
2014 static void et131x_enable_txrx(struct net_device *netdev)
2015 {
2016 struct et131x_adapter *adapter = netdev_priv(netdev);
2017
2018 /* Enable the Tx and Rx DMA engines (if not already enabled) */
2019 et131x_rx_dma_enable(adapter);
2020 et131x_tx_dma_enable(adapter);
2021
2022 /* Enable device interrupts */
2023 if (adapter->flags & FMP_ADAPTER_INTERRUPT_IN_USE)
2024 et131x_enable_interrupts(adapter);
2025
2026 /* We're ready to move some data, so start the queue */
2027 netif_start_queue(netdev);
2028 }
2029
2030 /* et131x_disable_txrx - Disable tx/rx queues
2031 * @netdev: device to be disabled
2032 */
2033 static void et131x_disable_txrx(struct net_device *netdev)
2034 {
2035 struct et131x_adapter *adapter = netdev_priv(netdev);
2036
2037 /* First thing is to stop the queue */
2038 netif_stop_queue(netdev);
2039
2040 /* Stop the Tx and Rx DMA engines */
2041 et131x_rx_dma_disable(adapter);
2042 et131x_tx_dma_disable(adapter);
2043
2044 /* Disable device interrupts */
2045 et131x_disable_interrupts(adapter);
2046 }
2047
2048 /* et131x_init_send - Initialize send data structures
2049 * @adapter: pointer to our private adapter structure
2050 */
2051 static void et131x_init_send(struct et131x_adapter *adapter)
2052 {
2053 struct tcb *tcb;
2054 u32 ct;
2055 struct tx_ring *tx_ring;
2056
2057 /* Setup some convenience pointers */
2058 tx_ring = &adapter->tx_ring;
2059 tcb = adapter->tx_ring.tcb_ring;
2060
2061 tx_ring->tcb_qhead = tcb;
2062
2063 memset(tcb, 0, sizeof(struct tcb) * NUM_TCB);
2064
2065 /* Go through and set up each TCB */
2066 for (ct = 0; ct++ < NUM_TCB; tcb++)
2067 /* Set the link pointer in HW TCB to the next TCB in the
2068 * chain
2069 */
2070 tcb->next = tcb + 1;
2071
2072 /* Set the tail pointer */
2073 tcb--;
2074 tx_ring->tcb_qtail = tcb;
2075 tcb->next = NULL;
2076 /* Curr send queue should now be empty */
2077 tx_ring->send_head = NULL;
2078 tx_ring->send_tail = NULL;
2079 }
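/* After et131x_init_send() the TCBs form a singly linked free list threaded
 * through the ring array (sketch, with N = NUM_TCB):
 *
 *	tcb_qhead -> ring[0] -> ring[1] -> ... -> ring[N - 1] -> NULL
 *	                                          ^^^ tcb_qtail
 *
 * send_packet() pops TCBs from tcb_qhead, and free_send_packet() pushes
 * completed ones back at tcb_qtail.
 */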
2080
2081 /* et1310_enable_phy_coma - called when network cable is unplugged
2082 * @adapter: pointer to our adapter structure
2083 *
2084  * The driver receives a phy status change interrupt while in D0 and checks
2085  * that phy_status is down.
2086 *
2087 * -- gate off JAGCore;
2088 * -- set gigE PHY in Coma mode
2089 * -- wake on phy_interrupt; Perform software reset JAGCore,
2090 * re-initialize jagcore and gigE PHY
2091 *
2092 * Add D0-ASPM-PhyLinkDown Support:
2093 * -- while in D0, when there is a phy_interrupt indicating phy link
2094 * down status, call the MPSetPhyComa routine to enter this active
2095 * state power saving mode
2096 * -- while in D0-ASPM-PhyLinkDown mode, when there is a phy_interrupt
2097 * indicating linkup status, call the MPDisablePhyComa routine to
2098 * restore JAGCore and gigE PHY
2099 */
2100 static void et1310_enable_phy_coma(struct et131x_adapter *adapter)
2101 {
2102 unsigned long flags;
2103 u32 pmcsr;
2104
2105 pmcsr = readl(&adapter->regs->global.pm_csr);
2106
2107 /* Save the GbE PHY speed and duplex modes. Need to restore this
2108 * when cable is plugged back in
2109 */
2110 /* TODO - when PM is re-enabled, check if we need to
2111 * perform a similar task as this -
2112 * adapter->pdown_speed = adapter->ai_force_speed;
2113 * adapter->pdown_duplex = adapter->ai_force_duplex;
2114 */
2115
2116 /* Stop sending packets. */
2117 spin_lock_irqsave(&adapter->send_hw_lock, flags);
2118 adapter->flags |= FMP_ADAPTER_LOWER_POWER;
2119 spin_unlock_irqrestore(&adapter->send_hw_lock, flags);
2120
2121 /* Wait for outstanding Receive packets */
2122
2123 et131x_disable_txrx(adapter->netdev);
2124
2125 /* Gate off JAGCore 3 clock domains */
2126 pmcsr &= ~ET_PMCSR_INIT;
2127 writel(pmcsr, &adapter->regs->global.pm_csr);
2128
2129 /* Program gigE PHY in to Coma mode */
2130 pmcsr |= ET_PM_PHY_SW_COMA;
2131 writel(pmcsr, &adapter->regs->global.pm_csr);
2132 }
2133
2134 /* et1310_disable_phy_coma - Disable the Phy Coma Mode
2135 * @adapter: pointer to our adapter structure
2136 */
2137 static void et1310_disable_phy_coma(struct et131x_adapter *adapter)
2138 {
2139 u32 pmcsr;
2140
2141 pmcsr = readl(&adapter->regs->global.pm_csr);
2142
2143 /* Disable phy_sw_coma register and re-enable JAGCore clocks */
2144 pmcsr |= ET_PMCSR_INIT;
2145 pmcsr &= ~ET_PM_PHY_SW_COMA;
2146 writel(pmcsr, &adapter->regs->global.pm_csr);
2147
2148 /* Restore the GbE PHY speed and duplex modes;
2149 * Reset JAGCore; re-configure and initialize JAGCore and gigE PHY
2150 */
2151 /* TODO - when PM is re-enabled, check if we need to
2152 * perform a similar task as this -
2153 * adapter->ai_force_speed = adapter->pdown_speed;
2154 * adapter->ai_force_duplex = adapter->pdown_duplex;
2155 */
2156
2157 /* Re-initialize the send structures */
2158 et131x_init_send(adapter);
2159
2160 /* Bring the device back to the state it was during init prior to
2161 * autonegotiation being complete. This way, when we get the auto-neg
2162 * complete interrupt, we can complete init by calling ConfigMacREGS2.
2163 */
2164 et131x_soft_reset(adapter);
2165
2166 /* setup et1310 as per the documentation ?? */
2167 et131x_adapter_setup(adapter);
2168
2169 /* Allow Tx to restart */
2170 adapter->flags &= ~FMP_ADAPTER_LOWER_POWER;
2171
2172 et131x_enable_txrx(adapter->netdev);
2173 }
2174
2175 static inline u32 bump_free_buff_ring(u32 *free_buff_ring, u32 limit)
2176 {
2177 u32 tmp_free_buff_ring = *free_buff_ring;
2178 tmp_free_buff_ring++;
2179 	/* This works for all cases where limit < 1024. The limit == 1023
2180 	 * case works because incrementing 1023 gives 1024, so the if is not
2181 	 * taken, but the carry out of the 10-bit index into the wrap bit
2182 	 * toggles the wrap value correctly.
2183 	 */
2184 if ((tmp_free_buff_ring & ET_DMA10_MASK) > limit) {
2185 tmp_free_buff_ring &= ~ET_DMA10_MASK;
2186 tmp_free_buff_ring ^= ET_DMA10_WRAP;
2187 }
2188 /* For the 1023 case */
2189 tmp_free_buff_ring &= (ET_DMA10_MASK|ET_DMA10_WRAP);
2190 *free_buff_ring = tmp_free_buff_ring;
2191 return tmp_free_buff_ring;
2192 }
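/* Worked examples for bump_free_buff_ring(), taking ET_DMA10_MASK as 0x3FF
 * (the low 10 bits) and ET_DMA10_WRAP as 0x400:
 *
 *	limit = 511:  0x1FF + 1 = 0x200; 0x200 > 511, so the index is
 *	              cleared and the wrap bit toggles: result 0x400
 *	limit = 1023: 0x3FF + 1 = 0x400; the if is not taken, but the
 *	              carry has already flipped the wrap bit, and the
 *	              final mask keeps only index + wrap: result 0x400
 *
 * Either way the value handed to the hardware is a 10-bit index plus a
 * single wrap bit.
 */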
2193
2194 /* et131x_rx_dma_memory_alloc
2195 * @adapter: pointer to our private adapter structure
2196 *
2197 * Returns 0 on success and errno on failure (as defined in errno.h)
2198 *
2199  * Allocates both Free Buffer Rings (0 and 1) and the Packet Status Ring.
2201 */
2202 static int et131x_rx_dma_memory_alloc(struct et131x_adapter *adapter)
2203 {
2204 u8 id;
2205 u32 i, j;
2206 u32 bufsize;
2207 u32 pktstat_ringsize;
2208 u32 fbr_chunksize;
2209 struct rx_ring *rx_ring;
2210
2211 /* Setup some convenience pointers */
2212 rx_ring = &adapter->rx_ring;
2213
2214 /* Alloc memory for the lookup table */
2215 	rx_ring->fbr[0] = kmalloc(sizeof(struct fbr_lookup), GFP_KERNEL);
2216 	rx_ring->fbr[1] = kmalloc(sizeof(struct fbr_lookup), GFP_KERNEL);
	/* Bail out early if either lookup table could not be allocated */
	if (!rx_ring->fbr[0] || !rx_ring->fbr[1])
		return -ENOMEM;

2218 	/* The first thing we will do is configure the sizes of the buffer
2219 	 * rings. These will change based on jumbo packet support. Larger
2220 	 * jumbo packets increase the size of each entry in FBR0, and the
2221 	 * number of entries in FBR0, while at the same time decreasing the
2222 	 * number of entries in FBR1.
2223 	 *
2224 	 * FBR1 holds "large" frames, FBR0 holds "small" frames. If FBR1
2225 	 * entries are huge in order to accommodate a "jumbo" frame, then it
2226 	 * will have fewer entries. Conversely, FBR0 will now be relied upon
2227 	 * to carry more "normal" frames, thus its entry size also increases
2228 	 * and the number of entries goes up too (since it now carries
2229 	 * "small" + "regular" packets).
2230 	 *
2231 	 * In this scheme, we try to maintain 512 entries between the two
2232 	 * rings. Also, FBR1 remains a constant size - when its size doubles
2233 	 * the number of entries halves. FBR0 increases in size, however.
2234 	 */
2235
2236 if (adapter->registry_jumbo_packet < 2048) {
2237 rx_ring->fbr[0]->buffsize = 256;
2238 rx_ring->fbr[0]->num_entries = 512;
2239 rx_ring->fbr[1]->buffsize = 2048;
2240 rx_ring->fbr[1]->num_entries = 512;
2241 } else if (adapter->registry_jumbo_packet < 4096) {
2242 rx_ring->fbr[0]->buffsize = 512;
2243 rx_ring->fbr[0]->num_entries = 1024;
2244 rx_ring->fbr[1]->buffsize = 4096;
2245 rx_ring->fbr[1]->num_entries = 512;
2246 } else {
2247 rx_ring->fbr[0]->buffsize = 1024;
2248 rx_ring->fbr[0]->num_entries = 768;
2249 rx_ring->fbr[1]->buffsize = 16384;
2250 rx_ring->fbr[1]->num_entries = 128;
2251 }
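	/* For reference, the raw buffer memory implied by each of the three
	 * configurations above (entries * buffsize, before the FBR_CHUNKS
	 * grouping is applied):
	 *
	 *	< 2048: FBR0  512 *  256 = 128 KiB, FBR1 512 *  2048 = 1 MiB
	 *	< 4096: FBR0 1024 *  512 = 512 KiB, FBR1 512 *  4096 = 2 MiB
	 *	 else : FBR0  768 * 1024 = 768 KiB, FBR1 128 * 16384 = 2 MiB
	 */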
2252
2253 adapter->rx_ring.psr_num_entries =
2254 adapter->rx_ring.fbr[0]->num_entries +
2255 adapter->rx_ring.fbr[1]->num_entries;
2256
2257 for (id = 0; id < NUM_FBRS; id++) {
2258 /* Allocate an area of memory for Free Buffer Ring */
2259 bufsize =
2260 (sizeof(struct fbr_desc) * rx_ring->fbr[id]->num_entries);
2261 rx_ring->fbr[id]->ring_virtaddr =
2262 dma_alloc_coherent(&adapter->pdev->dev,
2263 bufsize,
2264 &rx_ring->fbr[id]->ring_physaddr,
2265 GFP_KERNEL);
2266 if (!rx_ring->fbr[id]->ring_virtaddr) {
2267 dev_err(&adapter->pdev->dev,
2268 "Cannot alloc memory for Free Buffer Ring %d\n", id);
2269 return -ENOMEM;
2270 }
2271 }
2272
2273 for (id = 0; id < NUM_FBRS; id++) {
2274 fbr_chunksize = (FBR_CHUNKS * rx_ring->fbr[id]->buffsize);
2275
2276 for (i = 0;
2277 i < (rx_ring->fbr[id]->num_entries / FBR_CHUNKS); i++) {
2278 dma_addr_t fbr_tmp_physaddr;
2279
2280 rx_ring->fbr[id]->mem_virtaddrs[i] = dma_alloc_coherent(
2281 &adapter->pdev->dev, fbr_chunksize,
2282 &rx_ring->fbr[id]->mem_physaddrs[i],
2283 GFP_KERNEL);
2284
2285 if (!rx_ring->fbr[id]->mem_virtaddrs[i]) {
2286 dev_err(&adapter->pdev->dev,
2287 "Could not alloc memory\n");
2288 return -ENOMEM;
2289 }
2290
2291 			/* See the NOTE on dma_alloc_coherent() addresses below */
2292 fbr_tmp_physaddr = rx_ring->fbr[id]->mem_physaddrs[i];
2293
2294 for (j = 0; j < FBR_CHUNKS; j++) {
2295 u32 index = (i * FBR_CHUNKS) + j;
2296
2297 /* Save the Virtual address of this index for
2298 * quick access later
2299 */
2300 rx_ring->fbr[id]->virt[index] =
2301 (u8 *) rx_ring->fbr[id]->mem_virtaddrs[i] +
2302 (j * rx_ring->fbr[id]->buffsize);
2303
2304 /* now store the physical address in the
2305 * descriptor so the device can access it
2306 */
2307 rx_ring->fbr[id]->bus_high[index] =
2308 upper_32_bits(fbr_tmp_physaddr);
2309 rx_ring->fbr[id]->bus_low[index] =
2310 lower_32_bits(fbr_tmp_physaddr);
2311
2312 fbr_tmp_physaddr += rx_ring->fbr[id]->buffsize;
2313 }
2314 }
2315 }
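	/* Illustrative numbers for the bookkeeping above, with a
	 * hypothetical FBR_CHUNKS of 4 and a 2048-byte buffsize: chunk i
	 * provides lookup indices 4*i .. 4*i + 3, and buffer j of that
	 * chunk lives at
	 *
	 *	virt[4*i + j] = mem_virtaddrs[i] + j * 2048
	 *	bus (hi/lo)   = mem_physaddrs[i] + j * 2048
	 *
	 * so the per-index virt[]/bus_high[]/bus_low[] tables let driver
	 * and device locate any buffer directly.
	 */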
2316
2317 /* Allocate an area of memory for FIFO of Packet Status ring entries */
2318 pktstat_ringsize =
2319 sizeof(struct pkt_stat_desc) * adapter->rx_ring.psr_num_entries;
2320
2321 rx_ring->ps_ring_virtaddr = dma_alloc_coherent(&adapter->pdev->dev,
2322 pktstat_ringsize,
2323 &rx_ring->ps_ring_physaddr,
2324 GFP_KERNEL);
2325
2326 if (!rx_ring->ps_ring_virtaddr) {
2327 dev_err(&adapter->pdev->dev,
2328 "Cannot alloc memory for Packet Status Ring\n");
2329 return -ENOMEM;
2330 }
2331 pr_info("Packet Status Ring %llx\n",
2332 (unsigned long long) rx_ring->ps_ring_physaddr);
2333
2334 /* NOTE : dma_alloc_coherent(), used above to alloc DMA regions,
2335 * ALWAYS returns SAC (32-bit) addresses. If DAC (64-bit) addresses
2336 * are ever returned, make sure the high part is retrieved here before
2337 * storing the adjusted address.
2338 */
2339
2340 /* Allocate an area of memory for writeback of status information */
2341 rx_ring->rx_status_block = dma_alloc_coherent(&adapter->pdev->dev,
2342 sizeof(struct rx_status_block),
2343 &rx_ring->rx_status_bus,
2344 GFP_KERNEL);
2345 if (!rx_ring->rx_status_block) {
2346 dev_err(&adapter->pdev->dev,
2347 "Cannot alloc memory for Status Block\n");
2348 return -ENOMEM;
2349 }
2350 rx_ring->num_rfd = NIC_DEFAULT_NUM_RFD;
2351 	pr_info("Rx status block %llx\n",
	        (unsigned long long)rx_ring->rx_status_bus);
2352
2353 /* The RFDs are going to be put on lists later on, so initialize the
2354 * lists now.
2355 */
2356 INIT_LIST_HEAD(&rx_ring->recv_list);
2357 return 0;
2358 }
2359
2360 /* et131x_rx_dma_memory_free - Free all memory allocated within this module.
2361 * @adapter: pointer to our private adapter structure
2362 */
2363 static void et131x_rx_dma_memory_free(struct et131x_adapter *adapter)
2364 {
2365 u8 id;
2366 u32 index;
2367 u32 bufsize;
2368 u32 pktstat_ringsize;
2369 struct rfd *rfd;
2370 struct rx_ring *rx_ring;
2371
2372 /* Setup some convenience pointers */
2373 rx_ring = &adapter->rx_ring;
2374
2375 /* Free RFDs and associated packet descriptors */
2376 WARN_ON(rx_ring->num_ready_recv != rx_ring->num_rfd);
2377
2378 while (!list_empty(&rx_ring->recv_list)) {
2379 rfd = (struct rfd *) list_entry(rx_ring->recv_list.next,
2380 struct rfd, list_node);
2381
2382 list_del(&rfd->list_node);
2383 rfd->skb = NULL;
2384 kfree(rfd);
2385 }
2386
2387 /* Free Free Buffer Rings */
2388 for (id = 0; id < NUM_FBRS; id++) {
2389 if (!rx_ring->fbr[id]->ring_virtaddr)
2390 continue;
2391
2392 /* First the packet memory */
2393 for (index = 0;
2394 index < (rx_ring->fbr[id]->num_entries / FBR_CHUNKS);
2395 index++) {
2396 if (rx_ring->fbr[id]->mem_virtaddrs[index]) {
2397 bufsize =
2398 rx_ring->fbr[id]->buffsize * FBR_CHUNKS;
2399
2400 dma_free_coherent(&adapter->pdev->dev,
2401 bufsize,
2402 rx_ring->fbr[id]->mem_virtaddrs[index],
2403 rx_ring->fbr[id]->mem_physaddrs[index]);
2404
2405 rx_ring->fbr[id]->mem_virtaddrs[index] = NULL;
2406 }
2407 }
2408
2409 bufsize =
2410 sizeof(struct fbr_desc) * rx_ring->fbr[id]->num_entries;
2411
2412 dma_free_coherent(&adapter->pdev->dev, bufsize,
2413 rx_ring->fbr[id]->ring_virtaddr,
2414 rx_ring->fbr[id]->ring_physaddr);
2415
2416 rx_ring->fbr[id]->ring_virtaddr = NULL;
2417 }
2418
2419 /* Free Packet Status Ring */
2420 if (rx_ring->ps_ring_virtaddr) {
2421 pktstat_ringsize = sizeof(struct pkt_stat_desc) *
2422 adapter->rx_ring.psr_num_entries;
2423
2424 dma_free_coherent(&adapter->pdev->dev, pktstat_ringsize,
2425 rx_ring->ps_ring_virtaddr,
2426 rx_ring->ps_ring_physaddr);
2427
2428 rx_ring->ps_ring_virtaddr = NULL;
2429 }
2430
2431 /* Free area of memory for the writeback of status information */
2432 if (rx_ring->rx_status_block) {
2433 dma_free_coherent(&adapter->pdev->dev,
2434 sizeof(struct rx_status_block),
2435 rx_ring->rx_status_block, rx_ring->rx_status_bus);
2436 rx_ring->rx_status_block = NULL;
2437 }
2438
2439 /* Free the FBR Lookup Table */
2440 kfree(rx_ring->fbr[0]);
2441 kfree(rx_ring->fbr[1]);
2442
2443 /* Reset Counters */
2444 rx_ring->num_ready_recv = 0;
2445 }
2446
2447 /* et131x_init_recv - Initialize receive data structures.
2448 * @adapter: pointer to our private adapter structure
2449 *
2450 * Returns 0 on success and errno on failure (as defined in errno.h)
2451 */
2452 static int et131x_init_recv(struct et131x_adapter *adapter)
2453 {
2454 struct rfd *rfd;
2455 u32 rfdct;
2456 u32 numrfd = 0;
2457 struct rx_ring *rx_ring;
2458
2459 /* Setup some convenience pointers */
2460 rx_ring = &adapter->rx_ring;
2461
2462 /* Setup each RFD */
2463 for (rfdct = 0; rfdct < rx_ring->num_rfd; rfdct++) {
2464 rfd = kzalloc(sizeof(struct rfd), GFP_ATOMIC | GFP_DMA);
2465 if (!rfd)
2466 return -ENOMEM;
2467
2468 rfd->skb = NULL;
2469
2470 /* Add this RFD to the recv_list */
2471 list_add_tail(&rfd->list_node, &rx_ring->recv_list);
2472
2473 /* Increment both the available RFD's, and the total RFD's. */
2474 rx_ring->num_ready_recv++;
2475 numrfd++;
2476 }
2477
2478 return 0;
2479 }
2480
2481 /* et131x_set_rx_dma_timer - Set the heartbeat timer according to line rate.
2482 * @adapter: pointer to our adapter structure
2483 */
2484 static void et131x_set_rx_dma_timer(struct et131x_adapter *adapter)
2485 {
2486 struct phy_device *phydev = adapter->phydev;
2487
2488 if (!phydev)
2489 return;
2490
2491 /* For version B silicon, we do not use the RxDMA timer for 10 and 100
2492 	 * Mbits/s line rates, nor do we enable RxDMA interrupt coalescing.
2493 */
2494 if ((phydev->speed == SPEED_100) || (phydev->speed == SPEED_10)) {
2495 writel(0, &adapter->regs->rxdma.max_pkt_time);
2496 writel(1, &adapter->regs->rxdma.num_pkt_done);
2497 }
2498 }
2499
2500 /* nic_return_rfd - Recycle a RFD and put it back onto the receive list
2501 * @adapter: pointer to our adapter
2502 * @rfd: pointer to the RFD
2503 */
2504 static void nic_return_rfd(struct et131x_adapter *adapter, struct rfd *rfd)
2505 {
2506 struct rx_ring *rx_local = &adapter->rx_ring;
2507 struct rxdma_regs __iomem *rx_dma = &adapter->regs->rxdma;
2508 u16 buff_index = rfd->bufferindex;
2509 u8 ring_index = rfd->ringindex;
2510 unsigned long flags;
2511
2512 /* We don't use any of the OOB data besides status. Otherwise, we
2513 * need to clean up OOB data
2514 */
2515 if (buff_index < rx_local->fbr[ring_index]->num_entries) {
2516 u32 __iomem *offset;
2517 struct fbr_desc *next;
2518
2519 spin_lock_irqsave(&adapter->fbr_lock, flags);
2520
2521 if (ring_index == 0)
2522 offset = &rx_dma->fbr0_full_offset;
2523 else
2524 offset = &rx_dma->fbr1_full_offset;
2525
2526 next = (struct fbr_desc *)
2527 (rx_local->fbr[ring_index]->ring_virtaddr) +
2528 INDEX10(rx_local->fbr[ring_index]->local_full);
2529
2530 /* Handle the Free Buffer Ring advancement here. Write
2531 * the PA / Buffer Index for the returned buffer into
2532 		 * the oldest (next to be freed) FBR entry
2533 */
2534 next->addr_hi = rx_local->fbr[ring_index]->bus_high[buff_index];
2535 next->addr_lo = rx_local->fbr[ring_index]->bus_low[buff_index];
2536 next->word2 = buff_index;
2537
2538 writel(bump_free_buff_ring(
2539 &rx_local->fbr[ring_index]->local_full,
2540 rx_local->fbr[ring_index]->num_entries - 1),
2541 offset);
2542
2543 spin_unlock_irqrestore(&adapter->fbr_lock, flags);
2544 } else {
2545 dev_err(&adapter->pdev->dev,
2546 "%s illegal Buffer Index returned\n", __func__);
2547 }
2548
2549 /* The processing on this RFD is done, so put it back on the tail of
2550 * our list
2551 */
2552 spin_lock_irqsave(&adapter->rcv_lock, flags);
2553 list_add_tail(&rfd->list_node, &rx_local->recv_list);
2554 rx_local->num_ready_recv++;
2555 spin_unlock_irqrestore(&adapter->rcv_lock, flags);
2556
2557 WARN_ON(rx_local->num_ready_recv > rx_local->num_rfd);
2558 }
2559
2560 /* nic_rx_pkts - Checks the hardware for available packets
2561 * @adapter: pointer to our adapter
2562 *
2563 * Returns rfd, a pointer to our MPRFD.
2564 *
2565 * Checks the hardware for available packets, using completion ring
2566 * If packets are available, it gets an RFD from the recv_list, attaches
2567 * the packet to it, puts the RFD in the RecvPendList, and also returns
2568 * the pointer to the RFD.
2569 */
2570 static struct rfd *nic_rx_pkts(struct et131x_adapter *adapter)
2571 {
2572 struct rx_ring *rx_local = &adapter->rx_ring;
2573 struct rx_status_block *status;
2574 struct pkt_stat_desc *psr;
2575 struct rfd *rfd;
2576 u32 i;
2577 u8 *buf;
2578 unsigned long flags;
2579 struct list_head *element;
2580 u8 ring_index;
2581 u16 buff_index;
2582 u32 len;
2583 u32 word0;
2584 u32 word1;
2585 struct sk_buff *skb;
2586
2587 /* RX Status block is written by the DMA engine prior to every
2588 * interrupt. It contains the next to be used entry in the Packet
2589 * Status Ring, and also the two Free Buffer rings.
2590 */
2591 status = rx_local->rx_status_block;
2592 word1 = status->word1 >> 16; /* Get the useful bits */
2593
2594 	/* Nothing new if the PSR offset and wrap bit match our local copy */
2595 if ((word1 & 0x1FFF) == (rx_local->local_psr_full & 0x1FFF))
2596 return NULL; /* Looks like this ring is not updated yet */
2597
2598 /* The packet status ring indicates that data is available. */
2599 psr = (struct pkt_stat_desc *) (rx_local->ps_ring_virtaddr) +
2600 (rx_local->local_psr_full & 0xFFF);
2601
2602 /* Grab any information that is required once the PSR is advanced,
2603 * since we can no longer rely on the memory being accurate
2604 */
2605 len = psr->word1 & 0xFFFF;
2606 ring_index = (psr->word1 >> 26) & 0x03;
2607 buff_index = (psr->word1 >> 16) & 0x3FF;
2608 word0 = psr->word0;
2609
2610 /* Indicate that we have used this PSR entry. */
2611 /* FIXME wrap 12 */
2612 add_12bit(&rx_local->local_psr_full, 1);
2613 	if ((rx_local->local_psr_full & 0xFFF) > rx_local->psr_num_entries - 1) {
2615 /* Clear psr full and toggle the wrap bit */
2616 rx_local->local_psr_full &= ~0xFFF;
2617 rx_local->local_psr_full ^= 0x1000;
2618 }
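	/* Example of the 12-bit wrap, with psr_num_entries == 1024 and
	 * assuming add_12bit() preserves the wrap bit (0x1000) while
	 * advancing the low 12 bits, as the reset logic above requires: a
	 * local value of 0x13FF (wrap set, index 1023) increments to
	 * 0x1400; index 0x400 is past the last entry, so the low 12 bits
	 * are cleared and the wrap bit toggles, giving 0x0000.
	 */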
2619
2620 writel(rx_local->local_psr_full, &adapter->regs->rxdma.psr_full_offset);
2621
2622 if (ring_index > 1 ||
2623 buff_index > rx_local->fbr[ring_index]->num_entries - 1) {
2624 /* Illegal buffer or ring index cannot be used by S/W*/
2625 dev_err(&adapter->pdev->dev,
2626 "NICRxPkts PSR Entry %d indicates length of %d and/or bad bi(%d)\n",
2627 rx_local->local_psr_full & 0xFFF, len, buff_index);
2628 return NULL;
2629 }
2630
2631 /* Get and fill the RFD. */
2632 spin_lock_irqsave(&adapter->rcv_lock, flags);
2633
2634 element = rx_local->recv_list.next;
2635 rfd = (struct rfd *) list_entry(element, struct rfd, list_node);
2636
2637 if (!rfd) {
2638 spin_unlock_irqrestore(&adapter->rcv_lock, flags);
2639 return NULL;
2640 }
2641
2642 list_del(&rfd->list_node);
2643 rx_local->num_ready_recv--;
2644
2645 spin_unlock_irqrestore(&adapter->rcv_lock, flags);
2646
2647 rfd->bufferindex = buff_index;
2648 rfd->ringindex = ring_index;
2649
2650 /* In V1 silicon, there is a bug which screws up filtering of runt
2651 * packets. Therefore runt packet filtering is disabled in the MAC and
2652 * the packets are dropped here. They are also counted here.
2653 */
2654 if (len < (NIC_MIN_PACKET_SIZE + 4)) {
2655 adapter->stats.rx_other_errs++;
2656 len = 0;
2657 }
2658
2659 if (len == 0) {
2660 rfd->len = 0;
2661 goto out;
2662 }
2663
2664 /* Determine if this is a multicast packet coming in */
2665 if ((word0 & ALCATEL_MULTICAST_PKT) &&
2666 !(word0 & ALCATEL_BROADCAST_PKT)) {
2667 /* Promiscuous mode and Multicast mode are not mutually
2668 * exclusive as was first thought. I guess Promiscuous is just
2669 * considered a super-set of the other filters. Generally filter
2670 * is 0x2b when in promiscuous mode.
2671 */
2672 if ((adapter->packet_filter & ET131X_PACKET_TYPE_MULTICAST)
2673 && !(adapter->packet_filter & ET131X_PACKET_TYPE_PROMISCUOUS)
2674 && !(adapter->packet_filter &
2675 ET131X_PACKET_TYPE_ALL_MULTICAST)) {
2676 buf = rx_local->fbr[ring_index]->virt[buff_index];
2677
2678 /* Loop through our list to see if the destination
2679 * address of this packet matches one in our list.
2680 */
2681 for (i = 0; i < adapter->multicast_addr_count; i++) {
2682 if (buf[0] == adapter->multicast_list[i][0]
2683 && buf[1] == adapter->multicast_list[i][1]
2684 && buf[2] == adapter->multicast_list[i][2]
2685 && buf[3] == adapter->multicast_list[i][3]
2686 && buf[4] == adapter->multicast_list[i][4]
2687 && buf[5] == adapter->multicast_list[i][5]) {
2688 break;
2689 }
2690 }
2691
2692 /* If our index is equal to the number of Multicast
2693 * address we have, then this means we did not find this
2694 * packet's matching address in our list. Set the len to
2695 * zero, so we free our RFD when we return from this
2696 * function.
2697 */
2698 if (i == adapter->multicast_addr_count)
2699 len = 0;
2700 }
2701
2702 if (len > 0)
2703 adapter->stats.multicast_pkts_rcvd++;
2704 } else if (word0 & ALCATEL_BROADCAST_PKT) {
2705 adapter->stats.broadcast_pkts_rcvd++;
2706 } else {
2707 /* Not sure what this counter measures in promiscuous mode.
2708 * Perhaps we should check the MAC address to see if it is
2709 * directed to us in promiscuous mode.
2710 */
2711 adapter->stats.unicast_pkts_rcvd++;
2712 }
2713
2714 if (len == 0) {
2715 rfd->len = 0;
2716 goto out;
2717 }
2718
2719 rfd->len = len;
2720
2721 skb = dev_alloc_skb(rfd->len + 2);
2722 if (!skb) {
2723 dev_err(&adapter->pdev->dev, "Couldn't alloc an SKB for Rx\n");
2724 		/* Recycle the RFD and its buffer rather than leaking them */
		rfd->len = 0;
		goto out;
2725 }
2726
2727 adapter->net_stats.rx_bytes += rfd->len;
2728
2729 memcpy(skb_put(skb, rfd->len),
2730 rx_local->fbr[ring_index]->virt[buff_index],
2731 rfd->len);
2732
2733 skb->dev = adapter->netdev;
2734 skb->protocol = eth_type_trans(skb, adapter->netdev);
2735 skb->ip_summed = CHECKSUM_NONE;
2736 netif_rx_ni(skb);
2737
2738 out:
2739 nic_return_rfd(adapter, rfd);
2740 return rfd;
2741 }
2742
2743 /* et131x_handle_recv_interrupt - Interrupt handler for receive processing
2744 * @adapter: pointer to our adapter
2745 *
2746 * Assumption, Rcv spinlock has been acquired.
2747 */
2748 static void et131x_handle_recv_interrupt(struct et131x_adapter *adapter)
2749 {
2750 struct rfd *rfd = NULL;
2751 u32 count = 0;
2752 bool done = true;
2753
2754 	/* Process up to NUM_PACKETS_HANDLED RFDs */
2755 while (count < NUM_PACKETS_HANDLED) {
2756 if (list_empty(&adapter->rx_ring.recv_list)) {
2757 WARN_ON(adapter->rx_ring.num_ready_recv != 0);
2758 done = false;
2759 break;
2760 }
2761
2762 rfd = nic_rx_pkts(adapter);
2763
2764 if (rfd == NULL)
2765 break;
2766
2767 /* Do not receive any packets until a filter has been set.
2768 * Do not receive any packets until we have link.
2769 * If length is zero, return the RFD in order to advance the
2770 * Free buffer ring.
2771 */
2772 if (!adapter->packet_filter ||
2773 !netif_carrier_ok(adapter->netdev) ||
2774 rfd->len == 0)
2775 continue;
2776
2777 /* Increment the number of packets we received */
2778 adapter->net_stats.rx_packets++;
2779
2780 /* Set the status on the packet, either resources or success */
2781 if (adapter->rx_ring.num_ready_recv < RFD_LOW_WATER_MARK) {
2782 dev_warn(&adapter->pdev->dev,
2783 "RFD's are running out\n");
2784 }
2785 count++;
2786 }
2787
2788 if (count == NUM_PACKETS_HANDLED || !done) {
2789 adapter->rx_ring.unfinished_receives = true;
2790 writel(PARM_TX_TIME_INT_DEF * NANO_IN_A_MICRO,
2791 &adapter->regs->global.watchdog_timer);
2792 	} else {
2793 		/* Watchdog timer will disable itself if appropriate. */
2794 		adapter->rx_ring.unfinished_receives = false;
	}
2795 }
2796
2797 /* et131x_tx_dma_memory_alloc
2798 * @adapter: pointer to our private adapter structure
2799 *
2800 * Returns 0 on success and errno on failure (as defined in errno.h).
2801 *
2802 * Allocates memory that will be visible both to the device and to the CPU.
2803 * The OS will pass us packets, pointers to which we will insert in the Tx
2804 * Descriptor queue. The device will read this queue to find the packets in
2805 * memory. The device will update the "status" in memory each time it xmits a
2806 * packet.
2807 */
2808 static int et131x_tx_dma_memory_alloc(struct et131x_adapter *adapter)
2809 {
2810 int desc_size = 0;
2811 struct tx_ring *tx_ring = &adapter->tx_ring;
2812
2813 /* Allocate memory for the TCB's (Transmit Control Block) */
2814 adapter->tx_ring.tcb_ring = kcalloc(NUM_TCB, sizeof(struct tcb),
2815 GFP_ATOMIC | GFP_DMA);
2816 if (!adapter->tx_ring.tcb_ring)
2817 return -ENOMEM;
2818
2819 desc_size = (sizeof(struct tx_desc) * NUM_DESC_PER_RING_TX);
2820 tx_ring->tx_desc_ring =
2821 (struct tx_desc *) dma_alloc_coherent(&adapter->pdev->dev,
2822 desc_size,
2823 &tx_ring->tx_desc_ring_pa,
2824 GFP_KERNEL);
2825 if (!adapter->tx_ring.tx_desc_ring) {
2826 dev_err(&adapter->pdev->dev,
2827 "Cannot alloc memory for Tx Ring\n");
2828 return -ENOMEM;
2829 }
2830
2831 /* Save physical address
2832 *
2833 * NOTE: dma_alloc_coherent(), used above to alloc DMA regions,
2834 * ALWAYS returns SAC (32-bit) addresses. If DAC (64-bit) addresses
2835 * are ever returned, make sure the high part is retrieved here before
2836 * storing the adjusted address.
2837 */
2838 /* Allocate memory for the Tx status block */
2839 tx_ring->tx_status = dma_alloc_coherent(&adapter->pdev->dev,
2840 sizeof(u32),
2841 &tx_ring->tx_status_pa,
2842 GFP_KERNEL);
2843 	if (!adapter->tx_ring.tx_status) {
2844 dev_err(&adapter->pdev->dev,
2845 "Cannot alloc memory for Tx status block\n");
2846 return -ENOMEM;
2847 }
2848 return 0;
2849 }
2850
2851 /* et131x_tx_dma_memory_free - Free all memory allocated within this module
2852 * @adapter: pointer to our private adapter structure
2853 *
2854 * Returns 0 on success and errno on failure (as defined in errno.h).
2855 */
2856 static void et131x_tx_dma_memory_free(struct et131x_adapter *adapter)
2857 {
2858 int desc_size = 0;
2859
2860 if (adapter->tx_ring.tx_desc_ring) {
2861 /* Free memory relating to Tx rings here */
2862 desc_size = (sizeof(struct tx_desc) * NUM_DESC_PER_RING_TX);
2863 dma_free_coherent(&adapter->pdev->dev,
2864 desc_size,
2865 adapter->tx_ring.tx_desc_ring,
2866 adapter->tx_ring.tx_desc_ring_pa);
2867 adapter->tx_ring.tx_desc_ring = NULL;
2868 }
2869
2870 /* Free memory for the Tx status block */
2871 if (adapter->tx_ring.tx_status) {
2872 dma_free_coherent(&adapter->pdev->dev,
2873 sizeof(u32),
2874 adapter->tx_ring.tx_status,
2875 adapter->tx_ring.tx_status_pa);
2876
2877 adapter->tx_ring.tx_status = NULL;
2878 }
2879 /* Free the memory for the tcb structures */
2880 kfree(adapter->tx_ring.tcb_ring);
2881 }
2882
2883 /* nic_send_packet - NIC specific send handler for version B silicon.
2884 * @adapter: pointer to our adapter
2885 * @tcb: pointer to struct tcb
2886 *
2887 * Returns 0 or errno.
2888 */
2889 static int nic_send_packet(struct et131x_adapter *adapter, struct tcb *tcb)
2890 {
2891 u32 i;
2892 struct tx_desc desc[24]; /* 24 x 16 byte */
2893 u32 frag = 0;
2894 u32 thiscopy, remainder;
2895 struct sk_buff *skb = tcb->skb;
2896 u32 nr_frags = skb_shinfo(skb)->nr_frags + 1;
2897 struct skb_frag_struct *frags = &skb_shinfo(skb)->frags[0];
2898 unsigned long flags;
2899 struct phy_device *phydev = adapter->phydev;
2900 dma_addr_t dma_addr;
2901
2902 /* Part of the optimizations of this send routine restrict us to
2903 * sending 24 fragments at a pass. In practice we should never see
2904 * more than 5 fragments.
2905 *
2906 * NOTE: The older version of this function (below) can handle any
2907 * number of fragments. If needed, we can call this function,
2908 * although it is less efficient.
2909 */
2910 if (nr_frags > 23)
2911 return -EIO;
2912
2913 memset(desc, 0, sizeof(struct tx_desc) * (nr_frags + 1));
2914
2915 for (i = 0; i < nr_frags; i++) {
2916 /* If there is something in this element, lets get a
2917 * descriptor from the ring and get the necessary data
2918 */
2919 if (i == 0) {
2920 /* If the fragments are smaller than a standard MTU,
2921 * then map them to a single descriptor in the Tx
2922 * Desc ring. However, if they're larger, as is
2923 * possible with support for jumbo packets, then
2924 * split them each across 2 descriptors.
2925 *
2926 * This will work until we determine why the hardware
2927 * doesn't seem to like large fragments.
2928 */
2929 if (skb_headlen(skb) <= 1514) {
2930 /* Low 16bits are length, high is vlan and
2931 * unused currently so zero
2932 */
2933 desc[frag].len_vlan = skb_headlen(skb);
2934 dma_addr = dma_map_single(&adapter->pdev->dev,
2935 skb->data,
2936 skb_headlen(skb),
2937 DMA_TO_DEVICE);
2938 desc[frag].addr_lo = lower_32_bits(dma_addr);
2939 desc[frag].addr_hi = upper_32_bits(dma_addr);
2940 frag++;
2941 } else {
2942 desc[frag].len_vlan = skb_headlen(skb) / 2;
2943 dma_addr = dma_map_single(&adapter->pdev->dev,
2944 skb->data,
2945 (skb_headlen(skb) / 2),
2946 DMA_TO_DEVICE);
2947 desc[frag].addr_lo = lower_32_bits(dma_addr);
2948 desc[frag].addr_hi = upper_32_bits(dma_addr);
2949 frag++;
2950
2951 desc[frag].len_vlan = skb_headlen(skb) / 2;
2952 dma_addr = dma_map_single(&adapter->pdev->dev,
2953 skb->data +
2954 (skb_headlen(skb) / 2),
2955 (skb_headlen(skb) / 2),
2956 DMA_TO_DEVICE);
2957 desc[frag].addr_lo = lower_32_bits(dma_addr);
2958 desc[frag].addr_hi = upper_32_bits(dma_addr);
2959 frag++;
2960 }
2961 } else {
2962 desc[frag].len_vlan = frags[i - 1].size;
2963 dma_addr = skb_frag_dma_map(&adapter->pdev->dev,
2964 &frags[i - 1],
2965 0,
2966 frags[i - 1].size,
2967 DMA_TO_DEVICE);
2968 desc[frag].addr_lo = lower_32_bits(dma_addr);
2969 desc[frag].addr_hi = upper_32_bits(dma_addr);
2970 frag++;
2971 }
2972 }
2973
2974 if (phydev && phydev->speed == SPEED_1000) {
2975 if (++adapter->tx_ring.since_irq == PARM_TX_NUM_BUFS_DEF) {
2976 /* Last element & Interrupt flag */
2977 desc[frag - 1].flags =
2978 TXDESC_FLAG_INTPROC | TXDESC_FLAG_LASTPKT;
2979 adapter->tx_ring.since_irq = 0;
2980 } else { /* Last element */
2981 desc[frag - 1].flags = TXDESC_FLAG_LASTPKT;
2982 }
2983 	} else {
2984 		desc[frag - 1].flags =
2985 		    TXDESC_FLAG_INTPROC | TXDESC_FLAG_LASTPKT;
	}
2986
2987 desc[0].flags |= TXDESC_FLAG_FIRSTPKT;
2988
2989 tcb->index_start = adapter->tx_ring.send_idx;
2990 tcb->stale = 0;
2991
2992 spin_lock_irqsave(&adapter->send_hw_lock, flags);
2993
2994 thiscopy = NUM_DESC_PER_RING_TX - INDEX10(adapter->tx_ring.send_idx);
2995
2996 if (thiscopy >= frag) {
2997 remainder = 0;
2998 thiscopy = frag;
2999 } else {
3000 remainder = frag - thiscopy;
3001 }
3002
3003 memcpy(adapter->tx_ring.tx_desc_ring +
3004 INDEX10(adapter->tx_ring.send_idx), desc,
3005 sizeof(struct tx_desc) * thiscopy);
3006
3007 add_10bit(&adapter->tx_ring.send_idx, thiscopy);
3008
3009 if (INDEX10(adapter->tx_ring.send_idx) == 0 ||
3010 INDEX10(adapter->tx_ring.send_idx) == NUM_DESC_PER_RING_TX) {
3011 adapter->tx_ring.send_idx &= ~ET_DMA10_MASK;
3012 adapter->tx_ring.send_idx ^= ET_DMA10_WRAP;
3013 }
3014
3015 if (remainder) {
3016 memcpy(adapter->tx_ring.tx_desc_ring,
3017 desc + thiscopy,
3018 sizeof(struct tx_desc) * remainder);
3019
3020 add_10bit(&adapter->tx_ring.send_idx, remainder);
3021 }
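	/* Example of the two-part copy above, assuming NUM_DESC_PER_RING_TX
	 * is 512: with send_idx at slot 510 and frag == 4, thiscopy is 2
	 * (descriptors fill slots 510-511), the 10-bit index then wraps
	 * (wrap bit toggles, index returns to 0), and the remaining two
	 * descriptors are copied into slots 0-1.
	 */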
3022
3023 if (INDEX10(adapter->tx_ring.send_idx) == 0) {
3024 if (adapter->tx_ring.send_idx)
3025 tcb->index = NUM_DESC_PER_RING_TX - 1;
3026 else
3027 			tcb->index = ET_DMA10_WRAP | (NUM_DESC_PER_RING_TX - 1);
3028 	} else {
3029 		tcb->index = adapter->tx_ring.send_idx - 1;
	}
3030
3031 spin_lock(&adapter->tcb_send_qlock);
3032
3033 if (adapter->tx_ring.send_tail)
3034 adapter->tx_ring.send_tail->next = tcb;
3035 else
3036 adapter->tx_ring.send_head = tcb;
3037
3038 adapter->tx_ring.send_tail = tcb;
3039
3040 WARN_ON(tcb->next != NULL);
3041
3042 adapter->tx_ring.used++;
3043
3044 spin_unlock(&adapter->tcb_send_qlock);
3045
3046 /* Write the new write pointer back to the device. */
3047 writel(adapter->tx_ring.send_idx,
3048 &adapter->regs->txdma.service_request);
3049
3050 /* For Gig only, we use Tx Interrupt coalescing. Enable the software
3051 * timer to wake us up if this packet isn't followed by N more.
3052 */
3053 if (phydev && phydev->speed == SPEED_1000) {
3054 writel(PARM_TX_TIME_INT_DEF * NANO_IN_A_MICRO,
3055 &adapter->regs->global.watchdog_timer);
3056 }
3057 spin_unlock_irqrestore(&adapter->send_hw_lock, flags);
3058
3059 return 0;
3060 }
3061
3062 /* send_packet - Do the work to send a packet
3063 * @skb: the packet(s) to send
3064 * @adapter: a pointer to the device's private adapter structure
3065 *
3066 * Return 0 in almost all cases; non-zero value in extreme hard failure only.
3067 *
3068 * Assumption: Send spinlock has been acquired
3069 */
3070 static int send_packet(struct sk_buff *skb, struct et131x_adapter *adapter)
3071 {
3072 int status;
3073 struct tcb *tcb = NULL;
3074 u16 *shbufva;
3075 unsigned long flags;
3076
3077 /* All packets must have at least a MAC address and a protocol type */
3078 if (skb->len < ETH_HLEN)
3079 return -EIO;
3080
3081 /* Get a TCB for this packet */
3082 spin_lock_irqsave(&adapter->tcb_ready_qlock, flags);
3083
3084 tcb = adapter->tx_ring.tcb_qhead;
3085
3086 if (tcb == NULL) {
3087 spin_unlock_irqrestore(&adapter->tcb_ready_qlock, flags);
3088 return -ENOMEM;
3089 }
3090
3091 adapter->tx_ring.tcb_qhead = tcb->next;
3092
3093 if (adapter->tx_ring.tcb_qhead == NULL)
3094 adapter->tx_ring.tcb_qtail = NULL;
3095
3096 spin_unlock_irqrestore(&adapter->tcb_ready_qlock, flags);
3097
3098 tcb->skb = skb;
3099
3100 if (skb->data != NULL && skb_headlen(skb) >= 6) {
3101 shbufva = (u16 *) skb->data;
3102
3103 if ((shbufva[0] == 0xffff) &&
3104 (shbufva[1] == 0xffff) && (shbufva[2] == 0xffff)) {
3105 tcb->flags |= FMP_DEST_BROAD;
3106 } else if ((shbufva[0] & 0x3) == 0x0001) {
3107 tcb->flags |= FMP_DEST_MULTI;
3108 }
3109 }
3110
3111 tcb->next = NULL;
3112
3113 /* Call the NIC specific send handler. */
3114 status = nic_send_packet(adapter, tcb);
3115
3116 if (status != 0) {
3117 spin_lock_irqsave(&adapter->tcb_ready_qlock, flags);
3118
3119 if (adapter->tx_ring.tcb_qtail)
3120 adapter->tx_ring.tcb_qtail->next = tcb;
3121 else
3122 /* Apparently ready Q is empty. */
3123 adapter->tx_ring.tcb_qhead = tcb;
3124
3125 adapter->tx_ring.tcb_qtail = tcb;
3126 spin_unlock_irqrestore(&adapter->tcb_ready_qlock, flags);
3127 return status;
3128 }
3129 WARN_ON(adapter->tx_ring.used > NUM_TCB);
3130 return 0;
3131 }
3132
3133 /* et131x_send_packets - This function is called by the OS to send packets
3134 * @skb: the packet(s) to send
3135  * @netdev: device on which to TX the above packet(s)
3136 *
3137 * Return 0 in almost all cases; non-zero value in extreme hard failure only
3138 */
3139 static int et131x_send_packets(struct sk_buff *skb, struct net_device *netdev)
3140 {
3141 int status = 0;
3142 struct et131x_adapter *adapter = netdev_priv(netdev);
3143
3144 /* Send these packets
3145 *
3146 * NOTE: The Linux Tx entry point is only given one packet at a time
3147 	 * to Tx, so the PacketCount and its array are not used here
3148 */
3149
3150 /* TCB is not available */
3151 if (adapter->tx_ring.used >= NUM_TCB) {
3152 /* NOTE: If there's an error on send, no need to queue the
3153 * packet under Linux; if we just send an error up to the
3154 * netif layer, it will resend the skb to us.
3155 */
3156 status = -ENOMEM;
3157 } else {
3158 /* We need to see if the link is up; if it's not, make the
3159 * netif layer think we're good and drop the packet
3160 */
3161 if ((adapter->flags & FMP_ADAPTER_FAIL_SEND_MASK) ||
3162 !netif_carrier_ok(netdev)) {
3163 dev_kfree_skb_any(skb);
3164 skb = NULL;
3165
3166 adapter->net_stats.tx_dropped++;
3167 } else {
3168 status = send_packet(skb, adapter);
3169 if (status != 0 && status != -ENOMEM) {
3170 /* On any other error, make netif think we're
3171 * OK and drop the packet
3172 */
3173 dev_kfree_skb_any(skb);
3174 skb = NULL;
3175 adapter->net_stats.tx_dropped++;
3176 }
3177 }
3178 }
3179 return status;
3180 }
3181
3182 /* free_send_packet - Recycle a struct tcb
3183 * @adapter: pointer to our adapter
3184 * @tcb: pointer to struct tcb
3185 *
3186 * Complete the packet if necessary
3187 * Assumption - Send spinlock has been acquired
3188 */
3189 static inline void free_send_packet(struct et131x_adapter *adapter,
3190 struct tcb *tcb)
3191 {
3192 unsigned long flags;
3193 struct tx_desc *desc = NULL;
3194 struct net_device_stats *stats = &adapter->net_stats;
3195 u64 dma_addr;
3196
3197 if (tcb->flags & FMP_DEST_BROAD)
3198 atomic_inc(&adapter->stats.broadcast_pkts_xmtd);
3199 else if (tcb->flags & FMP_DEST_MULTI)
3200 atomic_inc(&adapter->stats.multicast_pkts_xmtd);
3201 else
3202 atomic_inc(&adapter->stats.unicast_pkts_xmtd);
3203
3204 if (tcb->skb) {
3205 stats->tx_bytes += tcb->skb->len;
3206
3207 /* Iterate through the TX descriptors on the ring
3208 		 * corresponding to this packet and unmap the fragments
3209 * they point to
3210 */
3211 do {
3212 desc = (struct tx_desc *)
3213 (adapter->tx_ring.tx_desc_ring +
3214 INDEX10(tcb->index_start));
3215
3216 dma_addr = desc->addr_lo;
3217 dma_addr |= (u64)desc->addr_hi << 32;
3218
3219 dma_unmap_single(&adapter->pdev->dev,
3220 dma_addr,
3221 desc->len_vlan, DMA_TO_DEVICE);
3222
3223 add_10bit(&tcb->index_start, 1);
3224 if (INDEX10(tcb->index_start) >=
3225 NUM_DESC_PER_RING_TX) {
3226 tcb->index_start &= ~ET_DMA10_MASK;
3227 tcb->index_start ^= ET_DMA10_WRAP;
3228 }
3229 } while (desc != (adapter->tx_ring.tx_desc_ring +
3230 INDEX10(tcb->index)));
3231
3232 dev_kfree_skb_any(tcb->skb);
3233 }
3234
3235 memset(tcb, 0, sizeof(struct tcb));
3236
3237 /* Add the TCB to the Ready Q */
3238 spin_lock_irqsave(&adapter->tcb_ready_qlock, flags);
3239
3240 adapter->net_stats.tx_packets++;
3241
3242 if (adapter->tx_ring.tcb_qtail)
3243 adapter->tx_ring.tcb_qtail->next = tcb;
3244 else
3245 /* Apparently ready Q is empty. */
3246 adapter->tx_ring.tcb_qhead = tcb;
3247
3248 adapter->tx_ring.tcb_qtail = tcb;
3249
3250 spin_unlock_irqrestore(&adapter->tcb_ready_qlock, flags);
3251 WARN_ON(adapter->tx_ring.used < 0);
3252 }
3253
3254 /* et131x_free_busy_send_packets - Free and complete the stopped active sends
3255 * @adapter: pointer to our adapter
3256 *
3257 * Assumption - Send spinlock has been acquired
3258 */
3259 static void et131x_free_busy_send_packets(struct et131x_adapter *adapter)
3260 {
3261 struct tcb *tcb;
3262 unsigned long flags;
3263 u32 freed = 0;
3264
3265 /* Any packets being sent? Check the first TCB on the send list */
3266 spin_lock_irqsave(&adapter->tcb_send_qlock, flags);
3267
3268 tcb = adapter->tx_ring.send_head;
3269
3270 while (tcb != NULL && freed < NUM_TCB) {
3271 struct tcb *next = tcb->next;
3272
3273 adapter->tx_ring.send_head = next;
3274
3275 if (next == NULL)
3276 adapter->tx_ring.send_tail = NULL;
3277
3278 adapter->tx_ring.used--;
3279
3280 spin_unlock_irqrestore(&adapter->tcb_send_qlock, flags);
3281
3282 freed++;
3283 free_send_packet(adapter, tcb);
3284
3285 spin_lock_irqsave(&adapter->tcb_send_qlock, flags);
3286
3287 tcb = adapter->tx_ring.send_head;
3288 }
3289
3290 WARN_ON(freed == NUM_TCB);
3291
3292 spin_unlock_irqrestore(&adapter->tcb_send_qlock, flags);
3293
3294 adapter->tx_ring.used = 0;
3295 }
3296
3297 /* et131x_handle_send_interrupt - Interrupt handler for sending processing
3298 * @adapter: pointer to our adapter
3299 *
3300 * Re-claim the send resources, complete sends and get more to send from
3301 * the send wait queue.
3302 *
3303 * Assumption - Send spinlock has been acquired
3304 */
3305 static void et131x_handle_send_interrupt(struct et131x_adapter *adapter)
3306 {
3307 unsigned long flags;
3308 u32 serviced;
3309 struct tcb *tcb;
3310 u32 index;
3311
3312 serviced = readl(&adapter->regs->txdma.new_service_complete);
3313 index = INDEX10(serviced);
3314
3315 /* Has the ring wrapped? Process any descriptors that do not have
3316 * the same "wrap" indicator as the current completion indicator
3317 */
3318 spin_lock_irqsave(&adapter->tcb_send_qlock, flags);
3319
3320 tcb = adapter->tx_ring.send_head;
3321
3322 while (tcb &&
3323 ((serviced ^ tcb->index) & ET_DMA10_WRAP) &&
3324 index < INDEX10(tcb->index)) {
3325 adapter->tx_ring.used--;
3326 adapter->tx_ring.send_head = tcb->next;
3327 if (tcb->next == NULL)
3328 adapter->tx_ring.send_tail = NULL;
3329
3330 spin_unlock_irqrestore(&adapter->tcb_send_qlock, flags);
3331 free_send_packet(adapter, tcb);
3332 spin_lock_irqsave(&adapter->tcb_send_qlock, flags);
3333
3334 /* Goto the next packet */
3335 tcb = adapter->tx_ring.send_head;
3336 }
3337 while (tcb &&
3338 !((serviced ^ tcb->index) & ET_DMA10_WRAP)
3339 && index > (tcb->index & ET_DMA10_MASK)) {
3340 adapter->tx_ring.used--;
3341 adapter->tx_ring.send_head = tcb->next;
3342 if (tcb->next == NULL)
3343 adapter->tx_ring.send_tail = NULL;
3344
3345 spin_unlock_irqrestore(&adapter->tcb_send_qlock, flags);
3346 free_send_packet(adapter, tcb);
3347 spin_lock_irqsave(&adapter->tcb_send_qlock, flags);
3348
3349 /* Goto the next packet */
3350 tcb = adapter->tx_ring.send_head;
3351 }
3352
3353 /* Wake up the queue when we hit a low-water mark */
3354 if (adapter->tx_ring.used <= NUM_TCB / 3)
3355 netif_wake_queue(adapter->netdev);
3356
3357 spin_unlock_irqrestore(&adapter->tcb_send_qlock, flags);
3358 }
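/* Example of the completion scan above, taking ET_DMA10_WRAP as 0x400 and
 * ET_DMA10_MASK as 0x3FF for illustration: suppose the hardware reports
 * serviced == 0x412 (wrap set, slot 0x012). A TCB stamped 0x3F0 (previous
 * revolution, wrap clear, slot above 0x012) is freed by the first loop; a
 * TCB stamped 0x405 (same revolution, slot 5 below 0x012) is freed by the
 * second loop; a TCB stamped 0x420 (slot 0x020, not yet reached) matches
 * neither test and stays on the send list.
 */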
3359
3360 static int et131x_get_settings(struct net_device *netdev,
3361 struct ethtool_cmd *cmd)
3362 {
3363 struct et131x_adapter *adapter = netdev_priv(netdev);
3364
3365 return phy_ethtool_gset(adapter->phydev, cmd);
3366 }
3367
3368 static int et131x_set_settings(struct net_device *netdev,
3369 struct ethtool_cmd *cmd)
3370 {
3371 struct et131x_adapter *adapter = netdev_priv(netdev);
3372
3373 return phy_ethtool_sset(adapter->phydev, cmd);
3374 }
3375
3376 static int et131x_get_regs_len(struct net_device *netdev)
3377 {
3378 #define ET131X_REGS_LEN 256
3379 return ET131X_REGS_LEN * sizeof(u32);
3380 }
3381
3382 static void et131x_get_regs(struct net_device *netdev,
3383 struct ethtool_regs *regs, void *regs_data)
3384 {
3385 struct et131x_adapter *adapter = netdev_priv(netdev);
3386 struct address_map __iomem *aregs = adapter->regs;
3387 u32 *regs_buff = regs_data;
3388 u32 num = 0;
3389 u16 tmp;
3390
3391 memset(regs_data, 0, et131x_get_regs_len(netdev));
3392
3393 regs->version = (1 << 24) | (adapter->pdev->revision << 16) |
3394 adapter->pdev->device;
3395
3396 /* PHY regs */
3397 et131x_mii_read(adapter, MII_BMCR, &tmp);
3398 regs_buff[num++] = tmp;
3399 et131x_mii_read(adapter, MII_BMSR, &tmp);
3400 regs_buff[num++] = tmp;
3401 et131x_mii_read(adapter, MII_PHYSID1, &tmp);
3402 regs_buff[num++] = tmp;
3403 et131x_mii_read(adapter, MII_PHYSID2, &tmp);
3404 regs_buff[num++] = tmp;
3405 et131x_mii_read(adapter, MII_ADVERTISE, &tmp);
3406 regs_buff[num++] = tmp;
3407 et131x_mii_read(adapter, MII_LPA, &tmp);
3408 regs_buff[num++] = tmp;
3409 et131x_mii_read(adapter, MII_EXPANSION, &tmp);
3410 regs_buff[num++] = tmp;
3411 /* Autoneg next page transmit reg */
3412 et131x_mii_read(adapter, 0x07, &tmp);
3413 regs_buff[num++] = tmp;
3414 /* Link partner next page reg */
3415 et131x_mii_read(adapter, 0x08, &tmp);
3416 regs_buff[num++] = tmp;
3417 et131x_mii_read(adapter, MII_CTRL1000, &tmp);
3418 regs_buff[num++] = tmp;
3419 et131x_mii_read(adapter, MII_STAT1000, &tmp);
3420 regs_buff[num++] = tmp;
3421 et131x_mii_read(adapter, 0x0b, &tmp);
3422 regs_buff[num++] = tmp;
3423 et131x_mii_read(adapter, 0x0c, &tmp);
3424 regs_buff[num++] = tmp;
3425 et131x_mii_read(adapter, MII_MMD_CTRL, &tmp);
3426 regs_buff[num++] = tmp;
3427 et131x_mii_read(adapter, MII_MMD_DATA, &tmp);
3428 regs_buff[num++] = tmp;
3429 et131x_mii_read(adapter, MII_ESTATUS, &tmp);
3430 regs_buff[num++] = tmp;
3431
3432 et131x_mii_read(adapter, PHY_INDEX_REG, &tmp);
3433 regs_buff[num++] = tmp;
3434 et131x_mii_read(adapter, PHY_DATA_REG, &tmp);
3435 regs_buff[num++] = tmp;
3436 et131x_mii_read(adapter, PHY_MPHY_CONTROL_REG, &tmp);
3437 regs_buff[num++] = tmp;
3438 et131x_mii_read(adapter, PHY_LOOPBACK_CONTROL, &tmp);
3439 regs_buff[num++] = tmp;
3440 et131x_mii_read(adapter, PHY_LOOPBACK_CONTROL + 1, &tmp);
3441 regs_buff[num++] = tmp;
3442
3443 et131x_mii_read(adapter, PHY_REGISTER_MGMT_CONTROL, &tmp);
3444 regs_buff[num++] = tmp;
3445 et131x_mii_read(adapter, PHY_CONFIG, &tmp);
3446 regs_buff[num++] = tmp;
3447 et131x_mii_read(adapter, PHY_PHY_CONTROL, &tmp);
3448 regs_buff[num++] = tmp;
3449 et131x_mii_read(adapter, PHY_INTERRUPT_MASK, &tmp);
3450 regs_buff[num++] = tmp;
3451 et131x_mii_read(adapter, PHY_INTERRUPT_STATUS, &tmp);
3452 regs_buff[num++] = tmp;
3453 et131x_mii_read(adapter, PHY_PHY_STATUS, &tmp);
3454 regs_buff[num++] = tmp;
3455 et131x_mii_read(adapter, PHY_LED_1, &tmp);
3456 regs_buff[num++] = tmp;
3457 et131x_mii_read(adapter, PHY_LED_2, &tmp);
3458 regs_buff[num++] = tmp;
3459
3460 /* Global regs */
3461 regs_buff[num++] = readl(&aregs->global.txq_start_addr);
3462 regs_buff[num++] = readl(&aregs->global.txq_end_addr);
3463 regs_buff[num++] = readl(&aregs->global.rxq_start_addr);
3464 regs_buff[num++] = readl(&aregs->global.rxq_end_addr);
3465 regs_buff[num++] = readl(&aregs->global.pm_csr);
3466 regs_buff[num++] = adapter->stats.interrupt_status;
3467 regs_buff[num++] = readl(&aregs->global.int_mask);
3468 regs_buff[num++] = readl(&aregs->global.int_alias_clr_en);
3469 regs_buff[num++] = readl(&aregs->global.int_status_alias);
3470 regs_buff[num++] = readl(&aregs->global.sw_reset);
3471 regs_buff[num++] = readl(&aregs->global.slv_timer);
3472 regs_buff[num++] = readl(&aregs->global.msi_config);
3473 regs_buff[num++] = readl(&aregs->global.loopback);
3474 regs_buff[num++] = readl(&aregs->global.watchdog_timer);
3475
3476 /* TXDMA regs */
3477 regs_buff[num++] = readl(&aregs->txdma.csr);
3478 regs_buff[num++] = readl(&aregs->txdma.pr_base_hi);
3479 regs_buff[num++] = readl(&aregs->txdma.pr_base_lo);
3480 regs_buff[num++] = readl(&aregs->txdma.pr_num_des);
3481 regs_buff[num++] = readl(&aregs->txdma.txq_wr_addr);
3482 regs_buff[num++] = readl(&aregs->txdma.txq_wr_addr_ext);
3483 regs_buff[num++] = readl(&aregs->txdma.txq_rd_addr);
3484 regs_buff[num++] = readl(&aregs->txdma.dma_wb_base_hi);
3485 regs_buff[num++] = readl(&aregs->txdma.dma_wb_base_lo);
3486 regs_buff[num++] = readl(&aregs->txdma.service_request);
3487 regs_buff[num++] = readl(&aregs->txdma.service_complete);
3488 regs_buff[num++] = readl(&aregs->txdma.cache_rd_index);
3489 regs_buff[num++] = readl(&aregs->txdma.cache_wr_index);
3490 regs_buff[num++] = readl(&aregs->txdma.tx_dma_error);
3491 regs_buff[num++] = readl(&aregs->txdma.desc_abort_cnt);
3492 regs_buff[num++] = readl(&aregs->txdma.payload_abort_cnt);
3493 regs_buff[num++] = readl(&aregs->txdma.writeback_abort_cnt);
3494 regs_buff[num++] = readl(&aregs->txdma.desc_timeout_cnt);
3495 regs_buff[num++] = readl(&aregs->txdma.payload_timeout_cnt);
3496 regs_buff[num++] = readl(&aregs->txdma.writeback_timeout_cnt);
3497 regs_buff[num++] = readl(&aregs->txdma.desc_error_cnt);
3498 regs_buff[num++] = readl(&aregs->txdma.payload_error_cnt);
3499 regs_buff[num++] = readl(&aregs->txdma.writeback_error_cnt);
3500 regs_buff[num++] = readl(&aregs->txdma.dropped_tlp_cnt);
3501 regs_buff[num++] = readl(&aregs->txdma.new_service_complete);
3502 regs_buff[num++] = readl(&aregs->txdma.ethernet_packet_cnt);
3503
3504 /* RXDMA regs */
3505 regs_buff[num++] = readl(&aregs->rxdma.csr);
3506 regs_buff[num++] = readl(&aregs->rxdma.dma_wb_base_hi);
3507 regs_buff[num++] = readl(&aregs->rxdma.dma_wb_base_lo);
3508 regs_buff[num++] = readl(&aregs->rxdma.num_pkt_done);
3509 regs_buff[num++] = readl(&aregs->rxdma.max_pkt_time);
3510 regs_buff[num++] = readl(&aregs->rxdma.rxq_rd_addr);
3511 regs_buff[num++] = readl(&aregs->rxdma.rxq_rd_addr_ext);
3512 regs_buff[num++] = readl(&aregs->rxdma.rxq_wr_addr);
3513 regs_buff[num++] = readl(&aregs->rxdma.psr_base_hi);
3514 regs_buff[num++] = readl(&aregs->rxdma.psr_base_lo);
3515 regs_buff[num++] = readl(&aregs->rxdma.psr_num_des);
3516 regs_buff[num++] = readl(&aregs->rxdma.psr_avail_offset);
3517 regs_buff[num++] = readl(&aregs->rxdma.psr_full_offset);
3518 regs_buff[num++] = readl(&aregs->rxdma.psr_access_index);
3519 regs_buff[num++] = readl(&aregs->rxdma.psr_min_des);
3520 regs_buff[num++] = readl(&aregs->rxdma.fbr0_base_lo);
3521 regs_buff[num++] = readl(&aregs->rxdma.fbr0_base_hi);
3522 regs_buff[num++] = readl(&aregs->rxdma.fbr0_num_des);
3523 regs_buff[num++] = readl(&aregs->rxdma.fbr0_avail_offset);
3524 regs_buff[num++] = readl(&aregs->rxdma.fbr0_full_offset);
3525 regs_buff[num++] = readl(&aregs->rxdma.fbr0_rd_index);
3526 regs_buff[num++] = readl(&aregs->rxdma.fbr0_min_des);
3527 regs_buff[num++] = readl(&aregs->rxdma.fbr1_base_lo);
3528 regs_buff[num++] = readl(&aregs->rxdma.fbr1_base_hi);
3529 regs_buff[num++] = readl(&aregs->rxdma.fbr1_num_des);
3530 regs_buff[num++] = readl(&aregs->rxdma.fbr1_avail_offset);
3531 regs_buff[num++] = readl(&aregs->rxdma.fbr1_full_offset);
3532 regs_buff[num++] = readl(&aregs->rxdma.fbr1_rd_index);
3533 regs_buff[num++] = readl(&aregs->rxdma.fbr1_min_des);
3534 }
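
/* A note on the snapshot built above: the buffer is filled in read order -
 * the standard MII registers, the vendor-specific PHY_* registers, then the
 * global, TXDMA and RXDMA blocks. Far fewer than ET131X_REGS_LEN words are
 * written, and the initial memset() guarantees the unused tail reads back
 * as zero. User space fetches the dump through the ETHTOOL_GREGS ioctl,
 * e.g. via 'ethtool -d ethX'.
 */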
3535
3536 #define ET131X_DRVINFO_LEN 32 /* value from ethtool.h */
3537 static void et131x_get_drvinfo(struct net_device *netdev,
3538 struct ethtool_drvinfo *info)
3539 {
3540 struct et131x_adapter *adapter = netdev_priv(netdev);
3541
3542 strlcpy(info->driver, DRIVER_NAME, ET131X_DRVINFO_LEN);
3543 strlcpy(info->version, DRIVER_VERSION, ET131X_DRVINFO_LEN);
3544 strlcpy(info->bus_info, pci_name(adapter->pdev), ET131X_DRVINFO_LEN);
3545 }
3546
3547 static const struct ethtool_ops et131x_ethtool_ops = {
3548 .get_settings = et131x_get_settings,
3549 .set_settings = et131x_set_settings,
3550 .get_drvinfo = et131x_get_drvinfo,
3551 .get_regs_len = et131x_get_regs_len,
3552 .get_regs = et131x_get_regs,
3553 .get_link = ethtool_op_get_link,
3554 };
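
/* For illustration only - a minimal user-space sketch (not part of this
 * driver) of how the ops above are reached through the SIOCETHTOOL ioctl.
 * The interface name "eth0" is an assumption:
 *
 *	struct ethtool_drvinfo info = { .cmd = ETHTOOL_GDRVINFO };
 *	struct ifreq ifr = {0};
 *	int fd = socket(AF_INET, SOCK_DGRAM, 0);
 *
 *	strcpy(ifr.ifr_name, "eth0");
 *	ifr.ifr_data = (char *)&info;
 *	if (ioctl(fd, SIOCETHTOOL, &ifr) == 0)
 *		printf("%s %s\n", info.driver, info.version);
 *
 * The ethtool core routes ETHTOOL_GDRVINFO to et131x_get_drvinfo() above.
 */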
3555
3556 /* et131x_hwaddr_init - set up the MAC Address on the ET1310
3557 * @adapter: pointer to our private adapter structure
3558 */
3559 static void et131x_hwaddr_init(struct et131x_adapter *adapter)
3560 {
3561 	/* If we have our default MAC from init and no MAC address from
3562 	 * the EEPROM, then we need to generate the last octet and set it
3563 	 * on the device
3564 	 */
3565 if (is_zero_ether_addr(adapter->rom_addr)) {
3566 		/* We need to randomly generate the last octet so we
3567 		 * decrease our chances of setting the MAC address to the
3568 		 * same as another one of our cards in the system
3569 		 */
3570 get_random_bytes(&adapter->addr[5], 1);
3571 /* We have the default value in the register we are
3572 * working with so we need to copy the current
3573 * address into the permanent address
3574 */
3575 memcpy(adapter->rom_addr,
3576 adapter->addr, ETH_ALEN);
3577 } else {
3578 /* We do not have an override address, so set the
3579 * current address to the permanent address and add
3580 * it to the device
3581 */
3582 memcpy(adapter->addr,
3583 adapter->rom_addr, ETH_ALEN);
3584 }
3585 }
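
/* With the default MAC set up by et131x_adapter_init() (00:05:3d:00:02:00)
 * and no EEPROM override, the logic above only randomises the final octet,
 * producing an address of the form 00:05:3d:00:02:xx. Two such NICs in one
 * system therefore still collide with probability 1/256 - this reduces,
 * rather than eliminates, the chance of duplicate addresses.
 */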
3586
3587 /* et131x_pci_init - initial PCI setup
3588 * @adapter: pointer to our private adapter structure
3589 * @pdev: our PCI device
3590 *
3591 * Perform the initial setup of PCI registers and if possible initialise
3592 * the MAC address. At this point the I/O registers have yet to be mapped
3593 */
3594 static int et131x_pci_init(struct et131x_adapter *adapter,
3595 struct pci_dev *pdev)
3596 {
3597 u16 max_payload;
3598 int i, rc;
3599
3600 rc = et131x_init_eeprom(adapter);
3601 if (rc < 0)
3602 goto out;
3603
3604 if (!pci_is_pcie(pdev)) {
3605 dev_err(&pdev->dev, "Missing PCIe capabilities\n");
3606 goto err_out;
3607 }
3608
3609 /* Let's set up the PORT LOGIC Register. First we need to know what
3610 * the max_payload_size is
3611 */
3612 if (pcie_capability_read_word(pdev, PCI_EXP_DEVCAP, &max_payload)) {
3613 dev_err(&pdev->dev,
3614 "Could not read PCI config space for Max Payload Size\n");
3615 goto err_out;
3616 }
3617
3618 /* Program the Ack/Nak latency and replay timers */
3619 max_payload &= 0x07;
3620
3621 if (max_payload < 2) {
3622 static const u16 acknak[2] = { 0x76, 0xD0 };
3623 static const u16 replay[2] = { 0x1E0, 0x2ED };
3624
3625 if (pci_write_config_word(pdev, ET1310_PCI_ACK_NACK,
3626 acknak[max_payload])) {
3627 dev_err(&pdev->dev,
3628 "Could not write PCI config space for ACK/NAK\n");
3629 goto err_out;
3630 }
3631 if (pci_write_config_word(pdev, ET1310_PCI_REPLAY,
3632 replay[max_payload])) {
3633 dev_err(&pdev->dev,
3634 "Could not write PCI config space for Replay Timer\n");
3635 goto err_out;
3636 }
3637 }
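
	/* Per the PCIe spec, the Max_Payload_Size field decodes as
	 * 128 << max_payload bytes (0 = 128B, 1 = 256B, ...), so the
	 * acknak[] and replay[] tables above only need entries for the
	 * two payload sizes that get special timer values; larger
	 * settings fall through with the device defaults left in place.
	 */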
3638
3639 	/* L0s and L1 latency timers. We are using the default values:
3640 	 * 001 for L0s and 010 for L1
3641 	 */
3642 if (pci_write_config_byte(pdev, ET1310_PCI_L0L1LATENCY, 0x11)) {
3643 dev_err(&pdev->dev,
3644 "Could not write PCI config space for Latency Timers\n");
3645 goto err_out;
3646 }
3647
3648 /* Change the max read size to 2k */
3649 if (pcie_capability_clear_and_set_word(pdev, PCI_EXP_DEVCTL,
3650 PCI_EXP_DEVCTL_READRQ, 0x4 << 12)) {
3651 dev_err(&pdev->dev,
3652 "Couldn't change PCI config space for Max read size\n");
3653 goto err_out;
3654 }
3655
3656 /* Get MAC address from config space if an eeprom exists, otherwise
3657 * the MAC address there will not be valid
3658 */
3659 if (!adapter->has_eeprom) {
3660 et131x_hwaddr_init(adapter);
3661 return 0;
3662 }
3663
3664 for (i = 0; i < ETH_ALEN; i++) {
3665 if (pci_read_config_byte(pdev, ET1310_PCI_MAC_ADDRESS + i,
3666 adapter->rom_addr + i)) {
3667 dev_err(&pdev->dev, "Could not read PCI config space for MAC address\n");
3668 goto err_out;
3669 }
3670 }
3671 memcpy(adapter->addr, adapter->rom_addr, ETH_ALEN);
3672 out:
3673 return rc;
3674 err_out:
3675 rc = -EIO;
3676 goto out;
3677 }
3678
3679 /* et131x_error_timer_handler
3680 * @data: timer-specific variable; here a pointer to our adapter structure
3681 *
3682 * The routine called when the error timer expires, to track the number of
3683 * recurring errors.
3684 */
3685 static void et131x_error_timer_handler(unsigned long data)
3686 {
3687 struct et131x_adapter *adapter = (struct et131x_adapter *) data;
3688 struct phy_device *phydev = adapter->phydev;
3689
3690 if (et1310_in_phy_coma(adapter)) {
3691 /* Bring the device immediately out of coma, to
3692 * prevent it from sleeping indefinitely, this
3693 * mechanism could be improved!
3694 */
3695 et1310_disable_phy_coma(adapter);
3696 adapter->boot_coma = 20;
3697 } else {
3698 et1310_update_macstat_host_counters(adapter);
3699 }
3700
3701 if (!phydev->link && adapter->boot_coma < 11)
3702 adapter->boot_coma++;
3703
3704 if (adapter->boot_coma == 10) {
3705 if (!phydev->link) {
3706 if (!et1310_in_phy_coma(adapter)) {
3707 /* NOTE - This was originally a 'sync with
3708 * interrupt'. How to do that under Linux?
3709 */
3710 et131x_enable_interrupts(adapter);
3711 et1310_enable_phy_coma(adapter);
3712 }
3713 }
3714 }
3715
3716 /* This is a periodic timer, so reschedule */
3717 mod_timer(&adapter->error_timer, jiffies + TX_ERROR_PERIOD * HZ / 1000);
3718 }
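
/* The rescheduling above converts milliseconds to jiffies by hand:
 * TX_ERROR_PERIOD * HZ / 1000. Assuming TX_ERROR_PERIOD is expressed in
 * milliseconds, msecs_to_jiffies(TX_ERROR_PERIOD) would express the same
 * conversion more idiomatically.
 */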
3719
3720 /* et131x_adapter_memory_free - Free all memory allocated for use by Tx & Rx
3721 * @adapter: pointer to our private adapter structure
3722 */
3723 static void et131x_adapter_memory_free(struct et131x_adapter *adapter)
3724 {
3725 /* Free DMA memory */
3726 et131x_tx_dma_memory_free(adapter);
3727 et131x_rx_dma_memory_free(adapter);
3728 }
3729
3730 /* et131x_adapter_memory_alloc
3731 * @adapter: pointer to our private adapter structure
3732 *
3733 * Returns 0 on success, errno on failure (as defined in errno.h).
3734 *
3735 * Allocate all the memory blocks for send, receive and others.
3736 */
3737 static int et131x_adapter_memory_alloc(struct et131x_adapter *adapter)
3738 {
3739 int status;
3740
3741 /* Allocate memory for the Tx Ring */
3742 status = et131x_tx_dma_memory_alloc(adapter);
3743 if (status != 0) {
3744 dev_err(&adapter->pdev->dev,
3745 "et131x_tx_dma_memory_alloc FAILED\n");
3746 return status;
3747 }
3748 /* Receive buffer memory allocation */
3749 status = et131x_rx_dma_memory_alloc(adapter);
3750 if (status != 0) {
3751 dev_err(&adapter->pdev->dev,
3752 "et131x_rx_dma_memory_alloc FAILED\n");
3753 et131x_tx_dma_memory_free(adapter);
3754 return status;
3755 }
3756
3757 /* Init receive data structures */
3758 status = et131x_init_recv(adapter);
3759 if (status) {
3760 dev_err(&adapter->pdev->dev,
3761 "et131x_init_recv FAILED\n");
3762 et131x_adapter_memory_free(adapter);
3763 }
3764 return status;
3765 }
3766
3767 static void et131x_adjust_link(struct net_device *netdev)
3768 {
3769 struct et131x_adapter *adapter = netdev_priv(netdev);
3770 struct phy_device *phydev = adapter->phydev;
3771
3772 if (phydev && phydev->link != adapter->link) {
3773 /* Check to see if we are in coma mode and if
3774 * so, disable it because we will not be able
3775 * to read PHY values until we are out.
3776 */
3777 if (et1310_in_phy_coma(adapter))
3778 et1310_disable_phy_coma(adapter);
3779
3780 adapter->link = phydev->link;
3781 phy_print_status(phydev);
3782
3783 if (phydev->link) {
3784 adapter->boot_coma = 20;
3785 if (phydev && phydev->speed == SPEED_10) {
3786 /* NOTE - Is there a way to query this without
3787 * TruePHY?
3788 * && TRU_QueryCoreType(adapter->hTruePhy, 0)==
3789 * EMI_TRUEPHY_A13O) {
3790 */
3791 u16 register18;
3792
3793 et131x_mii_read(adapter, PHY_MPHY_CONTROL_REG,
3794 &register18);
3795 et131x_mii_write(adapter, PHY_MPHY_CONTROL_REG,
3796 register18 | 0x4);
3797 et131x_mii_write(adapter, PHY_INDEX_REG,
3798 register18 | 0x8402);
3799 et131x_mii_write(adapter, PHY_DATA_REG,
3800 register18 | 511);
3801 et131x_mii_write(adapter, PHY_MPHY_CONTROL_REG,
3802 register18);
3803 }
3804
3805 et1310_config_flow_control(adapter);
3806
3807 if (phydev && phydev->speed == SPEED_1000 &&
3808 adapter->registry_jumbo_packet > 2048) {
3809 u16 reg;
3810
3811 et131x_mii_read(adapter, PHY_CONFIG, &reg);
3812 reg &= ~ET_PHY_CONFIG_TX_FIFO_DEPTH;
3813 reg |= ET_PHY_CONFIG_FIFO_DEPTH_32;
3814 et131x_mii_write(adapter, PHY_CONFIG, reg);
3815 }
3816
3817 et131x_set_rx_dma_timer(adapter);
3818 et1310_config_mac_regs2(adapter);
3819 } else {
3820 adapter->boot_coma = 0;
3821
3822 if (phydev->speed == SPEED_10) {
3823 /* NOTE - Is there a way to query this without
3824 * TruePHY?
3825 * && TRU_QueryCoreType(adapter->hTruePhy, 0) ==
3826 * EMI_TRUEPHY_A13O)
3827 */
3828 u16 register18;
3829
3830 et131x_mii_read(adapter, PHY_MPHY_CONTROL_REG,
3831 &register18);
3832 et131x_mii_write(adapter, PHY_MPHY_CONTROL_REG,
3833 register18 | 0x4);
3834 et131x_mii_write(adapter, PHY_INDEX_REG,
3835 register18 | 0x8402);
3836 et131x_mii_write(adapter, PHY_DATA_REG,
3837 register18 | 511);
3838 et131x_mii_write(adapter, PHY_MPHY_CONTROL_REG,
3839 register18);
3840 }
3841
3842 /* Free the packets being actively sent & stopped */
3843 et131x_free_busy_send_packets(adapter);
3844
3845 /* Re-initialize the send structures */
3846 et131x_init_send(adapter);
3847
3848 /* Bring the device back to the state it was during
3849 * init prior to autonegotiation being complete. This
3850 * way, when we get the auto-neg complete interrupt,
3851 * we can complete init by calling config_mac_regs2.
3852 */
3853 et131x_soft_reset(adapter);
3854
3855 /* Setup ET1310 as per the documentation */
3856 et131x_adapter_setup(adapter);
3857
3858 /* perform reset of tx/rx */
3859 et131x_disable_txrx(netdev);
3860 et131x_enable_txrx(netdev);
3861 }
3862
3863 }
3864 }
3865
3866 static int et131x_mii_probe(struct net_device *netdev)
3867 {
3868 struct et131x_adapter *adapter = netdev_priv(netdev);
3869 struct phy_device *phydev = NULL;
3870
3871 phydev = phy_find_first(adapter->mii_bus);
3872 if (!phydev) {
3873 dev_err(&adapter->pdev->dev, "no PHY found\n");
3874 return -ENODEV;
3875 }
3876
3877 phydev = phy_connect(netdev, dev_name(&phydev->dev),
3878 &et131x_adjust_link, 0, PHY_INTERFACE_MODE_MII);
3879
3880 if (IS_ERR(phydev)) {
3881 dev_err(&adapter->pdev->dev, "Could not attach to PHY\n");
3882 return PTR_ERR(phydev);
3883 }
3884
3885 phydev->supported &= (SUPPORTED_10baseT_Half
3886 | SUPPORTED_10baseT_Full
3887 | SUPPORTED_100baseT_Half
3888 | SUPPORTED_100baseT_Full
3889 | SUPPORTED_Autoneg
3890 | SUPPORTED_MII
3891 | SUPPORTED_TP);
3892
3893 if (adapter->pdev->device != ET131X_PCI_DEVICE_ID_FAST)
3894 phydev->supported |= SUPPORTED_1000baseT_Full;
3895
3896 phydev->advertising = phydev->supported;
3897 adapter->phydev = phydev;
3898
3899 dev_info(&adapter->pdev->dev, "attached PHY driver [%s] (mii_bus:phy_addr=%s)\n",
3900 phydev->drv->name, dev_name(&phydev->dev));
3901
3902 return 0;
3903 }
3904
3905 /* et131x_adapter_init
3906 * @adapter: pointer to the private adapter struct
3907 * @pdev: pointer to the PCI device
3908 *
3909 * Initialize the data structures for the et131x_adapter object and link
3910 * them together with the platform provided device structures.
3911 */
3912 static struct et131x_adapter *et131x_adapter_init(struct net_device *netdev,
3913 struct pci_dev *pdev)
3914 {
3915 static const u8 default_mac[] = { 0x00, 0x05, 0x3d, 0x00, 0x02, 0x00 };
3916
3917 struct et131x_adapter *adapter;
3918
3919 /* Allocate private adapter struct and copy in relevant information */
3920 adapter = netdev_priv(netdev);
3921 adapter->pdev = pci_dev_get(pdev);
3922 adapter->netdev = netdev;
3923
3924 /* Initialize spinlocks here */
3925 spin_lock_init(&adapter->lock);
3926 spin_lock_init(&adapter->tcb_send_qlock);
3927 spin_lock_init(&adapter->tcb_ready_qlock);
3928 spin_lock_init(&adapter->send_hw_lock);
3929 spin_lock_init(&adapter->rcv_lock);
3930 spin_lock_init(&adapter->rcv_pend_lock);
3931 spin_lock_init(&adapter->fbr_lock);
3932 spin_lock_init(&adapter->phy_lock);
3933
3934 adapter->registry_jumbo_packet = 1514; /* 1514-9216 */
3935
3936 /* Set the MAC address to a default */
3937 memcpy(adapter->addr, default_mac, ETH_ALEN);
3938
3939 return adapter;
3940 }
3941
3942 /* et131x_pci_remove
3943 * @pdev: a pointer to the device's pci_dev structure
3944 *
3945 * Registered in the pci_driver structure, this function is called when the
3946 * PCI subsystem detects that a PCI device which matches the information
3947 * contained in the pci_device_id table has been removed.
3948 */
3949 static void et131x_pci_remove(struct pci_dev *pdev)
3950 {
3951 struct net_device *netdev = pci_get_drvdata(pdev);
3952 struct et131x_adapter *adapter = netdev_priv(netdev);
3953
3954 unregister_netdev(netdev);
3955 phy_disconnect(adapter->phydev);
3956 mdiobus_unregister(adapter->mii_bus);
3957 kfree(adapter->mii_bus->irq);
3958 mdiobus_free(adapter->mii_bus);
3959
3960 et131x_adapter_memory_free(adapter);
3961 iounmap(adapter->regs);
3962 pci_dev_put(pdev);
3963
3964 free_netdev(netdev);
3965 pci_release_regions(pdev);
3966 pci_disable_device(pdev);
3967 }
3968
3969 /* et131x_up - Bring up a device for use.
3970 * @netdev: device to be opened
3971 */
3972 static void et131x_up(struct net_device *netdev)
3973 {
3974 struct et131x_adapter *adapter = netdev_priv(netdev);
3975
3976 et131x_enable_txrx(netdev);
3977 phy_start(adapter->phydev);
3978 }
3979
3980 /* et131x_down - Bring down the device
3981 * @netdev: device to be brought down
3982 */
3983 static void et131x_down(struct net_device *netdev)
3984 {
3985 struct et131x_adapter *adapter = netdev_priv(netdev);
3986
3987 /* Save the timestamp for the TX watchdog, prevent a timeout */
3988 netdev->trans_start = jiffies;
3989
3990 phy_stop(adapter->phydev);
3991 et131x_disable_txrx(netdev);
3992 }
3993
3994 #ifdef CONFIG_PM_SLEEP
3995 static int et131x_suspend(struct device *dev)
3996 {
3997 struct pci_dev *pdev = to_pci_dev(dev);
3998 struct net_device *netdev = pci_get_drvdata(pdev);
3999
4000 if (netif_running(netdev)) {
4001 netif_device_detach(netdev);
4002 et131x_down(netdev);
4003 pci_save_state(pdev);
4004 }
4005
4006 return 0;
4007 }
4008
4009 static int et131x_resume(struct device *dev)
4010 {
4011 struct pci_dev *pdev = to_pci_dev(dev);
4012 struct net_device *netdev = pci_get_drvdata(pdev);
4013
4014 if (netif_running(netdev)) {
4015 pci_restore_state(pdev);
4016 et131x_up(netdev);
4017 netif_device_attach(netdev);
4018 }
4019
4020 return 0;
4021 }
4022
4023 static SIMPLE_DEV_PM_OPS(et131x_pm_ops, et131x_suspend, et131x_resume);
4024 #define ET131X_PM_OPS (&et131x_pm_ops)
4025 #else
4026 #define ET131X_PM_OPS NULL
4027 #endif
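
/* SIMPLE_DEV_PM_OPS() builds a struct dev_pm_ops that reuses the same
 * suspend/resume pair for every system sleep transition (suspend, freeze,
 * poweroff and their resume counterparts). ET131X_PM_OPS then lets the
 * pci_driver definition at the bottom of this file plug the ops in, or
 * pass NULL when CONFIG_PM_SLEEP is disabled.
 */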
4028
4029 /* et131x_isr - The Interrupt Service Routine for the driver.
4030 * @irq: the IRQ on which the interrupt was received.
4031 * @dev_id: device-specific info (here a pointer to a net_device struct)
4032 *
4033 * Returns a value indicating if the interrupt was handled.
4034 */
4035 static irqreturn_t et131x_isr(int irq, void *dev_id)
4036 {
4037 bool handled = true;
4038 struct net_device *netdev = (struct net_device *)dev_id;
4039 struct et131x_adapter *adapter = NULL;
4040 u32 status;
4041
4042 if (!netif_device_present(netdev)) {
4043 handled = false;
4044 goto out;
4045 }
4046
4047 adapter = netdev_priv(netdev);
4048
4049 /* If the adapter is in low power state, then it should not
4050 * recognize any interrupt
4051 */
4052
4053 /* Disable Device Interrupts */
4054 et131x_disable_interrupts(adapter);
4055
4056 /* Get a copy of the value in the interrupt status register
4057 * so we can process the interrupting section
4058 */
4059 status = readl(&adapter->regs->global.int_status);
4060
4061 if (adapter->flowcontrol == FLOW_TXONLY ||
4062 adapter->flowcontrol == FLOW_BOTH) {
4063 status &= ~INT_MASK_ENABLE;
4064 } else {
4065 status &= ~INT_MASK_ENABLE_NO_FLOW;
4066 }
4067
4068 /* Make sure this is our interrupt */
4069 if (!status) {
4070 handled = false;
4071 et131x_enable_interrupts(adapter);
4072 goto out;
4073 }
4074
4075 /* This is our interrupt, so process accordingly */
4076
4077 if (status & ET_INTR_WATCHDOG) {
4078 struct tcb *tcb = adapter->tx_ring.send_head;
4079
4080 if (tcb)
4081 if (++tcb->stale > 1)
4082 status |= ET_INTR_TXDMA_ISR;
4083
4084 if (adapter->rx_ring.unfinished_receives)
4085 status |= ET_INTR_RXDMA_XFR_DONE;
4086 else if (tcb == NULL)
4087 writel(0, &adapter->regs->global.watchdog_timer);
4088
4089 status &= ~ET_INTR_WATCHDOG;
4090 }
4091
4092 if (status == 0) {
4093 /* This interrupt has in some way been "handled" by
4094 * the ISR. Either it was a spurious Rx interrupt, or
4095 * it was a Tx interrupt that has been filtered by
4096 * the ISR.
4097 */
4098 et131x_enable_interrupts(adapter);
4099 goto out;
4100 }
4101
4102 /* We need to save the interrupt status value for use in our
4103 * DPC. We will clear the software copy of that in that
4104 * routine.
4105 */
4106 adapter->stats.interrupt_status = status;
4107
4108 /* Schedule the ISR handler as a bottom-half task in the
4109 * kernel's tq_immediate queue, and mark the queue for
4110 * execution
4111 */
4112 schedule_work(&adapter->task);
4113 out:
4114 return IRQ_RETVAL(handled);
4115 }
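
/* The interrupt path follows the classic top-half/bottom-half split: the
 * hard IRQ handler above masks device interrupts, snapshots and filters
 * the status register, stashes the result in stats.interrupt_status and
 * defers the heavy lifting to et131x_isr_handler() via schedule_work().
 * The work item (below) is then responsible for re-enabling device
 * interrupts once it has drained all pending conditions.
 */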
4116
4117 /* et131x_isr_handler - The ISR handler
4118  * @work: a pointer to the work_struct embedded in our adapter structure
4119  *
4120  * Scheduled to run in a deferred context by the ISR. This is where the ISR's
4121  * work actually gets done.
4122 */
4123 static void et131x_isr_handler(struct work_struct *work)
4124 {
4125 struct et131x_adapter *adapter =
4126 container_of(work, struct et131x_adapter, task);
4127 u32 status = adapter->stats.interrupt_status;
4128 struct address_map __iomem *iomem = adapter->regs;
4129
4130 /* These first two are by far the most common. Once handled, we clear
4131 * their two bits in the status word. If the word is now zero, we
4132 * exit.
4133 */
4134 /* Handle all the completed Transmit interrupts */
4135 if (status & ET_INTR_TXDMA_ISR)
4136 et131x_handle_send_interrupt(adapter);
4137
4138 /* Handle all the completed Receives interrupts */
4139 if (status & ET_INTR_RXDMA_XFR_DONE)
4140 et131x_handle_recv_interrupt(adapter);
4141
4142 	status &= ~(ET_INTR_TXDMA_ISR | ET_INTR_RXDMA_XFR_DONE);
4143
4144 if (!status)
4145 goto out;
4146
4147 /* Handle the TXDMA Error interrupt */
4148 if (status & ET_INTR_TXDMA_ERR) {
4149 u32 txdma_err;
4150
4151 /* Following read also clears the register (COR) */
4152 txdma_err = readl(&iomem->txdma.tx_dma_error);
4153
4154 dev_warn(&adapter->pdev->dev,
4155 "TXDMA_ERR interrupt, error = %d\n",
4156 txdma_err);
4157 }
4158
4159 /* Handle Free Buffer Ring 0 and 1 Low interrupt */
4160 if (status & (ET_INTR_RXDMA_FB_R0_LOW | ET_INTR_RXDMA_FB_R1_LOW)) {
4161 /* This indicates the number of unused buffers in RXDMA free
4162 * buffer ring 0 is <= the limit you programmed. Free buffer
4163 * resources need to be returned. Free buffers are consumed as
4164 * packets are passed from the network to the host. The host
4165 * becomes aware of the packets from the contents of the packet
4166 * status ring. This ring is queried when the packet done
4167 * interrupt occurs. Packets are then passed to the OS. When
4168 * the OS is done with the packets the resources can be
4169 * returned to the ET1310 for re-use. This interrupt is one
4170 * method of returning resources.
4171 */
4172
4173 /* If the user has flow control on, then we will
4174 * send a pause packet, otherwise just exit
4175 */
4176 if (adapter->flowcontrol == FLOW_TXONLY ||
4177 adapter->flowcontrol == FLOW_BOTH) {
4178 u32 pm_csr;
4179
4180 /* Tell the device to send a pause packet via the back
4181 * pressure register (bp req and bp xon/xoff)
4182 */
4183 pm_csr = readl(&iomem->global.pm_csr);
4184 if (!et1310_in_phy_coma(adapter))
4185 writel(3, &iomem->txmac.bp_ctrl);
4186 }
4187 }
4188
4189 /* Handle Packet Status Ring Low Interrupt */
4190 if (status & ET_INTR_RXDMA_STAT_LOW) {
4191 /* Same idea as with the two Free Buffer Rings. Packets going
4192 * from the network to the host each consume a free buffer
4193 		 * resource and a packet status resource. These resources are
4194 * passed to the OS. When the OS is done with the resources,
4195 * they need to be returned to the ET1310. This is one method
4196 * of returning the resources.
4197 */
4198 }
4199
4200 /* Handle RXDMA Error Interrupt */
4201 if (status & ET_INTR_RXDMA_ERR) {
4202 /* The rxdma_error interrupt is sent when a time-out on a
4203 * request issued by the JAGCore has occurred or a completion is
4204 * returned with an un-successful status. In both cases the
4205 * request is considered complete. The JAGCore will
4206 * automatically re-try the request in question. Normally
4207 * information on events like these are sent to the host using
4208 * the "Advanced Error Reporting" capability. This interrupt is
4209 * another way of getting similar information. The only thing
4210 * required is to clear the interrupt by reading the ISR in the
4211 * global resources. The JAGCore will do a re-try on the
4212 * request. Normally you should never see this interrupt. If
4213 * you start to see this interrupt occurring frequently then
4214 * something bad has occurred. A reset might be the thing to do.
4215 */
4216 /* TRAP();*/
4217
4218 dev_warn(&adapter->pdev->dev,
4219 "RxDMA_ERR interrupt, error %x\n",
4220 readl(&iomem->txmac.tx_test));
4221 }
4222
4223 /* Handle the Wake on LAN Event */
4224 if (status & ET_INTR_WOL) {
4225 /* This is a secondary interrupt for wake on LAN. The driver
4226 * should never see this, if it does, something serious is
4227 * wrong. We will TRAP the message when we are in DBG mode,
4228 * otherwise we will ignore it.
4229 */
4230 dev_err(&adapter->pdev->dev, "WAKE_ON_LAN interrupt\n");
4231 }
4232
4233 /* Let's move on to the TxMac */
4234 if (status & ET_INTR_TXMAC) {
4235 u32 err = readl(&iomem->txmac.err);
4236
4237 /* When any of the errors occur and TXMAC generates an
4238 * interrupt to report these errors, it usually means that
4239 * TXMAC has detected an error in the data stream retrieved
4240 * from the on-chip Tx Q. All of these errors are catastrophic
4241 * and TXMAC won't be able to recover data when these errors
4242 * occur. In a nutshell, the whole Tx path will have to be reset
4243 * and re-configured afterwards.
4244 */
4245 dev_warn(&adapter->pdev->dev,
4246 "TXMAC interrupt, error 0x%08x\n",
4247 err);
4248
4249 /* If we are debugging, we want to see this error, otherwise we
4250 * just want the device to be reset and continue
4251 */
4252 }
4253
4254 /* Handle RXMAC Interrupt */
4255 if (status & ET_INTR_RXMAC) {
4256 /* These interrupts are catastrophic to the device, what we need
4257 * to do is disable the interrupts and set the flag to cause us
4258 * to reset so we can solve this issue.
4259 */
4260 /* MP_SET_FLAG( adapter, FMP_ADAPTER_HARDWARE_ERROR); */
4261
4262 dev_warn(&adapter->pdev->dev,
4263 "RXMAC interrupt, error 0x%08x. Requesting reset\n",
4264 readl(&iomem->rxmac.err_reg));
4265
4266 dev_warn(&adapter->pdev->dev,
4267 "Enable 0x%08x, Diag 0x%08x\n",
4268 readl(&iomem->rxmac.ctrl),
4269 readl(&iomem->rxmac.rxq_diag));
4270
4271 /* If we are debugging, we want to see this error, otherwise we
4272 * just want the device to be reset and continue
4273 */
4274 }
4275
4276 /* Handle MAC_STAT Interrupt */
4277 if (status & ET_INTR_MAC_STAT) {
4278 /* This means at least one of the un-masked counters in the
4279 * MAC_STAT block has rolled over. Use this to maintain the top,
4280 * software managed bits of the counter(s).
4281 */
4282 et1310_handle_macstat_interrupt(adapter);
4283 }
4284
4285 /* Handle SLV Timeout Interrupt */
4286 if (status & ET_INTR_SLV_TIMEOUT) {
4287 /* This means a timeout has occurred on a read or write request
4288 * to one of the JAGCore registers. The Global Resources block
4289 * has terminated the request and on a read request, returned a
4290 * "fake" value. The most likely reasons are: Bad Address or the
4291 * addressed module is in a power-down state and can't respond.
4292 */
4293 }
4294 out:
4295 et131x_enable_interrupts(adapter);
4296 }
4297
4298 /* et131x_stats - Return the current device statistics.
4299 * @netdev: device whose stats are being queried
4300 *
4301 * Returns 0 on success, errno on failure (as defined in errno.h)
4302 */
4303 static struct net_device_stats *et131x_stats(struct net_device *netdev)
4304 {
4305 struct et131x_adapter *adapter = netdev_priv(netdev);
4306 struct net_device_stats *stats = &adapter->net_stats;
4307 struct ce_stats *devstat = &adapter->stats;
4308
4309 stats->rx_errors = devstat->rx_length_errs +
4310 devstat->rx_align_errs +
4311 devstat->rx_crc_errs +
4312 devstat->rx_code_violations +
4313 devstat->rx_other_errs;
4314 stats->tx_errors = devstat->tx_max_pkt_errs;
4315 stats->multicast = devstat->multicast_pkts_rcvd;
4316 stats->collisions = devstat->tx_collisions;
4317
4318 stats->rx_length_errors = devstat->rx_length_errs;
4319 stats->rx_over_errors = devstat->rx_overflows;
4320 stats->rx_crc_errors = devstat->rx_crc_errs;
4321
4322 /* NOTE: These stats don't have corresponding values in CE_STATS,
4323 * so we're going to have to update these directly from within the
4324 * TX/RX code
4325 */
4326 /* stats->rx_bytes = 20; devstat->; */
4327 /* stats->tx_bytes = 20; devstat->; */
4328 /* stats->rx_dropped = devstat->; */
4329 /* stats->tx_dropped = devstat->; */
4330
4331 /* NOTE: Not used, can't find analogous statistics */
4332 /* stats->rx_frame_errors = devstat->; */
4333 /* stats->rx_fifo_errors = devstat->; */
4334 /* stats->rx_missed_errors = devstat->; */
4335
4336 /* stats->tx_aborted_errors = devstat->; */
4337 /* stats->tx_carrier_errors = devstat->; */
4338 /* stats->tx_fifo_errors = devstat->; */
4339 /* stats->tx_heartbeat_errors = devstat->; */
4340 /* stats->tx_window_errors = devstat->; */
4341 return stats;
4342 }
4343
4344 /* et131x_open - Open the device for use.
4345 * @netdev: device to be opened
4346 *
4347 * Returns 0 on success, errno on failure (as defined in errno.h)
4348 */
4349 static int et131x_open(struct net_device *netdev)
4350 {
4351 struct et131x_adapter *adapter = netdev_priv(netdev);
4352 struct pci_dev *pdev = adapter->pdev;
4353 unsigned int irq = pdev->irq;
4354 int result;
4355
4356 /* Start the timer to track NIC errors */
4357 init_timer(&adapter->error_timer);
4358 adapter->error_timer.expires = jiffies + TX_ERROR_PERIOD * HZ / 1000;
4359 adapter->error_timer.function = et131x_error_timer_handler;
4360 adapter->error_timer.data = (unsigned long)adapter;
4361 add_timer(&adapter->error_timer);
4362
4363 result = request_irq(irq, et131x_isr,
4364 IRQF_SHARED, netdev->name, netdev);
4365 if (result) {
4366 dev_err(&pdev->dev, "could not register IRQ %d\n", irq);
4367 return result;
4368 }
4369
4370 adapter->flags |= FMP_ADAPTER_INTERRUPT_IN_USE;
4371
4372 et131x_up(netdev);
4373
4374 return result;
4375 }
4376
4377 /* et131x_close - Close the device
4378 * @netdev: device to be closed
4379 *
4380 * Returns 0 on success, errno on failure (as defined in errno.h)
4381 */
4382 static int et131x_close(struct net_device *netdev)
4383 {
4384 struct et131x_adapter *adapter = netdev_priv(netdev);
4385
4386 et131x_down(netdev);
4387
4388 adapter->flags &= ~FMP_ADAPTER_INTERRUPT_IN_USE;
4389 free_irq(adapter->pdev->irq, netdev);
4390
4391 /* Stop the error timer */
4392 return del_timer_sync(&adapter->error_timer);
4393 }
4394
4395 /* et131x_ioctl - The I/O Control handler for the driver
4396 * @netdev: device on which the control request is being made
4397 * @reqbuf: a pointer to the IOCTL request buffer
4398 * @cmd: the IOCTL command code
4399 *
4400 * Returns 0 on success, errno on failure (as defined in errno.h)
4401 */
4402 static int et131x_ioctl(struct net_device *netdev, struct ifreq *reqbuf,
4403 int cmd)
4404 {
4405 struct et131x_adapter *adapter = netdev_priv(netdev);
4406
4407 if (!adapter->phydev)
4408 return -EINVAL;
4409
4410 return phy_mii_ioctl(adapter->phydev, reqbuf, cmd);
4411 }
4412
4413 /* et131x_set_packet_filter - Configures the Rx Packet filtering on the device
4414 * @adapter: pointer to our private adapter structure
4415 *
4416 * FIXME: lot of dups with MAC code
4417 *
4418 * Returns 0 on success, errno on failure
4419 */
4420 static int et131x_set_packet_filter(struct et131x_adapter *adapter)
4421 {
4422 int filter = adapter->packet_filter;
4423 int status = 0;
4424 u32 ctrl;
4425 u32 pf_ctrl;
4426
4427 ctrl = readl(&adapter->regs->rxmac.ctrl);
4428 pf_ctrl = readl(&adapter->regs->rxmac.pf_ctrl);
4429
4430 /* Default to disabled packet filtering. Enable it in the individual
4431 * case statements that require the device to filter something
4432 */
4433 ctrl |= 0x04;
4434
4435 /* Set us to be in promiscuous mode so we receive everything, this
4436 * is also true when we get a packet filter of 0
4437 */
4438 if ((filter & ET131X_PACKET_TYPE_PROMISCUOUS) || filter == 0)
4439 pf_ctrl &= ~7; /* Clear filter bits */
4440 else {
4441 /* Set us up with Multicast packet filtering. Three cases are
4442 * possible - (1) we have a multi-cast list, (2) we receive ALL
4443 * multicast entries or (3) we receive none.
4444 */
4445 if (filter & ET131X_PACKET_TYPE_ALL_MULTICAST)
4446 pf_ctrl &= ~2; /* Multicast filter bit */
4447 else {
4448 et1310_setup_device_for_multicast(adapter);
4449 pf_ctrl |= 2;
4450 ctrl &= ~0x04;
4451 }
4452
4453 /* Set us up with Unicast packet filtering */
4454 if (filter & ET131X_PACKET_TYPE_DIRECTED) {
4455 et1310_setup_device_for_unicast(adapter);
4456 pf_ctrl |= 4;
4457 ctrl &= ~0x04;
4458 }
4459
4460 /* Set us up with Broadcast packet filtering */
4461 if (filter & ET131X_PACKET_TYPE_BROADCAST) {
4462 pf_ctrl |= 1; /* Broadcast filter bit */
4463 ctrl &= ~0x04;
4464 } else
4465 pf_ctrl &= ~1;
4466
4467 /* Setup the receive mac configuration registers - Packet
4468 * Filter control + the enable / disable for packet filter
4469 * in the control reg.
4470 */
4471 writel(pf_ctrl, &adapter->regs->rxmac.pf_ctrl);
4472 writel(ctrl, &adapter->regs->rxmac.ctrl);
4473 }
4474 return status;
4475 }
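
/* Bit usage, as inferred from the code above: in pf_ctrl, bit 0 arms
 * broadcast filtering, bit 1 multicast and bit 2 unicast filtering, while
 * bit 2 (0x04) of the rxmac ctrl register globally disables the packet
 * filter; every branch that arms a specific filter also clears that
 * disable bit again.
 */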
4476
4477 /* et131x_multicast - The handler to configure multicasting on the interface
4478 * @netdev: a pointer to a net_device struct representing the device
4479 */
4480 static void et131x_multicast(struct net_device *netdev)
4481 {
4482 struct et131x_adapter *adapter = netdev_priv(netdev);
4483 int packet_filter;
4484 unsigned long flags;
4485 struct netdev_hw_addr *ha;
4486 int i;
4487
4488 spin_lock_irqsave(&adapter->lock, flags);
4489
4490 /* Before we modify the platform-independent filter flags, store them
4491 * locally. This allows us to determine if anything's changed and if
4492 * we even need to bother the hardware
4493 */
4494 packet_filter = adapter->packet_filter;
4495
4496 /* Clear the 'multicast' flag locally; because we only have a single
4497 * flag to check multicast, and multiple multicast addresses can be
4498 * set, this is the easiest way to determine if more than one
4499 * multicast address is being set.
4500 */
4501 packet_filter &= ~ET131X_PACKET_TYPE_MULTICAST;
4502
4503 /* Check the net_device flags and set the device independent flags
4504 * accordingly
4505 */
4506
4507 if (netdev->flags & IFF_PROMISC)
4508 adapter->packet_filter |= ET131X_PACKET_TYPE_PROMISCUOUS;
4509 else
4510 adapter->packet_filter &= ~ET131X_PACKET_TYPE_PROMISCUOUS;
4511
4512 if (netdev->flags & IFF_ALLMULTI)
4513 adapter->packet_filter |= ET131X_PACKET_TYPE_ALL_MULTICAST;
4514
4515 if (netdev_mc_count(netdev) > NIC_MAX_MCAST_LIST)
4516 adapter->packet_filter |= ET131X_PACKET_TYPE_ALL_MULTICAST;
4517
4518 if (netdev_mc_count(netdev) < 1) {
4519 adapter->packet_filter &= ~ET131X_PACKET_TYPE_ALL_MULTICAST;
4520 adapter->packet_filter &= ~ET131X_PACKET_TYPE_MULTICAST;
4521 } else
4522 adapter->packet_filter |= ET131X_PACKET_TYPE_MULTICAST;
4523
4524 /* Set values in the private adapter struct */
4525 i = 0;
4526 netdev_for_each_mc_addr(ha, netdev) {
4527 if (i == NIC_MAX_MCAST_LIST)
4528 break;
4529 memcpy(adapter->multicast_list[i++], ha->addr, ETH_ALEN);
4530 }
4531 adapter->multicast_addr_count = i;
4532
4533 /* Are the new flags different from the previous ones? If not, then no
4534 * action is required
4535 *
4536 * NOTE - This block will always update the multicast_list with the
4537 * hardware, even if the addresses aren't the same.
4538 */
4539 if (packet_filter != adapter->packet_filter) {
4540 /* Call the device's filter function */
4541 et131x_set_packet_filter(adapter);
4542 }
4543 spin_unlock_irqrestore(&adapter->lock, flags);
4544 }
4545
4546 /* et131x_tx - The handler to tx a packet on the device
4547 * @skb: data to be Tx'd
4548 * @netdev: device on which data is to be Tx'd
4549 *
4550 * Returns 0 on success, errno on failure (as defined in errno.h)
4551 */
4552 static int et131x_tx(struct sk_buff *skb, struct net_device *netdev)
4553 {
4554 int status = 0;
4555 struct et131x_adapter *adapter = netdev_priv(netdev);
4556
4557 /* stop the queue if it's getting full */
4558 if (adapter->tx_ring.used >= NUM_TCB - 1 &&
4559 !netif_queue_stopped(netdev))
4560 netif_stop_queue(netdev);
4561
4562 /* Save the timestamp for the TX timeout watchdog */
4563 netdev->trans_start = jiffies;
4564
4565 /* Call the device-specific data Tx routine */
4566 status = et131x_send_packets(skb, netdev);
4567
4568 /* Check status and manage the netif queue if necessary */
4569 if (status != 0) {
4570 if (status == -ENOMEM)
4571 status = NETDEV_TX_BUSY;
4572 else
4573 status = NETDEV_TX_OK;
4574 }
4575 return status;
4576 }
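
/* Queue flow control forms a hysteresis pair: et131x_tx() stops the netif
 * queue once NUM_TCB - 1 control blocks are in use, and the send-completion
 * path only wakes it again when usage has fallen to NUM_TCB / 3 or less,
 * so the queue does not bounce on and off right at the stop threshold.
 */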
4577
4578 /* et131x_tx_timeout - Timeout handler
4579 * @netdev: a pointer to a net_device struct representing the device
4580 *
4581 * The handler called when a Tx request times out. The timeout period is
4582  * specified by the 'watchdog_timeo' element in the net_device structure
4583  * (see et131x_pci_setup() to see how this value is set).
4584 */
4585 static void et131x_tx_timeout(struct net_device *netdev)
4586 {
4587 struct et131x_adapter *adapter = netdev_priv(netdev);
4588 struct tcb *tcb;
4589 unsigned long flags;
4590
4591 /* If the device is closed, ignore the timeout */
4592 	if (!(adapter->flags & FMP_ADAPTER_INTERRUPT_IN_USE))
4593 return;
4594
4595 /* Any nonrecoverable hardware error?
4596 * Checks adapter->flags for any failure in phy reading
4597 */
4598 if (adapter->flags & FMP_ADAPTER_NON_RECOVER_ERROR)
4599 return;
4600
4601 /* Hardware failure? */
4602 if (adapter->flags & FMP_ADAPTER_HARDWARE_ERROR) {
4603 dev_err(&adapter->pdev->dev, "hardware error - reset\n");
4604 return;
4605 }
4606
4607 /* Is send stuck? */
4608 spin_lock_irqsave(&adapter->tcb_send_qlock, flags);
4609
4610 tcb = adapter->tx_ring.send_head;
4611
4612 if (tcb != NULL) {
4613 tcb->count++;
4614
4615 if (tcb->count > NIC_SEND_HANG_THRESHOLD) {
4616 spin_unlock_irqrestore(&adapter->tcb_send_qlock,
4617 flags);
4618
4619 dev_warn(&adapter->pdev->dev,
4620 "Send stuck - reset. tcb->WrIndex %x, flags 0x%08x\n",
4621 tcb->index,
4622 tcb->flags);
4623
4624 adapter->net_stats.tx_errors++;
4625
4626 /* perform reset of tx/rx */
4627 et131x_disable_txrx(netdev);
4628 et131x_enable_txrx(netdev);
4629 return;
4630 }
4631 }
4632
4633 spin_unlock_irqrestore(&adapter->tcb_send_qlock, flags);
4634 }
4635
4636 /* et131x_change_mtu - The handler called to change the MTU for the device
4637 * @netdev: device whose MTU is to be changed
4638 * @new_mtu: the desired MTU
4639 *
4640 * Returns 0 on success, errno on failure (as defined in errno.h)
4641 */
4642 static int et131x_change_mtu(struct net_device *netdev, int new_mtu)
4643 {
4644 int result = 0;
4645 struct et131x_adapter *adapter = netdev_priv(netdev);
4646
4647 /* Make sure the requested MTU is valid */
4648 if (new_mtu < 64 || new_mtu > 9216)
4649 return -EINVAL;
4650
4651 et131x_disable_txrx(netdev);
4652 et131x_handle_send_interrupt(adapter);
4653 et131x_handle_recv_interrupt(adapter);
4654
4655 /* Set the new MTU */
4656 netdev->mtu = new_mtu;
4657
4658 /* Free Rx DMA memory */
4659 et131x_adapter_memory_free(adapter);
4660
4661 /* Set the config parameter for Jumbo Packet support */
4662 	adapter->registry_jumbo_packet = new_mtu + 14; /* MTU + Ethernet header */
4663 et131x_soft_reset(adapter);
4664
4665 /* Alloc and init Rx DMA memory */
4666 result = et131x_adapter_memory_alloc(adapter);
4667 if (result != 0) {
4668 dev_warn(&adapter->pdev->dev,
4669 "Change MTU failed; couldn't re-alloc DMA memory\n");
4670 return result;
4671 }
4672
4673 et131x_init_send(adapter);
4674
4675 et131x_hwaddr_init(adapter);
4676 memcpy(netdev->dev_addr, adapter->addr, ETH_ALEN);
4677
4678 /* Init the device with the new settings */
4679 et131x_adapter_setup(adapter);
4680
4681 et131x_enable_txrx(netdev);
4682
4683 return result;
4684 }
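
/* Changing the MTU is deliberately heavyweight here: the accepted range is
 * 64-9216 bytes, registry_jumbo_packet becomes MTU + 14 (the Ethernet
 * header), and all Tx/Rx DMA memory is torn down and reallocated before the
 * hardware is reprogrammed. From user space this path is reached by e.g.
 * 'ip link set ethX mtu 9000'.
 */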
4685
4686 /* et131x_set_mac_addr - handler to change the MAC address for the device
4687 * @netdev: device whose MAC is to be changed
4688 * @new_mac: the desired MAC address
4689 *
4690 * Returns 0 on success, errno on failure (as defined in errno.h)
4691 *
4692 * IMPLEMENTED BY : blux http://berndlux.de 22.01.2007 21:14
4693 */
4694 static int et131x_set_mac_addr(struct net_device *netdev, void *new_mac)
4695 {
4696 int result = 0;
4697 struct et131x_adapter *adapter = netdev_priv(netdev);
4698 struct sockaddr *address = new_mac;
4699
4702 if (adapter == NULL)
4703 return -ENODEV;
4704
4705 /* Make sure the requested MAC is valid */
4706 if (!is_valid_ether_addr(address->sa_data))
4707 return -EADDRNOTAVAIL;
4708
4709 et131x_disable_txrx(netdev);
4710 et131x_handle_send_interrupt(adapter);
4711 et131x_handle_recv_interrupt(adapter);
4712
4713 	/* Set the new MAC */
4715
4716 memcpy(netdev->dev_addr, address->sa_data, netdev->addr_len);
4717
4718 netdev_info(netdev, "Setting MAC address to %pM\n",
4719 netdev->dev_addr);
4720
4721 /* Free Rx DMA memory */
4722 et131x_adapter_memory_free(adapter);
4723
4724 et131x_soft_reset(adapter);
4725
4726 /* Alloc and init Rx DMA memory */
4727 result = et131x_adapter_memory_alloc(adapter);
4728 if (result != 0) {
4729 dev_err(&adapter->pdev->dev,
4730 "Change MAC failed; couldn't re-alloc DMA memory\n");
4731 return result;
4732 }
4733
4734 et131x_init_send(adapter);
4735
4736 et131x_hwaddr_init(adapter);
4737
4738 /* Init the device with the new settings */
4739 et131x_adapter_setup(adapter);
4740
4741 et131x_enable_txrx(netdev);
4742
4743 return result;
4744 }
4745
4746 static const struct net_device_ops et131x_netdev_ops = {
4747 .ndo_open = et131x_open,
4748 .ndo_stop = et131x_close,
4749 .ndo_start_xmit = et131x_tx,
4750 .ndo_set_rx_mode = et131x_multicast,
4751 .ndo_tx_timeout = et131x_tx_timeout,
4752 .ndo_change_mtu = et131x_change_mtu,
4753 .ndo_set_mac_address = et131x_set_mac_addr,
4754 .ndo_validate_addr = eth_validate_addr,
4755 .ndo_get_stats = et131x_stats,
4756 .ndo_do_ioctl = et131x_ioctl,
4757 };
4758
4759 /* et131x_pci_setup - Perform device initialization
4760 * @pdev: a pointer to the device's pci_dev structure
4761 * @ent: this device's entry in the pci_device_id table
4762 *
4763 * Returns 0 on success, errno on failure (as defined in errno.h)
4764 *
4765 * Registered in the pci_driver structure, this function is called when the
4766 * PCI subsystem finds a new PCI device which matches the information
4767 * contained in the pci_device_id table. This routine is the equivalent to
4768 * a device insertion routine.
4769 */
4770 static int et131x_pci_setup(struct pci_dev *pdev,
4771 const struct pci_device_id *ent)
4772 {
4773 struct net_device *netdev;
4774 struct et131x_adapter *adapter;
4775 int rc;
4776 int ii;
4777
4778 rc = pci_enable_device(pdev);
4779 if (rc < 0) {
4780 dev_err(&pdev->dev, "pci_enable_device() failed\n");
4781 goto out;
4782 }
4783
4784 /* Perform some basic PCI checks */
4785 if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
4786 dev_err(&pdev->dev, "Can't find PCI device's base address\n");
4787 rc = -ENODEV;
4788 goto err_disable;
4789 }
4790
4791 rc = pci_request_regions(pdev, DRIVER_NAME);
4792 if (rc < 0) {
4793 dev_err(&pdev->dev, "Can't get PCI resources\n");
4794 goto err_disable;
4795 }
4796
4797 pci_set_master(pdev);
4798
4799 /* Check the DMA addressing support of this device */
4800 if (!dma_set_mask(&pdev->dev, DMA_BIT_MASK(64))) {
4801 rc = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64));
4802 if (rc < 0) {
4803 dev_err(&pdev->dev,
4804 "Unable to obtain 64 bit DMA for consistent allocations\n");
4805 goto err_release_res;
4806 }
4807 } else if (!dma_set_mask(&pdev->dev, DMA_BIT_MASK(32))) {
4808 rc = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32));
4809 if (rc < 0) {
4810 dev_err(&pdev->dev,
4811 "Unable to obtain 32 bit DMA for consistent allocations\n");
4812 goto err_release_res;
4813 }
4814 } else {
4815 dev_err(&pdev->dev, "No usable DMA addressing method\n");
4816 rc = -EIO;
4817 goto err_release_res;
4818 }
4819
4820 /* Allocate netdev and private adapter structs */
4821 netdev = alloc_etherdev(sizeof(struct et131x_adapter));
4822 if (!netdev) {
4823 dev_err(&pdev->dev, "Couldn't alloc netdev struct\n");
4824 rc = -ENOMEM;
4825 goto err_release_res;
4826 }
4827
4828 netdev->watchdog_timeo = ET131X_TX_TIMEOUT;
4829 netdev->netdev_ops = &et131x_netdev_ops;
4830
4831 SET_NETDEV_DEV(netdev, &pdev->dev);
4832 SET_ETHTOOL_OPS(netdev, &et131x_ethtool_ops);
4833
4834 adapter = et131x_adapter_init(netdev, pdev);
4835
4836 rc = et131x_pci_init(adapter, pdev);
4837 if (rc < 0)
4838 goto err_free_dev;
4839
4840 /* Map the bus-relative registers to system virtual memory */
4841 adapter->regs = pci_ioremap_bar(pdev, 0);
4842 if (!adapter->regs) {
4843 dev_err(&pdev->dev, "Cannot map device registers\n");
4844 rc = -ENOMEM;
4845 goto err_free_dev;
4846 }
4847
4848 /* If Phy COMA mode was enabled when we went down, disable it here. */
4849 writel(ET_PMCSR_INIT, &adapter->regs->global.pm_csr);
4850
4851 /* Issue a global reset to the et1310 */
4852 et131x_soft_reset(adapter);
4853
4854 /* Disable all interrupts (paranoid) */
4855 et131x_disable_interrupts(adapter);
4856
4857 /* Allocate DMA memory */
4858 rc = et131x_adapter_memory_alloc(adapter);
4859 if (rc < 0) {
4860 		dev_err(&pdev->dev, "Could not alloc adapter memory (DMA)\n");
4861 goto err_iounmap;
4862 }
4863
4864 /* Init send data structures */
4865 et131x_init_send(adapter);
4866
4867 /* Set up the task structure for the ISR's deferred handler */
4868 INIT_WORK(&adapter->task, et131x_isr_handler);
4869
4870 /* Copy address into the net_device struct */
4871 memcpy(netdev->dev_addr, adapter->addr, ETH_ALEN);
4872
4873 /* Init variable for counting how long we do not have link status */
4874 adapter->boot_coma = 0;
4875 et1310_disable_phy_coma(adapter);
4876
4877 rc = -ENOMEM;
4878
4879 /* Setup the mii_bus struct */
4880 adapter->mii_bus = mdiobus_alloc();
4881 if (!adapter->mii_bus) {
4882 dev_err(&pdev->dev, "Alloc of mii_bus struct failed\n");
4883 goto err_mem_free;
4884 }
4885
4886 adapter->mii_bus->name = "et131x_eth_mii";
4887 snprintf(adapter->mii_bus->id, MII_BUS_ID_SIZE, "%x",
4888 (adapter->pdev->bus->number << 8) | adapter->pdev->devfn);
4889 adapter->mii_bus->priv = netdev;
4890 adapter->mii_bus->read = et131x_mdio_read;
4891 adapter->mii_bus->write = et131x_mdio_write;
4892 adapter->mii_bus->reset = et131x_mdio_reset;
4893 adapter->mii_bus->irq = kmalloc_array(PHY_MAX_ADDR, sizeof(int),
4894 GFP_KERNEL);
4895 if (!adapter->mii_bus->irq)
4896 goto err_mdio_free;
4897
4898 for (ii = 0; ii < PHY_MAX_ADDR; ii++)
4899 adapter->mii_bus->irq[ii] = PHY_POLL;
4900
4901 rc = mdiobus_register(adapter->mii_bus);
4902 if (rc < 0) {
4903 dev_err(&pdev->dev, "failed to register MII bus\n");
4904 goto err_mdio_free_irq;
4905 }
4906
4907 rc = et131x_mii_probe(netdev);
4908 if (rc < 0) {
4909 dev_err(&pdev->dev, "failed to probe MII bus\n");
4910 goto err_mdio_unregister;
4911 }
4912
4913 /* Setup et1310 as per the documentation */
4914 et131x_adapter_setup(adapter);
4915
4916 /* We can enable interrupts now
4917 *
4918 * NOTE - Because registration of interrupt handler is done in the
4919 * device's open(), defer enabling device interrupts to that
4920 * point
4921 */
4922
4923 /* Register the net_device struct with the Linux network layer */
4924 rc = register_netdev(netdev);
4925 if (rc < 0) {
4926 dev_err(&pdev->dev, "register_netdev() failed\n");
4927 goto err_phy_disconnect;
4928 }
4929
4930 /* Register the net_device struct with the PCI subsystem. Save a copy
4931 * of the PCI config space for this device now that the device has
4932 * been initialized, just in case it needs to be quickly restored.
4933 */
4934 pci_set_drvdata(pdev, netdev);
4935 out:
4936 return rc;
4937
4938 err_phy_disconnect:
4939 phy_disconnect(adapter->phydev);
4940 err_mdio_unregister:
4941 mdiobus_unregister(adapter->mii_bus);
4942 err_mdio_free_irq:
4943 kfree(adapter->mii_bus->irq);
4944 err_mdio_free:
4945 mdiobus_free(adapter->mii_bus);
4946 err_mem_free:
4947 et131x_adapter_memory_free(adapter);
4948 err_iounmap:
4949 iounmap(adapter->regs);
4950 err_free_dev:
4951 pci_dev_put(pdev);
4952 free_netdev(netdev);
4953 err_release_res:
4954 pci_release_regions(pdev);
4955 err_disable:
4956 pci_disable_device(pdev);
4957 goto out;
4958 }
4959
4960 static DEFINE_PCI_DEVICE_TABLE(et131x_pci_table) = {
4961 { PCI_VDEVICE(ATT, ET131X_PCI_DEVICE_ID_GIG), 0UL},
4962 { PCI_VDEVICE(ATT, ET131X_PCI_DEVICE_ID_FAST), 0UL},
4963 {0,}
4964 };
4965 MODULE_DEVICE_TABLE(pci, et131x_pci_table);
4966
4967 static struct pci_driver et131x_driver = {
4968 .name = DRIVER_NAME,
4969 .id_table = et131x_pci_table,
4970 .probe = et131x_pci_setup,
4971 .remove = et131x_pci_remove,
4972 .driver.pm = ET131X_PM_OPS,
4973 };
4974
4975 module_pci_driver(et131x_driver);