staging: et131x: Fix 'else is not generally useful after a break or return'
drivers/staging/et131x/et131x.c
/* Agere Systems Inc.
 * 10/100/1000 Base-T Ethernet Driver for the ET1301 and ET131x series MACs
 *
 * Copyright © 2005 Agere Systems Inc.
 * All rights reserved.
 *   http://www.agere.com
 *
 * Copyright (c) 2011 Mark Einon <mark.einon@gmail.com>
 *
 *------------------------------------------------------------------------------
 *
 * SOFTWARE LICENSE
 *
 * This software is provided subject to the following terms and conditions,
 * which you should read carefully before using the software. Using this
 * software indicates your acceptance of these terms and conditions. If you do
 * not agree with these terms and conditions, do not use the software.
 *
 * Copyright © 2005 Agere Systems Inc.
 * All rights reserved.
 *
 * Redistribution and use in source or binary forms, with or without
 * modifications, are permitted provided that the following conditions are met:
 *
 * . Redistributions of source code must retain the above copyright notice, this
 *   list of conditions and the following Disclaimer as comments in the code as
 *   well as in the documentation and/or other materials provided with the
 *   distribution.
 *
 * . Redistributions in binary form must reproduce the above copyright notice,
 *   this list of conditions and the following Disclaimer in the documentation
 *   and/or other materials provided with the distribution.
 *
 * . Neither the name of Agere Systems Inc. nor the names of the contributors
 *   may be used to endorse or promote products derived from this software
 *   without specific prior written permission.
 *
 * Disclaimer
 *
 * THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
 * INCLUDING, BUT NOT LIMITED TO, INFRINGEMENT AND THE IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. ANY
 * USE, MODIFICATION OR DISTRIBUTION OF THIS SOFTWARE IS SOLELY AT THE USERS OWN
 * RISK. IN NO EVENT SHALL AGERE SYSTEMS INC. OR CONTRIBUTORS BE LIABLE FOR ANY
 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
 * ON ANY THEORY OF LIABILITY, INCLUDING, BUT NOT LIMITED TO, CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
 * DAMAGE.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/pci.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>

#include <linux/sched.h>
#include <linux/ptrace.h>
#include <linux/slab.h>
#include <linux/ctype.h>
#include <linux/string.h>
#include <linux/timer.h>
#include <linux/interrupt.h>
#include <linux/in.h>
#include <linux/delay.h>
#include <linux/bitops.h>
#include <linux/io.h>

#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/if_arp.h>
#include <linux/ioport.h>
#include <linux/crc32.h>
#include <linux/random.h>
#include <linux/phy.h>

#include "et131x.h"

MODULE_AUTHOR("Victor Soriano <vjsoriano@agere.com>");
MODULE_AUTHOR("Mark Einon <mark.einon@gmail.com>");
MODULE_LICENSE("Dual BSD/GPL");
MODULE_DESCRIPTION("10/100/1000 Base-T Ethernet Driver for the ET1310 by Agere Systems");

/* EEPROM defines */
#define MAX_NUM_REGISTER_POLLS	1000
#define MAX_NUM_WRITE_RETRIES	2

/* MAC defines */
#define COUNTER_WRAP_16_BIT	0x10000
#define COUNTER_WRAP_12_BIT	0x1000

/* PCI defines */
#define INTERNAL_MEM_SIZE	0x400	/* 1024 of internal memory */
#define INTERNAL_MEM_RX_OFFSET	0x1FF	/* 50% Tx, 50% Rx */

/* ISR defines */
/* For interrupts, normal running is:
 * rxdma_xfr_done, phy_interrupt, mac_stat_interrupt,
 * watchdog_interrupt & txdma_xfer_done
 *
 * In both cases, when flow control is enabled for either Tx or bi-direction,
 * we additionally enable rx_fbr0_low and rx_fbr1_low, so we know when the
 * buffer rings are running low.
 */
#define INT_MASK_DISABLE	0xffffffff

/* NOTE: Masking out MAC_STAT Interrupt for now...
 * #define INT_MASK_ENABLE		0xfff6bf17
 * #define INT_MASK_ENABLE_NO_FLOW	0xfff6bfd7
 */
#define INT_MASK_ENABLE		0xfffebf17
#define INT_MASK_ENABLE_NO_FLOW	0xfffebfd7

/* General defines */
/* Packet and header sizes */
#define NIC_MIN_PACKET_SIZE	60

/* Multicast list size */
#define NIC_MAX_MCAST_LIST	128

/* Supported Filters */
#define ET131X_PACKET_TYPE_DIRECTED		0x0001
#define ET131X_PACKET_TYPE_MULTICAST		0x0002
#define ET131X_PACKET_TYPE_BROADCAST		0x0004
#define ET131X_PACKET_TYPE_PROMISCUOUS		0x0008
#define ET131X_PACKET_TYPE_ALL_MULTICAST	0x0010

/* Tx Timeout */
#define ET131X_TX_TIMEOUT	(1 * HZ)
#define NIC_SEND_HANG_THRESHOLD	0

/* MP_TCB flags */
#define FMP_DEST_MULTI			0x00000001
#define FMP_DEST_BROAD			0x00000002

/* MP_ADAPTER flags */
#define FMP_ADAPTER_INTERRUPT_IN_USE	0x00000008

/* MP_SHARED flags */
#define FMP_ADAPTER_LOWER_POWER		0x00200000

#define FMP_ADAPTER_NON_RECOVER_ERROR	0x00800000
#define FMP_ADAPTER_HARDWARE_ERROR	0x04000000

#define FMP_ADAPTER_FAIL_SEND_MASK	0x3ff00000

/* Some offsets in PCI config space that are actually used. */
#define ET1310_PCI_MAC_ADDRESS		0xA4
#define ET1310_PCI_EEPROM_STATUS	0xB2
#define ET1310_PCI_ACK_NACK		0xC0
#define ET1310_PCI_REPLAY		0xC2
#define ET1310_PCI_L0L1LATENCY		0xCF

/* PCI Product IDs */
#define ET131X_PCI_DEVICE_ID_GIG	0xED00	/* ET1310 1000 Base-T 8 */
#define ET131X_PCI_DEVICE_ID_FAST	0xED01	/* ET1310 100 Base-T */

/* Define order of magnitude converter */
#define NANO_IN_A_MICRO	1000

#define PARM_RX_NUM_BUFS_DEF	4
#define PARM_RX_TIME_INT_DEF	10
#define PARM_RX_MEM_END_DEF	0x2bc
#define PARM_TX_TIME_INT_DEF	40
#define PARM_TX_NUM_BUFS_DEF	4
#define PARM_DMA_CACHE_DEF	0

/* RX defines */
#define FBR_CHUNKS		32
#define MAX_DESC_PER_RING_RX	1024

/* number of RFDs - default and min */
#define RFD_LOW_WATER_MARK	40
#define NIC_DEFAULT_NUM_RFD	1024
#define NUM_FBRS		2

#define MAX_PACKETS_HANDLED	256

#define ALCATEL_MULTICAST_PKT	0x01000000
#define ALCATEL_BROADCAST_PKT	0x02000000

/* typedefs for Free Buffer Descriptors */
struct fbr_desc {
	u32 addr_lo;
	u32 addr_hi;
	u32 word2;	/* Bits 10-31 reserved, 0-9 descriptor */
};

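/* A minimal illustrative sketch, not part of the original driver: given the
 * fbr_desc layout above (a 64-bit buffer address plus a 10-bit descriptor
 * index in the low bits of word2), a descriptor could be populated as
 * below. The helper name is hypothetical.
 */
static inline void fbr_desc_fill_example(struct fbr_desc *fd,
					 dma_addr_t dma, u32 index)
{
	fd->addr_lo = lower_32_bits(dma);
	fd->addr_hi = upper_32_bits(dma);
	fd->word2 = index & 0x3FF;	/* bits 0-9: descriptor index */
}
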
/* Packet Status Ring Descriptors
 *
 * Word 0:
 *
 * top 16 bits are from the Alcatel Status Word as enumerated in
 * PE-MCXMAC Data Sheet IPD DS54 0210-1 (also IPD-DS80 0205-2)
 *
 * 0: hp			hash pass
 * 1: ipa			IP checksum assist
 * 2: ipp			IP checksum pass
 * 3: tcpa			TCP checksum assist
 * 4: tcpp			TCP checksum pass
 * 5: wol			WOL Event
 * 6: rxmac_error		RXMAC Error Indicator
 * 7: drop			Drop packet
 * 8: ft			Frame Truncated
 * 9: jp			Jumbo Packet
 * 10: vp			VLAN Packet
 * 11-15: unused
 * 16: asw_prev_pkt_dropped	e.g. IFG too small on previous
 * 17: asw_RX_DV_event		short receive event detected
 * 18: asw_false_carrier_event	bad carrier since last good packet
 * 19: asw_code_err		one or more nibbles signalled as errors
 * 20: asw_CRC_err		CRC error
 * 21: asw_len_chk_err		frame length field incorrect
 * 22: asw_too_long		frame length > 1518 bytes
 * 23: asw_OK			valid CRC + no code error
 * 24: asw_multicast		has a multicast address
 * 25: asw_broadcast		has a broadcast address
 * 26: asw_dribble_nibble	spurious bits after EOP
 * 27: asw_control_frame	is a control frame
 * 28: asw_pause_frame		is a pause frame
 * 29: asw_unsupported_op	unsupported OP code
 * 30: asw_VLAN_tag		VLAN tag detected
 * 31: asw_long_evt		Rx long event
 *
 * Word 1:
 * 0-15: length		length in bytes
 * 16-25: bi		Buffer Index
 * 26-27: ri		Ring Index
 * 28-31: reserved
 */
struct pkt_stat_desc {
	u32 word0;
	u32 word1;
};

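/* A minimal illustrative sketch, not part of the original driver: with the
 * word1 layout documented above, the frame length, buffer index and ring
 * index could be extracted with masks like these. The PSD_* names are
 * hypothetical.
 */
#define PSD_LENGTH(word1)	((word1) & 0xFFFF)	  /* bits 0-15 */
#define PSD_BUFFER_INDEX(word1)	(((word1) >> 16) & 0x3FF) /* bits 16-25 */
#define PSD_RING_INDEX(word1)	(((word1) >> 26) & 0x3)	  /* bits 26-27 */
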
/* Typedefs for the RX DMA status word */

/* rx status word 0 holds part of the status bits of the Rx DMA engine
 * that get copied out to memory by the ET-1310. Word 0 is a 32 bit word
 * which contains the Free Buffer ring 0 and 1 available offset.
 *
 * bit 0-9 FBR1 offset
 * bit 10 Wrap flag for FBR1
 * bit 16-25 FBR0 offset
 * bit 26 Wrap flag for FBR0
 */

/* RXSTAT_WORD1_t structure holds part of the status bits of the Rx DMA engine
 * that get copied out to memory by the ET-1310. Word 3 is a 32 bit word
 * which contains the Packet Status Ring available offset.
 *
 * bit 0-15 reserved
 * bit 16-27 PSRoffset
 * bit 28 PSRwrap
 * bit 29-31 unused
 */

/* struct rx_status_block is a structure representing the status of the Rx
 * DMA engine; it sits in free memory, and is pointed to by 0x101c / 0x1020
 */
struct rx_status_block {
	u32 word0;
	u32 word1;
};

/* Structure for look-up table holding free buffer ring pointers, addresses
 * and state.
 */
struct fbr_lookup {
	void		*virt[MAX_DESC_PER_RING_RX];
	u32		 bus_high[MAX_DESC_PER_RING_RX];
	u32		 bus_low[MAX_DESC_PER_RING_RX];
	void		*ring_virtaddr;
	dma_addr_t	 ring_physaddr;
	void		*mem_virtaddrs[MAX_DESC_PER_RING_RX / FBR_CHUNKS];
	dma_addr_t	 mem_physaddrs[MAX_DESC_PER_RING_RX / FBR_CHUNKS];
	u32		 local_full;
	u32		 num_entries;
	dma_addr_t	 buffsize;
};

/* struct rx_ring is the structure representing the adaptor's local
 * reference(s) to the rings
 */
struct rx_ring {
	struct fbr_lookup *fbr[NUM_FBRS];
	void *ps_ring_virtaddr;
	dma_addr_t ps_ring_physaddr;
	u32 local_psr_full;
	u32 psr_num_entries;

	struct rx_status_block *rx_status_block;
	dma_addr_t rx_status_bus;

	/* RECV */
	struct list_head recv_list;
	u32 num_ready_recv;

	u32 num_rfd;

	bool unfinished_receives;
};

/* TX defines */
/* word 2 of the control bits in the Tx Descriptor ring for the ET-1310
 *
 * 0-15: length of packet
 * 16-27: VLAN tag
 * 28: VLAN CFI
 * 29-31: VLAN priority
 *
 * word 3 of the control bits in the Tx Descriptor ring for the ET-1310
 *
 * 0: last packet in the sequence
 * 1: first packet in the sequence
 * 2: interrupt the processor when this pkt sent
 * 3: Control word - no packet data
 * 4: Issue half-duplex backpressure : XON/XOFF
 * 5: send pause frame
 * 6: Tx frame has error
 * 7: append CRC
 * 8: MAC override
 * 9: pad packet
 * 10: Packet is a Huge packet
 * 11: append VLAN tag
 * 12: IP checksum assist
 * 13: TCP checksum assist
 * 14: UDP checksum assist
 */
#define TXDESC_FLAG_LASTPKT	0x0001
#define TXDESC_FLAG_FIRSTPKT	0x0002
#define TXDESC_FLAG_INTPROC	0x0004

/* struct tx_desc represents each descriptor on the ring */
struct tx_desc {
	u32 addr_hi;
	u32 addr_lo;
	u32 len_vlan;	/* control words how to xmit the */
	u32 flags;	/* data (detailed above) */
};

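/* A minimal illustrative sketch, not part of the original driver: for a
 * packet that fits in a single fragment, a descriptor could be filled in
 * along these lines, using the word 2 / word 3 layout documented above.
 * The helper name is hypothetical.
 */
static inline void tx_desc_fill_example(struct tx_desc *desc,
					dma_addr_t dma, u32 len)
{
	desc->addr_hi = upper_32_bits(dma);
	desc->addr_lo = lower_32_bits(dma);
	desc->len_vlan = len & 0xFFFF;	/* bits 0-15: length of packet */
	/* a single-fragment packet is both first and last in the sequence */
	desc->flags = TXDESC_FLAG_FIRSTPKT | TXDESC_FLAG_LASTPKT |
		      TXDESC_FLAG_INTPROC;	/* interrupt when sent */
}
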
/* The status of the Tx DMA engine sits in free memory, and is pointed to
 * by 0x101c / 0x1020. This is a DMA10 type
 */

/* TCB (Transmit Control Block: Host Side) */
struct tcb {
	struct tcb *next;	/* Next entry in ring */
	u32 count;		/* Used to spot stuck/lost packets */
	u32 stale;		/* Used to spot stuck/lost packets */
	struct sk_buff *skb;	/* Network skb we are tied to */
	u32 index;		/* Ring indexes */
	u32 index_start;
};

/* Structure representing our local reference(s) to the ring */
struct tx_ring {
	/* TCB (Transmit Control Block) memory and lists */
	struct tcb *tcb_ring;

	/* List of TCBs that are ready to be used */
	struct tcb *tcb_qhead;
	struct tcb *tcb_qtail;

	/* list of TCBs that are currently being sent. NOTE that access to all
	 * three of these (including used) are controlled via the
	 * TCBSendQLock. This lock should be secured prior to incrementing /
	 * decrementing used, or any queue manipulation on send_head /
	 * tail
	 */
	struct tcb *send_head;
	struct tcb *send_tail;
	int used;

	/* The actual descriptor ring */
	struct tx_desc *tx_desc_ring;
	dma_addr_t tx_desc_ring_pa;

	/* send_idx indicates where we last wrote to in the descriptor ring. */
	u32 send_idx;

	/* The location of the write-back status block */
	u32 *tx_status;
	dma_addr_t tx_status_pa;

	/* Packets since the last IRQ: used for interrupt coalescing */
	int since_irq;
};

/* Do not change these values: if changed, then change also in respective
 * TXdma and Rxdma engines
 */
#define NUM_DESC_PER_RING_TX	512	/* TX Do not change these values */
#define NUM_TCB			64

/* These values are all superseded by registry entries to facilitate tuning.
 * Once the desired performance has been achieved, the optimal registry values
 * should be re-populated to these #defines:
 */
#define TX_ERROR_PERIOD		1000

#define LO_MARK_PERCENT_FOR_PSR	15
#define LO_MARK_PERCENT_FOR_RX	15

/* RFD (Receive Frame Descriptor) */
struct rfd {
	struct list_head list_node;
	struct sk_buff *skb;
	u32 len;	/* total size of receive frame */
	u16 bufferindex;
	u8 ringindex;
};

/* Flow Control */
#define FLOW_BOTH	0
#define FLOW_TXONLY	1
#define FLOW_RXONLY	2
#define FLOW_NONE	3

/* Struct to define some device statistics */
struct ce_stats {
	/* MIB II variables
	 *
	 * NOTE: atomic_t types are only guaranteed to store 24-bits; if we
	 * MUST have 32, then we'll need another way to perform atomic
	 * operations
	 */
	u32 multicast_pkts_rcvd;
	u32 rcvd_pkts_dropped;

	/* Tx Statistics. */
	u32 tx_underflows;
	u32 tx_collisions;
	u32 tx_excessive_collisions;
	u32 tx_first_collisions;
	u32 tx_late_collisions;
	u32 tx_max_pkt_errs;
	u32 tx_deferred;

	/* Rx Statistics. */
	u32 rx_overflows;
	u32 rx_length_errs;
	u32 rx_align_errs;
	u32 rx_crc_errs;
	u32 rx_code_violations;
	u32 rx_other_errs;

	u32 synchronous_iterations;
	u32 interrupt_status;
};

/* The private adapter structure */
struct et131x_adapter {
	struct net_device *netdev;
	struct pci_dev *pdev;
	struct mii_bus *mii_bus;
	struct phy_device *phydev;
	struct napi_struct napi;

	/* Flags that indicate current state of the adapter */
	u32 flags;

	/* local link state, to determine if a state change has occurred */
	int link;

	/* Configuration */
	u8 rom_addr[ETH_ALEN];
	u8 addr[ETH_ALEN];
	bool has_eeprom;
	u8 eeprom_data[2];

	spinlock_t tcb_send_qlock;	/* protects the tx_ring send tcb list */
	spinlock_t tcb_ready_qlock;	/* protects the tx_ring ready tcb list */
	spinlock_t rcv_lock;		/* protects the rx_ring receive list */

	/* Packet Filter and look ahead size */
	u32 packet_filter;

	/* multicast list */
	u32 multicast_addr_count;
	u8 multicast_list[NIC_MAX_MCAST_LIST][ETH_ALEN];

	/* Pointer to the device's PCI register space */
	struct address_map __iomem *regs;

	/* Registry parameters */
	u8 wanted_flow;			/* Flow we want for 802.3x flow control */
	u32 registry_jumbo_packet;	/* Max supported ethernet packet size */

	/* Derived from the registry: */
	u8 flowcontrol;			/* flow control validated by the far-end */

	/* Minimize init-time */
	struct timer_list error_timer;

	/* variable putting the phy into coma mode when boot up with no cable
	 * plugged in after 5 seconds
	 */
	u8 boot_coma;

	/* Next two used to save power information at power down. This
	 * information will be used during power up to set up parts of Power
	 * Management in JAGCore
	 */
	u16 pdown_speed;
	u8 pdown_duplex;

	/* Tx Memory Variables */
	struct tx_ring tx_ring;

	/* Rx Memory Variables */
	struct rx_ring rx_ring;

	/* Stats */
	struct ce_stats stats;
};

static int eeprom_wait_ready(struct pci_dev *pdev, u32 *status)
{
	u32 reg;
	int i;

	/* 1. Check LBCIF Status Register for bits 6 & 3:2 all equal to 0 and
	 *    bits 7,1:0 both equal to 1, at least once after reset.
	 *    Subsequent operations need only to check that bits 1:0 are equal
	 *    to 1 prior to starting a single byte read/write
	 */
	for (i = 0; i < MAX_NUM_REGISTER_POLLS; i++) {
		/* Read registers grouped in DWORD1 */
		if (pci_read_config_dword(pdev, LBCIF_DWORD1_GROUP, &reg))
			return -EIO;

		/* I2C idle and Phy Queue Avail both true */
		if ((reg & 0x3000) == 0x3000) {
			if (status)
				*status = reg;
			return reg & 0xFF;
		}
	}
	return -ETIMEDOUT;
}

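/* Illustrative note, not part of the original driver: on success the
 * function above returns the low status byte (a non-negative value), so a
 * caller can both test for failure with err < 0 and reuse the returned
 * byte, as eeprom_read() below does when it stores err into *pdata.
 */
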
/* eeprom_write - Write a byte to the ET1310's EEPROM
 * @adapter: pointer to our private adapter structure
 * @addr: the address to write
 * @data: the value to write
 *
 * Returns 0 for a successful write.
 */
static int eeprom_write(struct et131x_adapter *adapter, u32 addr, u8 data)
{
	struct pci_dev *pdev = adapter->pdev;
	int index = 0;
	int retries;
	int err = 0;
	int writeok = 0;
	u32 status;
	u32 val = 0;

	/* For an EEPROM, an I2C single byte write is defined as a START
	 * condition followed by the device address, EEPROM address, one byte
	 * of data and a STOP condition. The STOP condition will trigger the
	 * EEPROM's internally timed write cycle to the nonvolatile memory.
	 * All inputs are disabled during this write cycle and the EEPROM will
	 * not respond to any access until the internal write is complete.
	 */
	err = eeprom_wait_ready(pdev, NULL);
	if (err < 0)
		return err;

	/* 2. Write to the LBCIF Control Register: bit 7=1, bit 6=1, bit 3=0,
	 *    and bits 1:0 both =0. Bit 5 should be set according to the
	 *    type of EEPROM being accessed (1=two byte addressing, 0=one
	 *    byte addressing).
	 */
	if (pci_write_config_byte(pdev, LBCIF_CONTROL_REGISTER,
				  LBCIF_CONTROL_LBCIF_ENABLE |
				  LBCIF_CONTROL_I2C_WRITE))
		return -EIO;

	/* Prepare EEPROM address for Step 3 */
	for (retries = 0; retries < MAX_NUM_WRITE_RETRIES; retries++) {
		/* Write the address to the LBCIF Address Register */
		if (pci_write_config_dword(pdev, LBCIF_ADDRESS_REGISTER, addr))
			break;
		/* Write the data to the LBCIF Data Register (the I2C write
		 * will begin).
		 */
		if (pci_write_config_byte(pdev, LBCIF_DATA_REGISTER, data))
			break;
		/* Monitor bit 1:0 of the LBCIF Status Register. When bits
		 * 1:0 are both equal to 1, the I2C write has completed and the
		 * internal write cycle of the EEPROM is about to start.
		 * (bits 1:0 = 01 is a legal state while waiting from both
		 * equal to 1, but bits 1:0 = 10 is invalid and implies that
		 * something is broken).
		 */
		err = eeprom_wait_ready(pdev, &status);
		if (err < 0)
			return 0;

		/* Check bit 3 of the LBCIF Status Register. If equal to 1,
		 * an error has occurred. Don't break here if we are revision
		 * 1, this is so we do a blind write for load bug.
		 */
		if ((status & LBCIF_STATUS_GENERAL_ERROR) &&
		    adapter->pdev->revision == 0)
			break;

		/* Check bit 2 of the LBCIF Status Register. If equal to 1 an
		 * ACK error has occurred on the address phase of the write.
		 * This could be due to an actual hardware failure or the
		 * EEPROM may still be in its internal write cycle from a
		 * previous write. This write operation was ignored and must be
		 * repeated later.
		 */
		if (status & LBCIF_STATUS_ACK_ERROR) {
			/* This could be due to an actual hardware failure
			 * or the EEPROM may still be in its internal write
			 * cycle from a previous write. This write operation
			 * was ignored and must be repeated later.
			 */
			udelay(10);
			continue;
		}

		writeok = 1;
		break;
	}

	/* Set bit 6 of the LBCIF Control Register = 0. */
	udelay(10);

	while (1) {
		if (pci_write_config_byte(pdev, LBCIF_CONTROL_REGISTER,
					  LBCIF_CONTROL_LBCIF_ENABLE))
			writeok = 0;

		/* Do read until internal ACK_ERROR goes away meaning write
		 * completed
		 */
		do {
			pci_write_config_dword(pdev,
					       LBCIF_ADDRESS_REGISTER,
					       addr);
			do {
				pci_read_config_dword(pdev,
						      LBCIF_DATA_REGISTER,
						      &val);
			} while ((val & 0x00010000) == 0);
		} while (val & 0x00040000);

		if ((val & 0xFF00) != 0xC000 || index == 10000)
			break;
		index++;
	}
	return writeok ? 0 : -EIO;
}

/* eeprom_read - Read a byte from the ET1310's EEPROM
 * @adapter: pointer to our private adapter structure
 * @addr: the address from which to read
 * @pdata: a pointer to a byte in which to store the value of the read
 *
 * Returns 0 for a successful read.
 */
static int eeprom_read(struct et131x_adapter *adapter, u32 addr, u8 *pdata)
{
	struct pci_dev *pdev = adapter->pdev;
	int err;
	u32 status;

	/* A single byte read is similar to the single byte write, with the
	 * exception of the data flow:
	 */
	err = eeprom_wait_ready(pdev, NULL);
	if (err < 0)
		return err;
	/* Write to the LBCIF Control Register: bit 7=1, bit 6=0, bit 3=0,
	 * and bits 1:0 both =0. Bit 5 should be set according to the type
	 * of EEPROM being accessed (1=two byte addressing, 0=one byte
	 * addressing).
	 */
	if (pci_write_config_byte(pdev, LBCIF_CONTROL_REGISTER,
				  LBCIF_CONTROL_LBCIF_ENABLE))
		return -EIO;
	/* Write the address to the LBCIF Address Register (I2C read will
	 * begin).
	 */
	if (pci_write_config_dword(pdev, LBCIF_ADDRESS_REGISTER, addr))
		return -EIO;
	/* Monitor bit 0 of the LBCIF Status Register. When = 1, I2C read
	 * is complete. (if bit 1 =1 and bit 0 stays = 0, a hardware failure
	 * has occurred).
	 */
	err = eeprom_wait_ready(pdev, &status);
	if (err < 0)
		return err;
	/* Regardless of error status, read data byte from LBCIF Data
	 * Register.
	 */
	*pdata = err;
	/* Check bit 2 of the LBCIF Status Register. If = 1,
	 * then an error has occurred.
	 */
	return (status & LBCIF_STATUS_ACK_ERROR) ? -EIO : 0;
}

static int et131x_init_eeprom(struct et131x_adapter *adapter)
{
	struct pci_dev *pdev = adapter->pdev;
	u8 eestatus;

	/* We first need to check the EEPROM Status code located at offset
	 * 0xB2 of config space
	 */
	pci_read_config_byte(pdev, ET1310_PCI_EEPROM_STATUS, &eestatus);

	/* THIS IS A WORKAROUND:
	 * I need to call this function twice to get my card in a
	 * LG M1 Express Dual running. I tried also a msleep before this
	 * function, because I thought there could be some time conditions,
	 * but it didn't work. Calling the whole function twice also works.
	 */
	if (pci_read_config_byte(pdev, ET1310_PCI_EEPROM_STATUS, &eestatus)) {
		dev_err(&pdev->dev,
			"Could not read PCI config space for EEPROM Status\n");
		return -EIO;
	}

	/* Determine if the error(s) we care about are present. If they are
	 * present we need to fail.
	 */
	if (eestatus & 0x4C) {
		int write_failed = 0;

		if (pdev->revision == 0x01) {
			int i;
			static const u8 eedata[4] = { 0xFE, 0x13, 0x10, 0xFF };

			/* Re-write the first 4 bytes if we have an eeprom
			 * present and the revision id is 1, this fixes the
			 * corruption seen with 1310 B Silicon
			 */
			for (i = 0; i < 3; i++)
				if (eeprom_write(adapter, i, eedata[i]) < 0)
					write_failed = 1;
		}
		if (pdev->revision != 0x01 || write_failed) {
			dev_err(&pdev->dev,
				"Fatal EEPROM Status Error - 0x%04x\n",
				eestatus);

			/* This error could mean that there was an error
			 * reading the eeprom or that the eeprom doesn't exist.
			 * We will treat each case the same and not try to
			 * gather additional information that normally would
			 * come from the eeprom, like MAC Address
			 */
			adapter->has_eeprom = 0;
			return -EIO;
		}
	}
	adapter->has_eeprom = 1;

	/* Read the EEPROM for information regarding LED behavior. Refer to
	 * et131x_xcvr_init() for its use.
	 */
	eeprom_read(adapter, 0x70, &adapter->eeprom_data[0]);
	eeprom_read(adapter, 0x71, &adapter->eeprom_data[1]);

	if (adapter->eeprom_data[0] != 0xcd)
		/* Disable all optional features */
		adapter->eeprom_data[1] = 0x00;

	return 0;
}

/* et131x_rx_dma_enable - re-start of Rx_DMA on the ET1310.
 * @adapter: pointer to our adapter structure
 */
static void et131x_rx_dma_enable(struct et131x_adapter *adapter)
{
	/* Setup the receive dma configuration register for normal operation */
	u32 csr = ET_RXDMA_CSR_FBR1_ENABLE;
	struct rx_ring *rx_ring = &adapter->rx_ring;

	if (rx_ring->fbr[1]->buffsize == 4096)
		csr |= ET_RXDMA_CSR_FBR1_SIZE_LO;
	else if (rx_ring->fbr[1]->buffsize == 8192)
		csr |= ET_RXDMA_CSR_FBR1_SIZE_HI;
	else if (rx_ring->fbr[1]->buffsize == 16384)
		csr |= ET_RXDMA_CSR_FBR1_SIZE_LO | ET_RXDMA_CSR_FBR1_SIZE_HI;

	csr |= ET_RXDMA_CSR_FBR0_ENABLE;
	if (rx_ring->fbr[0]->buffsize == 256)
		csr |= ET_RXDMA_CSR_FBR0_SIZE_LO;
	else if (rx_ring->fbr[0]->buffsize == 512)
		csr |= ET_RXDMA_CSR_FBR0_SIZE_HI;
	else if (rx_ring->fbr[0]->buffsize == 1024)
		csr |= ET_RXDMA_CSR_FBR0_SIZE_LO | ET_RXDMA_CSR_FBR0_SIZE_HI;
	writel(csr, &adapter->regs->rxdma.csr);

	csr = readl(&adapter->regs->rxdma.csr);
	if (csr & ET_RXDMA_CSR_HALT_STATUS) {
		udelay(5);
		csr = readl(&adapter->regs->rxdma.csr);
		if (csr & ET_RXDMA_CSR_HALT_STATUS) {
			dev_err(&adapter->pdev->dev,
				"RX Dma failed to exit halt state. CSR 0x%08x\n",
				csr);
		}
	}
}

/* et131x_rx_dma_disable - Stop of Rx_DMA on the ET1310
 * @adapter: pointer to our adapter structure
 */
static void et131x_rx_dma_disable(struct et131x_adapter *adapter)
{
	u32 csr;

	/* Setup the receive dma configuration register */
	writel(ET_RXDMA_CSR_HALT | ET_RXDMA_CSR_FBR1_ENABLE,
	       &adapter->regs->rxdma.csr);
	csr = readl(&adapter->regs->rxdma.csr);
	if (!(csr & ET_RXDMA_CSR_HALT_STATUS)) {
		udelay(5);
		csr = readl(&adapter->regs->rxdma.csr);
		if (!(csr & ET_RXDMA_CSR_HALT_STATUS))
			dev_err(&adapter->pdev->dev,
				"RX Dma failed to enter halt state. CSR 0x%08x\n",
				csr);
	}
}

/* et131x_tx_dma_enable - re-start of Tx_DMA on the ET1310.
 * @adapter: pointer to our adapter structure
 *
 * Mainly used after a return to the D0 (full-power) state from a lower state.
 */
static void et131x_tx_dma_enable(struct et131x_adapter *adapter)
{
	/* Setup the transmit dma configuration register for normal
	 * operation
	 */
	writel(ET_TXDMA_SNGL_EPKT | (PARM_DMA_CACHE_DEF << ET_TXDMA_CACHE_SHIFT),
	       &adapter->regs->txdma.csr);
}

static inline void add_10bit(u32 *v, int n)
{
	*v = INDEX10(*v + n) | (*v & ET_DMA10_WRAP);
}

static inline void add_12bit(u32 *v, int n)
{
	*v = INDEX12(*v + n) | (*v & ET_DMA12_WRAP);
}

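/* Illustrative note, not part of the original driver: these helpers keep a
 * ring offset in the low 10 (or 12) bits and preserve the wrap flag that
 * sits one bit above it, assuming the conventional INDEX10()/ET_DMA10_WRAP
 * definitions from et131x.h. For example, with a 10-bit index at offset
 * 1023, add_10bit(&v, 1) rolls the offset over to 0 while leaving the wrap
 * bit untouched; toggling the wrap bit itself is left to the callers.
 */
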
/* et1310_config_mac_regs1 - Initialize the first part of MAC regs
 * @adapter: pointer to our adapter structure
 */
static void et1310_config_mac_regs1(struct et131x_adapter *adapter)
{
	struct mac_regs __iomem *macregs = &adapter->regs->mac;
	u32 station1;
	u32 station2;
	u32 ipg;

	/* First we need to reset everything. Write to MAC configuration
	 * register 1 to perform reset.
	 */
	writel(ET_MAC_CFG1_SOFT_RESET | ET_MAC_CFG1_SIM_RESET |
	       ET_MAC_CFG1_RESET_RXMC | ET_MAC_CFG1_RESET_TXMC |
	       ET_MAC_CFG1_RESET_RXFUNC | ET_MAC_CFG1_RESET_TXFUNC,
	       &macregs->cfg1);

	/* Next lets configure the MAC Inter-packet gap register */
	ipg = 0x38005860;	/* IPG1 0x38 IPG2 0x58 B2B 0x60 */
	ipg |= 0x50 << 8;	/* ifg enforce 0x50 */
	writel(ipg, &macregs->ipg);

	/* Next lets configure the MAC Half Duplex register */
	/* BEB trunc 0xA, Ex Defer, Rexmit 0xF Coll 0x37 */
	writel(0x00A1F037, &macregs->hfdp);

	/* Next lets configure the MAC Interface Control register */
	writel(0, &macregs->if_ctrl);

	/* Let's move on to setting up the mii management configuration */
	writel(ET_MAC_MIIMGMT_CLK_RST, &macregs->mii_mgmt_cfg);

	/* Next lets configure the MAC Station Address register. These
	 * values are read from the EEPROM during initialization and stored
	 * in the adapter structure. We write what is stored in the adapter
	 * structure to the MAC Station Address registers high and low. This
	 * station address is used for generating and checking pause control
	 * packets.
	 */
	station2 = (adapter->addr[1] << ET_MAC_STATION_ADDR2_OC2_SHIFT) |
		   (adapter->addr[0] << ET_MAC_STATION_ADDR2_OC1_SHIFT);
	station1 = (adapter->addr[5] << ET_MAC_STATION_ADDR1_OC6_SHIFT) |
		   (adapter->addr[4] << ET_MAC_STATION_ADDR1_OC5_SHIFT) |
		   (adapter->addr[3] << ET_MAC_STATION_ADDR1_OC4_SHIFT) |
		    adapter->addr[2];
	writel(station1, &macregs->station_addr_1);
	writel(station2, &macregs->station_addr_2);

	/* Max ethernet packet in bytes that will be passed by the mac without
	 * being truncated. Allow the MAC to pass 4 more than our max packet
	 * size. This is 4 for the Ethernet CRC.
	 *
	 * Packets larger than (registry_jumbo_packet) that do not contain a
	 * VLAN ID will be dropped by the Rx function.
	 */
	writel(adapter->registry_jumbo_packet + 4, &macregs->max_fm_len);

	/* clear out MAC config reset */
	writel(0, &macregs->cfg1);
}

/* et1310_config_mac_regs2 - Initialize the second part of MAC regs
 * @adapter: pointer to our adapter structure
 */
static void et1310_config_mac_regs2(struct et131x_adapter *adapter)
{
	int32_t delay = 0;
	struct mac_regs __iomem *mac = &adapter->regs->mac;
	struct phy_device *phydev = adapter->phydev;
	u32 cfg1;
	u32 cfg2;
	u32 ifctrl;
	u32 ctl;

	ctl = readl(&adapter->regs->txmac.ctl);
	cfg1 = readl(&mac->cfg1);
	cfg2 = readl(&mac->cfg2);
	ifctrl = readl(&mac->if_ctrl);

	/* Set up the if mode bits */
	cfg2 &= ~ET_MAC_CFG2_IFMODE_MASK;
	if (phydev->speed == SPEED_1000) {
		cfg2 |= ET_MAC_CFG2_IFMODE_1000;
		/* Phy mode bit */
		ifctrl &= ~ET_MAC_IFCTRL_PHYMODE;
	} else {
		cfg2 |= ET_MAC_CFG2_IFMODE_100;
		ifctrl |= ET_MAC_IFCTRL_PHYMODE;
	}

	/* We need to enable Rx/Tx */
	cfg1 |= ET_MAC_CFG1_RX_ENABLE | ET_MAC_CFG1_TX_ENABLE |
		ET_MAC_CFG1_TX_FLOW;
	/* Initialize loop back to off */
	cfg1 &= ~(ET_MAC_CFG1_LOOPBACK | ET_MAC_CFG1_RX_FLOW);
	if (adapter->flowcontrol == FLOW_RXONLY ||
	    adapter->flowcontrol == FLOW_BOTH)
		cfg1 |= ET_MAC_CFG1_RX_FLOW;
	writel(cfg1, &mac->cfg1);

	/* Now we need to initialize the MAC Configuration 2 register */
	/* preamble 7, check length, huge frame off, pad crc, crc enable
	 * full duplex off
	 */
	cfg2 |= 0x7 << ET_MAC_CFG2_PREAMBLE_SHIFT;
	cfg2 |= ET_MAC_CFG2_IFMODE_LEN_CHECK;
	cfg2 |= ET_MAC_CFG2_IFMODE_PAD_CRC;
	cfg2 |= ET_MAC_CFG2_IFMODE_CRC_ENABLE;
	cfg2 &= ~ET_MAC_CFG2_IFMODE_HUGE_FRAME;
	cfg2 &= ~ET_MAC_CFG2_IFMODE_FULL_DPLX;

	/* Turn on duplex if needed */
	if (phydev->duplex == DUPLEX_FULL)
		cfg2 |= ET_MAC_CFG2_IFMODE_FULL_DPLX;

	ifctrl &= ~ET_MAC_IFCTRL_GHDMODE;
	if (phydev->duplex == DUPLEX_HALF)
		ifctrl |= ET_MAC_IFCTRL_GHDMODE;

	writel(ifctrl, &mac->if_ctrl);
	writel(cfg2, &mac->cfg2);

	do {
		udelay(10);
		delay++;
		cfg1 = readl(&mac->cfg1);
	} while ((cfg1 & ET_MAC_CFG1_WAIT) != ET_MAC_CFG1_WAIT && delay < 100);

	if (delay == 100) {
		dev_warn(&adapter->pdev->dev,
			 "Syncd bits did not respond correctly cfg1 word 0x%08x\n",
			 cfg1);
	}

	/* Enable txmac */
	ctl |= ET_TX_CTRL_TXMAC_ENABLE | ET_TX_CTRL_FC_DISABLE;
	writel(ctl, &adapter->regs->txmac.ctl);

	/* Ready to start the RXDMA/TXDMA engine */
	if (!(adapter->flags & FMP_ADAPTER_LOWER_POWER)) {
		et131x_rx_dma_enable(adapter);
		et131x_tx_dma_enable(adapter);
	}
}

/* et1310_in_phy_coma - check if the device is in phy coma
 * @adapter: pointer to our adapter structure
 *
 * Returns 0 if the device is not in phy coma, 1 if it is in phy coma
 */
static int et1310_in_phy_coma(struct et131x_adapter *adapter)
{
	u32 pmcsr = readl(&adapter->regs->global.pm_csr);

	return ET_PM_PHY_SW_COMA & pmcsr ? 1 : 0;
}

static void et1310_setup_device_for_multicast(struct et131x_adapter *adapter)
{
	struct rxmac_regs __iomem *rxmac = &adapter->regs->rxmac;
	u32 hash1 = 0;
	u32 hash2 = 0;
	u32 hash3 = 0;
	u32 hash4 = 0;
	u32 pm_csr;

	/* If ET131X_PACKET_TYPE_MULTICAST is specified, then we provision
	 * the multi-cast LIST. If it is NOT specified, (and "ALL" is not
	 * specified) then we should pass NO multi-cast addresses to the
	 * driver.
	 */
	if (adapter->packet_filter & ET131X_PACKET_TYPE_MULTICAST) {
		int i;

		/* Loop through our multicast array and set up the device */
		for (i = 0; i < adapter->multicast_addr_count; i++) {
			u32 result;

			result = ether_crc(6, adapter->multicast_list[i]);

			result = (result & 0x3F800000) >> 23;

			if (result < 32) {
				hash1 |= (1 << result);
			} else if ((31 < result) && (result < 64)) {
				result -= 32;
				hash2 |= (1 << result);
			} else if ((63 < result) && (result < 96)) {
				result -= 64;
				hash3 |= (1 << result);
			} else {
				result -= 96;
				hash4 |= (1 << result);
			}
		}
	}

	/* Write out the new hash to the device */
	pm_csr = readl(&adapter->regs->global.pm_csr);
	if (!et1310_in_phy_coma(adapter)) {
		writel(hash1, &rxmac->multi_hash1);
		writel(hash2, &rxmac->multi_hash2);
		writel(hash3, &rxmac->multi_hash3);
		writel(hash4, &rxmac->multi_hash4);
	}
}

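/* Illustrative note, not part of the original driver: the expression
 * (crc & 0x3F800000) >> 23 above keeps bits 23-29 of the Ethernet CRC,
 * i.e. a 7-bit bucket number 0-127, spread across the four 32-bit
 * multi_hash registers (hash1 holds buckets 0-31, hash2 32-63, and so
 * on). For example, a CRC of 0x1A800000 selects bucket 53, which is
 * bit 21 of hash2.
 */
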
static void et1310_setup_device_for_unicast(struct et131x_adapter *adapter)
{
	struct rxmac_regs __iomem *rxmac = &adapter->regs->rxmac;
	u32 uni_pf1;
	u32 uni_pf2;
	u32 uni_pf3;
	u32 pm_csr;

	/* Set up unicast packet filter reg 3 to be the first two octets of
	 * the MAC address for both address
	 *
	 * Set up unicast packet filter reg 2 to be the octets 2 - 5 of the
	 * MAC address for second address
	 *
	 * Set up unicast packet filter reg 1 to be the octets 2 - 5 of the
	 * MAC address for first address
	 */
	uni_pf3 = (adapter->addr[0] << ET_RX_UNI_PF_ADDR2_1_SHIFT) |
		  (adapter->addr[1] << ET_RX_UNI_PF_ADDR2_2_SHIFT) |
		  (adapter->addr[0] << ET_RX_UNI_PF_ADDR1_1_SHIFT) |
		   adapter->addr[1];

	uni_pf2 = (adapter->addr[2] << ET_RX_UNI_PF_ADDR2_3_SHIFT) |
		  (adapter->addr[3] << ET_RX_UNI_PF_ADDR2_4_SHIFT) |
		  (adapter->addr[4] << ET_RX_UNI_PF_ADDR2_5_SHIFT) |
		   adapter->addr[5];

	uni_pf1 = (adapter->addr[2] << ET_RX_UNI_PF_ADDR1_3_SHIFT) |
		  (adapter->addr[3] << ET_RX_UNI_PF_ADDR1_4_SHIFT) |
		  (adapter->addr[4] << ET_RX_UNI_PF_ADDR1_5_SHIFT) |
		   adapter->addr[5];

	pm_csr = readl(&adapter->regs->global.pm_csr);
	if (!et1310_in_phy_coma(adapter)) {
		writel(uni_pf1, &rxmac->uni_pf_addr1);
		writel(uni_pf2, &rxmac->uni_pf_addr2);
		writel(uni_pf3, &rxmac->uni_pf_addr3);
	}
}

static void et1310_config_rxmac_regs(struct et131x_adapter *adapter)
{
	struct rxmac_regs __iomem *rxmac = &adapter->regs->rxmac;
	struct phy_device *phydev = adapter->phydev;
	u32 sa_lo;
	u32 sa_hi = 0;
	u32 pf_ctrl = 0;
	u32 __iomem *wolw;

	/* Disable the MAC while it is being configured (also disable WOL) */
	writel(0x8, &rxmac->ctrl);

	/* Initialize WOL to disabled. */
	writel(0, &rxmac->crc0);
	writel(0, &rxmac->crc12);
	writel(0, &rxmac->crc34);

	/* We need to set the WOL mask0 - mask4 next. We initialize it to
	 * its default value of 0x00000000 because there are no WOL masks
	 * as of this time.
	 */
	for (wolw = &rxmac->mask0_word0; wolw <= &rxmac->mask4_word3; wolw++)
		writel(0, wolw);

	/* Lets setup the WOL Source Address */
	sa_lo = (adapter->addr[2] << ET_RX_WOL_LO_SA3_SHIFT) |
		(adapter->addr[3] << ET_RX_WOL_LO_SA4_SHIFT) |
		(adapter->addr[4] << ET_RX_WOL_LO_SA5_SHIFT) |
		 adapter->addr[5];
	writel(sa_lo, &rxmac->sa_lo);

	sa_hi = (u32)(adapter->addr[0] << ET_RX_WOL_HI_SA1_SHIFT) |
		      adapter->addr[1];
	writel(sa_hi, &rxmac->sa_hi);

	/* Disable all Packet Filtering */
	writel(0, &rxmac->pf_ctrl);

	/* Let's initialize the Unicast Packet filtering address */
	if (adapter->packet_filter & ET131X_PACKET_TYPE_DIRECTED) {
		et1310_setup_device_for_unicast(adapter);
		pf_ctrl |= ET_RX_PFCTRL_UNICST_FILTER_ENABLE;
	} else {
		writel(0, &rxmac->uni_pf_addr1);
		writel(0, &rxmac->uni_pf_addr2);
		writel(0, &rxmac->uni_pf_addr3);
	}

	/* Let's initialize the Multicast hash */
	if (!(adapter->packet_filter & ET131X_PACKET_TYPE_ALL_MULTICAST)) {
		pf_ctrl |= ET_RX_PFCTRL_MLTCST_FILTER_ENABLE;
		et1310_setup_device_for_multicast(adapter);
	}

	/* Runt packet filtering. Didn't work in version A silicon. */
	pf_ctrl |= (NIC_MIN_PACKET_SIZE + 4) << ET_RX_PFCTRL_MIN_PKT_SZ_SHIFT;
	pf_ctrl |= ET_RX_PFCTRL_FRAG_FILTER_ENABLE;

	if (adapter->registry_jumbo_packet > 8192)
		/* In order to transmit jumbo packets greater than 8k, the
		 * FIFO between RxMAC and RxDMA needs to be reduced in size
		 * to (16k - Jumbo packet size). In order to implement this,
		 * we must use "cut through" mode in the RxMAC, which chops
		 * packets down into segments which are (max_size * 16). In
		 * this case we selected 256 bytes, since this is the size of
		 * the PCI-Express TLP's that the 1310 uses.
		 *
		 * seg_en on, fc_en off, size 0x10
		 */
		writel(0x41, &rxmac->mcif_ctrl_max_seg);
	else
		writel(0, &rxmac->mcif_ctrl_max_seg);

	/* Initialize the MCIF water marks */
	writel(0, &rxmac->mcif_water_mark);

	/* Initialize the MIF control */
	writel(0, &rxmac->mif_ctrl);

	/* Initialize the Space Available Register */
	writel(0, &rxmac->space_avail);

	/* Initialize the mif_ctrl register
	 * bit 3:  Receive code error. One or more nibbles were signaled as
	 *         errors during the reception of the packet. Clear this
	 *         bit in Gigabit, set it in 100Mbit. This was derived
	 *         experimentally at UNH.
	 * bit 4:  Receive CRC error. The packet's CRC did not match the
	 *         internally generated CRC.
	 * bit 5:  Receive length check error. Indicates that frame length
	 *         field value in the packet does not match the actual data
	 *         byte length and is not a type field.
	 * bit 16: Receive frame truncated.
	 * bit 17: Drop packet enable
	 */
	if (phydev && phydev->speed == SPEED_100)
		writel(0x30038, &rxmac->mif_ctrl);
	else
		writel(0x30030, &rxmac->mif_ctrl);

	/* Finally we initialize RxMac to be enabled & WOL disabled. Packet
	 * filter is always enabled since it is where the runt packets are
	 * supposed to be dropped. For version A silicon, runt packet
	 * dropping doesn't work, so it is disabled in the pf_ctrl register,
	 * but we still leave the packet filter on.
	 */
	writel(pf_ctrl, &rxmac->pf_ctrl);
	writel(ET_RX_CTRL_RXMAC_ENABLE | ET_RX_CTRL_WOL_DISABLE, &rxmac->ctrl);
}

static void et1310_config_txmac_regs(struct et131x_adapter *adapter)
{
	struct txmac_regs __iomem *txmac = &adapter->regs->txmac;

	/* We need to update the Control Frame Parameters
	 * cfpt - control frame pause timer set to 64 (0x40)
	 * cfep - control frame extended pause timer set to 0x0
	 */
	if (adapter->flowcontrol == FLOW_NONE)
		writel(0, &txmac->cf_param);
	else
		writel(0x40, &txmac->cf_param);
}

static void et1310_config_macstat_regs(struct et131x_adapter *adapter)
{
	struct macstat_regs __iomem *macstat = &adapter->regs->macstat;
	u32 __iomem *reg;

	/* initialize all the macstat registers to zero on the device */
	for (reg = &macstat->txrx_0_64_byte_frames;
	     reg <= &macstat->carry_reg2; reg++)
		writel(0, reg);

	/* Unmask any counters that we want to track the overflow of.
	 * Initially this will be all counters. It may become clear later
	 * that we do not need to track all counters.
	 */
	writel(0xFFFFBE32, &macstat->carry_reg1_mask);
	writel(0xFFFE7E8B, &macstat->carry_reg2_mask);
}

/* et131x_phy_mii_read - Read from the PHY through the MII Interface on the MAC
 * @adapter: pointer to our private adapter structure
 * @addr: the address of the transceiver
 * @reg: the register to read
 * @value: pointer to a 16-bit value in which the value will be stored
 */
static int et131x_phy_mii_read(struct et131x_adapter *adapter, u8 addr,
			       u8 reg, u16 *value)
{
	struct mac_regs __iomem *mac = &adapter->regs->mac;
	int status = 0;
	u32 delay = 0;
	u32 mii_addr;
	u32 mii_cmd;
	u32 mii_indicator;

	/* Save a local copy of the registers we are dealing with so we can
	 * set them back
	 */
	mii_addr = readl(&mac->mii_mgmt_addr);
	mii_cmd = readl(&mac->mii_mgmt_cmd);

	/* Stop the current operation */
	writel(0, &mac->mii_mgmt_cmd);

	/* Set up the register we need to read from on the correct PHY */
	writel(ET_MAC_MII_ADDR(addr, reg), &mac->mii_mgmt_addr);

	writel(0x1, &mac->mii_mgmt_cmd);

	do {
		udelay(50);
		delay++;
		mii_indicator = readl(&mac->mii_mgmt_indicator);
	} while ((mii_indicator & ET_MAC_MGMT_WAIT) && delay < 50);

	/* If we hit the max delay, we could not read the register */
	if (delay == 50) {
		dev_warn(&adapter->pdev->dev,
			 "reg 0x%08x could not be read\n", reg);
		dev_warn(&adapter->pdev->dev, "status is 0x%08x\n",
			 mii_indicator);

		status = -EIO;
		goto out;
	}

	/* If we hit here we were able to read the register and we need to
	 * return the value to the caller
	 */
	*value = readl(&mac->mii_mgmt_stat) & ET_MAC_MIIMGMT_STAT_PHYCRTL_MASK;

out:
	/* Stop the read operation */
	writel(0, &mac->mii_mgmt_cmd);

	/* set the registers we touched back to the state at which we entered
	 * this function
	 */
	writel(mii_addr, &mac->mii_mgmt_addr);
	writel(mii_cmd, &mac->mii_mgmt_cmd);

	return status;
}

static int et131x_mii_read(struct et131x_adapter *adapter, u8 reg, u16 *value)
{
	struct phy_device *phydev = adapter->phydev;

	if (!phydev)
		return -EIO;

	return et131x_phy_mii_read(adapter, phydev->addr, reg, value);
}

/* et131x_mii_write - Write to a PHY reg through the MII interface of the MAC
 * @adapter: pointer to our private adapter structure
 * @addr: the address of the transceiver
 * @reg: the register to write
 * @value: 16-bit value to write
 */
static int et131x_mii_write(struct et131x_adapter *adapter, u8 addr, u8 reg,
			    u16 value)
{
	struct mac_regs __iomem *mac = &adapter->regs->mac;
	int status = 0;
	u32 delay = 0;
	u32 mii_addr;
	u32 mii_cmd;
	u32 mii_indicator;

	/* Save a local copy of the registers we are dealing with so we can
	 * set them back
	 */
	mii_addr = readl(&mac->mii_mgmt_addr);
	mii_cmd = readl(&mac->mii_mgmt_cmd);

	/* Stop the current operation */
	writel(0, &mac->mii_mgmt_cmd);

	/* Set up the register we need to write to on the correct PHY */
	writel(ET_MAC_MII_ADDR(addr, reg), &mac->mii_mgmt_addr);

	/* Add the value to write to the registers to the mac */
	writel(value, &mac->mii_mgmt_ctrl);

	do {
		udelay(50);
		delay++;
		mii_indicator = readl(&mac->mii_mgmt_indicator);
	} while ((mii_indicator & ET_MAC_MGMT_BUSY) && delay < 100);

	/* If we hit the max delay, we could not write the register */
	if (delay == 100) {
		u16 tmp;

		dev_warn(&adapter->pdev->dev,
			 "reg 0x%08x could not be written", reg);
		dev_warn(&adapter->pdev->dev, "status is 0x%08x\n",
			 mii_indicator);
		dev_warn(&adapter->pdev->dev, "command is 0x%08x\n",
			 readl(&mac->mii_mgmt_cmd));

		et131x_mii_read(adapter, reg, &tmp);

		status = -EIO;
	}
	/* Stop the write operation */
	writel(0, &mac->mii_mgmt_cmd);

	/* set the registers we touched back to the state at which we entered
	 * this function
	 */
	writel(mii_addr, &mac->mii_mgmt_addr);
	writel(mii_cmd, &mac->mii_mgmt_cmd);

	return status;
}

static void et1310_phy_read_mii_bit(struct et131x_adapter *adapter,
				    u16 regnum,
				    u16 bitnum,
				    u8 *value)
{
	u16 reg;
	u16 mask = 1 << bitnum;

	/* Read the requested register */
	et131x_mii_read(adapter, regnum, &reg);

	*value = (reg & mask) >> bitnum;
}

static void et1310_config_flow_control(struct et131x_adapter *adapter)
{
	struct phy_device *phydev = adapter->phydev;

	if (phydev->duplex == DUPLEX_HALF) {
		adapter->flowcontrol = FLOW_NONE;
	} else {
		u8 remote_pause, remote_async_pause;

		et1310_phy_read_mii_bit(adapter, 5, 10, &remote_pause);
		et1310_phy_read_mii_bit(adapter, 5, 11, &remote_async_pause);

		if (remote_pause && remote_async_pause) {
			adapter->flowcontrol = adapter->wanted_flow;
		} else if (remote_pause && !remote_async_pause) {
			if (adapter->wanted_flow == FLOW_BOTH)
				adapter->flowcontrol = FLOW_BOTH;
			else
				adapter->flowcontrol = FLOW_NONE;
		} else if (!remote_pause && !remote_async_pause) {
			adapter->flowcontrol = FLOW_NONE;
		} else {
			if (adapter->wanted_flow == FLOW_BOTH)
				adapter->flowcontrol = FLOW_RXONLY;
			else
				adapter->flowcontrol = FLOW_NONE;
		}
	}
}

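/* Illustrative note, not part of the original driver: register 5 read
 * above is the MII link partner ability register (MII_LPA); bit 10 is the
 * partner's pause capability (LPA_PAUSE_CAP) and bit 11 its asymmetric
 * pause capability (LPA_PAUSE_ASYM), which is what the resolution logic
 * above keys off.
 */
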
/* et1310_update_macstat_host_counters - Update local copy of the statistics */
static void et1310_update_macstat_host_counters(struct et131x_adapter *adapter)
{
	struct ce_stats *stats = &adapter->stats;
	struct macstat_regs __iomem *macstat =
		&adapter->regs->macstat;

	stats->tx_collisions += readl(&macstat->tx_total_collisions);
	stats->tx_first_collisions += readl(&macstat->tx_single_collisions);
	stats->tx_deferred += readl(&macstat->tx_deferred);
	stats->tx_excessive_collisions +=
		readl(&macstat->tx_multiple_collisions);
	stats->tx_late_collisions += readl(&macstat->tx_late_collisions);
	stats->tx_underflows += readl(&macstat->tx_undersize_frames);
	stats->tx_max_pkt_errs += readl(&macstat->tx_oversize_frames);

	stats->rx_align_errs += readl(&macstat->rx_align_errs);
	stats->rx_crc_errs += readl(&macstat->rx_code_errs);
	stats->rcvd_pkts_dropped += readl(&macstat->rx_drops);
	stats->rx_overflows += readl(&macstat->rx_oversize_packets);
	stats->rx_code_violations += readl(&macstat->rx_fcs_errs);
	stats->rx_length_errs += readl(&macstat->rx_frame_len_errs);
	stats->rx_other_errs += readl(&macstat->rx_fragment_packets);
}

/* et1310_handle_macstat_interrupt
 *
 * One of the MACSTAT counters has wrapped. Update the local copy of
 * the statistics held in the adapter structure, checking the "wrap"
 * bit for each counter.
 */
static void et1310_handle_macstat_interrupt(struct et131x_adapter *adapter)
{
	u32 carry_reg1;
	u32 carry_reg2;

	/* Read the interrupt bits from the register(s). These are Clear On
	 * Write.
	 */
	carry_reg1 = readl(&adapter->regs->macstat.carry_reg1);
	carry_reg2 = readl(&adapter->regs->macstat.carry_reg2);

	writel(carry_reg1, &adapter->regs->macstat.carry_reg1);
	writel(carry_reg2, &adapter->regs->macstat.carry_reg2);

	/* We need to update the host copy of all the MAC_STAT counters.
	 * For each counter, check its overflow bit. If the overflow bit is
	 * set, then increment the host version of the count by one complete
	 * revolution of the counter. This routine is called when the counter
	 * block indicates that one of the counters has wrapped.
	 */
	if (carry_reg1 & (1 << 14))
		adapter->stats.rx_code_violations += COUNTER_WRAP_16_BIT;
	if (carry_reg1 & (1 << 8))
		adapter->stats.rx_align_errs += COUNTER_WRAP_12_BIT;
	if (carry_reg1 & (1 << 7))
		adapter->stats.rx_length_errs += COUNTER_WRAP_16_BIT;
	if (carry_reg1 & (1 << 2))
		adapter->stats.rx_other_errs += COUNTER_WRAP_16_BIT;
	if (carry_reg1 & (1 << 6))
		adapter->stats.rx_crc_errs += COUNTER_WRAP_16_BIT;
	if (carry_reg1 & (1 << 3))
		adapter->stats.rx_overflows += COUNTER_WRAP_16_BIT;
	if (carry_reg1 & (1 << 0))
		adapter->stats.rcvd_pkts_dropped += COUNTER_WRAP_16_BIT;
	if (carry_reg2 & (1 << 16))
		adapter->stats.tx_max_pkt_errs += COUNTER_WRAP_12_BIT;
	if (carry_reg2 & (1 << 15))
		adapter->stats.tx_underflows += COUNTER_WRAP_12_BIT;
	if (carry_reg2 & (1 << 6))
		adapter->stats.tx_first_collisions += COUNTER_WRAP_12_BIT;
	if (carry_reg2 & (1 << 8))
		adapter->stats.tx_deferred += COUNTER_WRAP_12_BIT;
	if (carry_reg2 & (1 << 5))
		adapter->stats.tx_excessive_collisions += COUNTER_WRAP_12_BIT;
	if (carry_reg2 & (1 << 4))
		adapter->stats.tx_late_collisions += COUNTER_WRAP_12_BIT;
	if (carry_reg2 & (1 << 2))
		adapter->stats.tx_collisions += COUNTER_WRAP_12_BIT;
}

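/* Illustrative note, not part of the original driver: the hardware
 * counters are only 12 or 16 bits wide, so when a carry (wrap) bit is set
 * the 32-bit host copy is advanced by one full revolution of the counter:
 * 0x1000 for 12-bit counters and 0x10000 for 16-bit ones, per the
 * COUNTER_WRAP_* defines near the top of the file.
 */
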
static int et131x_mdio_read(struct mii_bus *bus, int phy_addr, int reg)
{
	struct net_device *netdev = bus->priv;
	struct et131x_adapter *adapter = netdev_priv(netdev);
	u16 value;
	int ret;

	ret = et131x_phy_mii_read(adapter, phy_addr, reg, &value);

	if (ret < 0)
		return ret;

	return value;
}

static int et131x_mdio_write(struct mii_bus *bus, int phy_addr,
			     int reg, u16 value)
{
	struct net_device *netdev = bus->priv;
	struct et131x_adapter *adapter = netdev_priv(netdev);

	return et131x_mii_write(adapter, phy_addr, reg, value);
}

/* et1310_phy_power_switch - PHY power control
 * @adapter: device to control
 * @down: true for off/false for back on
 *
 * one hundred, ten, one thousand megs
 * How would you like to have your LAN accessed
 * Can't you see that this code processed
 * Phy power, phy power..
 */
static void et1310_phy_power_switch(struct et131x_adapter *adapter, bool down)
{
	u16 data;
	struct phy_device *phydev = adapter->phydev;

	et131x_mii_read(adapter, MII_BMCR, &data);
	data &= ~BMCR_PDOWN;
	if (down)
		data |= BMCR_PDOWN;
	et131x_mii_write(adapter, phydev->addr, MII_BMCR, data);
}

15ae239d 1566/* et131x_xcvr_init - Init the phy if we are setting it into force mode */
eb7a6ca6 1567static void et131x_xcvr_init(struct et131x_adapter *adapter)
d2796743 1568{
d2796743 1569 u16 lcr2;
ec0a38bf 1570 struct phy_device *phydev = adapter->phydev;
d2796743 1571
d2796743
ME
1572 /* Set the LED behavior such that LED 1 indicates speed (off =
1573 * 10Mbits, blink = 100Mbits, on = 1000Mbits) and LED 2 indicates
1574 * link and activity (on for link, blink off for activity).
1575 *
1576 * NOTE: Some customizations have been added here for specific
1577 * vendors; The LED behavior is now determined by vendor data in the
1578 * EEPROM. However, the above description is the default.
1579 */
1580 if ((adapter->eeprom_data[1] & 0x4) == 0) {
1581 et131x_mii_read(adapter, PHY_LED_2, &lcr2);
1582
b5b86a4d 1583 lcr2 &= (ET_LED2_LED_100TX | ET_LED2_LED_1000T);
d2796743
ME
1584 lcr2 |= (LED_VAL_LINKON_ACTIVE << LED_LINK_SHIFT);
1585
1586 if ((adapter->eeprom_data[1] & 0x8) == 0)
1587 lcr2 |= (LED_VAL_1000BT_100BTX << LED_TXRX_SHIFT);
1588 else
1589 lcr2 |= (LED_VAL_LINKON << LED_TXRX_SHIFT);
1590
ec0a38bf 1591 et131x_mii_write(adapter, phydev->addr, PHY_LED_2, lcr2);
d2796743
ME
1592 }
1593}
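
/* Summary of the EEPROM bits consumed above (descriptive reading of the
 * code, not names from the datasheet): when eeprom_data[1] bit 2 is clear
 * the driver programs PHY_LED_2 itself, and bit 3 then chooses whether the
 * TX/RX LED shows 1000BT/100BTX activity or a plain link-on indication.
 */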

/* et131x_configure_global_regs - configure JAGCore global regs
 *
 * Used to configure the global registers on the JAGCore
 */
static void et131x_configure_global_regs(struct et131x_adapter *adapter)
{
	struct global_regs __iomem *regs = &adapter->regs->global;

	writel(0, &regs->rxq_start_addr);
	writel(INTERNAL_MEM_SIZE - 1, &regs->txq_end_addr);

	if (adapter->registry_jumbo_packet < 2048) {
		/* Tx / RxDMA and Tx/Rx MAC interfaces have a 1k word
		 * block of RAM that the driver can split between Tx
		 * and Rx as it desires. Our default is to split it
		 * 50/50:
		 */
		writel(PARM_RX_MEM_END_DEF, &regs->rxq_end_addr);
		writel(PARM_RX_MEM_END_DEF + 1, &regs->txq_start_addr);
	} else if (adapter->registry_jumbo_packet < 8192) {
		/* For jumbo packets > 2k but < 8k, split 50-50. */
		writel(INTERNAL_MEM_RX_OFFSET, &regs->rxq_end_addr);
		writel(INTERNAL_MEM_RX_OFFSET + 1, &regs->txq_start_addr);
	} else {
		/* 9216 is the only packet size greater than 8k that
		 * is available. The Tx buffer has to be big enough
		 * for one whole packet on the Tx side. We'll make
		 * the Tx 9408, and give the rest to Rx
		 */
		writel(0x01b3, &regs->rxq_end_addr);
		writel(0x01b4, &regs->txq_start_addr);
	}

	/* Initialize the loopback register. Disable all loopbacks. */
	writel(0, &regs->loopback);

	/* MSI Register */
	writel(0, &regs->msi_config);

	/* By default, disable the watchdog timer. It will be enabled when
	 * a packet is queued.
	 */
	writel(0, &regs->watchdog_timer);
}
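
/* Worked example for the 9216 case above (a sketch, assuming the queue
 * address registers count 16-byte words and INTERNAL_MEM_SIZE is 0x400
 * words): Tx owns words 0x01b4..0x3ff, i.e. (0x400 - 0x1b4) * 16 = 9408
 * bytes, enough for one whole 9216-byte jumbo frame; Rx keeps the
 * remaining 0x1b4 * 16 = 6976 bytes.
 */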

/* et131x_config_rx_dma_regs - Start of Rx_DMA init sequence */
static void et131x_config_rx_dma_regs(struct et131x_adapter *adapter)
{
	struct rxdma_regs __iomem *rx_dma = &adapter->regs->rxdma;
	struct rx_ring *rx_local = &adapter->rx_ring;
	struct fbr_desc *fbr_entry;
	u32 entry;
	u32 psr_num_des;
	unsigned long flags;
	u8 id;

	/* Halt RXDMA to perform the reconfigure. */
	et131x_rx_dma_disable(adapter);

	/* Load the completion writeback physical address */
	writel(upper_32_bits(rx_local->rx_status_bus), &rx_dma->dma_wb_base_hi);
	writel(lower_32_bits(rx_local->rx_status_bus), &rx_dma->dma_wb_base_lo);

	memset(rx_local->rx_status_block, 0, sizeof(struct rx_status_block));

	/* Set the address and parameters of the packet status ring into the
	 * 1310's registers
	 */
	writel(upper_32_bits(rx_local->ps_ring_physaddr), &rx_dma->psr_base_hi);
	writel(lower_32_bits(rx_local->ps_ring_physaddr), &rx_dma->psr_base_lo);
	writel(rx_local->psr_num_entries - 1, &rx_dma->psr_num_des);
	writel(0, &rx_dma->psr_full_offset);

	psr_num_des = readl(&rx_dma->psr_num_des) & ET_RXDMA_PSR_NUM_DES_MASK;
	writel((psr_num_des * LO_MARK_PERCENT_FOR_PSR) / 100,
	       &rx_dma->psr_min_des);

	spin_lock_irqsave(&adapter->rcv_lock, flags);

	/* These local variables track the PSR in the adapter structure */
	rx_local->local_psr_full = 0;

	for (id = 0; id < NUM_FBRS; id++) {
		u32 __iomem *num_des;
		u32 __iomem *full_offset;
		u32 __iomem *min_des;
		u32 __iomem *base_hi;
		u32 __iomem *base_lo;
		struct fbr_lookup *fbr = rx_local->fbr[id];

		if (id == 0) {
			num_des = &rx_dma->fbr0_num_des;
			full_offset = &rx_dma->fbr0_full_offset;
			min_des = &rx_dma->fbr0_min_des;
			base_hi = &rx_dma->fbr0_base_hi;
			base_lo = &rx_dma->fbr0_base_lo;
		} else {
			num_des = &rx_dma->fbr1_num_des;
			full_offset = &rx_dma->fbr1_full_offset;
			min_des = &rx_dma->fbr1_min_des;
			base_hi = &rx_dma->fbr1_base_hi;
			base_lo = &rx_dma->fbr1_base_lo;
		}

		/* Now's the best time to initialize FBR contents */
		fbr_entry = fbr->ring_virtaddr;
		for (entry = 0; entry < fbr->num_entries; entry++) {
			fbr_entry->addr_hi = fbr->bus_high[entry];
			fbr_entry->addr_lo = fbr->bus_low[entry];
			fbr_entry->word2 = entry;
			fbr_entry++;
		}

		/* Set the address and parameters of Free buffer ring 1 and 0
		 * into the 1310's registers
		 */
		writel(upper_32_bits(fbr->ring_physaddr), base_hi);
		writel(lower_32_bits(fbr->ring_physaddr), base_lo);
		writel(fbr->num_entries - 1, num_des);
		writel(ET_DMA10_WRAP, full_offset);

		/* This variable tracks the free buffer ring 1 full position,
		 * so it has to match the above.
		 */
		fbr->local_full = ET_DMA10_WRAP;
		writel(((fbr->num_entries * LO_MARK_PERCENT_FOR_RX) / 100) - 1,
		       min_des);
	}

	/* Program the number of packets we will receive before generating an
	 * interrupt.
	 * For version B silicon, this value gets updated once autoneg is
	 * complete.
	 */
	writel(PARM_RX_NUM_BUFS_DEF, &rx_dma->num_pkt_done);

	/* The "time_done" is not working correctly to coalesce interrupts
	 * after a given time period, but rather is giving us an interrupt
	 * regardless of whether we have received packets.
	 * This value gets updated once autoneg is complete.
	 */
	writel(PARM_RX_TIME_INT_DEF, &rx_dma->max_pkt_time);

	spin_unlock_irqrestore(&adapter->rcv_lock, flags);
}
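
/* A sketch of the PSR watermark math above, assuming LO_MARK_PERCENT_FOR_PSR
 * is 15 and the default non-jumbo config (psr_num_entries = 512 + 512):
 * psr_min_des is written as 1024 * 15 / 100 = 153, i.e. the device flags a
 * low-resource condition when roughly 15% of status entries remain.
 */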

/* et131x_config_tx_dma_regs - Set up the tx dma section of the JAGCore.
 *
 * Configure the transmit engine with the ring buffers we have created
 * and prepare it for use.
 */
static void et131x_config_tx_dma_regs(struct et131x_adapter *adapter)
{
	struct txdma_regs __iomem *txdma = &adapter->regs->txdma;
	struct tx_ring *tx_ring = &adapter->tx_ring;

	/* Load the hardware with the start of the transmit descriptor ring. */
	writel(upper_32_bits(tx_ring->tx_desc_ring_pa), &txdma->pr_base_hi);
	writel(lower_32_bits(tx_ring->tx_desc_ring_pa), &txdma->pr_base_lo);

	/* Initialise the transmit DMA engine */
	writel(NUM_DESC_PER_RING_TX - 1, &txdma->pr_num_des);

	/* Load the completion writeback physical address */
	writel(upper_32_bits(tx_ring->tx_status_pa), &txdma->dma_wb_base_hi);
	writel(lower_32_bits(tx_ring->tx_status_pa), &txdma->dma_wb_base_lo);

	*tx_ring->tx_status = 0;

	writel(0, &txdma->service_request);
	tx_ring->send_idx = 0;
}

/* et131x_adapter_setup - Set the adapter up as per cassini+ documentation */
static void et131x_adapter_setup(struct et131x_adapter *adapter)
{
	/* Configure the JAGCore */
	et131x_configure_global_regs(adapter);

	et1310_config_mac_regs1(adapter);

	/* Configure the MMC registers */
	/* All we need to do is initialize the Memory Control Register */
	writel(ET_MMC_ENABLE, &adapter->regs->mmc.mmc_ctrl);

	et1310_config_rxmac_regs(adapter);
	et1310_config_txmac_regs(adapter);

	et131x_config_rx_dma_regs(adapter);
	et131x_config_tx_dma_regs(adapter);

	et1310_config_macstat_regs(adapter);

	et1310_phy_power_switch(adapter, 0);
	et131x_xcvr_init(adapter);
}

/* et131x_soft_reset - Issue soft reset to the hardware, complete for ET1310 */
static void et131x_soft_reset(struct et131x_adapter *adapter)
{
	u32 reg;

	/* Disable MAC Core */
	reg = ET_MAC_CFG1_SOFT_RESET | ET_MAC_CFG1_SIM_RESET |
	      ET_MAC_CFG1_RESET_RXMC | ET_MAC_CFG1_RESET_TXMC |
	      ET_MAC_CFG1_RESET_RXFUNC | ET_MAC_CFG1_RESET_TXFUNC;
	writel(reg, &adapter->regs->mac.cfg1);

	reg = ET_RESET_ALL;
	writel(reg, &adapter->regs->global.sw_reset);

	reg = ET_MAC_CFG1_RESET_RXMC | ET_MAC_CFG1_RESET_TXMC |
	      ET_MAC_CFG1_RESET_RXFUNC | ET_MAC_CFG1_RESET_TXFUNC;
	writel(reg, &adapter->regs->mac.cfg1);
	writel(0, &adapter->regs->mac.cfg1);
}

/* et131x_enable_interrupts - enable interrupt
 *
 * Enable the appropriate interrupts on the ET131x according to our
 * configuration
 */
static void et131x_enable_interrupts(struct et131x_adapter *adapter)
{
	u32 mask;

	/* Enable all global interrupts */
	if (adapter->flowcontrol == FLOW_TXONLY ||
	    adapter->flowcontrol == FLOW_BOTH)
		mask = INT_MASK_ENABLE;
	else
		mask = INT_MASK_ENABLE_NO_FLOW;

	writel(mask, &adapter->regs->global.int_mask);
}

/* et131x_disable_interrupts - interrupt disable
 *
 * Block all interrupts from the et131x device at the device itself
 */
static void et131x_disable_interrupts(struct et131x_adapter *adapter)
{
	/* Disable all global interrupts */
	writel(INT_MASK_DISABLE, &adapter->regs->global.int_mask);
}

/* et131x_tx_dma_disable - Stop of Tx_DMA on the ET1310 */
static void et131x_tx_dma_disable(struct et131x_adapter *adapter)
{
	/* Setup the transmit dma configuration register */
	writel(ET_TXDMA_CSR_HALT | ET_TXDMA_SNGL_EPKT,
	       &adapter->regs->txdma.csr);
}

/* et131x_enable_txrx - Enable tx/rx queues */
static void et131x_enable_txrx(struct net_device *netdev)
{
	struct et131x_adapter *adapter = netdev_priv(netdev);

	/* Enable the Tx and Rx DMA engines (if not already enabled) */
	et131x_rx_dma_enable(adapter);
	et131x_tx_dma_enable(adapter);

	/* Enable device interrupts */
	if (adapter->flags & FMP_ADAPTER_INTERRUPT_IN_USE)
		et131x_enable_interrupts(adapter);

	/* We're ready to move some data, so start the queue */
	netif_start_queue(netdev);
}

/* et131x_disable_txrx - Disable tx/rx queues */
static void et131x_disable_txrx(struct net_device *netdev)
{
	struct et131x_adapter *adapter = netdev_priv(netdev);

	/* First thing is to stop the queue */
	netif_stop_queue(netdev);

	/* Stop the Tx and Rx DMA engines */
	et131x_rx_dma_disable(adapter);
	et131x_tx_dma_disable(adapter);

	/* Disable device interrupts */
	et131x_disable_interrupts(adapter);
}

/* et131x_init_send - Initialize send data structures */
static void et131x_init_send(struct et131x_adapter *adapter)
{
	u32 ct;
	struct tx_ring *tx_ring = &adapter->tx_ring;
	struct tcb *tcb = tx_ring->tcb_ring;

	tx_ring->tcb_qhead = tcb;

	memset(tcb, 0, sizeof(struct tcb) * NUM_TCB);

	/* Go through and set up each TCB */
	for (ct = 0; ct++ < NUM_TCB; tcb++)
		/* Set the link pointer in HW TCB to the next TCB in the
		 * chain
		 */
		tcb->next = tcb + 1;

	/* Set the tail pointer */
	tcb--;
	tx_ring->tcb_qtail = tcb;
	tcb->next = NULL;
	/* Curr send queue should now be empty */
	tx_ring->send_head = NULL;
	tx_ring->send_tail = NULL;
}

/* et1310_enable_phy_coma - called when network cable is unplugged
 *
 * The driver receives a phy status change interrupt while in D0 and checks
 * that phy_status is down.
 *
 * -- gate off JAGCore;
 * -- set gigE PHY in Coma mode
 * -- wake on phy_interrupt; Perform software reset JAGCore,
 *    re-initialize jagcore and gigE PHY
 *
 * Add D0-ASPM-PhyLinkDown Support:
 * -- while in D0, when there is a phy_interrupt indicating phy link
 *    down status, call the MPSetPhyComa routine to enter this active
 *    state power saving mode
 * -- while in D0-ASPM-PhyLinkDown mode, when there is a phy_interrupt
 *    indicating linkup status, call the MPDisablePhyComa routine to
 *    restore JAGCore and gigE PHY
 */
static void et1310_enable_phy_coma(struct et131x_adapter *adapter)
{
	u32 pmcsr = readl(&adapter->regs->global.pm_csr);

	/* Save the GbE PHY speed and duplex modes. Need to restore this
	 * when cable is plugged back in
	 */

	/* Stop sending packets. */
	adapter->flags |= FMP_ADAPTER_LOWER_POWER;

	/* Wait for outstanding Receive packets */
	et131x_disable_txrx(adapter->netdev);

	/* Gate off JAGCore 3 clock domains */
	pmcsr &= ~ET_PMCSR_INIT;
	writel(pmcsr, &adapter->regs->global.pm_csr);

	/* Program gigE PHY in to Coma mode */
	pmcsr |= ET_PM_PHY_SW_COMA;
	writel(pmcsr, &adapter->regs->global.pm_csr);
}

/* et1310_disable_phy_coma - Disable the Phy Coma Mode */
static void et1310_disable_phy_coma(struct et131x_adapter *adapter)
{
	u32 pmcsr;

	pmcsr = readl(&adapter->regs->global.pm_csr);

	/* Disable phy_sw_coma register and re-enable JAGCore clocks */
	pmcsr |= ET_PMCSR_INIT;
	pmcsr &= ~ET_PM_PHY_SW_COMA;
	writel(pmcsr, &adapter->regs->global.pm_csr);

	/* Restore the GbE PHY speed and duplex modes;
	 * Reset JAGCore; re-configure and initialize JAGCore and gigE PHY
	 */

	/* Re-initialize the send structures */
	et131x_init_send(adapter);

	/* Bring the device back to the state it was during init prior to
	 * autonegotiation being complete. This way, when we get the auto-neg
	 * complete interrupt, we can complete init by calling ConfigMacREGS2.
	 */
	et131x_soft_reset(adapter);

	/* setup et1310 as per the documentation ?? */
	et131x_adapter_setup(adapter);

	/* Allow Tx to restart */
	adapter->flags &= ~FMP_ADAPTER_LOWER_POWER;

	et131x_enable_txrx(adapter->netdev);
}

static inline u32 bump_free_buff_ring(u32 *free_buff_ring, u32 limit)
{
	u32 tmp_free_buff_ring = *free_buff_ring;

	tmp_free_buff_ring++;
	/* This works for all cases where limit < 1024. The 1023 case
	 * works because 1023++ is 1024 which means the if condition is not
	 * taken but the carry of the bit into the wrap bit toggles the wrap
	 * value correctly
	 */
	if ((tmp_free_buff_ring & ET_DMA10_MASK) > limit) {
		tmp_free_buff_ring &= ~ET_DMA10_MASK;
		tmp_free_buff_ring ^= ET_DMA10_WRAP;
	}
	/* For the 1023 case */
	tmp_free_buff_ring &= (ET_DMA10_MASK | ET_DMA10_WRAP);
	*free_buff_ring = tmp_free_buff_ring;
	return tmp_free_buff_ring;
}
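
/* Worked trace of the wrap logic above, assuming ET_DMA10_MASK is 0x3ff
 * and ET_DMA10_WRAP is 0x400 (a 10-bit index plus a wrap bit):
 * - limit = 511, value = 0x1ff (index 511): ++ gives 0x200; 512 > 511, so
 *   the index clears to 0 and the wrap bit toggles -> 0x400.
 * - limit = 1023, value = 0x3ff: ++ gives 0x400; (0x400 & 0x3ff) == 0, so
 *   the if is skipped, but the carry has already flipped the wrap bit,
 *   which the final mask preserves -> index 0, wrap toggled.
 */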

/* et131x_rx_dma_memory_alloc
 *
 * Allocates Free buffer ring 1 for sure, free buffer ring 0 if required,
 * and the Packet Status Ring.
 */
static int et131x_rx_dma_memory_alloc(struct et131x_adapter *adapter)
{
	u8 id;
	u32 i, j;
	u32 bufsize;
	u32 pktstat_ringsize;
	u32 fbr_chunksize;
	struct rx_ring *rx_ring = &adapter->rx_ring;
	struct fbr_lookup *fbr;

	/* Alloc memory for the lookup table */
	rx_ring->fbr[0] = kmalloc(sizeof(*fbr), GFP_KERNEL);
	if (rx_ring->fbr[0] == NULL)
		return -ENOMEM;
	rx_ring->fbr[1] = kmalloc(sizeof(*fbr), GFP_KERNEL);
	if (rx_ring->fbr[1] == NULL)
		return -ENOMEM;

	/* The first thing we will do is configure the sizes of the buffer
	 * rings. These will change based on jumbo packet support. Larger
	 * jumbo packets increases the size of each entry in FBR0, and the
	 * number of entries in FBR0, while at the same time decreasing the
	 * number of entries in FBR1.
	 *
	 * FBR1 holds "large" frames, FBR0 holds "small" frames. If FBR1
	 * entries are huge in order to accommodate a "jumbo" frame, then it
	 * will have fewer entries. Conversely, FBR0 will now be relied upon
	 * to carry more "normal" frames, thus its entry size also increases
	 * and the number of entries goes up too (since it now carries
	 * "small" + "regular" packets).
	 *
	 * In this scheme, we try to maintain 512 entries between the two
	 * rings. Also, FBR1 remains a constant size - when its size doubles
	 * the number of entries halves. FBR0 increases in size, however.
	 */
	if (adapter->registry_jumbo_packet < 2048) {
		rx_ring->fbr[0]->buffsize = 256;
		rx_ring->fbr[0]->num_entries = 512;
		rx_ring->fbr[1]->buffsize = 2048;
		rx_ring->fbr[1]->num_entries = 512;
	} else if (adapter->registry_jumbo_packet < 4096) {
		rx_ring->fbr[0]->buffsize = 512;
		rx_ring->fbr[0]->num_entries = 1024;
		rx_ring->fbr[1]->buffsize = 4096;
		rx_ring->fbr[1]->num_entries = 512;
	} else {
		rx_ring->fbr[0]->buffsize = 1024;
		rx_ring->fbr[0]->num_entries = 768;
		rx_ring->fbr[1]->buffsize = 16384;
		rx_ring->fbr[1]->num_entries = 128;
	}

	rx_ring->psr_num_entries = rx_ring->fbr[0]->num_entries +
				   rx_ring->fbr[1]->num_entries;

	for (id = 0; id < NUM_FBRS; id++) {
		fbr = rx_ring->fbr[id];
		/* Allocate an area of memory for Free Buffer Ring */
		bufsize = sizeof(struct fbr_desc) * fbr->num_entries;
		fbr->ring_virtaddr = dma_alloc_coherent(&adapter->pdev->dev,
							bufsize,
							&fbr->ring_physaddr,
							GFP_KERNEL);
		if (!fbr->ring_virtaddr) {
			dev_err(&adapter->pdev->dev,
				"Cannot alloc memory for Free Buffer Ring %d\n",
				id);
			return -ENOMEM;
		}
	}

	for (id = 0; id < NUM_FBRS; id++) {
		fbr = rx_ring->fbr[id];
		fbr_chunksize = (FBR_CHUNKS * fbr->buffsize);

		for (i = 0; i < fbr->num_entries / FBR_CHUNKS; i++) {
			dma_addr_t fbr_tmp_physaddr;

			fbr->mem_virtaddrs[i] = dma_alloc_coherent(
					&adapter->pdev->dev, fbr_chunksize,
					&fbr->mem_physaddrs[i],
					GFP_KERNEL);

			if (!fbr->mem_virtaddrs[i]) {
				dev_err(&adapter->pdev->dev,
					"Could not alloc memory\n");
				return -ENOMEM;
			}

			/* See NOTE in "Save Physical Address" comment above */
			fbr_tmp_physaddr = fbr->mem_physaddrs[i];

			for (j = 0; j < FBR_CHUNKS; j++) {
				u32 index = (i * FBR_CHUNKS) + j;

				/* Save the Virtual address of this index for
				 * quick access later
				 */
				fbr->virt[index] = (u8 *)fbr->mem_virtaddrs[i] +
						   (j * fbr->buffsize);

				/* now store the physical address in the
				 * descriptor so the device can access it
				 */
				fbr->bus_high[index] =
						upper_32_bits(fbr_tmp_physaddr);
				fbr->bus_low[index] =
						lower_32_bits(fbr_tmp_physaddr);

				fbr_tmp_physaddr += fbr->buffsize;
			}
		}
	}

	/* Allocate an area of memory for FIFO of Packet Status ring entries */
	pktstat_ringsize =
		sizeof(struct pkt_stat_desc) * rx_ring->psr_num_entries;

	rx_ring->ps_ring_virtaddr = dma_alloc_coherent(&adapter->pdev->dev,
						pktstat_ringsize,
						&rx_ring->ps_ring_physaddr,
						GFP_KERNEL);

	if (!rx_ring->ps_ring_virtaddr) {
		dev_err(&adapter->pdev->dev,
			"Cannot alloc memory for Packet Status Ring\n");
		return -ENOMEM;
	}

	/* NOTE : dma_alloc_coherent(), used above to alloc DMA regions,
	 * ALWAYS returns SAC (32-bit) addresses. If DAC (64-bit) addresses
	 * are ever returned, make sure the high part is retrieved here before
	 * storing the adjusted address.
	 */

	/* Allocate an area of memory for writeback of status information */
	rx_ring->rx_status_block = dma_alloc_coherent(&adapter->pdev->dev,
					sizeof(struct rx_status_block),
					&rx_ring->rx_status_bus,
					GFP_KERNEL);
	if (!rx_ring->rx_status_block) {
		dev_err(&adapter->pdev->dev,
			"Cannot alloc memory for Status Block\n");
		return -ENOMEM;
	}
	rx_ring->num_rfd = NIC_DEFAULT_NUM_RFD;

	/* The RFDs are going to be put on lists later on, so initialize the
	 * lists now.
	 */
	INIT_LIST_HEAD(&rx_ring->recv_list);
	return 0;
}
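
/* A sketch of the chunked layout built above, assuming FBR_CHUNKS is 4 and
 * the default 2k config (fbr[1]: 512 entries of 2048 bytes): the loop makes
 * 512 / 4 = 128 coherent allocations of 4 * 2048 = 8192 bytes each, and
 * entry index = i * 4 + j then points fbr->virt[index] at offset j * 2048
 * within chunk i, with bus_high/bus_low holding the matching DMA address
 * split into 32-bit halves.
 */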

/* et131x_rx_dma_memory_free - Free all memory allocated within this module */
static void et131x_rx_dma_memory_free(struct et131x_adapter *adapter)
{
	u8 id;
	u32 index;
	u32 bufsize;
	u32 pktstat_ringsize;
	struct rfd *rfd;
	struct rx_ring *rx_ring = &adapter->rx_ring;
	struct fbr_lookup *fbr;

	/* Free RFDs and associated packet descriptors */
	WARN_ON(rx_ring->num_ready_recv != rx_ring->num_rfd);

	while (!list_empty(&rx_ring->recv_list)) {
		rfd = list_entry(rx_ring->recv_list.next,
				 struct rfd, list_node);

		list_del(&rfd->list_node);
		rfd->skb = NULL;
		kfree(rfd);
	}

	/* Free Free Buffer Rings */
	for (id = 0; id < NUM_FBRS; id++) {
		fbr = rx_ring->fbr[id];

		if (!fbr || !fbr->ring_virtaddr)
			continue;

		/* First the packet memory */
		for (index = 0;
		     index < fbr->num_entries / FBR_CHUNKS;
		     index++) {
			if (fbr->mem_virtaddrs[index]) {
				bufsize = fbr->buffsize * FBR_CHUNKS;

				dma_free_coherent(&adapter->pdev->dev,
						  bufsize,
						  fbr->mem_virtaddrs[index],
						  fbr->mem_physaddrs[index]);

				fbr->mem_virtaddrs[index] = NULL;
			}
		}

		bufsize = sizeof(struct fbr_desc) * fbr->num_entries;

		dma_free_coherent(&adapter->pdev->dev,
				  bufsize,
				  fbr->ring_virtaddr,
				  fbr->ring_physaddr);

		fbr->ring_virtaddr = NULL;
	}

	/* Free Packet Status Ring */
	if (rx_ring->ps_ring_virtaddr) {
		pktstat_ringsize = sizeof(struct pkt_stat_desc) *
				   rx_ring->psr_num_entries;

		dma_free_coherent(&adapter->pdev->dev, pktstat_ringsize,
				  rx_ring->ps_ring_virtaddr,
				  rx_ring->ps_ring_physaddr);

		rx_ring->ps_ring_virtaddr = NULL;
	}

	/* Free area of memory for the writeback of status information */
	if (rx_ring->rx_status_block) {
		dma_free_coherent(&adapter->pdev->dev,
				  sizeof(struct rx_status_block),
				  rx_ring->rx_status_block,
				  rx_ring->rx_status_bus);
		rx_ring->rx_status_block = NULL;
	}

	/* Free the FBR Lookup Table */
	kfree(rx_ring->fbr[0]);
	kfree(rx_ring->fbr[1]);

	/* Reset Counters */
	rx_ring->num_ready_recv = 0;
}

/* et131x_init_recv - Initialize receive data structures */
static int et131x_init_recv(struct et131x_adapter *adapter)
{
	struct rfd *rfd;
	u32 rfdct;
	struct rx_ring *rx_ring = &adapter->rx_ring;

	/* Setup each RFD */
	for (rfdct = 0; rfdct < rx_ring->num_rfd; rfdct++) {
		rfd = kzalloc(sizeof(*rfd), GFP_ATOMIC | GFP_DMA);
		if (!rfd)
			return -ENOMEM;

		rfd->skb = NULL;

		/* Add this RFD to the recv_list */
		list_add_tail(&rfd->list_node, &rx_ring->recv_list);

		/* Increment the available RFD's */
		rx_ring->num_ready_recv++;
	}

	return 0;
}

/* et131x_set_rx_dma_timer - Set the heartbeat timer according to line rate */
static void et131x_set_rx_dma_timer(struct et131x_adapter *adapter)
{
	struct phy_device *phydev = adapter->phydev;

	/* For version B silicon, we do not use the RxDMA timer for 10 and 100
	 * Mbits/s line rates. We do not enable any RxDMA interrupt coalescing.
	 */
	if ((phydev->speed == SPEED_100) || (phydev->speed == SPEED_10)) {
		writel(0, &adapter->regs->rxdma.max_pkt_time);
		writel(1, &adapter->regs->rxdma.num_pkt_done);
	}
}
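
/* Net effect of the two writes above: a max_pkt_time of 0 combined with a
 * num_pkt_done of 1 makes the RxDMA engine interrupt after every received
 * packet rather than coalescing, which is the intent at 10/100 Mbit/s on
 * version B silicon.
 */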

/* nic_return_rfd - Recycle a RFD and put it back onto the receive list
 * @adapter: pointer to our adapter
 * @rfd: pointer to the RFD
 */
static void nic_return_rfd(struct et131x_adapter *adapter, struct rfd *rfd)
{
	struct rx_ring *rx_local = &adapter->rx_ring;
	struct rxdma_regs __iomem *rx_dma = &adapter->regs->rxdma;
	u16 buff_index = rfd->bufferindex;
	u8 ring_index = rfd->ringindex;
	unsigned long flags;
	struct fbr_lookup *fbr = rx_local->fbr[ring_index];

	/* We don't use any of the OOB data besides status. Otherwise, we
	 * need to clean up OOB data
	 */
	if (buff_index < fbr->num_entries) {
		u32 free_buff_ring;
		u32 __iomem *offset;
		struct fbr_desc *next;

		if (ring_index == 0)
			offset = &rx_dma->fbr0_full_offset;
		else
			offset = &rx_dma->fbr1_full_offset;

		next = (struct fbr_desc *)(fbr->ring_virtaddr) +
		       INDEX10(fbr->local_full);

		/* Handle the Free Buffer Ring advancement here. Write
		 * the PA / Buffer Index for the returned buffer into
		 * the oldest (next to be freed) FBR entry
		 */
		next->addr_hi = fbr->bus_high[buff_index];
		next->addr_lo = fbr->bus_low[buff_index];
		next->word2 = buff_index;

		free_buff_ring = bump_free_buff_ring(&fbr->local_full,
						     fbr->num_entries - 1);
		writel(free_buff_ring, offset);
	} else {
		dev_err(&adapter->pdev->dev,
			"%s illegal Buffer Index returned\n", __func__);
	}

	/* The processing on this RFD is done, so put it back on the tail of
	 * our list
	 */
	spin_lock_irqsave(&adapter->rcv_lock, flags);
	list_add_tail(&rfd->list_node, &rx_local->recv_list);
	rx_local->num_ready_recv++;
	spin_unlock_irqrestore(&adapter->rcv_lock, flags);

	WARN_ON(rx_local->num_ready_recv > rx_local->num_rfd);
}

/* nic_rx_pkts - Checks the hardware for available packets
 *
 * Returns rfd, a pointer to our MPRFD.
 *
 * Checks the hardware for available packets, using completion ring
 * If packets are available, it gets an RFD from the recv_list, attaches
 * the packet to it, puts the RFD in the RecvPendList, and also returns
 * the pointer to the RFD.
 */
static struct rfd *nic_rx_pkts(struct et131x_adapter *adapter)
{
	struct rx_ring *rx_local = &adapter->rx_ring;
	struct rx_status_block *status;
	struct pkt_stat_desc *psr;
	struct rfd *rfd;
	unsigned long flags;
	struct list_head *element;
	u8 ring_index;
	u16 buff_index;
	u32 len;
	u32 word0;
	u32 word1;
	struct sk_buff *skb;
	struct fbr_lookup *fbr;

	/* RX Status block is written by the DMA engine prior to every
	 * interrupt. It contains the next to be used entry in the Packet
	 * Status Ring, and also the two Free Buffer rings.
	 */
	status = rx_local->rx_status_block;
	word1 = status->word1 >> 16;	/* Get the useful bits */

	/* Check the PSR and wrap bits do not match */
	if ((word1 & 0x1FFF) == (rx_local->local_psr_full & 0x1FFF))
		return NULL; /* Looks like this ring is not updated yet */

	/* The packet status ring indicates that data is available. */
	psr = (struct pkt_stat_desc *)(rx_local->ps_ring_virtaddr) +
	      (rx_local->local_psr_full & 0xFFF);

	/* Grab any information that is required once the PSR is advanced,
	 * since we can no longer rely on the memory being accurate
	 */
	len = psr->word1 & 0xFFFF;
	ring_index = (psr->word1 >> 26) & 0x03;
	fbr = rx_local->fbr[ring_index];
	buff_index = (psr->word1 >> 16) & 0x3FF;
	word0 = psr->word0;

	/* Indicate that we have used this PSR entry. */
	/* FIXME wrap 12 */
	add_12bit(&rx_local->local_psr_full, 1);
	if ((rx_local->local_psr_full & 0xFFF) >
	    rx_local->psr_num_entries - 1) {
		/* Clear psr full and toggle the wrap bit */
		rx_local->local_psr_full &= ~0xFFF;
		rx_local->local_psr_full ^= 0x1000;
	}

	writel(rx_local->local_psr_full, &adapter->regs->rxdma.psr_full_offset);

	if (ring_index > 1 || buff_index > fbr->num_entries - 1) {
		/* Illegal buffer or ring index cannot be used by S/W*/
		dev_err(&adapter->pdev->dev,
			"NICRxPkts PSR Entry %d indicates length of %d and/or bad bi(%d)\n",
			rx_local->local_psr_full & 0xFFF, len, buff_index);
		return NULL;
	}

	/* Get and fill the RFD. */
	spin_lock_irqsave(&adapter->rcv_lock, flags);

	element = rx_local->recv_list.next;
	rfd = list_entry(element, struct rfd, list_node);

	if (!rfd) {
		spin_unlock_irqrestore(&adapter->rcv_lock, flags);
		return NULL;
	}

	list_del(&rfd->list_node);
	rx_local->num_ready_recv--;

	spin_unlock_irqrestore(&adapter->rcv_lock, flags);

	rfd->bufferindex = buff_index;
	rfd->ringindex = ring_index;

	/* In V1 silicon, there is a bug which screws up filtering of runt
	 * packets. Therefore runt packet filtering is disabled in the MAC and
	 * the packets are dropped here. They are also counted here.
	 */
	if (len < (NIC_MIN_PACKET_SIZE + 4)) {
		adapter->stats.rx_other_errs++;
		rfd->len = 0;
		goto out;
	}

	if ((word0 & ALCATEL_MULTICAST_PKT) && !(word0 & ALCATEL_BROADCAST_PKT))
		adapter->stats.multicast_pkts_rcvd++;

	rfd->len = len;

	skb = dev_alloc_skb(rfd->len + 2);
	if (!skb) {
		dev_err(&adapter->pdev->dev, "Couldn't alloc an SKB for Rx\n");
		return NULL;
	}

	adapter->netdev->stats.rx_bytes += rfd->len;

	memcpy(skb_put(skb, rfd->len), fbr->virt[buff_index], rfd->len);

	skb->protocol = eth_type_trans(skb, adapter->netdev);
	skb->ip_summed = CHECKSUM_NONE;
	netif_receive_skb(skb);

out:
	nic_return_rfd(adapter, rfd);
	return rfd;
}
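
/* For reference, the psr->word1 decoding used above packs three fields into
 * one 32-bit status word: bits 0-15 carry the frame length, bits 16-25 the
 * free-buffer index within its ring, and bits 26-27 select which free
 * buffer ring (0 or 1) received the frame.
 */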

/* et131x_handle_recv_pkts - Interrupt handler for receive processing
 *
 * Assumption, Rcv spinlock has been acquired.
 */
static int et131x_handle_recv_pkts(struct et131x_adapter *adapter, int budget)
{
	struct rfd *rfd = NULL;
	int count = 0;
	int limit = budget;
	bool done = true;
	struct rx_ring *rx_ring = &adapter->rx_ring;

	if (budget > MAX_PACKETS_HANDLED)
		limit = MAX_PACKETS_HANDLED;

	/* Process up to available RFD's */
	while (count < limit) {
		if (list_empty(&rx_ring->recv_list)) {
			WARN_ON(rx_ring->num_ready_recv != 0);
			done = false;
			break;
		}

		rfd = nic_rx_pkts(adapter);

		if (rfd == NULL)
			break;

		/* Do not receive any packets until a filter has been set.
		 * Do not receive any packets until we have link.
		 * If length is zero, return the RFD in order to advance the
		 * Free buffer ring.
		 */
		if (!adapter->packet_filter ||
		    !netif_carrier_ok(adapter->netdev) ||
		    rfd->len == 0)
			continue;

		/* Increment the number of packets we received */
		adapter->netdev->stats.rx_packets++;

		/* Set the status on the packet, either resources or success */
		if (rx_ring->num_ready_recv < RFD_LOW_WATER_MARK)
			dev_warn(&adapter->pdev->dev, "RFD's are running out\n");

		count++;
	}

	if (count == limit || !done) {
		rx_ring->unfinished_receives = true;
		writel(PARM_TX_TIME_INT_DEF * NANO_IN_A_MICRO,
		       &adapter->regs->global.watchdog_timer);
	} else {
		/* Watchdog timer will disable itself if appropriate. */
		rx_ring->unfinished_receives = false;
	}

	return count;
}
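
/* Usage note: this routine is the receive half of the driver's deferred
 * interrupt work; the caller passes its remaining budget and gets back the
 * number of packets consumed. Capping the pass at MAX_PACKETS_HANDLED and
 * re-arming the watchdog when the cap is hit guarantees another pass even
 * if no further interrupt fires.
 */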

/* et131x_tx_dma_memory_alloc
 *
 * Allocates memory that will be visible both to the device and to the CPU.
 * The OS will pass us packets, pointers to which we will insert in the Tx
 * Descriptor queue. The device will read this queue to find the packets in
 * memory. The device will update the "status" in memory each time it xmits a
 * packet.
 */
static int et131x_tx_dma_memory_alloc(struct et131x_adapter *adapter)
{
	int desc_size = 0;
	struct tx_ring *tx_ring = &adapter->tx_ring;

	/* Allocate memory for the TCB's (Transmit Control Block) */
	tx_ring->tcb_ring = kcalloc(NUM_TCB, sizeof(struct tcb),
				    GFP_ATOMIC | GFP_DMA);
	if (!tx_ring->tcb_ring)
		return -ENOMEM;

	desc_size = (sizeof(struct tx_desc) * NUM_DESC_PER_RING_TX);
	tx_ring->tx_desc_ring = dma_alloc_coherent(&adapter->pdev->dev,
						   desc_size,
						   &tx_ring->tx_desc_ring_pa,
						   GFP_KERNEL);
	if (!tx_ring->tx_desc_ring) {
		dev_err(&adapter->pdev->dev,
			"Cannot alloc memory for Tx Ring\n");
		return -ENOMEM;
	}

	/* Save physical address
	 *
	 * NOTE: dma_alloc_coherent(), used above to alloc DMA regions,
	 * ALWAYS returns SAC (32-bit) addresses. If DAC (64-bit) addresses
	 * are ever returned, make sure the high part is retrieved here before
	 * storing the adjusted address.
	 */
	/* Allocate memory for the Tx status block */
	tx_ring->tx_status = dma_alloc_coherent(&adapter->pdev->dev,
						sizeof(u32),
						&tx_ring->tx_status_pa,
						GFP_KERNEL);
	/* Check the returned virtual address; the physical handle is only
	 * valid when the allocation itself succeeded.
	 */
	if (!tx_ring->tx_status) {
		dev_err(&adapter->pdev->dev,
			"Cannot alloc memory for Tx status block\n");
		return -ENOMEM;
	}
	return 0;
}

/* et131x_tx_dma_memory_free - Free all memory allocated within this module */
static void et131x_tx_dma_memory_free(struct et131x_adapter *adapter)
{
	int desc_size = 0;
	struct tx_ring *tx_ring = &adapter->tx_ring;

	if (tx_ring->tx_desc_ring) {
		/* Free memory relating to Tx rings here */
		desc_size = (sizeof(struct tx_desc) * NUM_DESC_PER_RING_TX);
		dma_free_coherent(&adapter->pdev->dev,
				  desc_size,
				  tx_ring->tx_desc_ring,
				  tx_ring->tx_desc_ring_pa);
		tx_ring->tx_desc_ring = NULL;
	}

	/* Free memory for the Tx status block */
	if (tx_ring->tx_status) {
		dma_free_coherent(&adapter->pdev->dev,
				  sizeof(u32),
				  tx_ring->tx_status,
				  tx_ring->tx_status_pa);

		tx_ring->tx_status = NULL;
	}
	/* Free the memory for the tcb structures */
	kfree(tx_ring->tcb_ring);
}

/* nic_send_packet - NIC specific send handler for version B silicon.
 * @adapter: pointer to our adapter
 * @tcb: pointer to struct tcb
 */
static int nic_send_packet(struct et131x_adapter *adapter, struct tcb *tcb)
{
	u32 i;
	struct tx_desc desc[24];	/* 24 x 16 byte */
	u32 frag = 0;
	u32 thiscopy, remainder;
	struct sk_buff *skb = tcb->skb;
	u32 nr_frags = skb_shinfo(skb)->nr_frags + 1;
	struct skb_frag_struct *frags = &skb_shinfo(skb)->frags[0];
	struct phy_device *phydev = adapter->phydev;
	dma_addr_t dma_addr;
	struct tx_ring *tx_ring = &adapter->tx_ring;

	/* Part of the optimizations of this send routine restrict us to
	 * sending 24 fragments at a pass. In practice we should never see
	 * more than 5 fragments.
	 *
	 * NOTE: The older version of this function (below) can handle any
	 * number of fragments. If needed, we can call this function,
	 * although it is less efficient.
	 */

	/* nr_frags should be no more than 18. */
	BUILD_BUG_ON(MAX_SKB_FRAGS + 1 > 23);

	memset(desc, 0, sizeof(struct tx_desc) * (nr_frags + 1));

	for (i = 0; i < nr_frags; i++) {
		/* If there is something in this element, lets get a
		 * descriptor from the ring and get the necessary data
		 */
		if (i == 0) {
			/* If the fragments are smaller than a standard MTU,
			 * then map them to a single descriptor in the Tx
			 * Desc ring. However, if they're larger, as is
			 * possible with support for jumbo packets, then
			 * split them each across 2 descriptors.
			 *
			 * This will work until we determine why the hardware
			 * doesn't seem to like large fragments.
			 */
			if (skb_headlen(skb) <= 1514) {
				/* Low 16bits are length, high is vlan and
				 * unused currently so zero
				 */
				desc[frag].len_vlan = skb_headlen(skb);
				dma_addr = dma_map_single(&adapter->pdev->dev,
							  skb->data,
							  skb_headlen(skb),
							  DMA_TO_DEVICE);
				desc[frag].addr_lo = lower_32_bits(dma_addr);
				desc[frag].addr_hi = upper_32_bits(dma_addr);
				frag++;
			} else {
				desc[frag].len_vlan = skb_headlen(skb) / 2;
				dma_addr = dma_map_single(&adapter->pdev->dev,
							  skb->data,
							  (skb_headlen(skb) / 2),
							  DMA_TO_DEVICE);
				desc[frag].addr_lo = lower_32_bits(dma_addr);
				desc[frag].addr_hi = upper_32_bits(dma_addr);
				frag++;

				desc[frag].len_vlan = skb_headlen(skb) / 2;
				dma_addr = dma_map_single(&adapter->pdev->dev,
							  skb->data +
							  (skb_headlen(skb) / 2),
							  (skb_headlen(skb) / 2),
							  DMA_TO_DEVICE);
				desc[frag].addr_lo = lower_32_bits(dma_addr);
				desc[frag].addr_hi = upper_32_bits(dma_addr);
				frag++;
			}
		} else {
			desc[frag].len_vlan = frags[i - 1].size;
			dma_addr = skb_frag_dma_map(&adapter->pdev->dev,
						    &frags[i - 1],
						    0,
						    frags[i - 1].size,
						    DMA_TO_DEVICE);
			desc[frag].addr_lo = lower_32_bits(dma_addr);
			desc[frag].addr_hi = upper_32_bits(dma_addr);
			frag++;
		}
	}

	if (phydev && phydev->speed == SPEED_1000) {
		if (++tx_ring->since_irq == PARM_TX_NUM_BUFS_DEF) {
			/* Last element & Interrupt flag */
			desc[frag - 1].flags =
				TXDESC_FLAG_INTPROC | TXDESC_FLAG_LASTPKT;
			tx_ring->since_irq = 0;
		} else { /* Last element */
			desc[frag - 1].flags = TXDESC_FLAG_LASTPKT;
		}
	} else {
		desc[frag - 1].flags =
			TXDESC_FLAG_INTPROC | TXDESC_FLAG_LASTPKT;
	}

	desc[0].flags |= TXDESC_FLAG_FIRSTPKT;

	tcb->index_start = tx_ring->send_idx;
	tcb->stale = 0;

	thiscopy = NUM_DESC_PER_RING_TX - INDEX10(tx_ring->send_idx);

	if (thiscopy >= frag) {
		remainder = 0;
		thiscopy = frag;
	} else {
		remainder = frag - thiscopy;
	}

	memcpy(tx_ring->tx_desc_ring + INDEX10(tx_ring->send_idx),
	       desc,
	       sizeof(struct tx_desc) * thiscopy);

	add_10bit(&tx_ring->send_idx, thiscopy);

	if (INDEX10(tx_ring->send_idx) == 0 ||
	    INDEX10(tx_ring->send_idx) == NUM_DESC_PER_RING_TX) {
		tx_ring->send_idx &= ~ET_DMA10_MASK;
		tx_ring->send_idx ^= ET_DMA10_WRAP;
	}

	if (remainder) {
		memcpy(tx_ring->tx_desc_ring,
		       desc + thiscopy,
		       sizeof(struct tx_desc) * remainder);

		add_10bit(&tx_ring->send_idx, remainder);
	}

	if (INDEX10(tx_ring->send_idx) == 0) {
		if (tx_ring->send_idx)
			tcb->index = NUM_DESC_PER_RING_TX - 1;
		else
			tcb->index = ET_DMA10_WRAP | (NUM_DESC_PER_RING_TX - 1);
	} else {
		tcb->index = tx_ring->send_idx - 1;
	}

	spin_lock(&adapter->tcb_send_qlock);

	if (tx_ring->send_tail)
		tx_ring->send_tail->next = tcb;
	else
		tx_ring->send_head = tcb;

	tx_ring->send_tail = tcb;

	WARN_ON(tcb->next != NULL);

	tx_ring->used++;

	spin_unlock(&adapter->tcb_send_qlock);

	/* Write the new write pointer back to the device. */
	writel(tx_ring->send_idx, &adapter->regs->txdma.service_request);

	/* For Gig only, we use Tx Interrupt coalescing. Enable the software
	 * timer to wake us up if this packet isn't followed by N more.
	 */
	if (phydev && phydev->speed == SPEED_1000) {
		writel(PARM_TX_TIME_INT_DEF * NANO_IN_A_MICRO,
		       &adapter->regs->global.watchdog_timer);
	}
	return 0;
}
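
/* Worked example of the two-part descriptor copy above, assuming
 * NUM_DESC_PER_RING_TX is 512: with send_idx at index 510 and frag = 4,
 * thiscopy = 512 - 510 = 2 descriptors go at the end of the ring, the
 * wrap check then clears the index and toggles the wrap bit, and the
 * remaining 2 descriptors are copied to index 0; tcb->index ends up at 1
 * with the new wrap state.
 */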

/* send_packet - Do the work to send a packet
 *
 * Assumption: Send spinlock has been acquired
 */
static int send_packet(struct sk_buff *skb, struct et131x_adapter *adapter)
{
	int status;
	struct tcb *tcb;
	unsigned long flags;
	struct tx_ring *tx_ring = &adapter->tx_ring;

	/* All packets must have at least a MAC address and a protocol type */
	if (skb->len < ETH_HLEN)
		return -EIO;

	/* Get a TCB for this packet */
	spin_lock_irqsave(&adapter->tcb_ready_qlock, flags);

	tcb = tx_ring->tcb_qhead;

	if (tcb == NULL) {
		spin_unlock_irqrestore(&adapter->tcb_ready_qlock, flags);
		return -ENOMEM;
	}

	tx_ring->tcb_qhead = tcb->next;

	if (tx_ring->tcb_qhead == NULL)
		tx_ring->tcb_qtail = NULL;

	spin_unlock_irqrestore(&adapter->tcb_ready_qlock, flags);

	tcb->skb = skb;
	tcb->next = NULL;

	/* Call the NIC specific send handler. */
	status = nic_send_packet(adapter, tcb);

	if (status != 0) {
		spin_lock_irqsave(&adapter->tcb_ready_qlock, flags);

		if (tx_ring->tcb_qtail)
			tx_ring->tcb_qtail->next = tcb;
		else
			/* Apparently ready Q is empty. */
			tx_ring->tcb_qhead = tcb;

		tx_ring->tcb_qtail = tcb;
		spin_unlock_irqrestore(&adapter->tcb_ready_qlock, flags);
		return status;
	}
	WARN_ON(tx_ring->used > NUM_TCB);
	return 0;
}

/* free_send_packet - Recycle a struct tcb
 * @adapter: pointer to our adapter
 * @tcb: pointer to struct tcb
 *
 * Complete the packet if necessary
 * Assumption - Send spinlock has been acquired
 */
static inline void free_send_packet(struct et131x_adapter *adapter,
				    struct tcb *tcb)
{
	unsigned long flags;
	struct tx_desc *desc = NULL;
	struct net_device_stats *stats = &adapter->netdev->stats;
	struct tx_ring *tx_ring = &adapter->tx_ring;
	u64 dma_addr;

	if (tcb->skb) {
		stats->tx_bytes += tcb->skb->len;

		/* Iterate through the TX descriptors on the ring
		 * corresponding to this packet and unmap the fragments
		 * they point to
		 */
		do {
			desc = tx_ring->tx_desc_ring +
			       INDEX10(tcb->index_start);

			dma_addr = desc->addr_lo;
			dma_addr |= (u64)desc->addr_hi << 32;

			dma_unmap_single(&adapter->pdev->dev,
					 dma_addr,
					 desc->len_vlan, DMA_TO_DEVICE);

			add_10bit(&tcb->index_start, 1);
			if (INDEX10(tcb->index_start) >=
			    NUM_DESC_PER_RING_TX) {
				tcb->index_start &= ~ET_DMA10_MASK;
				tcb->index_start ^= ET_DMA10_WRAP;
			}
		} while (desc != tx_ring->tx_desc_ring + INDEX10(tcb->index));

		dev_kfree_skb_any(tcb->skb);
	}

	memset(tcb, 0, sizeof(struct tcb));

	/* Add the TCB to the Ready Q */
	spin_lock_irqsave(&adapter->tcb_ready_qlock, flags);

	stats->tx_packets++;

	if (tx_ring->tcb_qtail)
		tx_ring->tcb_qtail->next = tcb;
	else
		/* Apparently ready Q is empty. */
		tx_ring->tcb_qhead = tcb;

	tx_ring->tcb_qtail = tcb;

	spin_unlock_irqrestore(&adapter->tcb_ready_qlock, flags);
	WARN_ON(tx_ring->used < 0);
}

/* et131x_free_busy_send_packets - Free and complete the stopped active sends
 *
 * Assumption - Send spinlock has been acquired
 */
static void et131x_free_busy_send_packets(struct et131x_adapter *adapter)
{
	struct tcb *tcb;
	unsigned long flags;
	u32 freed = 0;
	struct tx_ring *tx_ring = &adapter->tx_ring;

	/* Any packets being sent? Check the first TCB on the send list */
	spin_lock_irqsave(&adapter->tcb_send_qlock, flags);

	tcb = tx_ring->send_head;

	while (tcb != NULL && freed < NUM_TCB) {
		struct tcb *next = tcb->next;

		tx_ring->send_head = next;

		if (next == NULL)
			tx_ring->send_tail = NULL;

		tx_ring->used--;

		spin_unlock_irqrestore(&adapter->tcb_send_qlock, flags);

		freed++;
		free_send_packet(adapter, tcb);

		spin_lock_irqsave(&adapter->tcb_send_qlock, flags);

		tcb = tx_ring->send_head;
	}

	WARN_ON(freed == NUM_TCB);

	spin_unlock_irqrestore(&adapter->tcb_send_qlock, flags);

	tx_ring->used = 0;
}

/* et131x_handle_send_pkts - Interrupt handler for sending processing
 *
 * Re-claim the send resources, complete sends and get more to send from
 * the send wait queue.
 *
 * Assumption - Send spinlock has been acquired
 */
static void et131x_handle_send_pkts(struct et131x_adapter *adapter)
{
	unsigned long flags;
	u32 serviced;
	struct tcb *tcb;
	u32 index;
	struct tx_ring *tx_ring = &adapter->tx_ring;

	serviced = readl(&adapter->regs->txdma.new_service_complete);
	index = INDEX10(serviced);

	/* Has the ring wrapped? Process any descriptors that do not have
	 * the same "wrap" indicator as the current completion indicator
	 */
	spin_lock_irqsave(&adapter->tcb_send_qlock, flags);

	tcb = tx_ring->send_head;

	while (tcb &&
	       ((serviced ^ tcb->index) & ET_DMA10_WRAP) &&
	       index < INDEX10(tcb->index)) {
		tx_ring->used--;
		tx_ring->send_head = tcb->next;
		if (tcb->next == NULL)
			tx_ring->send_tail = NULL;

		spin_unlock_irqrestore(&adapter->tcb_send_qlock, flags);
		free_send_packet(adapter, tcb);
		spin_lock_irqsave(&adapter->tcb_send_qlock, flags);

		/* Goto the next packet */
		tcb = tx_ring->send_head;
	}
	while (tcb &&
	       !((serviced ^ tcb->index) & ET_DMA10_WRAP) &&
	       index > (tcb->index & ET_DMA10_MASK)) {
		tx_ring->used--;
		tx_ring->send_head = tcb->next;
		if (tcb->next == NULL)
			tx_ring->send_tail = NULL;

		spin_unlock_irqrestore(&adapter->tcb_send_qlock, flags);
		free_send_packet(adapter, tcb);
		spin_lock_irqsave(&adapter->tcb_send_qlock, flags);

		/* Goto the next packet */
		tcb = tx_ring->send_head;
	}

	/* Wake up the queue when we hit a low-water mark */
	if (tx_ring->used <= NUM_TCB / 3)
		netif_wake_queue(adapter->netdev);

	spin_unlock_irqrestore(&adapter->tcb_send_qlock, flags);
}
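
/* Completion-scan sketch for the two loops above: a TCB is done once the
 * hardware's "serviced" pointer has moved past it. While the wrap bits of
 * serviced and tcb->index differ, the hardware is one lap ahead, so TCBs
 * whose index is still above the serviced index belong to the previous lap
 * and can be freed; once the wrap bits match, only TCBs strictly below the
 * serviced index are complete.
 */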
3001
d2796743
ME
3002static int et131x_get_settings(struct net_device *netdev,
3003 struct ethtool_cmd *cmd)
3004{
3005 struct et131x_adapter *adapter = netdev_priv(netdev);
3006
3007 return phy_ethtool_gset(adapter->phydev, cmd);
3008}
3009
3010static int et131x_set_settings(struct net_device *netdev,
3011 struct ethtool_cmd *cmd)
3012{
3013 struct et131x_adapter *adapter = netdev_priv(netdev);
3014
3015 return phy_ethtool_sset(adapter->phydev, cmd);
3016}
3017
3018static int et131x_get_regs_len(struct net_device *netdev)
3019{
3020#define ET131X_REGS_LEN 256
3021 return ET131X_REGS_LEN * sizeof(u32);
3022}
3023
3024static void et131x_get_regs(struct net_device *netdev,
3025 struct ethtool_regs *regs, void *regs_data)
3026{
3027 struct et131x_adapter *adapter = netdev_priv(netdev);
3028 struct address_map __iomem *aregs = adapter->regs;
3029 u32 *regs_buff = regs_data;
3030 u32 num = 0;
c8b0a484 3031 u16 tmp;
d2796743
ME
3032
3033 memset(regs_data, 0, et131x_get_regs_len(netdev));
3034
3035 regs->version = (1 << 24) | (adapter->pdev->revision << 16) |
3036 adapter->pdev->device;
3037
3038 /* PHY regs */
c8b0a484
ME
3039 et131x_mii_read(adapter, MII_BMCR, &tmp);
3040 regs_buff[num++] = tmp;
3041 et131x_mii_read(adapter, MII_BMSR, &tmp);
3042 regs_buff[num++] = tmp;
3043 et131x_mii_read(adapter, MII_PHYSID1, &tmp);
3044 regs_buff[num++] = tmp;
3045 et131x_mii_read(adapter, MII_PHYSID2, &tmp);
3046 regs_buff[num++] = tmp;
3047 et131x_mii_read(adapter, MII_ADVERTISE, &tmp);
3048 regs_buff[num++] = tmp;
3049 et131x_mii_read(adapter, MII_LPA, &tmp);
3050 regs_buff[num++] = tmp;
3051 et131x_mii_read(adapter, MII_EXPANSION, &tmp);
3052 regs_buff[num++] = tmp;
d2796743 3053 /* Autoneg next page transmit reg */
c8b0a484
ME
3054 et131x_mii_read(adapter, 0x07, &tmp);
3055 regs_buff[num++] = tmp;
d2796743 3056 /* Link partner next page reg */
c8b0a484
ME
3057 et131x_mii_read(adapter, 0x08, &tmp);
3058 regs_buff[num++] = tmp;
3059 et131x_mii_read(adapter, MII_CTRL1000, &tmp);
3060 regs_buff[num++] = tmp;
3061 et131x_mii_read(adapter, MII_STAT1000, &tmp);
3062 regs_buff[num++] = tmp;
3063 et131x_mii_read(adapter, 0x0b, &tmp);
3064 regs_buff[num++] = tmp;
3065 et131x_mii_read(adapter, 0x0c, &tmp);
3066 regs_buff[num++] = tmp;
3067 et131x_mii_read(adapter, MII_MMD_CTRL, &tmp);
3068 regs_buff[num++] = tmp;
3069 et131x_mii_read(adapter, MII_MMD_DATA, &tmp);
3070 regs_buff[num++] = tmp;
3071 et131x_mii_read(adapter, MII_ESTATUS, &tmp);
3072 regs_buff[num++] = tmp;
3073
3074 et131x_mii_read(adapter, PHY_INDEX_REG, &tmp);
3075 regs_buff[num++] = tmp;
3076 et131x_mii_read(adapter, PHY_DATA_REG, &tmp);
3077 regs_buff[num++] = tmp;
3078 et131x_mii_read(adapter, PHY_MPHY_CONTROL_REG, &tmp);
3079 regs_buff[num++] = tmp;
3080 et131x_mii_read(adapter, PHY_LOOPBACK_CONTROL, &tmp);
3081 regs_buff[num++] = tmp;
3082 et131x_mii_read(adapter, PHY_LOOPBACK_CONTROL + 1, &tmp);
3083 regs_buff[num++] = tmp;
3084
3085 et131x_mii_read(adapter, PHY_REGISTER_MGMT_CONTROL, &tmp);
3086 regs_buff[num++] = tmp;
3087 et131x_mii_read(adapter, PHY_CONFIG, &tmp);
3088 regs_buff[num++] = tmp;
3089 et131x_mii_read(adapter, PHY_PHY_CONTROL, &tmp);
3090 regs_buff[num++] = tmp;
3091 et131x_mii_read(adapter, PHY_INTERRUPT_MASK, &tmp);
3092 regs_buff[num++] = tmp;
3093 et131x_mii_read(adapter, PHY_INTERRUPT_STATUS, &tmp);
3094 regs_buff[num++] = tmp;
3095 et131x_mii_read(adapter, PHY_PHY_STATUS, &tmp);
3096 regs_buff[num++] = tmp;
3097 et131x_mii_read(adapter, PHY_LED_1, &tmp);
3098 regs_buff[num++] = tmp;
3099 et131x_mii_read(adapter, PHY_LED_2, &tmp);
3100 regs_buff[num++] = tmp;
d2796743
ME
3101
3102 /* Global regs */
3103 regs_buff[num++] = readl(&aregs->global.txq_start_addr);
3104 regs_buff[num++] = readl(&aregs->global.txq_end_addr);
3105 regs_buff[num++] = readl(&aregs->global.rxq_start_addr);
3106 regs_buff[num++] = readl(&aregs->global.rxq_end_addr);
3107 regs_buff[num++] = readl(&aregs->global.pm_csr);
3108 regs_buff[num++] = adapter->stats.interrupt_status;
3109 regs_buff[num++] = readl(&aregs->global.int_mask);
3110 regs_buff[num++] = readl(&aregs->global.int_alias_clr_en);
3111 regs_buff[num++] = readl(&aregs->global.int_status_alias);
3112 regs_buff[num++] = readl(&aregs->global.sw_reset);
3113 regs_buff[num++] = readl(&aregs->global.slv_timer);
3114 regs_buff[num++] = readl(&aregs->global.msi_config);
3115 regs_buff[num++] = readl(&aregs->global.loopback);
3116 regs_buff[num++] = readl(&aregs->global.watchdog_timer);
3117
3118 /* TXDMA regs */
3119 regs_buff[num++] = readl(&aregs->txdma.csr);
3120 regs_buff[num++] = readl(&aregs->txdma.pr_base_hi);
3121 regs_buff[num++] = readl(&aregs->txdma.pr_base_lo);
3122 regs_buff[num++] = readl(&aregs->txdma.pr_num_des);
3123 regs_buff[num++] = readl(&aregs->txdma.txq_wr_addr);
3124 regs_buff[num++] = readl(&aregs->txdma.txq_wr_addr_ext);
3125 regs_buff[num++] = readl(&aregs->txdma.txq_rd_addr);
3126 regs_buff[num++] = readl(&aregs->txdma.dma_wb_base_hi);
3127 regs_buff[num++] = readl(&aregs->txdma.dma_wb_base_lo);
3128 regs_buff[num++] = readl(&aregs->txdma.service_request);
3129 regs_buff[num++] = readl(&aregs->txdma.service_complete);
3130 regs_buff[num++] = readl(&aregs->txdma.cache_rd_index);
3131 regs_buff[num++] = readl(&aregs->txdma.cache_wr_index);
3132 regs_buff[num++] = readl(&aregs->txdma.tx_dma_error);
3133 regs_buff[num++] = readl(&aregs->txdma.desc_abort_cnt);
3134 regs_buff[num++] = readl(&aregs->txdma.payload_abort_cnt);
3135 regs_buff[num++] = readl(&aregs->txdma.writeback_abort_cnt);
3136 regs_buff[num++] = readl(&aregs->txdma.desc_timeout_cnt);
3137 regs_buff[num++] = readl(&aregs->txdma.payload_timeout_cnt);
3138 regs_buff[num++] = readl(&aregs->txdma.writeback_timeout_cnt);
3139 regs_buff[num++] = readl(&aregs->txdma.desc_error_cnt);
3140 regs_buff[num++] = readl(&aregs->txdma.payload_error_cnt);
3141 regs_buff[num++] = readl(&aregs->txdma.writeback_error_cnt);
3142 regs_buff[num++] = readl(&aregs->txdma.dropped_tlp_cnt);
3143 regs_buff[num++] = readl(&aregs->txdma.new_service_complete);
3144 regs_buff[num++] = readl(&aregs->txdma.ethernet_packet_cnt);
3145
3146 /* RXDMA regs */
3147 regs_buff[num++] = readl(&aregs->rxdma.csr);
3148 regs_buff[num++] = readl(&aregs->rxdma.dma_wb_base_hi);
3149 regs_buff[num++] = readl(&aregs->rxdma.dma_wb_base_lo);
3150 regs_buff[num++] = readl(&aregs->rxdma.num_pkt_done);
3151 regs_buff[num++] = readl(&aregs->rxdma.max_pkt_time);
3152 regs_buff[num++] = readl(&aregs->rxdma.rxq_rd_addr);
3153 regs_buff[num++] = readl(&aregs->rxdma.rxq_rd_addr_ext);
3154 regs_buff[num++] = readl(&aregs->rxdma.rxq_wr_addr);
3155 regs_buff[num++] = readl(&aregs->rxdma.psr_base_hi);
3156 regs_buff[num++] = readl(&aregs->rxdma.psr_base_lo);
3157 regs_buff[num++] = readl(&aregs->rxdma.psr_num_des);
3158 regs_buff[num++] = readl(&aregs->rxdma.psr_avail_offset);
3159 regs_buff[num++] = readl(&aregs->rxdma.psr_full_offset);
3160 regs_buff[num++] = readl(&aregs->rxdma.psr_access_index);
3161 regs_buff[num++] = readl(&aregs->rxdma.psr_min_des);
3162 regs_buff[num++] = readl(&aregs->rxdma.fbr0_base_lo);
3163 regs_buff[num++] = readl(&aregs->rxdma.fbr0_base_hi);
3164 regs_buff[num++] = readl(&aregs->rxdma.fbr0_num_des);
3165 regs_buff[num++] = readl(&aregs->rxdma.fbr0_avail_offset);
3166 regs_buff[num++] = readl(&aregs->rxdma.fbr0_full_offset);
3167 regs_buff[num++] = readl(&aregs->rxdma.fbr0_rd_index);
3168 regs_buff[num++] = readl(&aregs->rxdma.fbr0_min_des);
3169 regs_buff[num++] = readl(&aregs->rxdma.fbr1_base_lo);
3170 regs_buff[num++] = readl(&aregs->rxdma.fbr1_base_hi);
3171 regs_buff[num++] = readl(&aregs->rxdma.fbr1_num_des);
3172 regs_buff[num++] = readl(&aregs->rxdma.fbr1_avail_offset);
3173 regs_buff[num++] = readl(&aregs->rxdma.fbr1_full_offset);
3174 regs_buff[num++] = readl(&aregs->rxdma.fbr1_rd_index);
3175 regs_buff[num++] = readl(&aregs->rxdma.fbr1_min_des);
3176}
3177
3178static void et131x_get_drvinfo(struct net_device *netdev,
3179 struct ethtool_drvinfo *info)
3180{
3181 struct et131x_adapter *adapter = netdev_priv(netdev);
3182
3183 strlcpy(info->driver, DRIVER_NAME, sizeof(info->driver));
3184 strlcpy(info->version, DRIVER_VERSION, sizeof(info->version));
3185 strlcpy(info->bus_info, pci_name(adapter->pdev),
3186 sizeof(info->bus_info));
3187}
3188
 3189static const struct ethtool_ops et131x_ethtool_ops = {
3190 .get_settings = et131x_get_settings,
3191 .set_settings = et131x_set_settings,
3192 .get_drvinfo = et131x_get_drvinfo,
3193 .get_regs_len = et131x_get_regs_len,
3194 .get_regs = et131x_get_regs,
 3195	.get_link	= ethtool_op_get_link,
 3196};
 3197
 3198/* et131x_hwaddr_init - set up the MAC Address on the ET1310 */
 3199static void et131x_hwaddr_init(struct et131x_adapter *adapter)
3200{
 3201	/* If we have our default MAC from init and no MAC address from
 3202	 * EEPROM, then we need to generate the last octet and set it on
 3203	 * the device
 3204	 */
 3205	if (is_zero_ether_addr(adapter->rom_addr)) {
 3206		/* We need to randomly generate the last octet so we
 3207		 * decrease our chances of setting the MAC address to the
 3208		 * same as another one of our cards in the system
 3209		 */
 3210		get_random_bytes(&adapter->addr[5], 1);
 3211		/* We have the default value in the register we are
 3212		 * working with, so we need to copy the current
 3213		 * address into the permanent address
 3214		 */
 3215		memcpy(adapter->rom_addr,
 3216		       adapter->addr, ETH_ALEN);
 3217	} else {
3218 /* We do not have an override address, so set the
3219 * current address to the permanent address and add
3220 * it to the device
3221 */
3222 memcpy(adapter->addr,
3223 adapter->rom_addr, ETH_ALEN);
3224 }
3225}
3226
 3227/* et131x_pci_init - initial PCI setup
3228 *
3229 * Perform the initial setup of PCI registers and if possible initialise
3230 * the MAC address. At this point the I/O registers have yet to be mapped
3231 */
3232static int et131x_pci_init(struct et131x_adapter *adapter,
 3233			   struct pci_dev *pdev)
 3234{
 3235	u16 max_payload;
3236 int i, rc;
3237
3238 rc = et131x_init_eeprom(adapter);
3239 if (rc < 0)
3240 goto out;
3241
 3242	if (!pci_is_pcie(pdev)) {
 3243		dev_err(&pdev->dev, "Missing PCIe capabilities\n");
 3244		goto err_out;
 3245	}
 3246
 3247	/* Let's set up the PORT LOGIC Register. */
 3248
 3249	/* Program the Ack/Nak latency and replay timers */
 3250	max_payload = pdev->pcie_mpss;
3251
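	/* pcie_mpss is the encoded Max_Payload_Size supported by the
	 * device (0 => 128 bytes, 1 => 256 bytes, ...); the acknak and
	 * replay tables below are indexed by that same encoding.
	 */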
3252 if (max_payload < 2) {
3253 static const u16 acknak[2] = { 0x76, 0xD0 };
3254 static const u16 replay[2] = { 0x1E0, 0x2ED };
3255
3256 if (pci_write_config_word(pdev, ET1310_PCI_ACK_NACK,
 3257					  acknak[max_payload])) {
 3258			dev_err(&pdev->dev,
 3259				"Could not write PCI config space for ACK/NAK\n");
 3260			goto err_out;
 3261		}
 3262		if (pci_write_config_word(pdev, ET1310_PCI_REPLAY,
 3263					  replay[max_payload])) {
 3264			dev_err(&pdev->dev,
 3265				"Could not write PCI config space for Replay Timer\n");
 3266			goto err_out;
3267 }
3268 }
3269
3270 /* l0s and l1 latency timers. We are using default values.
3271 * Representing 001 for L0s and 010 for L1
3272 */
3273 if (pci_write_config_byte(pdev, ET1310_PCI_L0L1LATENCY, 0x11)) {
3274 dev_err(&pdev->dev,
 3275			"Could not write PCI config space for Latency Timers\n");
 3276		goto err_out;
 3277	}
 3278
 3279	/* Change the max read size to 2k */
 3280	if (pcie_set_readrq(pdev, 2048)) {
 3281		dev_err(&pdev->dev,
 3282			"Couldn't change PCI config space for Max read size\n");
 3283		goto err_out;
3284 }
3285
3286 /* Get MAC address from config space if an eeprom exists, otherwise
3287 * the MAC address there will not be valid
3288 */
3289 if (!adapter->has_eeprom) {
3290 et131x_hwaddr_init(adapter);
3291 return 0;
3292 }
3293
3294 for (i = 0; i < ETH_ALEN; i++) {
3295 if (pci_read_config_byte(pdev, ET1310_PCI_MAC_ADDRESS + i,
 3296					  adapter->rom_addr + i)) {
 3297			dev_err(&pdev->dev, "Could not read PCI config space for MAC address\n");
 3298			goto err_out;
 3299		}
 3300	}
 3301	ether_addr_copy(adapter->addr, adapter->rom_addr);
3302out:
3303 return rc;
3304err_out:
3305 rc = -EIO;
3306 goto out;
3307}
3308
 3309/* et131x_error_timer_handler
3310 * @data: timer-specific variable; here a pointer to our adapter structure
3311 *
3312 * The routine called when the error timer expires, to track the number of
3313 * recurring errors.
3314 */
 3315static void et131x_error_timer_handler(unsigned long data)
 3316{
 3317	struct et131x_adapter *adapter = (struct et131x_adapter *)data;
 3318	struct phy_device *phydev = adapter->phydev;
 3319
 3320	if (et1310_in_phy_coma(adapter)) {
 3321		/* Bring the device immediately out of coma, to
 3322		 * prevent it from sleeping indefinitely; this
 3323		 * mechanism could be improved!
 3324		 */
3325 et1310_disable_phy_coma(adapter);
3326 adapter->boot_coma = 20;
3327 } else {
3328 et1310_update_macstat_host_counters(adapter);
3329 }
3330
3331 if (!phydev->link && adapter->boot_coma < 11)
3332 adapter->boot_coma++;
3333
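	/* boot_coma counts consecutive error-timer ticks without link;
	 * on the tenth tick the PHY is put into coma mode to save power.
	 */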
3334 if (adapter->boot_coma == 10) {
3335 if (!phydev->link) {
3336 if (!et1310_in_phy_coma(adapter)) {
3337 /* NOTE - This was originally a 'sync with
3338 * interrupt'. How to do that under Linux?
3339 */
3340 et131x_enable_interrupts(adapter);
3341 et1310_enable_phy_coma(adapter);
3342 }
3343 }
3344 }
3345
3346 /* This is a periodic timer, so reschedule */
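	/* TX_ERROR_PERIOD is presumably in milliseconds; multiplying by
	 * HZ / 1000 converts it to jiffies for mod_timer().
	 */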
 3347	mod_timer(&adapter->error_timer, jiffies + TX_ERROR_PERIOD * HZ / 1000);
3348}
3349
 3350/* et131x_adapter_memory_free - Free all memory allocated for use by Tx & Rx */
 3351static void et131x_adapter_memory_free(struct et131x_adapter *adapter)
 3352{
3353 et131x_tx_dma_memory_free(adapter);
3354 et131x_rx_dma_memory_free(adapter);
3355}
3356
 3357/* et131x_adapter_memory_alloc
 3358 * Allocate all the memory blocks for send, receive and others.
 3359 */
 3360static int et131x_adapter_memory_alloc(struct et131x_adapter *adapter)
3361{
3362 int status;
3363
3364 /* Allocate memory for the Tx Ring */
3365 status = et131x_tx_dma_memory_alloc(adapter);
 3366	if (status) {
 3367		dev_err(&adapter->pdev->dev,
 3368			"et131x_tx_dma_memory_alloc FAILED\n");
 3369		et131x_tx_dma_memory_free(adapter);
 3370		return status;
 3371	}
 3372	/* Receive buffer memory allocation */
 3373	status = et131x_rx_dma_memory_alloc(adapter);
 3374	if (status) {
 3375		dev_err(&adapter->pdev->dev,
 3376			"et131x_rx_dma_memory_alloc FAILED\n");
 3377		et131x_adapter_memory_free(adapter);
3378 return status;
3379 }
3380
3381 /* Init receive data structures */
3382 status = et131x_init_recv(adapter);
 3383	if (status) {
 3384		dev_err(&adapter->pdev->dev, "et131x_init_recv FAILED\n");
 3385		et131x_adapter_memory_free(adapter);
3386 }
3387 return status;
3388}
3389
3390static void et131x_adjust_link(struct net_device *netdev)
3391{
3392 struct et131x_adapter *adapter = netdev_priv(netdev);
3393 struct phy_device *phydev = adapter->phydev;
3394
3395 if (!phydev)
3396 return;
3397 if (phydev->link == adapter->link)
3398 return;
 3399
3400 /* Check to see if we are in coma mode and if
3401 * so, disable it because we will not be able
3402 * to read PHY values until we are out.
3403 */
3404 if (et1310_in_phy_coma(adapter))
3405 et1310_disable_phy_coma(adapter);
 3406
3407 adapter->link = phydev->link;
3408 phy_print_status(phydev);
 3409
3410 if (phydev->link) {
3411 adapter->boot_coma = 20;
 3412		if (phydev->speed == SPEED_10) {
 3413			u16 register18;
 3414
 3415			et131x_mii_read(adapter, PHY_MPHY_CONTROL_REG,
 3416					&register18);
 3417			et131x_mii_write(adapter, phydev->addr,
 3418					 PHY_MPHY_CONTROL_REG, register18 | 0x4);
 3419			et131x_mii_write(adapter, phydev->addr, PHY_INDEX_REG,
 3420					 register18 | 0x8402);
 3421			et131x_mii_write(adapter, phydev->addr, PHY_DATA_REG,
 3422					 register18 | 511);
 3423			et131x_mii_write(adapter, phydev->addr,
 3424					 PHY_MPHY_CONTROL_REG, register18);
 3425		}
 3426
 3427		et1310_config_flow_control(adapter);
 3428
 3429		if (phydev->speed == SPEED_1000 &&
3430 adapter->registry_jumbo_packet > 2048) {
3431 u16 reg;
 3432
 3433			et131x_mii_read(adapter, PHY_CONFIG, &reg);
 3434			reg &= ~ET_PHY_CONFIG_TX_FIFO_DEPTH;
 3435			reg |= ET_PHY_CONFIG_FIFO_DEPTH_32;
 3436			et131x_mii_write(adapter, phydev->addr, PHY_CONFIG,
 3437					 reg);
 3438		}
 3439
 3440		et131x_set_rx_dma_timer(adapter);
 3441		et1310_config_mac_regs2(adapter);
 3442	} else {
 3443		adapter->boot_coma = 0;
 3444
 3445		if (phydev->speed == SPEED_10) {
 3446			u16 register18;
 3447
 3448			et131x_mii_read(adapter, PHY_MPHY_CONTROL_REG,
 3449					&register18);
 3450			et131x_mii_write(adapter, phydev->addr,
 3451					 PHY_MPHY_CONTROL_REG, register18 | 0x4);
 3452			et131x_mii_write(adapter, phydev->addr,
 3453					 PHY_INDEX_REG, register18 | 0x8402);
 3454			et131x_mii_write(adapter, phydev->addr,
 3455					 PHY_DATA_REG, register18 | 511);
 3456			et131x_mii_write(adapter, phydev->addr,
 3457					 PHY_MPHY_CONTROL_REG, register18);
 3458		}
 3459
3460 /* Free the packets being actively sent & stopped */
3461 et131x_free_busy_send_packets(adapter);
 3462
3463 /* Re-initialize the send structures */
3464 et131x_init_send(adapter);
3465
3466 /* Bring the device back to the state it was during
3467 * init prior to autonegotiation being complete. This
3468 * way, when we get the auto-neg complete interrupt,
3469 * we can complete init by calling config_mac_regs2.
3470 */
3471 et131x_soft_reset(adapter);
3472
3473 /* Setup ET1310 as per the documentation */
3474 et131x_adapter_setup(adapter);
 3475
3476 /* perform reset of tx/rx */
3477 et131x_disable_txrx(netdev);
3478 et131x_enable_txrx(netdev);
3479 }
3480}
3481
3482static int et131x_mii_probe(struct net_device *netdev)
3483{
3484 struct et131x_adapter *adapter = netdev_priv(netdev);
3485 struct phy_device *phydev = NULL;
3486
3487 phydev = phy_find_first(adapter->mii_bus);
3488 if (!phydev) {
3489 dev_err(&adapter->pdev->dev, "no PHY found\n");
3490 return -ENODEV;
3491 }
3492
3493 phydev = phy_connect(netdev, dev_name(&phydev->dev),
 3494			     &et131x_adjust_link, PHY_INTERFACE_MODE_MII);
3495
3496 if (IS_ERR(phydev)) {
3497 dev_err(&adapter->pdev->dev, "Could not attach to PHY\n");
3498 return PTR_ERR(phydev);
3499 }
3500
3501 phydev->supported &= (SUPPORTED_10baseT_Half |
3502 SUPPORTED_10baseT_Full |
3503 SUPPORTED_100baseT_Half |
3504 SUPPORTED_100baseT_Full |
3505 SUPPORTED_Autoneg |
3506 SUPPORTED_MII |
3507 SUPPORTED_TP);
3508
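	/* Gigabit modes are added back below only when this is not the
	 * 10/100-only ET131X_PCI_DEVICE_ID_FAST variant.
	 */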
3509 if (adapter->pdev->device != ET131X_PCI_DEVICE_ID_FAST)
 3510		phydev->supported |= SUPPORTED_1000baseT_Half |
 3511				     SUPPORTED_1000baseT_Full;
3512
3513 phydev->advertising = phydev->supported;
 3514	phydev->autoneg = AUTONEG_ENABLE;
3515 adapter->phydev = phydev;
3516
 3517	dev_info(&adapter->pdev->dev,
 3518		 "attached PHY driver [%s] (mii_bus:phy_addr=%s)\n",
3519 phydev->drv->name, dev_name(&phydev->dev));
3520
3521 return 0;
3522}
3523
 3524/* et131x_adapter_init
 3525 *
 3526 * Initialize the data structures for the et131x_adapter object and link
 3527 * them together with the platform provided device structures.
 3528 */
 3529static struct et131x_adapter *et131x_adapter_init(struct net_device *netdev,
 3530						  struct pci_dev *pdev)
3531{
3532 static const u8 default_mac[] = { 0x00, 0x05, 0x3d, 0x00, 0x02, 0x00 };
3533
3534 struct et131x_adapter *adapter;
3535
3536 /* Allocate private adapter struct and copy in relevant information */
3537 adapter = netdev_priv(netdev);
3538 adapter->pdev = pci_dev_get(pdev);
3539 adapter->netdev = netdev;
3540
 3541	/* Initialize spinlocks here */
 3542	spin_lock_init(&adapter->tcb_send_qlock);
 3543	spin_lock_init(&adapter->tcb_ready_qlock);
 3544	spin_lock_init(&adapter->rcv_lock);
3545
3546 adapter->registry_jumbo_packet = 1514; /* 1514-9216 */
3547
3548 /* Set the MAC address to a default */
 3549	ether_addr_copy(adapter->addr, default_mac);
3550
3551 return adapter;
3552}
3553
 3554/* et131x_pci_remove
 3555 *
 3556 * Registered in the pci_driver structure, this function is called when the
 3557 * PCI subsystem detects that a PCI device which matches the information
 3558 * contained in the pci_device_id table has been removed.
 3559 */
 3560static void et131x_pci_remove(struct pci_dev *pdev)
3561{
3562 struct net_device *netdev = pci_get_drvdata(pdev);
3563 struct et131x_adapter *adapter = netdev_priv(netdev);
3564
3565 unregister_netdev(netdev);
 3566	netif_napi_del(&adapter->napi);
 3567	phy_disconnect(adapter->phydev);
3568 mdiobus_unregister(adapter->mii_bus);
3569 kfree(adapter->mii_bus->irq);
3570 mdiobus_free(adapter->mii_bus);
3571
3572 et131x_adapter_memory_free(adapter);
3573 iounmap(adapter->regs);
3574 pci_dev_put(pdev);
3575
3576 free_netdev(netdev);
3577 pci_release_regions(pdev);
3578 pci_disable_device(pdev);
3579}
3580
 3581/* et131x_up - Bring up a device for use. */
 3582static void et131x_up(struct net_device *netdev)
 3583{
 3584	struct et131x_adapter *adapter = netdev_priv(netdev);
 3585
 3586	et131x_enable_txrx(netdev);
 3587	phy_start(adapter->phydev);
 3588}
 3589
 3590/* et131x_down - Bring down the device */
 3591static void et131x_down(struct net_device *netdev)
3592{
3593 struct et131x_adapter *adapter = netdev_priv(netdev);
3594
3595 /* Save the timestamp for the TX watchdog, prevent a timeout */
3596 netdev->trans_start = jiffies;
3597
3598 phy_stop(adapter->phydev);
3599 et131x_disable_txrx(netdev);
3600}
3601
3602#ifdef CONFIG_PM_SLEEP
3603static int et131x_suspend(struct device *dev)
3604{
3605 struct pci_dev *pdev = to_pci_dev(dev);
3606 struct net_device *netdev = pci_get_drvdata(pdev);
3607
3608 if (netif_running(netdev)) {
3609 netif_device_detach(netdev);
3610 et131x_down(netdev);
3611 pci_save_state(pdev);
3612 }
3613
3614 return 0;
3615}
3616
3617static int et131x_resume(struct device *dev)
3618{
3619 struct pci_dev *pdev = to_pci_dev(dev);
3620 struct net_device *netdev = pci_get_drvdata(pdev);
3621
3622 if (netif_running(netdev)) {
3623 pci_restore_state(pdev);
3624 et131x_up(netdev);
3625 netif_device_attach(netdev);
3626 }
3627
3628 return 0;
3629}
3630
3631static SIMPLE_DEV_PM_OPS(et131x_pm_ops, et131x_suspend, et131x_resume);
3632#define ET131X_PM_OPS (&et131x_pm_ops)
3633#else
3634#define ET131X_PM_OPS NULL
3635#endif
3636
 3637/* et131x_isr - The Interrupt Service Routine for the driver.
 3638 * @irq: the IRQ on which the interrupt was received.
 3639 * @dev_id: device-specific info (here a pointer to a net_device struct)
 3640 *
 3641 * Returns a value indicating if the interrupt was handled.
 3642 */
 3643static irqreturn_t et131x_isr(int irq, void *dev_id)
 3644{
 3645	bool handled = true;
 3646	bool enable_interrupts = true;
 3647	struct net_device *netdev = (struct net_device *)dev_id;
 3648	struct et131x_adapter *adapter = netdev_priv(netdev);
 3649	struct address_map __iomem *iomem = adapter->regs;
 3650	struct rx_ring *rx_ring = &adapter->rx_ring;
 3651	struct tx_ring *tx_ring = &adapter->tx_ring;
 3652	u32 status;
 3653
 3654	if (!netif_device_present(netdev)) {
 3655		handled = false;
 3656		enable_interrupts = false;
 3657		goto out;
 3658	}
 3659
3660 /* If the adapter is in low power state, then it should not
3661 * recognize any interrupt
3662 */
3663
3664 /* Disable Device Interrupts */
3665 et131x_disable_interrupts(adapter);
3666
3667 /* Get a copy of the value in the interrupt status register
3668 * so we can process the interrupting section
3669 */
3670 status = readl(&adapter->regs->global.int_status);
3671
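	/* The two masks presumably differ in the free-buffer-ring-low
	 * bits: with TX flow control enabled those sources stay unmasked
	 * so pause frames can be requested further down.
	 */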
3672 if (adapter->flowcontrol == FLOW_TXONLY ||
3673 adapter->flowcontrol == FLOW_BOTH) {
3674 status &= ~INT_MASK_ENABLE;
3675 } else {
3676 status &= ~INT_MASK_ENABLE_NO_FLOW;
3677 }
3678
3679 /* Make sure this is our interrupt */
3680 if (!status) {
3681 handled = false;
3682 et131x_enable_interrupts(adapter);
3683 goto out;
3684 }
3685
3686 /* This is our interrupt, so process accordingly */
 3687	if (status & ET_INTR_WATCHDOG) {
 3688		struct tcb *tcb = tx_ring->send_head;
3689
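		/* A TCB still at the send head after two watchdog ticks
		 * means TX completion has stalled; fake a TXDMA interrupt
		 * so the send path gets serviced.
		 */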
3690 if (tcb)
3691 if (++tcb->stale > 1)
3692 status |= ET_INTR_TXDMA_ISR;
3693
 3694		if (rx_ring->unfinished_receives)
3695 status |= ET_INTR_RXDMA_XFR_DONE;
3696 else if (tcb == NULL)
3697 writel(0, &adapter->regs->global.watchdog_timer);
3698
3699 status &= ~ET_INTR_WATCHDOG;
3700 }
3701
 3702	if (status & (ET_INTR_RXDMA_XFR_DONE | ET_INTR_TXDMA_ISR)) {
 3703		enable_interrupts = false;
 3704		napi_schedule(&adapter->napi);
 3705	}
 3706
 3707	status &= ~(ET_INTR_TXDMA_ISR | ET_INTR_RXDMA_XFR_DONE);
 3708
 3709	if (!status)
 3710		goto out;
 3711
3712 /* Handle the TXDMA Error interrupt */
3713 if (status & ET_INTR_TXDMA_ERR) {
 3714		/* Following read also clears the register (COR) */
 3715		u32 txdma_err = readl(&iomem->txdma.tx_dma_error);
 3716
 3717		dev_warn(&adapter->pdev->dev,
 3718			 "TXDMA_ERR interrupt, error = %d\n",
 3719			 txdma_err);
 3720	}
 3721
 3722	/* Handle Free Buffer Ring 0 and 1 Low interrupt */
 3723	if (status & (ET_INTR_RXDMA_FB_R0_LOW | ET_INTR_RXDMA_FB_R1_LOW)) {
 3724		/* This indicates the number of unused buffers in RXDMA free
3725 * buffer ring 0 is <= the limit you programmed. Free buffer
3726 * resources need to be returned. Free buffers are consumed as
3727 * packets are passed from the network to the host. The host
3728 * becomes aware of the packets from the contents of the packet
3729 * status ring. This ring is queried when the packet done
3730 * interrupt occurs. Packets are then passed to the OS. When
3731 * the OS is done with the packets the resources can be
3732 * returned to the ET1310 for re-use. This interrupt is one
3733 * method of returning resources.
3734 */
 3735
 3736		/* If the user has flow control on, then we will
 3737		 * send a pause packet, otherwise just exit
 3738		 */
 3739		if (adapter->flowcontrol == FLOW_TXONLY ||
 3740		    adapter->flowcontrol == FLOW_BOTH) {
 3741			u32 pm_csr;
 3742
 3743			/* Tell the device to send a pause packet via the back
 3744			 * pressure register (bp req and bp xon/xoff)
 3745			 */
 3746			pm_csr = readl(&iomem->global.pm_csr);
 3747			if (!et1310_in_phy_coma(adapter))
 3748				writel(3, &iomem->txmac.bp_ctrl);
 3749		}
 3750	}
 3751
3752 /* Handle Packet Status Ring Low Interrupt */
3753 if (status & ET_INTR_RXDMA_STAT_LOW) {
 3754		/* Same idea as with the two Free Buffer Rings. Packets going
 3755		 * from the network to the host each consume a free buffer
 3756		 * resource and a packet status resource. These resources are
 3757		 * passed to the OS. When the OS is done with the resources,
 3758		 * they need to be returned to the ET1310. This is one method
 3759		 * of returning the resources.
 3760		 */
 3761	}
 3762
 3763	/* Handle RXDMA Error Interrupt */
 3764	if (status & ET_INTR_RXDMA_ERR) {
 3765		/* The rxdma_error interrupt is sent when a time-out on a
3766 * request issued by the JAGCore has occurred or a completion is
3767 * returned with an un-successful status. In both cases the
3768 * request is considered complete. The JAGCore will
3769 * automatically re-try the request in question. Normally
3770 * information on events like these are sent to the host using
3771 * the "Advanced Error Reporting" capability. This interrupt is
3772 * another way of getting similar information. The only thing
3773 * required is to clear the interrupt by reading the ISR in the
3774 * global resources. The JAGCore will do a re-try on the
3775 * request. Normally you should never see this interrupt. If
3776 * you start to see this interrupt occurring frequently then
3777 * something bad has occurred. A reset might be the thing to do.
3778 */
3779 /* TRAP();*/
 3780
 3781		dev_warn(&adapter->pdev->dev,
 3782			 "RxDMA_ERR interrupt, error %x\n",
 3783			 readl(&iomem->txmac.tx_test));
 3784	}
 3785
3786 /* Handle the Wake on LAN Event */
3787 if (status & ET_INTR_WOL) {
 3788		/* This is a secondary interrupt for wake on LAN. The driver
 3789		 * should never see this; if it does, something serious is
 3790		 * wrong. We will TRAP the message when we are in DBG mode,
 3791		 * otherwise we will ignore it.
 3792		 */
 3793		dev_err(&adapter->pdev->dev, "WAKE_ON_LAN interrupt\n");
 3794	}
 3795
 3796	/* Let's move on to the TxMac */
 3797	if (status & ET_INTR_TXMAC) {
 3798		u32 err = readl(&iomem->txmac.err);
 3799
 3800		/* When any of the errors occur and TXMAC generates an
3801 * interrupt to report these errors, it usually means that
3802 * TXMAC has detected an error in the data stream retrieved
3803 * from the on-chip Tx Q. All of these errors are catastrophic
3804 * and TXMAC won't be able to recover data when these errors
3805 * occur. In a nutshell, the whole Tx path will have to be reset
3806 * and re-configured afterwards.
3807 */
3808 dev_warn(&adapter->pdev->dev,
3809 "TXMAC interrupt, error 0x%08x\n",
3810 err);
 3811
 3812		/* If we are debugging, we want to see this error, otherwise we
 3813		 * just want the device to be reset and continue
 3814		 */
 3815	}
 3816
3817 /* Handle RXMAC Interrupt */
3818 if (status & ET_INTR_RXMAC) {
 3819		/* These interrupts are catastrophic to the device; what we need
 3820		 * to do is disable the interrupts and set the flag to cause us
 3821		 * to reset so we can solve this issue.
 3822		 */
 3823		/* MP_SET_FLAG( adapter, FMP_ADAPTER_HARDWARE_ERROR); */
 3824
 3825		dev_warn(&adapter->pdev->dev,
 3826			 "RXMAC interrupt, error 0x%08x. Requesting reset\n",
 3827			 readl(&iomem->rxmac.err_reg));
 3828
3829 dev_warn(&adapter->pdev->dev,
3830 "Enable 0x%08x, Diag 0x%08x\n",
3831 readl(&iomem->rxmac.ctrl),
3832 readl(&iomem->rxmac.rxq_diag));
 3833
 3834		/* If we are debugging, we want to see this error, otherwise we
 3835		 * just want the device to be reset and continue
 3836		 */
 3837	}
 3838
3839 /* Handle MAC_STAT Interrupt */
3840 if (status & ET_INTR_MAC_STAT) {
 3841		/* This means at least one of the un-masked counters in the
 3842		 * MAC_STAT block has rolled over. Use this to maintain the top,
 3843		 * software managed bits of the counter(s).
 3844		 */
 3845		et1310_handle_macstat_interrupt(adapter);
 3846	}
3847
3848 /* Handle SLV Timeout Interrupt */
3849 if (status & ET_INTR_SLV_TIMEOUT) {
 3850		/* This means a timeout has occurred on a read or write request
3851 * to one of the JAGCore registers. The Global Resources block
3852 * has terminated the request and on a read request, returned a
3853 * "fake" value. The most likely reasons are: Bad Address or the
3854 * addressed module is in a power-down state and can't respond.
3855 */
3856 }
 3857
 3858out:
 3859	if (enable_interrupts)
 3860		et131x_enable_interrupts(adapter);
 3861
3862 return IRQ_RETVAL(handled);
3863}
3864
3865static int et131x_poll(struct napi_struct *napi, int budget)
3866{
3867 struct et131x_adapter *adapter =
3868 container_of(napi, struct et131x_adapter, napi);
3869 int work_done = et131x_handle_recv_pkts(adapter, budget);
3870
3871 et131x_handle_send_pkts(adapter);
3872
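	/* Per the NAPI contract, device interrupts stay disabled while
	 * polling; only when less than the full budget was consumed may
	 * we complete the poll and re-enable them.
	 */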
3873 if (work_done < budget) {
3874 napi_complete(&adapter->napi);
3875 et131x_enable_interrupts(adapter);
3876 }
3877
3878 return work_done;
3879}
3880
 3881/* et131x_stats - Return the current device statistics */
 3882static struct net_device_stats *et131x_stats(struct net_device *netdev)
 3883{
 3884	struct et131x_adapter *adapter = netdev_priv(netdev);
 3885	struct net_device_stats *stats = &adapter->netdev->stats;
3886 struct ce_stats *devstat = &adapter->stats;
3887
3888 stats->rx_errors = devstat->rx_length_errs +
3889 devstat->rx_align_errs +
3890 devstat->rx_crc_errs +
3891 devstat->rx_code_violations +
3892 devstat->rx_other_errs;
3893 stats->tx_errors = devstat->tx_max_pkt_errs;
3894 stats->multicast = devstat->multicast_pkts_rcvd;
3895 stats->collisions = devstat->tx_collisions;
3896
3897 stats->rx_length_errors = devstat->rx_length_errs;
3898 stats->rx_over_errors = devstat->rx_overflows;
3899 stats->rx_crc_errors = devstat->rx_crc_errs;
 3900	stats->rx_dropped = devstat->rcvd_pkts_dropped;
 3901
 3902	/* NOTE: Not used, can't find analogous statistics */
3903 /* stats->rx_frame_errors = devstat->; */
3904 /* stats->rx_fifo_errors = devstat->; */
3905 /* stats->rx_missed_errors = devstat->; */
3906
3907 /* stats->tx_aborted_errors = devstat->; */
3908 /* stats->tx_carrier_errors = devstat->; */
3909 /* stats->tx_fifo_errors = devstat->; */
3910 /* stats->tx_heartbeat_errors = devstat->; */
3911 /* stats->tx_window_errors = devstat->; */
3912 return stats;
3913}
3914
 3915/* et131x_open - Open the device for use. */
 3916static int et131x_open(struct net_device *netdev)
 3917{
 3918	struct et131x_adapter *adapter = netdev_priv(netdev);
3919 struct pci_dev *pdev = adapter->pdev;
3920 unsigned int irq = pdev->irq;
3921 int result;
3922
3923 /* Start the timer to track NIC errors */
3924 init_timer(&adapter->error_timer);
3925 adapter->error_timer.expires = jiffies + TX_ERROR_PERIOD * HZ / 1000;
3926 adapter->error_timer.function = et131x_error_timer_handler;
3927 adapter->error_timer.data = (unsigned long)adapter;
3928 add_timer(&adapter->error_timer);
3929
 3930	result = request_irq(irq, et131x_isr,
 3931			     IRQF_SHARED, netdev->name, netdev);
 3932	if (result) {
 3933		dev_err(&pdev->dev, "could not register IRQ %d\n", irq);
 3934		return result;
 3935	}
 3936
 3937	adapter->flags |= FMP_ADAPTER_INTERRUPT_IN_USE;
 3938
3939 napi_enable(&adapter->napi);
3940
3941 et131x_up(netdev);
3942
3943 return result;
3944}
3945
 3946/* et131x_close - Close the device */
 3947static int et131x_close(struct net_device *netdev)
 3948{
 3949	struct et131x_adapter *adapter = netdev_priv(netdev);
 3950
 3951	et131x_down(netdev);
 3952	napi_disable(&adapter->napi);
 3953
 3954	adapter->flags &= ~FMP_ADAPTER_INTERRUPT_IN_USE;
 3955	free_irq(adapter->pdev->irq, netdev);
3956
3957 /* Stop the error timer */
3958 return del_timer_sync(&adapter->error_timer);
3959}
3960
 3961/* et131x_ioctl - The I/O Control handler for the driver
 3962 * @netdev: device on which the control request is being made
 3963 * @reqbuf: a pointer to the IOCTL request buffer
 3964 * @cmd: the IOCTL command code
 3965 */
 3966static int et131x_ioctl(struct net_device *netdev, struct ifreq *reqbuf,
 3967			int cmd)
3968{
3969 struct et131x_adapter *adapter = netdev_priv(netdev);
3970
3971 if (!adapter->phydev)
3972 return -EINVAL;
3973
3974 return phy_mii_ioctl(adapter->phydev, reqbuf, cmd);
3975}
3976
 3977/* et131x_set_packet_filter - Configures the Rx Packet filtering on the device
 3978 * @adapter: pointer to our private adapter structure
 3979 *
 3980 * FIXME: lot of dups with MAC code
 3981 */
 3982static int et131x_set_packet_filter(struct et131x_adapter *adapter)
 3983{
 3984	int filter = adapter->packet_filter;
3985 u32 ctrl;
3986 u32 pf_ctrl;
3987
3988 ctrl = readl(&adapter->regs->rxmac.ctrl);
3989 pf_ctrl = readl(&adapter->regs->rxmac.pf_ctrl);
3990
3991 /* Default to disabled packet filtering. Enable it in the individual
3992 * case statements that require the device to filter something
3993 */
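	/* Bit 2 (0x04) of rxmac.ctrl appears to act as a "packet filter
	 * disable" bit; the branches below clear it again whenever a
	 * specific filter is enabled.
	 */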
3994 ctrl |= 0x04;
3995
3996 /* Set us to be in promiscuous mode so we receive everything, this
3997 * is also true when we get a packet filter of 0
3998 */
 3999	if ((filter & ET131X_PACKET_TYPE_PROMISCUOUS) || filter == 0) {
 4000		pf_ctrl &= ~7;	/* Clear filter bits */
 4001	} else {
 4002		/* Set us up with Multicast packet filtering. Three cases are
 4003		 * possible - (1) we have a multi-cast list, (2) we receive ALL
 4004		 * multicast entries or (3) we receive none.
 4005		 */
 4006		if (filter & ET131X_PACKET_TYPE_ALL_MULTICAST) {
 4007			pf_ctrl &= ~2;	/* Multicast filter bit */
 4008		} else {
4009 et1310_setup_device_for_multicast(adapter);
4010 pf_ctrl |= 2;
4011 ctrl &= ~0x04;
4012 }
4013
4014 /* Set us up with Unicast packet filtering */
4015 if (filter & ET131X_PACKET_TYPE_DIRECTED) {
4016 et1310_setup_device_for_unicast(adapter);
4017 pf_ctrl |= 4;
4018 ctrl &= ~0x04;
4019 }
4020
4021 /* Set us up with Broadcast packet filtering */
4022 if (filter & ET131X_PACKET_TYPE_BROADCAST) {
4023 pf_ctrl |= 1; /* Broadcast filter bit */
4024 ctrl &= ~0x04;
 4025		} else {
 4026			pf_ctrl &= ~1;
 4027		}
4028
4029 /* Setup the receive mac configuration registers - Packet
4030 * Filter control + the enable / disable for packet filter
4031 * in the control reg.
4032 */
4033 writel(pf_ctrl, &adapter->regs->rxmac.pf_ctrl);
4034 writel(ctrl, &adapter->regs->rxmac.ctrl);
4035 }
 4036	return 0;
4037}
4038
 4039/* et131x_multicast - The handler to configure multicasting on the interface */
 4040static void et131x_multicast(struct net_device *netdev)
 4041{
 4042	struct et131x_adapter *adapter = netdev_priv(netdev);
 4043	int packet_filter;
 4044	struct netdev_hw_addr *ha;
 4045	int i;
 4046
4047 /* Before we modify the platform-independent filter flags, store them
4048 * locally. This allows us to determine if anything's changed and if
4049 * we even need to bother the hardware
4050 */
4051 packet_filter = adapter->packet_filter;
4052
4053 /* Clear the 'multicast' flag locally; because we only have a single
4054 * flag to check multicast, and multiple multicast addresses can be
4055 * set, this is the easiest way to determine if more than one
4056 * multicast address is being set.
4057 */
4058 packet_filter &= ~ET131X_PACKET_TYPE_MULTICAST;
4059
4060 /* Check the net_device flags and set the device independent flags
4061 * accordingly
4062 */
4063 if (netdev->flags & IFF_PROMISC)
4064 adapter->packet_filter |= ET131X_PACKET_TYPE_PROMISCUOUS;
4065 else
4066 adapter->packet_filter &= ~ET131X_PACKET_TYPE_PROMISCUOUS;
4067
4068 if ((netdev->flags & IFF_ALLMULTI) ||
4069 (netdev_mc_count(netdev) > NIC_MAX_MCAST_LIST))
4070 adapter->packet_filter |= ET131X_PACKET_TYPE_ALL_MULTICAST;
4071
4072 if (netdev_mc_count(netdev) < 1) {
4073 adapter->packet_filter &= ~ET131X_PACKET_TYPE_ALL_MULTICAST;
4074 adapter->packet_filter &= ~ET131X_PACKET_TYPE_MULTICAST;
 4075	} else {
 4076		adapter->packet_filter |= ET131X_PACKET_TYPE_MULTICAST;
 4077	}
4078
4079 /* Set values in the private adapter struct */
4080 i = 0;
4081 netdev_for_each_mc_addr(ha, netdev) {
4082 if (i == NIC_MAX_MCAST_LIST)
4083 break;
4084 memcpy(adapter->multicast_list[i++], ha->addr, ETH_ALEN);
4085 }
4086 adapter->multicast_addr_count = i;
4087
4088 /* Are the new flags different from the previous ones? If not, then no
4089 * action is required
4090 *
4091 * NOTE - This block will always update the multicast_list with the
4092 * hardware, even if the addresses aren't the same.
4093 */
 4094	if (packet_filter != adapter->packet_filter)
 4095		et131x_set_packet_filter(adapter);
4096}
4097
 4098/* et131x_tx - The handler to tx a packet on the device */
 4099static netdev_tx_t et131x_tx(struct sk_buff *skb, struct net_device *netdev)
 4100{
 4101	struct et131x_adapter *adapter = netdev_priv(netdev);
 4102	struct tx_ring *tx_ring = &adapter->tx_ring;
4103
4104 /* stop the queue if it's getting full */
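	/* Stopping at NUM_TCB - 1 keeps one TCB in reserve so the packet
	 * being queued right now can still be accepted below.
	 */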
 4105	if (tx_ring->used >= NUM_TCB - 1 && !netif_queue_stopped(netdev))
 4106		netif_stop_queue(netdev);
4107
4108 /* Save the timestamp for the TX timeout watchdog */
4109 netdev->trans_start = jiffies;
4110
 4111	/* TCB is not available */
 4112	if (tx_ring->used >= NUM_TCB)
 4113		goto drop_err;
 4114
 4115	if ((adapter->flags & FMP_ADAPTER_FAIL_SEND_MASK) ||
 4116	    !netif_carrier_ok(netdev))
 4117		goto drop_err;
 4118
4119 if (send_packet(skb, adapter))
4120 goto drop_err;
4121
4122 return NETDEV_TX_OK;
4123
4124drop_err:
4125 dev_kfree_skb_any(skb);
4126 adapter->netdev->stats.tx_dropped++;
4127 return NETDEV_TX_OK;
4128}
4129
 4130/* et131x_tx_timeout - Timeout handler
 4131 *
 4132 * The handler called when a Tx request times out. The timeout period is
 4133 * specified by the 'watchdog_timeo' element in the net_device structure
 4134 * (see et131x_pci_setup() to see how this value is set).
4135 */
4136static void et131x_tx_timeout(struct net_device *netdev)
4137{
4138 struct et131x_adapter *adapter = netdev_priv(netdev);
 4139	struct tx_ring *tx_ring = &adapter->tx_ring;
 4140	struct tcb *tcb;
 4141	unsigned long flags;
 4142
 4143	/* If the device is closed, ignore the timeout */
 4144	if (!(adapter->flags & FMP_ADAPTER_INTERRUPT_IN_USE))
4145 return;
4146
4147 /* Any nonrecoverable hardware error?
4148 * Checks adapter->flags for any failure in phy reading
4149 */
 4150	if (adapter->flags & FMP_ADAPTER_NON_RECOVER_ERROR)
 4151		return;
 4152
 4153	/* Hardware failure? */
 4154	if (adapter->flags & FMP_ADAPTER_HARDWARE_ERROR) {
4155 dev_err(&adapter->pdev->dev, "hardware error - reset\n");
4156 return;
4157 }
4158
4159 /* Is send stuck? */
4160 spin_lock_irqsave(&adapter->tcb_send_qlock, flags);
 4161	tcb = tx_ring->send_head;
 4162	spin_unlock_irqrestore(&adapter->tcb_send_qlock, flags);
 4163
 4164	if (tcb) {
4165 tcb->count++;
4166
4167 if (tcb->count > NIC_SEND_HANG_THRESHOLD) {
 4168			dev_warn(&adapter->pdev->dev,
 4169				 "Send stuck - reset. tcb->WrIndex %x\n",
 4170				 tcb->index);
 4171
 4172			adapter->netdev->stats.tx_errors++;
4173
4174 /* perform reset of tx/rx */
4175 et131x_disable_txrx(netdev);
4176 et131x_enable_txrx(netdev);
4177 }
4178 }
4179}
4180
 4181/* et131x_change_mtu - The handler called to change the MTU for the device */
4182static int et131x_change_mtu(struct net_device *netdev, int new_mtu)
4183{
4184 int result = 0;
4185 struct et131x_adapter *adapter = netdev_priv(netdev);
4186
4187 /* Make sure the requested MTU is valid */
4188 if (new_mtu < 64 || new_mtu > 9216)
4189 return -EINVAL;
4190
4191 et131x_disable_txrx(netdev);
4192
4193 /* Set the new MTU */
4194 netdev->mtu = new_mtu;
4195
4196 /* Free Rx DMA memory */
4197 et131x_adapter_memory_free(adapter);
4198
4199 /* Set the config parameter for Jumbo Packet support */
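	/* The +14 accounts for the Ethernet header (two 6-byte addresses
	 * plus the 2-byte type/length field) on top of the MTU.
	 */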
4200 adapter->registry_jumbo_packet = new_mtu + 14;
4201 et131x_soft_reset(adapter);
4202
4203 /* Alloc and init Rx DMA memory */
4204 result = et131x_adapter_memory_alloc(adapter);
4205 if (result != 0) {
4206 dev_warn(&adapter->pdev->dev,
 4207			 "Change MTU failed; couldn't re-alloc DMA memory\n");
4208 return result;
4209 }
4210
4211 et131x_init_send(adapter);
4212
4213 et131x_hwaddr_init(adapter);
4214 memcpy(netdev->dev_addr, adapter->addr, ETH_ALEN);
4215
4216 /* Init the device with the new settings */
4217 et131x_adapter_setup(adapter);
4218 et131x_enable_txrx(netdev);
4219
4220 return result;
4221}
4222
4223static const struct net_device_ops et131x_netdev_ops = {
4224 .ndo_open = et131x_open,
4225 .ndo_stop = et131x_close,
4226 .ndo_start_xmit = et131x_tx,
 4227	.ndo_set_rx_mode = et131x_multicast,
 4228	.ndo_tx_timeout = et131x_tx_timeout,
 4229	.ndo_change_mtu = et131x_change_mtu,
 4230	.ndo_set_mac_address = eth_mac_addr,
4231 .ndo_validate_addr = eth_validate_addr,
4232 .ndo_get_stats = et131x_stats,
4233 .ndo_do_ioctl = et131x_ioctl,
4234};
4235
 4236/* et131x_pci_setup - Perform device initialization
 4237 * @pdev: a pointer to the device's pci_dev structure
 4238 * @ent: this device's entry in the pci_device_id table
 4239 *
4240 * Registered in the pci_driver structure, this function is called when the
4241 * PCI subsystem finds a new PCI device which matches the information
4242 * contained in the pci_device_id table. This routine is the equivalent to
4243 * a device insertion routine.
4244 */
 4245static int et131x_pci_setup(struct pci_dev *pdev,
 4246			    const struct pci_device_id *ent)
 4247{
 4248	struct net_device *netdev;
 4249	struct et131x_adapter *adapter;
 4250	int rc;
 4251	int ii;
 4252
 4253	rc = pci_enable_device(pdev);
 4254	if (rc < 0) {
 4255		dev_err(&pdev->dev, "pci_enable_device() failed\n");
 4256		goto out;
4257 }
4258
4259 /* Perform some basic PCI checks */
4260 if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
4261 dev_err(&pdev->dev, "Can't find PCI device's base address\n");
 4262		rc = -ENODEV;
4263 goto err_disable;
4264 }
4265
 4266	rc = pci_request_regions(pdev, DRIVER_NAME);
 4267	if (rc < 0) {
 4268		dev_err(&pdev->dev, "Can't get PCI resources\n");
 4269		goto err_disable;
 4270	}
 4271
 4272	pci_set_master(pdev);
 4273
 4274	/* Check the DMA addressing support of this device */
 4275	if (dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)) &&
 4276	    dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32))) {
 4277		dev_err(&pdev->dev, "No usable DMA addressing method\n");
 4278		rc = -EIO;
4279 goto err_release_res;
4280 }
4281
4282 /* Allocate netdev and private adapter structs */
 4283	netdev = alloc_etherdev(sizeof(struct et131x_adapter));
 4284	if (!netdev) {
 4285		dev_err(&pdev->dev, "Couldn't alloc netdev struct\n");
 4286		rc = -ENOMEM;
 4287		goto err_release_res;
 4288	}
 4289
 4290	netdev->watchdog_timeo = ET131X_TX_TIMEOUT;
 4291	netdev->netdev_ops = &et131x_netdev_ops;
 4292
 4293	SET_NETDEV_DEV(netdev, &pdev->dev);
 4294	netdev->ethtool_ops = &et131x_ethtool_ops;
4295
4296 adapter = et131x_adapter_init(netdev, pdev);
4297
 4298	rc = et131x_pci_init(adapter, pdev);
 4299	if (rc < 0)
 4300		goto err_free_dev;
 4301
 4302	/* Map the bus-relative registers to system virtual memory */
 4303	adapter->regs = pci_ioremap_bar(pdev, 0);
 4304	if (!adapter->regs) {
 4305		dev_err(&pdev->dev, "Cannot map device registers\n");
 4306		rc = -ENOMEM;
4307 goto err_free_dev;
4308 }
4309
4310 /* If Phy COMA mode was enabled when we went down, disable it here. */
4311 writel(ET_PMCSR_INIT, &adapter->regs->global.pm_csr);
4312
4313 /* Issue a global reset to the et1310 */
4314 et131x_soft_reset(adapter);
4315
4316 /* Disable all interrupts (paranoid) */
4317 et131x_disable_interrupts(adapter);
4318
4319 /* Allocate DMA memory */
 4320	rc = et131x_adapter_memory_alloc(adapter);
 4321	if (rc < 0) {
 4322		dev_err(&pdev->dev, "Could not alloc adapter memory (DMA)\n");
 4323		goto err_iounmap;
 4324	}
 4325
 4326	/* Init send data structures */
 4327	et131x_init_send(adapter);
 4328
 4329	netif_napi_add(netdev, &adapter->napi, et131x_poll, 64);
4330
4331 /* Copy address into the net_device struct */
4332 memcpy(netdev->dev_addr, adapter->addr, ETH_ALEN);
4333
 4334	rc = -ENOMEM;
 4335
4336 /* Setup the mii_bus struct */
4337 adapter->mii_bus = mdiobus_alloc();
4338 if (!adapter->mii_bus) {
4339 dev_err(&pdev->dev, "Alloc of mii_bus struct failed\n");
4340 goto err_mem_free;
4341 }
4342
4343 adapter->mii_bus->name = "et131x_eth_mii";
4344 snprintf(adapter->mii_bus->id, MII_BUS_ID_SIZE, "%x",
 4345		 (adapter->pdev->bus->number << 8) | adapter->pdev->devfn);
4346 adapter->mii_bus->priv = netdev;
4347 adapter->mii_bus->read = et131x_mdio_read;
4348 adapter->mii_bus->write = et131x_mdio_write;
 4349	adapter->mii_bus->irq = kmalloc_array(PHY_MAX_ADDR, sizeof(int),
 4350					      GFP_KERNEL);
 4351	if (!adapter->mii_bus->irq)
 4352		goto err_mdio_free;
4353
4354 for (ii = 0; ii < PHY_MAX_ADDR; ii++)
4355 adapter->mii_bus->irq[ii] = PHY_POLL;
4356
 4357	rc = mdiobus_register(adapter->mii_bus);
 4358	if (rc < 0) {
 4359		dev_err(&pdev->dev, "failed to register MII bus\n");
 4360		goto err_mdio_free_irq;
 4361	}
 4362
 4363	rc = et131x_mii_probe(netdev);
 4364	if (rc < 0) {
4365 dev_err(&pdev->dev, "failed to probe MII bus\n");
4366 goto err_mdio_unregister;
4367 }
4368
4369 /* Setup et1310 as per the documentation */
4370 et131x_adapter_setup(adapter);
4371
4372 /* Init variable for counting how long we do not have link status */
4373 adapter->boot_coma = 0;
4374 et1310_disable_phy_coma(adapter);
4375
4376 /* We can enable interrupts now
4377 *
4378 * NOTE - Because registration of interrupt handler is done in the
4379 * device's open(), defer enabling device interrupts to that
4380 * point
4381 */
4382
4383 /* Register the net_device struct with the Linux network layer */
 4384	rc = register_netdev(netdev);
 4385	if (rc < 0) {
 4386		dev_err(&pdev->dev, "register_netdev() failed\n");
 4387		goto err_phy_disconnect;
 4388	}
 4389
 4390	/* Register the net_device struct with the PCI subsystem. Save a copy
 4391	 * of the PCI config space for this device now that the device has
 4392	 * been initialized, just in case it needs to be quickly restored.
 4393	 */
 4394	pci_set_drvdata(pdev, netdev);
 4395out:
 4396	return rc;
 4397
 4398err_phy_disconnect:
 4399	phy_disconnect(adapter->phydev);
4400err_mdio_unregister:
4401 mdiobus_unregister(adapter->mii_bus);
4402err_mdio_free_irq:
4403 kfree(adapter->mii_bus->irq);
4404err_mdio_free:
4405 mdiobus_free(adapter->mii_bus);
4406err_mem_free:
4407 et131x_adapter_memory_free(adapter);
4408err_iounmap:
4409 iounmap(adapter->regs);
4410err_free_dev:
4411 pci_dev_put(pdev);
4412 free_netdev(netdev);
4413err_release_res:
4414 pci_release_regions(pdev);
4415err_disable:
4416 pci_disable_device(pdev);
 4417	goto out;
4418}
4419
 4420static const struct pci_device_id et131x_pci_table[] = {
 4421	{ PCI_VDEVICE(ATT, ET131X_PCI_DEVICE_ID_GIG), 0UL},
 4422	{ PCI_VDEVICE(ATT, ET131X_PCI_DEVICE_ID_FAST), 0UL},
 4423	{0,}
 4424};
 4425MODULE_DEVICE_TABLE(pci, et131x_pci_table);
 4426
 4427static struct pci_driver et131x_driver = {
 4428	.name		= DRIVER_NAME,
 4429	.id_table	= et131x_pci_table,
 4430	.probe		= et131x_pci_setup,
 4431	.remove		= et131x_pci_remove,
 4432	.driver.pm	= ET131X_PM_OPS,
 4433};
 4434
 4435module_pci_driver(et131x_driver);