/* Agere Systems Inc.
 * 10/100/1000 Base-T Ethernet Driver for the ET1301 and ET131x series MACs
 *
 * Copyright © 2005 Agere Systems Inc.
 * All rights reserved.
 *   http://www.agere.com
 *
 * Copyright (c) 2011 Mark Einon <mark.einon@gmail.com>
 *
 *------------------------------------------------------------------------------
 *
 * SOFTWARE LICENSE
 *
 * This software is provided subject to the following terms and conditions,
 * which you should read carefully before using the software. Using this
 * software indicates your acceptance of these terms and conditions. If you do
 * not agree with these terms and conditions, do not use the software.
 *
 * Copyright © 2005 Agere Systems Inc.
 * All rights reserved.
 *
 * Redistribution and use in source or binary forms, with or without
 * modifications, are permitted provided that the following conditions are met:
 *
 * . Redistributions of source code must retain the above copyright notice, this
 *   list of conditions and the following Disclaimer as comments in the code as
 *   well as in the documentation and/or other materials provided with the
 *   distribution.
 *
 * . Redistributions in binary form must reproduce the above copyright notice,
 *   this list of conditions and the following Disclaimer in the documentation
 *   and/or other materials provided with the distribution.
 *
 * . Neither the name of Agere Systems Inc. nor the names of the contributors
 *   may be used to endorse or promote products derived from this software
 *   without specific prior written permission.
 *
 * Disclaimer
 *
 * THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
 * INCLUDING, BUT NOT LIMITED TO, INFRINGEMENT AND THE IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. ANY
 * USE, MODIFICATION OR DISTRIBUTION OF THIS SOFTWARE IS SOLELY AT THE USERS OWN
 * RISK. IN NO EVENT SHALL AGERE SYSTEMS INC. OR CONTRIBUTORS BE LIABLE FOR ANY
 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
 * ON ANY THEORY OF LIABILITY, INCLUDING, BUT NOT LIMITED TO, CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
 * DAMAGE.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/pci.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>

#include <linux/sched.h>
#include <linux/ptrace.h>
#include <linux/slab.h>
#include <linux/ctype.h>
#include <linux/string.h>
#include <linux/timer.h>
#include <linux/interrupt.h>
#include <linux/in.h>
#include <linux/delay.h>
#include <linux/bitops.h>
#include <linux/io.h>

#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/if_arp.h>
#include <linux/ioport.h>
#include <linux/crc32.h>
#include <linux/random.h>
#include <linux/phy.h>

#include "et131x.h"

MODULE_AUTHOR("Victor Soriano <vjsoriano@agere.com>");
MODULE_AUTHOR("Mark Einon <mark.einon@gmail.com>");
MODULE_LICENSE("Dual BSD/GPL");
MODULE_DESCRIPTION("10/100/1000 Base-T Ethernet Driver for the ET1310 by Agere Systems");

/* EEPROM defines */
#define MAX_NUM_REGISTER_POLLS	1000
#define MAX_NUM_WRITE_RETRIES	2

/* MAC defines */
#define COUNTER_WRAP_16_BIT	0x10000
#define COUNTER_WRAP_12_BIT	0x1000

/* PCI defines */
#define INTERNAL_MEM_SIZE	0x400	/* 1024 of internal memory */
#define INTERNAL_MEM_RX_OFFSET	0x1FF	/* 50% Tx, 50% Rx */

/* ISR defines */
/* For interrupts, normal running is:
 * rxdma_xfr_done, phy_interrupt, mac_stat_interrupt,
 * watchdog_interrupt & txdma_xfer_done
 *
 * In both cases, when flow control is enabled for either Tx or bi-direction,
 * we additionally enable rx_fbr0_low and rx_fbr1_low, so we know when the
 * buffer rings are running low.
 */
#define INT_MASK_DISABLE	0xffffffff

/* NOTE: Masking out MAC_STAT Interrupt for now...
 * #define INT_MASK_ENABLE		0xfff6bf17
 * #define INT_MASK_ENABLE_NO_FLOW	0xfff6bfd7
 */
#define INT_MASK_ENABLE		0xfffebf17
#define INT_MASK_ENABLE_NO_FLOW	0xfffebfd7

/* General defines */
/* Packet and header sizes */
#define NIC_MIN_PACKET_SIZE	60

/* Multicast list size */
#define NIC_MAX_MCAST_LIST	128

/* Supported Filters */
#define ET131X_PACKET_TYPE_DIRECTED		0x0001
#define ET131X_PACKET_TYPE_MULTICAST		0x0002
#define ET131X_PACKET_TYPE_BROADCAST		0x0004
#define ET131X_PACKET_TYPE_PROMISCUOUS		0x0008
#define ET131X_PACKET_TYPE_ALL_MULTICAST	0x0010

/* Tx Timeout */
#define ET131X_TX_TIMEOUT	(1 * HZ)
#define NIC_SEND_HANG_THRESHOLD	0

/* MP_ADAPTER flags */
#define FMP_ADAPTER_INTERRUPT_IN_USE	0x00000008

/* MP_SHARED flags */
#define FMP_ADAPTER_LOWER_POWER		0x00200000

#define FMP_ADAPTER_NON_RECOVER_ERROR	0x00800000
#define FMP_ADAPTER_HARDWARE_ERROR	0x04000000

#define FMP_ADAPTER_FAIL_SEND_MASK	0x3ff00000

/* Some offsets in PCI config space that are actually used. */
#define ET1310_PCI_MAC_ADDRESS		0xA4
#define ET1310_PCI_EEPROM_STATUS	0xB2
#define ET1310_PCI_ACK_NACK		0xC0
#define ET1310_PCI_REPLAY		0xC2
#define ET1310_PCI_L0L1LATENCY		0xCF

/* PCI Product IDs */
#define ET131X_PCI_DEVICE_ID_GIG	0xED00	/* ET1310 1000 Base-T 8 */
#define ET131X_PCI_DEVICE_ID_FAST	0xED01	/* ET1310 100 Base-T */

/* Define order of magnitude converter */
#define NANO_IN_A_MICRO	1000

#define PARM_RX_NUM_BUFS_DEF	4
#define PARM_RX_TIME_INT_DEF	10
#define PARM_RX_MEM_END_DEF	0x2bc
#define PARM_TX_TIME_INT_DEF	40
#define PARM_TX_NUM_BUFS_DEF	4
#define PARM_DMA_CACHE_DEF	0

/* RX defines */
#define FBR_CHUNKS		32
#define MAX_DESC_PER_RING_RX	1024

/* number of RFDs - default and min */
#define RFD_LOW_WATER_MARK	40
#define NIC_DEFAULT_NUM_RFD	1024
#define NUM_FBRS		2

#define MAX_PACKETS_HANDLED	256

#define ALCATEL_MULTICAST_PKT	0x01000000
#define ALCATEL_BROADCAST_PKT	0x02000000

/* typedefs for Free Buffer Descriptors */
struct fbr_desc {
	u32 addr_lo;
	u32 addr_hi;
	u32 word2;	/* Bits 10-31 reserved, 0-9 descriptor */
};

/* Packet Status Ring Descriptors
 *
 * Word 0:
 *
 * top 16 bits are from the Alcatel Status Word as enumerated in
 * PE-MCXMAC Data Sheet IPD DS54 0210-1 (also IPD-DS80 0205-2)
 *
 * 0: hp			hash pass
 * 1: ipa			IP checksum assist
 * 2: ipp			IP checksum pass
 * 3: tcpa			TCP checksum assist
 * 4: tcpp			TCP checksum pass
 * 5: wol			WOL Event
 * 6: rxmac_error		RXMAC Error Indicator
 * 7: drop			Drop packet
 * 8: ft			Frame Truncated
 * 9: jp			Jumbo Packet
 * 10: vp			VLAN Packet
 * 11-15: unused
 * 16: asw_prev_pkt_dropped	e.g. IFG too small on previous
 * 17: asw_RX_DV_event		short receive event detected
 * 18: asw_false_carrier_event	bad carrier since last good packet
 * 19: asw_code_err		one or more nibbles signalled as errors
 * 20: asw_CRC_err		CRC error
 * 21: asw_len_chk_err		frame length field incorrect
 * 22: asw_too_long		frame length > 1518 bytes
 * 23: asw_OK			valid CRC + no code error
 * 24: asw_multicast		has a multicast address
 * 25: asw_broadcast		has a broadcast address
 * 26: asw_dribble_nibble	spurious bits after EOP
 * 27: asw_control_frame	is a control frame
 * 28: asw_pause_frame		is a pause frame
 * 29: asw_unsupported_op	unsupported OP code
 * 30: asw_VLAN_tag		VLAN tag detected
 * 31: asw_long_evt		Rx long event
 *
 * Word 1:
 * 0-15: length			length in bytes
 * 16-25: bi			Buffer Index
 * 26-27: ri			Ring Index
 * 28-31: reserved
 */
struct pkt_stat_desc {
	u32 word0;
	u32 word1;
};

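/* Illustrative helpers (not part of the original driver): given the word1
 * layout documented above, the length, buffer index and ring index could be
 * extracted as below. The driver itself open-codes these shifts and masks
 * where it walks the packet status ring.
 */
static inline u32 psd_length(const struct pkt_stat_desc *psd)
{
	return psd->word1 & 0xFFFF;		/* bits 0-15 */
}

static inline u32 psd_buffer_index(const struct pkt_stat_desc *psd)
{
	return (psd->word1 >> 16) & 0x3FF;	/* bits 16-25 */
}

static inline u32 psd_ring_index(const struct pkt_stat_desc *psd)
{
	return (psd->word1 >> 26) & 0x3;	/* bits 26-27 */
}
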
/* Typedefs for the RX DMA status word */

/* rx status word 0 holds part of the status bits of the Rx DMA engine
 * that get copied out to memory by the ET-1310. Word 0 is a 32 bit word
 * which contains the Free Buffer ring 0 and 1 available offset.
 *
 * bit 0-9 FBR1 offset
 * bit 10 Wrap flag for FBR1
 * bit 16-25 FBR0 offset
 * bit 26 Wrap flag for FBR0
 */

/* RXSTAT_WORD1_t structure holds part of the status bits of the Rx DMA engine
 * that get copied out to memory by the ET-1310. Word 3 is a 32 bit word
 * which contains the Packet Status Ring available offset.
 *
 * bit 0-15 reserved
 * bit 16-27 PSRoffset
 * bit 28 PSRwrap
 * bit 29-31 unused
 */

/* struct rx_status_block is a structure representing the status of the Rx
 * DMA engine. It sits in free memory, and is pointed to by 0x101c / 0x1020.
 */
struct rx_status_block {
	u32 word0;
	u32 word1;
};

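/* Sketch only (these mask names are not from the original source): the
 * free-buffer-ring and PSR offsets described above could be pulled out of
 * the status block with masks such as:
 */
#define RXSTAT0_FBR1_OFFSET_MASK	0x000003FF	/* word0, bits 0-9 */
#define RXSTAT0_FBR1_WRAP		0x00000400	/* word0, bit 10 */
#define RXSTAT0_FBR0_OFFSET_MASK	0x03FF0000	/* word0, bits 16-25 */
#define RXSTAT0_FBR0_WRAP		0x04000000	/* word0, bit 26 */
#define RXSTAT1_PSR_OFFSET_MASK		0x0FFF0000	/* word1, bits 16-27 */
#define RXSTAT1_PSR_WRAP		0x10000000	/* word1, bit 28 */
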
/* Structure for look-up table holding free buffer ring pointers, addresses
 * and state.
 */
struct fbr_lookup {
	void *virt[MAX_DESC_PER_RING_RX];
	u32 bus_high[MAX_DESC_PER_RING_RX];
	u32 bus_low[MAX_DESC_PER_RING_RX];
	void *ring_virtaddr;
	dma_addr_t ring_physaddr;
	void *mem_virtaddrs[MAX_DESC_PER_RING_RX / FBR_CHUNKS];
	dma_addr_t mem_physaddrs[MAX_DESC_PER_RING_RX / FBR_CHUNKS];
	u32 local_full;
	u32 num_entries;
	dma_addr_t buffsize;
};

/* struct rx_ring is the structure representing the adaptor's local
 * reference(s) to the rings
 */
struct rx_ring {
	struct fbr_lookup *fbr[NUM_FBRS];
	void *ps_ring_virtaddr;
	dma_addr_t ps_ring_physaddr;
	u32 local_psr_full;
	u32 psr_entries;

	struct rx_status_block *rx_status_block;
	dma_addr_t rx_status_bus;

	/* RECV */
	struct list_head recv_list;
	u32 num_ready_recv;

	u32 num_rfd;

	bool unfinished_receives;
};

/* TX defines */
/* word 2 of the control bits in the Tx Descriptor ring for the ET-1310
 *
 * 0-15: length of packet
 * 16-27: VLAN tag
 * 28: VLAN CFI
 * 29-31: VLAN priority
 *
 * word 3 of the control bits in the Tx Descriptor ring for the ET-1310
 *
 * 0: last packet in the sequence
 * 1: first packet in the sequence
 * 2: interrupt the processor when this pkt sent
 * 3: Control word - no packet data
 * 4: Issue half-duplex backpressure : XON/XOFF
 * 5: send pause frame
 * 6: Tx frame has error
 * 7: append CRC
 * 8: MAC override
 * 9: pad packet
 * 10: Packet is a Huge packet
 * 11: append VLAN tag
 * 12: IP checksum assist
 * 13: TCP checksum assist
 * 14: UDP checksum assist
 */
#define TXDESC_FLAG_LASTPKT	0x0001
#define TXDESC_FLAG_FIRSTPKT	0x0002
#define TXDESC_FLAG_INTPROC	0x0004

/* struct tx_desc represents each descriptor on the ring */
struct tx_desc {
	u32 addr_hi;
	u32 addr_lo;
	u32 len_vlan;	/* control words how to xmit the */
	u32 flags;	/* data (detailed above) */
};

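/* Hypothetical example (not in the original driver): using the word 2/word 3
 * layout above, a single-fragment transmit descriptor for an untagged buffer
 * of 'len' bytes at DMA address 'dma', flagged first+last with a completion
 * interrupt, could be filled in as:
 *
 *	desc->addr_hi  = upper_32_bits(dma);
 *	desc->addr_lo  = lower_32_bits(dma);
 *	desc->len_vlan = len & 0xFFFF;		// bits 0-15: packet length
 *	desc->flags    = TXDESC_FLAG_FIRSTPKT | TXDESC_FLAG_LASTPKT |
 *			 TXDESC_FLAG_INTPROC;
 */
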
/* The status of the Tx DMA engine sits in free memory, and is pointed to
 * by 0x101c / 0x1020. This is a DMA10 type
 */

/* TCB (Transmit Control Block: Host Side) */
struct tcb {
	struct tcb *next;	/* Next entry in ring */
	u32 count;		/* Used to spot stuck/lost packets */
	u32 stale;		/* Used to spot stuck/lost packets */
	struct sk_buff *skb;	/* Network skb we are tied to */
	u32 index;		/* Ring indexes */
	u32 index_start;
};

/* Structure representing our local reference(s) to the ring */
struct tx_ring {
	/* TCB (Transmit Control Block) memory and lists */
	struct tcb *tcb_ring;

	/* List of TCBs that are ready to be used */
	struct tcb *tcb_qhead;
	struct tcb *tcb_qtail;

	/* list of TCBs that are currently being sent. NOTE that access to all
	 * three of these (including used) are controlled via the
	 * TCBSendQLock. This lock should be secured prior to incrementing /
	 * decrementing used, or any queue manipulation on send_head /
	 * tail
	 */
	struct tcb *send_head;
	struct tcb *send_tail;
	int used;

	/* The actual descriptor ring */
	struct tx_desc *tx_desc_ring;
	dma_addr_t tx_desc_ring_pa;

	/* send_idx indicates where we last wrote to in the descriptor ring. */
	u32 send_idx;

	/* The location of the write-back status block */
	u32 *tx_status;
	dma_addr_t tx_status_pa;

	/* Packets since the last IRQ: used for interrupt coalescing */
	int since_irq;
};

/* Do not change these values: if changed, then change also in respective
 * TXdma and Rxdma engines
 */
#define NUM_DESC_PER_RING_TX	512	/* TX Do not change these values */
#define NUM_TCB			64

/* These values are all superseded by registry entries to facilitate tuning.
 * Once the desired performance has been achieved, the optimal registry values
 * should be re-populated to these #defines:
 */
#define TX_ERROR_PERIOD		1000

#define LO_MARK_PERCENT_FOR_PSR	15
#define LO_MARK_PERCENT_FOR_RX	15

/* RFD (Receive Frame Descriptor) */
struct rfd {
	struct list_head list_node;
	struct sk_buff *skb;
	u32 len;	/* total size of receive frame */
	u16 bufferindex;
	u8 ringindex;
};

/* Flow Control */
#define FLOW_BOTH	0
#define FLOW_TXONLY	1
#define FLOW_RXONLY	2
#define FLOW_NONE	3

/* Struct to define some device statistics */
struct ce_stats {
	/* MIB II variables
	 *
	 * NOTE: atomic_t types are only guaranteed to store 24-bits; if we
	 * MUST have 32, then we'll need another way to perform atomic
	 * operations
	 */
	u32 multicast_pkts_rcvd;
	u32 rcvd_pkts_dropped;

	/* Tx Statistics. */
	u32 tx_underflows;
	u32 tx_collisions;
	u32 tx_excessive_collisions;
	u32 tx_first_collisions;
	u32 tx_late_collisions;
	u32 tx_max_pkt_errs;
	u32 tx_deferred;

	/* Rx Statistics. */
	u32 rx_overflows;
	u32 rx_length_errs;
	u32 rx_align_errs;
	u32 rx_crc_errs;
	u32 rx_code_violations;
	u32 rx_other_errs;

	u32 interrupt_status;
};

/* The private adapter structure */
struct et131x_adapter {
	struct net_device *netdev;
	struct pci_dev *pdev;
	struct mii_bus *mii_bus;
	struct phy_device *phydev;
	struct napi_struct napi;

	/* Flags that indicate current state of the adapter */
	u32 flags;

	/* local link state, to determine if a state change has occurred */
	int link;

	/* Configuration */
	u8 rom_addr[ETH_ALEN];
	u8 addr[ETH_ALEN];
	bool has_eeprom;
	u8 eeprom_data[2];

	spinlock_t tcb_send_qlock;	/* protects the tx_ring send tcb list */
	spinlock_t tcb_ready_qlock;	/* protects the tx_ring ready tcb list */
	spinlock_t rcv_lock;		/* protects the rx_ring receive list */

	/* Packet Filter and look ahead size */
	u32 packet_filter;

	/* multicast list */
	u32 multicast_addr_count;
	u8 multicast_list[NIC_MAX_MCAST_LIST][ETH_ALEN];

	/* Pointer to the device's PCI register space */
	struct address_map __iomem *regs;

	/* Registry parameters */
	u8 wanted_flow;			/* Flow we want for 802.3x flow control */
	u32 registry_jumbo_packet;	/* Max supported ethernet packet size */

	/* Derived from the registry: */
	u8 flow;	/* flow control validated by the far-end */

	/* Minimize init-time */
	struct timer_list error_timer;

	/* variable putting the phy into coma mode when boot up with no cable
	 * plugged in after 5 seconds
	 */
	u8 boot_coma;

	/* Tx Memory Variables */
	struct tx_ring tx_ring;

	/* Rx Memory Variables */
	struct rx_ring rx_ring;

	/* Stats */
	struct ce_stats stats;
};

static int eeprom_wait_ready(struct pci_dev *pdev, u32 *status)
{
	u32 reg;
	int i;

	/* 1. Check LBCIF Status Register for bits 6 & 3:2 all equal to 0 and
	 * bits 7,1:0 both equal to 1, at least once after reset.
	 * Subsequent operations need only to check that bits 1:0 are equal
	 * to 1 prior to starting a single byte read/write
	 */
	for (i = 0; i < MAX_NUM_REGISTER_POLLS; i++) {
		/* Read registers grouped in DWORD1 */
		if (pci_read_config_dword(pdev, LBCIF_DWORD1_GROUP, &reg))
			return -EIO;

		/* I2C idle and Phy Queue Avail both true */
		if ((reg & 0x3000) == 0x3000) {
			if (status)
				*status = reg;
			return reg & 0xFF;
		}
	}
	return -ETIMEDOUT;
}

/* eeprom_write - Write a byte to the ET1310's EEPROM
 * @adapter: pointer to our private adapter structure
 * @addr: the address to write
 * @data: the value to write
 *
 * Returns 0 for a successful write.
 */
static int eeprom_write(struct et131x_adapter *adapter, u32 addr, u8 data)
{
	struct pci_dev *pdev = adapter->pdev;
	int index = 0;
	int retries;
	int err = 0;
	int writeok = 0;
	u32 status;
	u32 val = 0;

	/* For an EEPROM, an I2C single byte write is defined as a START
	 * condition followed by the device address, EEPROM address, one byte
	 * of data and a STOP condition. The STOP condition will trigger the
	 * EEPROM's internally timed write cycle to the nonvolatile memory.
	 * All inputs are disabled during this write cycle and the EEPROM will
	 * not respond to any access until the internal write is complete.
	 */
	err = eeprom_wait_ready(pdev, NULL);
	if (err < 0)
		return err;

	/* 2. Write to the LBCIF Control Register: bit 7=1, bit 6=1, bit 3=0,
	 * and bits 1:0 both =0. Bit 5 should be set according to the
	 * type of EEPROM being accessed (1=two byte addressing, 0=one
	 * byte addressing).
	 */
	if (pci_write_config_byte(pdev, LBCIF_CONTROL_REGISTER,
				  LBCIF_CONTROL_LBCIF_ENABLE |
				  LBCIF_CONTROL_I2C_WRITE))
		return -EIO;

	/* Prepare EEPROM address for Step 3 */
	for (retries = 0; retries < MAX_NUM_WRITE_RETRIES; retries++) {
		/* Write the address to the LBCIF Address Register */
		if (pci_write_config_dword(pdev, LBCIF_ADDRESS_REGISTER, addr))
			break;
		/* Write the data to the LBCIF Data Register (the I2C write
		 * will begin).
		 */
		if (pci_write_config_byte(pdev, LBCIF_DATA_REGISTER, data))
			break;
		/* Monitor bit 1:0 of the LBCIF Status Register. When bits
		 * 1:0 are both equal to 1, the I2C write has completed and the
		 * internal write cycle of the EEPROM is about to start.
		 * (bits 1:0 = 01 is a legal state while waiting from both
		 * equal to 1, but bits 1:0 = 10 is invalid and implies that
		 * something is broken).
		 */
		err = eeprom_wait_ready(pdev, &status);
		if (err < 0)
			return 0;

		/* Check bit 3 of the LBCIF Status Register. If equal to 1,
		 * an error has occurred. Don't break here if we are revision
		 * 1, this is so we do a blind write for load bug.
		 */
		if ((status & LBCIF_STATUS_GENERAL_ERROR) &&
		    adapter->pdev->revision == 0)
			break;

		/* Check bit 2 of the LBCIF Status Register. If equal to 1 an
		 * ACK error has occurred on the address phase of the write.
		 * This could be due to an actual hardware failure or the
		 * EEPROM may still be in its internal write cycle from a
		 * previous write. This write operation was ignored and must be
		 * repeated later.
		 */
		if (status & LBCIF_STATUS_ACK_ERROR) {
			/* This could be due to an actual hardware failure
			 * or the EEPROM may still be in its internal write
			 * cycle from a previous write. This write operation
			 * was ignored and must be repeated later.
			 */
			udelay(10);
			continue;
		}

		writeok = 1;
		break;
	}

	/* Set bit 6 of the LBCIF Control Register = 0. */
	udelay(10);

	while (1) {
		if (pci_write_config_byte(pdev, LBCIF_CONTROL_REGISTER,
					  LBCIF_CONTROL_LBCIF_ENABLE))
			writeok = 0;

		/* Do read until internal ACK_ERROR goes away meaning write
		 * completed
		 */
		do {
			pci_write_config_dword(pdev,
					       LBCIF_ADDRESS_REGISTER,
					       addr);
			do {
				pci_read_config_dword(pdev,
						      LBCIF_DATA_REGISTER,
						      &val);
			} while ((val & 0x00010000) == 0);
		} while (val & 0x00040000);

		if ((val & 0xFF00) != 0xC000 || index == 10000)
			break;
		index++;
	}
	return writeok ? 0 : -EIO;
}

/* eeprom_read - Read a byte from the ET1310's EEPROM
 * @adapter: pointer to our private adapter structure
 * @addr: the address from which to read
 * @pdata: a pointer to a byte in which to store the value of the read
 *
 * Returns 0 for a successful read.
 */
static int eeprom_read(struct et131x_adapter *adapter, u32 addr, u8 *pdata)
{
	struct pci_dev *pdev = adapter->pdev;
	int err;
	u32 status;

	/* A single byte read is similar to the single byte write, with the
	 * exception of the data flow:
	 */
	err = eeprom_wait_ready(pdev, NULL);
	if (err < 0)
		return err;
	/* Write to the LBCIF Control Register: bit 7=1, bit 6=0, bit 3=0,
	 * and bits 1:0 both =0. Bit 5 should be set according to the type
	 * of EEPROM being accessed (1=two byte addressing, 0=one byte
	 * addressing).
	 */
	if (pci_write_config_byte(pdev, LBCIF_CONTROL_REGISTER,
				  LBCIF_CONTROL_LBCIF_ENABLE))
		return -EIO;
	/* Write the address to the LBCIF Address Register (I2C read will
	 * begin).
	 */
	if (pci_write_config_dword(pdev, LBCIF_ADDRESS_REGISTER, addr))
		return -EIO;
	/* Monitor bit 0 of the LBCIF Status Register. When = 1, I2C read
	 * is complete. (if bit 1 =1 and bit 0 stays = 0, a hardware failure
	 * has occurred).
	 */
	err = eeprom_wait_ready(pdev, &status);
	if (err < 0)
		return err;
	/* Regardless of error status, read data byte from LBCIF Data
	 * Register.
	 */
	*pdata = err;
	/* Check bit 2 of the LBCIF Status Register. If = 1,
	 * then an error has occurred.
	 */
	return (status & LBCIF_STATUS_ACK_ERROR) ? -EIO : 0;
}

static int et131x_init_eeprom(struct et131x_adapter *adapter)
{
	struct pci_dev *pdev = adapter->pdev;
	u8 eestatus;

	/* We first need to check the EEPROM Status code located at offset
	 * 0xB2 of config space
	 */
	pci_read_config_byte(pdev, ET1310_PCI_EEPROM_STATUS, &eestatus);

	/* THIS IS A WORKAROUND:
	 * I need to call this function twice to get my card in a
	 * LG M1 Express Dual running. I also tried a msleep before this
	 * function, because I thought there could be some time conditions,
	 * but it didn't work. Calling the whole function twice also works.
	 */
	if (pci_read_config_byte(pdev, ET1310_PCI_EEPROM_STATUS, &eestatus)) {
		dev_err(&pdev->dev,
			"Could not read PCI config space for EEPROM Status\n");
		return -EIO;
	}

	/* Determine if the error(s) we care about are present. If they are
	 * present we need to fail.
	 */
	if (eestatus & 0x4C) {
		int write_failed = 0;

		if (pdev->revision == 0x01) {
			int i;
			static const u8 eedata[4] = { 0xFE, 0x13, 0x10, 0xFF };

			/* Re-write the first 4 bytes if we have an eeprom
			 * present and the revision id is 1, this fixes the
			 * corruption seen with 1310 B Silicon
			 */
			for (i = 0; i < 3; i++)
				if (eeprom_write(adapter, i, eedata[i]) < 0)
					write_failed = 1;
		}
		if (pdev->revision != 0x01 || write_failed) {
			dev_err(&pdev->dev,
				"Fatal EEPROM Status Error - 0x%04x\n",
				eestatus);

			/* This error could mean that there was an error
			 * reading the eeprom or that the eeprom doesn't exist.
			 * We will treat each case the same and not try to
			 * gather additional information that normally would
			 * come from the eeprom, like MAC Address
			 */
			adapter->has_eeprom = 0;
			return -EIO;
		}
	}
	adapter->has_eeprom = 1;

	/* Read the EEPROM for information regarding LED behavior. Refer to
	 * ET1310_phy.c, et131x_xcvr_init(), for its use.
	 */
	eeprom_read(adapter, 0x70, &adapter->eeprom_data[0]);
	eeprom_read(adapter, 0x71, &adapter->eeprom_data[1]);

	if (adapter->eeprom_data[0] != 0xcd)
		/* Disable all optional features */
		adapter->eeprom_data[1] = 0x00;

	return 0;
}

/* et131x_rx_dma_enable - re-start of Rx_DMA on the ET1310.
 * @adapter: pointer to our adapter structure
 */
static void et131x_rx_dma_enable(struct et131x_adapter *adapter)
{
	/* Setup the receive dma configuration register for normal operation */
	u32 csr = ET_RXDMA_CSR_FBR1_ENABLE;
	struct rx_ring *rx_ring = &adapter->rx_ring;

	if (rx_ring->fbr[1]->buffsize == 4096)
		csr |= ET_RXDMA_CSR_FBR1_SIZE_LO;
	else if (rx_ring->fbr[1]->buffsize == 8192)
		csr |= ET_RXDMA_CSR_FBR1_SIZE_HI;
	else if (rx_ring->fbr[1]->buffsize == 16384)
		csr |= ET_RXDMA_CSR_FBR1_SIZE_LO | ET_RXDMA_CSR_FBR1_SIZE_HI;

	csr |= ET_RXDMA_CSR_FBR0_ENABLE;
	if (rx_ring->fbr[0]->buffsize == 256)
		csr |= ET_RXDMA_CSR_FBR0_SIZE_LO;
	else if (rx_ring->fbr[0]->buffsize == 512)
		csr |= ET_RXDMA_CSR_FBR0_SIZE_HI;
	else if (rx_ring->fbr[0]->buffsize == 1024)
		csr |= ET_RXDMA_CSR_FBR0_SIZE_LO | ET_RXDMA_CSR_FBR0_SIZE_HI;
	writel(csr, &adapter->regs->rxdma.csr);

	csr = readl(&adapter->regs->rxdma.csr);
	if (csr & ET_RXDMA_CSR_HALT_STATUS) {
		udelay(5);
		csr = readl(&adapter->regs->rxdma.csr);
		if (csr & ET_RXDMA_CSR_HALT_STATUS) {
			dev_err(&adapter->pdev->dev,
				"RX Dma failed to exit halt state. CSR 0x%08x\n",
				csr);
		}
	}
}

/* et131x_rx_dma_disable - Stop of Rx_DMA on the ET1310
 * @adapter: pointer to our adapter structure
 */
static void et131x_rx_dma_disable(struct et131x_adapter *adapter)
{
	u32 csr;
	/* Setup the receive dma configuration register */
	writel(ET_RXDMA_CSR_HALT | ET_RXDMA_CSR_FBR1_ENABLE,
	       &adapter->regs->rxdma.csr);
	csr = readl(&adapter->regs->rxdma.csr);
	if (!(csr & ET_RXDMA_CSR_HALT_STATUS)) {
		udelay(5);
		csr = readl(&adapter->regs->rxdma.csr);
		if (!(csr & ET_RXDMA_CSR_HALT_STATUS))
			dev_err(&adapter->pdev->dev,
				"RX Dma failed to enter halt state. CSR 0x%08x\n",
				csr);
	}
}

/* et131x_tx_dma_enable - re-start of Tx_DMA on the ET1310.
 * @adapter: pointer to our adapter structure
 *
 * Mainly used after a return to the D0 (full-power) state from a lower state.
 */
static void et131x_tx_dma_enable(struct et131x_adapter *adapter)
{
	/* Setup the transmit dma configuration register for normal
	 * operation
	 */
	writel(ET_TXDMA_SNGL_EPKT | (PARM_DMA_CACHE_DEF << ET_TXDMA_CACHE_SHIFT),
	       &adapter->regs->txdma.csr);
}

static inline void add_10bit(u32 *v, int n)
{
	*v = INDEX10(*v + n) | (*v & ET_DMA10_WRAP);
}

static inline void add_12bit(u32 *v, int n)
{
	*v = INDEX12(*v + n) | (*v & ET_DMA12_WRAP);
}

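/* Illustrative note (not from the original source): these helpers mask the
 * incremented index back into its 10/12-bit field while preserving the
 * caller-managed wrap bit, e.g.:
 *
 *	u32 v = 1022 | ET_DMA10_WRAP;	// index 1022, wrap bit set
 *	add_10bit(&v, 3);		// v == 1 | ET_DMA10_WRAP
 *
 * Toggling the wrap bit when the index actually wraps is left to the
 * callers.
 */
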
/* et1310_config_mac_regs1 - Initialize the first part of MAC regs
 * @adapter: pointer to our adapter structure
 */
static void et1310_config_mac_regs1(struct et131x_adapter *adapter)
{
	struct mac_regs __iomem *macregs = &adapter->regs->mac;
	u32 station1;
	u32 station2;
	u32 ipg;

	/* First we need to reset everything. Write to MAC configuration
	 * register 1 to perform reset.
	 */
	writel(ET_MAC_CFG1_SOFT_RESET | ET_MAC_CFG1_SIM_RESET |
	       ET_MAC_CFG1_RESET_RXMC | ET_MAC_CFG1_RESET_TXMC |
	       ET_MAC_CFG1_RESET_RXFUNC | ET_MAC_CFG1_RESET_TXFUNC,
	       &macregs->cfg1);

	/* Next let's configure the MAC Inter-packet gap register */
	ipg = 0x38005860;	/* IPG1 0x38 IPG2 0x58 B2B 0x60 */
	ipg |= 0x50 << 8;	/* ifg enforce 0x50 */
	writel(ipg, &macregs->ipg);

	/* Next let's configure the MAC Half Duplex register */
	/* BEB trunc 0xA, Ex Defer, Rexmit 0xF Coll 0x37 */
	writel(0x00A1F037, &macregs->hfdp);

	/* Next let's configure the MAC Interface Control register */
	writel(0, &macregs->if_ctrl);

	/* Let's move on to setting up the mii management configuration */
	writel(ET_MAC_MIIMGMT_CLK_RST, &macregs->mii_mgmt_cfg);

	/* Next let's configure the MAC Station Address register. These
	 * values are read from the EEPROM during initialization and stored
	 * in the adapter structure. We write what is stored in the adapter
	 * structure to the MAC Station Address registers high and low. This
	 * station address is used for generating and checking pause control
	 * packets.
	 */
	station2 = (adapter->addr[1] << ET_MAC_STATION_ADDR2_OC2_SHIFT) |
		   (adapter->addr[0] << ET_MAC_STATION_ADDR2_OC1_SHIFT);
	station1 = (adapter->addr[5] << ET_MAC_STATION_ADDR1_OC6_SHIFT) |
		   (adapter->addr[4] << ET_MAC_STATION_ADDR1_OC5_SHIFT) |
		   (adapter->addr[3] << ET_MAC_STATION_ADDR1_OC4_SHIFT) |
		    adapter->addr[2];
	writel(station1, &macregs->station_addr_1);
	writel(station2, &macregs->station_addr_2);

	/* Max ethernet packet in bytes that will be passed by the mac without
	 * being truncated. Allow the MAC to pass 4 more than our max packet
	 * size. This is 4 for the Ethernet CRC.
	 *
	 * Packets larger than (registry_jumbo_packet) that do not contain a
	 * VLAN ID will be dropped by the Rx function.
	 */
	writel(adapter->registry_jumbo_packet + 4, &macregs->max_fm_len);

	/* clear out MAC config reset */
	writel(0, &macregs->cfg1);
}

/* et1310_config_mac_regs2 - Initialize the second part of MAC regs
 * @adapter: pointer to our adapter structure
 */
static void et1310_config_mac_regs2(struct et131x_adapter *adapter)
{
	int32_t delay = 0;
	struct mac_regs __iomem *mac = &adapter->regs->mac;
	struct phy_device *phydev = adapter->phydev;
	u32 cfg1;
	u32 cfg2;
	u32 ifctrl;
	u32 ctl;

	ctl = readl(&adapter->regs->txmac.ctl);
	cfg1 = readl(&mac->cfg1);
	cfg2 = readl(&mac->cfg2);
	ifctrl = readl(&mac->if_ctrl);

	/* Set up the if mode bits */
	cfg2 &= ~ET_MAC_CFG2_IFMODE_MASK;
	if (phydev->speed == SPEED_1000) {
		cfg2 |= ET_MAC_CFG2_IFMODE_1000;
		/* Phy mode bit */
		ifctrl &= ~ET_MAC_IFCTRL_PHYMODE;
	} else {
		cfg2 |= ET_MAC_CFG2_IFMODE_100;
		ifctrl |= ET_MAC_IFCTRL_PHYMODE;
	}

	/* We need to enable Rx/Tx */
	cfg1 |= ET_MAC_CFG1_RX_ENABLE | ET_MAC_CFG1_TX_ENABLE |
		ET_MAC_CFG1_TX_FLOW;
	/* Initialize loop back to off */
	cfg1 &= ~(ET_MAC_CFG1_LOOPBACK | ET_MAC_CFG1_RX_FLOW);
	if (adapter->flow == FLOW_RXONLY || adapter->flow == FLOW_BOTH)
		cfg1 |= ET_MAC_CFG1_RX_FLOW;
	writel(cfg1, &mac->cfg1);

	/* Now we need to initialize the MAC Configuration 2 register */
	/* preamble 7, check length, huge frame off, pad crc, crc enable
	 * full duplex off
	 */
	cfg2 |= 0x7 << ET_MAC_CFG2_PREAMBLE_SHIFT;
	cfg2 |= ET_MAC_CFG2_IFMODE_LEN_CHECK;
	cfg2 |= ET_MAC_CFG2_IFMODE_PAD_CRC;
	cfg2 |= ET_MAC_CFG2_IFMODE_CRC_ENABLE;
	cfg2 &= ~ET_MAC_CFG2_IFMODE_HUGE_FRAME;
	cfg2 &= ~ET_MAC_CFG2_IFMODE_FULL_DPLX;

	/* Turn on duplex if needed */
	if (phydev->duplex == DUPLEX_FULL)
		cfg2 |= ET_MAC_CFG2_IFMODE_FULL_DPLX;

	ifctrl &= ~ET_MAC_IFCTRL_GHDMODE;
	if (phydev->duplex == DUPLEX_HALF)
		ifctrl |= ET_MAC_IFCTRL_GHDMODE;

	writel(ifctrl, &mac->if_ctrl);
	writel(cfg2, &mac->cfg2);

	do {
		udelay(10);
		delay++;
		cfg1 = readl(&mac->cfg1);
	} while ((cfg1 & ET_MAC_CFG1_WAIT) != ET_MAC_CFG1_WAIT && delay < 100);

	if (delay == 100) {
		dev_warn(&adapter->pdev->dev,
			 "Syncd bits did not respond correctly cfg1 word 0x%08x\n",
			 cfg1);
	}

	/* Enable txmac */
	ctl |= ET_TX_CTRL_TXMAC_ENABLE | ET_TX_CTRL_FC_DISABLE;
	writel(ctl, &adapter->regs->txmac.ctl);

	/* Ready to start the RXDMA/TXDMA engine */
	if (adapter->flags & FMP_ADAPTER_LOWER_POWER) {
		et131x_rx_dma_enable(adapter);
		et131x_tx_dma_enable(adapter);
	}
}

/* et1310_in_phy_coma - check if the device is in phy coma
 * @adapter: pointer to our adapter structure
 *
 * Returns 0 if the device is not in phy coma, 1 if it is in phy coma
 */
static int et1310_in_phy_coma(struct et131x_adapter *adapter)
{
	u32 pmcsr = readl(&adapter->regs->global.pm_csr);

	return ET_PM_PHY_SW_COMA & pmcsr ? 1 : 0;
}

static void et1310_setup_device_for_multicast(struct et131x_adapter *adapter)
{
	struct rxmac_regs __iomem *rxmac = &adapter->regs->rxmac;
	u32 hash1 = 0;
	u32 hash2 = 0;
	u32 hash3 = 0;
	u32 hash4 = 0;
	u32 pm_csr;

	/* If ET131X_PACKET_TYPE_MULTICAST is specified, then we provision
	 * the multi-cast LIST. If it is NOT specified, (and "ALL" is not
	 * specified) then we should pass NO multi-cast addresses to the
	 * driver.
	 */
	if (adapter->packet_filter & ET131X_PACKET_TYPE_MULTICAST) {
		int i;

		/* Loop through our multicast array and set up the device */
		for (i = 0; i < adapter->multicast_addr_count; i++) {
			u32 result;

			result = ether_crc(6, adapter->multicast_list[i]);

			result = (result & 0x3F800000) >> 23;

			if (result < 32) {
				hash1 |= (1 << result);
			} else if ((31 < result) && (result < 64)) {
				result -= 32;
				hash2 |= (1 << result);
			} else if ((63 < result) && (result < 96)) {
				result -= 64;
				hash3 |= (1 << result);
			} else {
				result -= 96;
				hash4 |= (1 << result);
			}
		}
	}

	/* Write out the new hash to the device */
	pm_csr = readl(&adapter->regs->global.pm_csr);
	if (!et1310_in_phy_coma(adapter)) {
		writel(hash1, &rxmac->multi_hash1);
		writel(hash2, &rxmac->multi_hash2);
		writel(hash3, &rxmac->multi_hash3);
		writel(hash4, &rxmac->multi_hash4);
	}
}

static void et1310_setup_device_for_unicast(struct et131x_adapter *adapter)
{
	struct rxmac_regs __iomem *rxmac = &adapter->regs->rxmac;
	u32 uni_pf1;
	u32 uni_pf2;
	u32 uni_pf3;
	u32 pm_csr;

	/* Set up unicast packet filter reg 3 to be the first two octets of
	 * the MAC address for both addresses
	 *
	 * Set up unicast packet filter reg 2 to be the octets 2 - 5 of the
	 * MAC address for second address
	 *
	 * Set up unicast packet filter reg 1 to be the octets 2 - 5 of the
	 * MAC address for first address
	 */
	uni_pf3 = (adapter->addr[0] << ET_RX_UNI_PF_ADDR2_1_SHIFT) |
		  (adapter->addr[1] << ET_RX_UNI_PF_ADDR2_2_SHIFT) |
		  (adapter->addr[0] << ET_RX_UNI_PF_ADDR1_1_SHIFT) |
		   adapter->addr[1];

	uni_pf2 = (adapter->addr[2] << ET_RX_UNI_PF_ADDR2_3_SHIFT) |
		  (adapter->addr[3] << ET_RX_UNI_PF_ADDR2_4_SHIFT) |
		  (adapter->addr[4] << ET_RX_UNI_PF_ADDR2_5_SHIFT) |
		   adapter->addr[5];

	uni_pf1 = (adapter->addr[2] << ET_RX_UNI_PF_ADDR1_3_SHIFT) |
		  (adapter->addr[3] << ET_RX_UNI_PF_ADDR1_4_SHIFT) |
		  (adapter->addr[4] << ET_RX_UNI_PF_ADDR1_5_SHIFT) |
		   adapter->addr[5];

	pm_csr = readl(&adapter->regs->global.pm_csr);
	if (!et1310_in_phy_coma(adapter)) {
		writel(uni_pf1, &rxmac->uni_pf_addr1);
		writel(uni_pf2, &rxmac->uni_pf_addr2);
		writel(uni_pf3, &rxmac->uni_pf_addr3);
	}
}

static void et1310_config_rxmac_regs(struct et131x_adapter *adapter)
{
	struct rxmac_regs __iomem *rxmac = &adapter->regs->rxmac;
	struct phy_device *phydev = adapter->phydev;
	u32 sa_lo;
	u32 sa_hi = 0;
	u32 pf_ctrl = 0;
	u32 __iomem *wolw;

	/* Disable the MAC while it is being configured (also disable WOL) */
	writel(0x8, &rxmac->ctrl);

	/* Initialize WOL to disabled. */
	writel(0, &rxmac->crc0);
	writel(0, &rxmac->crc12);
	writel(0, &rxmac->crc34);

	/* We need to set the WOL mask0 - mask4 next. We initialize it to
	 * its default Values of 0x00000000 because there are no WOL masks
	 * as of this time.
	 */
	for (wolw = &rxmac->mask0_word0; wolw <= &rxmac->mask4_word3; wolw++)
		writel(0, wolw);

	/* Let's set up the WOL Source Address */
	sa_lo = (adapter->addr[2] << ET_RX_WOL_LO_SA3_SHIFT) |
		(adapter->addr[3] << ET_RX_WOL_LO_SA4_SHIFT) |
		(adapter->addr[4] << ET_RX_WOL_LO_SA5_SHIFT) |
		 adapter->addr[5];
	writel(sa_lo, &rxmac->sa_lo);

	sa_hi = (u32)(adapter->addr[0] << ET_RX_WOL_HI_SA1_SHIFT) |
		      adapter->addr[1];
	writel(sa_hi, &rxmac->sa_hi);

	/* Disable all Packet Filtering */
	writel(0, &rxmac->pf_ctrl);

	/* Let's initialize the Unicast Packet filtering address */
	if (adapter->packet_filter & ET131X_PACKET_TYPE_DIRECTED) {
		et1310_setup_device_for_unicast(adapter);
		pf_ctrl |= ET_RX_PFCTRL_UNICST_FILTER_ENABLE;
	} else {
		writel(0, &rxmac->uni_pf_addr1);
		writel(0, &rxmac->uni_pf_addr2);
		writel(0, &rxmac->uni_pf_addr3);
	}

	/* Let's initialize the Multicast hash */
	if (!(adapter->packet_filter & ET131X_PACKET_TYPE_ALL_MULTICAST)) {
		pf_ctrl |= ET_RX_PFCTRL_MLTCST_FILTER_ENABLE;
		et1310_setup_device_for_multicast(adapter);
	}

	/* Runt packet filtering. Didn't work in version A silicon. */
	pf_ctrl |= (NIC_MIN_PACKET_SIZE + 4) << ET_RX_PFCTRL_MIN_PKT_SZ_SHIFT;
	pf_ctrl |= ET_RX_PFCTRL_FRAG_FILTER_ENABLE;

	if (adapter->registry_jumbo_packet > 8192)
		/* In order to transmit jumbo packets greater than 8k, the
		 * FIFO between RxMAC and RxDMA needs to be reduced in size
		 * to (16k - Jumbo packet size). In order to implement this,
		 * we must use "cut through" mode in the RxMAC, which chops
		 * packets down into segments which are (max_size * 16). In
		 * this case we selected 256 bytes, since this is the size of
		 * the PCI-Express TLP's that the 1310 uses.
		 *
		 * seg_en on, fc_en off, size 0x10
		 */
		writel(0x41, &rxmac->mcif_ctrl_max_seg);
	else
		writel(0, &rxmac->mcif_ctrl_max_seg);

	/* Initialize the MCIF water marks */
	writel(0, &rxmac->mcif_water_mark);

	/* Initialize the MIF control */
	writel(0, &rxmac->mif_ctrl);

	/* Initialize the Space Available Register */
	writel(0, &rxmac->space_avail);

	/* Initialize the mif_ctrl register
	 * bit 3: Receive code error. One or more nibbles were signaled as
	 *	errors during the reception of the packet. Clear this
	 *	bit in Gigabit, set it in 100Mbit. This was derived
	 *	experimentally at UNH.
	 * bit 4: Receive CRC error. The packet's CRC did not match the
	 *	internally generated CRC.
	 * bit 5: Receive length check error. Indicates that frame length
	 *	field value in the packet does not match the actual data
	 *	byte length and is not a type field.
	 * bit 16: Receive frame truncated.
	 * bit 17: Drop packet enable
	 */
	if (phydev && phydev->speed == SPEED_100)
		writel(0x30038, &rxmac->mif_ctrl);
	else
		writel(0x30030, &rxmac->mif_ctrl);

	/* Finally we initialize RxMac to be enabled & WOL disabled. Packet
	 * filter is always enabled since it is where the runt packets are
	 * supposed to be dropped. For version A silicon, runt packet
	 * dropping doesn't work, so it is disabled in the pf_ctrl register,
	 * but we still leave the packet filter on.
	 */
	writel(pf_ctrl, &rxmac->pf_ctrl);
	writel(ET_RX_CTRL_RXMAC_ENABLE | ET_RX_CTRL_WOL_DISABLE, &rxmac->ctrl);
}

static void et1310_config_txmac_regs(struct et131x_adapter *adapter)
{
	struct txmac_regs __iomem *txmac = &adapter->regs->txmac;

	/* We need to update the Control Frame Parameters
	 * cfpt - control frame pause timer set to 64 (0x40)
	 * cfep - control frame extended pause timer set to 0x0
	 */
	if (adapter->flow == FLOW_NONE)
		writel(0, &txmac->cf_param);
	else
		writel(0x40, &txmac->cf_param);
}

static void et1310_config_macstat_regs(struct et131x_adapter *adapter)
{
	struct macstat_regs __iomem *macstat = &adapter->regs->macstat;
	u32 __iomem *reg;

	/* initialize all the macstat registers to zero on the device */
	for (reg = &macstat->txrx_0_64_byte_frames;
	     reg <= &macstat->carry_reg2; reg++)
		writel(0, reg);

	/* Unmask any counters that we want to track the overflow of.
	 * Initially this will be all counters. It may become clear later
	 * that we do not need to track all counters.
	 */
	writel(0xFFFFBE32, &macstat->carry_reg1_mask);
	writel(0xFFFE7E8B, &macstat->carry_reg2_mask);
}

/* et131x_phy_mii_read - Read from the PHY through the MII Interface on the MAC
 * @adapter: pointer to our private adapter structure
 * @addr: the address of the transceiver
 * @reg: the register to read
 * @value: pointer to a 16-bit value in which the value will be stored
 */
static int et131x_phy_mii_read(struct et131x_adapter *adapter, u8 addr,
			       u8 reg, u16 *value)
{
	struct mac_regs __iomem *mac = &adapter->regs->mac;
	int status = 0;
	u32 delay = 0;
	u32 mii_addr;
	u32 mii_cmd;
	u32 mii_indicator;

	/* Save a local copy of the registers we are dealing with so we can
	 * set them back
	 */
	mii_addr = readl(&mac->mii_mgmt_addr);
	mii_cmd = readl(&mac->mii_mgmt_cmd);

	/* Stop the current operation */
	writel(0, &mac->mii_mgmt_cmd);

	/* Set up the register we need to read from on the correct PHY */
	writel(ET_MAC_MII_ADDR(addr, reg), &mac->mii_mgmt_addr);

	writel(0x1, &mac->mii_mgmt_cmd);

	do {
		udelay(50);
		delay++;
		mii_indicator = readl(&mac->mii_mgmt_indicator);
	} while ((mii_indicator & ET_MAC_MGMT_WAIT) && delay < 50);

	/* If we hit the max delay, we could not read the register */
	if (delay == 50) {
		dev_warn(&adapter->pdev->dev,
			 "reg 0x%08x could not be read\n", reg);
		dev_warn(&adapter->pdev->dev, "status is 0x%08x\n",
			 mii_indicator);

		status = -EIO;
		goto out;
	}

	/* If we hit here we were able to read the register and we need to
	 * return the value to the caller
	 */
	*value = readl(&mac->mii_mgmt_stat) & ET_MAC_MIIMGMT_STAT_PHYCRTL_MASK;

out:
	/* Stop the read operation */
	writel(0, &mac->mii_mgmt_cmd);

	/* set the registers we touched back to the state at which we entered
	 * this function
	 */
	writel(mii_addr, &mac->mii_mgmt_addr);
	writel(mii_cmd, &mac->mii_mgmt_cmd);

	return status;
}

static int et131x_mii_read(struct et131x_adapter *adapter, u8 reg, u16 *value)
{
	struct phy_device *phydev = adapter->phydev;

	if (!phydev)
		return -EIO;

	return et131x_phy_mii_read(adapter, phydev->addr, reg, value);
}

/* et131x_mii_write - Write to a PHY reg through the MII interface of the MAC
 * @adapter: pointer to our private adapter structure
 * @addr: the address of the transceiver
 * @reg: the register to write to
 * @value: 16-bit value to write
 */
static int et131x_mii_write(struct et131x_adapter *adapter, u8 addr, u8 reg,
			    u16 value)
{
	struct mac_regs __iomem *mac = &adapter->regs->mac;
	int status = 0;
	u32 delay = 0;
	u32 mii_addr;
	u32 mii_cmd;
	u32 mii_indicator;

	/* Save a local copy of the registers we are dealing with so we can
	 * set them back
	 */
	mii_addr = readl(&mac->mii_mgmt_addr);
	mii_cmd = readl(&mac->mii_mgmt_cmd);

	/* Stop the current operation */
	writel(0, &mac->mii_mgmt_cmd);

	/* Set up the register we need to write to on the correct PHY */
	writel(ET_MAC_MII_ADDR(addr, reg), &mac->mii_mgmt_addr);

	/* Add the value to write to the registers to the mac */
	writel(value, &mac->mii_mgmt_ctrl);

	do {
		udelay(50);
		delay++;
		mii_indicator = readl(&mac->mii_mgmt_indicator);
	} while ((mii_indicator & ET_MAC_MGMT_BUSY) && delay < 100);

	/* If we hit the max delay, we could not write the register */
	if (delay == 100) {
		u16 tmp;

		dev_warn(&adapter->pdev->dev,
			 "reg 0x%08x could not be written", reg);
		dev_warn(&adapter->pdev->dev, "status is 0x%08x\n",
			 mii_indicator);
		dev_warn(&adapter->pdev->dev, "command is 0x%08x\n",
			 readl(&mac->mii_mgmt_cmd));

		et131x_mii_read(adapter, reg, &tmp);

		status = -EIO;
	}
	/* Stop the write operation */
	writel(0, &mac->mii_mgmt_cmd);

	/* set the registers we touched back to the state at which we entered
	 * this function
	 */
	writel(mii_addr, &mac->mii_mgmt_addr);
	writel(mii_cmd, &mac->mii_mgmt_cmd);

	return status;
}

static void et1310_phy_read_mii_bit(struct et131x_adapter *adapter,
				    u16 regnum,
				    u16 bitnum,
				    u8 *value)
{
	u16 reg;
	u16 mask = 1 << bitnum;

	/* Read the requested register */
	et131x_mii_read(adapter, regnum, &reg);

	*value = (reg & mask) >> bitnum;
}

static void et1310_config_flow_control(struct et131x_adapter *adapter)
{
	struct phy_device *phydev = adapter->phydev;

	if (phydev->duplex == DUPLEX_HALF) {
		adapter->flow = FLOW_NONE;
	} else {
		char remote_pause, remote_async_pause;

		et1310_phy_read_mii_bit(adapter, 5, 10, &remote_pause);
		et1310_phy_read_mii_bit(adapter, 5, 11, &remote_async_pause);

		if (remote_pause && remote_async_pause) {
			adapter->flow = adapter->wanted_flow;
		} else if (remote_pause && !remote_async_pause) {
			if (adapter->wanted_flow == FLOW_BOTH)
				adapter->flow = FLOW_BOTH;
			else
				adapter->flow = FLOW_NONE;
		} else if (!remote_pause && !remote_async_pause) {
			adapter->flow = FLOW_NONE;
		} else {
			if (adapter->wanted_flow == FLOW_BOTH)
				adapter->flow = FLOW_RXONLY;
			else
				adapter->flow = FLOW_NONE;
		}
	}
}

/* et1310_update_macstat_host_counters - Update local copy of the statistics */
static void et1310_update_macstat_host_counters(struct et131x_adapter *adapter)
{
	struct ce_stats *stats = &adapter->stats;
	struct macstat_regs __iomem *macstat =
		&adapter->regs->macstat;

	stats->tx_collisions += readl(&macstat->tx_total_collisions);
	stats->tx_first_collisions += readl(&macstat->tx_single_collisions);
	stats->tx_deferred += readl(&macstat->tx_deferred);
	stats->tx_excessive_collisions +=
		readl(&macstat->tx_multiple_collisions);
	stats->tx_late_collisions += readl(&macstat->tx_late_collisions);
	stats->tx_underflows += readl(&macstat->tx_undersize_frames);
	stats->tx_max_pkt_errs += readl(&macstat->tx_oversize_frames);

	stats->rx_align_errs += readl(&macstat->rx_align_errs);
	stats->rx_crc_errs += readl(&macstat->rx_code_errs);
	stats->rcvd_pkts_dropped += readl(&macstat->rx_drops);
	stats->rx_overflows += readl(&macstat->rx_oversize_packets);
	stats->rx_code_violations += readl(&macstat->rx_fcs_errs);
	stats->rx_length_errs += readl(&macstat->rx_frame_len_errs);
	stats->rx_other_errs += readl(&macstat->rx_fragment_packets);
}

/* et1310_handle_macstat_interrupt
 *
 * One of the MACSTAT counters has wrapped. Update the local copy of
 * the statistics held in the adapter structure, checking the "wrap"
 * bit for each counter.
 */
static void et1310_handle_macstat_interrupt(struct et131x_adapter *adapter)
{
	u32 carry_reg1;
	u32 carry_reg2;

	/* Read the interrupt bits from the register(s). These are Clear On
	 * Write.
	 */
	carry_reg1 = readl(&adapter->regs->macstat.carry_reg1);
	carry_reg2 = readl(&adapter->regs->macstat.carry_reg2);

	writel(carry_reg1, &adapter->regs->macstat.carry_reg1);
	writel(carry_reg2, &adapter->regs->macstat.carry_reg2);

	/* We need to update the host copy of all the MAC_STAT counters.
	 * For each counter, check its overflow bit. If the overflow bit is
	 * set, then increment the host version of the count by one complete
	 * revolution of the counter. This routine is called when the counter
	 * block indicates that one of the counters has wrapped.
	 */
	if (carry_reg1 & (1 << 14))
		adapter->stats.rx_code_violations += COUNTER_WRAP_16_BIT;
	if (carry_reg1 & (1 << 8))
		adapter->stats.rx_align_errs += COUNTER_WRAP_12_BIT;
	if (carry_reg1 & (1 << 7))
		adapter->stats.rx_length_errs += COUNTER_WRAP_16_BIT;
	if (carry_reg1 & (1 << 2))
		adapter->stats.rx_other_errs += COUNTER_WRAP_16_BIT;
	if (carry_reg1 & (1 << 6))
		adapter->stats.rx_crc_errs += COUNTER_WRAP_16_BIT;
	if (carry_reg1 & (1 << 3))
		adapter->stats.rx_overflows += COUNTER_WRAP_16_BIT;
	if (carry_reg1 & (1 << 0))
		adapter->stats.rcvd_pkts_dropped += COUNTER_WRAP_16_BIT;
	if (carry_reg2 & (1 << 16))
		adapter->stats.tx_max_pkt_errs += COUNTER_WRAP_12_BIT;
	if (carry_reg2 & (1 << 15))
		adapter->stats.tx_underflows += COUNTER_WRAP_12_BIT;
	if (carry_reg2 & (1 << 6))
		adapter->stats.tx_first_collisions += COUNTER_WRAP_12_BIT;
	if (carry_reg2 & (1 << 8))
		adapter->stats.tx_deferred += COUNTER_WRAP_12_BIT;
	if (carry_reg2 & (1 << 5))
		adapter->stats.tx_excessive_collisions += COUNTER_WRAP_12_BIT;
	if (carry_reg2 & (1 << 4))
		adapter->stats.tx_late_collisions += COUNTER_WRAP_12_BIT;
	if (carry_reg2 & (1 << 2))
		adapter->stats.tx_collisions += COUNTER_WRAP_12_BIT;
}

static int et131x_mdio_read(struct mii_bus *bus, int phy_addr, int reg)
{
	struct net_device *netdev = bus->priv;
	struct et131x_adapter *adapter = netdev_priv(netdev);
	u16 value;
	int ret;

	ret = et131x_phy_mii_read(adapter, phy_addr, reg, &value);

	if (ret < 0)
		return ret;

	return value;
}

static int et131x_mdio_write(struct mii_bus *bus, int phy_addr,
			     int reg, u16 value)
{
	struct net_device *netdev = bus->priv;
	struct et131x_adapter *adapter = netdev_priv(netdev);

	return et131x_mii_write(adapter, phy_addr, reg, value);
}

/* et1310_phy_power_switch - PHY power control
 * @adapter: device to control
 * @down: true for off/false for back on
 *
 * one hundred, ten, one thousand megs
 * How would you like to have your LAN accessed
 * Can't you see that this code processed
 * Phy power, phy power..
 */
static void et1310_phy_power_switch(struct et131x_adapter *adapter, bool down)
{
	u16 data;
	struct phy_device *phydev = adapter->phydev;

	et131x_mii_read(adapter, MII_BMCR, &data);
	data &= ~BMCR_PDOWN;
	if (down)
		data |= BMCR_PDOWN;
	et131x_mii_write(adapter, phydev->addr, MII_BMCR, data);
}

/* et131x_xcvr_init - Init the phy if we are setting it into force mode */
static void et131x_xcvr_init(struct et131x_adapter *adapter)
{
	u16 lcr2;
	struct phy_device *phydev = adapter->phydev;

	/* Set the LED behavior such that LED 1 indicates speed (off =
	 * 10Mbits, blink = 100Mbits, on = 1000Mbits) and LED 2 indicates
	 * link and activity (on for link, blink off for activity).
	 *
	 * NOTE: Some customizations have been added here for specific
	 * vendors; The LED behavior is now determined by vendor data in the
	 * EEPROM. However, the above description is the default.
	 */
	if ((adapter->eeprom_data[1] & 0x4) == 0) {
		et131x_mii_read(adapter, PHY_LED_2, &lcr2);

		lcr2 &= (ET_LED2_LED_100TX | ET_LED2_LED_1000T);
		lcr2 |= (LED_VAL_LINKON_ACTIVE << LED_LINK_SHIFT);

		if ((adapter->eeprom_data[1] & 0x8) == 0)
			lcr2 |= (LED_VAL_1000BT_100BTX << LED_TXRX_SHIFT);
		else
			lcr2 |= (LED_VAL_LINKON << LED_TXRX_SHIFT);

		et131x_mii_write(adapter, phydev->addr, PHY_LED_2, lcr2);
	}
}

/* et131x_configure_global_regs - configure JAGCore global regs
 *
 * Used to configure the global registers on the JAGCore
 */
static void et131x_configure_global_regs(struct et131x_adapter *adapter)
{
	struct global_regs __iomem *regs = &adapter->regs->global;

	writel(0, &regs->rxq_start_addr);
	writel(INTERNAL_MEM_SIZE - 1, &regs->txq_end_addr);

	if (adapter->registry_jumbo_packet < 2048) {
		/* Tx / RxDMA and Tx/Rx MAC interfaces have a 1k word
		 * block of RAM that the driver can split between Tx
		 * and Rx as it desires. Our default is to split it
		 * 50/50:
		 */
		writel(PARM_RX_MEM_END_DEF, &regs->rxq_end_addr);
		writel(PARM_RX_MEM_END_DEF + 1, &regs->txq_start_addr);
	} else if (adapter->registry_jumbo_packet < 8192) {
		/* For jumbo packets > 2k but < 8k, split 50-50. */
		writel(INTERNAL_MEM_RX_OFFSET, &regs->rxq_end_addr);
		writel(INTERNAL_MEM_RX_OFFSET + 1, &regs->txq_start_addr);
	} else {
		/* 9216 is the only packet size greater than 8k that
		 * is available. The Tx buffer has to be big enough
		 * for one whole packet on the Tx side. We'll make
		 * the Tx 9408, and give the rest to Rx
		 */
		writel(0x01b3, &regs->rxq_end_addr);
		writel(0x01b4, &regs->txq_start_addr);
	}
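
	/* A sanity check on the 9216 case above, assuming (not confirmed in
	 * this file) that INTERNAL_MEM_SIZE is 1024 16-byte units: Tx gets
	 * units 0x01b4..0x03ff, i.e. 1024 - 436 = 588 units = 9408 bytes,
	 * one whole 9216-byte jumbo frame plus headroom, and Rx gets the
	 * remaining 436 units.
	 */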

	/* Initialize the loopback register. Disable all loopbacks. */
	writel(0, &regs->loopback);

	/* MSI Register */
	writel(0, &regs->msi_config);

	/* By default, disable the watchdog timer. It will be enabled when
	 * a packet is queued.
	 */
	writel(0, &regs->watchdog_timer);
}

/* et131x_config_rx_dma_regs - Start of Rx_DMA init sequence */
static void et131x_config_rx_dma_regs(struct et131x_adapter *adapter)
{
	struct rxdma_regs __iomem *rx_dma = &adapter->regs->rxdma;
	struct rx_ring *rx_local = &adapter->rx_ring;
	struct fbr_desc *fbr_entry;
	u32 entry;
	u32 psr_num_des;
	unsigned long flags;
	u8 id;

	/* Halt RXDMA to perform the reconfigure. */
	et131x_rx_dma_disable(adapter);

	/* Load the completion writeback physical address */
	writel(upper_32_bits(rx_local->rx_status_bus), &rx_dma->dma_wb_base_hi);
	writel(lower_32_bits(rx_local->rx_status_bus), &rx_dma->dma_wb_base_lo);

	memset(rx_local->rx_status_block, 0, sizeof(struct rx_status_block));

	/* Set the address and parameters of the packet status ring into the
	 * 1310's registers
	 */
	writel(upper_32_bits(rx_local->ps_ring_physaddr), &rx_dma->psr_base_hi);
	writel(lower_32_bits(rx_local->ps_ring_physaddr), &rx_dma->psr_base_lo);
	writel(rx_local->psr_entries - 1, &rx_dma->psr_num_des);
	writel(0, &rx_dma->psr_full_offset);

	psr_num_des = readl(&rx_dma->psr_num_des) & ET_RXDMA_PSR_NUM_DES_MASK;
	writel((psr_num_des * LO_MARK_PERCENT_FOR_PSR) / 100,
	       &rx_dma->psr_min_des);
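
	/* Worked example (the percentage is illustrative, not taken from
	 * this driver): with 1024 PSR entries, psr_num_des reads back 1023;
	 * a LO_MARK_PERCENT_FOR_PSR of 15 would set the low-water mark to
	 * (1023 * 15) / 100 = 153 descriptors.
	 */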

	spin_lock_irqsave(&adapter->rcv_lock, flags);

	/* These local variables track the PSR in the adapter structure */
	rx_local->local_psr_full = 0;

	for (id = 0; id < NUM_FBRS; id++) {
		u32 __iomem *num_des;
		u32 __iomem *full_offset;
		u32 __iomem *min_des;
		u32 __iomem *base_hi;
		u32 __iomem *base_lo;
		struct fbr_lookup *fbr = rx_local->fbr[id];

		if (id == 0) {
			num_des = &rx_dma->fbr0_num_des;
			full_offset = &rx_dma->fbr0_full_offset;
			min_des = &rx_dma->fbr0_min_des;
			base_hi = &rx_dma->fbr0_base_hi;
			base_lo = &rx_dma->fbr0_base_lo;
		} else {
			num_des = &rx_dma->fbr1_num_des;
			full_offset = &rx_dma->fbr1_full_offset;
			min_des = &rx_dma->fbr1_min_des;
			base_hi = &rx_dma->fbr1_base_hi;
			base_lo = &rx_dma->fbr1_base_lo;
		}

		/* Now's the best time to initialize FBR contents */
		fbr_entry = fbr->ring_virtaddr;
		for (entry = 0; entry < fbr->num_entries; entry++) {
			fbr_entry->addr_hi = fbr->bus_high[entry];
			fbr_entry->addr_lo = fbr->bus_low[entry];
			fbr_entry->word2 = entry;
			fbr_entry++;
		}

		/* Set the address and parameters of Free buffer ring 1 and 0
		 * into the 1310's registers
		 */
		writel(upper_32_bits(fbr->ring_physaddr), base_hi);
		writel(lower_32_bits(fbr->ring_physaddr), base_lo);
		writel(fbr->num_entries - 1, num_des);
		writel(ET_DMA10_WRAP, full_offset);

		/* This variable tracks the free buffer ring full position,
		 * so it has to match the above.
		 */
		fbr->local_full = ET_DMA10_WRAP;
		writel(((fbr->num_entries * LO_MARK_PERCENT_FOR_RX) / 100) - 1,
		       min_des);
	}

	/* Program the number of packets we will receive before generating an
	 * interrupt.
	 * For version B silicon, this value gets updated once autoneg is
	 * complete.
	 */
	writel(PARM_RX_NUM_BUFS_DEF, &rx_dma->num_pkt_done);

	/* The "time_done" is not working correctly to coalesce interrupts
	 * after a given time period, but rather is giving us an interrupt
	 * regardless of whether we have received packets.
	 * This value gets updated once autoneg is complete.
	 */
	writel(PARM_RX_TIME_INT_DEF, &rx_dma->max_pkt_time);

	spin_unlock_irqrestore(&adapter->rcv_lock, flags);
}

/* et131x_config_tx_dma_regs - Set up the tx dma section of the JAGCore.
 *
 * Configure the transmit engine with the ring buffers we have created
 * and prepare it for use.
 */
static void et131x_config_tx_dma_regs(struct et131x_adapter *adapter)
{
	struct txdma_regs __iomem *txdma = &adapter->regs->txdma;
	struct tx_ring *tx_ring = &adapter->tx_ring;

	/* Load the hardware with the start of the transmit descriptor ring. */
	writel(upper_32_bits(tx_ring->tx_desc_ring_pa), &txdma->pr_base_hi);
	writel(lower_32_bits(tx_ring->tx_desc_ring_pa), &txdma->pr_base_lo);

	/* Initialise the transmit DMA engine */
	writel(NUM_DESC_PER_RING_TX - 1, &txdma->pr_num_des);

	/* Load the completion writeback physical address */
	writel(upper_32_bits(tx_ring->tx_status_pa), &txdma->dma_wb_base_hi);
	writel(lower_32_bits(tx_ring->tx_status_pa), &txdma->dma_wb_base_lo);

	*tx_ring->tx_status = 0;

	writel(0, &txdma->service_request);
	tx_ring->send_idx = 0;
}

/* et131x_adapter_setup - Set the adapter up as per cassini+ documentation */
static void et131x_adapter_setup(struct et131x_adapter *adapter)
{
	/* Configure the JAGCore */
	et131x_configure_global_regs(adapter);

	et1310_config_mac_regs1(adapter);

	/* Configure the MMC registers */
	/* All we need to do is initialize the Memory Control Register */
	writel(ET_MMC_ENABLE, &adapter->regs->mmc.mmc_ctrl);

	et1310_config_rxmac_regs(adapter);
	et1310_config_txmac_regs(adapter);

	et131x_config_rx_dma_regs(adapter);
	et131x_config_tx_dma_regs(adapter);

	et1310_config_macstat_regs(adapter);

	et1310_phy_power_switch(adapter, 0);
	et131x_xcvr_init(adapter);
}

/* et131x_soft_reset - Issue soft reset to the hardware, complete for ET1310 */
static void et131x_soft_reset(struct et131x_adapter *adapter)
{
	u32 reg;

	/* Disable MAC Core */
	reg = ET_MAC_CFG1_SOFT_RESET | ET_MAC_CFG1_SIM_RESET |
	      ET_MAC_CFG1_RESET_RXMC | ET_MAC_CFG1_RESET_TXMC |
	      ET_MAC_CFG1_RESET_RXFUNC | ET_MAC_CFG1_RESET_TXFUNC;
	writel(reg, &adapter->regs->mac.cfg1);

	reg = ET_RESET_ALL;
	writel(reg, &adapter->regs->global.sw_reset);
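
	/* Drop the soft/sim reset bits but keep the MAC's rx/tx blocks in
	 * reset across the global reset above, then release everything.
	 */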
	reg = ET_MAC_CFG1_RESET_RXMC | ET_MAC_CFG1_RESET_TXMC |
	      ET_MAC_CFG1_RESET_RXFUNC | ET_MAC_CFG1_RESET_TXFUNC;
	writel(reg, &adapter->regs->mac.cfg1);
	writel(0, &adapter->regs->mac.cfg1);
}

/* et131x_enable_interrupts - enable interrupts
 *
 * Enable the appropriate interrupts on the ET131x according to our
 * configuration
 */
static void et131x_enable_interrupts(struct et131x_adapter *adapter)
{
	u32 mask;

	/* Enable all global interrupts */
	if (adapter->flow == FLOW_TXONLY || adapter->flow == FLOW_BOTH)
		mask = INT_MASK_ENABLE;
	else
		mask = INT_MASK_ENABLE_NO_FLOW;

	writel(mask, &adapter->regs->global.int_mask);
}

/* et131x_disable_interrupts - interrupt disable
 *
 * Block all interrupts from the et131x device at the device itself
 */
static void et131x_disable_interrupts(struct et131x_adapter *adapter)
{
	/* Disable all global interrupts */
	writel(INT_MASK_DISABLE, &adapter->regs->global.int_mask);
}

/* et131x_tx_dma_disable - Stop Tx_DMA on the ET1310 */
static void et131x_tx_dma_disable(struct et131x_adapter *adapter)
{
	/* Setup the transmit dma configuration register */
	writel(ET_TXDMA_CSR_HALT | ET_TXDMA_SNGL_EPKT,
	       &adapter->regs->txdma.csr);
}

/* et131x_enable_txrx - Enable tx/rx queues */
static void et131x_enable_txrx(struct net_device *netdev)
{
	struct et131x_adapter *adapter = netdev_priv(netdev);

	/* Enable the Tx and Rx DMA engines (if not already enabled) */
	et131x_rx_dma_enable(adapter);
	et131x_tx_dma_enable(adapter);

	/* Enable device interrupts */
	if (adapter->flags & FMP_ADAPTER_INTERRUPT_IN_USE)
		et131x_enable_interrupts(adapter);

	/* We're ready to move some data, so start the queue */
	netif_start_queue(netdev);
}

/* et131x_disable_txrx - Disable tx/rx queues */
static void et131x_disable_txrx(struct net_device *netdev)
{
	struct et131x_adapter *adapter = netdev_priv(netdev);

	/* First thing is to stop the queue */
	netif_stop_queue(netdev);

	/* Stop the Tx and Rx DMA engines */
	et131x_rx_dma_disable(adapter);
	et131x_tx_dma_disable(adapter);

	/* Disable device interrupts */
	et131x_disable_interrupts(adapter);
}

/* et131x_init_send - Initialize send data structures */
static void et131x_init_send(struct et131x_adapter *adapter)
{
	u32 ct;
	struct tx_ring *tx_ring = &adapter->tx_ring;
	struct tcb *tcb = tx_ring->tcb_ring;

	tx_ring->tcb_qhead = tcb;

	memset(tcb, 0, sizeof(struct tcb) * NUM_TCB);

	/* Go through and set up each TCB. Note that "ct++ < NUM_TCB" runs
	 * the body NUM_TCB times, so every TCB - including the last one -
	 * gets a next pointer here; the last one is fixed up below.
	 */
	for (ct = 0; ct++ < NUM_TCB; tcb++)
		/* Set the link pointer in HW TCB to the next TCB in the
		 * chain
		 */
		tcb->next = tcb + 1;

	/* Set the tail pointer */
	tcb--;
	tx_ring->tcb_qtail = tcb;
	tcb->next = NULL;
	/* Curr send queue should now be empty */
	tx_ring->send_head = NULL;
	tx_ring->send_tail = NULL;
}

/* et1310_enable_phy_coma - called when network cable is unplugged
 *
 * The driver receives a phy status change interrupt while in D0 and checks
 * that the phy status is down.
 *
 * -- gate off JAGCore;
 * -- set gigE PHY in Coma mode
 * -- wake on phy_interrupt; Perform software reset JAGCore,
 *    re-initialize jagcore and gigE PHY
 *
 * Add D0-ASPM-PhyLinkDown Support:
 * -- while in D0, when there is a phy_interrupt indicating phy link
 *    down status, call the MPSetPhyComa routine to enter this active
 *    state power saving mode
 * -- while in D0-ASPM-PhyLinkDown mode, when there is a phy_interrupt
 *    indicating linkup status, call the MPDisablePhyComa routine to
 *    restore JAGCore and gigE PHY
 */
static void et1310_enable_phy_coma(struct et131x_adapter *adapter)
{
	u32 pmcsr = readl(&adapter->regs->global.pm_csr);

	/* Save the GbE PHY speed and duplex modes. Need to restore this
	 * when cable is plugged back in
	 */

	/* Stop sending packets. */
	adapter->flags |= FMP_ADAPTER_LOWER_POWER;

	/* Wait for outstanding Receive packets */
	et131x_disable_txrx(adapter->netdev);

	/* Gate off JAGCore 3 clock domains */
	pmcsr &= ~ET_PMCSR_INIT;
	writel(pmcsr, &adapter->regs->global.pm_csr);

	/* Program gigE PHY in to Coma mode */
	pmcsr |= ET_PM_PHY_SW_COMA;
	writel(pmcsr, &adapter->regs->global.pm_csr);
}

/* et1310_disable_phy_coma - Disable the Phy Coma Mode */
static void et1310_disable_phy_coma(struct et131x_adapter *adapter)
{
	u32 pmcsr;

	pmcsr = readl(&adapter->regs->global.pm_csr);

	/* Disable phy_sw_coma register and re-enable JAGCore clocks */
	pmcsr |= ET_PMCSR_INIT;
	pmcsr &= ~ET_PM_PHY_SW_COMA;
	writel(pmcsr, &adapter->regs->global.pm_csr);

	/* Restore the GbE PHY speed and duplex modes;
	 * Reset JAGCore; re-configure and initialize JAGCore and gigE PHY
	 */

	/* Re-initialize the send structures */
	et131x_init_send(adapter);

	/* Bring the device back to the state it was during init prior to
	 * autonegotiation being complete. This way, when we get the auto-neg
	 * complete interrupt, we can complete init by calling ConfigMacREGS2.
	 */
	et131x_soft_reset(adapter);

	/* Set up the et1310 as per the documentation */
	et131x_adapter_setup(adapter);

	/* Allow Tx to restart */
	adapter->flags &= ~FMP_ADAPTER_LOWER_POWER;

	et131x_enable_txrx(adapter->netdev);
}

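/* Free buffer ring offsets are kept as a 10-bit index (ET_DMA10_MASK) plus
 * a wrap bit (ET_DMA10_WRAP) that flips on every pass around the ring, so
 * hardware can tell an empty ring from a full one. Worked example for a
 * 512-entry ring (limit = 511): incrementing an offset of 511 yields 512,
 * which exceeds the limit, so the low 10 bits reset to 0 and the wrap bit
 * toggles.
 */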
static inline u32 bump_free_buff_ring(u32 *free_buff_ring, u32 limit)
{
	u32 tmp_free_buff_ring = *free_buff_ring;

	tmp_free_buff_ring++;
	/* This works for all cases where limit < 1024. The 1023 case
	 * works because 1023++ is 1024 which means the if condition is not
	 * taken but the carry of the bit into the wrap bit toggles the wrap
	 * value correctly
	 */
	if ((tmp_free_buff_ring & ET_DMA10_MASK) > limit) {
		tmp_free_buff_ring &= ~ET_DMA10_MASK;
		tmp_free_buff_ring ^= ET_DMA10_WRAP;
	}
	/* For the 1023 case */
	tmp_free_buff_ring &= (ET_DMA10_MASK | ET_DMA10_WRAP);
	*free_buff_ring = tmp_free_buff_ring;
	return tmp_free_buff_ring;
}

/* et131x_rx_dma_memory_alloc
 *
 * Allocates Free buffer ring 1 for sure, free buffer ring 0 if required,
 * and the Packet Status Ring.
 */
static int et131x_rx_dma_memory_alloc(struct et131x_adapter *adapter)
{
	u8 id;
	u32 i, j;
	u32 bufsize;
	u32 psr_size;
	u32 fbr_chunksize;
	struct rx_ring *rx_ring = &adapter->rx_ring;
	struct fbr_lookup *fbr;

	/* Alloc memory for the lookup table */
	rx_ring->fbr[0] = kzalloc(sizeof(*fbr), GFP_KERNEL);
	if (rx_ring->fbr[0] == NULL)
		return -ENOMEM;
	rx_ring->fbr[1] = kzalloc(sizeof(*fbr), GFP_KERNEL);
	if (rx_ring->fbr[1] == NULL)
		return -ENOMEM;

	/* The first thing we will do is configure the sizes of the buffer
	 * rings. These will change based on jumbo packet support. Larger
	 * jumbo packets increase the size of each entry in FBR0, and the
	 * number of entries in FBR0, while at the same time decreasing the
	 * number of entries in FBR1.
	 *
	 * FBR1 holds "large" frames, FBR0 holds "small" frames. If FBR1
	 * entries are huge in order to accommodate a "jumbo" frame, then it
	 * will have fewer entries. Conversely, FBR0 will now be relied upon
	 * to carry more "normal" frames, thus its entry size also increases
	 * and the number of entries goes up too (since it now carries
	 * "small" + "regular" packets).
	 *
	 * In this scheme, we try to maintain 512 entries between the two
	 * rings. Also, FBR1 remains a constant size - when its size doubles
	 * the number of entries halves. FBR0 increases in size, however.
	 */
	if (adapter->registry_jumbo_packet < 2048) {
		rx_ring->fbr[0]->buffsize = 256;
		rx_ring->fbr[0]->num_entries = 512;
		rx_ring->fbr[1]->buffsize = 2048;
		rx_ring->fbr[1]->num_entries = 512;
	} else if (adapter->registry_jumbo_packet < 4096) {
		rx_ring->fbr[0]->buffsize = 512;
		rx_ring->fbr[0]->num_entries = 1024;
		rx_ring->fbr[1]->buffsize = 4096;
		rx_ring->fbr[1]->num_entries = 512;
	} else {
		rx_ring->fbr[0]->buffsize = 1024;
		rx_ring->fbr[0]->num_entries = 768;
		rx_ring->fbr[1]->buffsize = 16384;
		rx_ring->fbr[1]->num_entries = 128;
	}
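
	/* Sizing summary for the table above:
	 *   jumbo < 2048: FBR0  256 B x 512,  FBR1  2048 B x 512
	 *   jumbo < 4096: FBR0  512 B x 1024, FBR1  4096 B x 512
	 *   otherwise:    FBR0 1024 B x 768,  FBR1 16384 B x 128
	 * so the Packet Status Ring sized below always has one entry per
	 * free buffer.
	 */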

	rx_ring->psr_entries = rx_ring->fbr[0]->num_entries +
			       rx_ring->fbr[1]->num_entries;

	for (id = 0; id < NUM_FBRS; id++) {
		fbr = rx_ring->fbr[id];
		/* Allocate an area of memory for Free Buffer Ring */
		bufsize = sizeof(struct fbr_desc) * fbr->num_entries;
		fbr->ring_virtaddr = dma_alloc_coherent(&adapter->pdev->dev,
							bufsize,
							&fbr->ring_physaddr,
							GFP_KERNEL);
		if (!fbr->ring_virtaddr) {
			dev_err(&adapter->pdev->dev,
				"Cannot alloc memory for Free Buffer Ring %d\n",
				id);
			return -ENOMEM;
		}
	}

	for (id = 0; id < NUM_FBRS; id++) {
		fbr = rx_ring->fbr[id];
		fbr_chunksize = (FBR_CHUNKS * fbr->buffsize);

		for (i = 0; i < fbr->num_entries / FBR_CHUNKS; i++) {
			dma_addr_t fbr_physaddr;

			fbr->mem_virtaddrs[i] = dma_alloc_coherent(
					&adapter->pdev->dev, fbr_chunksize,
					&fbr->mem_physaddrs[i],
					GFP_KERNEL);

			if (!fbr->mem_virtaddrs[i]) {
				dev_err(&adapter->pdev->dev,
					"Could not alloc memory\n");
				return -ENOMEM;
			}

			/* See the NOTE below about dma_alloc_coherent()
			 * address widths
			 */
			fbr_physaddr = fbr->mem_physaddrs[i];

			for (j = 0; j < FBR_CHUNKS; j++) {
				u32 k = (i * FBR_CHUNKS) + j;

				/* Save the Virtual address of this index for
				 * quick access later
				 */
				fbr->virt[k] = (u8 *)fbr->mem_virtaddrs[i] +
					       (j * fbr->buffsize);

				/* now store the physical address in the
				 * descriptor so the device can access it
				 */
				fbr->bus_high[k] = upper_32_bits(fbr_physaddr);
				fbr->bus_low[k] = lower_32_bits(fbr_physaddr);
				fbr_physaddr += fbr->buffsize;
			}
		}
	}
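
	/* fbr->virt[], bus_high[] and bus_low[] now give O(1) translation
	 * from a free buffer index to its kernel virtual address and its
	 * split 64-bit bus address; the receive path and nic_return_rfd()
	 * rely on these tables.
	 */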

	/* Allocate an area of memory for FIFO of Packet Status ring entries */
	psr_size = sizeof(struct pkt_stat_desc) * rx_ring->psr_entries;

	rx_ring->ps_ring_virtaddr = dma_alloc_coherent(&adapter->pdev->dev,
						       psr_size,
						       &rx_ring->ps_ring_physaddr,
						       GFP_KERNEL);

	if (!rx_ring->ps_ring_virtaddr) {
		dev_err(&adapter->pdev->dev,
			"Cannot alloc memory for Packet Status Ring\n");
		return -ENOMEM;
	}

	/* NOTE : dma_alloc_coherent(), used above to alloc DMA regions,
	 * ALWAYS returns SAC (32-bit) addresses. If DAC (64-bit) addresses
	 * are ever returned, make sure the high part is retrieved here before
	 * storing the adjusted address.
	 */

	/* Allocate an area of memory for writeback of status information */
	rx_ring->rx_status_block = dma_alloc_coherent(&adapter->pdev->dev,
						      sizeof(struct rx_status_block),
						      &rx_ring->rx_status_bus,
						      GFP_KERNEL);
	if (!rx_ring->rx_status_block) {
		dev_err(&adapter->pdev->dev,
			"Cannot alloc memory for Status Block\n");
		return -ENOMEM;
	}
	rx_ring->num_rfd = NIC_DEFAULT_NUM_RFD;

	/* The RFDs are going to be put on lists later on, so initialize the
	 * lists now.
	 */
	INIT_LIST_HEAD(&rx_ring->recv_list);
	return 0;
}

/* et131x_rx_dma_memory_free - Free all memory allocated within this module */
static void et131x_rx_dma_memory_free(struct et131x_adapter *adapter)
{
	u8 id;
	u32 ii;
	u32 bufsize;
	u32 psr_size;
	struct rfd *rfd;
	struct rx_ring *rx_ring = &adapter->rx_ring;
	struct fbr_lookup *fbr;

	/* Free RFDs and associated packet descriptors */
	WARN_ON(rx_ring->num_ready_recv != rx_ring->num_rfd);

	while (!list_empty(&rx_ring->recv_list)) {
		rfd = list_entry(rx_ring->recv_list.next,
				 struct rfd, list_node);

		list_del(&rfd->list_node);
		rfd->skb = NULL;
		kfree(rfd);
	}

	/* Free Free Buffer Rings */
	for (id = 0; id < NUM_FBRS; id++) {
		fbr = rx_ring->fbr[id];

		if (!fbr || !fbr->ring_virtaddr)
			continue;

		/* First the packet memory */
		for (ii = 0; ii < fbr->num_entries / FBR_CHUNKS; ii++) {
			if (fbr->mem_virtaddrs[ii]) {
				bufsize = fbr->buffsize * FBR_CHUNKS;

				dma_free_coherent(&adapter->pdev->dev,
						  bufsize,
						  fbr->mem_virtaddrs[ii],
						  fbr->mem_physaddrs[ii]);

				fbr->mem_virtaddrs[ii] = NULL;
			}
		}

		bufsize = sizeof(struct fbr_desc) * fbr->num_entries;

		dma_free_coherent(&adapter->pdev->dev,
				  bufsize,
				  fbr->ring_virtaddr,
				  fbr->ring_physaddr);

		fbr->ring_virtaddr = NULL;
	}

	/* Free Packet Status Ring */
	if (rx_ring->ps_ring_virtaddr) {
		psr_size = sizeof(struct pkt_stat_desc) * rx_ring->psr_entries;

		dma_free_coherent(&adapter->pdev->dev, psr_size,
				  rx_ring->ps_ring_virtaddr,
				  rx_ring->ps_ring_physaddr);

		rx_ring->ps_ring_virtaddr = NULL;
	}

	/* Free area of memory for the writeback of status information */
	if (rx_ring->rx_status_block) {
		dma_free_coherent(&adapter->pdev->dev,
				  sizeof(struct rx_status_block),
				  rx_ring->rx_status_block,
				  rx_ring->rx_status_bus);
		rx_ring->rx_status_block = NULL;
	}

	/* Free the FBR Lookup Table */
	kfree(rx_ring->fbr[0]);
	kfree(rx_ring->fbr[1]);

	/* Reset Counters */
	rx_ring->num_ready_recv = 0;
}

/* et131x_init_recv - Initialize receive data structures */
static int et131x_init_recv(struct et131x_adapter *adapter)
{
	struct rfd *rfd;
	u32 rfdct;
	struct rx_ring *rx_ring = &adapter->rx_ring;

	/* Setup each RFD */
	for (rfdct = 0; rfdct < rx_ring->num_rfd; rfdct++) {
		rfd = kzalloc(sizeof(*rfd), GFP_ATOMIC | GFP_DMA);
		if (!rfd)
			return -ENOMEM;

		rfd->skb = NULL;

		/* Add this RFD to the recv_list */
		list_add_tail(&rfd->list_node, &rx_ring->recv_list);

		/* Increment the available RFDs */
		rx_ring->num_ready_recv++;
	}

	return 0;
}

/* et131x_set_rx_dma_timer - Set the heartbeat timer according to line rate */
static void et131x_set_rx_dma_timer(struct et131x_adapter *adapter)
{
	struct phy_device *phydev = adapter->phydev;

	/* For version B silicon, we do not use the RxDMA timer for 10 and 100
	 * Mbits/s line rates, nor do we enable RxDMA interrupt coalescing.
	 */
	if ((phydev->speed == SPEED_100) || (phydev->speed == SPEED_10)) {
		writel(0, &adapter->regs->rxdma.max_pkt_time);
		writel(1, &adapter->regs->rxdma.num_pkt_done);
	}
}

/* nic_return_rfd - Recycle a RFD and put it back onto the receive list
 * @adapter: pointer to our adapter
 * @rfd: pointer to the RFD
 */
static void nic_return_rfd(struct et131x_adapter *adapter, struct rfd *rfd)
{
	struct rx_ring *rx_local = &adapter->rx_ring;
	struct rxdma_regs __iomem *rx_dma = &adapter->regs->rxdma;
	u16 buff_index = rfd->bufferindex;
	u8 ring_index = rfd->ringindex;
	unsigned long flags;
	struct fbr_lookup *fbr = rx_local->fbr[ring_index];

	/* We don't use any of the OOB data besides status. Otherwise, we
	 * need to clean up OOB data
	 */
	if (buff_index < fbr->num_entries) {
		u32 free_buff_ring;
		u32 __iomem *offset;
		struct fbr_desc *next;

		if (ring_index == 0)
			offset = &rx_dma->fbr0_full_offset;
		else
			offset = &rx_dma->fbr1_full_offset;

		next = (struct fbr_desc *)(fbr->ring_virtaddr) +
		       INDEX10(fbr->local_full);

		/* Handle the Free Buffer Ring advancement here. Write
		 * the PA / Buffer Index for the returned buffer into
		 * the oldest (next to be freed) FBR entry
		 */
		next->addr_hi = fbr->bus_high[buff_index];
		next->addr_lo = fbr->bus_low[buff_index];
		next->word2 = buff_index;

		free_buff_ring = bump_free_buff_ring(&fbr->local_full,
						     fbr->num_entries - 1);
		writel(free_buff_ring, offset);
	} else {
		dev_err(&adapter->pdev->dev,
			"%s illegal Buffer Index returned\n", __func__);
	}

	/* The processing on this RFD is done, so put it back on the tail of
	 * our list
	 */
	spin_lock_irqsave(&adapter->rcv_lock, flags);
	list_add_tail(&rfd->list_node, &rx_local->recv_list);
	rx_local->num_ready_recv++;
	spin_unlock_irqrestore(&adapter->rcv_lock, flags);

	WARN_ON(rx_local->num_ready_recv > rx_local->num_rfd);
}

/* nic_rx_pkts - Checks the hardware for available packets
 *
 * Returns rfd, a pointer to our MPRFD.
 *
 * Checks the hardware for available packets, using completion ring
 * If packets are available, it gets an RFD from the recv_list, attaches
 * the packet to it, puts the RFD in the RecvPendList, and also returns
 * the pointer to the RFD.
 */
static struct rfd *nic_rx_pkts(struct et131x_adapter *adapter)
{
	struct rx_ring *rx_local = &adapter->rx_ring;
	struct rx_status_block *status;
	struct pkt_stat_desc *psr;
	struct rfd *rfd;
	unsigned long flags;
	struct list_head *element;
	u8 ring_index;
	u16 buff_index;
	u32 len;
	u32 word0;
	u32 word1;
	struct sk_buff *skb;
	struct fbr_lookup *fbr;

	/* RX Status block is written by the DMA engine prior to every
	 * interrupt. It contains the next to be used entry in the Packet
	 * Status Ring, and also the two Free Buffer rings.
	 */
	status = rx_local->rx_status_block;
	word1 = status->word1 >> 16;	/* Get the useful bits */

	/* Check the PSR and wrap bits do not match */
	if ((word1 & 0x1FFF) == (rx_local->local_psr_full & 0x1FFF))
		return NULL; /* Looks like this ring is not updated yet */

	/* The packet status ring indicates that data is available. */
	psr = (struct pkt_stat_desc *)(rx_local->ps_ring_virtaddr) +
	      (rx_local->local_psr_full & 0xFFF);

	/* Grab any information that is required once the PSR is advanced,
	 * since we can no longer rely on the memory being accurate
	 */
	len = psr->word1 & 0xFFFF;
	ring_index = (psr->word1 >> 26) & 0x03;
	fbr = rx_local->fbr[ring_index];
	buff_index = (psr->word1 >> 16) & 0x3FF;
	word0 = psr->word0;
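
	/* psr->word1 layout, as decoded above:
	 *   bits  0-15: frame length in bytes
	 *   bits 16-25: index of the receive buffer within its ring
	 *   bits 26-27: which free buffer ring the buffer came from (0 or 1)
	 */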

	/* Indicate that we have used this PSR entry. */
	/* FIXME wrap 12 */
	add_12bit(&rx_local->local_psr_full, 1);
	if ((rx_local->local_psr_full & 0xFFF) > rx_local->psr_entries - 1) {
		/* Clear psr full and toggle the wrap bit */
		rx_local->local_psr_full &= ~0xFFF;
		rx_local->local_psr_full ^= 0x1000;
	}
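
	/* local_psr_full mirrors the 10-bit FBR scheme, but with a 12-bit
	 * index and bit 12 (0x1000) as the wrap bit. Worked example with
	 * 1024 PSR entries: advancing from index 1023 gives 1024, which is
	 * past the last entry, so the index resets to 0 and bit 12 toggles.
	 */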

	writel(rx_local->local_psr_full, &adapter->regs->rxdma.psr_full_offset);

	if (ring_index > 1 || buff_index > fbr->num_entries - 1) {
		/* Illegal buffer or ring index cannot be used by S/W*/
		dev_err(&adapter->pdev->dev,
			"NICRxPkts PSR Entry %d indicates length of %d and/or bad bi(%d)\n",
			rx_local->local_psr_full & 0xFFF, len, buff_index);
		return NULL;
	}

	/* Get and fill the RFD. */
	spin_lock_irqsave(&adapter->rcv_lock, flags);

	element = rx_local->recv_list.next;
	rfd = list_entry(element, struct rfd, list_node);

	if (!rfd) {
		spin_unlock_irqrestore(&adapter->rcv_lock, flags);
		return NULL;
	}

	list_del(&rfd->list_node);
	rx_local->num_ready_recv--;

	spin_unlock_irqrestore(&adapter->rcv_lock, flags);

	rfd->bufferindex = buff_index;
	rfd->ringindex = ring_index;

	/* In V1 silicon, there is a bug which screws up filtering of runt
	 * packets. Therefore runt packet filtering is disabled in the MAC and
	 * the packets are dropped here. They are also counted here.
	 */
	if (len < (NIC_MIN_PACKET_SIZE + 4)) {
		adapter->stats.rx_other_errs++;
		rfd->len = 0;
		goto out;
	}

	if ((word0 & ALCATEL_MULTICAST_PKT) && !(word0 & ALCATEL_BROADCAST_PKT))
		adapter->stats.multicast_pkts_rcvd++;

	rfd->len = len;

	skb = dev_alloc_skb(rfd->len + 2);
	if (!skb) {
		dev_err(&adapter->pdev->dev, "Couldn't alloc an SKB for Rx\n");
		return NULL;
	}

	adapter->netdev->stats.rx_bytes += rfd->len;

	memcpy(skb_put(skb, rfd->len), fbr->virt[buff_index], rfd->len);

	skb->protocol = eth_type_trans(skb, adapter->netdev);
	skb->ip_summed = CHECKSUM_NONE;
	netif_receive_skb(skb);

out:
	nic_return_rfd(adapter, rfd);
	return rfd;
}

/* et131x_handle_recv_pkts - Interrupt handler for receive processing
 *
 * Assumption: Rcv spinlock has been acquired.
 */
static int et131x_handle_recv_pkts(struct et131x_adapter *adapter, int budget)
{
	struct rfd *rfd = NULL;
	int count = 0;
	int limit = budget;
	bool done = true;
	struct rx_ring *rx_ring = &adapter->rx_ring;

	if (budget > MAX_PACKETS_HANDLED)
		limit = MAX_PACKETS_HANDLED;

	/* Process up to available RFDs */
	while (count < limit) {
		if (list_empty(&rx_ring->recv_list)) {
			WARN_ON(rx_ring->num_ready_recv != 0);
			done = false;
			break;
		}

		rfd = nic_rx_pkts(adapter);

		if (rfd == NULL)
			break;

		/* Do not receive any packets until a filter has been set.
		 * Do not receive any packets until we have link.
		 * If length is zero, return the RFD in order to advance the
		 * Free buffer ring.
		 */
		if (!adapter->packet_filter ||
		    !netif_carrier_ok(adapter->netdev) ||
		    rfd->len == 0)
			continue;

		/* Increment the number of packets we received */
		adapter->netdev->stats.rx_packets++;

		/* Set the status on the packet, either resources or success */
		if (rx_ring->num_ready_recv < RFD_LOW_WATER_MARK)
			dev_warn(&adapter->pdev->dev, "RFD's are running out\n");

		count++;
	}

	if (count == limit || !done) {
		rx_ring->unfinished_receives = true;
		writel(PARM_TX_TIME_INT_DEF * NANO_IN_A_MICRO,
		       &adapter->regs->global.watchdog_timer);
	} else {
		/* Watchdog timer will disable itself if appropriate. */
		rx_ring->unfinished_receives = false;
	}

	return count;
}

/* et131x_tx_dma_memory_alloc
 *
 * Allocates memory that will be visible both to the device and to the CPU.
 * The OS will pass us packets, pointers to which we will insert in the Tx
 * Descriptor queue. The device will read this queue to find the packets in
 * memory. The device will update the "status" in memory each time it xmits a
 * packet.
 */
static int et131x_tx_dma_memory_alloc(struct et131x_adapter *adapter)
{
	int desc_size = 0;
	struct tx_ring *tx_ring = &adapter->tx_ring;

	/* Allocate memory for the TCB's (Transmit Control Block) */
	tx_ring->tcb_ring = kcalloc(NUM_TCB, sizeof(struct tcb),
				    GFP_ATOMIC | GFP_DMA);
	if (!tx_ring->tcb_ring)
		return -ENOMEM;

	desc_size = (sizeof(struct tx_desc) * NUM_DESC_PER_RING_TX);
	tx_ring->tx_desc_ring = dma_alloc_coherent(&adapter->pdev->dev,
						   desc_size,
						   &tx_ring->tx_desc_ring_pa,
						   GFP_KERNEL);
	if (!tx_ring->tx_desc_ring) {
		dev_err(&adapter->pdev->dev,
			"Cannot alloc memory for Tx Ring\n");
		return -ENOMEM;
	}

	/* Save physical address
	 *
	 * NOTE: dma_alloc_coherent(), used above to alloc DMA regions,
	 * ALWAYS returns SAC (32-bit) addresses. If DAC (64-bit) addresses
	 * are ever returned, make sure the high part is retrieved here before
	 * storing the adjusted address.
	 */
	/* Allocate memory for the Tx status block */
	tx_ring->tx_status = dma_alloc_coherent(&adapter->pdev->dev,
						sizeof(u32),
						&tx_ring->tx_status_pa,
						GFP_KERNEL);
	if (!tx_ring->tx_status_pa) {
		dev_err(&adapter->pdev->dev,
			"Cannot alloc memory for Tx status block\n");
		return -ENOMEM;
	}
	return 0;
}

/* et131x_tx_dma_memory_free - Free all memory allocated within this module */
static void et131x_tx_dma_memory_free(struct et131x_adapter *adapter)
{
	int desc_size = 0;
	struct tx_ring *tx_ring = &adapter->tx_ring;

	if (tx_ring->tx_desc_ring) {
		/* Free memory relating to Tx rings here */
		desc_size = (sizeof(struct tx_desc) * NUM_DESC_PER_RING_TX);
		dma_free_coherent(&adapter->pdev->dev,
				  desc_size,
				  tx_ring->tx_desc_ring,
				  tx_ring->tx_desc_ring_pa);
		tx_ring->tx_desc_ring = NULL;
	}

	/* Free memory for the Tx status block */
	if (tx_ring->tx_status) {
		dma_free_coherent(&adapter->pdev->dev,
				  sizeof(u32),
				  tx_ring->tx_status,
				  tx_ring->tx_status_pa);

		tx_ring->tx_status = NULL;
	}
	/* Free the memory for the tcb structures */
	kfree(tx_ring->tcb_ring);
}

/* nic_send_packet - NIC specific send handler for version B silicon.
 * @adapter: pointer to our adapter
 * @tcb: pointer to struct tcb
 */
static int nic_send_packet(struct et131x_adapter *adapter, struct tcb *tcb)
{
	u32 i;
	struct tx_desc desc[24];	/* 24 x 16 byte */
	u32 frag = 0;
	u32 thiscopy, remainder;
	struct sk_buff *skb = tcb->skb;
	u32 nr_frags = skb_shinfo(skb)->nr_frags + 1;
	struct skb_frag_struct *frags = &skb_shinfo(skb)->frags[0];
	struct phy_device *phydev = adapter->phydev;
	dma_addr_t dma_addr;
	struct tx_ring *tx_ring = &adapter->tx_ring;

	/* Part of the optimizations of this send routine restrict us to
	 * sending 24 fragments at a pass. In practice we should never see
	 * more than 5 fragments.
	 *
	 * NOTE: The older version of this function can handle any number of
	 * fragments. If needed, we can call this function, although it is
	 * less efficient.
	 */

	/* nr_frags should be no more than 18. */
	BUILD_BUG_ON(MAX_SKB_FRAGS + 1 > 23);

	memset(desc, 0, sizeof(struct tx_desc) * (nr_frags + 1));

	for (i = 0; i < nr_frags; i++) {
		/* If there is something in this element, lets get a
		 * descriptor from the ring and get the necessary data
		 */
		if (i == 0) {
			/* If the fragments are smaller than a standard MTU,
			 * then map them to a single descriptor in the Tx
			 * Desc ring. However, if they're larger, as is
			 * possible with support for jumbo packets, then
			 * split them each across 2 descriptors.
			 *
			 * This will work until we determine why the hardware
			 * doesn't seem to like large fragments.
			 */
			if (skb_headlen(skb) <= 1514) {
				/* Low 16bits are length, high is vlan and
				 * unused currently so zero
				 */
				desc[frag].len_vlan = skb_headlen(skb);
				dma_addr = dma_map_single(&adapter->pdev->dev,
							  skb->data,
							  skb_headlen(skb),
							  DMA_TO_DEVICE);
				desc[frag].addr_lo = lower_32_bits(dma_addr);
				desc[frag].addr_hi = upper_32_bits(dma_addr);
				frag++;
			} else {
				desc[frag].len_vlan = skb_headlen(skb) / 2;
				dma_addr = dma_map_single(&adapter->pdev->dev,
							  skb->data,
							  (skb_headlen(skb) / 2),
							  DMA_TO_DEVICE);
				desc[frag].addr_lo = lower_32_bits(dma_addr);
				desc[frag].addr_hi = upper_32_bits(dma_addr);
				frag++;

				desc[frag].len_vlan = skb_headlen(skb) / 2;
				dma_addr = dma_map_single(&adapter->pdev->dev,
							  skb->data +
							  (skb_headlen(skb) / 2),
							  (skb_headlen(skb) / 2),
							  DMA_TO_DEVICE);
				desc[frag].addr_lo = lower_32_bits(dma_addr);
				desc[frag].addr_hi = upper_32_bits(dma_addr);
				frag++;
			}
		} else {
			desc[frag].len_vlan = frags[i - 1].size;
			dma_addr = skb_frag_dma_map(&adapter->pdev->dev,
						    &frags[i - 1],
						    0,
						    frags[i - 1].size,
						    DMA_TO_DEVICE);
			desc[frag].addr_lo = lower_32_bits(dma_addr);
			desc[frag].addr_hi = upper_32_bits(dma_addr);
			frag++;
		}
	}

	if (phydev && phydev->speed == SPEED_1000) {
		if (++tx_ring->since_irq == PARM_TX_NUM_BUFS_DEF) {
			/* Last element & Interrupt flag */
			desc[frag - 1].flags =
				    TXDESC_FLAG_INTPROC | TXDESC_FLAG_LASTPKT;
			tx_ring->since_irq = 0;
		} else { /* Last element */
			desc[frag - 1].flags = TXDESC_FLAG_LASTPKT;
		}
	} else {
		desc[frag - 1].flags =
			    TXDESC_FLAG_INTPROC | TXDESC_FLAG_LASTPKT;
	}

	desc[0].flags |= TXDESC_FLAG_FIRSTPKT;

	tcb->index_start = tx_ring->send_idx;
	tcb->stale = 0;

	thiscopy = NUM_DESC_PER_RING_TX - INDEX10(tx_ring->send_idx);

	if (thiscopy >= frag) {
		remainder = 0;
		thiscopy = frag;
	} else {
		remainder = frag - thiscopy;
	}
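
	/* Example of the split above, assuming a 512-descriptor ring (the
	 * value of NUM_DESC_PER_RING_TX is not spelled out here): if
	 * send_idx points at entry 509 and the packet needs 5 descriptors,
	 * thiscopy = 512 - 509 = 3 descriptors are copied to the tail of
	 * the ring and remainder = 2 wrap around to the head.
	 */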

	memcpy(tx_ring->tx_desc_ring + INDEX10(tx_ring->send_idx),
	       desc,
	       sizeof(struct tx_desc) * thiscopy);

	add_10bit(&tx_ring->send_idx, thiscopy);

	if (INDEX10(tx_ring->send_idx) == 0 ||
	    INDEX10(tx_ring->send_idx) == NUM_DESC_PER_RING_TX) {
		tx_ring->send_idx &= ~ET_DMA10_MASK;
		tx_ring->send_idx ^= ET_DMA10_WRAP;
	}

	if (remainder) {
		memcpy(tx_ring->tx_desc_ring,
		       desc + thiscopy,
		       sizeof(struct tx_desc) * remainder);

		add_10bit(&tx_ring->send_idx, remainder);
	}

	if (INDEX10(tx_ring->send_idx) == 0) {
		if (tx_ring->send_idx)
			tcb->index = NUM_DESC_PER_RING_TX - 1;
		else
			tcb->index = ET_DMA10_WRAP | (NUM_DESC_PER_RING_TX - 1);
	} else {
		tcb->index = tx_ring->send_idx - 1;
	}

	spin_lock(&adapter->tcb_send_qlock);

	if (tx_ring->send_tail)
		tx_ring->send_tail->next = tcb;
	else
		tx_ring->send_head = tcb;

	tx_ring->send_tail = tcb;

	WARN_ON(tcb->next != NULL);

	tx_ring->used++;

	spin_unlock(&adapter->tcb_send_qlock);

	/* Write the new write pointer back to the device. */
	writel(tx_ring->send_idx, &adapter->regs->txdma.service_request);

	/* For Gig only, we use Tx Interrupt coalescing. Enable the software
	 * timer to wake us up if this packet isn't followed by N more.
	 */
	if (phydev && phydev->speed == SPEED_1000) {
		writel(PARM_TX_TIME_INT_DEF * NANO_IN_A_MICRO,
		       &adapter->regs->global.watchdog_timer);
	}
	return 0;
}

/* send_packet - Do the work to send a packet
 *
 * Assumption: Send spinlock has been acquired
 */
static int send_packet(struct sk_buff *skb, struct et131x_adapter *adapter)
{
	int status;
	struct tcb *tcb;
	unsigned long flags;
	struct tx_ring *tx_ring = &adapter->tx_ring;

	/* All packets must have at least a MAC address and a protocol type */
	if (skb->len < ETH_HLEN)
		return -EIO;

	/* Get a TCB for this packet */
	spin_lock_irqsave(&adapter->tcb_ready_qlock, flags);

	tcb = tx_ring->tcb_qhead;

	if (tcb == NULL) {
		spin_unlock_irqrestore(&adapter->tcb_ready_qlock, flags);
		return -ENOMEM;
	}

	tx_ring->tcb_qhead = tcb->next;

	if (tx_ring->tcb_qhead == NULL)
		tx_ring->tcb_qtail = NULL;

	spin_unlock_irqrestore(&adapter->tcb_ready_qlock, flags);

	tcb->skb = skb;
	tcb->next = NULL;

	/* Call the NIC specific send handler. */
	status = nic_send_packet(adapter, tcb);

	if (status != 0) {
		spin_lock_irqsave(&adapter->tcb_ready_qlock, flags);

		if (tx_ring->tcb_qtail)
			tx_ring->tcb_qtail->next = tcb;
		else
			/* Apparently ready Q is empty. */
			tx_ring->tcb_qhead = tcb;

		tx_ring->tcb_qtail = tcb;
		spin_unlock_irqrestore(&adapter->tcb_ready_qlock, flags);
		return status;
	}
	WARN_ON(tx_ring->used > NUM_TCB);
	return 0;
}

/* free_send_packet - Recycle a struct tcb
 * @adapter: pointer to our adapter
 * @tcb: pointer to struct tcb
 *
 * Complete the packet if necessary
 * Assumption - Send spinlock has been acquired
 */
static inline void free_send_packet(struct et131x_adapter *adapter,
				    struct tcb *tcb)
{
	unsigned long flags;
	struct tx_desc *desc = NULL;
	struct net_device_stats *stats = &adapter->netdev->stats;
	struct tx_ring *tx_ring = &adapter->tx_ring;
	u64 dma_addr;

	if (tcb->skb) {
		stats->tx_bytes += tcb->skb->len;

		/* Iterate through the TX descriptors on the ring
		 * corresponding to this packet and unmap the fragments
		 * they point to
		 */
		do {
			desc = tx_ring->tx_desc_ring +
			       INDEX10(tcb->index_start);

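			/* Rebuild the 64-bit bus address that
			 * nic_send_packet() split across the descriptor's
			 * addr_lo/addr_hi fields
			 */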
			dma_addr = desc->addr_lo;
			dma_addr |= (u64)desc->addr_hi << 32;

			dma_unmap_single(&adapter->pdev->dev,
					 dma_addr,
					 desc->len_vlan, DMA_TO_DEVICE);

			add_10bit(&tcb->index_start, 1);
			if (INDEX10(tcb->index_start) >=
			    NUM_DESC_PER_RING_TX) {
				tcb->index_start &= ~ET_DMA10_MASK;
				tcb->index_start ^= ET_DMA10_WRAP;
			}
		} while (desc != tx_ring->tx_desc_ring + INDEX10(tcb->index));

		dev_kfree_skb_any(tcb->skb);
	}

	memset(tcb, 0, sizeof(struct tcb));

	/* Add the TCB to the Ready Q */
	spin_lock_irqsave(&adapter->tcb_ready_qlock, flags);

	stats->tx_packets++;

	if (tx_ring->tcb_qtail)
		tx_ring->tcb_qtail->next = tcb;
	else
		/* Apparently ready Q is empty. */
		tx_ring->tcb_qhead = tcb;

	tx_ring->tcb_qtail = tcb;

	spin_unlock_irqrestore(&adapter->tcb_ready_qlock, flags);
	WARN_ON(tx_ring->used < 0);
}

/* et131x_free_busy_send_packets - Free and complete the stopped active sends
 *
 * Assumption - Send spinlock has been acquired
 */
static void et131x_free_busy_send_packets(struct et131x_adapter *adapter)
{
	struct tcb *tcb;
	unsigned long flags;
	u32 freed = 0;
	struct tx_ring *tx_ring = &adapter->tx_ring;

	/* Any packets being sent? Check the first TCB on the send list */
	spin_lock_irqsave(&adapter->tcb_send_qlock, flags);

	tcb = tx_ring->send_head;

	while (tcb != NULL && freed < NUM_TCB) {
		struct tcb *next = tcb->next;

		tx_ring->send_head = next;

		if (next == NULL)
			tx_ring->send_tail = NULL;

		tx_ring->used--;

		spin_unlock_irqrestore(&adapter->tcb_send_qlock, flags);

		freed++;
		free_send_packet(adapter, tcb);

		spin_lock_irqsave(&adapter->tcb_send_qlock, flags);

		tcb = tx_ring->send_head;
	}

	WARN_ON(freed == NUM_TCB);

	spin_unlock_irqrestore(&adapter->tcb_send_qlock, flags);

	tx_ring->used = 0;
}

/* et131x_handle_send_pkts - Interrupt handler for send processing
 *
 * Re-claim the send resources, complete sends and get more to send from
 * the send wait queue.
 *
 * Assumption - Send spinlock has been acquired
 */
static void et131x_handle_send_pkts(struct et131x_adapter *adapter)
{
	unsigned long flags;
	u32 serviced;
	struct tcb *tcb;
	u32 index;
	struct tx_ring *tx_ring = &adapter->tx_ring;

	serviced = readl(&adapter->regs->txdma.new_service_complete);
	index = INDEX10(serviced);

	/* Has the ring wrapped? Process any descriptors that do not have
	 * the same "wrap" indicator as the current completion indicator
	 */
	spin_lock_irqsave(&adapter->tcb_send_qlock, flags);

	tcb = tx_ring->send_head;

	while (tcb &&
	       ((serviced ^ tcb->index) & ET_DMA10_WRAP) &&
	       index < INDEX10(tcb->index)) {
		tx_ring->used--;
		tx_ring->send_head = tcb->next;
		if (tcb->next == NULL)
			tx_ring->send_tail = NULL;

		spin_unlock_irqrestore(&adapter->tcb_send_qlock, flags);
		free_send_packet(adapter, tcb);
		spin_lock_irqsave(&adapter->tcb_send_qlock, flags);

		/* Goto the next packet */
		tcb = tx_ring->send_head;
	}
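
	/* Same reclaim as above, but for TCBs whose wrap bit matches the
	 * completion pointer: those are done once the completion index has
	 * advanced past them.
	 */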
	while (tcb &&
	       !((serviced ^ tcb->index) & ET_DMA10_WRAP) &&
	       index > (tcb->index & ET_DMA10_MASK)) {
		tx_ring->used--;
		tx_ring->send_head = tcb->next;
		if (tcb->next == NULL)
			tx_ring->send_tail = NULL;

		spin_unlock_irqrestore(&adapter->tcb_send_qlock, flags);
		free_send_packet(adapter, tcb);
		spin_lock_irqsave(&adapter->tcb_send_qlock, flags);

		/* Goto the next packet */
		tcb = tx_ring->send_head;
	}

	/* Wake up the queue when we hit a low-water mark */
	if (tx_ring->used <= NUM_TCB / 3)
		netif_wake_queue(adapter->netdev);

	spin_unlock_irqrestore(&adapter->tcb_send_qlock, flags);
}

static int et131x_get_settings(struct net_device *netdev,
			       struct ethtool_cmd *cmd)
{
	struct et131x_adapter *adapter = netdev_priv(netdev);

	return phy_ethtool_gset(adapter->phydev, cmd);
}

static int et131x_set_settings(struct net_device *netdev,
			       struct ethtool_cmd *cmd)
{
	struct et131x_adapter *adapter = netdev_priv(netdev);

	return phy_ethtool_sset(adapter->phydev, cmd);
}

static int et131x_get_regs_len(struct net_device *netdev)
{
#define ET131X_REGS_LEN 256
	return ET131X_REGS_LEN * sizeof(u32);
}

static void et131x_get_regs(struct net_device *netdev,
			    struct ethtool_regs *regs, void *regs_data)
{
	struct et131x_adapter *adapter = netdev_priv(netdev);
	struct address_map __iomem *aregs = adapter->regs;
	u32 *regs_buff = regs_data;
	u32 num = 0;
	u16 tmp;

	memset(regs_data, 0, et131x_get_regs_len(netdev));

	regs->version = (1 << 24) | (adapter->pdev->revision << 16) |
			adapter->pdev->device;
3018 /* PHY regs */
c8b0a484
ME
3019 et131x_mii_read(adapter, MII_BMCR, &tmp);
3020 regs_buff[num++] = tmp;
3021 et131x_mii_read(adapter, MII_BMSR, &tmp);
3022 regs_buff[num++] = tmp;
3023 et131x_mii_read(adapter, MII_PHYSID1, &tmp);
3024 regs_buff[num++] = tmp;
3025 et131x_mii_read(adapter, MII_PHYSID2, &tmp);
3026 regs_buff[num++] = tmp;
3027 et131x_mii_read(adapter, MII_ADVERTISE, &tmp);
3028 regs_buff[num++] = tmp;
3029 et131x_mii_read(adapter, MII_LPA, &tmp);
3030 regs_buff[num++] = tmp;
3031 et131x_mii_read(adapter, MII_EXPANSION, &tmp);
3032 regs_buff[num++] = tmp;
d2796743 3033 /* Autoneg next page transmit reg */
c8b0a484
ME
3034 et131x_mii_read(adapter, 0x07, &tmp);
3035 regs_buff[num++] = tmp;
d2796743 3036 /* Link partner next page reg */
c8b0a484
ME
3037 et131x_mii_read(adapter, 0x08, &tmp);
3038 regs_buff[num++] = tmp;
3039 et131x_mii_read(adapter, MII_CTRL1000, &tmp);
3040 regs_buff[num++] = tmp;
3041 et131x_mii_read(adapter, MII_STAT1000, &tmp);
3042 regs_buff[num++] = tmp;
3043 et131x_mii_read(adapter, 0x0b, &tmp);
3044 regs_buff[num++] = tmp;
3045 et131x_mii_read(adapter, 0x0c, &tmp);
3046 regs_buff[num++] = tmp;
3047 et131x_mii_read(adapter, MII_MMD_CTRL, &tmp);
3048 regs_buff[num++] = tmp;
3049 et131x_mii_read(adapter, MII_MMD_DATA, &tmp);
3050 regs_buff[num++] = tmp;
3051 et131x_mii_read(adapter, MII_ESTATUS, &tmp);
3052 regs_buff[num++] = tmp;
3053
3054 et131x_mii_read(adapter, PHY_INDEX_REG, &tmp);
3055 regs_buff[num++] = tmp;
3056 et131x_mii_read(adapter, PHY_DATA_REG, &tmp);
3057 regs_buff[num++] = tmp;
3058 et131x_mii_read(adapter, PHY_MPHY_CONTROL_REG, &tmp);
3059 regs_buff[num++] = tmp;
3060 et131x_mii_read(adapter, PHY_LOOPBACK_CONTROL, &tmp);
3061 regs_buff[num++] = tmp;
3062 et131x_mii_read(adapter, PHY_LOOPBACK_CONTROL + 1, &tmp);
3063 regs_buff[num++] = tmp;
3064
3065 et131x_mii_read(adapter, PHY_REGISTER_MGMT_CONTROL, &tmp);
3066 regs_buff[num++] = tmp;
3067 et131x_mii_read(adapter, PHY_CONFIG, &tmp);
3068 regs_buff[num++] = tmp;
3069 et131x_mii_read(adapter, PHY_PHY_CONTROL, &tmp);
3070 regs_buff[num++] = tmp;
3071 et131x_mii_read(adapter, PHY_INTERRUPT_MASK, &tmp);
3072 regs_buff[num++] = tmp;
3073 et131x_mii_read(adapter, PHY_INTERRUPT_STATUS, &tmp);
3074 regs_buff[num++] = tmp;
3075 et131x_mii_read(adapter, PHY_PHY_STATUS, &tmp);
3076 regs_buff[num++] = tmp;
3077 et131x_mii_read(adapter, PHY_LED_1, &tmp);
3078 regs_buff[num++] = tmp;
3079 et131x_mii_read(adapter, PHY_LED_2, &tmp);
3080 regs_buff[num++] = tmp;
d2796743
ME
3081
3082 /* Global regs */
3083 regs_buff[num++] = readl(&aregs->global.txq_start_addr);
3084 regs_buff[num++] = readl(&aregs->global.txq_end_addr);
3085 regs_buff[num++] = readl(&aregs->global.rxq_start_addr);
3086 regs_buff[num++] = readl(&aregs->global.rxq_end_addr);
3087 regs_buff[num++] = readl(&aregs->global.pm_csr);
3088 regs_buff[num++] = adapter->stats.interrupt_status;
3089 regs_buff[num++] = readl(&aregs->global.int_mask);
3090 regs_buff[num++] = readl(&aregs->global.int_alias_clr_en);
3091 regs_buff[num++] = readl(&aregs->global.int_status_alias);
3092 regs_buff[num++] = readl(&aregs->global.sw_reset);
3093 regs_buff[num++] = readl(&aregs->global.slv_timer);
3094 regs_buff[num++] = readl(&aregs->global.msi_config);
3095 regs_buff[num++] = readl(&aregs->global.loopback);
3096 regs_buff[num++] = readl(&aregs->global.watchdog_timer);
3097
3098 /* TXDMA regs */
3099 regs_buff[num++] = readl(&aregs->txdma.csr);
3100 regs_buff[num++] = readl(&aregs->txdma.pr_base_hi);
3101 regs_buff[num++] = readl(&aregs->txdma.pr_base_lo);
3102 regs_buff[num++] = readl(&aregs->txdma.pr_num_des);
3103 regs_buff[num++] = readl(&aregs->txdma.txq_wr_addr);
3104 regs_buff[num++] = readl(&aregs->txdma.txq_wr_addr_ext);
3105 regs_buff[num++] = readl(&aregs->txdma.txq_rd_addr);
3106 regs_buff[num++] = readl(&aregs->txdma.dma_wb_base_hi);
3107 regs_buff[num++] = readl(&aregs->txdma.dma_wb_base_lo);
3108 regs_buff[num++] = readl(&aregs->txdma.service_request);
3109 regs_buff[num++] = readl(&aregs->txdma.service_complete);
3110 regs_buff[num++] = readl(&aregs->txdma.cache_rd_index);
3111 regs_buff[num++] = readl(&aregs->txdma.cache_wr_index);
3112 regs_buff[num++] = readl(&aregs->txdma.tx_dma_error);
3113 regs_buff[num++] = readl(&aregs->txdma.desc_abort_cnt);
3114 regs_buff[num++] = readl(&aregs->txdma.payload_abort_cnt);
3115 regs_buff[num++] = readl(&aregs->txdma.writeback_abort_cnt);
3116 regs_buff[num++] = readl(&aregs->txdma.desc_timeout_cnt);
3117 regs_buff[num++] = readl(&aregs->txdma.payload_timeout_cnt);
3118 regs_buff[num++] = readl(&aregs->txdma.writeback_timeout_cnt);
3119 regs_buff[num++] = readl(&aregs->txdma.desc_error_cnt);
3120 regs_buff[num++] = readl(&aregs->txdma.payload_error_cnt);
3121 regs_buff[num++] = readl(&aregs->txdma.writeback_error_cnt);
3122 regs_buff[num++] = readl(&aregs->txdma.dropped_tlp_cnt);
3123 regs_buff[num++] = readl(&aregs->txdma.new_service_complete);
3124 regs_buff[num++] = readl(&aregs->txdma.ethernet_packet_cnt);
3125
3126 /* RXDMA regs */
3127 regs_buff[num++] = readl(&aregs->rxdma.csr);
3128 regs_buff[num++] = readl(&aregs->rxdma.dma_wb_base_hi);
3129 regs_buff[num++] = readl(&aregs->rxdma.dma_wb_base_lo);
3130 regs_buff[num++] = readl(&aregs->rxdma.num_pkt_done);
3131 regs_buff[num++] = readl(&aregs->rxdma.max_pkt_time);
3132 regs_buff[num++] = readl(&aregs->rxdma.rxq_rd_addr);
3133 regs_buff[num++] = readl(&aregs->rxdma.rxq_rd_addr_ext);
3134 regs_buff[num++] = readl(&aregs->rxdma.rxq_wr_addr);
3135 regs_buff[num++] = readl(&aregs->rxdma.psr_base_hi);
3136 regs_buff[num++] = readl(&aregs->rxdma.psr_base_lo);
3137 regs_buff[num++] = readl(&aregs->rxdma.psr_num_des);
3138 regs_buff[num++] = readl(&aregs->rxdma.psr_avail_offset);
3139 regs_buff[num++] = readl(&aregs->rxdma.psr_full_offset);
3140 regs_buff[num++] = readl(&aregs->rxdma.psr_access_index);
3141 regs_buff[num++] = readl(&aregs->rxdma.psr_min_des);
3142 regs_buff[num++] = readl(&aregs->rxdma.fbr0_base_lo);
3143 regs_buff[num++] = readl(&aregs->rxdma.fbr0_base_hi);
3144 regs_buff[num++] = readl(&aregs->rxdma.fbr0_num_des);
3145 regs_buff[num++] = readl(&aregs->rxdma.fbr0_avail_offset);
3146 regs_buff[num++] = readl(&aregs->rxdma.fbr0_full_offset);
3147 regs_buff[num++] = readl(&aregs->rxdma.fbr0_rd_index);
3148 regs_buff[num++] = readl(&aregs->rxdma.fbr0_min_des);
3149 regs_buff[num++] = readl(&aregs->rxdma.fbr1_base_lo);
3150 regs_buff[num++] = readl(&aregs->rxdma.fbr1_base_hi);
3151 regs_buff[num++] = readl(&aregs->rxdma.fbr1_num_des);
3152 regs_buff[num++] = readl(&aregs->rxdma.fbr1_avail_offset);
3153 regs_buff[num++] = readl(&aregs->rxdma.fbr1_full_offset);
3154 regs_buff[num++] = readl(&aregs->rxdma.fbr1_rd_index);
3155 regs_buff[num++] = readl(&aregs->rxdma.fbr1_min_des);
3156}
3157
3158static void et131x_get_drvinfo(struct net_device *netdev,
3159 struct ethtool_drvinfo *info)
3160{
3161 struct et131x_adapter *adapter = netdev_priv(netdev);
3162
3163 strlcpy(info->driver, DRIVER_NAME, sizeof(info->driver));
3164 strlcpy(info->version, DRIVER_VERSION, sizeof(info->version));
3165 strlcpy(info->bus_info, pci_name(adapter->pdev),
3166 sizeof(info->bus_info));
3167}
3168
3169 static const struct ethtool_ops et131x_ethtool_ops = {
3170 	.get_settings = et131x_get_settings,
3171 	.set_settings = et131x_set_settings,
3172 	.get_drvinfo = et131x_get_drvinfo,
3173 	.get_regs_len = et131x_get_regs_len,
3174 	.get_regs = et131x_get_regs,
3175 	.get_link = ethtool_op_get_link,
3176 };
3177 
3178 /* et131x_hwaddr_init - set up the MAC Address on the ET1310 */
3179 static void et131x_hwaddr_init(struct et131x_adapter *adapter)
3180{
3181 	/* If we have our default MAC from init and no MAC address from
3182 	 * EEPROM, then we need to generate the last octet and set it on the
3183 	 * device
3184 	 */
3185 	if (is_zero_ether_addr(adapter->rom_addr)) {
3186 		/* We need to randomly generate the last octet so we
3187 * decrease our chances of setting the mac address to
3188 * same as another one of our cards in the system
3189 */
3190 get_random_bytes(&adapter->addr[5], 1);
3191 		/* We have the default value in the register we are
3192 * working with so we need to copy the current
3193 * address into the permanent address
3194 */
3195 memcpy(adapter->rom_addr,
3196 		       adapter->addr, ETH_ALEN);
3197 } else {
3198 /* We do not have an override address, so set the
3199 * current address to the permanent address and add
3200 * it to the device
3201 */
3202 memcpy(adapter->addr,
3203 adapter->rom_addr, ETH_ALEN);
3204 }
3205}
3206
3207 /* et131x_pci_init - initial PCI setup
3208  *
3209  * Perform the initial setup of PCI registers and if possible initialise
3210  * the MAC address. At this point the I/O registers have yet to be mapped
3211  */
3212 static int et131x_pci_init(struct et131x_adapter *adapter,
3213 			   struct pci_dev *pdev)
3214 {
3215 	u16 max_payload;
3216 	int i, rc;
3217 
3218 	rc = et131x_init_eeprom(adapter);
3219 	if (rc < 0)
3220 		goto out;
3221 
3222 	if (!pci_is_pcie(pdev)) {
3223 		dev_err(&pdev->dev, "Missing PCIe capabilities\n");
3224 		goto err_out;
3225 	}
3226 
3227 	/* Let's set up the PORT LOGIC Register. */
3228 
3229 	/* Program the Ack/Nak latency and replay timers */
3230 	max_payload = pdev->pcie_mpss;
3231 
3232 	if (max_payload < 2) {
3233 		static const u16 acknak[2] = { 0x76, 0xD0 };
3234 		static const u16 replay[2] = { 0x1E0, 0x2ED };
3235 
3236 		if (pci_write_config_word(pdev, ET1310_PCI_ACK_NACK,
3237 					  acknak[max_payload])) {
3238 			dev_err(&pdev->dev,
3239 				"Could not write PCI config space for ACK/NAK\n");
3240 			goto err_out;
3241 		}
3242 		if (pci_write_config_word(pdev, ET1310_PCI_REPLAY,
3243 					  replay[max_payload])) {
3244 			dev_err(&pdev->dev,
3245 				"Could not write PCI config space for Replay Timer\n");
3246 			goto err_out;
3247 		}
3248 	}
3249 
3250 	/* l0s and l1 latency timers. We are using default values.
3251 	 * Representing 001 for L0s and 010 for L1
3252 	 */
3253 	if (pci_write_config_byte(pdev, ET1310_PCI_L0L1LATENCY, 0x11)) {
3254 		dev_err(&pdev->dev,
3255 			"Could not write PCI config space for Latency Timers\n");
3256 		goto err_out;
3257 	}
3258 
3259 	/* Change the max read size to 2k */
3260 	if (pcie_set_readrq(pdev, 2048)) {
3261 		dev_err(&pdev->dev,
3262 			"Couldn't change PCI config space for Max read size\n");
3263 		goto err_out;
3264 	}
3265 
3266 	/* Get MAC address from config space if an eeprom exists, otherwise
3267 	 * the MAC address there will not be valid
3268 	 */
3269 	if (!adapter->has_eeprom) {
3270 		et131x_hwaddr_init(adapter);
3271 		return 0;
3272 	}
3273 
3274 	for (i = 0; i < ETH_ALEN; i++) {
3275 		if (pci_read_config_byte(pdev, ET1310_PCI_MAC_ADDRESS + i,
3276 					 adapter->rom_addr + i)) {
3277 			dev_err(&pdev->dev, "Could not read PCI config space for MAC address\n");
3278 			goto err_out;
3279 		}
3280 	}
3281 	ether_addr_copy(adapter->addr, adapter->rom_addr);
3282 out:
3283 	return rc;
3284 err_out:
3285 	rc = -EIO;
3286 	goto out;
3287}
3288
3289 /* et131x_error_timer_handler
3290  * @data: timer-specific variable; here a pointer to our adapter structure
3291  *
3292  * The routine called when the error timer expires, to track the number of
3293  * recurring errors.
3294  */
3295 static void et131x_error_timer_handler(unsigned long data)
3296 {
3297 	struct et131x_adapter *adapter = (struct et131x_adapter *)data;
3298 struct phy_device *phydev = adapter->phydev;
3299
3300 if (et1310_in_phy_coma(adapter)) {
3301 		/* Bring the device immediately out of coma, to
3302 		 * prevent it from sleeping indefinitely; this
3303 		 * mechanism could be improved!
3304 		 */
3305 et1310_disable_phy_coma(adapter);
3306 adapter->boot_coma = 20;
3307 } else {
3308 et1310_update_macstat_host_counters(adapter);
3309 }
3310
3311 if (!phydev->link && adapter->boot_coma < 11)
3312 adapter->boot_coma++;
3313
3314 if (adapter->boot_coma == 10) {
3315 if (!phydev->link) {
3316 if (!et1310_in_phy_coma(adapter)) {
3317 /* NOTE - This was originally a 'sync with
3318 * interrupt'. How to do that under Linux?
3319 */
3320 et131x_enable_interrupts(adapter);
3321 et1310_enable_phy_coma(adapter);
3322 }
3323 }
3324 }
3325
3326 /* This is a periodic timer, so reschedule */
3327 	mod_timer(&adapter->error_timer, jiffies + TX_ERROR_PERIOD * HZ / 1000);
3328}
3329
3330 /* et131x_adapter_memory_free - Free all memory allocated for use by Tx & Rx */
3331 static void et131x_adapter_memory_free(struct et131x_adapter *adapter)
3332 {
3333 	et131x_tx_dma_memory_free(adapter);
3334 	et131x_rx_dma_memory_free(adapter);
3335 }
3336 
3337 /* et131x_adapter_memory_alloc
3338  * Allocate all the memory blocks for send, receive and others.
3339  */
3340 static int et131x_adapter_memory_alloc(struct et131x_adapter *adapter)
3341 {
3342 	int status;
3343 
3344 	/* Allocate memory for the Tx Ring */
3345 	status = et131x_tx_dma_memory_alloc(adapter);
3346 	if (status) {
3347 		dev_err(&adapter->pdev->dev,
3348 			"et131x_tx_dma_memory_alloc FAILED\n");
3349 		et131x_tx_dma_memory_free(adapter);
3350 		return status;
3351 	}
3352 	/* Receive buffer memory allocation */
3353 	status = et131x_rx_dma_memory_alloc(adapter);
3354 	if (status) {
3355 		dev_err(&adapter->pdev->dev,
3356 			"et131x_rx_dma_memory_alloc FAILED\n");
3357 		et131x_adapter_memory_free(adapter);
3358 		return status;
3359 	}
3360 
3361 	/* Init receive data structures */
3362 	status = et131x_init_recv(adapter);
3363 	if (status) {
3364 		dev_err(&adapter->pdev->dev, "et131x_init_recv FAILED\n");
3365 		et131x_adapter_memory_free(adapter);
3366 }
3367 return status;
3368}
3369
3370 static void et131x_adjust_link(struct net_device *netdev)
3371 {
3372 	struct et131x_adapter *adapter = netdev_priv(netdev);
3373 	struct phy_device *phydev = adapter->phydev;
3374 
3375 	if (!phydev)
3376 		return;
3377 	if (phydev->link == adapter->link)
3378 		return;
3379 
3380 	/* Check to see if we are in coma mode and if
3381 	 * so, disable it because we will not be able
3382 	 * to read PHY values until we are out.
3383 	 */
3384 	if (et1310_in_phy_coma(adapter))
3385 		et1310_disable_phy_coma(adapter);
3386 
3387 	adapter->link = phydev->link;
3388 	phy_print_status(phydev);
3389 
3390 	if (phydev->link) {
3391 		adapter->boot_coma = 20;
3392 		if (phydev->speed == SPEED_10) {
3393 			u16 register18;
3394 
3395 			et131x_mii_read(adapter, PHY_MPHY_CONTROL_REG,
3396 					&register18);
3397 			et131x_mii_write(adapter, phydev->addr,
3398 					 PHY_MPHY_CONTROL_REG, register18 | 0x4);
3399 			et131x_mii_write(adapter, phydev->addr, PHY_INDEX_REG,
3400 					 register18 | 0x8402);
3401 			et131x_mii_write(adapter, phydev->addr, PHY_DATA_REG,
3402 					 register18 | 511);
3403 			et131x_mii_write(adapter, phydev->addr,
3404 					 PHY_MPHY_CONTROL_REG, register18);
3405 		}
3406 
3407 		et1310_config_flow_control(adapter);
3408 
3409 		if (phydev->speed == SPEED_1000 &&
3410 		    adapter->registry_jumbo_packet > 2048) {
3411 			u16 reg;
3412 
3413 			et131x_mii_read(adapter, PHY_CONFIG, &reg);
3414 			reg &= ~ET_PHY_CONFIG_TX_FIFO_DEPTH;
3415 			reg |= ET_PHY_CONFIG_FIFO_DEPTH_32;
3416 			et131x_mii_write(adapter, phydev->addr, PHY_CONFIG,
3417 					 reg);
3418 		}
3419 
3420 		et131x_set_rx_dma_timer(adapter);
3421 		et1310_config_mac_regs2(adapter);
3422 	} else {
3423 		adapter->boot_coma = 0;
3424 
3425 		if (phydev->speed == SPEED_10) {
3426 			u16 register18;
3427 
3428 			et131x_mii_read(adapter, PHY_MPHY_CONTROL_REG,
3429 					&register18);
3430 			et131x_mii_write(adapter, phydev->addr,
3431 					 PHY_MPHY_CONTROL_REG, register18 | 0x4);
3432 			et131x_mii_write(adapter, phydev->addr,
3433 					 PHY_INDEX_REG, register18 | 0x8402);
3434 			et131x_mii_write(adapter, phydev->addr,
3435 					 PHY_DATA_REG, register18 | 511);
3436 			et131x_mii_write(adapter, phydev->addr,
3437 					 PHY_MPHY_CONTROL_REG, register18);
3438 		}
3439 
3440 		/* Free the packets being actively sent & stopped */
3441 		et131x_free_busy_send_packets(adapter);
3442 
3443 		/* Re-initialize the send structures */
3444 		et131x_init_send(adapter);
3445 
3446 		/* Bring the device back to the state it was during
3447 		 * init prior to autonegotiation being complete. This
3448 		 * way, when we get the auto-neg complete interrupt,
3449 		 * we can complete init by calling config_mac_regs2.
3450 		 */
3451 		et131x_soft_reset(adapter);
3452 
3453 		/* Setup ET1310 as per the documentation */
3454 		et131x_adapter_setup(adapter);
3455 
3456 		/* perform reset of tx/rx */
3457 		et131x_disable_txrx(netdev);
3458 		et131x_enable_txrx(netdev);
3459 }
3460}
3461
3462static int et131x_mii_probe(struct net_device *netdev)
3463{
3464 struct et131x_adapter *adapter = netdev_priv(netdev);
3465 struct phy_device *phydev = NULL;
3466
3467 phydev = phy_find_first(adapter->mii_bus);
3468 if (!phydev) {
3469 dev_err(&adapter->pdev->dev, "no PHY found\n");
3470 return -ENODEV;
3471 }
3472
3473 phydev = phy_connect(netdev, dev_name(&phydev->dev),
3474 			     &et131x_adjust_link, PHY_INTERFACE_MODE_MII);
3475 
3476 	if (IS_ERR(phydev)) {
3477 		dev_err(&adapter->pdev->dev, "Could not attach to PHY\n");
3478 		return PTR_ERR(phydev);
3479 	}
3480 
3481 	phydev->supported &= (SUPPORTED_10baseT_Half |
3482 			      SUPPORTED_10baseT_Full |
3483 			      SUPPORTED_100baseT_Half |
3484 			      SUPPORTED_100baseT_Full |
3485 			      SUPPORTED_Autoneg |
3486 			      SUPPORTED_MII |
3487 			      SUPPORTED_TP);
3488 
3489 	if (adapter->pdev->device != ET131X_PCI_DEVICE_ID_FAST)
3490 		phydev->supported |= SUPPORTED_1000baseT_Half |
3491 				     SUPPORTED_1000baseT_Full;
3492 
3493 	phydev->advertising = phydev->supported;
3494 	phydev->autoneg = AUTONEG_ENABLE;
3495 	adapter->phydev = phydev;
3496 
3497 	dev_info(&adapter->pdev->dev,
3498 		 "attached PHY driver [%s] (mii_bus:phy_addr=%s)\n",
3499 phydev->drv->name, dev_name(&phydev->dev));
3500
3501 return 0;
3502}
3503
3504 /* et131x_adapter_init
3505  *
3506  * Initialize the data structures for the et131x_adapter object and link
3507  * them together with the platform provided device structures.
3508  */
3509 static struct et131x_adapter *et131x_adapter_init(struct net_device *netdev,
3510 						  struct pci_dev *pdev)
3511 {
3512 	static const u8 default_mac[] = { 0x00, 0x05, 0x3d, 0x00, 0x02, 0x00 };
3513 
3514 	struct et131x_adapter *adapter;
3515 
3516 	/* Allocate private adapter struct and copy in relevant information */
3517 	adapter = netdev_priv(netdev);
3518 	adapter->pdev = pci_dev_get(pdev);
3519 	adapter->netdev = netdev;
3520 
3521 	/* Initialize spinlocks here */
3522 	spin_lock_init(&adapter->tcb_send_qlock);
3523 	spin_lock_init(&adapter->tcb_ready_qlock);
3524 	spin_lock_init(&adapter->rcv_lock);
3525 
3526 	adapter->registry_jumbo_packet = 1514;	/* 1514-9216 */
3527 
3528 	/* Set the MAC address to a default */
3529 	ether_addr_copy(adapter->addr, default_mac);
3530
3531 return adapter;
3532}
3533
3534 /* et131x_pci_remove
3535  *
3536  * Registered in the pci_driver structure, this function is called when the
3537  * PCI subsystem detects that a PCI device which matches the information
3538  * contained in the pci_device_id table has been removed.
3539  */
3540 static void et131x_pci_remove(struct pci_dev *pdev)
3541 {
3542 	struct net_device *netdev = pci_get_drvdata(pdev);
3543 	struct et131x_adapter *adapter = netdev_priv(netdev);
3544 
3545 	unregister_netdev(netdev);
3546 	netif_napi_del(&adapter->napi);
3547 	phy_disconnect(adapter->phydev);
3548 mdiobus_unregister(adapter->mii_bus);
3549 kfree(adapter->mii_bus->irq);
3550 mdiobus_free(adapter->mii_bus);
3551
3552 et131x_adapter_memory_free(adapter);
3553 iounmap(adapter->regs);
3554 pci_dev_put(pdev);
3555
3556 free_netdev(netdev);
3557 pci_release_regions(pdev);
3558 pci_disable_device(pdev);
3559}
3560
3561 /* et131x_up - Bring up a device for use. */
3562 static void et131x_up(struct net_device *netdev)
3563{
3564 struct et131x_adapter *adapter = netdev_priv(netdev);
3565
3566 et131x_enable_txrx(netdev);
3567 phy_start(adapter->phydev);
3568}
3569
3570 /* et131x_down - Bring down the device */
3571 static void et131x_down(struct net_device *netdev)
3572{
3573 struct et131x_adapter *adapter = netdev_priv(netdev);
3574
3575 /* Save the timestamp for the TX watchdog, prevent a timeout */
3576 netdev->trans_start = jiffies;
3577
3578 phy_stop(adapter->phydev);
3579 et131x_disable_txrx(netdev);
3580}
3581
3582#ifdef CONFIG_PM_SLEEP
3583static int et131x_suspend(struct device *dev)
3584{
3585 struct pci_dev *pdev = to_pci_dev(dev);
3586 struct net_device *netdev = pci_get_drvdata(pdev);
3587
3588 if (netif_running(netdev)) {
3589 netif_device_detach(netdev);
3590 et131x_down(netdev);
3591 pci_save_state(pdev);
3592 }
3593
3594 return 0;
3595}
3596
3597static int et131x_resume(struct device *dev)
3598{
3599 struct pci_dev *pdev = to_pci_dev(dev);
3600 struct net_device *netdev = pci_get_drvdata(pdev);
3601
3602 if (netif_running(netdev)) {
3603 pci_restore_state(pdev);
3604 et131x_up(netdev);
3605 netif_device_attach(netdev);
3606 }
3607
3608 return 0;
3609}
3610
3611static SIMPLE_DEV_PM_OPS(et131x_pm_ops, et131x_suspend, et131x_resume);
3612#define ET131X_PM_OPS (&et131x_pm_ops)
3613#else
3614#define ET131X_PM_OPS NULL
3615#endif
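/* ET131X_PM_OPS resolves to &et131x_pm_ops only under CONFIG_PM_SLEEP and to
 * NULL otherwise, so the .driver.pm assignment in et131x_driver below is
 * valid in both configurations; SIMPLE_DEV_PM_OPS binds et131x_suspend() and
 * et131x_resume() to the system suspend/resume transitions.
 */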
3616
3617 /* et131x_isr - The Interrupt Service Routine for the driver.
3618  * @irq: the IRQ on which the interrupt was received.
3619  * @dev_id: device-specific info (here a pointer to a net_device struct)
3620  *
3621  * Returns a value indicating if the interrupt was handled.
3622  */
3623 static irqreturn_t et131x_isr(int irq, void *dev_id)
3624 {
3625 	bool handled = true;
3626 	bool enable_interrupts = true;
3627 	struct net_device *netdev = dev_id;
3628 	struct et131x_adapter *adapter = netdev_priv(netdev);
3629 	struct address_map __iomem *iomem = adapter->regs;
3630 	struct rx_ring *rx_ring = &adapter->rx_ring;
3631 	struct tx_ring *tx_ring = &adapter->tx_ring;
3632 	u32 status;
3633 
3634 	if (!netif_device_present(netdev)) {
3635 		handled = false;
3636 		enable_interrupts = false;
3637 		goto out;
3638 	}
3639 
3640 	/* If the adapter is in low power state, then it should not
3641 	 * recognize any interrupt
3642 	 */
3643 
3644 	/* Disable Device Interrupts */
3645 	et131x_disable_interrupts(adapter);
3646 
3647 	/* Get a copy of the value in the interrupt status register
3648 	 * so we can process the interrupting section
3649 	 */
3650 	status = readl(&adapter->regs->global.int_status);
3651 
3652 	if (adapter->flow == FLOW_TXONLY || adapter->flow == FLOW_BOTH)
3653 		status &= ~INT_MASK_ENABLE;
3654 	else
3655 		status &= ~INT_MASK_ENABLE_NO_FLOW;
3656 
3657 	/* Make sure this is our interrupt */
3658 	if (!status) {
3659 		handled = false;
3660 		et131x_enable_interrupts(adapter);
3661 		goto out;
3662 	}
3663 
3664 	/* This is our interrupt, so process accordingly */
3665 	if (status & ET_INTR_WATCHDOG) {
3666 		struct tcb *tcb = tx_ring->send_head;
3667 
3668 		if (tcb)
3669 			if (++tcb->stale > 1)
3670 				status |= ET_INTR_TXDMA_ISR;
3671 
3672 		if (rx_ring->unfinished_receives)
3673 			status |= ET_INTR_RXDMA_XFR_DONE;
3674 		else if (tcb == NULL)
3675 			writel(0, &adapter->regs->global.watchdog_timer);
3676 
3677 		status &= ~ET_INTR_WATCHDOG;
3678 	}
3679 
3680 	if (status & (ET_INTR_RXDMA_XFR_DONE | ET_INTR_TXDMA_ISR)) {
3681 		enable_interrupts = false;
3682 		napi_schedule(&adapter->napi);
3683 	}
3684 
3685 	status &= ~(ET_INTR_TXDMA_ISR | ET_INTR_RXDMA_XFR_DONE);
3686 
3687 	if (!status)
3688 		goto out;
3689 
3690 	/* Handle the TXDMA Error interrupt */
3691 	if (status & ET_INTR_TXDMA_ERR) {
3692 		/* Following read also clears the register (COR) */
3693 		u32 txdma_err = readl(&iomem->txdma.tx_dma_error);
3694 
3695 		dev_warn(&adapter->pdev->dev,
3696 			 "TXDMA_ERR interrupt, error = %d\n",
3697 			 txdma_err);
3698 	}
3699 
3700 	/* Handle Free Buffer Ring 0 and 1 Low interrupt */
3701 	if (status & (ET_INTR_RXDMA_FB_R0_LOW | ET_INTR_RXDMA_FB_R1_LOW)) {
3702 		/* This indicates the number of unused buffers in RXDMA free
3703 		 * buffer ring 0 is <= the limit you programmed. Free buffer
3704 		 * resources need to be returned. Free buffers are consumed as
3705 		 * packets are passed from the network to the host. The host
3706 		 * becomes aware of the packets from the contents of the packet
3707 		 * status ring. This ring is queried when the packet done
3708 		 * interrupt occurs. Packets are then passed to the OS. When
3709 		 * the OS is done with the packets the resources can be
3710 		 * returned to the ET1310 for re-use. This interrupt is one
3711 		 * method of returning resources.
3712 		 */
3713 
3714 		/* If the user has flow control on, then we will
3715 		 * send a pause packet, otherwise just exit
3716 		 */
3717 		if (adapter->flow == FLOW_TXONLY || adapter->flow == FLOW_BOTH) {
3718 			u32 pm_csr;
3719 
3720 			/* Tell the device to send a pause packet via the back
3721 			 * pressure register (bp req and bp xon/xoff)
3722 			 */
3723 			pm_csr = readl(&iomem->global.pm_csr);
3724 			if (!et1310_in_phy_coma(adapter))
3725 				writel(3, &iomem->txmac.bp_ctrl);
3726 		}
3727 	}
3728 
3729 	/* Handle Packet Status Ring Low Interrupt */
3730 	if (status & ET_INTR_RXDMA_STAT_LOW) {
3731 		/* Same idea as with the two Free Buffer Rings. Packets going
3732 		 * from the network to the host each consume a free buffer
3733 		 * resource and a packet status resource. These resources are
3734 		 * passed to the OS. When the OS is done with the resources,
3735 		 * they need to be returned to the ET1310. This is one method
3736 		 * of returning the resources.
3737 		 */
3738 	}
3739 
3740 	/* Handle RXDMA Error Interrupt */
3741 	if (status & ET_INTR_RXDMA_ERR) {
3742 		/* The rxdma_error interrupt is sent when a time-out on a
3743 		 * request issued by the JAGCore has occurred or a completion is
3744 		 * returned with an un-successful status. In both cases the
3745 		 * request is considered complete. The JAGCore will
3746 		 * automatically re-try the request in question. Normally
3747 		 * information on events like these is sent to the host using
3748 		 * the "Advanced Error Reporting" capability. This interrupt is
3749 		 * another way of getting similar information. The only thing
3750 		 * required is to clear the interrupt by reading the ISR in the
3751 		 * global resources. The JAGCore will do a re-try on the
3752 		 * request. Normally you should never see this interrupt. If
3753 		 * you start to see this interrupt occurring frequently then
3754 		 * something bad has occurred. A reset might be the thing to do.
3755 		 */
3756 		/* TRAP();*/
3757 
3758 		dev_warn(&adapter->pdev->dev,
3759 			 "RxDMA_ERR interrupt, error %x\n",
3760 			 readl(&iomem->txmac.tx_test));
3761 	}
3762 
3763 	/* Handle the Wake on LAN Event */
3764 	if (status & ET_INTR_WOL) {
3765 		/* This is a secondary interrupt for wake on LAN. The driver
3766 		 * should never see this; if it does, something serious is
3767 		 * wrong. We will TRAP the message when we are in DBG mode,
3768 		 * otherwise we will ignore it.
3769 		 */
3770 		dev_err(&adapter->pdev->dev, "WAKE_ON_LAN interrupt\n");
3771 	}
3772 
3773 	/* Let's move on to the TxMac */
3774 	if (status & ET_INTR_TXMAC) {
3775 		u32 err = readl(&iomem->txmac.err);
3776 
3777 		/* When any of the errors occur and TXMAC generates an
3778 		 * interrupt to report these errors, it usually means that
3779 		 * TXMAC has detected an error in the data stream retrieved
3780 		 * from the on-chip Tx Q. All of these errors are catastrophic
3781 		 * and TXMAC won't be able to recover data when these errors
3782 		 * occur. In a nutshell, the whole Tx path will have to be reset
3783 		 * and re-configured afterwards.
3784 		 */
3785 		dev_warn(&adapter->pdev->dev,
3786 			 "TXMAC interrupt, error 0x%08x\n",
3787 			 err);
3788 
3789 		/* If we are debugging, we want to see this error, otherwise we
3790 		 * just want the device to be reset and continue
3791 		 */
3792 	}
3793 
3794 	/* Handle RXMAC Interrupt */
3795 	if (status & ET_INTR_RXMAC) {
3796 		/* These interrupts are catastrophic to the device; we need
3797 		 * to disable the interrupts and set the flag to cause us
3798 		 * to reset so we can solve this issue.
3799 		 */
3800 		/* MP_SET_FLAG( adapter, FMP_ADAPTER_HARDWARE_ERROR); */
3801 
3802 		dev_warn(&adapter->pdev->dev,
3803 			 "RXMAC interrupt, error 0x%08x. Requesting reset\n",
3804 			 readl(&iomem->rxmac.err_reg));
3805 
3806 		dev_warn(&adapter->pdev->dev,
3807 			 "Enable 0x%08x, Diag 0x%08x\n",
3808 			 readl(&iomem->rxmac.ctrl),
3809 			 readl(&iomem->rxmac.rxq_diag));
3810 
3811 		/* If we are debugging, we want to see this error, otherwise we
3812 		 * just want the device to be reset and continue
3813 		 */
3814 	}
3815 
3816 	/* Handle MAC_STAT Interrupt */
3817 	if (status & ET_INTR_MAC_STAT) {
3818 		/* This means at least one of the un-masked counters in the
3819 		 * MAC_STAT block has rolled over. Use this to maintain the top,
3820 		 * software managed bits of the counter(s).
3821 		 */
3822 		et1310_handle_macstat_interrupt(adapter);
3823 	}
3824 
3825 	/* Handle SLV Timeout Interrupt */
3826 	if (status & ET_INTR_SLV_TIMEOUT) {
3827 		/* This means a timeout has occurred on a read or write request
3828 		 * to one of the JAGCore registers. The Global Resources block
3829 		 * has terminated the request and on a read request, returned a
3830 		 * "fake" value. The most likely reasons are: Bad Address or the
3831 		 * addressed module is in a power-down state and can't respond.
3832 		 */
3833 	}
3834 
3835 out:
3836 	if (enable_interrupts)
3837 		et131x_enable_interrupts(adapter);
3838 
3839 return IRQ_RETVAL(handled);
3840}
3841
3842static int et131x_poll(struct napi_struct *napi, int budget)
3843{
3844 struct et131x_adapter *adapter =
3845 container_of(napi, struct et131x_adapter, napi);
3846 int work_done = et131x_handle_recv_pkts(adapter, budget);
3847
3848 et131x_handle_send_pkts(adapter);
3849
3850 if (work_done < budget) {
3851 napi_complete(&adapter->napi);
3852 et131x_enable_interrupts(adapter);
3853 }
3854
3855 return work_done;
3856}
3857
3858 /* et131x_stats - Return the current device statistics */
3859static struct net_device_stats *et131x_stats(struct net_device *netdev)
3860{
3861 struct et131x_adapter *adapter = netdev_priv(netdev);
3862 	struct net_device_stats *stats = &adapter->netdev->stats;
3863 struct ce_stats *devstat = &adapter->stats;
3864
3865 stats->rx_errors = devstat->rx_length_errs +
3866 devstat->rx_align_errs +
3867 devstat->rx_crc_errs +
3868 devstat->rx_code_violations +
3869 devstat->rx_other_errs;
3870 stats->tx_errors = devstat->tx_max_pkt_errs;
3871 stats->multicast = devstat->multicast_pkts_rcvd;
3872 stats->collisions = devstat->tx_collisions;
3873
3874 stats->rx_length_errors = devstat->rx_length_errs;
3875 stats->rx_over_errors = devstat->rx_overflows;
3876 stats->rx_crc_errors = devstat->rx_crc_errs;
3877 	stats->rx_dropped = devstat->rcvd_pkts_dropped;
3878 
3879 	/* NOTE: Not used, can't find analogous statistics */
3880 /* stats->rx_frame_errors = devstat->; */
3881 /* stats->rx_fifo_errors = devstat->; */
3882 /* stats->rx_missed_errors = devstat->; */
3883
3884 /* stats->tx_aborted_errors = devstat->; */
3885 /* stats->tx_carrier_errors = devstat->; */
3886 /* stats->tx_fifo_errors = devstat->; */
3887 /* stats->tx_heartbeat_errors = devstat->; */
3888 /* stats->tx_window_errors = devstat->; */
3889 return stats;
3890}
3891
3892 /* et131x_open - Open the device for use. */
3893 static int et131x_open(struct net_device *netdev)
3894 {
3895 	struct et131x_adapter *adapter = netdev_priv(netdev);
3896 	struct pci_dev *pdev = adapter->pdev;
3897 	unsigned int irq = pdev->irq;
3898 	int result;
3899 
3900 	/* Start the timer to track NIC errors */
3901 	init_timer(&adapter->error_timer);
3902 	adapter->error_timer.expires = jiffies + TX_ERROR_PERIOD * HZ / 1000;
3903 	adapter->error_timer.function = et131x_error_timer_handler;
3904 	adapter->error_timer.data = (unsigned long)adapter;
3905 	add_timer(&adapter->error_timer);
3906 
3907 	result = request_irq(irq, et131x_isr,
3908 			     IRQF_SHARED, netdev->name, netdev);
3909 	if (result) {
3910 		dev_err(&pdev->dev, "could not register IRQ %d\n", irq);
3911 		return result;
3912 	}
3913 
3914 	adapter->flags |= FMP_ADAPTER_INTERRUPT_IN_USE;
3915 
3916 	napi_enable(&adapter->napi);
3917 
3918 et131x_up(netdev);
3919
3920 return result;
3921}
3922
3923 /* et131x_close - Close the device */
3924 static int et131x_close(struct net_device *netdev)
3925 {
3926 	struct et131x_adapter *adapter = netdev_priv(netdev);
3927 
3928 	et131x_down(netdev);
3929 	napi_disable(&adapter->napi);
3930 
3931 	adapter->flags &= ~FMP_ADAPTER_INTERRUPT_IN_USE;
3932 	free_irq(adapter->pdev->irq, netdev);
3933
3934 /* Stop the error timer */
3935 return del_timer_sync(&adapter->error_timer);
3936}
3937
3938 /* et131x_ioctl - The I/O Control handler for the driver
3939  * @netdev: device on which the control request is being made
3940  * @reqbuf: a pointer to the IOCTL request buffer
3941  * @cmd: the IOCTL command code
3942  */
3943 static int et131x_ioctl(struct net_device *netdev, struct ifreq *reqbuf,
3944 			int cmd)
3945{
3946 struct et131x_adapter *adapter = netdev_priv(netdev);
3947
3948 if (!adapter->phydev)
3949 return -EINVAL;
3950
3951 return phy_mii_ioctl(adapter->phydev, reqbuf, cmd);
3952}
3953
3954 /* et131x_set_packet_filter - Configures the Rx Packet filtering on the device
3955  * @adapter: pointer to our private adapter structure
3956  *
3957  * FIXME: lot of dups with MAC code
3958  */
3959 static int et131x_set_packet_filter(struct et131x_adapter *adapter)
3960 {
3961 	int filter = adapter->packet_filter;
3962 u32 ctrl;
3963 u32 pf_ctrl;
3964
3965 ctrl = readl(&adapter->regs->rxmac.ctrl);
3966 pf_ctrl = readl(&adapter->regs->rxmac.pf_ctrl);
3967
3968 /* Default to disabled packet filtering. Enable it in the individual
3969 * case statements that require the device to filter something
3970 */
3971 ctrl |= 0x04;
3972
3973 /* Set us to be in promiscuous mode so we receive everything, this
3974 * is also true when we get a packet filter of 0
3975 */
3976 if ((filter & ET131X_PACKET_TYPE_PROMISCUOUS) || filter == 0)
3977 pf_ctrl &= ~7; /* Clear filter bits */
3978 else {
3979 		/* Set us up with Multicast packet filtering. Three cases are
3980 * possible - (1) we have a multi-cast list, (2) we receive ALL
3981 * multicast entries or (3) we receive none.
3982 */
3983 if (filter & ET131X_PACKET_TYPE_ALL_MULTICAST)
3984 pf_ctrl &= ~2; /* Multicast filter bit */
3985 else {
3986 et1310_setup_device_for_multicast(adapter);
3987 pf_ctrl |= 2;
3988 ctrl &= ~0x04;
3989 }
3990
3991 /* Set us up with Unicast packet filtering */
3992 if (filter & ET131X_PACKET_TYPE_DIRECTED) {
3993 et1310_setup_device_for_unicast(adapter);
3994 pf_ctrl |= 4;
3995 ctrl &= ~0x04;
3996 }
3997
3998 /* Set us up with Broadcast packet filtering */
3999 if (filter & ET131X_PACKET_TYPE_BROADCAST) {
4000 pf_ctrl |= 1; /* Broadcast filter bit */
4001 ctrl &= ~0x04;
4002 		} else {
4003 			pf_ctrl &= ~1;
4004 		}
4005
4006 /* Setup the receive mac configuration registers - Packet
4007 * Filter control + the enable / disable for packet filter
4008 * in the control reg.
4009 */
4010 writel(pf_ctrl, &adapter->regs->rxmac.pf_ctrl);
4011 writel(ctrl, &adapter->regs->rxmac.ctrl);
4012 }
4013 	return 0;
4014}
4015
4016 /* et131x_multicast - The handler to configure multicasting on the interface */
4017static void et131x_multicast(struct net_device *netdev)
4018{
4019 struct et131x_adapter *adapter = netdev_priv(netdev);
4020 	int packet_filter;
4021 struct netdev_hw_addr *ha;
4022 int i;
4023
4024 /* Before we modify the platform-independent filter flags, store them
4025 * locally. This allows us to determine if anything's changed and if
4026 * we even need to bother the hardware
4027 */
4028 packet_filter = adapter->packet_filter;
4029
4030 /* Clear the 'multicast' flag locally; because we only have a single
4031 * flag to check multicast, and multiple multicast addresses can be
4032 * set, this is the easiest way to determine if more than one
4033 * multicast address is being set.
4034 */
4035 packet_filter &= ~ET131X_PACKET_TYPE_MULTICAST;
4036
4037 /* Check the net_device flags and set the device independent flags
4038 * accordingly
4039 */
4040 if (netdev->flags & IFF_PROMISC)
4041 adapter->packet_filter |= ET131X_PACKET_TYPE_PROMISCUOUS;
4042 else
4043 adapter->packet_filter &= ~ET131X_PACKET_TYPE_PROMISCUOUS;
4044
4045 	if ((netdev->flags & IFF_ALLMULTI) ||
4046 	    (netdev_mc_count(netdev) > NIC_MAX_MCAST_LIST))
4047 adapter->packet_filter |= ET131X_PACKET_TYPE_ALL_MULTICAST;
4048
4049 if (netdev_mc_count(netdev) < 1) {
4050 adapter->packet_filter &= ~ET131X_PACKET_TYPE_ALL_MULTICAST;
4051 adapter->packet_filter &= ~ET131X_PACKET_TYPE_MULTICAST;
4052 	} else {
4053 		adapter->packet_filter |= ET131X_PACKET_TYPE_MULTICAST;
4054 	}
4055
4056 /* Set values in the private adapter struct */
4057 i = 0;
4058 netdev_for_each_mc_addr(ha, netdev) {
4059 if (i == NIC_MAX_MCAST_LIST)
4060 break;
4061 memcpy(adapter->multicast_list[i++], ha->addr, ETH_ALEN);
4062 }
4063 adapter->multicast_addr_count = i;
4064
4065 /* Are the new flags different from the previous ones? If not, then no
4066 * action is required
4067 *
4068 * NOTE - This block will always update the multicast_list with the
4069 * hardware, even if the addresses aren't the same.
4070 */
4071 	if (packet_filter != adapter->packet_filter)
4072 		et131x_set_packet_filter(adapter);
4073}
4074
4075 /* et131x_tx - The handler to tx a packet on the device */
4076 static netdev_tx_t et131x_tx(struct sk_buff *skb, struct net_device *netdev)
4077 {
4078 	struct et131x_adapter *adapter = netdev_priv(netdev);
4079 	struct tx_ring *tx_ring = &adapter->tx_ring;
4080 
4081 	/* stop the queue if it's getting full */
4082 	if (tx_ring->used >= NUM_TCB - 1 && !netif_queue_stopped(netdev))
4083 		netif_stop_queue(netdev);
4084 
4085 	/* Save the timestamp for the TX timeout watchdog */
4086 	netdev->trans_start = jiffies;
4087 
4088 	/* TCB is not available */
4089 	if (tx_ring->used >= NUM_TCB)
4090 		goto drop_err;
4091 
4092 	if ((adapter->flags & FMP_ADAPTER_FAIL_SEND_MASK) ||
4093 	    !netif_carrier_ok(netdev))
4094 		goto drop_err;
4095 
4096 	if (send_packet(skb, adapter))
4097 		goto drop_err;
4098 
4099 	return NETDEV_TX_OK;
4100 
4101 drop_err:
4102 	dev_kfree_skb_any(skb);
4103 	adapter->netdev->stats.tx_dropped++;
4104 	return NETDEV_TX_OK;
4105}
4106
4107 /* et131x_tx_timeout - Timeout handler
4108  *
4109  * The handler called when a Tx request times out. The timeout period is
4110  * specified by the 'watchdog_timeo' element in the net_device structure
4111  * (set to ET131X_TX_TIMEOUT in et131x_pci_setup()).
4112  */
4113 static void et131x_tx_timeout(struct net_device *netdev)
4114 {
4115 	struct et131x_adapter *adapter = netdev_priv(netdev);
4116 	struct tx_ring *tx_ring = &adapter->tx_ring;
4117 	struct tcb *tcb;
4118 	unsigned long flags;
4119 
4120 	/* If the device is closed, ignore the timeout */
4121 	if (!(adapter->flags & FMP_ADAPTER_INTERRUPT_IN_USE))
4122 		return;
4123 
4124 	/* Any nonrecoverable hardware error?
4125 	 * Checks adapter->flags for any failure in phy reading
4126 	 */
4127 	if (adapter->flags & FMP_ADAPTER_NON_RECOVER_ERROR)
4128 		return;
4129 
4130 	/* Hardware failure? */
4131 	if (adapter->flags & FMP_ADAPTER_HARDWARE_ERROR) {
4132 		dev_err(&adapter->pdev->dev, "hardware error - reset\n");
4133 		return;
4134 	}
4135 
4136 	/* Is send stuck? */
4137 	spin_lock_irqsave(&adapter->tcb_send_qlock, flags);
4138 	tcb = tx_ring->send_head;
4139 	spin_unlock_irqrestore(&adapter->tcb_send_qlock, flags);
4140 
4141 	if (tcb) {
4142 		tcb->count++;
4143 
4144 		if (tcb->count > NIC_SEND_HANG_THRESHOLD) {
4145 			dev_warn(&adapter->pdev->dev,
4146 				 "Send stuck - reset. tcb->WrIndex %x\n",
4147 				 tcb->index);
4148 
4149 			adapter->netdev->stats.tx_errors++;
4150 
4151 			/* perform reset of tx/rx */
4152 			et131x_disable_txrx(netdev);
4153 			et131x_enable_txrx(netdev);
4154 		}
4155 	}
4156}
4157
4158 /* et131x_change_mtu - The handler called to change the MTU for the device */
4159static int et131x_change_mtu(struct net_device *netdev, int new_mtu)
4160{
4161 int result = 0;
4162 struct et131x_adapter *adapter = netdev_priv(netdev);
4163
4164 /* Make sure the requested MTU is valid */
4165 if (new_mtu < 64 || new_mtu > 9216)
4166 return -EINVAL;
4167
4168 et131x_disable_txrx(netdev);
4169
4170 /* Set the new MTU */
4171 netdev->mtu = new_mtu;
4172
4173 /* Free Rx DMA memory */
4174 et131x_adapter_memory_free(adapter);
4175
4176 /* Set the config parameter for Jumbo Packet support */
4177 adapter->registry_jumbo_packet = new_mtu + 14;
4178 et131x_soft_reset(adapter);
4179
4180 /* Alloc and init Rx DMA memory */
4181 result = et131x_adapter_memory_alloc(adapter);
4182 if (result != 0) {
4183 dev_warn(&adapter->pdev->dev,
4184 			 "Change MTU failed; couldn't re-alloc DMA memory\n");
4185 return result;
4186 }
4187
4188 et131x_init_send(adapter);
4189
4190 et131x_hwaddr_init(adapter);
4191 memcpy(netdev->dev_addr, adapter->addr, ETH_ALEN);
4192
4193 /* Init the device with the new settings */
4194 et131x_adapter_setup(adapter);
4195 et131x_enable_txrx(netdev);
4196
4197 return result;
4198}
4199
4200 static const struct net_device_ops et131x_netdev_ops = {
4201 	.ndo_open = et131x_open,
4202 	.ndo_stop = et131x_close,
4203 	.ndo_start_xmit = et131x_tx,
4204 	.ndo_set_rx_mode = et131x_multicast,
4205 	.ndo_tx_timeout = et131x_tx_timeout,
4206 	.ndo_change_mtu = et131x_change_mtu,
4207 	.ndo_set_mac_address = eth_mac_addr,
4208 .ndo_validate_addr = eth_validate_addr,
4209 .ndo_get_stats = et131x_stats,
4210 .ndo_do_ioctl = et131x_ioctl,
4211};
4212
4213 /* et131x_pci_setup - Perform device initialization
4214  * @pdev: a pointer to the device's pci_dev structure
4215  * @ent: this device's entry in the pci_device_id table
4216  *
4217  * Registered in the pci_driver structure, this function is called when the
4218  * PCI subsystem finds a new PCI device which matches the information
4219  * contained in the pci_device_id table. This routine is the equivalent to
4220  * a device insertion routine.
4221  */
4222 static int et131x_pci_setup(struct pci_dev *pdev,
4223 			    const struct pci_device_id *ent)
4224 {
4225 	struct net_device *netdev;
4226 	struct et131x_adapter *adapter;
4227 	int rc;
4228 	int ii;
4229 
4230 	rc = pci_enable_device(pdev);
4231 	if (rc < 0) {
4232 		dev_err(&pdev->dev, "pci_enable_device() failed\n");
4233 		goto out;
4234 	}
4235 
4236 	/* Perform some basic PCI checks */
4237 	if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
4238 		dev_err(&pdev->dev, "Can't find PCI device's base address\n");
4239 		rc = -ENODEV;
4240 		goto err_disable;
4241 	}
4242 
4243 	rc = pci_request_regions(pdev, DRIVER_NAME);
4244 	if (rc < 0) {
4245 		dev_err(&pdev->dev, "Can't get PCI resources\n");
4246 		goto err_disable;
4247 	}
4248 
4249 	pci_set_master(pdev);
4250 
4251 	/* Check the DMA addressing support of this device */
4252 	if (dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)) &&
4253 	    dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32))) {
4254 		dev_err(&pdev->dev, "No usable DMA addressing method\n");
4255 		rc = -EIO;
4256 		goto err_release_res;
4257 	}
4258 
4259 	/* Allocate netdev and private adapter structs */
4260 	netdev = alloc_etherdev(sizeof(struct et131x_adapter));
4261 	if (!netdev) {
4262 		dev_err(&pdev->dev, "Couldn't alloc netdev struct\n");
4263 		rc = -ENOMEM;
4264 		goto err_release_res;
4265 	}
4266 
4267 	netdev->watchdog_timeo = ET131X_TX_TIMEOUT;
4268 	netdev->netdev_ops = &et131x_netdev_ops;
4269 
4270 	SET_NETDEV_DEV(netdev, &pdev->dev);
4271 	netdev->ethtool_ops = &et131x_ethtool_ops;
4272 
4273 	adapter = et131x_adapter_init(netdev, pdev);
4274 
4275 	rc = et131x_pci_init(adapter, pdev);
4276 	if (rc < 0)
4277 		goto err_free_dev;
4278 
4279 	/* Map the bus-relative registers to system virtual memory */
4280 	adapter->regs = pci_ioremap_bar(pdev, 0);
4281 	if (!adapter->regs) {
4282 		dev_err(&pdev->dev, "Cannot map device registers\n");
4283 		rc = -ENOMEM;
4284 		goto err_free_dev;
4285 	}
4286 
4287 	/* If Phy COMA mode was enabled when we went down, disable it here. */
4288 	writel(ET_PMCSR_INIT, &adapter->regs->global.pm_csr);
4289 
4290 	/* Issue a global reset to the et1310 */
4291 	et131x_soft_reset(adapter);
4292 
4293 	/* Disable all interrupts (paranoid) */
4294 	et131x_disable_interrupts(adapter);
4295 
4296 	/* Allocate DMA memory */
4297 	rc = et131x_adapter_memory_alloc(adapter);
4298 	if (rc < 0) {
4299 		dev_err(&pdev->dev, "Could not alloc adapter memory (DMA)\n");
4300 		goto err_iounmap;
4301 	}
4302 
4303 	/* Init send data structures */
4304 	et131x_init_send(adapter);
4305 
4306 	netif_napi_add(netdev, &adapter->napi, et131x_poll, 64);
4307 
4308 	/* Copy address into the net_device struct */
4309 	memcpy(netdev->dev_addr, adapter->addr, ETH_ALEN);
4310 
4311 	rc = -ENOMEM;
4312 
4313 	/* Setup the mii_bus struct */
4314 	adapter->mii_bus = mdiobus_alloc();
4315 	if (!adapter->mii_bus) {
4316 		dev_err(&pdev->dev, "Alloc of mii_bus struct failed\n");
4317 		goto err_mem_free;
4318 	}
4319 
4320 	adapter->mii_bus->name = "et131x_eth_mii";
4321 	snprintf(adapter->mii_bus->id, MII_BUS_ID_SIZE, "%x",
4322 		 (adapter->pdev->bus->number << 8) | adapter->pdev->devfn);
4323 	adapter->mii_bus->priv = netdev;
4324 	adapter->mii_bus->read = et131x_mdio_read;
4325 	adapter->mii_bus->write = et131x_mdio_write;
4326 	adapter->mii_bus->irq = kmalloc_array(PHY_MAX_ADDR, sizeof(int),
4327 					      GFP_KERNEL);
4328 	if (!adapter->mii_bus->irq)
4329 		goto err_mdio_free;
4330 
4331 	for (ii = 0; ii < PHY_MAX_ADDR; ii++)
4332 		adapter->mii_bus->irq[ii] = PHY_POLL;
4333 
4334 	rc = mdiobus_register(adapter->mii_bus);
4335 	if (rc < 0) {
4336 		dev_err(&pdev->dev, "failed to register MII bus\n");
4337 		goto err_mdio_free_irq;
4338 	}
4339 
4340 	rc = et131x_mii_probe(netdev);
4341 	if (rc < 0) {
4342 		dev_err(&pdev->dev, "failed to probe MII bus\n");
4343 		goto err_mdio_unregister;
4344 	}
4345 
4346 	/* Setup et1310 as per the documentation */
4347 	et131x_adapter_setup(adapter);
4348 
4349 	/* Init variable for counting how long we do not have link status */
4350 	adapter->boot_coma = 0;
4351 	et1310_disable_phy_coma(adapter);
4352 
4353 	/* We can enable interrupts now
4354 	 *
4355 	 * NOTE - Because registration of interrupt handler is done in the
4356 	 * device's open(), defer enabling device interrupts to that
4357 	 * point
4358 	 */
4359 
4360 	/* Register the net_device struct with the Linux network layer */
4361 	rc = register_netdev(netdev);
4362 	if (rc < 0) {
4363 		dev_err(&pdev->dev, "register_netdev() failed\n");
4364 		goto err_phy_disconnect;
4365 	}
4366 
4367 	/* Register the net_device struct with the PCI subsystem. Save a copy
4368 	 * of the PCI config space for this device now that the device has
4369 	 * been initialized, just in case it needs to be quickly restored.
4370 	 */
4371 	pci_set_drvdata(pdev, netdev);
4372 out:
4373 	return rc;
4374 
4375 err_phy_disconnect:
4376 	phy_disconnect(adapter->phydev);
4377 err_mdio_unregister:
4378 	mdiobus_unregister(adapter->mii_bus);
4379 err_mdio_free_irq:
4380 	kfree(adapter->mii_bus->irq);
4381 err_mdio_free:
4382 	mdiobus_free(adapter->mii_bus);
4383 err_mem_free:
4384 	et131x_adapter_memory_free(adapter);
4385 err_iounmap:
4386 	iounmap(adapter->regs);
4387 err_free_dev:
4388 	pci_dev_put(pdev);
4389 	free_netdev(netdev);
4390 err_release_res:
4391 	pci_release_regions(pdev);
4392 err_disable:
4393 	pci_disable_device(pdev);
4394 	goto out;
4395}
4396
4397 static const struct pci_device_id et131x_pci_table[] = {
4398 	{ PCI_VDEVICE(ATT, ET131X_PCI_DEVICE_ID_GIG), 0UL},
4399 	{ PCI_VDEVICE(ATT, ET131X_PCI_DEVICE_ID_FAST), 0UL},
4400 	{ 0,}
4401};
4402MODULE_DEVICE_TABLE(pci, et131x_pci_table);
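/* PCI_VDEVICE(ATT, ...) expands to vendor PCI_VENDOR_ID_ATT (Agere parts
 * carry the historical AT&T/Lucent vendor ID) plus the given device ID, and
 * the zeroed sentinel entry terminates the table for the PCI core's probe
 * matching; MODULE_DEVICE_TABLE() exports the same table so userspace can
 * autoload the module when matching hardware is found.
 */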
4403
4404static struct pci_driver et131x_driver = {
4405 .name = DRIVER_NAME,
4406 .id_table = et131x_pci_table,
4407 .probe = et131x_pci_setup,
4408 	.remove = et131x_pci_remove,
4409 .driver.pm = ET131X_PM_OPS,
4410};
4411
4412 module_pci_driver(et131x_driver);