/* Agere Systems Inc.
 * 10/100/1000 Base-T Ethernet Driver for the ET1301 and ET131x series MACs
 *
 * Copyright © 2005 Agere Systems Inc.
 * All rights reserved.
 *
 * Copyright (c) 2011 Mark Einon <mark.einon@gmail.com>
 *
 *------------------------------------------------------------------------------
 *
 * SOFTWARE LICENSE
 *
 * This software is provided subject to the following terms and conditions,
 * which you should read carefully before using the software.  Using this
 * software indicates your acceptance of these terms and conditions.  If you do
 * not agree with these terms and conditions, do not use the software.
 *
 * Copyright © 2005 Agere Systems Inc.
 * All rights reserved.
 *
 * Redistribution and use in source or binary forms, with or without
 * modifications, are permitted provided that the following conditions are met:
 *
 * . Redistributions of source code must retain the above copyright notice, this
 *   list of conditions and the following Disclaimer as comments in the code as
 *   well as in the documentation and/or other materials provided with the
 *   distribution.
 *
 * . Redistributions in binary form must reproduce the above copyright notice,
 *   this list of conditions and the following Disclaimer in the documentation
 *   and/or other materials provided with the distribution.
 *
 * . Neither the name of Agere Systems Inc. nor the names of the contributors
 *   may be used to endorse or promote products derived from this software
 *   without specific prior written permission.
 *
 * Disclaimer
 *
 * THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
 * INCLUDING, BUT NOT LIMITED TO, INFRINGEMENT AND THE IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  ANY
 * USE, MODIFICATION OR DISTRIBUTION OF THIS SOFTWARE IS SOLELY AT THE USERS OWN
 * RISK.  IN NO EVENT SHALL AGERE SYSTEMS INC. OR CONTRIBUTORS BE LIABLE FOR ANY
 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
 * ON ANY THEORY OF LIABILITY, INCLUDING, BUT NOT LIMITED TO, CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
 * DAMAGE.
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/pci.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>

#include <linux/sched.h>
#include <linux/ptrace.h>
#include <linux/slab.h>
#include <linux/ctype.h>
#include <linux/string.h>
#include <linux/timer.h>
#include <linux/interrupt.h>
#include <linux/in.h>
#include <linux/delay.h>
#include <linux/bitops.h>
#include <linux/io.h>

#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/if_arp.h>
#include <linux/ioport.h>
#include <linux/crc32.h>
#include <linux/random.h>
#include <linux/phy.h>

#include "et131x.h"
MODULE_AUTHOR("Victor Soriano <vjsoriano@agere.com>");
MODULE_AUTHOR("Mark Einon <mark.einon@gmail.com>");
MODULE_LICENSE("Dual BSD/GPL");
MODULE_DESCRIPTION("10/100/1000 Base-T Ethernet Driver for the ET1310 by Agere Systems");
/* EEPROM defines */
#define MAX_NUM_REGISTER_POLLS		1000
#define MAX_NUM_WRITE_RETRIES		2

/* MAC defines */
#define COUNTER_WRAP_16_BIT		0x10000
#define COUNTER_WRAP_12_BIT		0x1000

/* PCI defines */
#define INTERNAL_MEM_SIZE		0x400	/* 1024 of internal memory */
#define INTERNAL_MEM_RX_OFFSET		0x1FF	/* 50% Tx, 50% Rx */
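/* A note on the arithmetic of the split above: the 0x400 words of internal
 * packet memory are divided at offset 0x1FF, so Rx owns words 0x000-0x1FF
 * and Tx owns words 0x200-0x3FF, i.e. 512 words each (the "50% Tx, 50% Rx"
 * split).
 */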
/* For interrupts, normal running is:
 *       rxdma_xfr_done, phy_interrupt, mac_stat_interrupt,
 *       watchdog_interrupt & txdma_xfer_done
 *
 * In both cases, when flow control is enabled for either Tx or bi-direction,
 * we additionally enable rx_fbr0_low and rx_fbr1_low, so we know when the
 * buffer rings are running low.
 */
#define INT_MASK_DISABLE		0xffffffff

/* NOTE: Masking out MAC_STAT Interrupt for now...
 * #define INT_MASK_ENABLE		0xfff6bf17
 * #define INT_MASK_ENABLE_NO_FLOW	0xfff6bfd7
 */
#define INT_MASK_ENABLE			0xfffebf17
#define INT_MASK_ENABLE_NO_FLOW		0xfffebfd7
/* General defines */
/* Packet and header sizes */
#define NIC_MIN_PACKET_SIZE		60

/* Multicast list size */
#define NIC_MAX_MCAST_LIST		128

/* Supported Filters */
#define ET131X_PACKET_TYPE_DIRECTED		0x0001
#define ET131X_PACKET_TYPE_MULTICAST		0x0002
#define ET131X_PACKET_TYPE_BROADCAST		0x0004
#define ET131X_PACKET_TYPE_PROMISCUOUS		0x0008
#define ET131X_PACKET_TYPE_ALL_MULTICAST	0x0010

/* Tx Timeout */
#define ET131X_TX_TIMEOUT		(1 * HZ)
#define NIC_SEND_HANG_THRESHOLD		0

/* MP_ADAPTER flags */
#define FMP_ADAPTER_INTERRUPT_IN_USE	0x00000008

/* MP_SHARED flags */
#define FMP_ADAPTER_LOWER_POWER		0x00200000

#define FMP_ADAPTER_NON_RECOVER_ERROR	0x00800000
#define FMP_ADAPTER_HARDWARE_ERROR	0x04000000

#define FMP_ADAPTER_FAIL_SEND_MASK	0x3ff00000

/* Some offsets in PCI config space that are actually used. */
#define ET1310_PCI_MAC_ADDRESS		0xA4
#define ET1310_PCI_EEPROM_STATUS	0xB2
#define ET1310_PCI_ACK_NACK		0xC0
#define ET1310_PCI_REPLAY		0xC2
#define ET1310_PCI_L0L1LATENCY		0xCF

/* PCI Product IDs */
#define ET131X_PCI_DEVICE_ID_GIG	0xED00	/* ET1310 1000 Base-T 8 */
#define ET131X_PCI_DEVICE_ID_FAST	0xED01	/* ET1310 100  Base-T */

/* Define order of magnitude converter */
#define NANO_IN_A_MICRO			1000

#define PARM_RX_NUM_BUFS_DEF		4
#define PARM_RX_TIME_INT_DEF		10
#define PARM_RX_MEM_END_DEF		0x2bc
#define PARM_TX_TIME_INT_DEF		40
#define PARM_TX_NUM_BUFS_DEF		4
#define PARM_DMA_CACHE_DEF		0

/* RX defines */
#define FBR_CHUNKS			32
#define MAX_DESC_PER_RING_RX		1024

/* number of RFDs - default and min */
#define RFD_LOW_WATER_MARK		40
#define NIC_DEFAULT_NUM_RFD		1024
#define NUM_FBRS			2

#define MAX_PACKETS_HANDLED		256

#define ALCATEL_MULTICAST_PKT		0x01000000
#define ALCATEL_BROADCAST_PKT		0x02000000
/* typedefs for Free Buffer Descriptors */
struct fbr_desc {
	u32 addr_lo;
	u32 addr_hi;
	u32 word2;		/* Bits 10-31 reserved, 0-9 descriptor */
};
/* Packet Status Ring Descriptors
 *
 * Word 0:
 *
 * top 16 bits are from the Alcatel Status Word as enumerated in
 * PE-MCXMAC Data Sheet IPD DS54 0210-1 (also IPD-DS80 0205-2)
 *
 * 0: hp			hash pass
 * 1: ipa			IP checksum assist
 * 2: ipp			IP checksum pass
 * 3: tcpa			TCP checksum assist
 * 4: tcpp			TCP checksum pass
 * 5: wol			WOL Event
 * 6: rxmac_error		RXMAC Error Indicator
 * 7: drop			Drop packet
 * 8: ft			Frame Truncated
 * 9: jp			Jumbo Packet
 * 10: vp			VLAN Packet
 * 11-15: unused
 * 16: asw_prev_pkt_dropped	e.g. IFG too small on previous
 * 17: asw_RX_DV_event		short receive event detected
 * 18: asw_false_carrier_event	bad carrier since last good packet
 * 19: asw_code_err		one or more nibbles signalled as errors
 * 20: asw_CRC_err		CRC error
 * 21: asw_len_chk_err		frame length field incorrect
 * 22: asw_too_long		frame length > 1518 bytes
 * 23: asw_OK			valid CRC + no code error
 * 24: asw_multicast		has a multicast address
 * 25: asw_broadcast		has a broadcast address
 * 26: asw_dribble_nibble	spurious bits after EOP
 * 27: asw_control_frame	is a control frame
 * 28: asw_pause_frame		is a pause frame
 * 29: asw_unsupported_op	unsupported OP code
 * 30: asw_VLAN_tag		VLAN tag detected
 * 31: asw_long_evt		Rx long event
 *
 * Word 1:
 * 0-15: length			length in bytes
 * 16-25: bi			Buffer Index
 * 26-27: ri			Ring Index
 * 28-31: reserved
 */
struct pkt_stat_desc {
	u32 word0;
	u32 word1;
};
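/* A minimal sketch of how the Word 1 layout documented above can be decoded.
 * The shift/mask values below are derived from the bit table, not taken from
 * et131x.h, so treat them as illustrative rather than the driver's canonical
 * accessors.
 */
static inline u32 psr_word1_length(u32 word1)
{
	return word1 & 0xFFFF;		/* bits 0-15: length in bytes */
}

static inline u32 psr_word1_buffer_index(u32 word1)
{
	return (word1 >> 16) & 0x3FF;	/* bits 16-25: buffer index */
}

static inline u32 psr_word1_ring_index(u32 word1)
{
	return (word1 >> 26) & 0x3;	/* bits 26-27: ring index */
}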
/* Typedefs for the RX DMA status word */

/* rx status word 0 holds part of the status bits of the Rx DMA engine
 * that get copied out to memory by the ET-1310.  Word 0 is a 32 bit word
 * which contains the Free Buffer ring 0 and 1 available offset.
 *
 * bit 0-9 FBR1 offset
 * bit 10 Wrap flag for FBR1
 * bit 16-25 FBR0 offset
 * bit 26 Wrap flag for FBR0
 */

/* RXSTAT_WORD1_t structure holds part of the status bits of the Rx DMA engine
 * that get copied out to memory by the ET-1310.  Word 3 is a 32 bit word
 * which contains the Packet Status Ring available offset.
 *
 * bit 16-27 PSRoffset
 * bit 28 PSRwrap
 * bit 0-15 reserved
 */
/* struct rx_status_block is a structure representing the status of the Rx
 * DMA engine.  It sits in free memory, and is pointed to by 0x101c / 0x1020.
 */
struct rx_status_block {
	u32 word0;
	u32 word1;
};
/* Structure for look-up table holding free buffer ring pointers, addresses
 * and state.
 */
struct fbr_lookup {
	void		*virt[MAX_DESC_PER_RING_RX];
	u32		 bus_high[MAX_DESC_PER_RING_RX];
	u32		 bus_low[MAX_DESC_PER_RING_RX];
	void		*ring_virtaddr;
	dma_addr_t	 ring_physaddr;
	void		*mem_virtaddrs[MAX_DESC_PER_RING_RX / FBR_CHUNKS];
	dma_addr_t	 mem_physaddrs[MAX_DESC_PER_RING_RX / FBR_CHUNKS];
	u32		 local_full;
	u32		 num_entries;
	dma_addr_t	 buffsize;
};
/* struct rx_ring is the structure representing the adaptor's local
 * reference(s) to the rings
 */
struct rx_ring {
	struct fbr_lookup *fbr[NUM_FBRS];	/* One per ring */
	void *ps_ring_virtaddr;
	dma_addr_t ps_ring_physaddr;
	u32 local_psr_full;
	u32 psr_entries;

	struct rx_status_block *rx_status_block;
	dma_addr_t rx_status_bus;

	struct list_head recv_list;
	u32 num_ready_recv;

	u32 num_rfd;

	bool unfinished_receives;
};
/* word 2 of the control bits in the Tx Descriptor ring for the ET-1310
 *
 * 0-15: length of packet
 * 16-27: VLAN tag
 * 28: VLAN CFI
 * 29-31: VLAN priority
 *
 * word 3 of the control bits in the Tx Descriptor ring for the ET-1310
 *
 * 0: last packet in the sequence
 * 1: first packet in the sequence
 * 2: interrupt the processor when this pkt sent
 * 3: Control word - no packet data
 * 4: Issue half-duplex backpressure : XON/XOFF
 * 5: send pause frame
 * 6: Tx frame has error
 * 7: append CRC
 * 8: MAC override
 * 9: pad packet
 * 10: Packet is a Huge packet
 * 11: append VLAN tag
 * 12: IP checksum assist
 * 13: TCP checksum assist
 * 14: UDP checksum assist
 */

#define TXDESC_FLAG_LASTPKT		0x0001
#define TXDESC_FLAG_FIRSTPKT		0x0002
#define TXDESC_FLAG_INTPROC		0x0004
/* struct tx_desc represents each descriptor on the ring */
struct tx_desc {
	u32 addr_hi;
	u32 addr_lo;
	u32 len_vlan;	/* control words how to xmit the */
	u32 flags;	/* data (detailed above) */
};
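/* A minimal sketch of how a single-fragment packet might fill one descriptor,
 * given the layout above.  Illustrative only: the real send path also
 * allocates a TCB, advances the send index and manages the wrap bit.
 */
static inline void tx_desc_fill_example(struct tx_desc *desc,
					dma_addr_t dma, u32 len)
{
	desc->addr_hi = upper_32_bits(dma);
	desc->addr_lo = lower_32_bits(dma);
	desc->len_vlan = len;			/* bits 0-15: packet length */
	desc->flags = TXDESC_FLAG_FIRSTPKT |	/* only fragment, so both */
		      TXDESC_FLAG_LASTPKT |	/* first and last are set */
		      TXDESC_FLAG_INTPROC;	/* interrupt on completion */
}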
/* The status of the Tx DMA engine.  It sits in free memory, and is pointed
 * to by 0x101c / 0x1020.  This is a DMA10 type.
 */
/* TCB (Transmit Control Block: Host Side) */
struct tcb {
	struct tcb *next;	/* Next entry in ring */
	u32 count;		/* Used to spot stuck/lost packets */
	u32 stale;		/* Used to spot stuck/lost packets */
	struct sk_buff *skb;	/* Network skb we are tied to */
	u32 index;		/* Ring indexes */
	u32 index_start;
};
/* Structure representing our local reference(s) to the ring */
struct tx_ring {
	/* TCB (Transmit Control Block) memory and lists */
	struct tcb *tcb_ring;

	/* List of TCBs that are ready to be used */
	struct tcb *tcb_qhead;
	struct tcb *tcb_qtail;

	/* list of TCBs that are currently being sent. */
	struct tcb *send_head;
	struct tcb *send_tail;
	int used;

	/* The actual descriptor ring */
	struct tx_desc *tx_desc_ring;
	dma_addr_t tx_desc_ring_pa;

	/* send_idx indicates where we last wrote to in the descriptor ring. */
	u32 send_idx;

	/* The location of the write-back status block */
	u32 *tx_status;
	dma_addr_t tx_status_pa;

	/* Packets since the last IRQ: used for interrupt coalescing */
	int since_irq;
};
/* Do not change these values: if changed, then change also in respective
 * TXdma and Rxdma engines
 */
#define NUM_DESC_PER_RING_TX	512	/* TX Do not change these values */
#define NUM_TCB			64
/* These values are all superseded by registry entries to facilitate tuning.
 * Once the desired performance has been achieved, the optimal registry values
 * should be re-populated to these #defines:
 */
#define TX_ERROR_PERIOD		1000

#define LO_MARK_PERCENT_FOR_PSR	15
#define LO_MARK_PERCENT_FOR_RX	15
/* RFD (Receive Frame Descriptor) */
struct rfd {
	struct list_head list_node;
	struct sk_buff *skb;
	u32 len;	/* total size of receive frame */
	u16 bufferindex;
	u8 ringindex;
};
/* Flow Control */
#define FLOW_BOTH	0
#define FLOW_TXONLY	1
#define FLOW_RXONLY	2
#define FLOW_NONE	3
/* Struct to define some device statistics */
struct ce_stats {
	u32 multicast_pkts_rcvd;
	u32 rcvd_pkts_dropped;

	u32 tx_underflows;
	u32 tx_collisions;
	u32 tx_excessive_collisions;
	u32 tx_first_collisions;
	u32 tx_late_collisions;
	u32 tx_max_pkt_errs;
	u32 tx_deferred;

	u32 rx_overflows;
	u32 rx_length_errs;
	u32 rx_align_errs;
	u32 rx_crc_errs;
	u32 rx_code_violations;
	u32 rx_other_errs;

	u32 interrupt_status;
};
/* The private adapter structure */
struct et131x_adapter {
	struct net_device *netdev;
	struct pci_dev *pdev;
	struct mii_bus *mii_bus;
	struct phy_device *phydev;
	struct napi_struct napi;

	/* Flags that indicate current state of the adapter */
	u32 flags;

	/* local link state, to determine if a state change has occurred */
	int link;

	/* Configuration */
	u8 rom_addr[ETH_ALEN];
	u8 addr[ETH_ALEN];
	bool has_eeprom;
	u8 eeprom_data[2];

	spinlock_t tcb_send_qlock;	/* protects the tx_ring send tcb list */
	spinlock_t tcb_ready_qlock;	/* protects the tx_ring ready tcb list */
	spinlock_t rcv_lock;		/* protects the rx_ring receive list */

	/* Packet Filter and look ahead size */
	u32 packet_filter;

	/* multicast list */
	u32 multicast_addr_count;
	u8 multicast_list[NIC_MAX_MCAST_LIST][ETH_ALEN];

	/* Pointer to the device's PCI register space */
	struct address_map __iomem *regs;

	/* Registry parameters */
	u8 wanted_flow;			/* Flow we want for 802.3x flow control */
	u32 registry_jumbo_packet;	/* Max supported ethernet packet size */

	/* Derived from the registry: */
	u8 flow;			/* flow control validated by the far-end */

	/* Minimize init-time */
	struct timer_list error_timer;

	/* variable putting the phy into coma mode when boot up with no cable
	 * plugged in after 5 seconds
	 */
	u8 boot_coma;

	/* Tx Memory Variables */
	struct tx_ring tx_ring;

	/* Rx Memory Variables */
	struct rx_ring rx_ring;

	struct ce_stats stats;
};
static int eeprom_wait_ready(struct pci_dev *pdev, u32 *status)
{
	u32 reg;
	int i;

	/* 1. Check LBCIF Status Register for bits 6 & 3:2 all equal to 0 and
	 *    bits 7,1:0 both equal to 1, at least once after reset.
	 *    Subsequent operations need only to check that bits 1:0 are equal
	 *    to 1 prior to starting a single byte read/write
	 */
	for (i = 0; i < MAX_NUM_REGISTER_POLLS; i++) {
		if (pci_read_config_dword(pdev, LBCIF_DWORD1_GROUP, &reg))
			return -EIO;

		/* I2C idle and Phy Queue Avail both true */
		if ((reg & 0x3000) == 0x3000) {
			if (status)
				*status = reg;

			return reg & 0xFF;
		}
	}
	return -ETIMEDOUT;
}
static int eeprom_write(struct et131x_adapter *adapter, u32 addr, u8 data)
{
	struct pci_dev *pdev = adapter->pdev;
	int index = 0;
	int retries;
	int err = 0;
	int writeok = 0;
	u32 status;
	u32 val = 0;

	/* For an EEPROM, an I2C single byte write is defined as a START
	 * condition followed by the device address, EEPROM address, one byte
	 * of data and a STOP condition.  The STOP condition will trigger the
	 * EEPROM's internally timed write cycle to the nonvolatile memory.
	 * All inputs are disabled during this write cycle and the EEPROM will
	 * not respond to any access until the internal write is complete.
	 */
	err = eeprom_wait_ready(pdev, NULL);
	if (err < 0)
		return err;

	/* 2. Write to the LBCIF Control Register: bit 7=1, bit 6=1, bit 3=0,
	 *    and bits 1:0 both =0.  Bit 5 should be set according to the
	 *    type of EEPROM being accessed (1=two byte addressing, 0=one
	 *    byte addressing).
	 */
	if (pci_write_config_byte(pdev, LBCIF_CONTROL_REGISTER,
				  LBCIF_CONTROL_LBCIF_ENABLE |
				  LBCIF_CONTROL_I2C_WRITE))
		return -EIO;

	/* Prepare EEPROM address for Step 3 */
	for (retries = 0; retries < MAX_NUM_WRITE_RETRIES; retries++) {
		if (pci_write_config_dword(pdev, LBCIF_ADDRESS_REGISTER, addr))
			break;
		/* Write the data to the LBCIF Data Register (the I2C write
		 * will begin).
		 */
		if (pci_write_config_byte(pdev, LBCIF_DATA_REGISTER, data))
			break;
		/* Monitor bit 1:0 of the LBCIF Status Register.  When bits
		 * 1:0 are both equal to 1, the I2C write has completed and the
		 * internal write cycle of the EEPROM is about to start.
		 * (bits 1:0 = 01 is a legal state while waiting for both
		 * equal to 1, but bits 1:0 = 10 is invalid and implies that
		 * something is broken).
		 */
		err = eeprom_wait_ready(pdev, &status);
		if (err < 0)
			return 0;

		/* Check bit 3 of the LBCIF Status Register.  If equal to 1,
		 * an error has occurred.  Don't break here if we are revision
		 * 1, this is so we do a blind write for load bug.
		 */
		if ((status & LBCIF_STATUS_GENERAL_ERROR) &&
		    adapter->pdev->revision == 0)
			break;

		/* Check bit 2 of the LBCIF Status Register.  If equal to 1 an
		 * ACK error has occurred on the address phase of the write.
		 * This could be due to an actual hardware failure or the
		 * EEPROM may still be in its internal write cycle from a
		 * previous write.  This write operation was ignored and must
		 * be repeated later.
		 */
		if (status & LBCIF_STATUS_ACK_ERROR) {
			/* This could be due to an actual hardware failure
			 * or the EEPROM may still be in its internal write
			 * cycle from a previous write.  This write operation
			 * was ignored and must be repeated later.
			 */
			udelay(10);
			continue;
		}

		writeok = 1;
		break;
	}

	udelay(10);

	while (1) {
		if (pci_write_config_byte(pdev, LBCIF_CONTROL_REGISTER,
					  LBCIF_CONTROL_LBCIF_ENABLE))
			writeok = 0;

		/* Do read until internal ACK_ERROR goes away meaning write
		 * completed
		 */
		do {
			pci_write_config_dword(pdev,
					       LBCIF_ADDRESS_REGISTER,
					       addr);
			do {
				pci_read_config_dword(pdev,
						      LBCIF_DATA_REGISTER,
						      &val);
			} while ((val & 0x00010000) == 0);
		} while (val & 0x00040000);

		if ((val & 0xFF00) != 0xC000 || index == 10000)
			break;
		index++;
	}
	return writeok ? 0 : -EIO;
}
static int eeprom_read(struct et131x_adapter *adapter, u32 addr, u8 *pdata)
{
	struct pci_dev *pdev = adapter->pdev;
	int err;
	u32 status;

	/* A single byte read is similar to the single byte write, with the
	 * exception of the data flow:
	 */
	err = eeprom_wait_ready(pdev, NULL);
	if (err < 0)
		return err;
	/* Write to the LBCIF Control Register: bit 7=1, bit 6=0, bit 3=0,
	 * and bits 1:0 both =0.  Bit 5 should be set according to the type
	 * of EEPROM being accessed (1=two byte addressing, 0=one byte
	 * addressing).
	 */
	if (pci_write_config_byte(pdev, LBCIF_CONTROL_REGISTER,
				  LBCIF_CONTROL_LBCIF_ENABLE))
		return -EIO;
	/* Write the address to the LBCIF Address Register (I2C read will
	 * begin).
	 */
	if (pci_write_config_dword(pdev, LBCIF_ADDRESS_REGISTER, addr))
		return -EIO;
	/* Monitor bit 0 of the LBCIF Status Register.  When = 1, I2C read
	 * is complete. (if bit 1 =1 and bit 0 stays = 0, a hardware failure
	 * has occurred).
	 */
	err = eeprom_wait_ready(pdev, &status);
	if (err < 0)
		return err;
	/* Regardless of error status, read data byte from LBCIF Data
	 * Register.
	 */
	*pdata = err;

	return (status & LBCIF_STATUS_ACK_ERROR) ? -EIO : 0;
}
static int et131x_init_eeprom(struct et131x_adapter *adapter)
{
	struct pci_dev *pdev = adapter->pdev;
	u8 eestatus;

	pci_read_config_byte(pdev, ET1310_PCI_EEPROM_STATUS, &eestatus);

	/* THIS IS A WORKAROUND:
	 * I need to call this function twice to get my card running in an
	 * LG M1 Express Dual.  I also tried a msleep before this function,
	 * because I thought there could be some timing conditions, but it
	 * didn't work.  Calling the whole function twice also works.
	 */
	if (pci_read_config_byte(pdev, ET1310_PCI_EEPROM_STATUS, &eestatus)) {
		dev_err(&pdev->dev,
			"Could not read PCI config space for EEPROM Status\n");
		return -EIO;
	}

	/* Determine if the error(s) we care about are present.  If they are
	 * present we need to fail.
	 */
	if (eestatus & 0x4C) {
		int write_failed = 0;

		if (pdev->revision == 0x01) {
			int i;
			static const u8 eedata[4] = { 0xFE, 0x13, 0x10, 0xFF };

			/* Re-write the first 4 bytes if we have an eeprom
			 * present and the revision id is 1, this fixes the
			 * corruption seen with 1310 B Silicon
			 */
			for (i = 0; i < 3; i++)
				if (eeprom_write(adapter, i, eedata[i]) < 0)
					write_failed = 1;
		}
		if (pdev->revision != 0x01 || write_failed) {
			dev_err(&pdev->dev,
				"Fatal EEPROM Status Error - 0x%04x\n",
				eestatus);

			/* This error could mean that there was an error
			 * reading the eeprom or that the eeprom doesn't exist.
			 * We will treat each case the same and not try to
			 * gather additional information that normally would
			 * come from the eeprom, like MAC Address
			 */
			adapter->has_eeprom = 0;
			return -EIO;
		}
	}
	adapter->has_eeprom = 1;

	/* Read the EEPROM for information regarding LED behavior.  Refer to
	 * et131x_xcvr_init() for its use.
	 */
	eeprom_read(adapter, 0x70, &adapter->eeprom_data[0]);
	eeprom_read(adapter, 0x71, &adapter->eeprom_data[1]);

	if (adapter->eeprom_data[0] != 0xcd)
		/* Disable all optional features */
		adapter->eeprom_data[1] = 0x00;

	return 0;
}
static void et131x_rx_dma_enable(struct et131x_adapter *adapter)
{
	/* Setup the receive dma configuration register for normal operation */
	u32 csr = ET_RXDMA_CSR_FBR1_ENABLE;
	struct rx_ring *rx_ring = &adapter->rx_ring;

	if (rx_ring->fbr[1]->buffsize == 4096)
		csr |= ET_RXDMA_CSR_FBR1_SIZE_LO;
	else if (rx_ring->fbr[1]->buffsize == 8192)
		csr |= ET_RXDMA_CSR_FBR1_SIZE_HI;
	else if (rx_ring->fbr[1]->buffsize == 16384)
		csr |= ET_RXDMA_CSR_FBR1_SIZE_LO | ET_RXDMA_CSR_FBR1_SIZE_HI;

	csr |= ET_RXDMA_CSR_FBR0_ENABLE;
	if (rx_ring->fbr[0]->buffsize == 256)
		csr |= ET_RXDMA_CSR_FBR0_SIZE_LO;
	else if (rx_ring->fbr[0]->buffsize == 512)
		csr |= ET_RXDMA_CSR_FBR0_SIZE_HI;
	else if (rx_ring->fbr[0]->buffsize == 1024)
		csr |= ET_RXDMA_CSR_FBR0_SIZE_LO | ET_RXDMA_CSR_FBR0_SIZE_HI;
	writel(csr, &adapter->regs->rxdma.csr);

	csr = readl(&adapter->regs->rxdma.csr);
	if (csr & ET_RXDMA_CSR_HALT_STATUS) {
		udelay(5);
		csr = readl(&adapter->regs->rxdma.csr);
		if (csr & ET_RXDMA_CSR_HALT_STATUS) {
			dev_err(&adapter->pdev->dev,
				"RX Dma failed to exit halt state. CSR 0x%08x\n",
				csr);
		}
	}
}
static void et131x_rx_dma_disable(struct et131x_adapter *adapter)
{
	u32 csr;

	/* Setup the receive dma configuration register */
	writel(ET_RXDMA_CSR_HALT | ET_RXDMA_CSR_FBR1_ENABLE,
	       &adapter->regs->rxdma.csr);
	csr = readl(&adapter->regs->rxdma.csr);
	if (!(csr & ET_RXDMA_CSR_HALT_STATUS)) {
		udelay(5);
		csr = readl(&adapter->regs->rxdma.csr);
		if (!(csr & ET_RXDMA_CSR_HALT_STATUS))
			dev_err(&adapter->pdev->dev,
				"RX Dma failed to enter halt state. CSR 0x%08x\n",
				csr);
	}
}
static void et131x_tx_dma_enable(struct et131x_adapter *adapter)
{
	/* Setup the transmit dma configuration register for normal
	 * operation
	 */
	writel(ET_TXDMA_SNGL_EPKT | (PARM_DMA_CACHE_DEF << ET_TXDMA_CACHE_SHIFT),
	       &adapter->regs->txdma.csr);
}
static inline void add_10bit(u32 *v, int n)
{
	*v = INDEX10(*v + n) | (*v & ET_DMA10_WRAP);
}

static inline void add_12bit(u32 *v, int n)
{
	*v = INDEX12(*v + n) | (*v & ET_DMA12_WRAP);
}
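/* Worked example for add_10bit() (assuming INDEX10() masks to the low ten
 * bits and ET_DMA10_WRAP is bit 10, as the names suggest): starting from
 * v = 1022 with the wrap bit clear, add_10bit(&v, 3) computes
 * INDEX10(1025) = 1 and ORs back the preserved wrap bit, so the index wraps
 * around while the wrap flag is left untouched for the caller to manage.
 */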
static void et1310_config_mac_regs1(struct et131x_adapter *adapter)
{
	struct mac_regs __iomem *macregs = &adapter->regs->mac;
	u32 station1;
	u32 station2;
	u32 ipg;

	/* First we need to reset everything.  Write to MAC configuration
	 * register 1 to perform reset.
	 */
	writel(ET_MAC_CFG1_SOFT_RESET | ET_MAC_CFG1_SIM_RESET |
	       ET_MAC_CFG1_RESET_RXMC | ET_MAC_CFG1_RESET_TXMC |
	       ET_MAC_CFG1_RESET_RXFUNC | ET_MAC_CFG1_RESET_TXFUNC,
	       &macregs->cfg1);

	/* Next lets configure the MAC Inter-packet gap register */
	ipg = 0x38005860;		/* IPG1 0x38 IPG2 0x58 B2B 0x60 */
	ipg |= 0x50 << 8;		/* ifg enforce 0x50 */
	writel(ipg, &macregs->ipg);

	/* Next lets configure the MAC Half Duplex register */
	/* BEB trunc 0xA, Ex Defer, Rexmit 0xF Coll 0x37 */
	writel(0x00A1F037, &macregs->hfdp);

	/* Next lets configure the MAC Interface Control register */
	writel(0, &macregs->if_ctrl);

	writel(ET_MAC_MIIMGMT_CLK_RST, &macregs->mii_mgmt_cfg);

	/* Next lets configure the MAC Station Address register.  These
	 * values are read from the EEPROM during initialization and stored
	 * in the adapter structure.  We write what is stored in the adapter
	 * structure to the MAC Station Address registers high and low.  This
	 * station address is used for generating and checking pause control
	 * packets.
	 */
	station2 = (adapter->addr[1] << ET_MAC_STATION_ADDR2_OC2_SHIFT) |
		   (adapter->addr[0] << ET_MAC_STATION_ADDR2_OC1_SHIFT);
	station1 = (adapter->addr[5] << ET_MAC_STATION_ADDR1_OC6_SHIFT) |
		   (adapter->addr[4] << ET_MAC_STATION_ADDR1_OC5_SHIFT) |
		   (adapter->addr[3] << ET_MAC_STATION_ADDR1_OC4_SHIFT) |
		    adapter->addr[2];
	writel(station1, &macregs->station_addr_1);
	writel(station2, &macregs->station_addr_2);

	/* Max ethernet packet in bytes that will be passed by the mac without
	 * being truncated.  Allow the MAC to pass 4 more than our max packet
	 * size.  This is 4 for the Ethernet CRC.
	 *
	 * Packets larger than (registry_jumbo_packet) that do not contain a
	 * VLAN ID will be dropped by the Rx function.
	 */
	writel(adapter->registry_jumbo_packet + 4, &macregs->max_fm_len);

	/* clear out MAC config reset */
	writel(0, &macregs->cfg1);
}
static void et1310_config_mac_regs2(struct et131x_adapter *adapter)
{
	int delay = 0;
	struct mac_regs __iomem *mac = &adapter->regs->mac;
	struct phy_device *phydev = adapter->phydev;
	u32 cfg1;
	u32 cfg2;
	u32 ifctrl;
	u32 ctl;

	ctl = readl(&adapter->regs->txmac.ctl);
	cfg1 = readl(&mac->cfg1);
	cfg2 = readl(&mac->cfg2);
	ifctrl = readl(&mac->if_ctrl);

	/* Set up the if mode bits */
	cfg2 &= ~ET_MAC_CFG2_IFMODE_MASK;
	if (phydev->speed == SPEED_1000) {
		cfg2 |= ET_MAC_CFG2_IFMODE_1000;
		ifctrl &= ~ET_MAC_IFCTRL_PHYMODE;
	} else {
		cfg2 |= ET_MAC_CFG2_IFMODE_100;
		ifctrl |= ET_MAC_IFCTRL_PHYMODE;
	}

	cfg1 |= ET_MAC_CFG1_RX_ENABLE | ET_MAC_CFG1_TX_ENABLE |
		ET_MAC_CFG1_TX_FLOW;

	cfg1 &= ~(ET_MAC_CFG1_LOOPBACK | ET_MAC_CFG1_RX_FLOW);
	if (adapter->flow == FLOW_RXONLY || adapter->flow == FLOW_BOTH)
		cfg1 |= ET_MAC_CFG1_RX_FLOW;
	writel(cfg1, &mac->cfg1);

	/* Now we need to initialize the MAC Configuration 2 register */
	/* preamble 7, check length, huge frame off, pad crc, crc enable
	 * no preamble, let rx mac pad crc
	 */
	cfg2 |= 0x7 << ET_MAC_CFG2_PREAMBLE_SHIFT;
	cfg2 |= ET_MAC_CFG2_IFMODE_LEN_CHECK;
	cfg2 |= ET_MAC_CFG2_IFMODE_PAD_CRC;
	cfg2 |= ET_MAC_CFG2_IFMODE_CRC_ENABLE;
	cfg2 &= ~ET_MAC_CFG2_IFMODE_HUGE_FRAME;
	cfg2 &= ~ET_MAC_CFG2_IFMODE_FULL_DPLX;

	if (phydev->duplex == DUPLEX_FULL)
		cfg2 |= ET_MAC_CFG2_IFMODE_FULL_DPLX;

	ifctrl &= ~ET_MAC_IFCTRL_GHDMODE;
	if (phydev->duplex == DUPLEX_HALF)
		ifctrl |= ET_MAC_IFCTRL_GHDMODE;

	writel(ifctrl, &mac->if_ctrl);
	writel(cfg2, &mac->cfg2);

	do {
		udelay(10);
		delay++;
		cfg1 = readl(&mac->cfg1);
	} while ((cfg1 & ET_MAC_CFG1_WAIT) != ET_MAC_CFG1_WAIT && delay < 100);

	if (delay == 100) {
		dev_warn(&adapter->pdev->dev,
			 "Syncd bits did not respond correctly cfg1 word 0x%08x\n",
			 cfg1);
	}

	ctl |= ET_TX_CTRL_TXMAC_ENABLE | ET_TX_CTRL_FC_DISABLE;
	writel(ctl, &adapter->regs->txmac.ctl);

	if (adapter->flags & FMP_ADAPTER_LOWER_POWER) {
		et131x_rx_dma_enable(adapter);
		et131x_tx_dma_enable(adapter);
	}
}
static int et1310_in_phy_coma(struct et131x_adapter *adapter)
{
	u32 pmcsr = readl(&adapter->regs->global.pm_csr);

	return ET_PM_PHY_SW_COMA & pmcsr ? 1 : 0;
}
static void et1310_setup_device_for_multicast(struct et131x_adapter *adapter)
{
	struct rxmac_regs __iomem *rxmac = &adapter->regs->rxmac;
	u32 hash1 = 0;
	u32 hash2 = 0;
	u32 hash3 = 0;
	u32 hash4 = 0;
	u32 pm_csr;

	/* If ET131X_PACKET_TYPE_MULTICAST is specified, then we provision
	 * the multi-cast LIST.  If it is NOT specified, (and "ALL" is not
	 * specified) then we should pass NO multi-cast addresses to the
	 * driver.
	 */
	if (adapter->packet_filter & ET131X_PACKET_TYPE_MULTICAST) {
		int i;

		/* Loop through our multicast array and set up the device */
		for (i = 0; i < adapter->multicast_addr_count; i++) {
			u32 result;

			result = ether_crc(6, adapter->multicast_list[i]);

			result = (result & 0x3F800000) >> 23;

			if (result < 32) {
				hash1 |= (1 << result);
			} else if ((31 < result) && (result < 64)) {
				result -= 32;
				hash2 |= (1 << result);
			} else if ((63 < result) && (result < 96)) {
				result -= 64;
				hash3 |= (1 << result);
			} else {
				result -= 96;
				hash4 |= (1 << result);
			}
		}
	}

	/* Write out the new hash to the device */
	pm_csr = readl(&adapter->regs->global.pm_csr);
	if (!et1310_in_phy_coma(adapter)) {
		writel(hash1, &rxmac->multi_hash1);
		writel(hash2, &rxmac->multi_hash2);
		writel(hash3, &rxmac->multi_hash3);
		writel(hash4, &rxmac->multi_hash4);
	}
}
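/* A minimal sketch of the bucket selection used above, assuming ether_crc()
 * returns the standard Ethernet CRC-32 of the 6-byte address: bits 23-29 of
 * the CRC pick one of 128 buckets, and each multi_hashN register covers 32
 * consecutive buckets.
 */
static inline u32 et131x_mc_hash_bucket(const u8 *mc_addr)
{
	u32 crc = ether_crc(6, mc_addr);

	return (crc & 0x3F800000) >> 23;	/* 0-127: hash bucket number */
}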
static void et1310_setup_device_for_unicast(struct et131x_adapter *adapter)
{
	struct rxmac_regs __iomem *rxmac = &adapter->regs->rxmac;
	u32 uni_pf1;
	u32 uni_pf2;
	u32 uni_pf3;
	u32 pm_csr;

	/* Set up unicast packet filter reg 3 to be the first two octets of
	 * the MAC address for both address
	 *
	 * Set up unicast packet filter reg 2 to be the octets 2 - 5 of the
	 * MAC address for second address
	 *
	 * Set up unicast packet filter reg 1 to be the octets 2 - 5 of the
	 * MAC address for first address
	 */
	uni_pf3 = (adapter->addr[0] << ET_RX_UNI_PF_ADDR2_1_SHIFT) |
		  (adapter->addr[1] << ET_RX_UNI_PF_ADDR2_2_SHIFT) |
		  (adapter->addr[0] << ET_RX_UNI_PF_ADDR1_1_SHIFT) |
		   adapter->addr[1];

	uni_pf2 = (adapter->addr[2] << ET_RX_UNI_PF_ADDR2_3_SHIFT) |
		  (adapter->addr[3] << ET_RX_UNI_PF_ADDR2_4_SHIFT) |
		  (adapter->addr[4] << ET_RX_UNI_PF_ADDR2_5_SHIFT) |
		   adapter->addr[5];

	uni_pf1 = (adapter->addr[2] << ET_RX_UNI_PF_ADDR1_3_SHIFT) |
		  (adapter->addr[3] << ET_RX_UNI_PF_ADDR1_4_SHIFT) |
		  (adapter->addr[4] << ET_RX_UNI_PF_ADDR1_5_SHIFT) |
		   adapter->addr[5];

	pm_csr = readl(&adapter->regs->global.pm_csr);
	if (!et1310_in_phy_coma(adapter)) {
		writel(uni_pf1, &rxmac->uni_pf_addr1);
		writel(uni_pf2, &rxmac->uni_pf_addr2);
		writel(uni_pf3, &rxmac->uni_pf_addr3);
	}
}
static void et1310_config_rxmac_regs(struct et131x_adapter *adapter)
{
	struct rxmac_regs __iomem *rxmac = &adapter->regs->rxmac;
	struct phy_device *phydev = adapter->phydev;
	u32 sa_lo;
	u32 sa_hi = 0;
	u32 pf_ctrl = 0;
	u32 __iomem *wolw;

	/* Disable the MAC while it is being configured (also disable WOL) */
	writel(0x8, &rxmac->ctrl);

	/* Initialize WOL to disabled. */
	writel(0, &rxmac->crc0);
	writel(0, &rxmac->crc12);
	writel(0, &rxmac->crc34);

	/* We need to set the WOL mask0 - mask4 next.  We initialize it to
	 * its default Values of 0x00000000 because there are no WOL masks
	 * as of this time.
	 */
	for (wolw = &rxmac->mask0_word0; wolw <= &rxmac->mask4_word3; wolw++)
		writel(0, wolw);

	/* Lets setup the WOL Source Address */
	sa_lo = (adapter->addr[2] << ET_RX_WOL_LO_SA3_SHIFT) |
		(adapter->addr[3] << ET_RX_WOL_LO_SA4_SHIFT) |
		(adapter->addr[4] << ET_RX_WOL_LO_SA5_SHIFT) |
		 adapter->addr[5];
	writel(sa_lo, &rxmac->sa_lo);

	sa_hi = (u32)(adapter->addr[0] << ET_RX_WOL_HI_SA1_SHIFT) |
		      adapter->addr[1];
	writel(sa_hi, &rxmac->sa_hi);

	/* Disable all Packet Filtering */
	writel(0, &rxmac->pf_ctrl);

	/* Let's initialize the Unicast Packet filtering address */
	if (adapter->packet_filter & ET131X_PACKET_TYPE_DIRECTED) {
		et1310_setup_device_for_unicast(adapter);
		pf_ctrl |= ET_RX_PFCTRL_UNICST_FILTER_ENABLE;
	} else {
		writel(0, &rxmac->uni_pf_addr1);
		writel(0, &rxmac->uni_pf_addr2);
		writel(0, &rxmac->uni_pf_addr3);
	}

	/* Let's initialize the Multicast hash */
	if (!(adapter->packet_filter & ET131X_PACKET_TYPE_ALL_MULTICAST)) {
		pf_ctrl |= ET_RX_PFCTRL_MLTCST_FILTER_ENABLE;
		et1310_setup_device_for_multicast(adapter);
	}

	/* Runt packet filtering.  Didn't work in version A silicon. */
	pf_ctrl |= (NIC_MIN_PACKET_SIZE + 4) << ET_RX_PFCTRL_MIN_PKT_SZ_SHIFT;
	pf_ctrl |= ET_RX_PFCTRL_FRAG_FILTER_ENABLE;

	if (adapter->registry_jumbo_packet > 8192)
		/* In order to transmit jumbo packets greater than 8k, the
		 * FIFO between RxMAC and RxDMA needs to be reduced in size
		 * to (16k - Jumbo packet size).  In order to implement this,
		 * we must use "cut through" mode in the RxMAC, which chops
		 * packets down into segments which are (max_size * 16).  In
		 * this case we selected 256 bytes, since this is the size of
		 * the PCI-Express TLP's that the 1310 uses.
		 *
		 * seg_en on, fc_en off, size 0x10
		 */
		writel(0x41, &rxmac->mcif_ctrl_max_seg);
	else
		writel(0, &rxmac->mcif_ctrl_max_seg);

	writel(0, &rxmac->mcif_water_mark);
	writel(0, &rxmac->mif_ctrl);
	writel(0, &rxmac->space_avail);

	/* Initialize the mif_ctrl register
	 * bit 3:  Receive code error.  One or more nibbles were signalled as
	 *         errors during the reception of the packet.  Clear this
	 *         bit in Gigabit, set it in 100Mbit.  This was derived
	 *         experimentally at UNH.
	 * bit 4:  Receive CRC error.  The packet's CRC did not match the
	 *         internally generated CRC.
	 * bit 5:  Receive length check error.  Indicates that frame length
	 *         field value in the packet does not match the actual data
	 *         byte length and is not a type field.
	 * bit 16: Receive frame truncated.
	 * bit 17: Drop packet enable
	 */
	if (phydev && phydev->speed == SPEED_100)
		writel(0x30038, &rxmac->mif_ctrl);
	else
		writel(0x30030, &rxmac->mif_ctrl);

	/* Finally we initialize RxMac to be enabled & WOL disabled.  Packet
	 * filter is always enabled since it is where the runt packets are
	 * supposed to be dropped.  For version A silicon, runt packet
	 * dropping doesn't work, so it is disabled in the pf_ctrl register,
	 * but we still leave the packet filter on.
	 */
	writel(pf_ctrl, &rxmac->pf_ctrl);
	writel(ET_RX_CTRL_RXMAC_ENABLE | ET_RX_CTRL_WOL_DISABLE, &rxmac->ctrl);
}
static void et1310_config_txmac_regs(struct et131x_adapter *adapter)
{
	struct txmac_regs __iomem *txmac = &adapter->regs->txmac;

	/* We need to update the Control Frame Parameters
	 * cfpt - control frame pause timer set to 64 (0x40)
	 * cfep - control frame extended pause timer set to 0x0
	 */
	if (adapter->flow == FLOW_NONE)
		writel(0, &txmac->cf_param);
	else
		writel(0x40, &txmac->cf_param);
}
static void et1310_config_macstat_regs(struct et131x_adapter *adapter)
{
	struct macstat_regs __iomem *macstat = &adapter->regs->macstat;
	u32 __iomem *reg;

	/* initialize all the macstat registers to zero on the device */
	for (reg = &macstat->txrx_0_64_byte_frames;
	     reg <= &macstat->carry_reg2; reg++)
		writel(0, reg);

	/* Unmask any counters that we want to track the overflow of.
	 * Initially this will be all counters.  It may become clear later
	 * that we do not need to track all counters.
	 */
	writel(0xFFFFBE32, &macstat->carry_reg1_mask);
	writel(0xFFFE7E8B, &macstat->carry_reg2_mask);
}
static int et131x_phy_mii_read(struct et131x_adapter *adapter, u8 addr,
			       u8 reg, u16 *value)
{
	struct mac_regs __iomem *mac = &adapter->regs->mac;
	int status = 0;
	u32 delay = 0;
	u32 mii_addr;
	u32 mii_cmd;
	u32 mii_indicator;

	/* Save a local copy of the registers we are dealing with so we can
	 * set them back
	 */
	mii_addr = readl(&mac->mii_mgmt_addr);
	mii_cmd = readl(&mac->mii_mgmt_cmd);

	/* Stop the current operation */
	writel(0, &mac->mii_mgmt_cmd);

	/* Set up the register we need to read from on the correct PHY */
	writel(ET_MAC_MII_ADDR(addr, reg), &mac->mii_mgmt_addr);

	writel(0x1, &mac->mii_mgmt_cmd);

	do {
		udelay(50);
		delay++;
		mii_indicator = readl(&mac->mii_mgmt_indicator);
	} while ((mii_indicator & ET_MAC_MGMT_WAIT) && delay < 50);

	/* If we hit the max delay, we could not read the register */
	if (delay == 50) {
		dev_warn(&adapter->pdev->dev,
			 "reg 0x%08x could not be read\n", reg);
		dev_warn(&adapter->pdev->dev, "status is 0x%08x\n",
			 mii_indicator);

		status = -EIO;
		goto out;
	}

	/* If we hit here we were able to read the register and we need to
	 * return the value to the caller
	 */
	*value = readl(&mac->mii_mgmt_stat) & ET_MAC_MIIMGMT_STAT_PHYCRTL_MASK;

out:
	/* Stop the read operation */
	writel(0, &mac->mii_mgmt_cmd);

	/* set the registers we touched back to the state at which we entered
	 * this function
	 */
	writel(mii_addr, &mac->mii_mgmt_addr);
	writel(mii_cmd, &mac->mii_mgmt_cmd);

	return status;
}
static int et131x_mii_read(struct et131x_adapter *adapter, u8 reg, u16 *value)
{
	struct phy_device *phydev = adapter->phydev;

	if (!phydev)
		return -EIO;

	return et131x_phy_mii_read(adapter, phydev->mdio.addr, reg, value);
}
static int et131x_mii_write(struct et131x_adapter *adapter, u8 addr, u8 reg,
			    u16 value)
{
	struct mac_regs __iomem *mac = &adapter->regs->mac;
	int status = 0;
	u32 delay = 0;
	u32 mii_addr;
	u32 mii_cmd;
	u32 mii_indicator;

	/* Save a local copy of the registers we are dealing with so we can
	 * set them back
	 */
	mii_addr = readl(&mac->mii_mgmt_addr);
	mii_cmd = readl(&mac->mii_mgmt_cmd);

	/* Stop the current operation */
	writel(0, &mac->mii_mgmt_cmd);

	/* Set up the register we need to write to on the correct PHY */
	writel(ET_MAC_MII_ADDR(addr, reg), &mac->mii_mgmt_addr);

	/* Add the value to write to the registers to the mac */
	writel(value, &mac->mii_mgmt_ctrl);

	do {
		udelay(50);
		delay++;
		mii_indicator = readl(&mac->mii_mgmt_indicator);
	} while ((mii_indicator & ET_MAC_MGMT_BUSY) && delay < 100);

	/* If we hit the max delay, we could not write the register */
	if (delay == 100) {
		u16 tmp;

		dev_warn(&adapter->pdev->dev,
			 "reg 0x%08x could not be written", reg);
		dev_warn(&adapter->pdev->dev, "status is 0x%08x\n",
			 mii_indicator);
		dev_warn(&adapter->pdev->dev, "command is 0x%08x\n",
			 readl(&mac->mii_mgmt_cmd));

		et131x_mii_read(adapter, reg, &tmp);

		status = -EIO;
	}
	/* Stop the write operation */
	writel(0, &mac->mii_mgmt_cmd);

	/* set the registers we touched back to the state at which we entered
	 * this function
	 */
	writel(mii_addr, &mac->mii_mgmt_addr);
	writel(mii_cmd, &mac->mii_mgmt_cmd);

	return status;
}
static void et1310_phy_read_mii_bit(struct et131x_adapter *adapter,
				    u16 regnum,
				    u16 bitnum,
				    u8 *value)
{
	u16 reg;
	u16 mask = 1 << bitnum;

	et131x_mii_read(adapter, regnum, &reg);

	*value = (reg & mask) >> bitnum;
}
static void et1310_config_flow_control(struct et131x_adapter *adapter)
{
	struct phy_device *phydev = adapter->phydev;

	if (phydev->duplex == DUPLEX_HALF) {
		adapter->flow = FLOW_NONE;
	} else {
		u8 remote_pause, remote_async_pause;

		et1310_phy_read_mii_bit(adapter, 5, 10, &remote_pause);
		et1310_phy_read_mii_bit(adapter, 5, 11, &remote_async_pause);

		if (remote_pause && remote_async_pause) {
			adapter->flow = adapter->wanted_flow;
		} else if (remote_pause && !remote_async_pause) {
			if (adapter->wanted_flow == FLOW_BOTH)
				adapter->flow = FLOW_BOTH;
			else
				adapter->flow = FLOW_NONE;
		} else if (!remote_pause && !remote_async_pause) {
			adapter->flow = FLOW_NONE;
		} else {
			if (adapter->wanted_flow == FLOW_BOTH)
				adapter->flow = FLOW_RXONLY;
			else
				adapter->flow = FLOW_NONE;
		}
	}
}
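/* The resolution above follows the usual 802.3x pause negotiation.  As a
 * summary (a sketch, assuming bits 10 and 11 of PHY register 5 are the link
 * partner's PAUSE and ASM_DIR advertisement bits):
 *
 *   partner pause/asym | wanted_flow | resulting flow
 *   -------------------+-------------+----------------
 *         1 / 1        | any         | wanted_flow
 *         1 / 0        | FLOW_BOTH   | FLOW_BOTH
 *         1 / 0        | other       | FLOW_NONE
 *         0 / 0        | any         | FLOW_NONE
 *         0 / 1        | FLOW_BOTH   | FLOW_RXONLY
 *         0 / 1        | other       | FLOW_NONE
 */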
/* et1310_update_macstat_host_counters - Update local copy of the statistics */
static void et1310_update_macstat_host_counters(struct et131x_adapter *adapter)
{
	struct ce_stats *stats = &adapter->stats;
	struct macstat_regs __iomem *macstat = &adapter->regs->macstat;

	stats->tx_collisions	       += readl(&macstat->tx_total_collisions);
	stats->tx_first_collisions     += readl(&macstat->tx_single_collisions);
	stats->tx_deferred	       += readl(&macstat->tx_deferred);
	stats->tx_excessive_collisions +=
				readl(&macstat->tx_multiple_collisions);
	stats->tx_late_collisions      += readl(&macstat->tx_late_collisions);
	stats->tx_underflows	       += readl(&macstat->tx_undersize_frames);
	stats->tx_max_pkt_errs	       += readl(&macstat->tx_oversize_frames);

	stats->rx_align_errs	  += readl(&macstat->rx_align_errs);
	stats->rx_crc_errs	  += readl(&macstat->rx_code_errs);
	stats->rcvd_pkts_dropped  += readl(&macstat->rx_drops);
	stats->rx_overflows	  += readl(&macstat->rx_oversize_packets);
	stats->rx_code_violations += readl(&macstat->rx_fcs_errs);
	stats->rx_length_errs	  += readl(&macstat->rx_frame_len_errs);
	stats->rx_other_errs	  += readl(&macstat->rx_fragment_packets);
}
/* et1310_handle_macstat_interrupt
 *
 * One of the MACSTAT counters has wrapped.  Update the local copy of
 * the statistics held in the adapter structure, checking the "wrap"
 * bit for each counter.
 */
static void et1310_handle_macstat_interrupt(struct et131x_adapter *adapter)
{
	u32 carry_reg1;
	u32 carry_reg2;

	/* Read the interrupt bits from the register(s).  These are Clear On
	 * Write.
	 */
	carry_reg1 = readl(&adapter->regs->macstat.carry_reg1);
	carry_reg2 = readl(&adapter->regs->macstat.carry_reg2);

	writel(carry_reg1, &adapter->regs->macstat.carry_reg1);
	writel(carry_reg2, &adapter->regs->macstat.carry_reg2);

	/* We need to update the host copy of all the MAC_STAT counters.
	 * For each counter, check its overflow bit.  If the overflow bit is
	 * set, then increment the host version of the count by one complete
	 * revolution of the counter.  This routine is called when the counter
	 * block indicates that one of the counters has wrapped.
	 */
	if (carry_reg1 & (1 << 14))
		adapter->stats.rx_code_violations += COUNTER_WRAP_16_BIT;
	if (carry_reg1 & (1 << 8))
		adapter->stats.rx_align_errs += COUNTER_WRAP_12_BIT;
	if (carry_reg1 & (1 << 7))
		adapter->stats.rx_length_errs += COUNTER_WRAP_16_BIT;
	if (carry_reg1 & (1 << 2))
		adapter->stats.rx_other_errs += COUNTER_WRAP_16_BIT;
	if (carry_reg1 & (1 << 6))
		adapter->stats.rx_crc_errs += COUNTER_WRAP_16_BIT;
	if (carry_reg1 & (1 << 3))
		adapter->stats.rx_overflows += COUNTER_WRAP_16_BIT;
	if (carry_reg1 & (1 << 0))
		adapter->stats.rcvd_pkts_dropped += COUNTER_WRAP_16_BIT;
	if (carry_reg2 & (1 << 16))
		adapter->stats.tx_max_pkt_errs += COUNTER_WRAP_12_BIT;
	if (carry_reg2 & (1 << 15))
		adapter->stats.tx_underflows += COUNTER_WRAP_12_BIT;
	if (carry_reg2 & (1 << 6))
		adapter->stats.tx_first_collisions += COUNTER_WRAP_12_BIT;
	if (carry_reg2 & (1 << 8))
		adapter->stats.tx_deferred += COUNTER_WRAP_12_BIT;
	if (carry_reg2 & (1 << 5))
		adapter->stats.tx_excessive_collisions += COUNTER_WRAP_12_BIT;
	if (carry_reg2 & (1 << 4))
		adapter->stats.tx_late_collisions += COUNTER_WRAP_12_BIT;
	if (carry_reg2 & (1 << 2))
		adapter->stats.tx_collisions += COUNTER_WRAP_12_BIT;
}
static int et131x_mdio_read(struct mii_bus *bus, int phy_addr, int reg)
{
	struct net_device *netdev = bus->priv;
	struct et131x_adapter *adapter = netdev_priv(netdev);
	u16 value;
	int ret;

	ret = et131x_phy_mii_read(adapter, phy_addr, reg, &value);

	if (ret < 0)
		return ret;

	return value;
}
static int et131x_mdio_write(struct mii_bus *bus, int phy_addr,
			     int reg, u16 value)
{
	struct net_device *netdev = bus->priv;
	struct et131x_adapter *adapter = netdev_priv(netdev);

	return et131x_mii_write(adapter, phy_addr, reg, value);
}
/* et1310_phy_power_switch - PHY power control
 * @adapter: device to control
 * @down: true for off/false for back on
 *
 * one hundred, ten, one thousand megs
 * How would you like to have your LAN accessed
 * Can't you see that this code processed
 * Phy power, phy power..
 */
static void et1310_phy_power_switch(struct et131x_adapter *adapter, bool down)
{
	u16 data;
	struct phy_device *phydev = adapter->phydev;

	et131x_mii_read(adapter, MII_BMCR, &data);
	data &= ~BMCR_PDOWN;
	if (down)
		data |= BMCR_PDOWN;
	et131x_mii_write(adapter, phydev->mdio.addr, MII_BMCR, data);
}
/* et131x_xcvr_init - Init the phy if we are setting it into force mode */
static void et131x_xcvr_init(struct et131x_adapter *adapter)
{
	u16 lcr2;
	struct phy_device *phydev = adapter->phydev;

	/* Set the LED behavior such that LED 1 indicates speed (off =
	 * 10Mbits, blink = 100Mbits, on = 1000Mbits) and LED 2 indicates
	 * link and activity (on for link, blink off for activity).
	 *
	 * NOTE: Some customizations have been added here for specific
	 * vendors; The LED behavior is now determined by vendor data in the
	 * EEPROM.  However, the above description is the default.
	 */
	if ((adapter->eeprom_data[1] & 0x4) == 0) {
		et131x_mii_read(adapter, PHY_LED_2, &lcr2);

		lcr2 &= (ET_LED2_LED_100TX | ET_LED2_LED_1000T);
		lcr2 |= (LED_VAL_LINKON_ACTIVE << LED_LINK_SHIFT);

		if ((adapter->eeprom_data[1] & 0x8) == 0)
			lcr2 |= (LED_VAL_1000BT_100BTX << LED_TXRX_SHIFT);
		else
			lcr2 |= (LED_VAL_LINKON << LED_TXRX_SHIFT);

		et131x_mii_write(adapter, phydev->mdio.addr, PHY_LED_2, lcr2);
	}
}
/* et131x_configure_global_regs - configure JAGCore global regs */
static void et131x_configure_global_regs(struct et131x_adapter *adapter)
{
	struct global_regs __iomem *regs = &adapter->regs->global;

	writel(0, &regs->rxq_start_addr);
	writel(INTERNAL_MEM_SIZE - 1, &regs->txq_end_addr);

	if (adapter->registry_jumbo_packet < 2048) {
		/* Tx / RxDMA and Tx/Rx MAC interfaces have a 1k word
		 * block of RAM that the driver can split between Tx
		 * and Rx as it desires.  Our default is to split it
		 * 50/50:
		 */
		writel(PARM_RX_MEM_END_DEF, &regs->rxq_end_addr);
		writel(PARM_RX_MEM_END_DEF + 1, &regs->txq_start_addr);
	} else if (adapter->registry_jumbo_packet < 8192) {
		/* For jumbo packets > 2k but < 8k, split 50-50. */
		writel(INTERNAL_MEM_RX_OFFSET, &regs->rxq_end_addr);
		writel(INTERNAL_MEM_RX_OFFSET + 1, &regs->txq_start_addr);
	} else {
		/* 9216 is the only packet size greater than 8k that
		 * is available.  The Tx buffer has to be big enough
		 * for one whole packet on the Tx side.  We'll make
		 * the Tx 9408, and give the rest to Rx
		 */
		writel(0x01b3, &regs->rxq_end_addr);
		writel(0x01b4, &regs->txq_start_addr);
	}

	/* Initialize the loopback register.  Disable all loopbacks. */
	writel(0, &regs->loopback);

	writel(0, &regs->msi_config);

	/* By default, disable the watchdog timer.  It will be enabled when
	 * a packet is queued.
	 */
	writel(0, &regs->watchdog_timer);
}
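/* Arithmetic behind the 9216-byte case above (a note, assuming the queue
 * start/end registers count 16-byte words): Tx gets words 0x1b4-0x3ff, i.e.
 * 0x3ff - 0x1b4 + 1 = 588 words = 9408 bytes, enough for one whole
 * 9216-byte jumbo frame plus overhead; Rx keeps words 0x000-0x1b3.
 */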
/* et131x_config_rx_dma_regs - Start of Rx_DMA init sequence */
static void et131x_config_rx_dma_regs(struct et131x_adapter *adapter)
{
	struct rxdma_regs __iomem *rx_dma = &adapter->regs->rxdma;
	struct rx_ring *rx_local = &adapter->rx_ring;
	struct fbr_desc *fbr_entry;
	u32 entry;
	u32 psr_num_des;
	unsigned long flags;
	u8 id;

	et131x_rx_dma_disable(adapter);

	/* Load the completion writeback physical address */
	writel(upper_32_bits(rx_local->rx_status_bus), &rx_dma->dma_wb_base_hi);
	writel(lower_32_bits(rx_local->rx_status_bus), &rx_dma->dma_wb_base_lo);

	memset(rx_local->rx_status_block, 0, sizeof(struct rx_status_block));

	/* Set the address and parameters of the packet status ring */
	writel(upper_32_bits(rx_local->ps_ring_physaddr), &rx_dma->psr_base_hi);
	writel(lower_32_bits(rx_local->ps_ring_physaddr), &rx_dma->psr_base_lo);
	writel(rx_local->psr_entries - 1, &rx_dma->psr_num_des);
	writel(0, &rx_dma->psr_full_offset);

	psr_num_des = readl(&rx_dma->psr_num_des) & ET_RXDMA_PSR_NUM_DES_MASK;
	writel((psr_num_des * LO_MARK_PERCENT_FOR_PSR) / 100,
	       &rx_dma->psr_min_des);

	spin_lock_irqsave(&adapter->rcv_lock, flags);

	/* These local variables track the PSR in the adapter structure */
	rx_local->local_psr_full = 0;

	for (id = 0; id < NUM_FBRS; id++) {
		u32 __iomem *num_des;
		u32 __iomem *full_offset;
		u32 __iomem *min_des;
		u32 __iomem *base_hi;
		u32 __iomem *base_lo;
		struct fbr_lookup *fbr = rx_local->fbr[id];

		if (id == 0) {
			num_des = &rx_dma->fbr0_num_des;
			full_offset = &rx_dma->fbr0_full_offset;
			min_des = &rx_dma->fbr0_min_des;
			base_hi = &rx_dma->fbr0_base_hi;
			base_lo = &rx_dma->fbr0_base_lo;
		} else {
			num_des = &rx_dma->fbr1_num_des;
			full_offset = &rx_dma->fbr1_full_offset;
			min_des = &rx_dma->fbr1_min_des;
			base_hi = &rx_dma->fbr1_base_hi;
			base_lo = &rx_dma->fbr1_base_lo;
		}

		/* Now's the best time to initialize FBR contents */
		fbr_entry = fbr->ring_virtaddr;
		for (entry = 0; entry < fbr->num_entries; entry++) {
			fbr_entry->addr_hi = fbr->bus_high[entry];
			fbr_entry->addr_lo = fbr->bus_low[entry];
			fbr_entry->word2 = entry;
			fbr_entry++;
		}

		/* Set the address and parameters of Free buffer ring 1 and 0 */
		writel(upper_32_bits(fbr->ring_physaddr), base_hi);
		writel(lower_32_bits(fbr->ring_physaddr), base_lo);
		writel(fbr->num_entries - 1, num_des);
		writel(ET_DMA10_WRAP, full_offset);

		/* This variable tracks the free buffer ring 1 full position,
		 * so it has to match the above.
		 */
		fbr->local_full = ET_DMA10_WRAP;
		writel(((fbr->num_entries * LO_MARK_PERCENT_FOR_RX) / 100) - 1,
		       min_des);
	}

	/* Program the number of packets we will receive before generating an
	 * interrupt.
	 * For version B silicon, this value gets updated once autoneg is
	 * complete.
	 */
	writel(PARM_RX_NUM_BUFS_DEF, &rx_dma->num_pkt_done);

	/* The "time_done" is not working correctly to coalesce interrupts
	 * after a given time period, but rather is giving us an interrupt
	 * regardless of whether we have received packets.
	 * This value gets updated once autoneg is complete.
	 */
	writel(PARM_RX_TIME_INT_DEF, &rx_dma->max_pkt_time);

	spin_unlock_irqrestore(&adapter->rcv_lock, flags);
}
/* et131x_config_tx_dma_regs - Set up the tx dma section of the JAGCore.
 *
 * Configure the transmit engine with the ring buffers we have created
 * and prepare it for use.
 */
static void et131x_config_tx_dma_regs(struct et131x_adapter *adapter)
{
	struct txdma_regs __iomem *txdma = &adapter->regs->txdma;
	struct tx_ring *tx_ring = &adapter->tx_ring;

	/* Load the hardware with the start of the transmit descriptor ring. */
	writel(upper_32_bits(tx_ring->tx_desc_ring_pa), &txdma->pr_base_hi);
	writel(lower_32_bits(tx_ring->tx_desc_ring_pa), &txdma->pr_base_lo);

	/* Initialise the transmit DMA engine */
	writel(NUM_DESC_PER_RING_TX - 1, &txdma->pr_num_des);

	/* Load the completion writeback physical address */
	writel(upper_32_bits(tx_ring->tx_status_pa), &txdma->dma_wb_base_hi);
	writel(lower_32_bits(tx_ring->tx_status_pa), &txdma->dma_wb_base_lo);

	*tx_ring->tx_status = 0;

	writel(0, &txdma->service_request);
	tx_ring->send_idx = 0;
}
/* et131x_adapter_setup - Set the adapter up as per cassini+ documentation */
static void et131x_adapter_setup(struct et131x_adapter *adapter)
{
	et131x_configure_global_regs(adapter);
	et1310_config_mac_regs1(adapter);

	/* Configure the MMC registers */
	/* All we need to do is initialize the Memory Control Register */
	writel(ET_MMC_ENABLE, &adapter->regs->mmc.mmc_ctrl);

	et1310_config_rxmac_regs(adapter);
	et1310_config_txmac_regs(adapter);

	et131x_config_rx_dma_regs(adapter);
	et131x_config_tx_dma_regs(adapter);

	et1310_config_macstat_regs(adapter);

	et1310_phy_power_switch(adapter, 0);
	et131x_xcvr_init(adapter);
}
/* et131x_soft_reset - Issue soft reset to the hardware, complete for ET1310 */
static void et131x_soft_reset(struct et131x_adapter *adapter)
{
	u32 reg;

	/* Disable MAC Core */
	reg = ET_MAC_CFG1_SOFT_RESET | ET_MAC_CFG1_SIM_RESET |
	      ET_MAC_CFG1_RESET_RXMC | ET_MAC_CFG1_RESET_TXMC |
	      ET_MAC_CFG1_RESET_RXFUNC | ET_MAC_CFG1_RESET_TXFUNC;
	writel(reg, &adapter->regs->mac.cfg1);

	reg = ET_RESET_ALL;
	writel(reg, &adapter->regs->global.sw_reset);

	reg = ET_MAC_CFG1_RESET_RXMC | ET_MAC_CFG1_RESET_TXMC |
	      ET_MAC_CFG1_RESET_RXFUNC | ET_MAC_CFG1_RESET_TXFUNC;
	writel(reg, &adapter->regs->mac.cfg1);
	writel(0, &adapter->regs->mac.cfg1);
}
static void et131x_enable_interrupts(struct et131x_adapter *adapter)
{
	u32 mask;

	if (adapter->flow == FLOW_TXONLY || adapter->flow == FLOW_BOTH)
		mask = INT_MASK_ENABLE;
	else
		mask = INT_MASK_ENABLE_NO_FLOW;

	writel(mask, &adapter->regs->global.int_mask);
}

static void et131x_disable_interrupts(struct et131x_adapter *adapter)
{
	writel(INT_MASK_DISABLE, &adapter->regs->global.int_mask);
}
static void et131x_tx_dma_disable(struct et131x_adapter *adapter)
{
	/* Setup the transmit dma configuration register */
	writel(ET_TXDMA_CSR_HALT | ET_TXDMA_SNGL_EPKT,
	       &adapter->regs->txdma.csr);
}
static void et131x_enable_txrx(struct net_device *netdev)
{
	struct et131x_adapter *adapter = netdev_priv(netdev);

	et131x_rx_dma_enable(adapter);
	et131x_tx_dma_enable(adapter);

	if (adapter->flags & FMP_ADAPTER_INTERRUPT_IN_USE)
		et131x_enable_interrupts(adapter);

	netif_start_queue(netdev);
}
static void et131x_disable_txrx(struct net_device *netdev)
{
	struct et131x_adapter *adapter = netdev_priv(netdev);

	netif_stop_queue(netdev);

	et131x_rx_dma_disable(adapter);
	et131x_tx_dma_disable(adapter);

	et131x_disable_interrupts(adapter);
}
static void et131x_init_send(struct et131x_adapter *adapter)
{
	int i;
	struct tx_ring *tx_ring = &adapter->tx_ring;
	struct tcb *tcb = tx_ring->tcb_ring;

	tx_ring->tcb_qhead = tcb;

	memset(tcb, 0, sizeof(struct tcb) * NUM_TCB);

	/* Set the link pointer in each TCB to the next entry in the chain */
	for (i = 0; i < NUM_TCB; i++) {
		tcb->next = tcb + 1;
		tcb++;
	}

	/* Set the tail pointer */
	tcb--;
	tcb->next = NULL;
	tx_ring->tcb_qtail = tcb;

	/* Curr send queue should now be empty */
	tx_ring->send_head = NULL;
	tx_ring->send_tail = NULL;
}
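/* After et131x_init_send() the TCBs form a singly linked free list:
 * tcb_qhead points at tcb_ring[0], tcb_qtail at tcb_ring[NUM_TCB - 1], and
 * each tcb->next points at the following array entry, so the send path can
 * pop from the head while completion appends recycled TCBs at the tail.
 */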
/* et1310_enable_phy_coma
 *
 * The driver receives a phy status change interrupt while in D0 and checks
 * that phy_status is down.
 *
 *  -- gate off JAGCore;
 *  -- set gigE PHY in Coma mode
 *  -- wake on phy_interrupt; Perform software reset JAGCore,
 *     re-initialize jagcore and gigE PHY
 */
static void et1310_enable_phy_coma(struct et131x_adapter *adapter)
{
	u32 pmcsr = readl(&adapter->regs->global.pm_csr);

	/* Stop sending packets. */
	adapter->flags |= FMP_ADAPTER_LOWER_POWER;

	/* Wait for outstanding Receive packets */
	et131x_disable_txrx(adapter->netdev);

	/* Gate off JAGCore 3 clock domains */
	pmcsr &= ~ET_PMCSR_INIT;
	writel(pmcsr, &adapter->regs->global.pm_csr);

	/* Program gigE PHY in to Coma mode */
	pmcsr |= ET_PM_PHY_SW_COMA;
	writel(pmcsr, &adapter->regs->global.pm_csr);
}
static void et1310_disable_phy_coma(struct et131x_adapter *adapter)
{
	u32 pmcsr;

	pmcsr = readl(&adapter->regs->global.pm_csr);

	/* Disable phy_sw_coma register and re-enable JAGCore clocks */
	pmcsr |= ET_PMCSR_INIT;
	pmcsr &= ~ET_PM_PHY_SW_COMA;
	writel(pmcsr, &adapter->regs->global.pm_csr);

	/* Restore the GbE PHY speed and duplex modes;
	 * Reset JAGCore; re-configure and initialize JAGCore and gigE PHY
	 */

	/* Re-initialize the send structures */
	et131x_init_send(adapter);

	/* Bring the device back to the state it was during init prior to
	 * autonegotiation being complete.  This way, when we get the auto-neg
	 * complete interrupt, we can complete init by calling ConfigMacREGS2.
	 */
	et131x_soft_reset(adapter);

	et131x_adapter_setup(adapter);

	/* Allow Tx to restart */
	adapter->flags &= ~FMP_ADAPTER_LOWER_POWER;

	et131x_enable_txrx(adapter->netdev);
}
static inline u32 bump_free_buff_ring(u32 *free_buff_ring, u32 limit)
{
	u32 tmp_free_buff_ring = *free_buff_ring;

	tmp_free_buff_ring++;
	/* This works for all cases where limit < 1024.  The 1023 case
	 * works because 1023++ is 1024 which means the if condition is not
	 * taken but the carry of the bit into the wrap bit toggles the wrap
	 * value correctly
	 */
	if ((tmp_free_buff_ring & ET_DMA10_MASK) > limit) {
		tmp_free_buff_ring &= ~ET_DMA10_MASK;
		tmp_free_buff_ring ^= ET_DMA10_WRAP;
	}
	/* For the 1023 case */
	tmp_free_buff_ring &= (ET_DMA10_MASK | ET_DMA10_WRAP);
	*free_buff_ring = tmp_free_buff_ring;
	return tmp_free_buff_ring;
}
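/* Worked example for the limit == 1023 case mentioned above: a value of 1023
 * with the wrap bit clear increments to 1024, so the if () is not taken
 * (1024 & ET_DMA10_MASK == 0, which is not > 1023), but the carry has
 * already flipped bit 10 (ET_DMA10_WRAP); the final mask then yields index 0
 * with the wrap bit toggled, which is the encoding the hardware expects.
 */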
/* et131x_rx_dma_memory_alloc
 *
 * Allocates Free buffer ring 1 for sure, free buffer ring 0 if required,
 * and the Packet Status Ring.
 */
static int et131x_rx_dma_memory_alloc(struct et131x_adapter *adapter)
{
	u8 id;
	u32 i, j;
	u32 bufsize;
	u32 psr_size;
	u32 fbr_chunksize;
	struct rx_ring *rx_ring = &adapter->rx_ring;
	struct fbr_lookup *fbr;

	/* Alloc memory for the lookup table */
	rx_ring->fbr[0] = kzalloc(sizeof(*fbr), GFP_KERNEL);
	if (rx_ring->fbr[0] == NULL)
		return -ENOMEM;
	rx_ring->fbr[1] = kzalloc(sizeof(*fbr), GFP_KERNEL);
	if (rx_ring->fbr[1] == NULL)
		return -ENOMEM;

	/* The first thing we will do is configure the sizes of the buffer
	 * rings. These will change based on jumbo packet support. Larger
	 * jumbo packets increase the size of each entry in FBR0, and the
	 * number of entries in FBR0, while at the same time decreasing the
	 * number of entries in FBR1.
	 *
	 * FBR1 holds "large" frames, FBR0 holds "small" frames. If FBR1
	 * entries are huge in order to accommodate a "jumbo" frame, then it
	 * will have fewer entries. Conversely, FBR0 will now be relied upon
	 * to carry more "normal" frames, thus its entry size also increases
	 * and the number of entries goes up too (since it now carries
	 * "small" + "regular" packets).
	 *
	 * In this scheme, we try to maintain 512 entries between the two
	 * rings. Also, FBR1 remains a constant size - when its size doubles
	 * the number of entries halves. FBR0 increases in size, however.
	 */
	if (adapter->registry_jumbo_packet < 2048) {
		rx_ring->fbr[0]->buffsize = 256;
		rx_ring->fbr[0]->num_entries = 512;
		rx_ring->fbr[1]->buffsize = 2048;
		rx_ring->fbr[1]->num_entries = 512;
	} else if (adapter->registry_jumbo_packet < 4096) {
		rx_ring->fbr[0]->buffsize = 512;
		rx_ring->fbr[0]->num_entries = 1024;
		rx_ring->fbr[1]->buffsize = 4096;
		rx_ring->fbr[1]->num_entries = 512;
	} else {
		rx_ring->fbr[0]->buffsize = 1024;
		rx_ring->fbr[0]->num_entries = 768;
		rx_ring->fbr[1]->buffsize = 16384;
		rx_ring->fbr[1]->num_entries = 128;
	}

	rx_ring->psr_entries = rx_ring->fbr[0]->num_entries +
			       rx_ring->fbr[1]->num_entries;

	for (id = 0; id < NUM_FBRS; id++) {
		fbr = rx_ring->fbr[id];
		/* Allocate an area of memory for Free Buffer Ring */
		bufsize = sizeof(struct fbr_desc) * fbr->num_entries;
		fbr->ring_virtaddr = dma_alloc_coherent(&adapter->pdev->dev,
							bufsize,
							&fbr->ring_physaddr,
							GFP_KERNEL);
		if (!fbr->ring_virtaddr) {
			dev_err(&adapter->pdev->dev,
				"Cannot alloc memory for Free Buffer Ring %d\n",
				id);
			return -ENOMEM;
		}
	}

	for (id = 0; id < NUM_FBRS; id++) {
		fbr = rx_ring->fbr[id];
		fbr_chunksize = (FBR_CHUNKS * fbr->buffsize);

		for (i = 0; i < fbr->num_entries / FBR_CHUNKS; i++) {
			dma_addr_t fbr_physaddr;

			fbr->mem_virtaddrs[i] = dma_alloc_coherent(
					&adapter->pdev->dev, fbr_chunksize,
					&fbr->mem_physaddrs[i],
					GFP_KERNEL);

			if (!fbr->mem_virtaddrs[i]) {
				dev_err(&adapter->pdev->dev,
					"Could not alloc memory\n");
				return -ENOMEM;
			}

			/* See NOTE in "Save Physical Address" comment above */
			fbr_physaddr = fbr->mem_physaddrs[i];

			for (j = 0; j < FBR_CHUNKS; j++) {
				u32 k = (i * FBR_CHUNKS) + j;

				/* Save the Virtual address of this index for
				 * quick access later
				 */
				fbr->virt[k] = (u8 *)fbr->mem_virtaddrs[i] +
					       (j * fbr->buffsize);

				/* now store the physical address in the
				 * descriptor so the device can access it
				 */
				fbr->bus_high[k] = upper_32_bits(fbr_physaddr);
				fbr->bus_low[k] = lower_32_bits(fbr_physaddr);
				fbr_physaddr += fbr->buffsize;
			}
		}
	}

	/* Allocate an area of memory for FIFO of Packet Status ring entries */
	psr_size = sizeof(struct pkt_stat_desc) * rx_ring->psr_entries;

	rx_ring->ps_ring_virtaddr = dma_alloc_coherent(&adapter->pdev->dev,
						       psr_size,
						       &rx_ring->ps_ring_physaddr,
						       GFP_KERNEL);

	if (!rx_ring->ps_ring_virtaddr) {
		dev_err(&adapter->pdev->dev,
			"Cannot alloc memory for Packet Status Ring\n");
		return -ENOMEM;
	}

	/* Allocate an area of memory for writeback of status information */
	rx_ring->rx_status_block = dma_alloc_coherent(&adapter->pdev->dev,
						      sizeof(struct rx_status_block),
						      &rx_ring->rx_status_bus,
						      GFP_KERNEL);
	if (!rx_ring->rx_status_block) {
		dev_err(&adapter->pdev->dev,
			"Cannot alloc memory for Status Block\n");
		return -ENOMEM;
	}
	rx_ring->num_rfd = NIC_DEFAULT_NUM_RFD;

	/* The RFDs are going to be put on lists later on, so initialize the
	 * lists now.
	 */
	INIT_LIST_HEAD(&rx_ring->recv_list);
	return 0;
}
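
/* Editorial sketch (not built): the jumbo-dependent sizing above reduces
 * to three fixed configurations. Restated as a standalone helper for
 * reference; struct fbr_cfg is hypothetical, the numbers mirror the
 * if/else chain in et131x_rx_dma_memory_alloc().
 */
#if 0
struct fbr_cfg { u32 buffsize; u32 num_entries; };

static void fbr_sizes_for_jumbo(u32 jumbo, struct fbr_cfg *fbr0,
				struct fbr_cfg *fbr1)
{
	if (jumbo < 2048) {		/* standard frames */
		*fbr0 = (struct fbr_cfg){ 256, 512 };
		*fbr1 = (struct fbr_cfg){ 2048, 512 };
	} else if (jumbo < 4096) {	/* small jumbo */
		*fbr0 = (struct fbr_cfg){ 512, 1024 };
		*fbr1 = (struct fbr_cfg){ 4096, 512 };
	} else {			/* large jumbo */
		*fbr0 = (struct fbr_cfg){ 1024, 768 };
		*fbr1 = (struct fbr_cfg){ 16384, 128 };
	}
}
#endif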
static void et131x_rx_dma_memory_free(struct et131x_adapter *adapter)
{
	u8 id;
	u32 ii;
	u32 bufsize;
	u32 psr_size;
	struct rfd *rfd;
	struct rx_ring *rx_ring = &adapter->rx_ring;
	struct fbr_lookup *fbr;

	/* Free RFDs and associated packet descriptors */
	WARN_ON(rx_ring->num_ready_recv != rx_ring->num_rfd);

	while (!list_empty(&rx_ring->recv_list)) {
		rfd = list_entry(rx_ring->recv_list.next,
				 struct rfd, list_node);

		list_del(&rfd->list_node);
		rfd->skb = NULL;
		kfree(rfd);
	}

	/* Free Free Buffer Rings */
	for (id = 0; id < NUM_FBRS; id++) {
		fbr = rx_ring->fbr[id];

		if (!fbr || !fbr->ring_virtaddr)
			continue;

		/* First the packet memory */
		for (ii = 0; ii < fbr->num_entries / FBR_CHUNKS; ii++) {
			if (fbr->mem_virtaddrs[ii]) {
				bufsize = fbr->buffsize * FBR_CHUNKS;

				dma_free_coherent(&adapter->pdev->dev,
						  bufsize,
						  fbr->mem_virtaddrs[ii],
						  fbr->mem_physaddrs[ii]);

				fbr->mem_virtaddrs[ii] = NULL;
			}
		}

		bufsize = sizeof(struct fbr_desc) * fbr->num_entries;

		dma_free_coherent(&adapter->pdev->dev,
				  bufsize,
				  fbr->ring_virtaddr,
				  fbr->ring_physaddr);

		fbr->ring_virtaddr = NULL;
	}

	/* Free Packet Status Ring */
	if (rx_ring->ps_ring_virtaddr) {
		psr_size = sizeof(struct pkt_stat_desc) * rx_ring->psr_entries;

		dma_free_coherent(&adapter->pdev->dev, psr_size,
				  rx_ring->ps_ring_virtaddr,
				  rx_ring->ps_ring_physaddr);

		rx_ring->ps_ring_virtaddr = NULL;
	}

	/* Free area of memory for the writeback of status information */
	if (rx_ring->rx_status_block) {
		dma_free_coherent(&adapter->pdev->dev,
				  sizeof(struct rx_status_block),
				  rx_ring->rx_status_block,
				  rx_ring->rx_status_bus);
		rx_ring->rx_status_block = NULL;
	}

	/* Free the FBR Lookup Table */
	kfree(rx_ring->fbr[0]);
	kfree(rx_ring->fbr[1]);

	/* Reset Counters */
	rx_ring->num_ready_recv = 0;
}
/* et131x_init_recv - Initialize receive data structures */
static int et131x_init_recv(struct et131x_adapter *adapter)
{
	struct rfd *rfd;
	u32 rfdct;
	struct rx_ring *rx_ring = &adapter->rx_ring;

	/* Setup each RFD */
	for (rfdct = 0; rfdct < rx_ring->num_rfd; rfdct++) {
		rfd = kzalloc(sizeof(*rfd), GFP_ATOMIC | GFP_DMA);
		if (!rfd)
			return -ENOMEM;

		rfd->skb = NULL;

		/* Add this RFD to the recv_list */
		list_add_tail(&rfd->list_node, &rx_ring->recv_list);

		/* Increment the available RFD's */
		rx_ring->num_ready_recv++;
	}

	return 0;
}
/* et131x_set_rx_dma_timer - Set the heartbeat timer according to line rate */
static void et131x_set_rx_dma_timer(struct et131x_adapter *adapter)
{
	struct phy_device *phydev = adapter->phydev;

	/* For version B silicon, we do not use the RxDMA timer for 10 and 100
	 * Mbit/s line rates, nor do we enable RxDMA interrupt coalescing.
	 */
	if ((phydev->speed == SPEED_100) || (phydev->speed == SPEED_10)) {
		writel(0, &adapter->regs->rxdma.max_pkt_time);
		writel(1, &adapter->regs->rxdma.num_pkt_done);
	}
}
/* nic_return_rfd - Recycle a RFD and put it back onto the receive list */
static void nic_return_rfd(struct et131x_adapter *adapter, struct rfd *rfd)
{
	struct rx_ring *rx_local = &adapter->rx_ring;
	struct rxdma_regs __iomem *rx_dma = &adapter->regs->rxdma;
	u16 buff_index = rfd->bufferindex;
	u8 ring_index = rfd->ringindex;
	unsigned long flags;
	struct fbr_lookup *fbr = rx_local->fbr[ring_index];

	/* We don't use any of the OOB data besides status. Otherwise, we
	 * need to clean up OOB data
	 */
	if (buff_index < fbr->num_entries) {
		u32 free_buff_ring;
		u32 __iomem *offset;
		struct fbr_desc *next;

		if (ring_index == 0)
			offset = &rx_dma->fbr0_full_offset;
		else
			offset = &rx_dma->fbr1_full_offset;

		next = (struct fbr_desc *)(fbr->ring_virtaddr) +
		       INDEX10(fbr->local_full);

		/* Handle the Free Buffer Ring advancement here. Write
		 * the PA / Buffer Index for the returned buffer into
		 * the oldest (next to be freed) FBR entry
		 */
		next->addr_hi = fbr->bus_high[buff_index];
		next->addr_lo = fbr->bus_low[buff_index];
		next->word2 = buff_index;

		free_buff_ring = bump_free_buff_ring(&fbr->local_full,
						     fbr->num_entries - 1);
		writel(free_buff_ring, offset);
	} else {
		dev_err(&adapter->pdev->dev,
			"%s illegal Buffer Index returned\n", __func__);
	}

	/* The processing on this RFD is done, so put it back on the tail of
	 * our list
	 */
	spin_lock_irqsave(&adapter->rcv_lock, flags);
	list_add_tail(&rfd->list_node, &rx_local->recv_list);
	rx_local->num_ready_recv++;
	spin_unlock_irqrestore(&adapter->rcv_lock, flags);

	WARN_ON(rx_local->num_ready_recv > rx_local->num_rfd);
}
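
/* Editorial sketch (not built): the buffer-return protocol performed by
 * nic_return_rfd(), with generic names. The host republishes a spent
 * buffer by rewriting the oldest ring entry and then advancing the
 * full-offset that the device polls.
 */
#if 0
static void return_buffer(struct fbr_desc *ring, u32 *local_full,
			  u32 entries, u32 buff_index,
			  u32 addr_hi, u32 addr_lo, u32 __iomem *hw_full)
{
	struct fbr_desc *slot = ring + INDEX10(*local_full);

	slot->addr_hi = addr_hi;	/* 1. refill the oldest slot */
	slot->addr_lo = addr_lo;
	slot->word2 = buff_index;
	/* 2. advance with wrap, 3. publish the new index to the device */
	writel(bump_free_buff_ring(local_full, entries - 1), hw_full);
}
#endif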
/* nic_rx_pkts - Checks the hardware for available packets
 *
 * Checks the hardware for available packets, using completion ring
 * If packets are available, it gets an RFD from the recv_list, attaches
 * the packet to it, puts the RFD in the RecvPendList, and also returns
 * the pointer to the RFD.
 */
static struct rfd *nic_rx_pkts(struct et131x_adapter *adapter)
{
	struct rx_ring *rx_local = &adapter->rx_ring;
	struct rx_status_block *status;
	struct pkt_stat_desc *psr;
	struct rfd *rfd;
	unsigned long flags;
	struct list_head *element;
	u8 ring_index;
	u16 buff_index;
	u32 len;
	u32 word0;
	u32 word1;
	struct sk_buff *skb;
	struct fbr_lookup *fbr;

	/* RX Status block is written by the DMA engine prior to every
	 * interrupt. It contains the next to be used entry in the Packet
	 * Status Ring, and also the two Free Buffer rings.
	 */
	status = rx_local->rx_status_block;
	word1 = status->word1 >> 16;

	/* Check the PSR and wrap bits do not match */
	if ((word1 & 0x1FFF) == (rx_local->local_psr_full & 0x1FFF))
		return NULL; /* Looks like this ring is not updated yet */

	/* The packet status ring indicates that data is available. */
	psr = (struct pkt_stat_desc *)(rx_local->ps_ring_virtaddr) +
	      (rx_local->local_psr_full & 0xFFF);

	/* Grab any information that is required once the PSR is advanced,
	 * since we can no longer rely on the memory being accurate
	 */
	len = psr->word1 & 0xFFFF;
	ring_index = (psr->word1 >> 26) & 0x03;
	fbr = rx_local->fbr[ring_index];
	buff_index = (psr->word1 >> 16) & 0x3FF;
	word0 = psr->word0;

	/* Indicate that we have used this PSR entry. */
	add_12bit(&rx_local->local_psr_full, 1);
	if ((rx_local->local_psr_full & 0xFFF) > rx_local->psr_entries - 1) {
		/* Clear psr full and toggle the wrap bit */
		rx_local->local_psr_full &= ~0xFFF;
		rx_local->local_psr_full ^= 0x1000;
	}

	writel(rx_local->local_psr_full, &adapter->regs->rxdma.psr_full_offset);

	if (ring_index > 1 || buff_index > fbr->num_entries - 1) {
		/* Illegal buffer or ring index cannot be used by S/W */
		dev_err(&adapter->pdev->dev,
			"NICRxPkts PSR Entry %d indicates length of %d and/or bad bi(%d)\n",
			rx_local->local_psr_full & 0xFFF, len, buff_index);
		return NULL;
	}

	/* Get and fill the RFD. */
	spin_lock_irqsave(&adapter->rcv_lock, flags);

	element = rx_local->recv_list.next;
	rfd = list_entry(element, struct rfd, list_node);

	if (rfd == NULL) {
		spin_unlock_irqrestore(&adapter->rcv_lock, flags);
		return NULL;
	}

	list_del(&rfd->list_node);
	rx_local->num_ready_recv--;

	spin_unlock_irqrestore(&adapter->rcv_lock, flags);

	rfd->bufferindex = buff_index;
	rfd->ringindex = ring_index;

	/* In V1 silicon, there is a bug which screws up filtering of runt
	 * packets. Therefore runt packet filtering is disabled in the MAC and
	 * the packets are dropped here. They are also counted here.
	 */
	if (len < (NIC_MIN_PACKET_SIZE + 4)) {
		adapter->stats.rx_other_errs++;
		rfd->len = 0;
		goto out;
	}

	rfd->len = len;

	if ((word0 & ALCATEL_MULTICAST_PKT) && !(word0 & ALCATEL_BROADCAST_PKT))
		adapter->stats.multicast_pkts_rcvd++;

	skb = dev_alloc_skb(rfd->len + 2);
	if (!skb)
		return NULL;

	adapter->netdev->stats.rx_bytes += rfd->len;

	memcpy(skb_put(skb, rfd->len), fbr->virt[buff_index], rfd->len);

	skb->protocol = eth_type_trans(skb, adapter->netdev);
	skb->ip_summed = CHECKSUM_NONE;
	netif_receive_skb(skb);

out:
	nic_return_rfd(adapter, rfd);
	return rfd;
}
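
/* Editorial sketch (not built): the 12-bit analogue of the 10-bit bump
 * used on the free buffer rings, matching what add_12bit() plus the
 * explicit wrap toggle in nic_rx_pkts() accomplish together (mask 0xFFF
 * and wrap bit 0x1000 assumed).
 */
#if 0
static u32 bump_psr_index(u32 index, u32 entries)
{
	index = (index & ~0xFFF) | ((index + 1) & 0xFFF);
	if ((index & 0xFFF) > entries - 1) {
		index &= ~0xFFF;	/* back to slot 0 ... */
		index ^= 0x1000;	/* ... on the other lap */
	}
	return index;
}
#endif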
static int et131x_handle_recv_pkts(struct et131x_adapter *adapter, int budget)
{
	struct rfd *rfd = NULL;
	int count = 0;
	int limit = budget;
	bool done = true;
	struct rx_ring *rx_ring = &adapter->rx_ring;

	if (budget > MAX_PACKETS_HANDLED)
		limit = MAX_PACKETS_HANDLED;

	/* Process up to available RFD's */
	while (count < limit) {
		if (list_empty(&rx_ring->recv_list)) {
			WARN_ON(rx_ring->num_ready_recv != 0);
			done = false;
			break;
		}

		rfd = nic_rx_pkts(adapter);
		if (rfd == NULL)
			break;

		/* Do not receive any packets until a filter has been set.
		 * Do not receive any packets until we have link.
		 * If length is zero, return the RFD in order to advance the
		 * Free buffer ring.
		 */
		if (!adapter->packet_filter ||
		    !netif_carrier_ok(adapter->netdev) ||
		    rfd->len == 0)
			continue;

		adapter->netdev->stats.rx_packets++;

		if (rx_ring->num_ready_recv < RFD_LOW_WATER_MARK)
			dev_warn(&adapter->pdev->dev, "RFD's are running out\n");

		count++;
	}

	if (count == limit || !done) {
		rx_ring->unfinished_receives = true;
		writel(PARM_TX_TIME_INT_DEF * NANO_IN_A_MICRO,
		       &adapter->regs->global.watchdog_timer);
	} else {
		/* Watchdog timer will disable itself if appropriate. */
		rx_ring->unfinished_receives = false;
	}

	return count;
}
/* et131x_tx_dma_memory_alloc
 *
 * Allocates memory that will be visible both to the device and to the CPU.
 * The OS will pass us packets, pointers to which we will insert in the Tx
 * Descriptor queue. The device will read this queue to find the packets in
 * memory. The device will update the "status" in memory each time it xmits a
 * packet.
 */
static int et131x_tx_dma_memory_alloc(struct et131x_adapter *adapter)
{
	int desc_size = 0;
	struct tx_ring *tx_ring = &adapter->tx_ring;

	/* Allocate memory for the TCB's (Transmit Control Block) */
	tx_ring->tcb_ring = kcalloc(NUM_TCB, sizeof(struct tcb),
				    GFP_ATOMIC | GFP_DMA);
	if (!tx_ring->tcb_ring)
		return -ENOMEM;

	desc_size = (sizeof(struct tx_desc) * NUM_DESC_PER_RING_TX);
	tx_ring->tx_desc_ring = dma_alloc_coherent(&adapter->pdev->dev,
						   desc_size,
						   &tx_ring->tx_desc_ring_pa,
						   GFP_KERNEL);
	if (!tx_ring->tx_desc_ring) {
		dev_err(&adapter->pdev->dev,
			"Cannot alloc memory for Tx Ring\n");
		return -ENOMEM;
	}

	tx_ring->tx_status = dma_alloc_coherent(&adapter->pdev->dev,
						sizeof(u32),
						&tx_ring->tx_status_pa,
						GFP_KERNEL);
	if (!tx_ring->tx_status) {
		dev_err(&adapter->pdev->dev,
			"Cannot alloc memory for Tx status block\n");
		return -ENOMEM;
	}
	return 0;
}
static void et131x_tx_dma_memory_free(struct et131x_adapter *adapter)
{
	int desc_size = 0;
	struct tx_ring *tx_ring = &adapter->tx_ring;

	if (tx_ring->tx_desc_ring) {
		/* Free memory relating to Tx rings here */
		desc_size = (sizeof(struct tx_desc) * NUM_DESC_PER_RING_TX);
		dma_free_coherent(&adapter->pdev->dev,
				  desc_size,
				  tx_ring->tx_desc_ring,
				  tx_ring->tx_desc_ring_pa);
		tx_ring->tx_desc_ring = NULL;
	}

	/* Free memory for the Tx status block */
	if (tx_ring->tx_status) {
		dma_free_coherent(&adapter->pdev->dev,
				  sizeof(u32),
				  tx_ring->tx_status,
				  tx_ring->tx_status_pa);
		tx_ring->tx_status = NULL;
	}

	/* Free the memory for the tcb structures */
	kfree(tx_ring->tcb_ring);
}
/* nic_send_packet - NIC specific send handler for version B silicon. */
static int nic_send_packet(struct et131x_adapter *adapter, struct tcb *tcb)
{
	u32 i;
	struct tx_desc desc[24];
	u32 frag = 0;
	u32 thiscopy, remainder;
	struct sk_buff *skb = tcb->skb;
	u32 nr_frags = skb_shinfo(skb)->nr_frags + 1;
	struct skb_frag_struct *frags = &skb_shinfo(skb)->frags[0];
	struct phy_device *phydev = adapter->phydev;
	dma_addr_t dma_addr;
	struct tx_ring *tx_ring = &adapter->tx_ring;

	/* Part of the optimizations of this send routine restrict us to
	 * sending 24 fragments at a pass. In practice we should never see
	 * more than 5 fragments.
	 */

	/* nr_frags should be no more than 18. */
	BUILD_BUG_ON(MAX_SKB_FRAGS + 1 > 23);

	memset(desc, 0, sizeof(struct tx_desc) * (nr_frags + 1));

	for (i = 0; i < nr_frags; i++) {
		/* If there is something in this element, lets get a
		 * descriptor from the ring and get the necessary data
		 */
		if (i == 0) {
			/* If the fragments are smaller than a standard MTU,
			 * then map them to a single descriptor in the Tx
			 * Desc ring. However, if they're larger, as is
			 * possible with support for jumbo packets, then
			 * split them each across 2 descriptors.
			 *
			 * This will work until we determine why the hardware
			 * doesn't seem to like large fragments.
			 */
			if (skb_headlen(skb) <= 1514) {
				/* Low 16bits are length, high is vlan and
				 * unused currently so zero
				 */
				desc[frag].len_vlan = skb_headlen(skb);
				dma_addr = dma_map_single(&adapter->pdev->dev,
							  skb->data,
							  skb_headlen(skb),
							  DMA_TO_DEVICE);
				desc[frag].addr_lo = lower_32_bits(dma_addr);
				desc[frag].addr_hi = upper_32_bits(dma_addr);
				frag++;
			} else {
				desc[frag].len_vlan = skb_headlen(skb) / 2;
				dma_addr = dma_map_single(&adapter->pdev->dev,
							  skb->data,
							  skb_headlen(skb) / 2,
							  DMA_TO_DEVICE);
				desc[frag].addr_lo = lower_32_bits(dma_addr);
				desc[frag].addr_hi = upper_32_bits(dma_addr);
				frag++;

				desc[frag].len_vlan = skb_headlen(skb) / 2;
				dma_addr = dma_map_single(&adapter->pdev->dev,
							  skb->data +
							  skb_headlen(skb) / 2,
							  skb_headlen(skb) / 2,
							  DMA_TO_DEVICE);
				desc[frag].addr_lo = lower_32_bits(dma_addr);
				desc[frag].addr_hi = upper_32_bits(dma_addr);
				frag++;
			}
		} else {
			desc[frag].len_vlan = frags[i - 1].size;
			dma_addr = skb_frag_dma_map(&adapter->pdev->dev,
						    &frags[i - 1],
						    0,
						    frags[i - 1].size,
						    DMA_TO_DEVICE);
			desc[frag].addr_lo = lower_32_bits(dma_addr);
			desc[frag].addr_hi = upper_32_bits(dma_addr);
			frag++;
		}
	}

	if (phydev && phydev->speed == SPEED_1000) {
		if (++tx_ring->since_irq == PARM_TX_NUM_BUFS_DEF) {
			/* Last element & Interrupt flag */
			desc[frag - 1].flags =
				TXDESC_FLAG_INTPROC | TXDESC_FLAG_LASTPKT;
			tx_ring->since_irq = 0;
		} else { /* Last element */
			desc[frag - 1].flags = TXDESC_FLAG_LASTPKT;
		}
	} else {
		desc[frag - 1].flags =
			TXDESC_FLAG_INTPROC | TXDESC_FLAG_LASTPKT;
	}

	desc[0].flags |= TXDESC_FLAG_FIRSTPKT;

	tcb->index_start = tx_ring->send_idx;
	tcb->stale = 0;

	thiscopy = NUM_DESC_PER_RING_TX - INDEX10(tx_ring->send_idx);

	if (thiscopy >= frag) {
		remainder = 0;
		thiscopy = frag;
	} else {
		remainder = frag - thiscopy;
	}

	memcpy(tx_ring->tx_desc_ring + INDEX10(tx_ring->send_idx),
	       desc,
	       sizeof(struct tx_desc) * thiscopy);

	add_10bit(&tx_ring->send_idx, thiscopy);

	if (INDEX10(tx_ring->send_idx) == 0 ||
	    INDEX10(tx_ring->send_idx) == NUM_DESC_PER_RING_TX) {
		tx_ring->send_idx &= ~ET_DMA10_MASK;
		tx_ring->send_idx ^= ET_DMA10_WRAP;
	}

	if (remainder) {
		memcpy(tx_ring->tx_desc_ring,
		       desc + thiscopy,
		       sizeof(struct tx_desc) * remainder);

		add_10bit(&tx_ring->send_idx, remainder);
	}

	if (INDEX10(tx_ring->send_idx) == 0) {
		if (tx_ring->send_idx)
			tcb->index = NUM_DESC_PER_RING_TX - 1;
		else
			tcb->index = ET_DMA10_WRAP | (NUM_DESC_PER_RING_TX - 1);
	} else {
		tcb->index = tx_ring->send_idx - 1;
	}

	spin_lock(&adapter->tcb_send_qlock);

	if (tx_ring->send_tail)
		tx_ring->send_tail->next = tcb;
	else
		tx_ring->send_head = tcb;

	tx_ring->send_tail = tcb;

	WARN_ON(tcb->next != NULL);

	tx_ring->used++;

	spin_unlock(&adapter->tcb_send_qlock);

	/* Write the new write pointer back to the device. */
	writel(tx_ring->send_idx, &adapter->regs->txdma.service_request);

	/* For Gig only, we use Tx Interrupt coalescing. Enable the software
	 * timer to wake us up if this packet isn't followed by N more.
	 */
	if (phydev && phydev->speed == SPEED_1000) {
		writel(PARM_TX_TIME_INT_DEF * NANO_IN_A_MICRO,
		       &adapter->regs->global.watchdog_timer);
	}
	return 0;
}
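
/* Editorial sketch (not built): the split-copy pattern used above. A
 * batch of n descriptors written at position pos in a ring of size
 * size may need two memcpy() calls, one up to the ring end and one
 * restarting at slot 0.
 */
#if 0
static void ring_write(struct tx_desc *ring, u32 size, u32 pos,
		       const struct tx_desc *batch, u32 n)
{
	u32 first = min(n, size - pos);

	memcpy(ring + pos, batch, first * sizeof(*batch));
	if (n > first)		/* wrapped: remainder lands at slot 0 */
		memcpy(ring, batch + first, (n - first) * sizeof(*batch));
}
#endif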
static int send_packet(struct sk_buff *skb, struct et131x_adapter *adapter)
{
	int status;
	struct tcb *tcb;
	unsigned long flags;
	struct tx_ring *tx_ring = &adapter->tx_ring;

	/* All packets must have at least a MAC address and a protocol type */
	if (skb->len < ETH_HLEN)
		return -EIO;

	spin_lock_irqsave(&adapter->tcb_ready_qlock, flags);

	tcb = tx_ring->tcb_qhead;

	if (tcb == NULL) {
		spin_unlock_irqrestore(&adapter->tcb_ready_qlock, flags);
		return -ENOMEM;
	}

	tx_ring->tcb_qhead = tcb->next;

	if (tx_ring->tcb_qhead == NULL)
		tx_ring->tcb_qtail = NULL;

	spin_unlock_irqrestore(&adapter->tcb_ready_qlock, flags);

	tcb->skb = skb;
	tcb->next = NULL;

	status = nic_send_packet(adapter, tcb);

	if (status != 0) {
		spin_lock_irqsave(&adapter->tcb_ready_qlock, flags);

		if (tx_ring->tcb_qtail)
			tx_ring->tcb_qtail->next = tcb;
		else
			/* Apparently ready Q is empty. */
			tx_ring->tcb_qhead = tcb;

		tx_ring->tcb_qtail = tcb;
		spin_unlock_irqrestore(&adapter->tcb_ready_qlock, flags);
		return status;
	}
	WARN_ON(tx_ring->used > NUM_TCB);
	return 0;
}
/* free_send_packet - Recycle a struct tcb */
static inline void free_send_packet(struct et131x_adapter *adapter,
				    struct tcb *tcb)
{
	unsigned long flags;
	struct tx_desc *desc = NULL;
	struct net_device_stats *stats = &adapter->netdev->stats;
	struct tx_ring *tx_ring = &adapter->tx_ring;
	u64 dma_addr;

	if (tcb->skb) {
		stats->tx_bytes += tcb->skb->len;

		/* Iterate through the TX descriptors on the ring
		 * corresponding to this packet and unmap the fragments
		 * they point to
		 */
		do {
			desc = tx_ring->tx_desc_ring +
			       INDEX10(tcb->index_start);

			dma_addr = desc->addr_lo;
			dma_addr |= (u64)desc->addr_hi << 32;

			dma_unmap_single(&adapter->pdev->dev,
					 dma_addr,
					 desc->len_vlan, DMA_TO_DEVICE);

			add_10bit(&tcb->index_start, 1);
			if (INDEX10(tcb->index_start) >=
			    NUM_DESC_PER_RING_TX) {
				tcb->index_start &= ~ET_DMA10_MASK;
				tcb->index_start ^= ET_DMA10_WRAP;
			}
		} while (desc != tx_ring->tx_desc_ring + INDEX10(tcb->index));

		dev_kfree_skb_any(tcb->skb);
	}

	memset(tcb, 0, sizeof(struct tcb));

	/* Add the TCB to the Ready Q */
	spin_lock_irqsave(&adapter->tcb_ready_qlock, flags);

	stats->tx_packets++;

	if (tx_ring->tcb_qtail)
		tx_ring->tcb_qtail->next = tcb;
	else /* Apparently ready Q is empty. */
		tx_ring->tcb_qhead = tcb;

	tx_ring->tcb_qtail = tcb;

	spin_unlock_irqrestore(&adapter->tcb_ready_qlock, flags);
	WARN_ON(tx_ring->used < 0);
}
/* et131x_free_busy_send_packets - Free and complete the stopped active sends */
static void et131x_free_busy_send_packets(struct et131x_adapter *adapter)
{
	struct tcb *tcb;
	unsigned long flags;
	u32 freed = 0;
	struct tx_ring *tx_ring = &adapter->tx_ring;

	/* Any packets being sent? Check the first TCB on the send list */
	spin_lock_irqsave(&adapter->tcb_send_qlock, flags);

	tcb = tx_ring->send_head;

	while (tcb != NULL && freed < NUM_TCB) {
		struct tcb *next = tcb->next;

		tx_ring->send_head = next;

		if (next == NULL)
			tx_ring->send_tail = NULL;

		tx_ring->used--;

		spin_unlock_irqrestore(&adapter->tcb_send_qlock, flags);

		freed++;
		free_send_packet(adapter, tcb);

		spin_lock_irqsave(&adapter->tcb_send_qlock, flags);

		tcb = tx_ring->send_head;
	}

	WARN_ON(freed == NUM_TCB);

	spin_unlock_irqrestore(&adapter->tcb_send_qlock, flags);

	tx_ring->used = 0;
}
/* et131x_handle_send_pkts
 *
 * Re-claim the send resources, complete sends and get more to send from
 * the send wait queue.
 */
static void et131x_handle_send_pkts(struct et131x_adapter *adapter)
{
	unsigned long flags;
	u32 serviced;
	struct tcb *tcb;
	u32 index;
	struct tx_ring *tx_ring = &adapter->tx_ring;

	serviced = readl(&adapter->regs->txdma.new_service_complete);
	index = INDEX10(serviced);

	/* Has the ring wrapped? Process any descriptors that do not have
	 * the same "wrap" indicator as the current completion indicator
	 */
	spin_lock_irqsave(&adapter->tcb_send_qlock, flags);

	tcb = tx_ring->send_head;

	while (tcb &&
	       ((serviced ^ tcb->index) & ET_DMA10_WRAP) &&
	       index < INDEX10(tcb->index)) {
		tx_ring->used--;
		tx_ring->send_head = tcb->next;
		if (tcb->next == NULL)
			tx_ring->send_tail = NULL;

		spin_unlock_irqrestore(&adapter->tcb_send_qlock, flags);
		free_send_packet(adapter, tcb);
		spin_lock_irqsave(&adapter->tcb_send_qlock, flags);

		/* Goto the next packet */
		tcb = tx_ring->send_head;
	}
	while (tcb &&
	       !((serviced ^ tcb->index) & ET_DMA10_WRAP) &&
	       index > (tcb->index & ET_DMA10_MASK)) {
		tx_ring->used--;
		tx_ring->send_head = tcb->next;
		if (tcb->next == NULL)
			tx_ring->send_tail = NULL;

		spin_unlock_irqrestore(&adapter->tcb_send_qlock, flags);
		free_send_packet(adapter, tcb);
		spin_lock_irqsave(&adapter->tcb_send_qlock, flags);

		/* Goto the next packet */
		tcb = tx_ring->send_head;
	}

	/* Wake up the queue when we hit a low-water mark */
	if (tx_ring->used <= NUM_TCB / 3)
		netif_wake_queue(adapter->netdev);

	spin_unlock_irqrestore(&adapter->tcb_send_qlock, flags);
}
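
/* Editorial sketch (not built): the completion test performed by the
 * two loops above, folded into one predicate. Whether the hardware's
 * completion index has passed a descriptor depends on whether the two
 * indices are on the same lap of the ring (wrap bits equal) or not.
 */
#if 0
static bool tcb_completed(u32 serviced, u32 tcb_index)
{
	u32 hw = INDEX10(serviced);
	u32 sw = INDEX10(tcb_index);

	if ((serviced ^ tcb_index) & ET_DMA10_WRAP)
		return hw < sw;	/* HW a lap ahead; old-lap entry done */

	return hw > sw;		/* same lap; done once HW moves past */
}
#endif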
static int et131x_get_settings(struct net_device *netdev,
			       struct ethtool_cmd *cmd)
{
	struct et131x_adapter *adapter = netdev_priv(netdev);

	return phy_ethtool_gset(adapter->phydev, cmd);
}

static int et131x_set_settings(struct net_device *netdev,
			       struct ethtool_cmd *cmd)
{
	struct et131x_adapter *adapter = netdev_priv(netdev);

	return phy_ethtool_sset(adapter->phydev, cmd);
}

static int et131x_get_regs_len(struct net_device *netdev)
{
#define ET131X_REGS_LEN 256
	return ET131X_REGS_LEN * sizeof(u32);
}
static void et131x_get_regs(struct net_device *netdev,
			    struct ethtool_regs *regs, void *regs_data)
{
	struct et131x_adapter *adapter = netdev_priv(netdev);
	struct address_map __iomem *aregs = adapter->regs;
	u32 *regs_buff = regs_data;
	u32 num = 0;
	u16 tmp;

	memset(regs_data, 0, et131x_get_regs_len(netdev));

	regs->version = (1 << 24) | (adapter->pdev->revision << 16) |
			adapter->pdev->device;

	/* PHY regs */
	et131x_mii_read(adapter, MII_BMCR, &tmp);
	regs_buff[num++] = tmp;
	et131x_mii_read(adapter, MII_BMSR, &tmp);
	regs_buff[num++] = tmp;
	et131x_mii_read(adapter, MII_PHYSID1, &tmp);
	regs_buff[num++] = tmp;
	et131x_mii_read(adapter, MII_PHYSID2, &tmp);
	regs_buff[num++] = tmp;
	et131x_mii_read(adapter, MII_ADVERTISE, &tmp);
	regs_buff[num++] = tmp;
	et131x_mii_read(adapter, MII_LPA, &tmp);
	regs_buff[num++] = tmp;
	et131x_mii_read(adapter, MII_EXPANSION, &tmp);
	regs_buff[num++] = tmp;
	/* Autoneg next page transmit reg */
	et131x_mii_read(adapter, 0x07, &tmp);
	regs_buff[num++] = tmp;
	/* Link partner next page reg */
	et131x_mii_read(adapter, 0x08, &tmp);
	regs_buff[num++] = tmp;
	et131x_mii_read(adapter, MII_CTRL1000, &tmp);
	regs_buff[num++] = tmp;
	et131x_mii_read(adapter, MII_STAT1000, &tmp);
	regs_buff[num++] = tmp;
	et131x_mii_read(adapter, 0x0b, &tmp);
	regs_buff[num++] = tmp;
	et131x_mii_read(adapter, 0x0c, &tmp);
	regs_buff[num++] = tmp;
	et131x_mii_read(adapter, MII_MMD_CTRL, &tmp);
	regs_buff[num++] = tmp;
	et131x_mii_read(adapter, MII_MMD_DATA, &tmp);
	regs_buff[num++] = tmp;
	et131x_mii_read(adapter, MII_ESTATUS, &tmp);
	regs_buff[num++] = tmp;

	et131x_mii_read(adapter, PHY_INDEX_REG, &tmp);
	regs_buff[num++] = tmp;
	et131x_mii_read(adapter, PHY_DATA_REG, &tmp);
	regs_buff[num++] = tmp;
	et131x_mii_read(adapter, PHY_MPHY_CONTROL_REG, &tmp);
	regs_buff[num++] = tmp;
	et131x_mii_read(adapter, PHY_LOOPBACK_CONTROL, &tmp);
	regs_buff[num++] = tmp;
	et131x_mii_read(adapter, PHY_LOOPBACK_CONTROL + 1, &tmp);
	regs_buff[num++] = tmp;

	et131x_mii_read(adapter, PHY_REGISTER_MGMT_CONTROL, &tmp);
	regs_buff[num++] = tmp;
	et131x_mii_read(adapter, PHY_CONFIG, &tmp);
	regs_buff[num++] = tmp;
	et131x_mii_read(adapter, PHY_PHY_CONTROL, &tmp);
	regs_buff[num++] = tmp;
	et131x_mii_read(adapter, PHY_INTERRUPT_MASK, &tmp);
	regs_buff[num++] = tmp;
	et131x_mii_read(adapter, PHY_INTERRUPT_STATUS, &tmp);
	regs_buff[num++] = tmp;
	et131x_mii_read(adapter, PHY_PHY_STATUS, &tmp);
	regs_buff[num++] = tmp;
	et131x_mii_read(adapter, PHY_LED_1, &tmp);
	regs_buff[num++] = tmp;
	et131x_mii_read(adapter, PHY_LED_2, &tmp);
	regs_buff[num++] = tmp;

	/* Global regs */
	regs_buff[num++] = readl(&aregs->global.txq_start_addr);
	regs_buff[num++] = readl(&aregs->global.txq_end_addr);
	regs_buff[num++] = readl(&aregs->global.rxq_start_addr);
	regs_buff[num++] = readl(&aregs->global.rxq_end_addr);
	regs_buff[num++] = readl(&aregs->global.pm_csr);
	regs_buff[num++] = adapter->stats.interrupt_status;
	regs_buff[num++] = readl(&aregs->global.int_mask);
	regs_buff[num++] = readl(&aregs->global.int_alias_clr_en);
	regs_buff[num++] = readl(&aregs->global.int_status_alias);
	regs_buff[num++] = readl(&aregs->global.sw_reset);
	regs_buff[num++] = readl(&aregs->global.slv_timer);
	regs_buff[num++] = readl(&aregs->global.msi_config);
	regs_buff[num++] = readl(&aregs->global.loopback);
	regs_buff[num++] = readl(&aregs->global.watchdog_timer);

	/* TXDMA regs */
	regs_buff[num++] = readl(&aregs->txdma.csr);
	regs_buff[num++] = readl(&aregs->txdma.pr_base_hi);
	regs_buff[num++] = readl(&aregs->txdma.pr_base_lo);
	regs_buff[num++] = readl(&aregs->txdma.pr_num_des);
	regs_buff[num++] = readl(&aregs->txdma.txq_wr_addr);
	regs_buff[num++] = readl(&aregs->txdma.txq_wr_addr_ext);
	regs_buff[num++] = readl(&aregs->txdma.txq_rd_addr);
	regs_buff[num++] = readl(&aregs->txdma.dma_wb_base_hi);
	regs_buff[num++] = readl(&aregs->txdma.dma_wb_base_lo);
	regs_buff[num++] = readl(&aregs->txdma.service_request);
	regs_buff[num++] = readl(&aregs->txdma.service_complete);
	regs_buff[num++] = readl(&aregs->txdma.cache_rd_index);
	regs_buff[num++] = readl(&aregs->txdma.cache_wr_index);
	regs_buff[num++] = readl(&aregs->txdma.tx_dma_error);
	regs_buff[num++] = readl(&aregs->txdma.desc_abort_cnt);
	regs_buff[num++] = readl(&aregs->txdma.payload_abort_cnt);
	regs_buff[num++] = readl(&aregs->txdma.writeback_abort_cnt);
	regs_buff[num++] = readl(&aregs->txdma.desc_timeout_cnt);
	regs_buff[num++] = readl(&aregs->txdma.payload_timeout_cnt);
	regs_buff[num++] = readl(&aregs->txdma.writeback_timeout_cnt);
	regs_buff[num++] = readl(&aregs->txdma.desc_error_cnt);
	regs_buff[num++] = readl(&aregs->txdma.payload_error_cnt);
	regs_buff[num++] = readl(&aregs->txdma.writeback_error_cnt);
	regs_buff[num++] = readl(&aregs->txdma.dropped_tlp_cnt);
	regs_buff[num++] = readl(&aregs->txdma.new_service_complete);
	regs_buff[num++] = readl(&aregs->txdma.ethernet_packet_cnt);

	/* RXDMA regs */
	regs_buff[num++] = readl(&aregs->rxdma.csr);
	regs_buff[num++] = readl(&aregs->rxdma.dma_wb_base_hi);
	regs_buff[num++] = readl(&aregs->rxdma.dma_wb_base_lo);
	regs_buff[num++] = readl(&aregs->rxdma.num_pkt_done);
	regs_buff[num++] = readl(&aregs->rxdma.max_pkt_time);
	regs_buff[num++] = readl(&aregs->rxdma.rxq_rd_addr);
	regs_buff[num++] = readl(&aregs->rxdma.rxq_rd_addr_ext);
	regs_buff[num++] = readl(&aregs->rxdma.rxq_wr_addr);
	regs_buff[num++] = readl(&aregs->rxdma.psr_base_hi);
	regs_buff[num++] = readl(&aregs->rxdma.psr_base_lo);
	regs_buff[num++] = readl(&aregs->rxdma.psr_num_des);
	regs_buff[num++] = readl(&aregs->rxdma.psr_avail_offset);
	regs_buff[num++] = readl(&aregs->rxdma.psr_full_offset);
	regs_buff[num++] = readl(&aregs->rxdma.psr_access_index);
	regs_buff[num++] = readl(&aregs->rxdma.psr_min_des);
	regs_buff[num++] = readl(&aregs->rxdma.fbr0_base_lo);
	regs_buff[num++] = readl(&aregs->rxdma.fbr0_base_hi);
	regs_buff[num++] = readl(&aregs->rxdma.fbr0_num_des);
	regs_buff[num++] = readl(&aregs->rxdma.fbr0_avail_offset);
	regs_buff[num++] = readl(&aregs->rxdma.fbr0_full_offset);
	regs_buff[num++] = readl(&aregs->rxdma.fbr0_rd_index);
	regs_buff[num++] = readl(&aregs->rxdma.fbr0_min_des);
	regs_buff[num++] = readl(&aregs->rxdma.fbr1_base_lo);
	regs_buff[num++] = readl(&aregs->rxdma.fbr1_base_hi);
	regs_buff[num++] = readl(&aregs->rxdma.fbr1_num_des);
	regs_buff[num++] = readl(&aregs->rxdma.fbr1_avail_offset);
	regs_buff[num++] = readl(&aregs->rxdma.fbr1_full_offset);
	regs_buff[num++] = readl(&aregs->rxdma.fbr1_rd_index);
	regs_buff[num++] = readl(&aregs->rxdma.fbr1_min_des);
}
static void et131x_get_drvinfo(struct net_device *netdev,
			       struct ethtool_drvinfo *info)
{
	struct et131x_adapter *adapter = netdev_priv(netdev);

	strlcpy(info->driver, DRIVER_NAME, sizeof(info->driver));
	strlcpy(info->version, DRIVER_VERSION, sizeof(info->version));
	strlcpy(info->bus_info, pci_name(adapter->pdev),
		sizeof(info->bus_info));
}

static struct ethtool_ops et131x_ethtool_ops = {
	.get_settings	= et131x_get_settings,
	.set_settings	= et131x_set_settings,
	.get_drvinfo	= et131x_get_drvinfo,
	.get_regs_len	= et131x_get_regs_len,
	.get_regs	= et131x_get_regs,
	.get_link	= ethtool_op_get_link,
};
/* et131x_hwaddr_init - set up the MAC Address */
static void et131x_hwaddr_init(struct et131x_adapter *adapter)
{
	/* If we have our default MAC from init and no MAC address from
	 * EEPROM, then we need to generate the last octet and set it on the
	 * device.
	 */
	if (is_zero_ether_addr(adapter->rom_addr)) {
		/* We need to randomly generate the last octet so we
		 * decrease our chances of setting the MAC address to the
		 * same as another one of our cards in the system.
		 */
		get_random_bytes(&adapter->addr[5], 1);
		/* We have the default value in the register we are
		 * working with so we need to copy the current
		 * address into the permanent address.
		 */
		ether_addr_copy(adapter->rom_addr, adapter->addr);
	} else {
		/* We do not have an override address, so set the
		 * current address to the permanent address and add
		 * it to the device.
		 */
		ether_addr_copy(adapter->addr, adapter->rom_addr);
	}
}
static int et131x_pci_init(struct et131x_adapter *adapter,
			   struct pci_dev *pdev)
{
	u16 max_payload;
	int i, rc;

	rc = et131x_init_eeprom(adapter);
	if (rc < 0)
		goto out;

	if (!pci_is_pcie(pdev)) {
		dev_err(&pdev->dev, "Missing PCIe capabilities\n");
		goto err_out;
	}

	/* Program the Ack/Nak latency and replay timers */
	max_payload = pdev->pcie_mpss;

	if (max_payload < 2) {
		static const u16 acknak[2] = { 0x76, 0xD0 };
		static const u16 replay[2] = { 0x1E0, 0x2ED };

		if (pci_write_config_word(pdev, ET1310_PCI_ACK_NACK,
					  acknak[max_payload])) {
			dev_err(&pdev->dev,
				"Could not write PCI config space for ACK/NAK\n");
			goto err_out;
		}
		if (pci_write_config_word(pdev, ET1310_PCI_REPLAY,
					  replay[max_payload])) {
			dev_err(&pdev->dev,
				"Could not write PCI config space for Replay Timer\n");
			goto err_out;
		}
	}

	/* l0s and l1 latency timers. We are using default values.
	 * Representing 001 for L0s and 010 for L1
	 */
	if (pci_write_config_byte(pdev, ET1310_PCI_L0L1LATENCY, 0x11)) {
		dev_err(&pdev->dev,
			"Could not write PCI config space for Latency Timers\n");
		goto err_out;
	}

	/* Change the max read size to 2k */
	if (pcie_set_readrq(pdev, 2048)) {
		dev_err(&pdev->dev,
			"Couldn't change PCI config space for Max read size\n");
		goto err_out;
	}

	/* Get MAC address from config space if an eeprom exists, otherwise
	 * the MAC address there will not be valid
	 */
	if (!adapter->has_eeprom) {
		et131x_hwaddr_init(adapter);
		return 0;
	}

	for (i = 0; i < ETH_ALEN; i++) {
		if (pci_read_config_byte(pdev, ET1310_PCI_MAC_ADDRESS + i,
					 adapter->rom_addr + i)) {
			dev_err(&pdev->dev, "Could not read PCI config space for MAC address\n");
			goto err_out;
		}
	}
	ether_addr_copy(adapter->addr, adapter->rom_addr);
out:
	return rc;

err_out:
	rc = -EIO;
	goto out;
}
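
/* Editorial note (sketch, not built): pdev->pcie_mpss encodes the
 * device's Max_Payload_Size as a power of two, 0 meaning 128 bytes and
 * 1 meaning 256 bytes; the ET1310 only carries tuned Ack/Nak and
 * replay-timer values for those two sizes, hence the max_payload < 2
 * guard above. A hypothetical lookup helper:
 */
#if 0
static u16 acknak_for_mpss(u8 mpss)
{
	static const u16 acknak[2] = { 0x76, 0xD0 };	/* 128 B, 256 B */

	return mpss < 2 ? acknak[mpss] : 0;	/* 0: keep device default */
}
#endif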
/* et131x_error_timer_handler
 * @data: timer-specific variable; here a pointer to our adapter structure
 *
 * The routine called when the error timer expires, to track the number of
 * recurring errors.
 */
static void et131x_error_timer_handler(unsigned long data)
{
	struct et131x_adapter *adapter = (struct et131x_adapter *)data;
	struct phy_device *phydev = adapter->phydev;

	if (et1310_in_phy_coma(adapter)) {
		/* Bring the device immediately out of coma, to
		 * prevent it from sleeping indefinitely, this
		 * mechanism could be improved!
		 */
		et1310_disable_phy_coma(adapter);
		adapter->boot_coma = 20;
	} else {
		et1310_update_macstat_host_counters(adapter);
	}

	if (!phydev->link && adapter->boot_coma < 11)
		adapter->boot_coma++;

	if (adapter->boot_coma == 10) {
		if (!phydev->link) {
			if (!et1310_in_phy_coma(adapter)) {
				/* NOTE - This was originally a 'sync with
				 * interrupt'. How to do that under Linux?
				 */
				et131x_enable_interrupts(adapter);
				et1310_enable_phy_coma(adapter);
			}
		}
	}

	/* This is a periodic timer, so reschedule */
	mod_timer(&adapter->error_timer, jiffies +
		  msecs_to_jiffies(TX_ERROR_PERIOD));
}
static void et131x_adapter_memory_free(struct et131x_adapter *adapter)
{
	et131x_tx_dma_memory_free(adapter);
	et131x_rx_dma_memory_free(adapter);
}

static int et131x_adapter_memory_alloc(struct et131x_adapter *adapter)
{
	int status;

	/* Allocate memory for the Tx Ring */
	status = et131x_tx_dma_memory_alloc(adapter);
	if (status) {
		dev_err(&adapter->pdev->dev,
			"et131x_tx_dma_memory_alloc FAILED\n");
		et131x_tx_dma_memory_free(adapter);
		return status;
	}
	/* Receive buffer memory allocation */
	status = et131x_rx_dma_memory_alloc(adapter);
	if (status) {
		dev_err(&adapter->pdev->dev,
			"et131x_rx_dma_memory_alloc FAILED\n");
		et131x_adapter_memory_free(adapter);
		return status;
	}

	/* Init receive data structures */
	status = et131x_init_recv(adapter);
	if (status) {
		dev_err(&adapter->pdev->dev, "et131x_init_recv FAILED\n");
		et131x_adapter_memory_free(adapter);
	}
	return status;
}
static void et131x_adjust_link(struct net_device *netdev)
{
	struct et131x_adapter *adapter = netdev_priv(netdev);
	struct phy_device *phydev = adapter->phydev;

	if (!phydev)
		return;
	if (phydev->link == adapter->link)
		return;

	/* Check to see if we are in coma mode and if
	 * so, disable it because we will not be able
	 * to read PHY values until we are out.
	 */
	if (et1310_in_phy_coma(adapter))
		et1310_disable_phy_coma(adapter);

	adapter->link = phydev->link;
	phy_print_status(phydev);

	if (phydev->link) {
		adapter->boot_coma = 20;
		if (phydev->speed == SPEED_10) {
			u16 register18;

			et131x_mii_read(adapter, PHY_MPHY_CONTROL_REG,
					&register18);
			et131x_mii_write(adapter, phydev->mdio.addr,
					 PHY_MPHY_CONTROL_REG,
					 register18 | 0x4);
			et131x_mii_write(adapter, phydev->mdio.addr,
					 PHY_INDEX_REG, register18 | 0x8402);
			et131x_mii_write(adapter, phydev->mdio.addr,
					 PHY_DATA_REG, register18 | 511);
			et131x_mii_write(adapter, phydev->mdio.addr,
					 PHY_MPHY_CONTROL_REG, register18);
		}

		et1310_config_flow_control(adapter);

		if (phydev->speed == SPEED_1000 &&
		    adapter->registry_jumbo_packet > 2048) {
			u16 reg;

			et131x_mii_read(adapter, PHY_CONFIG, &reg);
			reg &= ~ET_PHY_CONFIG_TX_FIFO_DEPTH;
			reg |= ET_PHY_CONFIG_FIFO_DEPTH_32;
			et131x_mii_write(adapter, phydev->mdio.addr,
					 PHY_CONFIG, reg);
		}

		et131x_set_rx_dma_timer(adapter);
		et1310_config_mac_regs2(adapter);
	} else {
		adapter->boot_coma = 0;

		if (phydev->speed == SPEED_10) {
			u16 register18;

			et131x_mii_read(adapter, PHY_MPHY_CONTROL_REG,
					&register18);
			et131x_mii_write(adapter, phydev->mdio.addr,
					 PHY_MPHY_CONTROL_REG,
					 register18 | 0x4);
			et131x_mii_write(adapter, phydev->mdio.addr,
					 PHY_INDEX_REG, register18 | 0x8402);
			et131x_mii_write(adapter, phydev->mdio.addr,
					 PHY_DATA_REG, register18 | 511);
			et131x_mii_write(adapter, phydev->mdio.addr,
					 PHY_MPHY_CONTROL_REG, register18);
		}

		et131x_free_busy_send_packets(adapter);
		et131x_init_send(adapter);

		/* Bring the device back to the state it was during
		 * init prior to autonegotiation being complete. This
		 * way, when we get the auto-neg complete interrupt,
		 * we can complete init by calling config_mac_regs2.
		 */
		et131x_soft_reset(adapter);

		et131x_adapter_setup(adapter);

		et131x_disable_txrx(netdev);
		et131x_enable_txrx(netdev);
	}
}
static int et131x_mii_probe(struct net_device *netdev)
{
	struct et131x_adapter *adapter = netdev_priv(netdev);
	struct phy_device *phydev = NULL;

	phydev = phy_find_first(adapter->mii_bus);
	if (!phydev) {
		dev_err(&adapter->pdev->dev, "no PHY found\n");
		return -ENODEV;
	}

	phydev = phy_connect(netdev, phydev_name(phydev),
			     &et131x_adjust_link, PHY_INTERFACE_MODE_MII);

	if (IS_ERR(phydev)) {
		dev_err(&adapter->pdev->dev, "Could not attach to PHY\n");
		return PTR_ERR(phydev);
	}

	phydev->supported &= (SUPPORTED_10baseT_Half |
			      SUPPORTED_10baseT_Full |
			      SUPPORTED_100baseT_Half |
			      SUPPORTED_100baseT_Full |
			      SUPPORTED_Autoneg |
			      SUPPORTED_MII |
			      SUPPORTED_TP);

	if (adapter->pdev->device != ET131X_PCI_DEVICE_ID_FAST)
		phydev->supported |= SUPPORTED_1000baseT_Half |
				     SUPPORTED_1000baseT_Full;

	phydev->advertising = phydev->supported;
	phydev->autoneg = AUTONEG_ENABLE;
	adapter->phydev = phydev;

	phy_attached_info(phydev);

	return 0;
}
static struct et131x_adapter *et131x_adapter_init(struct net_device *netdev,
						  struct pci_dev *pdev)
{
	static const u8 default_mac[] = { 0x00, 0x05, 0x3d, 0x00, 0x02, 0x00 };

	struct et131x_adapter *adapter;

	/* Allocate private adapter struct and copy in relevant information */
	adapter = netdev_priv(netdev);
	adapter->pdev = pci_dev_get(pdev);
	adapter->netdev = netdev;

	/* Initialize spinlocks here */
	spin_lock_init(&adapter->tcb_send_qlock);
	spin_lock_init(&adapter->tcb_ready_qlock);
	spin_lock_init(&adapter->rcv_lock);

	adapter->registry_jumbo_packet = 1514;	/* 1514-9216 */

	ether_addr_copy(adapter->addr, default_mac);

	return adapter;
}
static void et131x_pci_remove(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct et131x_adapter *adapter = netdev_priv(netdev);

	unregister_netdev(netdev);
	netif_napi_del(&adapter->napi);
	phy_disconnect(adapter->phydev);
	mdiobus_unregister(adapter->mii_bus);
	mdiobus_free(adapter->mii_bus);

	et131x_adapter_memory_free(adapter);
	iounmap(adapter->regs);
	pci_dev_put(pdev);

	free_netdev(netdev);
	pci_release_regions(pdev);
	pci_disable_device(pdev);
}
static void et131x_up(struct net_device *netdev)
{
	struct et131x_adapter *adapter = netdev_priv(netdev);

	et131x_enable_txrx(netdev);
	phy_start(adapter->phydev);
}

static void et131x_down(struct net_device *netdev)
{
	struct et131x_adapter *adapter = netdev_priv(netdev);

	/* Save the timestamp for the TX watchdog, prevent a timeout */
	netdev->trans_start = jiffies;

	phy_stop(adapter->phydev);
	et131x_disable_txrx(netdev);
}
#ifdef CONFIG_PM_SLEEP
static int et131x_suspend(struct device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct net_device *netdev = pci_get_drvdata(pdev);

	if (netif_running(netdev)) {
		netif_device_detach(netdev);
		et131x_down(netdev);
		pci_save_state(pdev);
	}

	return 0;
}

static int et131x_resume(struct device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct net_device *netdev = pci_get_drvdata(pdev);

	if (netif_running(netdev)) {
		pci_restore_state(pdev);
		et131x_up(netdev);
		netif_device_attach(netdev);
	}

	return 0;
}
#endif

static SIMPLE_DEV_PM_OPS(et131x_pm_ops, et131x_suspend, et131x_resume);
static irqreturn_t et131x_isr(int irq, void *dev_id)
{
	bool handled = true;
	bool enable_interrupts = true;
	struct net_device *netdev = dev_id;
	struct et131x_adapter *adapter = netdev_priv(netdev);
	struct address_map __iomem *iomem = adapter->regs;
	struct rx_ring *rx_ring = &adapter->rx_ring;
	struct tx_ring *tx_ring = &adapter->tx_ring;
	u32 status;

	if (!netif_device_present(netdev)) {
		handled = false;
		enable_interrupts = false;
		goto out;
	}

	et131x_disable_interrupts(adapter);

	status = readl(&adapter->regs->global.int_status);

	if (adapter->flow == FLOW_TXONLY || adapter->flow == FLOW_BOTH)
		status &= ~INT_MASK_ENABLE;
	else
		status &= ~INT_MASK_ENABLE_NO_FLOW;

	/* Make sure this is our interrupt */
	if (!status) {
		handled = false;
		et131x_enable_interrupts(adapter);
		goto out;
	}

	/* This is our interrupt, so process accordingly */
	if (status & ET_INTR_WATCHDOG) {
		struct tcb *tcb = tx_ring->send_head;

		if (tcb)
			if (++tcb->stale > 1)
				status |= ET_INTR_TXDMA_ISR;

		if (rx_ring->unfinished_receives)
			status |= ET_INTR_RXDMA_XFR_DONE;
		else if (tcb == NULL)
			writel(0, &adapter->regs->global.watchdog_timer);

		status &= ~ET_INTR_WATCHDOG;
	}

	if (status & (ET_INTR_RXDMA_XFR_DONE | ET_INTR_TXDMA_ISR)) {
		enable_interrupts = false;
		napi_schedule(&adapter->napi);
	}

	status &= ~(ET_INTR_TXDMA_ISR | ET_INTR_RXDMA_XFR_DONE);

	if (!status)
		goto out;

	if (status & ET_INTR_TXDMA_ERR) {
		/* Following read also clears the register (COR) */
		u32 txdma_err = readl(&iomem->txdma.tx_dma_error);

		dev_warn(&adapter->pdev->dev,
			 "TXDMA_ERR interrupt, error = %d\n",
			 txdma_err);
	}

	if (status & (ET_INTR_RXDMA_FB_R0_LOW | ET_INTR_RXDMA_FB_R1_LOW)) {
		/* This indicates the number of unused buffers in RXDMA free
		 * buffer ring 0 is <= the limit you programmed. Free buffer
		 * resources need to be returned. Free buffers are consumed as
		 * packets are passed from the network to the host. The host
		 * becomes aware of the packets from the contents of the packet
		 * status ring. This ring is queried when the packet done
		 * interrupt occurs. Packets are then passed to the OS. When
		 * the OS is done with the packets the resources can be
		 * returned to the ET1310 for re-use. This interrupt is one
		 * method of returning resources.
		 */

		/* If the user has flow control on, then we will
		 * send a pause packet, otherwise just exit
		 */
		if (adapter->flow == FLOW_TXONLY || adapter->flow == FLOW_BOTH) {
			u32 pm_csr;

			/* Tell the device to send a pause packet via the back
			 * pressure register (bp req and bp xon/xoff)
			 */
			pm_csr = readl(&iomem->global.pm_csr);
			if (!et1310_in_phy_coma(adapter))
				writel(3, &iomem->txmac.bp_ctrl);
		}
	}

	/* Handle Packet Status Ring Low Interrupt */
	if (status & ET_INTR_RXDMA_STAT_LOW) {
		/* Same idea as with the two Free Buffer Rings. Packets going
		 * from the network to the host each consume a free buffer
		 * resource and a packet status resource. These resources are
		 * passed to the OS. When the OS is done with the resources,
		 * they need to be returned to the ET1310. This is one method
		 * of returning the resources.
		 */
	}

	if (status & ET_INTR_RXDMA_ERR) {
		/* The rxdma_error interrupt is sent when a time-out on a
		 * request issued by the JAGCore has occurred or a completion is
		 * returned with an un-successful status. In both cases the
		 * request is considered complete. The JAGCore will
		 * automatically re-try the request in question. Normally
		 * information on events like these are sent to the host using
		 * the "Advanced Error Reporting" capability. This interrupt is
		 * another way of getting similar information. The only thing
		 * required is to clear the interrupt by reading the ISR in the
		 * global resources. The JAGCore will do a re-try on the
		 * request. Normally you should never see this interrupt. If
		 * you start to see this interrupt occurring frequently then
		 * something bad has occurred. A reset might be the thing to do.
		 */
		dev_warn(&adapter->pdev->dev, "RxDMA_ERR interrupt, error %x\n",
			 readl(&iomem->txmac.tx_test));
	}

	/* Handle the Wake on LAN Event */
	if (status & ET_INTR_WOL) {
		/* This is a secondary interrupt for wake on LAN. The driver
		 * should never see this, if it does, something serious is
		 * wrong.
		 */
		dev_err(&adapter->pdev->dev, "WAKE_ON_LAN interrupt\n");
	}

	if (status & ET_INTR_TXMAC) {
		u32 err = readl(&iomem->txmac.err);

		/* When any of the errors occur and TXMAC generates an
		 * interrupt to report these errors, it usually means that
		 * TXMAC has detected an error in the data stream retrieved
		 * from the on-chip Tx Q. All of these errors are catastrophic
		 * and TXMAC won't be able to recover data when these errors
		 * occur. In a nutshell, the whole Tx path will have to be reset
		 * and re-configured afterwards.
		 */
		dev_warn(&adapter->pdev->dev, "TXMAC interrupt, error 0x%08x\n",
			 err);

		/* If we are debugging, we want to see this error, otherwise we
		 * just want the device to be reset and continue
		 */
	}

	if (status & ET_INTR_RXMAC) {
		/* These interrupts are catastrophic to the device, what we need
		 * to do is disable the interrupts and set the flag to cause us
		 * to reset so we can solve this issue.
		 */
		dev_warn(&adapter->pdev->dev,
			 "RXMAC interrupt, error 0x%08x. Requesting reset\n",
			 readl(&iomem->rxmac.err_reg));

		dev_warn(&adapter->pdev->dev,
			 "Enable 0x%08x, Diag 0x%08x\n",
			 readl(&iomem->rxmac.ctrl),
			 readl(&iomem->rxmac.rxq_diag));

		/* If we are debugging, we want to see this error, otherwise we
		 * just want the device to be reset and continue
		 */
	}

	if (status & ET_INTR_MAC_STAT) {
		/* This means at least one of the un-masked counters in the
		 * MAC_STAT block has rolled over. Use this to maintain the top,
		 * software managed bits of the counter(s).
		 */
		et1310_handle_macstat_interrupt(adapter);
	}

	if (status & ET_INTR_SLV_TIMEOUT) {
		/* This means a timeout has occurred on a read or write request
		 * to one of the JAGCore registers. The Global Resources block
		 * has terminated the request and on a read request, returned a
		 * "fake" value. The most likely reasons are: Bad Address or the
		 * addressed module is in a power-down state and can't respond.
		 */
	}

out:
	if (enable_interrupts)
		et131x_enable_interrupts(adapter);

	return IRQ_RETVAL(handled);
}
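
/* Editorial sketch (not built): the status masking at the top of the
 * ISR, restated. With Tx flow control active the free-buffer-ring-low
 * sources must stay visible so the handler can request a pause frame;
 * otherwise they are masked out along with the rest.
 */
#if 0
static u32 isr_status_keep_bits(int flow)
{
	return (flow == FLOW_TXONLY || flow == FLOW_BOTH)
		? ~INT_MASK_ENABLE		/* keep FB-low sources */
		: ~INT_MASK_ENABLE_NO_FLOW;	/* mask those out too */
}
#endif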
static int et131x_poll(struct napi_struct *napi, int budget)
{
	struct et131x_adapter *adapter =
		container_of(napi, struct et131x_adapter, napi);
	int work_done = et131x_handle_recv_pkts(adapter, budget);

	et131x_handle_send_pkts(adapter);

	if (work_done < budget) {
		napi_complete(&adapter->napi);
		et131x_enable_interrupts(adapter);
	}

	return work_done;
}
/* et131x_stats - Return the current device statistics */
static struct net_device_stats *et131x_stats(struct net_device *netdev)
{
	struct et131x_adapter *adapter = netdev_priv(netdev);
	struct net_device_stats *stats = &adapter->netdev->stats;
	struct ce_stats *devstat = &adapter->stats;

	stats->rx_errors = devstat->rx_length_errs +
			   devstat->rx_align_errs +
			   devstat->rx_crc_errs +
			   devstat->rx_code_violations +
			   devstat->rx_other_errs;
	stats->tx_errors = devstat->tx_max_pkt_errs;
	stats->multicast = devstat->multicast_pkts_rcvd;
	stats->collisions = devstat->tx_collisions;

	stats->rx_length_errors = devstat->rx_length_errs;
	stats->rx_over_errors = devstat->rx_overflows;
	stats->rx_crc_errors = devstat->rx_crc_errs;
	stats->rx_dropped = devstat->rcvd_pkts_dropped;

	/* NOTE: Not used, can't find analogous statistics */
	/* stats->rx_frame_errors     = devstat->; */
	/* stats->rx_fifo_errors      = devstat->; */
	/* stats->rx_missed_errors    = devstat->; */

	/* stats->tx_aborted_errors   = devstat->; */
	/* stats->tx_carrier_errors   = devstat->; */
	/* stats->tx_fifo_errors      = devstat->; */
	/* stats->tx_heartbeat_errors = devstat->; */
	/* stats->tx_window_errors    = devstat->; */

	return stats;
}
static int et131x_open(struct net_device *netdev)
{
	struct et131x_adapter *adapter = netdev_priv(netdev);
	struct pci_dev *pdev = adapter->pdev;
	unsigned int irq = pdev->irq;
	int result;

	/* Start the timer to track NIC errors */
	init_timer(&adapter->error_timer);
	adapter->error_timer.expires = jiffies +
		msecs_to_jiffies(TX_ERROR_PERIOD);
	adapter->error_timer.function = et131x_error_timer_handler;
	adapter->error_timer.data = (unsigned long)adapter;
	add_timer(&adapter->error_timer);

	result = request_irq(irq, et131x_isr,
			     IRQF_SHARED, netdev->name, netdev);
	if (result) {
		dev_err(&pdev->dev, "could not register IRQ %d\n", irq);
		return result;
	}

	adapter->flags |= FMP_ADAPTER_INTERRUPT_IN_USE;

	napi_enable(&adapter->napi);

	et131x_up(netdev);

	return result;
}

static int et131x_close(struct net_device *netdev)
{
	struct et131x_adapter *adapter = netdev_priv(netdev);

	et131x_down(netdev);
	napi_disable(&adapter->napi);

	adapter->flags &= ~FMP_ADAPTER_INTERRUPT_IN_USE;
	free_irq(adapter->pdev->irq, netdev);

	/* Stop the error timer */
	return del_timer_sync(&adapter->error_timer);
}
static int et131x_ioctl(struct net_device *netdev, struct ifreq *reqbuf,
			int cmd)
{
	struct et131x_adapter *adapter = netdev_priv(netdev);

	if (!adapter->phydev)
		return -EINVAL;

	return phy_mii_ioctl(adapter->phydev, reqbuf, cmd);
}
/* et131x_set_packet_filter - Configures the Rx Packet filtering */
static int et131x_set_packet_filter(struct et131x_adapter *adapter)
{
	int filter = adapter->packet_filter;
	u32 ctrl;
	u32 pf_ctrl;

	ctrl = readl(&adapter->regs->rxmac.ctrl);
	pf_ctrl = readl(&adapter->regs->rxmac.pf_ctrl);

	/* Default to disabled packet filtering */
	ctrl |= 0x04;

	/* Set us to be in promiscuous mode so we receive everything, this
	 * is also true when we get a packet filter of 0
	 */
	if ((filter & ET131X_PACKET_TYPE_PROMISCUOUS) || filter == 0)
		pf_ctrl &= ~7; /* Clear filter bits */
	else {
		/* Set us up with Multicast packet filtering. Three cases are
		 * possible - (1) we have a multi-cast list, (2) we receive ALL
		 * multicast entries or (3) we receive none.
		 */
		if (filter & ET131X_PACKET_TYPE_ALL_MULTICAST)
			pf_ctrl &= ~2; /* Multicast filter bit */
		else {
			et1310_setup_device_for_multicast(adapter);
			pf_ctrl |= 2;
			ctrl &= ~0x04;
		}

		/* Set us up with Unicast packet filtering */
		if (filter & ET131X_PACKET_TYPE_DIRECTED) {
			et1310_setup_device_for_unicast(adapter);
			pf_ctrl |= 4;
			ctrl &= ~0x04;
		}

		/* Set us up with Broadcast packet filtering */
		if (filter & ET131X_PACKET_TYPE_BROADCAST) {
			pf_ctrl |= 1; /* Broadcast filter bit */
			ctrl &= ~0x04;
		} else {
			pf_ctrl &= ~1;
		}

		/* Setup the receive mac configuration registers - Packet
		 * Filter control + the enable / disable for packet filter
		 * in the control reg.
		 */
		writel(pf_ctrl, &adapter->regs->rxmac.pf_ctrl);
		writel(ctrl, &adapter->regs->rxmac.ctrl);
	}
	return 0;
}
static void et131x_multicast(struct net_device *netdev)
{
	struct et131x_adapter *adapter = netdev_priv(netdev);
	int packet_filter;
	struct netdev_hw_addr *ha;
	int i;

	/* Before we modify the platform-independent filter flags, store them
	 * locally. This allows us to determine if anything's changed and if
	 * we even need to bother the hardware
	 */
	packet_filter = adapter->packet_filter;

	/* Clear the 'multicast' flag locally; because we only have a single
	 * flag to check multicast, and multiple multicast addresses can be
	 * set, this is the easiest way to determine if more than one
	 * multicast address is being set.
	 */
	packet_filter &= ~ET131X_PACKET_TYPE_MULTICAST;

	/* Check the net_device flags and set the device independent flags
	 * accordingly
	 */
	if (netdev->flags & IFF_PROMISC)
		adapter->packet_filter |= ET131X_PACKET_TYPE_PROMISCUOUS;
	else
		adapter->packet_filter &= ~ET131X_PACKET_TYPE_PROMISCUOUS;

	if ((netdev->flags & IFF_ALLMULTI) ||
	    (netdev_mc_count(netdev) > NIC_MAX_MCAST_LIST))
		adapter->packet_filter |= ET131X_PACKET_TYPE_ALL_MULTICAST;

	if (netdev_mc_count(netdev) < 1) {
		adapter->packet_filter &= ~ET131X_PACKET_TYPE_ALL_MULTICAST;
		adapter->packet_filter &= ~ET131X_PACKET_TYPE_MULTICAST;
	} else {
		adapter->packet_filter |= ET131X_PACKET_TYPE_MULTICAST;
	}

	/* Set values in the private adapter struct */
	i = 0;
	netdev_for_each_mc_addr(ha, netdev) {
		if (i == NIC_MAX_MCAST_LIST)
			break;
		ether_addr_copy(adapter->multicast_list[i++], ha->addr);
	}
	adapter->multicast_addr_count = i;

	/* Are the new flags different from the previous ones? If not, then no
	 * action is required
	 *
	 * NOTE - This block will always update the multicast_list with the
	 * hardware, even if the addresses aren't the same.
	 */
	if (packet_filter != adapter->packet_filter)
		et131x_set_packet_filter(adapter);
}
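
/* Editorial sketch (not built): the flag mapping performed above, in
 * one place. This hypothetical helper covers only the promiscuous and
 * multicast bits; the directed/broadcast bits are managed elsewhere.
 */
#if 0
static int mcast_filter_from_flags(struct net_device *netdev)
{
	int f = 0;

	if (netdev->flags & IFF_PROMISC)
		f |= ET131X_PACKET_TYPE_PROMISCUOUS;

	if ((netdev->flags & IFF_ALLMULTI) ||
	    netdev_mc_count(netdev) > NIC_MAX_MCAST_LIST)
		f |= ET131X_PACKET_TYPE_ALL_MULTICAST;
	else if (netdev_mc_count(netdev) >= 1)
		f |= ET131X_PACKET_TYPE_MULTICAST;

	return f;
}
#endif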
static netdev_tx_t et131x_tx(struct sk_buff *skb, struct net_device *netdev)
{
	struct et131x_adapter *adapter = netdev_priv(netdev);
	struct tx_ring *tx_ring = &adapter->tx_ring;

	/* stop the queue if it's getting full */
	if (tx_ring->used >= NUM_TCB - 1 && !netif_queue_stopped(netdev))
		netif_stop_queue(netdev);

	/* Save the timestamp for the TX timeout watchdog */
	netdev->trans_start = jiffies;

	/* TCB is not available */
	if (tx_ring->used >= NUM_TCB)
		goto drop_err;

	if ((adapter->flags & FMP_ADAPTER_FAIL_SEND_MASK) ||
	    !netif_carrier_ok(netdev))
		goto drop_err;

	if (send_packet(skb, adapter))
		goto drop_err;

	return NETDEV_TX_OK;

drop_err:
	dev_kfree_skb_any(skb);
	adapter->netdev->stats.tx_dropped++;
	return NETDEV_TX_OK;
}
/* et131x_tx_timeout - Timeout handler
 *
 * The handler called when a Tx request times out. The timeout period is
 * specified by the 'tx_timeo' element in the net_device structure (see
 * et131x_alloc_device() to see how this value is set).
 */
static void et131x_tx_timeout(struct net_device *netdev)
{
	struct et131x_adapter *adapter = netdev_priv(netdev);
	struct tx_ring *tx_ring = &adapter->tx_ring;
	struct tcb *tcb;
	unsigned long flags;

	/* If the device is closed, ignore the timeout */
	if (!(adapter->flags & FMP_ADAPTER_INTERRUPT_IN_USE))
		return;

	/* Any nonrecoverable hardware error?
	 * Checks adapter->flags for any failure in phy reading
	 */
	if (adapter->flags & FMP_ADAPTER_NON_RECOVER_ERROR)
		return;

	/* Hardware failure? */
	if (adapter->flags & FMP_ADAPTER_HARDWARE_ERROR) {
		dev_err(&adapter->pdev->dev, "hardware error - reset\n");
		return;
	}

	/* Is send stuck? */
	spin_lock_irqsave(&adapter->tcb_send_qlock, flags);
	tcb = tx_ring->send_head;
	spin_unlock_irqrestore(&adapter->tcb_send_qlock, flags);

	if (tcb) {
		tcb->count++;

		if (tcb->count > NIC_SEND_HANG_THRESHOLD) {
			dev_warn(&adapter->pdev->dev,
				 "Send stuck - reset. tcb->WrIndex %x\n",
				 tcb->index);

			adapter->netdev->stats.tx_errors++;

			/* perform reset of tx/rx */
			et131x_disable_txrx(netdev);
			et131x_enable_txrx(netdev);
		}
	}
}
static int et131x_change_mtu(struct net_device *netdev, int new_mtu)
{
	int result = 0;
	struct et131x_adapter *adapter = netdev_priv(netdev);

	if (new_mtu < 64 || new_mtu > 9216)
		return -EINVAL;

	et131x_disable_txrx(netdev);

	netdev->mtu = new_mtu;

	et131x_adapter_memory_free(adapter);

	/* Set the config parameter for Jumbo Packet support */
	adapter->registry_jumbo_packet = new_mtu + 14;
	et131x_soft_reset(adapter);

	result = et131x_adapter_memory_alloc(adapter);
	if (result != 0) {
		dev_warn(&adapter->pdev->dev,
			 "Change MTU failed; couldn't re-alloc DMA memory\n");
		return result;
	}

	et131x_init_send(adapter);
	et131x_hwaddr_init(adapter);
	ether_addr_copy(netdev->dev_addr, adapter->addr);

	/* Init the device with the new settings */
	et131x_adapter_setup(adapter);
	et131x_enable_txrx(netdev);

	return result;
}
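
/* Map the generic net_device callbacks onto the driver's handlers */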
static const struct net_device_ops et131x_netdev_ops = {
	.ndo_open		= et131x_open,
	.ndo_stop		= et131x_close,
	.ndo_start_xmit		= et131x_tx,
	.ndo_set_rx_mode	= et131x_multicast,
	.ndo_tx_timeout		= et131x_tx_timeout,
	.ndo_change_mtu		= et131x_change_mtu,
	.ndo_set_mac_address	= eth_mac_addr,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_get_stats		= et131x_stats,
	.ndo_do_ioctl		= et131x_ioctl,
};
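
/* et131x_pci_setup - PCI probe routine.
 *
 * Called once per device by the PCI core when an ET131x is found. Brings
 * the device up in stages: enable the PCI device and claim its regions,
 * pick a DMA mask, allocate the net_device/adapter pair, map the register
 * BAR, allocate the DMA rings, register an MDIO bus and attach the PHY,
 * and finally register the net_device. Each stage unwinds through the
 * error labels at the bottom on failure.
 */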
static int et131x_pci_setup(struct pci_dev *pdev,
			    const struct pci_device_id *ent)
{
	struct net_device *netdev;
	struct et131x_adapter *adapter;
	int rc;

	rc = pci_enable_device(pdev);
	if (rc < 0) {
		dev_err(&pdev->dev, "pci_enable_device() failed\n");
		goto out;
	}

	/* Perform some basic PCI checks */
	if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
		dev_err(&pdev->dev, "Can't find PCI device's base address\n");
		rc = -ENODEV;
		goto err_disable;
	}

	rc = pci_request_regions(pdev, DRIVER_NAME);
	if (rc < 0) {
		dev_err(&pdev->dev, "Can't get PCI resources\n");
		goto err_disable;
	}

	pci_set_master(pdev);

	/* Check the DMA addressing support of this device; prefer 64-bit,
	 * fall back to 32-bit
	 */
	if (dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)) &&
	    dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32))) {
		dev_err(&pdev->dev, "No usable DMA addressing method\n");
		rc = -EIO;
		goto err_release_res;
	}
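
	/* Allocate the net_device with the adapter private area embedded;
	 * netdev_priv() on the resulting netdev returns the
	 * struct et131x_adapter.
	 */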
	netdev = alloc_etherdev(sizeof(struct et131x_adapter));
	if (!netdev) {
		dev_err(&pdev->dev, "Couldn't alloc netdev struct\n");
		rc = -ENOMEM;
		goto err_release_res;
	}

	netdev->watchdog_timeo = ET131X_TX_TIMEOUT;
	netdev->netdev_ops = &et131x_netdev_ops;

	SET_NETDEV_DEV(netdev, &pdev->dev);
	netdev->ethtool_ops = &et131x_ethtool_ops;
	adapter = et131x_adapter_init(netdev, pdev);

	rc = et131x_pci_init(adapter, pdev);
	if (rc < 0)
		goto err_free_dev;

	/* Map the bus-relative registers to system virtual memory */
	adapter->regs = pci_ioremap_bar(pdev, 0);
	if (!adapter->regs) {
		dev_err(&pdev->dev, "Cannot map device registers\n");
		rc = -ENOMEM;
		goto err_free_dev;
	}

	/* If Phy COMA mode was enabled when we went down, disable it here. */
	writel(ET_PMCSR_INIT, &adapter->regs->global.pm_csr);

	et131x_soft_reset(adapter);
	et131x_disable_interrupts(adapter);

	rc = et131x_adapter_memory_alloc(adapter);
	if (rc < 0) {
		dev_err(&pdev->dev, "Could not alloc adapter memory (DMA)\n");
		goto err_iounmap;
	}

	et131x_init_send(adapter);

	netif_napi_add(netdev, &adapter->napi, et131x_poll, 64);

	ether_addr_copy(netdev->dev_addr, adapter->addr);
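
	/* Set up a dedicated MDIO bus so the kernel PHY layer can manage
	 * the NIC's PHY through the MAC's MDIO read/write hooks. rc is
	 * preset to -ENOMEM so the allocation failure path below reports
	 * the right error code.
	 */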
	rc = -ENOMEM;

	adapter->mii_bus = mdiobus_alloc();
	if (!adapter->mii_bus) {
		dev_err(&pdev->dev, "Alloc of mii_bus struct failed\n");
		goto err_mem_free;
	}

	adapter->mii_bus->name = "et131x_eth_mii";
	snprintf(adapter->mii_bus->id, MII_BUS_ID_SIZE, "%x",
		 (adapter->pdev->bus->number << 8) | adapter->pdev->devfn);
	adapter->mii_bus->priv = netdev;
	adapter->mii_bus->read = et131x_mdio_read;
	adapter->mii_bus->write = et131x_mdio_write;

	rc = mdiobus_register(adapter->mii_bus);
	if (rc < 0) {
		dev_err(&pdev->dev, "failed to register MII bus\n");
		goto err_mdio_free;
	}

	rc = et131x_mii_probe(netdev);
	if (rc < 0) {
		dev_err(&pdev->dev, "failed to probe MII bus\n");
		goto err_mdio_unregister;
	}

	et131x_adapter_setup(adapter);
	/* Init variable for counting how long we do not have link status */
	adapter->boot_coma = 0;
	et1310_disable_phy_coma(adapter);

	/* We can enable interrupts now
	 *
	 * NOTE - Because registration of interrupt handler is done in the
	 *        device's open(), defer enabling device interrupts to that
	 *        point
	 */

	rc = register_netdev(netdev);
	if (rc < 0) {
		dev_err(&pdev->dev, "register_netdev() failed\n");
		goto err_phy_disconnect;
	}

	/* Register the net_device struct with the PCI subsystem. Save a copy
	 * of the PCI config space for this device now that the device has
	 * been initialized, just in case it needs to be quickly restored.
	 */
	pci_set_drvdata(pdev, netdev);

out:
	return rc;
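
	/* Error unwind: each label releases what the corresponding setup
	 * stage acquired, falling through in reverse order of acquisition
	 * until the PCI device itself is disabled.
	 */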
err_phy_disconnect:
	phy_disconnect(adapter->phydev);
err_mdio_unregister:
	mdiobus_unregister(adapter->mii_bus);
err_mdio_free:
	mdiobus_free(adapter->mii_bus);
err_mem_free:
	et131x_adapter_memory_free(adapter);
err_iounmap:
	iounmap(adapter->regs);
err_free_dev:
	pci_dev_put(pdev);
	free_netdev(netdev);
err_release_res:
	pci_release_regions(pdev);
err_disable:
	pci_disable_device(pdev);
	goto out;
}
static const struct pci_device_id et131x_pci_table[] = {
	{ PCI_VDEVICE(ATT, ET131X_PCI_DEVICE_ID_GIG), 0UL},
	{ PCI_VDEVICE(ATT, ET131X_PCI_DEVICE_ID_FAST), 0UL},
	{0,}
};
MODULE_DEVICE_TABLE(pci, et131x_pci_table);
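
/* The ID table above is exported via MODULE_DEVICE_TABLE so module
 * autoloading (modalias matching) can bind this driver when a matching
 * device appears.
 */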
static struct pci_driver et131x_driver = {
	.name		= DRIVER_NAME,
	.id_table	= et131x_pci_table,
	.probe		= et131x_pci_setup,
	.remove		= et131x_pci_remove,
	.driver.pm	= &et131x_pm_ops,
};
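
/* module_pci_driver() below expands to the module_init()/module_exit()
 * boilerplate that registers and unregisters et131x_driver with the PCI
 * core at module load and unload.
 */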
module_pci_driver(et131x_driver);