/*
 * drivers/net/gianfar.c
 *
 * Gianfar Ethernet Driver
 * This driver is designed for the non-CPM ethernet controllers
 * on the 85xx and 83xx family of integrated processors
 * Based on 8260_io/fcc_enet.c
 *
 * Maintainer: Kumar Gala
 *
 * Copyright (c) 2002-2006 Freescale Semiconductor, Inc.
 * Copyright (c) 2007 MontaVista Software, Inc.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or (at your
 * option) any later version.
 *
 * Gianfar: AKA Lambda Draconis, "Dragon"
 *
 * The driver is initialized through platform_device.  Structures which
 * define the configuration needed by the board are defined in a
 * board structure in arch/ppc/platforms (though I do not
 * discount the possibility that other architectures could one
 * day be supported).
 *
 * The Gianfar Ethernet Controller uses a ring of buffer
 * descriptors.  The beginning is indicated by a register
 * pointing to the physical address of the start of the ring.
 * The end is determined by a "wrap" bit being set in the
 * last descriptor of the ring.
 *
 * When a packet is received, the RXF bit in the
 * IEVENT register is set, triggering an interrupt when the
 * corresponding bit in the IMASK register is also set (if
 * interrupt coalescing is active, then the interrupt may not
 * happen immediately, but will wait until either a set number
 * of frames or amount of time have passed).  In NAPI, the
 * interrupt handler will signal there is work to be done, and
 * exit.  This method will start at the last known empty
 * descriptor, and process every subsequent descriptor until there
 * are none left with data (NAPI will stop after a set number of
 * packets to give time to other tasks, but will eventually
 * process all the packets).  The data arrives inside a
 * pre-allocated skb, and so after the skb is passed up to the
 * stack, a new skb must be allocated, and the address field in
 * the buffer descriptor must be updated to indicate this new
 * skb.
 *
 * When the kernel requests that a packet be transmitted, the
 * driver starts where it left off last time, and points the
 * descriptor at the buffer which was passed in.  The driver
 * then informs the DMA engine that there are packets ready to
 * be transmitted.  Once the controller is finished transmitting
 * the packet, an interrupt may be triggered (under the same
 * conditions as for reception, but depending on the TXF bit).
 * The driver then cleans up the buffer.
 */
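
/*
 * A minimal sketch of the descriptor-ring traversal described above.
 * This is illustrative only: the types and flag values below are
 * simplified stand-ins, not the real struct txbd8/rxbd8 layout or the
 * hardware bit definitions.
 */
#if 0	/* illustration, not built */
struct demo_bd {
	u16 status;		/* READY/EMPTY and WRAP flags live here */
	u16 length;
	u32 bufPtr;		/* physical address of the data buffer */
};

#define DEMO_BD_WRAP	0x2000

/* Advance to the next descriptor; the WRAP flag on the last BD sends
 * us back to the base of the ring, so no modulo is needed. */
static struct demo_bd *demo_next_bd(struct demo_bd *bdp,
		struct demo_bd *base)
{
	return (bdp->status & DEMO_BD_WRAP) ? base : bdp + 1;
}
#endif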

#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/unistd.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/if_vlan.h>
#include <linux/spinlock.h>
#include <linux/platform_device.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/module.h>
#include <linux/dma-mapping.h>
#include <linux/crc32.h>
#include <linux/mii.h>
#include <linux/phy.h>

#include <asm/uaccess.h>

#include "gianfar_mii.h"

#define TX_TIMEOUT      (1*HZ)
#undef BRIEF_GFAR_ERRORS
#undef VERBOSE_GFAR_ERRORS

const char gfar_driver_name[] = "Gianfar Ethernet";
const char gfar_driver_version[] = "1.3";

static int gfar_enet_open(struct net_device *dev);
static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev);
static void gfar_reset_task(struct work_struct *work);
static void gfar_timeout(struct net_device *dev);
static int gfar_close(struct net_device *dev);
struct sk_buff *gfar_new_skb(struct net_device *dev);
static void gfar_new_rxbdp(struct net_device *dev, struct rxbd8 *bdp,
		struct sk_buff *skb);
static int gfar_set_mac_address(struct net_device *dev);
static int gfar_change_mtu(struct net_device *dev, int new_mtu);
static irqreturn_t gfar_error(int irq, void *dev_id);
static irqreturn_t gfar_transmit(int irq, void *dev_id);
static irqreturn_t gfar_interrupt(int irq, void *dev_id);
static void adjust_link(struct net_device *dev);
static void init_registers(struct net_device *dev);
static int init_phy(struct net_device *dev);
static int gfar_probe(struct platform_device *pdev);
static int gfar_remove(struct platform_device *pdev);
static void free_skb_resources(struct gfar_private *priv);
static void gfar_set_multi(struct net_device *dev);
static void gfar_set_hash_for_addr(struct net_device *dev, u8 *addr);
static void gfar_configure_serdes(struct net_device *dev);
static int gfar_poll(struct napi_struct *napi, int budget);
#ifdef CONFIG_NET_POLL_CONTROLLER
static void gfar_netpoll(struct net_device *dev);
#endif
int gfar_clean_rx_ring(struct net_device *dev, int rx_work_limit);
static int gfar_clean_tx_ring(struct net_device *dev);
static int gfar_process_frame(struct net_device *dev, struct sk_buff *skb, int length);
static void gfar_vlan_rx_register(struct net_device *netdev,
		struct vlan_group *grp);
void gfar_halt(struct net_device *dev);
static void gfar_halt_nodisable(struct net_device *dev);
void gfar_start(struct net_device *dev);
static void gfar_clear_exact_match(struct net_device *dev);
static void gfar_set_mac_for_addr(struct net_device *dev, int num, u8 *addr);

extern const struct ethtool_ops gfar_ethtool_ops;

MODULE_AUTHOR("Freescale Semiconductor, Inc");
MODULE_DESCRIPTION("Gianfar Ethernet Driver");
MODULE_LICENSE("GPL");

/* Returns 1 if incoming frames use an FCB */
static inline int gfar_uses_fcb(struct gfar_private *priv)
{
	return (priv->vlan_enable || priv->rx_csum_enable);
}

/* Set up the ethernet device structure, private data,
 * and anything else we need before we start */
static int gfar_probe(struct platform_device *pdev)
{
	u32 tempval;
	struct net_device *dev = NULL;
	struct gfar_private *priv = NULL;
	struct gianfar_platform_data *einfo;
	struct resource *r;
	int err = 0, irq;
	DECLARE_MAC_BUF(mac);

	einfo = (struct gianfar_platform_data *) pdev->dev.platform_data;

	if (NULL == einfo) {
		printk(KERN_ERR "gfar %d: Missing additional data!\n",
		       pdev->id);

		return -ENODEV;
	}

	/* Create an ethernet device instance */
	dev = alloc_etherdev(sizeof (*priv));

	if (NULL == dev)
		return -ENOMEM;

	priv = netdev_priv(dev);

	/* Set the info in the priv to the current info */
	priv->einfo = einfo;

	/* fill out IRQ fields */
	if (einfo->device_flags & FSL_GIANFAR_DEV_HAS_MULTI_INTR) {
		irq = platform_get_irq_byname(pdev, "tx");
		priv->interruptTransmit = irq;

		irq = platform_get_irq_byname(pdev, "rx");
		priv->interruptReceive = irq;

		irq = platform_get_irq_byname(pdev, "error");
		priv->interruptError = irq;
	} else {
		irq = platform_get_irq(pdev, 0);
		priv->interruptTransmit = irq;
	}

	/* get a pointer to the register memory */
	r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	priv->regs = ioremap(r->start, sizeof (struct gfar));

	if (NULL == priv->regs) {
		err = -ENOMEM;
		goto regs_fail;
	}

	spin_lock_init(&priv->txlock);
	spin_lock_init(&priv->rxlock);
	spin_lock_init(&priv->bflock);
	INIT_WORK(&priv->reset_task, gfar_reset_task);

	platform_set_drvdata(pdev, dev);

	/* Stop the DMA engine now, in case it was running before */
	/* (The firmware could have used it, and left it running). */
	/* To do this, we write Graceful Receive Stop and Graceful */
	/* Transmit Stop, and then wait until the corresponding bits */
	/* in IEVENT indicate the stops have completed. */
	tempval = gfar_read(&priv->regs->dmactrl);
	tempval &= ~(DMACTRL_GRS | DMACTRL_GTS);
	gfar_write(&priv->regs->dmactrl, tempval);

	tempval = gfar_read(&priv->regs->dmactrl);
	tempval |= (DMACTRL_GRS | DMACTRL_GTS);
	gfar_write(&priv->regs->dmactrl, tempval);

	while (!(gfar_read(&priv->regs->ievent) & (IEVENT_GRSC | IEVENT_GTSC)))
		cpu_relax();

	/* Reset MAC layer */
	gfar_write(&priv->regs->maccfg1, MACCFG1_SOFT_RESET);

	tempval = (MACCFG1_TX_FLOW | MACCFG1_RX_FLOW);
	gfar_write(&priv->regs->maccfg1, tempval);

	/* Initialize MACCFG2. */
	gfar_write(&priv->regs->maccfg2, MACCFG2_INIT_SETTINGS);

	/* Initialize ECNTRL */
	gfar_write(&priv->regs->ecntrl, ECNTRL_INIT_SETTINGS);

	/* Copy the station address into the dev structure */
	memcpy(dev->dev_addr, einfo->mac_addr, MAC_ADDR_LEN);

	/* Set the dev->base_addr to the gfar reg region */
	dev->base_addr = (unsigned long) (priv->regs);

	SET_NETDEV_DEV(dev, &pdev->dev);

	/* Fill in the dev structure */
	dev->open = gfar_enet_open;
	dev->hard_start_xmit = gfar_start_xmit;
	dev->tx_timeout = gfar_timeout;
	dev->watchdog_timeo = TX_TIMEOUT;
	netif_napi_add(dev, &priv->napi, gfar_poll, GFAR_DEV_WEIGHT);
#ifdef CONFIG_NET_POLL_CONTROLLER
	dev->poll_controller = gfar_netpoll;
#endif
	dev->stop = gfar_close;
	dev->change_mtu = gfar_change_mtu;
	dev->set_multicast_list = gfar_set_multi;

	dev->ethtool_ops = &gfar_ethtool_ops;

	if (priv->einfo->device_flags & FSL_GIANFAR_DEV_HAS_CSUM) {
		priv->rx_csum_enable = 1;
		dev->features |= NETIF_F_IP_CSUM;
	} else
		priv->rx_csum_enable = 0;

	if (priv->einfo->device_flags & FSL_GIANFAR_DEV_HAS_VLAN) {
		dev->vlan_rx_register = gfar_vlan_rx_register;

		dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;

		priv->vlan_enable = 1;
	}

	if (priv->einfo->device_flags & FSL_GIANFAR_DEV_HAS_EXTENDED_HASH) {
		priv->extended_hash = 1;
		priv->hash_width = 9;

		priv->hash_regs[0] = &priv->regs->igaddr0;
		priv->hash_regs[1] = &priv->regs->igaddr1;
		priv->hash_regs[2] = &priv->regs->igaddr2;
		priv->hash_regs[3] = &priv->regs->igaddr3;
		priv->hash_regs[4] = &priv->regs->igaddr4;
		priv->hash_regs[5] = &priv->regs->igaddr5;
		priv->hash_regs[6] = &priv->regs->igaddr6;
		priv->hash_regs[7] = &priv->regs->igaddr7;
		priv->hash_regs[8] = &priv->regs->gaddr0;
		priv->hash_regs[9] = &priv->regs->gaddr1;
		priv->hash_regs[10] = &priv->regs->gaddr2;
		priv->hash_regs[11] = &priv->regs->gaddr3;
		priv->hash_regs[12] = &priv->regs->gaddr4;
		priv->hash_regs[13] = &priv->regs->gaddr5;
		priv->hash_regs[14] = &priv->regs->gaddr6;
		priv->hash_regs[15] = &priv->regs->gaddr7;
	} else {
		priv->extended_hash = 0;
		priv->hash_width = 8;

		priv->hash_regs[0] = &priv->regs->gaddr0;
		priv->hash_regs[1] = &priv->regs->gaddr1;
		priv->hash_regs[2] = &priv->regs->gaddr2;
		priv->hash_regs[3] = &priv->regs->gaddr3;
		priv->hash_regs[4] = &priv->regs->gaddr4;
		priv->hash_regs[5] = &priv->regs->gaddr5;
		priv->hash_regs[6] = &priv->regs->gaddr6;
		priv->hash_regs[7] = &priv->regs->gaddr7;
	}

	if (priv->einfo->device_flags & FSL_GIANFAR_DEV_HAS_PADDING)
		priv->padding = DEFAULT_PADDING;
	else
		priv->padding = 0;

	if (dev->features & NETIF_F_IP_CSUM)
		dev->hard_header_len += GMAC_FCB_LEN;

	priv->rx_buffer_size = DEFAULT_RX_BUFFER_SIZE;
	priv->tx_ring_size = DEFAULT_TX_RING_SIZE;
	priv->rx_ring_size = DEFAULT_RX_RING_SIZE;

	priv->txcoalescing = DEFAULT_TX_COALESCE;
	priv->txcount = DEFAULT_TXCOUNT;
	priv->txtime = DEFAULT_TXTIME;
	priv->rxcoalescing = DEFAULT_RX_COALESCE;
	priv->rxcount = DEFAULT_RXCOUNT;
	priv->rxtime = DEFAULT_RXTIME;

	/* Enable most messages by default */
	priv->msg_enable = (NETIF_MSG_IFUP << 1) - 1;

	/* Carrier starts down, phylib will bring it up */
	netif_carrier_off(dev);

	err = register_netdev(dev);

	if (err) {
		printk(KERN_ERR "%s: Cannot register net device, aborting.\n",
		       dev->name);
		goto register_fail;
	}

	/* Create all the sysfs files */
	gfar_init_sysfs(dev);

	/* Print out the device info */
	printk(KERN_INFO DEVICE_NAME "%s\n",
	       dev->name, print_mac(mac, dev->dev_addr));

	/* Even more device info helps when determining which kernel */
	/* provided which set of benchmarks. */
	printk(KERN_INFO "%s: Running with NAPI enabled\n", dev->name);
	printk(KERN_INFO "%s: %d/%d RX/TX BD ring size\n",
	       dev->name, priv->rx_ring_size, priv->tx_ring_size);

	return 0;

register_fail:
	iounmap(priv->regs);
regs_fail:
	free_netdev(dev);
	return err;
}

static int gfar_remove(struct platform_device *pdev)
{
	struct net_device *dev = platform_get_drvdata(pdev);
	struct gfar_private *priv = netdev_priv(dev);

	platform_set_drvdata(pdev, NULL);

	iounmap(priv->regs);
	free_netdev(dev);

	return 0;
}

#ifdef CONFIG_PM
static int gfar_suspend(struct platform_device *pdev, pm_message_t state)
{
	struct net_device *dev = platform_get_drvdata(pdev);
	struct gfar_private *priv = netdev_priv(dev);
	unsigned long flags;
	u32 tempval;
	int magic_packet = priv->wol_en &&
		(priv->einfo->device_flags & FSL_GIANFAR_DEV_HAS_MAGIC_PACKET);

	netif_device_detach(dev);

	if (netif_running(dev)) {
		spin_lock_irqsave(&priv->txlock, flags);
		spin_lock(&priv->rxlock);

		gfar_halt_nodisable(dev);

		/* Disable Tx, and Rx if wake-on-LAN is disabled. */
		tempval = gfar_read(&priv->regs->maccfg1);

		tempval &= ~MACCFG1_TX_EN;

		if (!magic_packet)
			tempval &= ~MACCFG1_RX_EN;

		gfar_write(&priv->regs->maccfg1, tempval);

		spin_unlock(&priv->rxlock);
		spin_unlock_irqrestore(&priv->txlock, flags);

		napi_disable(&priv->napi);

		if (magic_packet) {
			/* Enable interrupt on Magic Packet */
			gfar_write(&priv->regs->imask, IMASK_MAG);

			/* Enable Magic Packet mode */
			tempval = gfar_read(&priv->regs->maccfg2);
			tempval |= MACCFG2_MPEN;
			gfar_write(&priv->regs->maccfg2, tempval);
		} else {
			phy_stop(priv->phydev);
		}
	}

	return 0;
}

static int gfar_resume(struct platform_device *pdev)
{
	struct net_device *dev = platform_get_drvdata(pdev);
	struct gfar_private *priv = netdev_priv(dev);
	unsigned long flags;
	u32 tempval;
	int magic_packet = priv->wol_en &&
		(priv->einfo->device_flags & FSL_GIANFAR_DEV_HAS_MAGIC_PACKET);

	if (!netif_running(dev)) {
		netif_device_attach(dev);
		return 0;
	}

	if (!magic_packet && priv->phydev)
		phy_start(priv->phydev);

	/* Disable Magic Packet mode, in case something
	 * else woke us up.
	 */
	spin_lock_irqsave(&priv->txlock, flags);
	spin_lock(&priv->rxlock);

	tempval = gfar_read(&priv->regs->maccfg2);
	tempval &= ~MACCFG2_MPEN;
	gfar_write(&priv->regs->maccfg2, tempval);

	gfar_start(dev);

	spin_unlock(&priv->rxlock);
	spin_unlock_irqrestore(&priv->txlock, flags);

	netif_device_attach(dev);

	napi_enable(&priv->napi);

	return 0;
}
#else
#define gfar_suspend NULL
#define gfar_resume NULL
#endif

/* Reads the controller's registers to determine what interface
 * connects it to the PHY.
 */
static phy_interface_t gfar_get_interface(struct net_device *dev)
{
	struct gfar_private *priv = netdev_priv(dev);
	u32 ecntrl = gfar_read(&priv->regs->ecntrl);

	if (ecntrl & ECNTRL_SGMII_MODE)
		return PHY_INTERFACE_MODE_SGMII;

	if (ecntrl & ECNTRL_TBI_MODE) {
		if (ecntrl & ECNTRL_REDUCED_MODE)
			return PHY_INTERFACE_MODE_RTBI;
		else
			return PHY_INTERFACE_MODE_TBI;
	}

	if (ecntrl & ECNTRL_REDUCED_MODE) {
		if (ecntrl & ECNTRL_REDUCED_MII_MODE)
			return PHY_INTERFACE_MODE_RMII;
		else {
			phy_interface_t interface = priv->einfo->interface;

			/*
			 * This isn't autodetected right now, so it must
			 * be set by the device tree or platform code.
			 */
			if (interface == PHY_INTERFACE_MODE_RGMII_ID)
				return PHY_INTERFACE_MODE_RGMII_ID;

			return PHY_INTERFACE_MODE_RGMII;
		}
	}

	if (priv->einfo->device_flags & FSL_GIANFAR_DEV_HAS_GIGABIT)
		return PHY_INTERFACE_MODE_GMII;

	return PHY_INTERFACE_MODE_MII;
}
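
/* Summary of the ECNTRL decoding above, as a reading aid (no new logic):
 * SGMII_MODE -> SGMII; TBI_MODE -> RTBI if REDUCED_MODE, else TBI;
 * REDUCED_MODE alone -> RMII if REDUCED_MII_MODE, otherwise RGMII
 * (or RGMII_ID when the platform data says so); failing all of that,
 * GMII on gigabit-capable parts and plain MII on everything else. */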

/* Initializes driver's PHY state, and attaches to the PHY.
 * Returns 0 on success.
 */
static int init_phy(struct net_device *dev)
{
	struct gfar_private *priv = netdev_priv(dev);
	uint gigabit_support =
		priv->einfo->device_flags & FSL_GIANFAR_DEV_HAS_GIGABIT ?
		SUPPORTED_1000baseT_Full : 0;
	struct phy_device *phydev;
	char phy_id[BUS_ID_SIZE];
	phy_interface_t interface;

	priv->oldlink = 0;
	priv->oldspeed = 0;
	priv->oldduplex = -1;

	snprintf(phy_id, BUS_ID_SIZE, PHY_ID_FMT, priv->einfo->bus_id, priv->einfo->phy_id);

	interface = gfar_get_interface(dev);

	phydev = phy_connect(dev, phy_id, &adjust_link, 0, interface);

	if (interface == PHY_INTERFACE_MODE_SGMII)
		gfar_configure_serdes(dev);

	if (IS_ERR(phydev)) {
		printk(KERN_ERR "%s: Could not attach to PHY\n", dev->name);
		return PTR_ERR(phydev);
	}

	/* Remove any features not supported by the controller */
	phydev->supported &= (GFAR_SUPPORTED | gigabit_support);
	phydev->advertising = phydev->supported;

	priv->phydev = phydev;

	return 0;
}

/*
 * Initialize TBI PHY interface for communicating with the
 * SERDES lynx PHY on the chip.  We communicate with this PHY
 * through the MDIO bus on each controller, treating it as a
 * "normal" PHY at the address found in the TBIPA register.  We assume
 * that the TBIPA register is valid.  Either the MDIO bus code will set
 * it to a value that doesn't conflict with other PHYs on the bus, or the
 * value doesn't matter, as there are no other PHYs on the bus.
 */
static void gfar_configure_serdes(struct net_device *dev)
{
	struct gfar_private *priv = netdev_priv(dev);
	struct gfar_mii __iomem *regs =
			(void __iomem *)&priv->regs->gfar_mii_regs;
	int tbipa = gfar_read(&priv->regs->tbipa);

	/* Single clk mode, mii mode off (for serdes communication) */
	gfar_local_mdio_write(regs, tbipa, MII_TBICON, TBICON_CLK_SELECT);

	gfar_local_mdio_write(regs, tbipa, MII_ADVERTISE,
			ADVERTISE_1000XFULL | ADVERTISE_1000XPAUSE |
			ADVERTISE_1000XPSE_ASYM);

	gfar_local_mdio_write(regs, tbipa, MII_BMCR, BMCR_ANENABLE |
			BMCR_ANRESTART | BMCR_FULLDPLX | BMCR_SPEED1000);
}

static void init_registers(struct net_device *dev)
{
	struct gfar_private *priv = netdev_priv(dev);

	gfar_write(&priv->regs->ievent, IEVENT_INIT_CLEAR);

	/* Initialize IMASK */
	gfar_write(&priv->regs->imask, IMASK_INIT_CLEAR);

	/* Init hash registers to zero */
	gfar_write(&priv->regs->igaddr0, 0);
	gfar_write(&priv->regs->igaddr1, 0);
	gfar_write(&priv->regs->igaddr2, 0);
	gfar_write(&priv->regs->igaddr3, 0);
	gfar_write(&priv->regs->igaddr4, 0);
	gfar_write(&priv->regs->igaddr5, 0);
	gfar_write(&priv->regs->igaddr6, 0);
	gfar_write(&priv->regs->igaddr7, 0);

	gfar_write(&priv->regs->gaddr0, 0);
	gfar_write(&priv->regs->gaddr1, 0);
	gfar_write(&priv->regs->gaddr2, 0);
	gfar_write(&priv->regs->gaddr3, 0);
	gfar_write(&priv->regs->gaddr4, 0);
	gfar_write(&priv->regs->gaddr5, 0);
	gfar_write(&priv->regs->gaddr6, 0);
	gfar_write(&priv->regs->gaddr7, 0);

	/* Zero out the rmon mib registers if it has them */
	if (priv->einfo->device_flags & FSL_GIANFAR_DEV_HAS_RMON) {
		memset_io(&(priv->regs->rmon), 0, sizeof (struct rmon_mib));

		/* Mask off the CAM interrupts */
		gfar_write(&priv->regs->rmon.cam1, 0xffffffff);
		gfar_write(&priv->regs->rmon.cam2, 0xffffffff);
	}

	/* Initialize the max receive buffer length */
	gfar_write(&priv->regs->mrblr, priv->rx_buffer_size);

	/* Initialize the Minimum Frame Length Register */
	gfar_write(&priv->regs->minflr, MINFLR_INIT_SETTINGS);
}

/* Halt the receive and transmit queues */
static void gfar_halt_nodisable(struct net_device *dev)
{
	struct gfar_private *priv = netdev_priv(dev);
	struct gfar __iomem *regs = priv->regs;
	u32 tempval;

	/* Mask all interrupts */
	gfar_write(&regs->imask, IMASK_INIT_CLEAR);

	/* Clear all interrupts */
	gfar_write(&regs->ievent, IEVENT_INIT_CLEAR);

	/* Stop the DMA, and wait for it to stop */
	tempval = gfar_read(&priv->regs->dmactrl);
	if ((tempval & (DMACTRL_GRS | DMACTRL_GTS))
	    != (DMACTRL_GRS | DMACTRL_GTS)) {
		tempval |= (DMACTRL_GRS | DMACTRL_GTS);
		gfar_write(&priv->regs->dmactrl, tempval);

		while (!(gfar_read(&priv->regs->ievent) &
			 (IEVENT_GRSC | IEVENT_GTSC)))
			cpu_relax();
	}
}
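
/* The stop sequence in gfar_halt_nodisable() is the same handshake used
 * at probe time: request a graceful stop by setting DMACTRL[GRS,GTS],
 * then spin until IEVENT[GRSC,GTSC] confirm that receive and transmit
 * DMA have actually quiesced. */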

/* Halt the receive and transmit queues */
void gfar_halt(struct net_device *dev)
{
	struct gfar_private *priv = netdev_priv(dev);
	struct gfar __iomem *regs = priv->regs;
	u32 tempval;

	gfar_halt_nodisable(dev);

	/* Disable Rx and Tx */
	tempval = gfar_read(&regs->maccfg1);
	tempval &= ~(MACCFG1_RX_EN | MACCFG1_TX_EN);
	gfar_write(&regs->maccfg1, tempval);
}

void stop_gfar(struct net_device *dev)
{
	struct gfar_private *priv = netdev_priv(dev);
	struct gfar __iomem *regs = priv->regs;
	unsigned long flags;

	phy_stop(priv->phydev);

	/* Lock it down */
	spin_lock_irqsave(&priv->txlock, flags);
	spin_lock(&priv->rxlock);

	gfar_halt(dev);

	spin_unlock(&priv->rxlock);
	spin_unlock_irqrestore(&priv->txlock, flags);

	/* Free the IRQs */
	if (priv->einfo->device_flags & FSL_GIANFAR_DEV_HAS_MULTI_INTR) {
		free_irq(priv->interruptError, dev);
		free_irq(priv->interruptTransmit, dev);
		free_irq(priv->interruptReceive, dev);
	} else {
		free_irq(priv->interruptTransmit, dev);
	}

	free_skb_resources(priv);

	dma_free_coherent(&dev->dev,
			sizeof(struct txbd8)*priv->tx_ring_size
			+ sizeof(struct rxbd8)*priv->rx_ring_size,
			priv->tx_bd_base,
			gfar_read(&regs->tbase0));
}

/* If there are any tx skbs or rx skbs still around, free them.
 * Then free tx_skbuff and rx_skbuff */
static void free_skb_resources(struct gfar_private *priv)
{
	struct rxbd8 *rxbdp;
	struct txbd8 *txbdp;
	int i;

	/* Go through all the buffer descriptors and free their data buffers */
	txbdp = priv->tx_bd_base;

	for (i = 0; i < priv->tx_ring_size; i++) {
		if (priv->tx_skbuff[i]) {
			dma_unmap_single(&priv->dev->dev, txbdp->bufPtr,
					txbdp->length, DMA_TO_DEVICE);
			dev_kfree_skb_any(priv->tx_skbuff[i]);
			priv->tx_skbuff[i] = NULL;
		}

		txbdp++;
	}

	kfree(priv->tx_skbuff);

	rxbdp = priv->rx_bd_base;

	/* rx_skbuff is not guaranteed to be allocated, so only
	 * free it and its contents if it is allocated */
	if (priv->rx_skbuff != NULL) {
		for (i = 0; i < priv->rx_ring_size; i++) {
			if (priv->rx_skbuff[i]) {
				dma_unmap_single(&priv->dev->dev, rxbdp->bufPtr,
						priv->rx_buffer_size,
						DMA_FROM_DEVICE);

				dev_kfree_skb_any(priv->rx_skbuff[i]);
				priv->rx_skbuff[i] = NULL;
			}

			rxbdp->status = 0;
			rxbdp->length = 0;
			rxbdp->bufPtr = 0;

			rxbdp++;
		}

		kfree(priv->rx_skbuff);
	}
}

void gfar_start(struct net_device *dev)
{
	struct gfar_private *priv = netdev_priv(dev);
	struct gfar __iomem *regs = priv->regs;
	u32 tempval;

	/* Enable Rx and Tx in MACCFG1 */
	tempval = gfar_read(&regs->maccfg1);
	tempval |= (MACCFG1_RX_EN | MACCFG1_TX_EN);
	gfar_write(&regs->maccfg1, tempval);

	/* Initialize DMACTRL to have WWR and WOP */
	tempval = gfar_read(&priv->regs->dmactrl);
	tempval |= DMACTRL_INIT_SETTINGS;
	gfar_write(&priv->regs->dmactrl, tempval);

	/* Make sure we aren't stopped */
	tempval = gfar_read(&priv->regs->dmactrl);
	tempval &= ~(DMACTRL_GRS | DMACTRL_GTS);
	gfar_write(&priv->regs->dmactrl, tempval);

	/* Clear THLT/RHLT, so that the DMA starts polling now */
	gfar_write(&regs->tstat, TSTAT_CLEAR_THALT);
	gfar_write(&regs->rstat, RSTAT_CLEAR_RHALT);

	/* Unmask the interrupts we look for */
	gfar_write(&regs->imask, IMASK_DEFAULT);
}
805 int startup_gfar(struct net_device
*dev
)
812 struct gfar_private
*priv
= netdev_priv(dev
);
813 struct gfar __iomem
*regs
= priv
->regs
;
818 gfar_write(®s
->imask
, IMASK_INIT_CLEAR
);
820 /* Allocate memory for the buffer descriptors */
821 vaddr
= (unsigned long) dma_alloc_coherent(&dev
->dev
,
822 sizeof (struct txbd8
) * priv
->tx_ring_size
+
823 sizeof (struct rxbd8
) * priv
->rx_ring_size
,
827 if (netif_msg_ifup(priv
))
828 printk(KERN_ERR
"%s: Could not allocate buffer descriptors!\n",

	priv->tx_bd_base = (struct txbd8 *) vaddr;

	/* enet DMA only understands physical addresses */
	gfar_write(&regs->tbase0, addr);

	/* Start the rx descriptor ring where the tx ring leaves off */
	addr = addr + sizeof (struct txbd8) * priv->tx_ring_size;
	vaddr = vaddr + sizeof (struct txbd8) * priv->tx_ring_size;
	priv->rx_bd_base = (struct rxbd8 *) vaddr;
	gfar_write(&regs->rbase0, addr);
846 (struct sk_buff
**) kmalloc(sizeof (struct sk_buff
*) *
847 priv
->tx_ring_size
, GFP_KERNEL
);
849 if (NULL
== priv
->tx_skbuff
) {
850 if (netif_msg_ifup(priv
))
851 printk(KERN_ERR
"%s: Could not allocate tx_skbuff\n",
857 for (i
= 0; i
< priv
->tx_ring_size
; i
++)
858 priv
->tx_skbuff
[i
] = NULL
;
861 (struct sk_buff
**) kmalloc(sizeof (struct sk_buff
*) *
862 priv
->rx_ring_size
, GFP_KERNEL
);
864 if (NULL
== priv
->rx_skbuff
) {
865 if (netif_msg_ifup(priv
))
866 printk(KERN_ERR
"%s: Could not allocate rx_skbuff\n",
872 for (i
= 0; i
< priv
->rx_ring_size
; i
++)
873 priv
->rx_skbuff
[i
] = NULL
;

	/* Initialize some variables in our dev structure */
	priv->dirty_tx = priv->cur_tx = priv->tx_bd_base;
	priv->cur_rx = priv->rx_bd_base;
	priv->skb_curtx = priv->skb_dirtytx = 0;
	priv->skb_currx = 0;

	/* Initialize Transmit Descriptor Ring */
	txbdp = priv->tx_bd_base;
	for (i = 0; i < priv->tx_ring_size; i++) {
		txbdp->status = 0;
		txbdp->length = 0;
		txbdp->bufPtr = 0;
		txbdp++;
	}

	/* Set the last descriptor in the ring to indicate wrap */
	txbdp--;
	txbdp->status |= TXBD_WRAP;

	rxbdp = priv->rx_bd_base;
	for (i = 0; i < priv->rx_ring_size; i++) {
		struct sk_buff *skb;

		skb = gfar_new_skb(dev);

		if (!skb) {
			printk(KERN_ERR "%s: Can't allocate RX buffers\n",
			       dev->name);

			goto err_rxalloc_fail;
		}

		priv->rx_skbuff[i] = skb;

		gfar_new_rxbdp(dev, rxbdp, skb);

		rxbdp++;
	}

	/* Set the last descriptor in the ring to wrap */
	rxbdp--;
	rxbdp->status |= RXBD_WRAP;

	/* If the device has multiple interrupts, register for
	 * them.  Otherwise, only register for the one */
	if (priv->einfo->device_flags & FSL_GIANFAR_DEV_HAS_MULTI_INTR) {
		/* Install our interrupt handlers for Error,
		 * Transmit, and Receive */
		if (request_irq(priv->interruptError, gfar_error,
				0, "enet_error", dev) < 0) {
			if (netif_msg_intr(priv))
				printk(KERN_ERR "%s: Can't get IRQ %d\n",
				       dev->name, priv->interruptError);

			err = -1;
			goto err_irq_fail;
		}

		if (request_irq(priv->interruptTransmit, gfar_transmit,
				0, "enet_tx", dev) < 0) {
			if (netif_msg_intr(priv))
				printk(KERN_ERR "%s: Can't get IRQ %d\n",
				       dev->name, priv->interruptTransmit);

			err = -1;
			goto tx_irq_fail;
		}

		if (request_irq(priv->interruptReceive, gfar_receive,
				0, "enet_rx", dev) < 0) {
			if (netif_msg_intr(priv))
				printk(KERN_ERR "%s: Can't get IRQ %d (receive0)\n",
				       dev->name, priv->interruptReceive);

			err = -1;
			goto rx_irq_fail;
		}
	} else {
		if (request_irq(priv->interruptTransmit, gfar_interrupt,
				0, "gfar_interrupt", dev) < 0) {
			if (netif_msg_intr(priv))
				printk(KERN_ERR "%s: Can't get IRQ %d\n",
				       dev->name, priv->interruptError);

			err = -1;
			goto err_irq_fail;
		}
	}

	phy_start(priv->phydev);

	/* Configure the coalescing support */
	if (priv->txcoalescing)
		gfar_write(&regs->txic,
			   mk_ic_value(priv->txcount, priv->txtime));
	else
		gfar_write(&regs->txic, 0);

	if (priv->rxcoalescing)
		gfar_write(&regs->rxic,
			   mk_ic_value(priv->rxcount, priv->rxtime));
	else
		gfar_write(&regs->rxic, 0);

	if (priv->rx_csum_enable)
		rctrl |= RCTRL_CHECKSUMMING;

	if (priv->extended_hash) {
		rctrl |= RCTRL_EXTHASH;

		gfar_clear_exact_match(dev);
		rctrl |= RCTRL_EMEN;
	}

	if (priv->vlan_enable)
		rctrl |= RCTRL_VLAN;

	if (priv->padding) {
		rctrl &= ~RCTRL_PAL_MASK;
		rctrl |= RCTRL_PADDING(priv->padding);
	}

	/* Init rctrl based on our settings */
	gfar_write(&priv->regs->rctrl, rctrl);

	if (dev->features & NETIF_F_IP_CSUM)
		gfar_write(&priv->regs->tctrl, TCTRL_INIT_CSUM);

	/* Set the extraction length and index */
	attrs = ATTRELI_EL(priv->rx_stash_size) |
		ATTRELI_EI(priv->rx_stash_index);

	gfar_write(&priv->regs->attreli, attrs);

	/* Start with defaults, and add stashing or locking
	 * depending on the appropriate variables */
	attrs = ATTR_INIT_SETTINGS;

	if (priv->bd_stash_en)
		attrs |= ATTR_BDSTASH;

	if (priv->rx_stash_size != 0)
		attrs |= ATTR_BUFSTASH;

	gfar_write(&priv->regs->attr, attrs);

	gfar_write(&priv->regs->fifo_tx_thr, priv->fifo_threshold);
	gfar_write(&priv->regs->fifo_tx_starve, priv->fifo_starve);
	gfar_write(&priv->regs->fifo_tx_starve_shutoff, priv->fifo_starve_off);

	/* Start the controller */
	gfar_start(dev);

	return 0;

rx_irq_fail:
	free_irq(priv->interruptTransmit, dev);
tx_irq_fail:
	free_irq(priv->interruptError, dev);
err_irq_fail:
err_rxalloc_fail:
rx_skb_fail:
	free_skb_resources(priv);
tx_skb_fail:
	dma_free_coherent(&dev->dev,
			sizeof(struct txbd8)*priv->tx_ring_size
			+ sizeof(struct rxbd8)*priv->rx_ring_size,
			priv->tx_bd_base,
			gfar_read(&regs->tbase0));

	return err;
}

/* Called when something needs to use the ethernet device */
/* Returns 0 for success. */
static int gfar_enet_open(struct net_device *dev)
{
	struct gfar_private *priv = netdev_priv(dev);
	int err;

	napi_enable(&priv->napi);

	/* Initialize a bunch of registers */
	init_registers(dev);

	gfar_set_mac_address(dev);

	err = init_phy(dev);
	if (err) {
		napi_disable(&priv->napi);
		return err;
	}

	err = startup_gfar(dev);
	if (err) {
		napi_disable(&priv->napi);
		return err;
	}

	netif_start_queue(dev);

	return err;
}

static inline struct txfcb *gfar_add_fcb(struct sk_buff *skb, struct txbd8 *bdp)
{
	struct txfcb *fcb = (struct txfcb *)skb_push(skb, GMAC_FCB_LEN);

	memset(fcb, 0, GMAC_FCB_LEN);

	return fcb;
}

static inline void gfar_tx_checksum(struct sk_buff *skb, struct txfcb *fcb)
{
	u8 flags = 0;

	/* If we're here, it's an IP packet with a TCP or UDP
	 * payload.  We set it to checksum, using a pseudo-header
	 * we provide
	 */
	flags = TXFCB_DEFAULT;

	/* Tell the controller what the protocol is */
	/* And provide the already calculated phcs */
	if (ip_hdr(skb)->protocol == IPPROTO_UDP) {
		flags |= TXFCB_UDP;
		fcb->phcs = udp_hdr(skb)->check;
	} else
		fcb->phcs = tcp_hdr(skb)->check;

	/* l3os is the distance between the start of the
	 * frame (skb->data) and the start of the IP hdr.
	 * l4os is the distance between the start of the
	 * l3 hdr and the l4 hdr */
	fcb->l3os = (u16)(skb_network_offset(skb) - GMAC_FCB_LEN);
	fcb->l4os = skb_network_header_len(skb);

	fcb->flags = flags;
}
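
/* Worked example of the offsets above (numbers chosen for illustration,
 * not taken from the original source): for an untagged IPv4/TCP frame,
 * skb->data points at the FCB after the skb_push() in gfar_add_fcb(),
 * so skb_network_offset() == GMAC_FCB_LEN + ETH_HLEN, which gives
 * l3os = 14; a 20-byte IPv4 header without options then gives
 * l4os = 20. */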

static inline void gfar_tx_vlan(struct sk_buff *skb, struct txfcb *fcb)
{
	fcb->flags |= TXFCB_VLN;
	fcb->vlctl = vlan_tx_tag_get(skb);
}

/* This is called by the kernel when a frame is ready for transmission. */
/* It is pointed to by the dev->hard_start_xmit function pointer */
static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct gfar_private *priv = netdev_priv(dev);
	struct txfcb *fcb = NULL;
	struct txbd8 *txbdp;
	u16 status;
	unsigned long flags;

	/* Update transmit stats */
	dev->stats.tx_bytes += skb->len;

	/* Lock priv now */
	spin_lock_irqsave(&priv->txlock, flags);

	/* Point at the first free tx descriptor */
	txbdp = priv->cur_tx;

	/* Clear all but the WRAP status flags */
	status = txbdp->status & TXBD_WRAP;

	/* Set up checksumming */
	if (likely((dev->features & NETIF_F_IP_CSUM)
			&& (CHECKSUM_PARTIAL == skb->ip_summed))) {
		fcb = gfar_add_fcb(skb, txbdp);
		status |= TXBD_TOE;
		gfar_tx_checksum(skb, fcb);
	}

	if (priv->vlan_enable &&
	    unlikely(priv->vlgrp && vlan_tx_tag_present(skb))) {
		if (unlikely(NULL == fcb)) {
			fcb = gfar_add_fcb(skb, txbdp);
			status |= TXBD_TOE;
		}

		gfar_tx_vlan(skb, fcb);
	}

	/* Set buffer length and pointer */
	txbdp->length = skb->len;
	txbdp->bufPtr = dma_map_single(&dev->dev, skb->data,
			skb->len, DMA_TO_DEVICE);

	/* Save the skb pointer so we can free it later */
	priv->tx_skbuff[priv->skb_curtx] = skb;

	/* Update the current skb pointer (wrapping if this was the last) */
	priv->skb_curtx =
	    (priv->skb_curtx + 1) & TX_RING_MOD_MASK(priv->tx_ring_size);

	/* Flag the BD as interrupt-causing */
	status |= TXBD_INTERRUPT;

	/* Flag the BD as ready to go, last in frame, and */
	/* in need of CRC */
	status |= (TXBD_READY | TXBD_LAST | TXBD_CRC);

	dev->trans_start = jiffies;

	/* The powerpc-specific eieio() is used, as wmb() has too strong
	 * semantics (it requires synchronization between cacheable and
	 * uncacheable mappings, which eieio doesn't provide and which we
	 * don't need), thus requiring a more expensive sync instruction.  At
	 * some point, the set of architecture-independent barrier functions
	 * should be expanded to include weaker barriers.
	 */
	eieio();

	txbdp->status = status;

	/* If this was the last BD in the ring, the next one */
	/* is at the beginning of the ring */
	if (txbdp->status & TXBD_WRAP)
		txbdp = priv->tx_bd_base;
	else
		txbdp++;

	/* If the next BD still needs to be cleaned up, then the bds
	   are full.  We need to tell the kernel to stop sending us stuff. */
	if (txbdp == priv->dirty_tx) {
		netif_stop_queue(dev);

		dev->stats.tx_fifo_errors++;
	}

	/* Update the current txbd to the next one */
	priv->cur_tx = txbdp;

	/* Tell the DMA to go go go */
	gfar_write(&priv->regs->tstat, TSTAT_CLEAR_THALT);

	/* Unlock priv */
	spin_unlock_irqrestore(&priv->txlock, flags);

	return 0;
}
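
/* A small aside on the index arithmetic above (illustrative assumption:
 * ring sizes are powers of two, which is what makes the AND-mask wrap
 * equivalent to a modulo):
 *
 *	skb_curtx = (skb_curtx + 1) & TX_RING_MOD_MASK(size);
 *
 * e.g. with a ring size of 256 the mask is 0xff, so index 255 + 1 wraps
 * to 0 without a divide. */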

/* Stops the kernel queue, and halts the controller */
static int gfar_close(struct net_device *dev)
{
	struct gfar_private *priv = netdev_priv(dev);

	napi_disable(&priv->napi);

	cancel_work_sync(&priv->reset_task);
	stop_gfar(dev);

	/* Disconnect from the PHY */
	phy_disconnect(priv->phydev);
	priv->phydev = NULL;

	netif_stop_queue(dev);

	return 0;
}

/* Changes the mac address if the controller is not running. */
static int gfar_set_mac_address(struct net_device *dev)
{
	gfar_set_mac_for_addr(dev, 0, dev->dev_addr);

	return 0;
}

/* Enables and disables VLAN insertion/extraction */
static void gfar_vlan_rx_register(struct net_device *dev,
		struct vlan_group *grp)
{
	struct gfar_private *priv = netdev_priv(dev);
	unsigned long flags;
	u32 tempval;

	spin_lock_irqsave(&priv->rxlock, flags);

	priv->vlgrp = grp;

	if (grp) {
		/* Enable VLAN tag insertion */
		tempval = gfar_read(&priv->regs->tctrl);
		tempval |= TCTRL_VLINS;

		gfar_write(&priv->regs->tctrl, tempval);

		/* Enable VLAN tag extraction */
		tempval = gfar_read(&priv->regs->rctrl);
		tempval |= RCTRL_VLEX;
		gfar_write(&priv->regs->rctrl, tempval);
	} else {
		/* Disable VLAN tag insertion */
		tempval = gfar_read(&priv->regs->tctrl);
		tempval &= ~TCTRL_VLINS;
		gfar_write(&priv->regs->tctrl, tempval);

		/* Disable VLAN tag extraction */
		tempval = gfar_read(&priv->regs->rctrl);
		tempval &= ~RCTRL_VLEX;
		gfar_write(&priv->regs->rctrl, tempval);
	}

	spin_unlock_irqrestore(&priv->rxlock, flags);
}

static int gfar_change_mtu(struct net_device *dev, int new_mtu)
{
	int tempsize, tempval;
	struct gfar_private *priv = netdev_priv(dev);
	int oldsize = priv->rx_buffer_size;
	int frame_size = new_mtu + ETH_HLEN;

	if (priv->vlan_enable)
		frame_size += VLAN_HLEN;

	if (gfar_uses_fcb(priv))
		frame_size += GMAC_FCB_LEN;

	frame_size += priv->padding;

	if ((frame_size < 64) || (frame_size > JUMBO_FRAME_SIZE)) {
		if (netif_msg_drv(priv))
			printk(KERN_ERR "%s: Invalid MTU setting\n",
			       dev->name);
		return -EINVAL;
	}

	tempsize =
	    (frame_size & ~(INCREMENTAL_BUFFER_SIZE - 1)) +
	    INCREMENTAL_BUFFER_SIZE;

	/* Only stop and start the controller if it isn't already
	 * stopped, and we changed something */
	if ((oldsize != tempsize) && (dev->flags & IFF_UP))
		stop_gfar(dev);

	priv->rx_buffer_size = tempsize;

	dev->mtu = new_mtu;

	gfar_write(&priv->regs->mrblr, priv->rx_buffer_size);
	gfar_write(&priv->regs->maxfrm, priv->rx_buffer_size);

	/* If the mtu is larger than the max size for standard
	 * ethernet frames (ie, a jumbo frame), then set maccfg2
	 * to allow huge frames, and to check the length */
	tempval = gfar_read(&priv->regs->maccfg2);

	if (priv->rx_buffer_size > DEFAULT_RX_BUFFER_SIZE)
		tempval |= (MACCFG2_HUGEFRAME | MACCFG2_LENGTHCHECK);
	else
		tempval &= ~(MACCFG2_HUGEFRAME | MACCFG2_LENGTHCHECK);

	gfar_write(&priv->regs->maccfg2, tempval);

	if ((oldsize != tempsize) && (dev->flags & IFF_UP))
		startup_gfar(dev);

	return 0;
}

/* gfar_reset_task gets scheduled when a packet has not been
 * transmitted after a set amount of time.
 * For now, assume that clearing out all the structures, and
 * starting over will fix the problem.
 */
static void gfar_reset_task(struct work_struct *work)
{
	struct gfar_private *priv = container_of(work, struct gfar_private,
			reset_task);
	struct net_device *dev = priv->dev;

	if (dev->flags & IFF_UP) {
		stop_gfar(dev);
		startup_gfar(dev);
	}

	netif_tx_schedule_all(dev);
}

static void gfar_timeout(struct net_device *dev)
{
	struct gfar_private *priv = netdev_priv(dev);

	dev->stats.tx_errors++;
	schedule_work(&priv->reset_task);
}

/* Interrupt Handler for Transmit complete */
static int gfar_clean_tx_ring(struct net_device *dev)
{
	struct txbd8 *bdp;
	struct gfar_private *priv = netdev_priv(dev);
	int howmany = 0;

	bdp = priv->dirty_tx;
	while ((bdp->status & TXBD_READY) == 0) {
		/* If dirty_tx and cur_tx are the same, then either the */
		/* ring is empty or full now (it could only be full in the beginning, */
		/* obviously).  If it is empty, we are done. */
		if ((bdp == priv->cur_tx) && (netif_queue_stopped(dev) == 0))
			break;

		howmany++;

		/* Deferred means some collisions occurred during transmit, */
		/* but we eventually sent the packet. */
		if (bdp->status & TXBD_DEF)
			dev->stats.collisions++;

		/* Free the sk buffer associated with this TxBD */
		dev_kfree_skb_irq(priv->tx_skbuff[priv->skb_dirtytx]);

		priv->tx_skbuff[priv->skb_dirtytx] = NULL;
		priv->skb_dirtytx =
		    (priv->skb_dirtytx +
		     1) & TX_RING_MOD_MASK(priv->tx_ring_size);

		/* Clean BD length for empty detection */
		bdp->length = 0;

		/* update bdp to point at next bd in the ring (wrapping if necessary) */
		if (bdp->status & TXBD_WRAP)
			bdp = priv->tx_bd_base;
		else
			bdp++;

		/* Move dirty_tx to be the next bd */
		priv->dirty_tx = bdp;

		/* We freed a buffer, so now we can restart transmission */
		if (netif_queue_stopped(dev))
			netif_wake_queue(dev);
	} /* while ((bdp->status & TXBD_READY) == 0) */

	dev->stats.tx_packets += howmany;

	return howmany;
}

/* Interrupt Handler for Transmit complete */
static irqreturn_t gfar_transmit(int irq, void *dev_id)
{
	struct net_device *dev = (struct net_device *) dev_id;
	struct gfar_private *priv = netdev_priv(dev);

	/* Clear IEVENT */
	gfar_write(&priv->regs->ievent, IEVENT_TX_MASK);

	/* Lock priv */
	spin_lock(&priv->txlock);

	gfar_clean_tx_ring(dev);

	/* If we are coalescing the interrupts, reset the timer */
	/* Otherwise, clear it */
	if (likely(priv->txcoalescing)) {
		gfar_write(&priv->regs->txic, 0);
		gfar_write(&priv->regs->txic,
			   mk_ic_value(priv->txcount, priv->txtime));
	}

	spin_unlock(&priv->txlock);

	return IRQ_HANDLED;
}

static void gfar_new_rxbdp(struct net_device *dev, struct rxbd8 *bdp,
		struct sk_buff *skb)
{
	struct gfar_private *priv = netdev_priv(dev);
	u32 *status_len = (u32 *)bdp;
	u16 flags;

	bdp->bufPtr = dma_map_single(&dev->dev, skb->data,
			priv->rx_buffer_size, DMA_FROM_DEVICE);

	flags = RXBD_EMPTY | RXBD_INTERRUPT;

	if (bdp == priv->rx_bd_base + priv->rx_ring_size - 1)
		flags |= RXBD_WRAP;

	eieio();

	*status_len = (u32)flags << 16;
}
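
/* Note on gfar_new_rxbdp() above: status and length are adjacent 16-bit
 * fields at the start of the descriptor, so on big-endian PowerPC
 * casting the BD to a u32 and storing (flags << 16) publishes the EMPTY
 * status to the hardware and zeroes the length in a single 32-bit
 * write, after the eieio() has ordered the bufPtr update. */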

struct sk_buff *gfar_new_skb(struct net_device *dev)
{
	unsigned int alignamount;
	struct gfar_private *priv = netdev_priv(dev);
	struct sk_buff *skb = NULL;

	/* We have to allocate the skb, so keep trying till we succeed */
	skb = netdev_alloc_skb(dev, priv->rx_buffer_size + RXBUF_ALIGNMENT);

	if (!skb)
		return NULL;

	alignamount = RXBUF_ALIGNMENT -
		(((unsigned long) skb->data) & (RXBUF_ALIGNMENT - 1));

	/* We need the data buffer to be aligned properly.  We will reserve
	 * as many bytes as needed to align the data properly
	 */
	skb_reserve(skb, alignamount);

	return skb;
}

static inline void count_errors(unsigned short status, struct net_device *dev)
{
	struct gfar_private *priv = netdev_priv(dev);
	struct net_device_stats *stats = &dev->stats;
	struct gfar_extra_stats *estats = &priv->extra_stats;

	/* If the packet was truncated, none of the other errors
	 * matter */
	if (status & RXBD_TRUNCATED) {
		stats->rx_length_errors++;

		estats->rx_trunc++;

		return;
	}
	/* Count the errors, if there were any */
	if (status & (RXBD_LARGE | RXBD_SHORT)) {
		stats->rx_length_errors++;

		if (status & RXBD_LARGE)
			estats->rx_large++;
		else
			estats->rx_short++;
	}
	if (status & RXBD_NONOCTET) {
		stats->rx_frame_errors++;
		estats->rx_nonoctet++;
	}
	if (status & RXBD_CRCERR) {
		estats->rx_crcerr++;
		stats->rx_crc_errors++;
	}
	if (status & RXBD_OVERRUN) {
		estats->rx_overrun++;
		stats->rx_crc_errors++;
	}
}

irqreturn_t gfar_receive(int irq, void *dev_id)
{
	struct net_device *dev = (struct net_device *) dev_id;
	struct gfar_private *priv = netdev_priv(dev);
	u32 tempval;

	/* Clear IEVENT, so interrupts aren't called again
	 * because of the packets that have already arrived */
	gfar_write(&priv->regs->ievent, IEVENT_RTX_MASK);

	if (netif_rx_schedule_prep(dev, &priv->napi)) {
		tempval = gfar_read(&priv->regs->imask);
		tempval &= IMASK_RTX_DISABLED;
		gfar_write(&priv->regs->imask, tempval);

		__netif_rx_schedule(dev, &priv->napi);
	} else {
		if (netif_msg_rx_err(priv))
			printk(KERN_DEBUG "%s: receive called twice (%x)[%x]\n",
			       dev->name, gfar_read(&priv->regs->ievent),
			       gfar_read(&priv->regs->imask));
	}

	return IRQ_HANDLED;
}

static inline void gfar_rx_checksum(struct sk_buff *skb, struct rxfcb *fcb)
{
	/* If valid headers were found, and valid sums
	 * were verified, then we tell the kernel that no
	 * checksumming is necessary.  Otherwise, it is */
	if ((fcb->flags & RXFCB_CSUM_MASK) == (RXFCB_CIP | RXFCB_CTU))
		skb->ip_summed = CHECKSUM_UNNECESSARY;
	else
		skb->ip_summed = CHECKSUM_NONE;
}

static inline struct rxfcb *gfar_get_fcb(struct sk_buff *skb)
{
	struct rxfcb *fcb = (struct rxfcb *)skb->data;

	/* Remove the FCB from the skb */
	skb_pull(skb, GMAC_FCB_LEN);

	return fcb;
}

/* gfar_process_frame() -- handle one incoming packet if skb
 * isn't NULL. */
static int gfar_process_frame(struct net_device *dev, struct sk_buff *skb,
		int length)
{
	struct gfar_private *priv = netdev_priv(dev);
	struct rxfcb *fcb = NULL;
	int ret;

	if (NULL == skb) {
		if (netif_msg_rx_err(priv))
			printk(KERN_WARNING "%s: Missing skb!!.\n", dev->name);
		dev->stats.rx_dropped++;
		priv->extra_stats.rx_skbmissing++;
	} else {
		/* Prep the skb for the packet */
		skb_put(skb, length);

		/* Grab the FCB if there is one */
		if (gfar_uses_fcb(priv))
			fcb = gfar_get_fcb(skb);

		/* Remove the padded bytes, if there are any */
		if (priv->padding)
			skb_pull(skb, priv->padding);

		if (priv->rx_csum_enable)
			gfar_rx_checksum(skb, fcb);

		/* Tell the skb what kind of packet this is */
		skb->protocol = eth_type_trans(skb, dev);

		/* Send the packet up the stack */
		if (unlikely(priv->vlgrp && (fcb->flags & RXFCB_VLN))) {
			ret = vlan_hwaccel_receive_skb(skb, priv->vlgrp,
						       fcb->vlctl);
		} else
			ret = netif_receive_skb(skb);

		if (NET_RX_DROP == ret)
			priv->extra_stats.kernel_dropped++;
	}

	return 0;
}

/* gfar_clean_rx_ring() -- Processes each frame in the rx ring
 * until the budget/quota has been reached.  Returns the number
 * of frames handled
 */
int gfar_clean_rx_ring(struct net_device *dev, int rx_work_limit)
{
	struct rxbd8 *bdp;
	struct sk_buff *skb;
	u16 pkt_len;
	int howmany = 0;
	struct gfar_private *priv = netdev_priv(dev);

	/* Get the first full descriptor */
	bdp = priv->cur_rx;

	while (!((bdp->status & RXBD_EMPTY) || (--rx_work_limit < 0))) {
		struct sk_buff *newskb;

		rmb();

		/* Add another skb for the future */
		newskb = gfar_new_skb(dev);

		skb = priv->rx_skbuff[priv->skb_currx];

		/* We drop the frame if we failed to allocate a new buffer */
		if (unlikely(!newskb || !(bdp->status & RXBD_LAST) ||
			     bdp->status & RXBD_ERR)) {
			count_errors(bdp->status, dev);

			if (unlikely(!newskb))
				newskb = skb;

			if (skb) {
				dma_unmap_single(&priv->dev->dev,
						bdp->bufPtr,
						priv->rx_buffer_size,
						DMA_FROM_DEVICE);

				dev_kfree_skb_any(skb);
			}
		} else {
			/* Increment the number of packets */
			dev->stats.rx_packets++;
			howmany++;

			/* Remove the FCS from the packet length */
			pkt_len = bdp->length - 4;

			gfar_process_frame(dev, skb, pkt_len);

			dev->stats.rx_bytes += pkt_len;
		}

		dev->last_rx = jiffies;

		priv->rx_skbuff[priv->skb_currx] = newskb;

		/* Setup the new bdp */
		gfar_new_rxbdp(dev, bdp, newskb);

		/* Update to the next pointer */
		if (bdp->status & RXBD_WRAP)
			bdp = priv->rx_bd_base;
		else
			bdp++;

		/* update to point at the next skb */
		priv->skb_currx =
		    (priv->skb_currx + 1) &
		    RX_RING_MOD_MASK(priv->rx_ring_size);
	}

	/* Update the current rxbd pointer to be the next one */
	priv->cur_rx = bdp;

	return howmany;
}

static int gfar_poll(struct napi_struct *napi, int budget)
{
	struct gfar_private *priv = container_of(napi, struct gfar_private, napi);
	struct net_device *dev = priv->dev;
	int howmany;
	unsigned long flags;

	/* If we fail to get the lock, don't bother with the TX BDs */
	if (spin_trylock_irqsave(&priv->txlock, flags)) {
		gfar_clean_tx_ring(dev);
		spin_unlock_irqrestore(&priv->txlock, flags);
	}

	howmany = gfar_clean_rx_ring(dev, budget);

	if (howmany < budget) {
		netif_rx_complete(dev, napi);

		/* Clear the halt bit in RSTAT */
		gfar_write(&priv->regs->rstat, RSTAT_CLEAR_RHALT);

		gfar_write(&priv->regs->imask, IMASK_DEFAULT);

		/* If we are coalescing interrupts, update the timer */
		/* Otherwise, clear it */
		if (likely(priv->rxcoalescing)) {
			gfar_write(&priv->regs->rxic, 0);
			gfar_write(&priv->regs->rxic,
				   mk_ic_value(priv->rxcount, priv->rxtime));
		}
	}

	return howmany;
}

#ifdef CONFIG_NET_POLL_CONTROLLER
/*
 * Polling 'interrupt' - used by things like netconsole to send skbs
 * without having to re-enable interrupts. It's not called while
 * the interrupt routine is executing.
 */
static void gfar_netpoll(struct net_device *dev)
{
	struct gfar_private *priv = netdev_priv(dev);

	/* If the device has multiple interrupts, run tx/rx */
	if (priv->einfo->device_flags & FSL_GIANFAR_DEV_HAS_MULTI_INTR) {
		disable_irq(priv->interruptTransmit);
		disable_irq(priv->interruptReceive);
		disable_irq(priv->interruptError);
		gfar_interrupt(priv->interruptTransmit, dev);
		enable_irq(priv->interruptError);
		enable_irq(priv->interruptReceive);
		enable_irq(priv->interruptTransmit);
	} else {
		disable_irq(priv->interruptTransmit);
		gfar_interrupt(priv->interruptTransmit, dev);
		enable_irq(priv->interruptTransmit);
	}
}
#endif

/* The interrupt handler for devices with one interrupt */
static irqreturn_t gfar_interrupt(int irq, void *dev_id)
{
	struct net_device *dev = dev_id;
	struct gfar_private *priv = netdev_priv(dev);

	/* Save ievent for future reference */
	u32 events = gfar_read(&priv->regs->ievent);

	/* Check for reception */
	if (events & IEVENT_RX_MASK)
		gfar_receive(irq, dev_id);

	/* Check for transmit completion */
	if (events & IEVENT_TX_MASK)
		gfar_transmit(irq, dev_id);

	/* Check for errors */
	if (events & IEVENT_ERR_MASK)
		gfar_error(irq, dev_id);

	return IRQ_HANDLED;
}

/* Called every time the controller might need to be made
 * aware of new link state.  The PHY code conveys this
 * information through variables in the phydev structure, and this
 * function converts those variables into the appropriate
 * register values, and can bring down the device if needed.
 */
static void adjust_link(struct net_device *dev)
{
	struct gfar_private *priv = netdev_priv(dev);
	struct gfar __iomem *regs = priv->regs;
	unsigned long flags;
	struct phy_device *phydev = priv->phydev;
	int new_state = 0;

	spin_lock_irqsave(&priv->txlock, flags);
	if (phydev->link) {
		u32 tempval = gfar_read(&regs->maccfg2);
		u32 ecntrl = gfar_read(&regs->ecntrl);

		/* Now we make sure that we can be in full duplex mode.
		 * If not, we operate in half-duplex mode. */
		if (phydev->duplex != priv->oldduplex) {
			new_state = 1;
			if (!(phydev->duplex))
				tempval &= ~(MACCFG2_FULL_DUPLEX);
			else
				tempval |= MACCFG2_FULL_DUPLEX;

			priv->oldduplex = phydev->duplex;
		}

		if (phydev->speed != priv->oldspeed) {
			new_state = 1;
			switch (phydev->speed) {
			case 1000:
				tempval =
				    ((tempval & ~(MACCFG2_IF)) | MACCFG2_GMII);
				break;
			case 100:
			case 10:
				tempval =
				    ((tempval & ~(MACCFG2_IF)) | MACCFG2_MII);

				/* Reduced mode distinguishes
				 * between 10 and 100 */
				if (phydev->speed == SPEED_100)
					ecntrl |= ECNTRL_R100;
				else
					ecntrl &= ~(ECNTRL_R100);
				break;
			default:
				if (netif_msg_link(priv))
					printk(KERN_WARNING
						"%s: Ack! Speed (%d) is not 10/100/1000!\n",
						dev->name, phydev->speed);
				break;
			}

			priv->oldspeed = phydev->speed;
		}

		gfar_write(&regs->maccfg2, tempval);
		gfar_write(&regs->ecntrl, ecntrl);

		if (!priv->oldlink) {
			new_state = 1;
			priv->oldlink = 1;
		}
	} else if (priv->oldlink) {
		new_state = 1;
		priv->oldlink = 0;
		priv->oldspeed = 0;
		priv->oldduplex = -1;
	}

	if (new_state && netif_msg_link(priv))
		phy_print_status(phydev);

	spin_unlock_irqrestore(&priv->txlock, flags);
}

/* Update the hash table based on the current list of multicast
 * addresses we subscribe to.  Also, change the promiscuity of
 * the device based on the flags (this function is called
 * whenever dev->flags is changed) */
static void gfar_set_multi(struct net_device *dev)
{
	struct dev_mc_list *mc_ptr;
	struct gfar_private *priv = netdev_priv(dev);
	struct gfar __iomem *regs = priv->regs;
	u32 tempval;

	if (dev->flags & IFF_PROMISC) {
		/* Set RCTRL to PROM */
		tempval = gfar_read(&regs->rctrl);
		tempval |= RCTRL_PROM;
		gfar_write(&regs->rctrl, tempval);
	} else {
		/* Set RCTRL to not PROM */
		tempval = gfar_read(&regs->rctrl);
		tempval &= ~(RCTRL_PROM);
		gfar_write(&regs->rctrl, tempval);
	}

	if (dev->flags & IFF_ALLMULTI) {
		/* Set the hash to rx all multicast frames */
		gfar_write(&regs->igaddr0, 0xffffffff);
		gfar_write(&regs->igaddr1, 0xffffffff);
		gfar_write(&regs->igaddr2, 0xffffffff);
		gfar_write(&regs->igaddr3, 0xffffffff);
		gfar_write(&regs->igaddr4, 0xffffffff);
		gfar_write(&regs->igaddr5, 0xffffffff);
		gfar_write(&regs->igaddr6, 0xffffffff);
		gfar_write(&regs->igaddr7, 0xffffffff);
		gfar_write(&regs->gaddr0, 0xffffffff);
		gfar_write(&regs->gaddr1, 0xffffffff);
		gfar_write(&regs->gaddr2, 0xffffffff);
		gfar_write(&regs->gaddr3, 0xffffffff);
		gfar_write(&regs->gaddr4, 0xffffffff);
		gfar_write(&regs->gaddr5, 0xffffffff);
		gfar_write(&regs->gaddr6, 0xffffffff);
		gfar_write(&regs->gaddr7, 0xffffffff);
	} else {
		int em_num;
		int idx;

		/* zero out the hash */
		gfar_write(&regs->igaddr0, 0x0);
		gfar_write(&regs->igaddr1, 0x0);
		gfar_write(&regs->igaddr2, 0x0);
		gfar_write(&regs->igaddr3, 0x0);
		gfar_write(&regs->igaddr4, 0x0);
		gfar_write(&regs->igaddr5, 0x0);
		gfar_write(&regs->igaddr6, 0x0);
		gfar_write(&regs->igaddr7, 0x0);
		gfar_write(&regs->gaddr0, 0x0);
		gfar_write(&regs->gaddr1, 0x0);
		gfar_write(&regs->gaddr2, 0x0);
		gfar_write(&regs->gaddr3, 0x0);
		gfar_write(&regs->gaddr4, 0x0);
		gfar_write(&regs->gaddr5, 0x0);
		gfar_write(&regs->gaddr6, 0x0);
		gfar_write(&regs->gaddr7, 0x0);

		/* If we have extended hash tables, we need to
		 * clear the exact match registers to prepare for
		 * setting addresses */
		if (priv->extended_hash) {
			em_num = GFAR_EM_NUM + 1;
			gfar_clear_exact_match(dev);
			idx = 1;
		} else {
			idx = 0;
			em_num = 0;
		}

		if (dev->mc_count == 0)
			return;

		/* Parse the list, and set the appropriate bits */
		for (mc_ptr = dev->mc_list; mc_ptr; mc_ptr = mc_ptr->next) {
			if (idx < em_num) {
				gfar_set_mac_for_addr(dev, idx,
						mc_ptr->dmi_addr);
				idx++;
			} else
				gfar_set_hash_for_addr(dev, mc_ptr->dmi_addr);
		}
	}

	return;
}

/* Clears each of the exact match registers to zero, so they
 * don't interfere with normal reception */
static void gfar_clear_exact_match(struct net_device *dev)
{
	int idx;
	u8 zero_arr[MAC_ADDR_LEN] = {0, 0, 0, 0, 0, 0};

	for (idx = 1; idx < GFAR_EM_NUM + 1; idx++)
		gfar_set_mac_for_addr(dev, idx, (u8 *)zero_arr);
}

/* Set the appropriate hash bit for the given addr */
/* The algorithm works like so:
 * 1) Take the Destination Address (ie the multicast address), and
 * do a CRC on it (little endian), and reverse the bits of the
 * result.
 * 2) Use the 8 most significant bits as a hash into a 256-entry
 * table.  The table is controlled through 8 32-bit registers:
 * gaddr0-7.  gaddr0's MSB is entry 0, and gaddr7's LSB is
 * entry 255.  This means that the 3 most significant bits in the
 * hash index indicate which gaddr register to use, and the 5 other bits
 * indicate which bit (assuming an IBM numbering scheme, which
 * for PowerPC (tm) is usually the case) in the register holds
 * the entry. */
static void gfar_set_hash_for_addr(struct net_device *dev, u8 *addr)
{
	u32 tempval;
	struct gfar_private *priv = netdev_priv(dev);
	u32 result = ether_crc(MAC_ADDR_LEN, addr);
	int width = priv->hash_width;
	u8 whichbit = (result >> (32 - width)) & 0x1f;
	u8 whichreg = result >> (32 - width + 5);
	u32 value = (1 << (31-whichbit));

	tempval = gfar_read(priv->hash_regs[whichreg]);
	tempval |= value;
	gfar_write(priv->hash_regs[whichreg], tempval);

	return;
}
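
/* Worked example of the hashing above (the numbers are chosen for
 * illustration, not taken from the original source): with
 * hash_width = 8 and an ether_crc() result of 0xD2xxxxxx, the top
 * 8 bits are 0xD2 = 0b11010010, so whichreg = 0b110 = 6 and
 * whichbit = 0b10010 = 18; the function then sets bit (31 - 18)
 * in hash_regs[6]. */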

/* There are multiple MAC Address register pairs on some controllers
 * This function sets the numth pair to a given address
 */
static void gfar_set_mac_for_addr(struct net_device *dev, int num, u8 *addr)
{
	struct gfar_private *priv = netdev_priv(dev);
	int idx;
	char tmpbuf[MAC_ADDR_LEN];
	u32 tempval;
	u32 __iomem *macptr = &priv->regs->macstnaddr1;

	macptr += num*2;

	/* Now copy it into the mac registers backwards, cuz */
	/* little endian is silly */
	for (idx = 0; idx < MAC_ADDR_LEN; idx++)
		tmpbuf[MAC_ADDR_LEN - 1 - idx] = addr[idx];

	gfar_write(macptr, *((u32 *) (tmpbuf)));

	tempval = *((u32 *) (tmpbuf + 4));

	gfar_write(macptr+1, tempval);
}

/* GFAR error interrupt handler */
static irqreturn_t gfar_error(int irq, void *dev_id)
{
	struct net_device *dev = dev_id;
	struct gfar_private *priv = netdev_priv(dev);

	/* Save ievent for future reference */
	u32 events = gfar_read(&priv->regs->ievent);

	/* Clear IEVENT */
	gfar_write(&priv->regs->ievent, events & IEVENT_ERR_MASK);

	/* Magic Packet is not an error. */
	if ((priv->einfo->device_flags & FSL_GIANFAR_DEV_HAS_MAGIC_PACKET) &&
	    (events & IEVENT_MAG))
		events &= ~IEVENT_MAG;

	if (netif_msg_rx_err(priv) || netif_msg_tx_err(priv))
		printk(KERN_DEBUG "%s: error interrupt (ievent=0x%08x imask=0x%08x)\n",
		       dev->name, events, gfar_read(&priv->regs->imask));

	/* Update the error counters */
	if (events & IEVENT_TXE) {
		dev->stats.tx_errors++;

		if (events & IEVENT_LC)
			dev->stats.tx_window_errors++;
		if (events & IEVENT_CRL)
			dev->stats.tx_aborted_errors++;
		if (events & IEVENT_XFUN) {
			if (netif_msg_tx_err(priv))
				printk(KERN_DEBUG "%s: TX FIFO underrun, "
				       "packet dropped.\n", dev->name);
			dev->stats.tx_dropped++;
			priv->extra_stats.tx_underrun++;

			/* Reactivate the Tx Queues */
			gfar_write(&priv->regs->tstat, TSTAT_CLEAR_THALT);
		}
		if (netif_msg_tx_err(priv))
			printk(KERN_DEBUG "%s: Transmit Error\n", dev->name);
	}
	if (events & IEVENT_BSY) {
		dev->stats.rx_errors++;
		priv->extra_stats.rx_bsy++;

		gfar_receive(irq, dev_id);

		if (netif_msg_rx_err(priv))
			printk(KERN_DEBUG "%s: busy error (rstat: %x)\n",
			       dev->name, gfar_read(&priv->regs->rstat));
	}
	if (events & IEVENT_BABR) {
		dev->stats.rx_errors++;
		priv->extra_stats.rx_babr++;

		if (netif_msg_rx_err(priv))
			printk(KERN_DEBUG "%s: babbling RX error\n", dev->name);
	}
	if (events & IEVENT_EBERR) {
		priv->extra_stats.eberr++;
		if (netif_msg_rx_err(priv))
			printk(KERN_DEBUG "%s: bus error\n", dev->name);
	}
	if ((events & IEVENT_RXC) && netif_msg_rx_status(priv))
		printk(KERN_DEBUG "%s: control frame\n", dev->name);

	if (events & IEVENT_BABT) {
		priv->extra_stats.tx_babt++;
		if (netif_msg_tx_err(priv))
			printk(KERN_DEBUG "%s: babbling TX error\n", dev->name);
	}
	return IRQ_HANDLED;
}

/* work with hotplug and coldplug */
MODULE_ALIAS("platform:fsl-gianfar");

/* Structure for a device driver */
static struct platform_driver gfar_driver = {
	.probe = gfar_probe,
	.remove = gfar_remove,
	.suspend = gfar_suspend,
	.resume = gfar_resume,
	.driver	= {
		.name = "fsl-gianfar",
		.owner = THIS_MODULE,
	},
};

static int __init gfar_init(void)
{
	int err = gfar_mdio_init();

	if (err)
		return err;

	err = platform_driver_register(&gfar_driver);

	if (err)
		gfar_mdio_exit();

	return err;
}

static void __exit gfar_exit(void)
{
	platform_driver_unregister(&gfar_driver);
	gfar_mdio_exit();
}

module_init(gfar_init);
module_exit(gfar_exit);