/*
 * drivers/net/gianfar.c
 *
 * Gianfar Ethernet Driver
 * This driver is designed for the non-CPM ethernet controllers
 * on the 85xx and 83xx family of integrated processors
 * Based on 8260_io/fcc_enet.c
 *
 * Author: Andy Fleming
 * Maintainer: Kumar Gala
 *
 * Copyright (c) 2002-2006 Freescale Semiconductor, Inc.
 * Copyright (c) 2007 MontaVista Software, Inc.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or (at your
 * option) any later version.
 *
 * Gianfar: AKA Lambda Draconis, "Dragon"
 * RA 11 31 24.2
 * Dec +69 19 52
 * V 3.84
 * B-V +1.62
 *
 * Theory of operation
 *
 * The driver is initialized through of_device. Configuration information
 * is therefore conveyed through an OF-style device tree.
 *
 * The Gianfar Ethernet Controller uses a ring of buffer
 * descriptors. The beginning is indicated by a register
 * pointing to the physical address of the start of the ring.
 * The end is determined by a "wrap" bit being set in the
 * last descriptor of the ring.
 *
 * When a packet is received, the RXF bit in the
 * IEVENT register is set, triggering an interrupt when the
 * corresponding bit in the IMASK register is also set (if
 * interrupt coalescing is active, then the interrupt may not
 * happen immediately, but will wait until either a set number
 * of frames or amount of time have passed). In NAPI, the
 * interrupt handler will signal there is work to be done, and
 * exit. This method will start at the last known empty
 * descriptor, and process every subsequent descriptor until there
 * are none left with data (NAPI will stop after a set number of
 * packets to give time to other tasks, but will eventually
 * process all the packets). The data arrives inside a
 * pre-allocated skb, and so after the skb is passed up to the
 * stack, a new skb must be allocated, and the address field in
 * the buffer descriptor must be updated to indicate this new
 * skb.
 *
 * When the kernel requests that a packet be transmitted, the
 * driver starts where it left off last time, and points the
 * descriptor at the buffer which was passed in. The driver
 * then informs the DMA engine that there are packets ready to
 * be transmitted. Once the controller is finished transmitting
 * the packet, an interrupt may be triggered (under the same
 * conditions as for reception, but depending on the TXF bit).
 * The driver then cleans up the buffer.
 */
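
/*
 * Illustrative note (not part of the original driver): because the end of
 * each ring is marked by a "wrap" bit, a consumer can walk the descriptors
 * without modulo arithmetic on the hot path, e.g.
 *
 *	bdp = (bdp->status & RXBD_WRAP) ? priv->rx_bd_base : bdp + 1;
 *
 * The TX cleanup path below instead wraps by comparing against the ring
 * size; see skip_txbd()/next_txbd() further down in this file.
 */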

#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/unistd.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/if_vlan.h>
#include <linux/spinlock.h>
#include <linux/mm.h>
#include <linux/of_platform.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/in.h>

#include <asm/io.h>
#include <asm/irq.h>
#include <asm/uaccess.h>
#include <linux/module.h>
#include <linux/dma-mapping.h>
#include <linux/crc32.h>
#include <linux/mii.h>
#include <linux/phy.h>
#include <linux/phy_fixed.h>
#include <linux/of.h>

#include "gianfar.h"
#include "fsl_pq_mdio.h"

#define TX_TIMEOUT	(1*HZ)
#undef BRIEF_GFAR_ERRORS
#undef VERBOSE_GFAR_ERRORS

const char gfar_driver_name[] = "Gianfar Ethernet";
const char gfar_driver_version[] = "1.3";

static int gfar_enet_open(struct net_device *dev);
static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev);
static void gfar_reset_task(struct work_struct *work);
static void gfar_timeout(struct net_device *dev);
static int gfar_close(struct net_device *dev);
struct sk_buff *gfar_new_skb(struct net_device *dev);
static void gfar_new_rxbdp(struct net_device *dev, struct rxbd8 *bdp,
		struct sk_buff *skb);
static int gfar_set_mac_address(struct net_device *dev);
static int gfar_change_mtu(struct net_device *dev, int new_mtu);
static irqreturn_t gfar_error(int irq, void *dev_id);
static irqreturn_t gfar_transmit(int irq, void *dev_id);
static irqreturn_t gfar_interrupt(int irq, void *dev_id);
static void adjust_link(struct net_device *dev);
static void init_registers(struct net_device *dev);
static int init_phy(struct net_device *dev);
static int gfar_probe(struct of_device *ofdev,
		const struct of_device_id *match);
static int gfar_remove(struct of_device *ofdev);
static void free_skb_resources(struct gfar_private *priv);
static void gfar_set_multi(struct net_device *dev);
static void gfar_set_hash_for_addr(struct net_device *dev, u8 *addr);
static void gfar_configure_serdes(struct net_device *dev);
static int gfar_poll(struct napi_struct *napi, int budget);
#ifdef CONFIG_NET_POLL_CONTROLLER
static void gfar_netpoll(struct net_device *dev);
#endif
int gfar_clean_rx_ring(struct net_device *dev, int rx_work_limit);
static int gfar_clean_tx_ring(struct net_device *dev);
static int gfar_process_frame(struct net_device *dev, struct sk_buff *skb,
			      int amount_pull);
static void gfar_vlan_rx_register(struct net_device *netdev,
		struct vlan_group *grp);
void gfar_halt(struct net_device *dev);
static void gfar_halt_nodisable(struct net_device *dev);
void gfar_start(struct net_device *dev);
static void gfar_clear_exact_match(struct net_device *dev);
static void gfar_set_mac_for_addr(struct net_device *dev, int num, u8 *addr);

MODULE_AUTHOR("Freescale Semiconductor, Inc");
MODULE_DESCRIPTION("Gianfar Ethernet Driver");
MODULE_LICENSE("GPL");

/* Returns 1 if incoming frames use an FCB */
static inline int gfar_uses_fcb(struct gfar_private *priv)
{
	return priv->vlgrp || priv->rx_csum_enable;
}

static int gfar_of_init(struct net_device *dev)
{
	struct device_node *phy, *mdio;
	const unsigned int *id;
	const char *model;
	const char *ctype;
	const void *mac_addr;
	const phandle *ph;
	u64 addr, size;
	int err = 0;
	struct gfar_private *priv = netdev_priv(dev);
	struct device_node *np = priv->node;
	char bus_name[MII_BUS_ID_SIZE];
	const u32 *stash;
	const u32 *stash_len;
	const u32 *stash_idx;

	if (!np || !of_device_is_available(np))
		return -ENODEV;

	/* get a pointer to the register memory */
	addr = of_translate_address(np, of_get_address(np, 0, &size, NULL));
	priv->regs = ioremap(addr, size);

	if (priv->regs == NULL)
		return -ENOMEM;

	priv->interruptTransmit = irq_of_parse_and_map(np, 0);

	model = of_get_property(np, "model", NULL);

	/* If we aren't the FEC we have multiple interrupts */
	if (model && strcasecmp(model, "FEC")) {
		priv->interruptReceive = irq_of_parse_and_map(np, 1);

		priv->interruptError = irq_of_parse_and_map(np, 2);

		if (priv->interruptTransmit < 0 ||
				priv->interruptReceive < 0 ||
				priv->interruptError < 0) {
			err = -EINVAL;
			goto err_out;
		}
	}

	stash = of_get_property(np, "bd-stash", NULL);

	if (stash) {
		priv->device_flags |= FSL_GIANFAR_DEV_HAS_BD_STASHING;
		priv->bd_stash_en = 1;
	}

	stash_len = of_get_property(np, "rx-stash-len", NULL);

	if (stash_len)
		priv->rx_stash_size = *stash_len;

	stash_idx = of_get_property(np, "rx-stash-idx", NULL);

	if (stash_idx)
		priv->rx_stash_index = *stash_idx;

	if (stash_len || stash_idx)
		priv->device_flags |= FSL_GIANFAR_DEV_HAS_BUF_STASHING;

	mac_addr = of_get_mac_address(np);
	if (mac_addr)
		memcpy(dev->dev_addr, mac_addr, MAC_ADDR_LEN);

	if (model && !strcasecmp(model, "TSEC"))
		priv->device_flags =
			FSL_GIANFAR_DEV_HAS_GIGABIT |
			FSL_GIANFAR_DEV_HAS_COALESCE |
			FSL_GIANFAR_DEV_HAS_RMON |
			FSL_GIANFAR_DEV_HAS_MULTI_INTR;
	if (model && !strcasecmp(model, "eTSEC"))
		priv->device_flags =
			FSL_GIANFAR_DEV_HAS_GIGABIT |
			FSL_GIANFAR_DEV_HAS_COALESCE |
			FSL_GIANFAR_DEV_HAS_RMON |
			FSL_GIANFAR_DEV_HAS_MULTI_INTR |
			FSL_GIANFAR_DEV_HAS_PADDING |
			FSL_GIANFAR_DEV_HAS_CSUM |
			FSL_GIANFAR_DEV_HAS_VLAN |
			FSL_GIANFAR_DEV_HAS_MAGIC_PACKET |
			FSL_GIANFAR_DEV_HAS_EXTENDED_HASH;

	ctype = of_get_property(np, "phy-connection-type", NULL);

	/* We only care about rgmii-id. The rest are autodetected */
	if (ctype && !strcmp(ctype, "rgmii-id"))
		priv->interface = PHY_INTERFACE_MODE_RGMII_ID;
	else
		priv->interface = PHY_INTERFACE_MODE_MII;

	if (of_get_property(np, "fsl,magic-packet", NULL))
		priv->device_flags |= FSL_GIANFAR_DEV_HAS_MAGIC_PACKET;

	ph = of_get_property(np, "phy-handle", NULL);
	if (ph == NULL) {
		u32 *fixed_link;

		fixed_link = (u32 *)of_get_property(np, "fixed-link", NULL);
		if (!fixed_link) {
			err = -ENODEV;
			goto err_out;
		}

		snprintf(priv->phy_bus_id, sizeof(priv->phy_bus_id),
				PHY_ID_FMT, "0", fixed_link[0]);
	} else {
		phy = of_find_node_by_phandle(*ph);

		if (phy == NULL) {
			err = -ENODEV;
			goto err_out;
		}

		mdio = of_get_parent(phy);

		id = of_get_property(phy, "reg", NULL);

		of_node_put(phy);
		of_node_put(mdio);

		fsl_pq_mdio_bus_name(bus_name, mdio);
		snprintf(priv->phy_bus_id, sizeof(priv->phy_bus_id), "%s:%02x",
				bus_name, *id);
	}

	/* Find the TBI PHY. If it's not there, we don't support SGMII */
	ph = of_get_property(np, "tbi-handle", NULL);
	if (ph) {
		struct device_node *tbi = of_find_node_by_phandle(*ph);
		struct of_device *ofdev;
		struct mii_bus *bus;

		if (!tbi)
			return 0;

		mdio = of_get_parent(tbi);
		if (!mdio)
			return 0;

		ofdev = of_find_device_by_node(mdio);

		of_node_put(mdio);

		id = of_get_property(tbi, "reg", NULL);
		if (!id)
			return 0;

		of_node_put(tbi);

		bus = dev_get_drvdata(&ofdev->dev);

		priv->tbiphy = bus->phy_map[*id];
	}

	return 0;

err_out:
	iounmap(priv->regs);
	return err;
}

/* Ioctl MII Interface */
static int gfar_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
{
	struct gfar_private *priv = netdev_priv(dev);

	if (!netif_running(dev))
		return -EINVAL;

	if (!priv->phydev)
		return -ENODEV;

	return phy_mii_ioctl(priv->phydev, if_mii(rq), cmd);
}

/* Set up the ethernet device structure, private data,
 * and anything else we need before we start */
static int gfar_probe(struct of_device *ofdev,
		const struct of_device_id *match)
{
	u32 tempval;
	struct net_device *dev = NULL;
	struct gfar_private *priv = NULL;
	DECLARE_MAC_BUF(mac);
	int err = 0;
	int len_devname;

	/* Create an ethernet device instance */
	dev = alloc_etherdev(sizeof (*priv));

	if (NULL == dev)
		return -ENOMEM;

	priv = netdev_priv(dev);
	priv->dev = dev;
	priv->node = ofdev->node;

	err = gfar_of_init(dev);

	if (err)
		goto regs_fail;

	spin_lock_init(&priv->txlock);
	spin_lock_init(&priv->rxlock);
	spin_lock_init(&priv->bflock);
	INIT_WORK(&priv->reset_task, gfar_reset_task);

	dev_set_drvdata(&ofdev->dev, priv);

	/* Stop the DMA engine now, in case it was running before */
	/* (The firmware could have used it, and left it running). */
	gfar_halt(dev);

	/* Reset MAC layer */
	gfar_write(&priv->regs->maccfg1, MACCFG1_SOFT_RESET);

	/* We need to delay at least 3 TX clocks */
	udelay(2);

	tempval = (MACCFG1_TX_FLOW | MACCFG1_RX_FLOW);
	gfar_write(&priv->regs->maccfg1, tempval);

	/* Initialize MACCFG2. */
	gfar_write(&priv->regs->maccfg2, MACCFG2_INIT_SETTINGS);

	/* Initialize ECNTRL */
	gfar_write(&priv->regs->ecntrl, ECNTRL_INIT_SETTINGS);

	/* Set the dev->base_addr to the gfar reg region */
	dev->base_addr = (unsigned long) (priv->regs);

	SET_NETDEV_DEV(dev, &ofdev->dev);

	/* Fill in the dev structure */
	dev->open = gfar_enet_open;
	dev->hard_start_xmit = gfar_start_xmit;
	dev->tx_timeout = gfar_timeout;
	dev->watchdog_timeo = TX_TIMEOUT;
	netif_napi_add(dev, &priv->napi, gfar_poll, GFAR_DEV_WEIGHT);
#ifdef CONFIG_NET_POLL_CONTROLLER
	dev->poll_controller = gfar_netpoll;
#endif
	dev->stop = gfar_close;
	dev->change_mtu = gfar_change_mtu;
	dev->mtu = 1500;
	dev->set_multicast_list = gfar_set_multi;

	dev->ethtool_ops = &gfar_ethtool_ops;
	dev->do_ioctl = gfar_ioctl;

	if (priv->device_flags & FSL_GIANFAR_DEV_HAS_CSUM) {
		priv->rx_csum_enable = 1;
		dev->features |= NETIF_F_IP_CSUM | NETIF_F_SG | NETIF_F_HIGHDMA;
	} else
		priv->rx_csum_enable = 0;

	priv->vlgrp = NULL;

	if (priv->device_flags & FSL_GIANFAR_DEV_HAS_VLAN) {
		dev->vlan_rx_register = gfar_vlan_rx_register;

		dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
	}

	if (priv->device_flags & FSL_GIANFAR_DEV_HAS_EXTENDED_HASH) {
		priv->extended_hash = 1;
		priv->hash_width = 9;

		priv->hash_regs[0] = &priv->regs->igaddr0;
		priv->hash_regs[1] = &priv->regs->igaddr1;
		priv->hash_regs[2] = &priv->regs->igaddr2;
		priv->hash_regs[3] = &priv->regs->igaddr3;
		priv->hash_regs[4] = &priv->regs->igaddr4;
		priv->hash_regs[5] = &priv->regs->igaddr5;
		priv->hash_regs[6] = &priv->regs->igaddr6;
		priv->hash_regs[7] = &priv->regs->igaddr7;
		priv->hash_regs[8] = &priv->regs->gaddr0;
		priv->hash_regs[9] = &priv->regs->gaddr1;
		priv->hash_regs[10] = &priv->regs->gaddr2;
		priv->hash_regs[11] = &priv->regs->gaddr3;
		priv->hash_regs[12] = &priv->regs->gaddr4;
		priv->hash_regs[13] = &priv->regs->gaddr5;
		priv->hash_regs[14] = &priv->regs->gaddr6;
		priv->hash_regs[15] = &priv->regs->gaddr7;

	} else {
		priv->extended_hash = 0;
		priv->hash_width = 8;

		priv->hash_regs[0] = &priv->regs->gaddr0;
		priv->hash_regs[1] = &priv->regs->gaddr1;
		priv->hash_regs[2] = &priv->regs->gaddr2;
		priv->hash_regs[3] = &priv->regs->gaddr3;
		priv->hash_regs[4] = &priv->regs->gaddr4;
		priv->hash_regs[5] = &priv->regs->gaddr5;
		priv->hash_regs[6] = &priv->regs->gaddr6;
		priv->hash_regs[7] = &priv->regs->gaddr7;
	}

	if (priv->device_flags & FSL_GIANFAR_DEV_HAS_PADDING)
		priv->padding = DEFAULT_PADDING;
	else
		priv->padding = 0;

	if (dev->features & NETIF_F_IP_CSUM)
		dev->hard_header_len += GMAC_FCB_LEN;

	priv->rx_buffer_size = DEFAULT_RX_BUFFER_SIZE;
	priv->tx_ring_size = DEFAULT_TX_RING_SIZE;
	priv->rx_ring_size = DEFAULT_RX_RING_SIZE;
	priv->num_txbdfree = DEFAULT_TX_RING_SIZE;

	priv->txcoalescing = DEFAULT_TX_COALESCE;
	priv->txic = DEFAULT_TXIC;
	priv->rxcoalescing = DEFAULT_RX_COALESCE;
	priv->rxic = DEFAULT_RXIC;

	/* Enable most messages by default */
	priv->msg_enable = (NETIF_MSG_IFUP << 1) - 1;
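	/*
	 * Illustrative note (not in the original source): NETIF_MSG_IFUP is
	 * 0x0020, so (NETIF_MSG_IFUP << 1) - 1 == 0x003f, i.e. every message
	 * level up to and including IFUP is enabled.
	 */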

	/* Carrier starts down, phylib will bring it up */
	netif_carrier_off(dev);

	err = register_netdev(dev);

	if (err) {
		printk(KERN_ERR "%s: Cannot register net device, aborting.\n",
				dev->name);
		goto register_fail;
	}

	device_init_wakeup(&dev->dev,
		priv->device_flags & FSL_GIANFAR_DEV_HAS_MAGIC_PACKET);

	/* fill out IRQ number and name fields */
	len_devname = strlen(dev->name);
	strncpy(&priv->int_name_tx[0], dev->name, len_devname);
	if (priv->device_flags & FSL_GIANFAR_DEV_HAS_MULTI_INTR) {
		strncpy(&priv->int_name_tx[len_devname],
					"_tx", sizeof("_tx") + 1);

		strncpy(&priv->int_name_rx[0], dev->name, len_devname);
		strncpy(&priv->int_name_rx[len_devname],
					"_rx", sizeof("_rx") + 1);

		strncpy(&priv->int_name_er[0], dev->name, len_devname);
		strncpy(&priv->int_name_er[len_devname],
					"_er", sizeof("_er") + 1);
	} else
		priv->int_name_tx[len_devname] = '\0';

	/* Create all the sysfs files */
	gfar_init_sysfs(dev);

	/* Print out the device info */
	printk(KERN_INFO DEVICE_NAME "%pM\n", dev->name, dev->dev_addr);

	/* Even more device info helps when determining which kernel */
	/* provided which set of benchmarks. */
	printk(KERN_INFO "%s: Running with NAPI enabled\n", dev->name);
	printk(KERN_INFO "%s: %d/%d RX/TX BD ring size\n",
	       dev->name, priv->rx_ring_size, priv->tx_ring_size);

	return 0;

register_fail:
	iounmap(priv->regs);
regs_fail:
	free_netdev(dev);
	return err;
}

static int gfar_remove(struct of_device *ofdev)
{
	struct gfar_private *priv = dev_get_drvdata(&ofdev->dev);

	dev_set_drvdata(&ofdev->dev, NULL);

	iounmap(priv->regs);
	free_netdev(priv->dev);

	return 0;
}

#ifdef CONFIG_PM
static int gfar_suspend(struct of_device *ofdev, pm_message_t state)
{
	struct gfar_private *priv = dev_get_drvdata(&ofdev->dev);
	struct net_device *dev = priv->dev;
	unsigned long flags;
	u32 tempval;

	int magic_packet = priv->wol_en &&
		(priv->device_flags & FSL_GIANFAR_DEV_HAS_MAGIC_PACKET);

	netif_device_detach(dev);

	if (netif_running(dev)) {
		spin_lock_irqsave(&priv->txlock, flags);
		spin_lock(&priv->rxlock);

		gfar_halt_nodisable(dev);

		/* Disable Tx, and Rx if wake-on-LAN is disabled. */
		tempval = gfar_read(&priv->regs->maccfg1);

		tempval &= ~MACCFG1_TX_EN;

		if (!magic_packet)
			tempval &= ~MACCFG1_RX_EN;

		gfar_write(&priv->regs->maccfg1, tempval);

		spin_unlock(&priv->rxlock);
		spin_unlock_irqrestore(&priv->txlock, flags);

		napi_disable(&priv->napi);

		if (magic_packet) {
			/* Enable interrupt on Magic Packet */
			gfar_write(&priv->regs->imask, IMASK_MAG);

			/* Enable Magic Packet mode */
			tempval = gfar_read(&priv->regs->maccfg2);
			tempval |= MACCFG2_MPEN;
			gfar_write(&priv->regs->maccfg2, tempval);
		} else {
			phy_stop(priv->phydev);
		}
	}

	return 0;
}

static int gfar_resume(struct of_device *ofdev)
{
	struct gfar_private *priv = dev_get_drvdata(&ofdev->dev);
	struct net_device *dev = priv->dev;
	unsigned long flags;
	u32 tempval;
	int magic_packet = priv->wol_en &&
		(priv->device_flags & FSL_GIANFAR_DEV_HAS_MAGIC_PACKET);

	if (!netif_running(dev)) {
		netif_device_attach(dev);
		return 0;
	}

	if (!magic_packet && priv->phydev)
		phy_start(priv->phydev);

	/* Disable Magic Packet mode, in case something
	 * else woke us up.
	 */

	spin_lock_irqsave(&priv->txlock, flags);
	spin_lock(&priv->rxlock);

	tempval = gfar_read(&priv->regs->maccfg2);
	tempval &= ~MACCFG2_MPEN;
	gfar_write(&priv->regs->maccfg2, tempval);

	gfar_start(dev);

	spin_unlock(&priv->rxlock);
	spin_unlock_irqrestore(&priv->txlock, flags);

	netif_device_attach(dev);

	napi_enable(&priv->napi);

	return 0;
}
#else
#define gfar_suspend NULL
#define gfar_resume NULL
#endif

/* Reads the controller's registers to determine what interface
 * connects it to the PHY.
 */
static phy_interface_t gfar_get_interface(struct net_device *dev)
{
	struct gfar_private *priv = netdev_priv(dev);
	u32 ecntrl = gfar_read(&priv->regs->ecntrl);

	if (ecntrl & ECNTRL_SGMII_MODE)
		return PHY_INTERFACE_MODE_SGMII;

	if (ecntrl & ECNTRL_TBI_MODE) {
		if (ecntrl & ECNTRL_REDUCED_MODE)
			return PHY_INTERFACE_MODE_RTBI;
		else
			return PHY_INTERFACE_MODE_TBI;
	}

	if (ecntrl & ECNTRL_REDUCED_MODE) {
		if (ecntrl & ECNTRL_REDUCED_MII_MODE)
			return PHY_INTERFACE_MODE_RMII;
		else {
			phy_interface_t interface = priv->interface;

			/*
			 * This isn't autodetected right now, so it must
			 * be set by the device tree or platform code.
			 */
			if (interface == PHY_INTERFACE_MODE_RGMII_ID)
				return PHY_INTERFACE_MODE_RGMII_ID;

			return PHY_INTERFACE_MODE_RGMII;
		}
	}

	if (priv->device_flags & FSL_GIANFAR_DEV_HAS_GIGABIT)
		return PHY_INTERFACE_MODE_GMII;

	return PHY_INTERFACE_MODE_MII;
}


/* Initializes driver's PHY state, and attaches to the PHY.
 * Returns 0 on success.
 */
static int init_phy(struct net_device *dev)
{
	struct gfar_private *priv = netdev_priv(dev);
	uint gigabit_support =
		priv->device_flags & FSL_GIANFAR_DEV_HAS_GIGABIT ?
		SUPPORTED_1000baseT_Full : 0;
	struct phy_device *phydev;
	phy_interface_t interface;

	priv->oldlink = 0;
	priv->oldspeed = 0;
	priv->oldduplex = -1;

	interface = gfar_get_interface(dev);

	phydev = phy_connect(dev, priv->phy_bus_id, &adjust_link, 0, interface);

	if (interface == PHY_INTERFACE_MODE_SGMII)
		gfar_configure_serdes(dev);

	if (IS_ERR(phydev)) {
		printk(KERN_ERR "%s: Could not attach to PHY\n", dev->name);
		return PTR_ERR(phydev);
	}

	/* Remove any features not supported by the controller */
	phydev->supported &= (GFAR_SUPPORTED | gigabit_support);
	phydev->advertising = phydev->supported;

	priv->phydev = phydev;

	return 0;
}

/*
 * Initialize TBI PHY interface for communicating with the
 * SERDES lynx PHY on the chip. We communicate with this PHY
 * through the MDIO bus on each controller, treating it as a
 * "normal" PHY at the address found in the TBIPA register. We assume
 * that the TBIPA register is valid. Either the MDIO bus code will set
 * it to a value that doesn't conflict with other PHYs on the bus, or the
 * value doesn't matter, as there are no other PHYs on the bus.
 */
static void gfar_configure_serdes(struct net_device *dev)
{
	struct gfar_private *priv = netdev_priv(dev);

	if (!priv->tbiphy) {
		printk(KERN_WARNING "SGMII mode requires that the device "
				"tree specify a tbi-handle\n");
		return;
	}

	/*
	 * If the link is already up, we must already be ok, and don't need to
	 * configure and reset the TBI<->SerDes link. Maybe U-Boot configured
	 * everything for us? Resetting it takes the link down and requires
	 * several seconds for it to come back.
	 */
	if (phy_read(priv->tbiphy, MII_BMSR) & BMSR_LSTATUS)
		return;

	/* Single clk mode, mii mode off(for serdes communication) */
	phy_write(priv->tbiphy, MII_TBICON, TBICON_CLK_SELECT);

	phy_write(priv->tbiphy, MII_ADVERTISE,
			ADVERTISE_1000XFULL | ADVERTISE_1000XPAUSE |
			ADVERTISE_1000XPSE_ASYM);

	phy_write(priv->tbiphy, MII_BMCR, BMCR_ANENABLE |
			BMCR_ANRESTART | BMCR_FULLDPLX | BMCR_SPEED1000);
}

static void init_registers(struct net_device *dev)
{
	struct gfar_private *priv = netdev_priv(dev);

	/* Clear IEVENT */
	gfar_write(&priv->regs->ievent, IEVENT_INIT_CLEAR);

	/* Initialize IMASK */
	gfar_write(&priv->regs->imask, IMASK_INIT_CLEAR);

	/* Init hash registers to zero */
	gfar_write(&priv->regs->igaddr0, 0);
	gfar_write(&priv->regs->igaddr1, 0);
	gfar_write(&priv->regs->igaddr2, 0);
	gfar_write(&priv->regs->igaddr3, 0);
	gfar_write(&priv->regs->igaddr4, 0);
	gfar_write(&priv->regs->igaddr5, 0);
	gfar_write(&priv->regs->igaddr6, 0);
	gfar_write(&priv->regs->igaddr7, 0);

	gfar_write(&priv->regs->gaddr0, 0);
	gfar_write(&priv->regs->gaddr1, 0);
	gfar_write(&priv->regs->gaddr2, 0);
	gfar_write(&priv->regs->gaddr3, 0);
	gfar_write(&priv->regs->gaddr4, 0);
	gfar_write(&priv->regs->gaddr5, 0);
	gfar_write(&priv->regs->gaddr6, 0);
	gfar_write(&priv->regs->gaddr7, 0);

	/* Zero out the rmon mib registers if it has them */
	if (priv->device_flags & FSL_GIANFAR_DEV_HAS_RMON) {
		memset_io(&(priv->regs->rmon), 0, sizeof (struct rmon_mib));

		/* Mask off the CAM interrupts */
		gfar_write(&priv->regs->rmon.cam1, 0xffffffff);
		gfar_write(&priv->regs->rmon.cam2, 0xffffffff);
	}

	/* Initialize the max receive buffer length */
	gfar_write(&priv->regs->mrblr, priv->rx_buffer_size);

	/* Initialize the Minimum Frame Length Register */
	gfar_write(&priv->regs->minflr, MINFLR_INIT_SETTINGS);
}


/* Halt the receive and transmit queues */
static void gfar_halt_nodisable(struct net_device *dev)
{
	struct gfar_private *priv = netdev_priv(dev);
	struct gfar __iomem *regs = priv->regs;
	u32 tempval;

	/* Mask all interrupts */
	gfar_write(&regs->imask, IMASK_INIT_CLEAR);

	/* Clear all interrupts */
	gfar_write(&regs->ievent, IEVENT_INIT_CLEAR);

	/* Stop the DMA, and wait for it to stop */
	tempval = gfar_read(&priv->regs->dmactrl);
	if ((tempval & (DMACTRL_GRS | DMACTRL_GTS))
	    != (DMACTRL_GRS | DMACTRL_GTS)) {
		tempval |= (DMACTRL_GRS | DMACTRL_GTS);
		gfar_write(&priv->regs->dmactrl, tempval);

		while (!(gfar_read(&priv->regs->ievent) &
			 (IEVENT_GRSC | IEVENT_GTSC)))
			cpu_relax();
	}
}

/* Halt the receive and transmit queues */
void gfar_halt(struct net_device *dev)
{
	struct gfar_private *priv = netdev_priv(dev);
	struct gfar __iomem *regs = priv->regs;
	u32 tempval;

	gfar_halt_nodisable(dev);

	/* Disable Rx and Tx */
	tempval = gfar_read(&regs->maccfg1);
	tempval &= ~(MACCFG1_RX_EN | MACCFG1_TX_EN);
	gfar_write(&regs->maccfg1, tempval);
}

void stop_gfar(struct net_device *dev)
{
	struct gfar_private *priv = netdev_priv(dev);
	struct gfar __iomem *regs = priv->regs;
	unsigned long flags;

	phy_stop(priv->phydev);

	/* Lock it down */
	spin_lock_irqsave(&priv->txlock, flags);
	spin_lock(&priv->rxlock);

	gfar_halt(dev);

	spin_unlock(&priv->rxlock);
	spin_unlock_irqrestore(&priv->txlock, flags);

	/* Free the IRQs */
	if (priv->device_flags & FSL_GIANFAR_DEV_HAS_MULTI_INTR) {
		free_irq(priv->interruptError, dev);
		free_irq(priv->interruptTransmit, dev);
		free_irq(priv->interruptReceive, dev);
	} else {
		free_irq(priv->interruptTransmit, dev);
	}

	free_skb_resources(priv);

	dma_free_coherent(&dev->dev,
			sizeof(struct txbd8)*priv->tx_ring_size
			+ sizeof(struct rxbd8)*priv->rx_ring_size,
			priv->tx_bd_base,
			gfar_read(&regs->tbase0));
}

/* If there are any tx skbs or rx skbs still around, free them.
 * Then free tx_skbuff and rx_skbuff */
static void free_skb_resources(struct gfar_private *priv)
{
	struct rxbd8 *rxbdp;
	struct txbd8 *txbdp;
	int i, j;

	/* Go through all the buffer descriptors and free their data buffers */
	txbdp = priv->tx_bd_base;

	for (i = 0; i < priv->tx_ring_size; i++) {
		if (!priv->tx_skbuff[i])
			continue;

		dma_unmap_single(&priv->dev->dev, txbdp->bufPtr,
				txbdp->length, DMA_TO_DEVICE);
		txbdp->lstatus = 0;
		for (j = 0; j < skb_shinfo(priv->tx_skbuff[i])->nr_frags; j++) {
			txbdp++;
			dma_unmap_page(&priv->dev->dev, txbdp->bufPtr,
					txbdp->length, DMA_TO_DEVICE);
		}
		txbdp++;
		dev_kfree_skb_any(priv->tx_skbuff[i]);
		priv->tx_skbuff[i] = NULL;
	}

	kfree(priv->tx_skbuff);

	rxbdp = priv->rx_bd_base;

	/* rx_skbuff is not guaranteed to be allocated, so only
	 * free it and its contents if it is allocated */
	if (priv->rx_skbuff != NULL) {
		for (i = 0; i < priv->rx_ring_size; i++) {
			if (priv->rx_skbuff[i]) {
				dma_unmap_single(&priv->dev->dev, rxbdp->bufPtr,
						priv->rx_buffer_size,
						DMA_FROM_DEVICE);

				dev_kfree_skb_any(priv->rx_skbuff[i]);
				priv->rx_skbuff[i] = NULL;
			}

			rxbdp->lstatus = 0;
			rxbdp->bufPtr = 0;

			rxbdp++;
		}

		kfree(priv->rx_skbuff);
	}
}

void gfar_start(struct net_device *dev)
{
	struct gfar_private *priv = netdev_priv(dev);
	struct gfar __iomem *regs = priv->regs;
	u32 tempval;

	/* Enable Rx and Tx in MACCFG1 */
	tempval = gfar_read(&regs->maccfg1);
	tempval |= (MACCFG1_RX_EN | MACCFG1_TX_EN);
	gfar_write(&regs->maccfg1, tempval);

	/* Initialize DMACTRL to have WWR and WOP */
	tempval = gfar_read(&priv->regs->dmactrl);
	tempval |= DMACTRL_INIT_SETTINGS;
	gfar_write(&priv->regs->dmactrl, tempval);

	/* Make sure we aren't stopped */
	tempval = gfar_read(&priv->regs->dmactrl);
	tempval &= ~(DMACTRL_GRS | DMACTRL_GTS);
	gfar_write(&priv->regs->dmactrl, tempval);

	/* Clear THLT/RHLT, so that the DMA starts polling now */
	gfar_write(&regs->tstat, TSTAT_CLEAR_THALT);
	gfar_write(&regs->rstat, RSTAT_CLEAR_RHALT);

	/* Unmask the interrupts we look for */
	gfar_write(&regs->imask, IMASK_DEFAULT);

	dev->trans_start = jiffies;
}

/* Bring the controller up and running */
int startup_gfar(struct net_device *dev)
{
	struct txbd8 *txbdp;
	struct rxbd8 *rxbdp;
	dma_addr_t addr = 0;
	unsigned long vaddr;
	int i;
	struct gfar_private *priv = netdev_priv(dev);
	struct gfar __iomem *regs = priv->regs;
	int err = 0;
	u32 rctrl = 0;
	u32 attrs = 0;

	gfar_write(&regs->imask, IMASK_INIT_CLEAR);

	/* Allocate memory for the buffer descriptors */
	vaddr = (unsigned long) dma_alloc_coherent(&dev->dev,
			sizeof (struct txbd8) * priv->tx_ring_size +
			sizeof (struct rxbd8) * priv->rx_ring_size,
			&addr, GFP_KERNEL);

	if (vaddr == 0) {
		if (netif_msg_ifup(priv))
			printk(KERN_ERR "%s: Could not allocate buffer descriptors!\n",
					dev->name);
		return -ENOMEM;
	}

	priv->tx_bd_base = (struct txbd8 *) vaddr;

	/* enet DMA only understands physical addresses */
	gfar_write(&regs->tbase0, addr);

	/* Start the rx descriptor ring where the tx ring leaves off */
	addr = addr + sizeof (struct txbd8) * priv->tx_ring_size;
	vaddr = vaddr + sizeof (struct txbd8) * priv->tx_ring_size;
	priv->rx_bd_base = (struct rxbd8 *) vaddr;
	gfar_write(&regs->rbase0, addr);

	/* Setup the skbuff rings */
	priv->tx_skbuff =
	    (struct sk_buff **) kmalloc(sizeof (struct sk_buff *) *
					priv->tx_ring_size, GFP_KERNEL);

	if (NULL == priv->tx_skbuff) {
		if (netif_msg_ifup(priv))
			printk(KERN_ERR "%s: Could not allocate tx_skbuff\n",
					dev->name);
		err = -ENOMEM;
		goto tx_skb_fail;
	}

	for (i = 0; i < priv->tx_ring_size; i++)
		priv->tx_skbuff[i] = NULL;

	priv->rx_skbuff =
	    (struct sk_buff **) kmalloc(sizeof (struct sk_buff *) *
					priv->rx_ring_size, GFP_KERNEL);

	if (NULL == priv->rx_skbuff) {
		if (netif_msg_ifup(priv))
			printk(KERN_ERR "%s: Could not allocate rx_skbuff\n",
					dev->name);
		err = -ENOMEM;
		goto rx_skb_fail;
	}

	for (i = 0; i < priv->rx_ring_size; i++)
		priv->rx_skbuff[i] = NULL;

	/* Initialize some variables in our dev structure */
	priv->num_txbdfree = priv->tx_ring_size;
	priv->dirty_tx = priv->cur_tx = priv->tx_bd_base;
	priv->cur_rx = priv->rx_bd_base;
	priv->skb_curtx = priv->skb_dirtytx = 0;
	priv->skb_currx = 0;

	/* Initialize Transmit Descriptor Ring */
	txbdp = priv->tx_bd_base;
	for (i = 0; i < priv->tx_ring_size; i++) {
		txbdp->lstatus = 0;
		txbdp->bufPtr = 0;
		txbdp++;
	}

	/* Set the last descriptor in the ring to indicate wrap */
	txbdp--;
	txbdp->status |= TXBD_WRAP;

	rxbdp = priv->rx_bd_base;
	for (i = 0; i < priv->rx_ring_size; i++) {
		struct sk_buff *skb;

		skb = gfar_new_skb(dev);

		if (!skb) {
			printk(KERN_ERR "%s: Can't allocate RX buffers\n",
					dev->name);

			goto err_rxalloc_fail;
		}

		priv->rx_skbuff[i] = skb;

		gfar_new_rxbdp(dev, rxbdp, skb);

		rxbdp++;
	}

	/* Set the last descriptor in the ring to wrap */
	rxbdp--;
	rxbdp->status |= RXBD_WRAP;

	/* If the device has multiple interrupts, register for
	 * them.  Otherwise, only register for the one */
	if (priv->device_flags & FSL_GIANFAR_DEV_HAS_MULTI_INTR) {
		/* Install our interrupt handlers for Error,
		 * Transmit, and Receive */
		if (request_irq(priv->interruptError, gfar_error,
				0, priv->int_name_er, dev) < 0) {
			if (netif_msg_intr(priv))
				printk(KERN_ERR "%s: Can't get IRQ %d\n",
					dev->name, priv->interruptError);

			err = -1;
			goto err_irq_fail;
		}

		if (request_irq(priv->interruptTransmit, gfar_transmit,
				0, priv->int_name_tx, dev) < 0) {
			if (netif_msg_intr(priv))
				printk(KERN_ERR "%s: Can't get IRQ %d\n",
					dev->name, priv->interruptTransmit);

			err = -1;

			goto tx_irq_fail;
		}

		if (request_irq(priv->interruptReceive, gfar_receive,
				0, priv->int_name_rx, dev) < 0) {
			if (netif_msg_intr(priv))
				printk(KERN_ERR "%s: Can't get IRQ %d (receive0)\n",
					dev->name, priv->interruptReceive);

			err = -1;
			goto rx_irq_fail;
		}
	} else {
		if (request_irq(priv->interruptTransmit, gfar_interrupt,
				0, priv->int_name_tx, dev) < 0) {
			if (netif_msg_intr(priv))
				printk(KERN_ERR "%s: Can't get IRQ %d\n",
					dev->name, priv->interruptTransmit);

			err = -1;
			goto err_irq_fail;
		}
	}

	phy_start(priv->phydev);

	/* Configure the coalescing support */
	gfar_write(&regs->txic, 0);
	if (priv->txcoalescing)
		gfar_write(&regs->txic, priv->txic);

	gfar_write(&regs->rxic, 0);
	if (priv->rxcoalescing)
		gfar_write(&regs->rxic, priv->rxic);

	if (priv->rx_csum_enable)
		rctrl |= RCTRL_CHECKSUMMING;

	if (priv->extended_hash) {
		rctrl |= RCTRL_EXTHASH;

		gfar_clear_exact_match(dev);
		rctrl |= RCTRL_EMEN;
	}

	if (priv->padding) {
		rctrl &= ~RCTRL_PAL_MASK;
		rctrl |= RCTRL_PADDING(priv->padding);
	}

	/* Init rctrl based on our settings */
	gfar_write(&priv->regs->rctrl, rctrl);

	if (dev->features & NETIF_F_IP_CSUM)
		gfar_write(&priv->regs->tctrl, TCTRL_INIT_CSUM);

	/* Set the extraction length and index */
	attrs = ATTRELI_EL(priv->rx_stash_size) |
		ATTRELI_EI(priv->rx_stash_index);

	gfar_write(&priv->regs->attreli, attrs);

	/* Start with defaults, and add stashing or locking
	 * depending on the appropriate variables */
	attrs = ATTR_INIT_SETTINGS;

	if (priv->bd_stash_en)
		attrs |= ATTR_BDSTASH;

	if (priv->rx_stash_size != 0)
		attrs |= ATTR_BUFSTASH;

	gfar_write(&priv->regs->attr, attrs);

	gfar_write(&priv->regs->fifo_tx_thr, priv->fifo_threshold);
	gfar_write(&priv->regs->fifo_tx_starve, priv->fifo_starve);
	gfar_write(&priv->regs->fifo_tx_starve_shutoff, priv->fifo_starve_off);

	/* Start the controller */
	gfar_start(dev);

	return 0;

rx_irq_fail:
	free_irq(priv->interruptTransmit, dev);
tx_irq_fail:
	free_irq(priv->interruptError, dev);
err_irq_fail:
err_rxalloc_fail:
rx_skb_fail:
	free_skb_resources(priv);
tx_skb_fail:
	dma_free_coherent(&dev->dev,
			sizeof(struct txbd8)*priv->tx_ring_size
			+ sizeof(struct rxbd8)*priv->rx_ring_size,
			priv->tx_bd_base,
			gfar_read(&regs->tbase0));

	return err;
}

/* Called when something needs to use the ethernet device */
/* Returns 0 for success. */
static int gfar_enet_open(struct net_device *dev)
{
	struct gfar_private *priv = netdev_priv(dev);
	int err;

	napi_enable(&priv->napi);

	skb_queue_head_init(&priv->rx_recycle);

	/* Initialize a bunch of registers */
	init_registers(dev);

	gfar_set_mac_address(dev);

	err = init_phy(dev);

	if (err) {
		napi_disable(&priv->napi);
		return err;
	}

	err = startup_gfar(dev);
	if (err) {
		napi_disable(&priv->napi);
		return err;
	}

	netif_start_queue(dev);

	device_set_wakeup_enable(&dev->dev, priv->wol_en);

	return err;
}

static inline struct txfcb *gfar_add_fcb(struct sk_buff *skb)
{
	struct txfcb *fcb = (struct txfcb *)skb_push(skb, GMAC_FCB_LEN);

	cacheable_memzero(fcb, GMAC_FCB_LEN);

	return fcb;
}

static inline void gfar_tx_checksum(struct sk_buff *skb, struct txfcb *fcb)
{
	u8 flags = 0;

	/* If we're here, it's an IP packet with a TCP or UDP
	 * payload.  We set it to checksum, using a pseudo-header
	 * we provide
	 */
	flags = TXFCB_DEFAULT;

	/* Tell the controller what the protocol is */
	/* And provide the already calculated phcs */
	if (ip_hdr(skb)->protocol == IPPROTO_UDP) {
		flags |= TXFCB_UDP;
		fcb->phcs = udp_hdr(skb)->check;
	} else
		fcb->phcs = tcp_hdr(skb)->check;

	/* l3os is the distance between the start of the
	 * frame (skb->data) and the start of the IP hdr.
	 * l4os is the distance between the start of the
	 * l3 hdr and the l4 hdr */
	fcb->l3os = (u16)(skb_network_offset(skb) - GMAC_FCB_LEN);
	fcb->l4os = skb_network_header_len(skb);

	fcb->flags = flags;
}
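
/*
 * Illustrative note (not part of the original driver): for an untagged
 * IPv4/TCP frame with no IP options, skb_network_offset() is 22 once the
 * 8-byte FCB has been pushed, so gfar_tx_checksum() above sets l3os to
 * 14 (ETH_HLEN) and l4os to 20 (the IP header length).
 */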

void inline gfar_tx_vlan(struct sk_buff *skb, struct txfcb *fcb)
{
	fcb->flags |= TXFCB_VLN;
	fcb->vlctl = vlan_tx_tag_get(skb);
}

static inline struct txbd8 *skip_txbd(struct txbd8 *bdp, int stride,
			       struct txbd8 *base, int ring_size)
{
	struct txbd8 *new_bd = bdp + stride;

	return (new_bd >= (base + ring_size)) ? (new_bd - ring_size) : new_bd;
}

static inline struct txbd8 *next_txbd(struct txbd8 *bdp, struct txbd8 *base,
		int ring_size)
{
	return skip_txbd(bdp, 1, base, ring_size);
}
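
/*
 * Illustrative note (not part of the original driver): skip_txbd() is plain
 * modular arithmetic over the descriptor ring.  With a 256-entry ring,
 * stepping 3 descriptors forward from entry 254 gives 257, which folds back
 * to entry 1 because it lies past base + ring_size.
 */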

/* This is called by the kernel when a frame is ready for transmission. */
/* It is pointed to by the dev->hard_start_xmit function pointer */
static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct gfar_private *priv = netdev_priv(dev);
	struct txfcb *fcb = NULL;
	struct txbd8 *txbdp, *txbdp_start, *base;
	u32 lstatus;
	int i;
	u32 bufaddr;
	unsigned long flags;
	unsigned int nr_frags, length;

	base = priv->tx_bd_base;

	/* total number of fragments in the SKB */
	nr_frags = skb_shinfo(skb)->nr_frags;

	spin_lock_irqsave(&priv->txlock, flags);

	/* check if there is space to queue this packet */
	if (nr_frags > priv->num_txbdfree) {
		/* no space, stop the queue */
		netif_stop_queue(dev);
		dev->stats.tx_fifo_errors++;
		spin_unlock_irqrestore(&priv->txlock, flags);
		return NETDEV_TX_BUSY;
	}

	/* Update transmit stats */
	dev->stats.tx_bytes += skb->len;

	txbdp = txbdp_start = priv->cur_tx;

	if (nr_frags == 0) {
		lstatus = txbdp->lstatus | BD_LFLAG(TXBD_LAST | TXBD_INTERRUPT);
	} else {
		/* Place the fragment addresses and lengths into the TxBDs */
		for (i = 0; i < nr_frags; i++) {
			/* Point at the next BD, wrapping as needed */
			txbdp = next_txbd(txbdp, base, priv->tx_ring_size);

			length = skb_shinfo(skb)->frags[i].size;

			lstatus = txbdp->lstatus | length |
				BD_LFLAG(TXBD_READY);

			/* Handle the last BD specially */
			if (i == nr_frags - 1)
				lstatus |= BD_LFLAG(TXBD_LAST | TXBD_INTERRUPT);

			bufaddr = dma_map_page(&dev->dev,
					skb_shinfo(skb)->frags[i].page,
					skb_shinfo(skb)->frags[i].page_offset,
					length,
					DMA_TO_DEVICE);

			/* set the TxBD length and buffer pointer */
			txbdp->bufPtr = bufaddr;
			txbdp->lstatus = lstatus;
		}

		lstatus = txbdp_start->lstatus;
	}

	/* Set up checksumming */
	if (CHECKSUM_PARTIAL == skb->ip_summed) {
		fcb = gfar_add_fcb(skb);
		lstatus |= BD_LFLAG(TXBD_TOE);
		gfar_tx_checksum(skb, fcb);
	}

	if (priv->vlgrp && vlan_tx_tag_present(skb)) {
		if (unlikely(NULL == fcb)) {
			fcb = gfar_add_fcb(skb);
			lstatus |= BD_LFLAG(TXBD_TOE);
		}

		gfar_tx_vlan(skb, fcb);
	}

	/* setup the TxBD length and buffer pointer for the first BD */
	priv->tx_skbuff[priv->skb_curtx] = skb;
	txbdp_start->bufPtr = dma_map_single(&dev->dev, skb->data,
			skb_headlen(skb), DMA_TO_DEVICE);

	lstatus |= BD_LFLAG(TXBD_CRC | TXBD_READY) | skb_headlen(skb);

	/*
	 * The powerpc-specific eieio() is used, as wmb() has too strong
	 * semantics (it requires synchronization between cacheable and
	 * uncacheable mappings, which eieio doesn't provide and which we
	 * don't need), thus requiring a more expensive sync instruction.  At
	 * some point, the set of architecture-independent barrier functions
	 * should be expanded to include weaker barriers.
	 */
	eieio();

	txbdp_start->lstatus = lstatus;

	/* Update the current skb pointer to the next entry we will use
	 * (wrapping if necessary) */
	priv->skb_curtx = (priv->skb_curtx + 1) &
		TX_RING_MOD_MASK(priv->tx_ring_size);

	priv->cur_tx = next_txbd(txbdp, base, priv->tx_ring_size);

	/* reduce TxBD free count */
	priv->num_txbdfree -= (nr_frags + 1);

	dev->trans_start = jiffies;

	/* If the next BD still needs to be cleaned up, then the bds
	   are full.  We need to tell the kernel to stop sending us stuff. */
	if (!priv->num_txbdfree) {
		netif_stop_queue(dev);

		dev->stats.tx_fifo_errors++;
	}

	/* Tell the DMA to go go go */
	gfar_write(&priv->regs->tstat, TSTAT_CLEAR_THALT);

	/* Unlock priv */
	spin_unlock_irqrestore(&priv->txlock, flags);

	return 0;
}

/* Stops the kernel queue, and halts the controller */
static int gfar_close(struct net_device *dev)
{
	struct gfar_private *priv = netdev_priv(dev);

	napi_disable(&priv->napi);

	skb_queue_purge(&priv->rx_recycle);
	cancel_work_sync(&priv->reset_task);
	stop_gfar(dev);

	/* Disconnect from the PHY */
	phy_disconnect(priv->phydev);
	priv->phydev = NULL;

	netif_stop_queue(dev);

	return 0;
}

/* Changes the mac address if the controller is not running. */
static int gfar_set_mac_address(struct net_device *dev)
{
	gfar_set_mac_for_addr(dev, 0, dev->dev_addr);

	return 0;
}


/* Enables and disables VLAN insertion/extraction */
static void gfar_vlan_rx_register(struct net_device *dev,
		struct vlan_group *grp)
{
	struct gfar_private *priv = netdev_priv(dev);
	unsigned long flags;
	u32 tempval;

	spin_lock_irqsave(&priv->rxlock, flags);

	priv->vlgrp = grp;

	if (grp) {
		/* Enable VLAN tag insertion */
		tempval = gfar_read(&priv->regs->tctrl);
		tempval |= TCTRL_VLINS;

		gfar_write(&priv->regs->tctrl, tempval);

		/* Enable VLAN tag extraction */
		tempval = gfar_read(&priv->regs->rctrl);
		tempval |= RCTRL_VLEX;
		tempval |= (RCTRL_VLEX | RCTRL_PRSDEP_INIT);
		gfar_write(&priv->regs->rctrl, tempval);
	} else {
		/* Disable VLAN tag insertion */
		tempval = gfar_read(&priv->regs->tctrl);
		tempval &= ~TCTRL_VLINS;
		gfar_write(&priv->regs->tctrl, tempval);

		/* Disable VLAN tag extraction */
		tempval = gfar_read(&priv->regs->rctrl);
		tempval &= ~RCTRL_VLEX;
		/* If parse is no longer required, then disable parser */
		if (tempval & RCTRL_REQ_PARSER)
			tempval |= RCTRL_PRSDEP_INIT;
		else
			tempval &= ~RCTRL_PRSDEP_INIT;
		gfar_write(&priv->regs->rctrl, tempval);
	}

	gfar_change_mtu(dev, dev->mtu);

	spin_unlock_irqrestore(&priv->rxlock, flags);
}

static int gfar_change_mtu(struct net_device *dev, int new_mtu)
{
	int tempsize, tempval;
	struct gfar_private *priv = netdev_priv(dev);
	int oldsize = priv->rx_buffer_size;
	int frame_size = new_mtu + ETH_HLEN;

	if (priv->vlgrp)
		frame_size += VLAN_HLEN;

	if ((frame_size < 64) || (frame_size > JUMBO_FRAME_SIZE)) {
		if (netif_msg_drv(priv))
			printk(KERN_ERR "%s: Invalid MTU setting\n",
					dev->name);
		return -EINVAL;
	}

	if (gfar_uses_fcb(priv))
		frame_size += GMAC_FCB_LEN;

	frame_size += priv->padding;

	tempsize =
	    (frame_size & ~(INCREMENTAL_BUFFER_SIZE - 1)) +
	    INCREMENTAL_BUFFER_SIZE;
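
	/*
	 * Illustrative note (not in the original source): assuming the usual
	 * INCREMENTAL_BUFFER_SIZE of 512 from gianfar.h, a frame_size of 1522
	 * becomes (1522 & ~511) + 512 = 1536, i.e. frame_size rounded up to
	 * the next 512-byte boundary (always adding at least one increment).
	 */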
1524 /* Only stop and start the controller if it isn't already
7f7f5316 1525 * stopped, and we changed something */
1da177e4
LT
1526 if ((oldsize != tempsize) && (dev->flags & IFF_UP))
1527 stop_gfar(dev);
1528
1529 priv->rx_buffer_size = tempsize;
1530
1531 dev->mtu = new_mtu;
1532
1533 gfar_write(&priv->regs->mrblr, priv->rx_buffer_size);
1534 gfar_write(&priv->regs->maxfrm, priv->rx_buffer_size);
1535
1536 /* If the mtu is larger than the max size for standard
1537 * ethernet frames (ie, a jumbo frame), then set maccfg2
1538 * to allow huge frames, and to check the length */
1539 tempval = gfar_read(&priv->regs->maccfg2);
1540
1541 if (priv->rx_buffer_size > DEFAULT_RX_BUFFER_SIZE)
1542 tempval |= (MACCFG2_HUGEFRAME | MACCFG2_LENGTHCHECK);
1543 else
1544 tempval &= ~(MACCFG2_HUGEFRAME | MACCFG2_LENGTHCHECK);
1545
1546 gfar_write(&priv->regs->maccfg2, tempval);
1547
1548 if ((oldsize != tempsize) && (dev->flags & IFF_UP))
1549 startup_gfar(dev);
1550
1551 return 0;
1552}
1553
ab939905 1554/* gfar_reset_task gets scheduled when a packet has not been
1da177e4
LT
1555 * transmitted after a set amount of time.
1556 * For now, assume that clearing out all the structures, and
ab939905
SS
1557 * starting over will fix the problem.
1558 */
1559static void gfar_reset_task(struct work_struct *work)
1da177e4 1560{
ab939905
SS
1561 struct gfar_private *priv = container_of(work, struct gfar_private,
1562 reset_task);
1563 struct net_device *dev = priv->dev;
1da177e4
LT
1564
1565 if (dev->flags & IFF_UP) {
1566 stop_gfar(dev);
1567 startup_gfar(dev);
1568 }
1569
263ba320 1570 netif_tx_schedule_all(dev);
1da177e4
LT
1571}
1572
ab939905
SS
1573static void gfar_timeout(struct net_device *dev)
1574{
1575 struct gfar_private *priv = netdev_priv(dev);
1576
1577 dev->stats.tx_errors++;
1578 schedule_work(&priv->reset_task);
1579}
1580
1da177e4 1581/* Interrupt Handler for Transmit complete */
f162b9d5 1582static int gfar_clean_tx_ring(struct net_device *dev)
1da177e4 1583{
d080cd63 1584 struct gfar_private *priv = netdev_priv(dev);
4669bc90
DH
1585 struct txbd8 *bdp;
1586 struct txbd8 *lbdp = NULL;
1587 struct txbd8 *base = priv->tx_bd_base;
1588 struct sk_buff *skb;
1589 int skb_dirtytx;
1590 int tx_ring_size = priv->tx_ring_size;
1591 int frags = 0;
1592 int i;
d080cd63 1593 int howmany = 0;
4669bc90 1594 u32 lstatus;
1da177e4 1595
1da177e4 1596 bdp = priv->dirty_tx;
4669bc90 1597 skb_dirtytx = priv->skb_dirtytx;
1da177e4 1598
4669bc90
DH
1599 while ((skb = priv->tx_skbuff[skb_dirtytx])) {
1600 frags = skb_shinfo(skb)->nr_frags;
1601 lbdp = skip_txbd(bdp, frags, base, tx_ring_size);
1da177e4 1602
4669bc90 1603 lstatus = lbdp->lstatus;
1da177e4 1604
4669bc90
DH
1605 /* Only clean completed frames */
1606 if ((lstatus & BD_LFLAG(TXBD_READY)) &&
1607 (lstatus & BD_LENGTH_MASK))
1608 break;
1609
1610 dma_unmap_single(&dev->dev,
1611 bdp->bufPtr,
1612 bdp->length,
1613 DMA_TO_DEVICE);
81183059 1614
4669bc90
DH
1615 bdp->lstatus &= BD_LFLAG(TXBD_WRAP);
1616 bdp = next_txbd(bdp, base, tx_ring_size);
d080cd63 1617
4669bc90
DH
1618 for (i = 0; i < frags; i++) {
1619 dma_unmap_page(&dev->dev,
1620 bdp->bufPtr,
1621 bdp->length,
1622 DMA_TO_DEVICE);
1623 bdp->lstatus &= BD_LFLAG(TXBD_WRAP);
1624 bdp = next_txbd(bdp, base, tx_ring_size);
1625 }
1da177e4 1626
0fd56bb5
AF
1627 /*
1628 * If there's room in the queue (limit it to rx_buffer_size)
1629 * we add this skb back into the pool, if it's the right size
1630 */
1631 if (skb_queue_len(&priv->rx_recycle) < priv->rx_ring_size &&
1632 skb_recycle_check(skb, priv->rx_buffer_size +
1633 RXBUF_ALIGNMENT))
1634 __skb_queue_head(&priv->rx_recycle, skb);
1635 else
1636 dev_kfree_skb_any(skb);
1637
4669bc90 1638 priv->tx_skbuff[skb_dirtytx] = NULL;
d080cd63 1639
4669bc90
DH
1640 skb_dirtytx = (skb_dirtytx + 1) &
1641 TX_RING_MOD_MASK(tx_ring_size);
1642
1643 howmany++;
1644 priv->num_txbdfree += frags + 1;
1645 }
1da177e4 1646
4669bc90
DH
1647 /* If we freed a buffer, we can restart transmission, if necessary */
1648 if (netif_queue_stopped(dev) && priv->num_txbdfree)
1649 netif_wake_queue(dev);
1da177e4 1650
4669bc90
DH
1651 /* Update dirty indicators */
1652 priv->skb_dirtytx = skb_dirtytx;
1653 priv->dirty_tx = bdp;
1da177e4 1654
d080cd63
DH
1655 dev->stats.tx_packets += howmany;
1656
1657 return howmany;
1658}
1659
8c7396ae 1660static void gfar_schedule_cleanup(struct net_device *dev)
d080cd63 1661{
d080cd63 1662 struct gfar_private *priv = netdev_priv(dev);
a6d0b91a
AV
1663 unsigned long flags;
1664
1665 spin_lock_irqsave(&priv->txlock, flags);
1666 spin_lock(&priv->rxlock);
1667
288379f0 1668 if (napi_schedule_prep(&priv->napi)) {
8c7396ae 1669 gfar_write(&priv->regs->imask, IMASK_RTX_DISABLED);
288379f0 1670 __napi_schedule(&priv->napi);
8707bdd4
JP
1671 } else {
1672 /*
1673 * Clear IEVENT, so interrupts aren't called again
1674 * because of the packets that have already arrived.
1675 */
1676 gfar_write(&priv->regs->ievent, IEVENT_RTX_MASK);
2f448911 1677 }
a6d0b91a
AV
1678
1679 spin_unlock(&priv->rxlock);
1680 spin_unlock_irqrestore(&priv->txlock, flags);
8c7396ae 1681}
1da177e4 1682
8c7396ae
DH
1683/* Interrupt Handler for Transmit complete */
1684static irqreturn_t gfar_transmit(int irq, void *dev_id)
1685{
1686 gfar_schedule_cleanup((struct net_device *)dev_id);
1da177e4
LT
1687 return IRQ_HANDLED;
1688}
1689
815b97c6
AF
1690static void gfar_new_rxbdp(struct net_device *dev, struct rxbd8 *bdp,
1691 struct sk_buff *skb)
1692{
1693 struct gfar_private *priv = netdev_priv(dev);
5a5efed4 1694 u32 lstatus;
815b97c6
AF
1695
1696 bdp->bufPtr = dma_map_single(&dev->dev, skb->data,
1697 priv->rx_buffer_size, DMA_FROM_DEVICE);
1698
5a5efed4 1699 lstatus = BD_LFLAG(RXBD_EMPTY | RXBD_INTERRUPT);
815b97c6
AF
1700
1701 if (bdp == priv->rx_bd_base + priv->rx_ring_size - 1)
5a5efed4 1702 lstatus |= BD_LFLAG(RXBD_WRAP);
815b97c6
AF
1703
1704 eieio();
1705
5a5efed4 1706 bdp->lstatus = lstatus;
815b97c6
AF
1707}
1708
1709
1710struct sk_buff * gfar_new_skb(struct net_device *dev)
1da177e4 1711{
7f7f5316 1712 unsigned int alignamount;
1da177e4
LT
1713 struct gfar_private *priv = netdev_priv(dev);
1714 struct sk_buff *skb = NULL;
1da177e4 1715
0fd56bb5
AF
1716 skb = __skb_dequeue(&priv->rx_recycle);
1717 if (!skb)
1718 skb = netdev_alloc_skb(dev,
1719 priv->rx_buffer_size + RXBUF_ALIGNMENT);
1da177e4 1720
815b97c6 1721 if (!skb)
1da177e4
LT
1722 return NULL;
1723
7f7f5316 1724 alignamount = RXBUF_ALIGNMENT -
bea3348e 1725 (((unsigned long) skb->data) & (RXBUF_ALIGNMENT - 1));
7f7f5316 1726
1da177e4
LT
1727 /* We need the data buffer to be aligned properly. We will reserve
1728 * as many bytes as needed to align the data properly
1729 */
7f7f5316 1730 skb_reserve(skb, alignamount);
1da177e4 1731
1da177e4
LT
1732 return skb;
1733}
1734
298e1a9e 1735static inline void count_errors(unsigned short status, struct net_device *dev)
1da177e4 1736{
298e1a9e 1737 struct gfar_private *priv = netdev_priv(dev);
09f75cd7 1738 struct net_device_stats *stats = &dev->stats;
1da177e4
LT
1739 struct gfar_extra_stats *estats = &priv->extra_stats;
1740
1741 /* If the packet was truncated, none of the other errors
1742 * matter */
1743 if (status & RXBD_TRUNCATED) {
1744 stats->rx_length_errors++;
1745
1746 estats->rx_trunc++;
1747
1748 return;
1749 }
1750 /* Count the errors, if there were any */
1751 if (status & (RXBD_LARGE | RXBD_SHORT)) {
1752 stats->rx_length_errors++;
1753
1754 if (status & RXBD_LARGE)
1755 estats->rx_large++;
1756 else
1757 estats->rx_short++;
1758 }
1759 if (status & RXBD_NONOCTET) {
1760 stats->rx_frame_errors++;
1761 estats->rx_nonoctet++;
1762 }
1763 if (status & RXBD_CRCERR) {
1764 estats->rx_crcerr++;
1765 stats->rx_crc_errors++;
1766 }
1767 if (status & RXBD_OVERRUN) {
1768 estats->rx_overrun++;
1769 stats->rx_crc_errors++;
1770 }
1771}
1772
7d12e780 1773irqreturn_t gfar_receive(int irq, void *dev_id)
1da177e4 1774{
8c7396ae 1775 gfar_schedule_cleanup((struct net_device *)dev_id);
1da177e4
LT
1776 return IRQ_HANDLED;
1777}
1778
0bbaf069
KG
1779static inline void gfar_rx_checksum(struct sk_buff *skb, struct rxfcb *fcb)
1780{
1781 /* If valid headers were found, and valid sums
1782 * were verified, then we tell the kernel that no
1783 * checksumming is necessary. Otherwise, it is */
7f7f5316 1784 if ((fcb->flags & RXFCB_CSUM_MASK) == (RXFCB_CIP | RXFCB_CTU))
0bbaf069
KG
1785 skb->ip_summed = CHECKSUM_UNNECESSARY;
1786 else
1787 skb->ip_summed = CHECKSUM_NONE;
1788}
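/*
 * RXFCB_CIP and RXFCB_CTU indicate that the controller checked the IP and
 * TCP/UDP checksums, respectively; because the comparison above is against
 * the whole RXFCB_CSUM_MASK, a set error bit within that mask makes the test
 * fail and the skb falls back to CHECKSUM_NONE.
 */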
1789
1790
1da177e4
LT
1791/* gfar_process_frame() -- handle one incoming packet if skb
1792 * isn't NULL. */
1793static int gfar_process_frame(struct net_device *dev, struct sk_buff *skb,
2c2db48a 1794 int amount_pull)
1da177e4
LT
1795{
1796 struct gfar_private *priv = netdev_priv(dev);
0bbaf069 1797 struct rxfcb *fcb = NULL;
1da177e4 1798
2c2db48a 1799 int ret;
1da177e4 1800
2c2db48a
DH
 1801 	/* The FCB, if present, sits at the beginning of the frame */
1802 fcb = (struct rxfcb *)skb->data;
0bbaf069 1803
2c2db48a
DH
1804 /* Remove the FCB from the skb */
1805 /* Remove the padded bytes, if there are any */
1806 if (amount_pull)
1807 skb_pull(skb, amount_pull);
0bbaf069 1808
2c2db48a
DH
1809 if (priv->rx_csum_enable)
1810 gfar_rx_checksum(skb, fcb);
0bbaf069 1811
2c2db48a
DH
1812 /* Tell the skb what kind of packet this is */
1813 skb->protocol = eth_type_trans(skb, dev);
1da177e4 1814
2c2db48a
DH
1815 /* Send the packet up the stack */
1816 if (unlikely(priv->vlgrp && (fcb->flags & RXFCB_VLN)))
1817 ret = vlan_hwaccel_receive_skb(skb, priv->vlgrp, fcb->vlctl);
1818 else
1819 ret = netif_receive_skb(skb);
0bbaf069 1820
2c2db48a
DH
1821 if (NET_RX_DROP == ret)
1822 priv->extra_stats.kernel_dropped++;
1da177e4
LT
1823
1824 return 0;
1825}
1826
1827/* gfar_clean_rx_ring() -- Processes each frame in the rx ring
0bbaf069 1828 * until the budget/quota has been reached. Returns the number
1da177e4
LT
1829 * of frames handled
1830 */
0bbaf069 1831int gfar_clean_rx_ring(struct net_device *dev, int rx_work_limit)
1da177e4 1832{
31de198b 1833 struct rxbd8 *bdp, *base;
1da177e4 1834 struct sk_buff *skb;
2c2db48a
DH
1835 int pkt_len;
1836 int amount_pull;
1da177e4
LT
1837 int howmany = 0;
1838 struct gfar_private *priv = netdev_priv(dev);
1839
1840 /* Get the first full descriptor */
1841 bdp = priv->cur_rx;
31de198b 1842 base = priv->rx_bd_base;
1da177e4 1843
2c2db48a
DH
1844 amount_pull = (gfar_uses_fcb(priv) ? GMAC_FCB_LEN : 0) +
1845 priv->padding;
1846
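	/*
	 * amount_pull covers the bytes the controller prepends to the frame:
	 * the frame control block (GMAC_FCB_LEN) whenever gfar_uses_fcb()
	 * says one is inserted, plus any configured rx padding.
	 * gfar_process_frame() skb_pull()s exactly this many bytes before
	 * handing the skb up the stack.
	 */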
1da177e4 1847 while (!((bdp->status & RXBD_EMPTY) || (--rx_work_limit < 0))) {
815b97c6 1848 struct sk_buff *newskb;
3b6330ce 1849 rmb();
815b97c6
AF
1850
1851 /* Add another skb for the future */
1852 newskb = gfar_new_skb(dev);
1853
1da177e4
LT
1854 skb = priv->rx_skbuff[priv->skb_currx];
1855
81183059
AF
1856 dma_unmap_single(&priv->dev->dev, bdp->bufPtr,
1857 priv->rx_buffer_size, DMA_FROM_DEVICE);
1858
815b97c6
AF
1859 /* We drop the frame if we failed to allocate a new buffer */
1860 if (unlikely(!newskb || !(bdp->status & RXBD_LAST) ||
1861 bdp->status & RXBD_ERR)) {
1862 count_errors(bdp->status, dev);
1863
1864 if (unlikely(!newskb))
1865 newskb = skb;
8882d9a6 1866 else if (skb)
0fd56bb5 1867 __skb_queue_head(&priv->rx_recycle, skb);
815b97c6 1868 } else {
1da177e4 1869 /* Increment the number of packets */
09f75cd7 1870 dev->stats.rx_packets++;
1da177e4
LT
1871 howmany++;
1872
2c2db48a
DH
1873 if (likely(skb)) {
1874 pkt_len = bdp->length - ETH_FCS_LEN;
1875 /* Remove the FCS from the packet length */
1876 skb_put(skb, pkt_len);
1877 dev->stats.rx_bytes += pkt_len;
1da177e4 1878
1577ecef
AF
1879 if (in_irq() || irqs_disabled())
 1880 	 			printk(KERN_ERR "Interrupt problem!\n");
2c2db48a
DH
1881 gfar_process_frame(dev, skb, amount_pull);
1882
1883 } else {
1884 if (netif_msg_rx_err(priv))
1885 printk(KERN_WARNING
1886 "%s: Missing skb!\n", dev->name);
1887 dev->stats.rx_dropped++;
1888 priv->extra_stats.rx_skbmissing++;
1889 }
1da177e4 1890
1da177e4
LT
1891 }
1892
815b97c6 1893 priv->rx_skbuff[priv->skb_currx] = newskb;
1da177e4 1894
815b97c6
AF
1895 /* Setup the new bdp */
1896 gfar_new_rxbdp(dev, bdp, newskb);
1da177e4
LT
1897
1898 /* Update to the next pointer */
31de198b 1899 bdp = next_bd(bdp, base, priv->rx_ring_size);
1da177e4
LT
1900
1901 /* update to point at the next skb */
1902 priv->skb_currx =
815b97c6
AF
1903 (priv->skb_currx + 1) &
1904 RX_RING_MOD_MASK(priv->rx_ring_size);
1da177e4
LT
1905 }
1906
1907 /* Update the current rxbd pointer to be the next one */
1908 priv->cur_rx = bdp;
1909
1da177e4
LT
1910 return howmany;
1911}
1912
bea3348e 1913static int gfar_poll(struct napi_struct *napi, int budget)
1da177e4 1914{
bea3348e
SH
1915 struct gfar_private *priv = container_of(napi, struct gfar_private, napi);
1916 struct net_device *dev = priv->dev;
42199884
AF
1917 int tx_cleaned = 0;
1918 int rx_cleaned = 0;
d080cd63
DH
1919 unsigned long flags;
1920
8c7396ae
DH
1921 /* Clear IEVENT, so interrupts aren't called again
1922 * because of the packets that have already arrived */
1923 gfar_write(&priv->regs->ievent, IEVENT_RTX_MASK);
1924
d080cd63
DH
1925 /* If we fail to get the lock, don't bother with the TX BDs */
1926 if (spin_trylock_irqsave(&priv->txlock, flags)) {
42199884 1927 tx_cleaned = gfar_clean_tx_ring(dev);
d080cd63
DH
1928 spin_unlock_irqrestore(&priv->txlock, flags);
1929 }
1da177e4 1930
42199884 1931 rx_cleaned = gfar_clean_rx_ring(dev, budget);
1da177e4 1932
42199884
AF
1933 if (tx_cleaned)
1934 return budget;
1935
1936 if (rx_cleaned < budget) {
288379f0 1937 napi_complete(napi);
1da177e4
LT
1938
1939 /* Clear the halt bit in RSTAT */
1940 gfar_write(&priv->regs->rstat, RSTAT_CLEAR_RHALT);
1941
1942 gfar_write(&priv->regs->imask, IMASK_DEFAULT);
1943
1944 /* If we are coalescing interrupts, update the timer */
1945 /* Otherwise, clear it */
2f448911
AF
1946 if (likely(priv->rxcoalescing)) {
1947 gfar_write(&priv->regs->rxic, 0);
b46a8454 1948 gfar_write(&priv->regs->rxic, priv->rxic);
2f448911 1949 }
8c7396ae
DH
1950 if (likely(priv->txcoalescing)) {
1951 gfar_write(&priv->regs->txic, 0);
1952 gfar_write(&priv->regs->txic, priv->txic);
1953 }
1da177e4
LT
1954 }
1955
42199884 1956 return rx_cleaned;
1da177e4 1957}
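/*
 * NAPI contract, as used above: returning the full budget while TX work
 * remains keeps the poll scheduled; only when fewer than budget RX frames
 * were cleaned does the driver call napi_complete(), clear the RSTAT halt
 * bit, restore IMASK_DEFAULT and rewrite the rx/tx coalescing timers.
 */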
1da177e4 1958
f2d71c2d
VW
1959#ifdef CONFIG_NET_POLL_CONTROLLER
1960/*
1961 * Polling 'interrupt' - used by things like netconsole to send skbs
1962 * without having to re-enable interrupts. It's not called while
1963 * the interrupt routine is executing.
1964 */
1965static void gfar_netpoll(struct net_device *dev)
1966{
1967 struct gfar_private *priv = netdev_priv(dev);
1968
1969 /* If the device has multiple interrupts, run tx/rx */
b31a1d8b 1970 if (priv->device_flags & FSL_GIANFAR_DEV_HAS_MULTI_INTR) {
f2d71c2d
VW
1971 disable_irq(priv->interruptTransmit);
1972 disable_irq(priv->interruptReceive);
1973 disable_irq(priv->interruptError);
1974 gfar_interrupt(priv->interruptTransmit, dev);
1975 enable_irq(priv->interruptError);
1976 enable_irq(priv->interruptReceive);
1977 enable_irq(priv->interruptTransmit);
1978 } else {
1979 disable_irq(priv->interruptTransmit);
1980 gfar_interrupt(priv->interruptTransmit, dev);
1981 enable_irq(priv->interruptTransmit);
1982 }
1983}
1984#endif
1985
1da177e4 1986/* The interrupt handler for devices with one interrupt */
7d12e780 1987static irqreturn_t gfar_interrupt(int irq, void *dev_id)
1da177e4
LT
1988{
1989 struct net_device *dev = dev_id;
1990 struct gfar_private *priv = netdev_priv(dev);
1991
1992 /* Save ievent for future reference */
1993 u32 events = gfar_read(&priv->regs->ievent);
1994
1da177e4 1995 /* Check for reception */
538cc7ee 1996 if (events & IEVENT_RX_MASK)
7d12e780 1997 gfar_receive(irq, dev_id);
1da177e4
LT
1998
1999 /* Check for transmit completion */
538cc7ee 2000 if (events & IEVENT_TX_MASK)
7d12e780 2001 gfar_transmit(irq, dev_id);
1da177e4 2002
538cc7ee
SS
2003 /* Check for errors */
2004 if (events & IEVENT_ERR_MASK)
2005 gfar_error(irq, dev_id);
1da177e4
LT
2006
2007 return IRQ_HANDLED;
2008}
2009
1da177e4
LT
2010/* Called every time the controller might need to be made
2011 * aware of new link state. The PHY code conveys this
bb40dcbb 2012 * information through variables in the phydev structure, and this
1da177e4
LT
2013 * function converts those variables into the appropriate
2014 * register values, and can bring down the device if needed.
2015 */
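/*
 * In short (paraphrasing the switch below): 1000 Mbit/s selects the GMII
 * interface mode in MACCFG2, 10 and 100 Mbit/s select MII mode, and
 * ECNTRL_R100 is set only for 100 Mbit/s so the reduced-pin modes can
 * distinguish 10 from 100.
 */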
2016static void adjust_link(struct net_device *dev)
2017{
2018 struct gfar_private *priv = netdev_priv(dev);
cc8c6e37 2019 struct gfar __iomem *regs = priv->regs;
bb40dcbb
AF
2020 unsigned long flags;
2021 struct phy_device *phydev = priv->phydev;
2022 int new_state = 0;
2023
fef6108d 2024 spin_lock_irqsave(&priv->txlock, flags);
bb40dcbb
AF
2025 if (phydev->link) {
2026 u32 tempval = gfar_read(&regs->maccfg2);
7f7f5316 2027 u32 ecntrl = gfar_read(&regs->ecntrl);
1da177e4 2028
1da177e4
LT
2029 /* Now we make sure that we can be in full duplex mode.
2030 * If not, we operate in half-duplex mode. */
bb40dcbb
AF
2031 if (phydev->duplex != priv->oldduplex) {
2032 new_state = 1;
2033 if (!(phydev->duplex))
1da177e4 2034 tempval &= ~(MACCFG2_FULL_DUPLEX);
bb40dcbb 2035 else
1da177e4 2036 tempval |= MACCFG2_FULL_DUPLEX;
1da177e4 2037
bb40dcbb 2038 priv->oldduplex = phydev->duplex;
1da177e4
LT
2039 }
2040
bb40dcbb
AF
2041 if (phydev->speed != priv->oldspeed) {
2042 new_state = 1;
2043 switch (phydev->speed) {
1da177e4 2044 case 1000:
1da177e4
LT
2045 tempval =
2046 ((tempval & ~(MACCFG2_IF)) | MACCFG2_GMII);
f430e49e
LY
2047
2048 ecntrl &= ~(ECNTRL_R100);
1da177e4
LT
2049 break;
2050 case 100:
2051 case 10:
1da177e4
LT
2052 tempval =
2053 ((tempval & ~(MACCFG2_IF)) | MACCFG2_MII);
7f7f5316
AF
2054
2055 /* Reduced mode distinguishes
2056 * between 10 and 100 */
2057 if (phydev->speed == SPEED_100)
2058 ecntrl |= ECNTRL_R100;
2059 else
2060 ecntrl &= ~(ECNTRL_R100);
1da177e4
LT
2061 break;
2062 default:
0bbaf069
KG
2063 if (netif_msg_link(priv))
2064 printk(KERN_WARNING
bb40dcbb
AF
2065 "%s: Ack! Speed (%d) is not 10/100/1000!\n",
2066 dev->name, phydev->speed);
1da177e4
LT
2067 break;
2068 }
2069
bb40dcbb 2070 priv->oldspeed = phydev->speed;
1da177e4
LT
2071 }
2072
bb40dcbb 2073 gfar_write(&regs->maccfg2, tempval);
7f7f5316 2074 gfar_write(&regs->ecntrl, ecntrl);
bb40dcbb 2075
1da177e4 2076 if (!priv->oldlink) {
bb40dcbb 2077 new_state = 1;
1da177e4 2078 priv->oldlink = 1;
1da177e4 2079 }
bb40dcbb
AF
2080 } else if (priv->oldlink) {
2081 new_state = 1;
2082 priv->oldlink = 0;
2083 priv->oldspeed = 0;
2084 priv->oldduplex = -1;
1da177e4 2085 }
1da177e4 2086
bb40dcbb
AF
2087 if (new_state && netif_msg_link(priv))
2088 phy_print_status(phydev);
2089
fef6108d 2090 spin_unlock_irqrestore(&priv->txlock, flags);
bb40dcbb 2091}
1da177e4
LT
2092
2093/* Update the hash table based on the current list of multicast
2094 * addresses we subscribe to. Also, change the promiscuity of
2095 * the device based on the flags (this function is called
2096 * whenever dev->flags is changed */
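/*
 * Three cases below: IFF_PROMISC sets RCTRL_PROM; IFF_ALLMULTI fills every
 * group hash register with 0xffffffff so all multicast frames match;
 * otherwise the hash (and, if present, exact-match) registers are cleared
 * and each address in dev->mc_list lands in an exact-match slot while those
 * last, falling back to the group hash afterwards.
 */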
2097static void gfar_set_multi(struct net_device *dev)
2098{
2099 struct dev_mc_list *mc_ptr;
2100 struct gfar_private *priv = netdev_priv(dev);
cc8c6e37 2101 struct gfar __iomem *regs = priv->regs;
1da177e4
LT
2102 u32 tempval;
2103
 2104 	if (dev->flags & IFF_PROMISC) {
1da177e4
LT
2105 /* Set RCTRL to PROM */
2106 tempval = gfar_read(&regs->rctrl);
2107 tempval |= RCTRL_PROM;
2108 gfar_write(&regs->rctrl, tempval);
2109 } else {
2110 /* Set RCTRL to not PROM */
2111 tempval = gfar_read(&regs->rctrl);
2112 tempval &= ~(RCTRL_PROM);
2113 gfar_write(&regs->rctrl, tempval);
2114 }
6aa20a22 2115
1da177e4
LT
 2116 	if (dev->flags & IFF_ALLMULTI) {
2117 /* Set the hash to rx all multicast frames */
0bbaf069
KG
2118 gfar_write(&regs->igaddr0, 0xffffffff);
2119 gfar_write(&regs->igaddr1, 0xffffffff);
2120 gfar_write(&regs->igaddr2, 0xffffffff);
2121 gfar_write(&regs->igaddr3, 0xffffffff);
2122 gfar_write(&regs->igaddr4, 0xffffffff);
2123 gfar_write(&regs->igaddr5, 0xffffffff);
2124 gfar_write(&regs->igaddr6, 0xffffffff);
2125 gfar_write(&regs->igaddr7, 0xffffffff);
1da177e4
LT
2126 gfar_write(&regs->gaddr0, 0xffffffff);
2127 gfar_write(&regs->gaddr1, 0xffffffff);
2128 gfar_write(&regs->gaddr2, 0xffffffff);
2129 gfar_write(&regs->gaddr3, 0xffffffff);
2130 gfar_write(&regs->gaddr4, 0xffffffff);
2131 gfar_write(&regs->gaddr5, 0xffffffff);
2132 gfar_write(&regs->gaddr6, 0xffffffff);
2133 gfar_write(&regs->gaddr7, 0xffffffff);
2134 } else {
7f7f5316
AF
2135 int em_num;
2136 int idx;
2137
1da177e4 2138 /* zero out the hash */
0bbaf069
KG
2139 gfar_write(&regs->igaddr0, 0x0);
2140 gfar_write(&regs->igaddr1, 0x0);
2141 gfar_write(&regs->igaddr2, 0x0);
2142 gfar_write(&regs->igaddr3, 0x0);
2143 gfar_write(&regs->igaddr4, 0x0);
2144 gfar_write(&regs->igaddr5, 0x0);
2145 gfar_write(&regs->igaddr6, 0x0);
2146 gfar_write(&regs->igaddr7, 0x0);
1da177e4
LT
2147 gfar_write(&regs->gaddr0, 0x0);
2148 gfar_write(&regs->gaddr1, 0x0);
2149 gfar_write(&regs->gaddr2, 0x0);
2150 gfar_write(&regs->gaddr3, 0x0);
2151 gfar_write(&regs->gaddr4, 0x0);
2152 gfar_write(&regs->gaddr5, 0x0);
2153 gfar_write(&regs->gaddr6, 0x0);
2154 gfar_write(&regs->gaddr7, 0x0);
2155
7f7f5316
AF
2156 /* If we have extended hash tables, we need to
2157 * clear the exact match registers to prepare for
2158 * setting them */
2159 if (priv->extended_hash) {
2160 em_num = GFAR_EM_NUM + 1;
2161 gfar_clear_exact_match(dev);
2162 idx = 1;
2163 } else {
2164 idx = 0;
2165 em_num = 0;
2166 }
2167
1da177e4
LT
 2168 	if (dev->mc_count == 0)
2169 return;
2170
2171 /* Parse the list, and set the appropriate bits */
 2172 	for (mc_ptr = dev->mc_list; mc_ptr; mc_ptr = mc_ptr->next) {
7f7f5316
AF
2173 if (idx < em_num) {
2174 gfar_set_mac_for_addr(dev, idx,
2175 mc_ptr->dmi_addr);
2176 idx++;
2177 } else
2178 gfar_set_hash_for_addr(dev, mc_ptr->dmi_addr);
1da177e4
LT
2179 }
2180 }
2181
2182 return;
2183}
2184
7f7f5316
AF
2185
2186/* Clears each of the exact match registers to zero, so they
2187 * don't interfere with normal reception */
2188static void gfar_clear_exact_match(struct net_device *dev)
2189{
2190 int idx;
 2191 	u8 zero_arr[MAC_ADDR_LEN] = { 0, 0, 0, 0, 0, 0 };
2192
 2193 	for (idx = 1; idx < GFAR_EM_NUM + 1; idx++)
2194 gfar_set_mac_for_addr(dev, idx, (u8 *)zero_arr);
2195}
2196
1da177e4
LT
2197/* Set the appropriate hash bit for the given addr */
2198/* The algorithm works like so:
2199 * 1) Take the Destination Address (ie the multicast address), and
2200 * do a CRC on it (little endian), and reverse the bits of the
2201 * result.
2202 * 2) Use the 8 most significant bits as a hash into a 256-entry
2203 * table. The table is controlled through 8 32-bit registers:
 2204 	 * gaddr0-7. gaddr0's MSB is entry 0, and gaddr7's LSB is
 2205 	 * entry 255. This means that the 3 most significant bits of the
 2206 	 * hash select which gaddr register to use, and the 5 other bits
2207 * indicate which bit (assuming an IBM numbering scheme, which
2208 * for PowerPC (tm) is usually the case) in the register holds
2209 * the entry. */
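/*
 * Worked example (assuming an 8-bit hash_width, i.e. no extended hash): if
 * the CRC result is 0xA6000000, its top 8 bits are 0b10100110, so
 * whichreg = 0b101 = 5 and whichbit = 0b00110 = 6; the routine below then
 * ORs (1 << (31 - 6)) into hash_regs[5].
 */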
2210static void gfar_set_hash_for_addr(struct net_device *dev, u8 *addr)
2211{
2212 u32 tempval;
2213 struct gfar_private *priv = netdev_priv(dev);
1da177e4 2214 u32 result = ether_crc(MAC_ADDR_LEN, addr);
0bbaf069
KG
2215 int width = priv->hash_width;
2216 u8 whichbit = (result >> (32 - width)) & 0x1f;
2217 u8 whichreg = result >> (32 - width + 5);
1da177e4
LT
2218 u32 value = (1 << (31-whichbit));
2219
0bbaf069 2220 tempval = gfar_read(priv->hash_regs[whichreg]);
1da177e4 2221 tempval |= value;
0bbaf069 2222 gfar_write(priv->hash_regs[whichreg], tempval);
1da177e4
LT
2223
2224 return;
2225}
2226
7f7f5316
AF
2227
2228/* There are multiple MAC Address register pairs on some controllers
2229 * This function sets the numth pair to a given address
2230 */
2231static void gfar_set_mac_for_addr(struct net_device *dev, int num, u8 *addr)
2232{
2233 struct gfar_private *priv = netdev_priv(dev);
2234 int idx;
2235 char tmpbuf[MAC_ADDR_LEN];
2236 u32 tempval;
cc8c6e37 2237 u32 __iomem *macptr = &priv->regs->macstnaddr1;
7f7f5316
AF
2238
2239 macptr += num*2;
2240
 2241 	/* Copy it into the MAC registers backwards, since the hardware */
 2242 	/* stores the station address in reverse (little-endian) byte order */
2243 for (idx = 0; idx < MAC_ADDR_LEN; idx++)
2244 tmpbuf[MAC_ADDR_LEN - 1 - idx] = addr[idx];
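	/* e.g. for a (hypothetical) address 00:04:9f:01:02:03 the loop above
	 * leaves tmpbuf = { 03, 02, 01, 9f, 04, 00 } */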
2245
2246 gfar_write(macptr, *((u32 *) (tmpbuf)));
2247
2248 tempval = *((u32 *) (tmpbuf + 4));
2249
2250 gfar_write(macptr+1, tempval);
2251}
2252
1da177e4 2253/* GFAR error interrupt handler */
7d12e780 2254static irqreturn_t gfar_error(int irq, void *dev_id)
1da177e4
LT
2255{
2256 struct net_device *dev = dev_id;
2257 struct gfar_private *priv = netdev_priv(dev);
2258
2259 /* Save ievent for future reference */
2260 u32 events = gfar_read(&priv->regs->ievent);
2261
2262 /* Clear IEVENT */
d87eb127
SW
2263 gfar_write(&priv->regs->ievent, events & IEVENT_ERR_MASK);
2264
2265 /* Magic Packet is not an error. */
b31a1d8b 2266 if ((priv->device_flags & FSL_GIANFAR_DEV_HAS_MAGIC_PACKET) &&
d87eb127
SW
2267 (events & IEVENT_MAG))
2268 events &= ~IEVENT_MAG;
1da177e4
LT
2269
2270 /* Hmm... */
0bbaf069
KG
2271 if (netif_msg_rx_err(priv) || netif_msg_tx_err(priv))
2272 printk(KERN_DEBUG "%s: error interrupt (ievent=0x%08x imask=0x%08x)\n",
538cc7ee 2273 dev->name, events, gfar_read(&priv->regs->imask));
1da177e4
LT
2274
2275 /* Update the error counters */
2276 if (events & IEVENT_TXE) {
09f75cd7 2277 dev->stats.tx_errors++;
1da177e4
LT
2278
2279 if (events & IEVENT_LC)
09f75cd7 2280 dev->stats.tx_window_errors++;
1da177e4 2281 if (events & IEVENT_CRL)
09f75cd7 2282 dev->stats.tx_aborted_errors++;
1da177e4 2283 if (events & IEVENT_XFUN) {
0bbaf069 2284 if (netif_msg_tx_err(priv))
538cc7ee
SS
2285 printk(KERN_DEBUG "%s: TX FIFO underrun, "
2286 "packet dropped.\n", dev->name);
09f75cd7 2287 dev->stats.tx_dropped++;
1da177e4
LT
2288 priv->extra_stats.tx_underrun++;
2289
2290 /* Reactivate the Tx Queues */
2291 gfar_write(&priv->regs->tstat, TSTAT_CLEAR_THALT);
2292 }
0bbaf069
KG
2293 if (netif_msg_tx_err(priv))
2294 printk(KERN_DEBUG "%s: Transmit Error\n", dev->name);
1da177e4
LT
2295 }
2296 if (events & IEVENT_BSY) {
09f75cd7 2297 dev->stats.rx_errors++;
1da177e4
LT
2298 priv->extra_stats.rx_bsy++;
2299
7d12e780 2300 gfar_receive(irq, dev_id);
1da177e4 2301
0bbaf069 2302 if (netif_msg_rx_err(priv))
538cc7ee
SS
2303 printk(KERN_DEBUG "%s: busy error (rstat: %x)\n",
2304 dev->name, gfar_read(&priv->regs->rstat));
1da177e4
LT
2305 }
2306 if (events & IEVENT_BABR) {
09f75cd7 2307 dev->stats.rx_errors++;
1da177e4
LT
2308 priv->extra_stats.rx_babr++;
2309
0bbaf069 2310 if (netif_msg_rx_err(priv))
538cc7ee 2311 printk(KERN_DEBUG "%s: babbling RX error\n", dev->name);
1da177e4
LT
2312 }
2313 if (events & IEVENT_EBERR) {
2314 priv->extra_stats.eberr++;
0bbaf069 2315 if (netif_msg_rx_err(priv))
538cc7ee 2316 printk(KERN_DEBUG "%s: bus error\n", dev->name);
1da177e4 2317 }
0bbaf069 2318 if ((events & IEVENT_RXC) && netif_msg_rx_status(priv))
538cc7ee 2319 printk(KERN_DEBUG "%s: control frame\n", dev->name);
1da177e4
LT
2320
2321 if (events & IEVENT_BABT) {
2322 priv->extra_stats.tx_babt++;
0bbaf069 2323 if (netif_msg_tx_err(priv))
538cc7ee 2324 printk(KERN_DEBUG "%s: babbling TX error\n", dev->name);
1da177e4
LT
2325 }
2326 return IRQ_HANDLED;
2327}
2328
72abb461
KS
2329/* work with hotplug and coldplug */
2330MODULE_ALIAS("platform:fsl-gianfar");
2331
b31a1d8b
AF
2332static struct of_device_id gfar_match[] =
2333{
2334 {
2335 .type = "network",
2336 .compatible = "gianfar",
2337 },
2338 {},
2339};
2340
1da177e4 2341/* Structure for a device driver */
b31a1d8b
AF
2342static struct of_platform_driver gfar_driver = {
2343 .name = "fsl-gianfar",
2344 .match_table = gfar_match,
2345
1da177e4
LT
2346 .probe = gfar_probe,
2347 .remove = gfar_remove,
d87eb127
SW
2348 .suspend = gfar_suspend,
2349 .resume = gfar_resume,
1da177e4
LT
2350};
2351
2352static int __init gfar_init(void)
2353{
1577ecef 2354 return of_register_platform_driver(&gfar_driver);
1da177e4
LT
2355}
2356
2357static void __exit gfar_exit(void)
2358{
b31a1d8b 2359 of_unregister_platform_driver(&gfar_driver);
1da177e4
LT
2360}
2361
2362module_init(gfar_init);
2363module_exit(gfar_exit);
2364