/*
 * Cadence MACB/GEM Ethernet Controller driver
 *
 * Copyright (C) 2004-2006 Atmel Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/clk.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/circ_buf.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/io.h>
#include <linux/gpio.h>
#include <linux/gpio/consumer.h>
#include <linux/interrupt.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/dma-mapping.h>
#include <linux/platform_data/macb.h>
#include <linux/platform_device.h>
#include <linux/phy.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/of_gpio.h>
#include <linux/of_mdio.h>
#include <linux/of_net.h>

#include "macb.h"

#define MACB_RX_BUFFER_SIZE	128
#define RX_BUFFER_MULTIPLE	64	/* bytes */
#define RX_RING_SIZE		512	/* must be power of 2 */
#define RX_RING_BYTES		(sizeof(struct macb_dma_desc) * RX_RING_SIZE)

#define TX_RING_SIZE		128	/* must be power of 2 */
#define TX_RING_BYTES		(sizeof(struct macb_dma_desc) * TX_RING_SIZE)

/* level of occupied TX descriptors under which we wake up TX process */
#define MACB_TX_WAKEUP_THRESH	(3 * TX_RING_SIZE / 4)

#define MACB_RX_INT_FLAGS	(MACB_BIT(RCOMP) | MACB_BIT(RXUBR) \
				 | MACB_BIT(ISR_ROVR))
#define MACB_TX_ERR_FLAGS	(MACB_BIT(ISR_TUND) \
				 | MACB_BIT(ISR_RLE) \
				 | MACB_BIT(TXERR))
#define MACB_TX_INT_FLAGS	(MACB_TX_ERR_FLAGS | MACB_BIT(TCOMP))

#define MACB_MAX_TX_LEN		((unsigned int)((1 << MACB_TX_FRMLEN_SIZE) - 1))
#define GEM_MAX_TX_LEN		((unsigned int)((1 << GEM_TX_FRMLEN_SIZE) - 1))

#define GEM_MTU_MIN_SIZE	68

#define MACB_WOL_HAS_MAGIC_PACKET	(0x1 << 0)
#define MACB_WOL_ENABLED		(0x1 << 1)

/* Graceful stop timeouts in us. We should allow up to
 * 1 frame time (10 Mbits/s, full-duplex, ignoring collisions)
 */
#define MACB_HALT_TIMEOUT	1230

/* Ring buffer accessors */
static unsigned int macb_tx_ring_wrap(unsigned int index)
{
	return index & (TX_RING_SIZE - 1);
}

static struct macb_dma_desc *macb_tx_desc(struct macb_queue *queue,
					  unsigned int index)
{
	return &queue->tx_ring[macb_tx_ring_wrap(index)];
}

static struct macb_tx_skb *macb_tx_skb(struct macb_queue *queue,
				       unsigned int index)
{
	return &queue->tx_skb[macb_tx_ring_wrap(index)];
}

static dma_addr_t macb_tx_dma(struct macb_queue *queue, unsigned int index)
{
	dma_addr_t offset;

	offset = macb_tx_ring_wrap(index) * sizeof(struct macb_dma_desc);

	return queue->tx_ring_dma + offset;
}

static unsigned int macb_rx_ring_wrap(unsigned int index)
{
	return index & (RX_RING_SIZE - 1);
}

static struct macb_dma_desc *macb_rx_desc(struct macb *bp, unsigned int index)
{
	return &bp->rx_ring[macb_rx_ring_wrap(index)];
}

static void *macb_rx_buffer(struct macb *bp, unsigned int index)
{
	return bp->rx_buffers + bp->rx_buffer_size * macb_rx_ring_wrap(index);
}

/* I/O accessors */
static u32 hw_readl_native(struct macb *bp, int offset)
{
	return __raw_readl(bp->regs + offset);
}

static void hw_writel_native(struct macb *bp, int offset, u32 value)
{
	__raw_writel(value, bp->regs + offset);
}

static u32 hw_readl(struct macb *bp, int offset)
{
	return readl_relaxed(bp->regs + offset);
}

static void hw_writel(struct macb *bp, int offset, u32 value)
{
	writel_relaxed(value, bp->regs + offset);
}

/* Find the CPU endianness by using the loopback bit of the NCR register. When
 * the CPU is in big-endian mode we need to program swapped mode for management
 * descriptor access.
 */
static bool hw_is_native_io(void __iomem *addr)
{
	u32 value = MACB_BIT(LLB);

	__raw_writel(value, addr + MACB_NCR);
	value = __raw_readl(addr + MACB_NCR);

	/* Write 0 back to disable everything */
	__raw_writel(0, addr + MACB_NCR);

	return value == MACB_BIT(LLB);
}

static bool hw_is_gem(void __iomem *addr, bool native_io)
{
	u32 id;

	if (native_io)
		id = __raw_readl(addr + MACB_MID);
	else
		id = readl_relaxed(addr + MACB_MID);

	return MACB_BFEXT(IDNUM, id) >= 0x2;
}

static void macb_set_hwaddr(struct macb *bp)
{
	u32 bottom;
	u16 top;

	bottom = cpu_to_le32(*((u32 *)bp->dev->dev_addr));
	macb_or_gem_writel(bp, SA1B, bottom);
	top = cpu_to_le16(*((u16 *)(bp->dev->dev_addr + 4)));
	macb_or_gem_writel(bp, SA1T, top);

	/* Clear unused address register sets */
	macb_or_gem_writel(bp, SA2B, 0);
	macb_or_gem_writel(bp, SA2T, 0);
	macb_or_gem_writel(bp, SA3B, 0);
	macb_or_gem_writel(bp, SA3T, 0);
	macb_or_gem_writel(bp, SA4B, 0);
	macb_or_gem_writel(bp, SA4T, 0);
}

static void macb_get_hwaddr(struct macb *bp)
{
	struct macb_platform_data *pdata;
	u32 bottom;
	u16 top;
	u8 addr[6];
	int i;

	pdata = dev_get_platdata(&bp->pdev->dev);

	/* Check all 4 address registers for a valid address */
	for (i = 0; i < 4; i++) {
		bottom = macb_or_gem_readl(bp, SA1B + i * 8);
		top = macb_or_gem_readl(bp, SA1T + i * 8);

		if (pdata && pdata->rev_eth_addr) {
			addr[5] = bottom & 0xff;
			addr[4] = (bottom >> 8) & 0xff;
			addr[3] = (bottom >> 16) & 0xff;
			addr[2] = (bottom >> 24) & 0xff;
			addr[1] = top & 0xff;
			addr[0] = (top & 0xff00) >> 8;
		} else {
			addr[0] = bottom & 0xff;
			addr[1] = (bottom >> 8) & 0xff;
			addr[2] = (bottom >> 16) & 0xff;
			addr[3] = (bottom >> 24) & 0xff;
			addr[4] = top & 0xff;
			addr[5] = (top >> 8) & 0xff;
		}

		if (is_valid_ether_addr(addr)) {
			memcpy(bp->dev->dev_addr, addr, sizeof(addr));
			return;
		}
	}

	dev_info(&bp->pdev->dev, "invalid hw address, using random\n");
	eth_hw_addr_random(bp->dev);
}

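/* MDIO accessors: shuttle one management frame through the MAN register and
 * busy-wait on the IDLE bit in NSR until the PHY transfer has completed.
 */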
static int macb_mdio_read(struct mii_bus *bus, int mii_id, int regnum)
{
	struct macb *bp = bus->priv;
	int value;

	macb_writel(bp, MAN, (MACB_BF(SOF, MACB_MAN_SOF)
			      | MACB_BF(RW, MACB_MAN_READ)
			      | MACB_BF(PHYA, mii_id)
			      | MACB_BF(REGA, regnum)
			      | MACB_BF(CODE, MACB_MAN_CODE)));

	/* wait for end of transfer */
	while (!MACB_BFEXT(IDLE, macb_readl(bp, NSR)))
		cpu_relax();

	value = MACB_BFEXT(DATA, macb_readl(bp, MAN));

	return value;
}

static int macb_mdio_write(struct mii_bus *bus, int mii_id, int regnum,
			   u16 value)
{
	struct macb *bp = bus->priv;

	macb_writel(bp, MAN, (MACB_BF(SOF, MACB_MAN_SOF)
			      | MACB_BF(RW, MACB_MAN_WRITE)
			      | MACB_BF(PHYA, mii_id)
			      | MACB_BF(REGA, regnum)
			      | MACB_BF(CODE, MACB_MAN_CODE)
			      | MACB_BF(DATA, value)));

	/* wait for end of transfer */
	while (!MACB_BFEXT(IDLE, macb_readl(bp, NSR)))
		cpu_relax();

	return 0;
}

/**
 * macb_set_tx_clk() - Set a clock to a new frequency
 * @clk:	Pointer to the clock to change
 * @speed:	New link speed, used to select the target frequency
 * @dev:	Pointer to the struct net_device
 */
static void macb_set_tx_clk(struct clk *clk, int speed, struct net_device *dev)
{
	long ferr, rate, rate_rounded;

	if (!clk)
		return;

	switch (speed) {
	case SPEED_10:
		rate = 2500000;
		break;
	case SPEED_100:
		rate = 25000000;
		break;
	case SPEED_1000:
		rate = 125000000;
		break;
	default:
		return;
	}

	rate_rounded = clk_round_rate(clk, rate);
	if (rate_rounded < 0)
		return;

	/* RGMII allows 50 ppm frequency error. Test and warn if this limit
	 * is not satisfied.
	 */
	ferr = abs(rate_rounded - rate);
	ferr = DIV_ROUND_UP(ferr, rate / 100000);
	if (ferr > 5)
		netdev_warn(dev, "unable to generate target frequency: %ld Hz\n",
			    rate);

	if (clk_set_rate(clk, rate_rounded))
		netdev_err(dev, "adjusting tx_clk failed.\n");
}

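/* adjust_link callback for the PHY library: mirror the negotiated speed and
 * duplex into NCFGR, retune tx_clk and report carrier changes.
 */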
static void macb_handle_link_change(struct net_device *dev)
{
	struct macb *bp = netdev_priv(dev);
	struct phy_device *phydev = bp->phy_dev;
	unsigned long flags;
	int status_change = 0;

	spin_lock_irqsave(&bp->lock, flags);

	if (phydev->link) {
		if ((bp->speed != phydev->speed) ||
		    (bp->duplex != phydev->duplex)) {
			u32 reg;

			reg = macb_readl(bp, NCFGR);
			reg &= ~(MACB_BIT(SPD) | MACB_BIT(FD));
			if (macb_is_gem(bp))
				reg &= ~GEM_BIT(GBE);

			if (phydev->duplex)
				reg |= MACB_BIT(FD);
			if (phydev->speed == SPEED_100)
				reg |= MACB_BIT(SPD);
			if (phydev->speed == SPEED_1000 &&
			    bp->caps & MACB_CAPS_GIGABIT_MODE_AVAILABLE)
				reg |= GEM_BIT(GBE);

			macb_or_gem_writel(bp, NCFGR, reg);

			bp->speed = phydev->speed;
			bp->duplex = phydev->duplex;
			status_change = 1;
		}
	}

	if (phydev->link != bp->link) {
		if (!phydev->link) {
			bp->speed = 0;
			bp->duplex = -1;
		}
		bp->link = phydev->link;

		status_change = 1;
	}

	spin_unlock_irqrestore(&bp->lock, flags);

	if (status_change) {
		if (phydev->link) {
			/* Update the TX clock rate if and only if the link is
			 * up and there has been a link change.
			 */
			macb_set_tx_clk(bp->tx_clk, phydev->speed, dev);

			netif_carrier_on(dev);
			netdev_info(dev, "link up (%d/%s)\n",
				    phydev->speed,
				    phydev->duplex == DUPLEX_FULL ?
				    "Full" : "Half");
		} else {
			netif_carrier_off(dev);
			netdev_info(dev, "link down\n");
		}
	}
}

/* based on au1000_eth.c */
371static int macb_mii_probe(struct net_device *dev)
89e5785f 372{
6c36a707 373 struct macb *bp = netdev_priv(dev);
2dbfdbb9 374 struct macb_platform_data *pdata;
7455a76f 375 struct phy_device *phydev;
2dbfdbb9 376 int phy_irq;
7455a76f 377 int ret;
6c36a707 378
7455a76f 379 phydev = phy_find_first(bp->mii_bus);
6c36a707 380 if (!phydev) {
c220f8cd 381 netdev_err(dev, "no PHY found\n");
7daa78e3 382 return -ENXIO;
6c36a707
R
383 }
384
2dbfdbb9
JE
385 pdata = dev_get_platdata(&bp->pdev->dev);
386 if (pdata && gpio_is_valid(pdata->phy_irq_pin)) {
64ec42fe
MF
387 ret = devm_gpio_request(&bp->pdev->dev, pdata->phy_irq_pin,
388 "phy int");
2dbfdbb9
JE
389 if (!ret) {
390 phy_irq = gpio_to_irq(pdata->phy_irq_pin);
391 phydev->irq = (phy_irq < 0) ? PHY_POLL : phy_irq;
392 }
393 }
6c36a707
R
394
395 /* attach the mac to the phy */
f9a8f83b 396 ret = phy_connect_direct(dev, phydev, &macb_handle_link_change,
fb97a846 397 bp->phy_interface);
7455a76f 398 if (ret) {
c220f8cd 399 netdev_err(dev, "Could not attach to PHY\n");
7455a76f 400 return ret;
6c36a707
R
401 }
402
403 /* mask with MAC supported features */
e175587f 404 if (macb_is_gem(bp) && bp->caps & MACB_CAPS_GIGABIT_MODE_AVAILABLE)
140b7552
PV
405 phydev->supported &= PHY_GBIT_FEATURES;
406 else
407 phydev->supported &= PHY_BASIC_FEATURES;
6c36a707 408
222ca8e0
NS
409 if (bp->caps & MACB_CAPS_NO_GIGABIT_HALF)
410 phydev->supported &= ~SUPPORTED_1000baseT_Half;
411
6c36a707
R
412 phydev->advertising = phydev->supported;
413
414 bp->link = 0;
415 bp->speed = 0;
416 bp->duplex = -1;
417 bp->phy_dev = phydev;
418
419 return 0;
89e5785f
HS
420}
421
421d9df0 422static int macb_mii_init(struct macb *bp)
89e5785f 423{
84e0cdb0 424 struct macb_platform_data *pdata;
148cbb53 425 struct device_node *np;
6c36a707 426 int err = -ENXIO, i;
89e5785f 427
3dbda77e 428 /* Enable management port */
6c36a707 429 macb_writel(bp, NCR, MACB_BIT(MPE));
89e5785f 430
298cf9be 431 bp->mii_bus = mdiobus_alloc();
aa50b552 432 if (!bp->mii_bus) {
298cf9be
LB
433 err = -ENOMEM;
434 goto err_out;
435 }
436
437 bp->mii_bus->name = "MACB_mii_bus";
438 bp->mii_bus->read = &macb_mdio_read;
439 bp->mii_bus->write = &macb_mdio_write;
98d5e57e 440 snprintf(bp->mii_bus->id, MII_BUS_ID_SIZE, "%s-%x",
aa50b552 441 bp->pdev->name, bp->pdev->id);
298cf9be 442 bp->mii_bus->priv = bp;
cf669660 443 bp->mii_bus->parent = &bp->pdev->dev;
c607a0d9 444 pdata = dev_get_platdata(&bp->pdev->dev);
89e5785f 445
91523947 446 dev_set_drvdata(&bp->dev->dev, bp->mii_bus);
89e5785f 447
148cbb53
BB
448 np = bp->pdev->dev.of_node;
449 if (np) {
450 /* try dt phy registration */
451 err = of_mdiobus_register(bp->mii_bus, np);
452
		/* fall back to standard PHY registration if no PHY was
		 * found during DT PHY registration
		 */
148cbb53
BB
456 if (!err && !phy_find_first(bp->mii_bus)) {
457 for (i = 0; i < PHY_MAX_ADDR; i++) {
458 struct phy_device *phydev;
459
460 phydev = mdiobus_scan(bp->mii_bus, i);
ce24c2b8
SS
461 if (IS_ERR(phydev) &&
462 PTR_ERR(phydev) != -ENODEV) {
148cbb53
BB
463 err = PTR_ERR(phydev);
464 break;
465 }
466 }
467
468 if (err)
469 goto err_out_unregister_bus;
470 }
471 } else {
148cbb53
BB
472 if (pdata)
473 bp->mii_bus->phy_mask = pdata->phy_mask;
474
475 err = mdiobus_register(bp->mii_bus);
476 }
477
478 if (err)
e7f4dc35 479 goto err_out_free_mdiobus;
89e5785f 480
7daa78e3
BB
481 err = macb_mii_probe(bp->dev);
482 if (err)
6c36a707 483 goto err_out_unregister_bus;
89e5785f 484
6c36a707 485 return 0;
89e5785f 486
6c36a707 487err_out_unregister_bus:
298cf9be 488 mdiobus_unregister(bp->mii_bus);
298cf9be
LB
489err_out_free_mdiobus:
490 mdiobus_free(bp->mii_bus);
6c36a707
R
491err_out:
492 return err;
89e5785f
HS
493}
494
495static void macb_update_stats(struct macb *bp)
496{
a494ed8e
JI
497 u32 *p = &bp->hw_stats.macb.rx_pause_frames;
498 u32 *end = &bp->hw_stats.macb.tx_pause_frames + 1;
f2ce8a9e 499 int offset = MACB_PFR;
89e5785f
HS
500
501 WARN_ON((unsigned long)(end - p - 1) != (MACB_TPF - MACB_PFR) / 4);
502
96ec6310 503 for (; p < end; p++, offset += 4)
7a6e0706 504 *p += bp->macb_reg_readl(bp, offset);
89e5785f
HS
505}
506
e86cd53a 507static int macb_halt_tx(struct macb *bp)
89e5785f 508{
e86cd53a
NF
509 unsigned long halt_time, timeout;
510 u32 status;
89e5785f 511
e86cd53a 512 macb_writel(bp, NCR, macb_readl(bp, NCR) | MACB_BIT(THALT));
89e5785f 513
e86cd53a
NF
514 timeout = jiffies + usecs_to_jiffies(MACB_HALT_TIMEOUT);
515 do {
516 halt_time = jiffies;
517 status = macb_readl(bp, TSR);
518 if (!(status & MACB_BIT(TGO)))
519 return 0;
89e5785f 520
e86cd53a
NF
521 usleep_range(10, 250);
522 } while (time_before(halt_time, timeout));
bdcba151 523
e86cd53a
NF
524 return -ETIMEDOUT;
525}
39eddb4c 526
a4c35ed3
CP
527static void macb_tx_unmap(struct macb *bp, struct macb_tx_skb *tx_skb)
528{
529 if (tx_skb->mapping) {
530 if (tx_skb->mapped_as_page)
531 dma_unmap_page(&bp->pdev->dev, tx_skb->mapping,
532 tx_skb->size, DMA_TO_DEVICE);
533 else
534 dma_unmap_single(&bp->pdev->dev, tx_skb->mapping,
535 tx_skb->size, DMA_TO_DEVICE);
536 tx_skb->mapping = 0;
537 }
538
539 if (tx_skb->skb) {
540 dev_kfree_skb_any(tx_skb->skb);
541 tx_skb->skb = NULL;
542 }
543}
544
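/* Work item scheduled from the TX error interrupt: halt transmission, reclaim
 * everything still queued in the TX ring, reset the ring and restart the
 * transmitter.
 */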
545static void macb_tx_error_task(struct work_struct *work)
546{
02c958dd
CP
547 struct macb_queue *queue = container_of(work, struct macb_queue,
548 tx_error_task);
549 struct macb *bp = queue->bp;
e86cd53a 550 struct macb_tx_skb *tx_skb;
02c958dd 551 struct macb_dma_desc *desc;
e86cd53a
NF
552 struct sk_buff *skb;
553 unsigned int tail;
02c958dd
CP
554 unsigned long flags;
555
556 netdev_vdbg(bp->dev, "macb_tx_error_task: q = %u, t = %u, h = %u\n",
557 (unsigned int)(queue - bp->queues),
558 queue->tx_tail, queue->tx_head);
bdcba151 559
02c958dd
CP
560 /* Prevent the queue IRQ handlers from running: each of them may call
561 * macb_tx_interrupt(), which in turn may call netif_wake_subqueue().
562 * As explained below, we have to halt the transmission before updating
563 * TBQP registers so we call netif_tx_stop_all_queues() to notify the
564 * network engine about the macb/gem being halted.
565 */
566 spin_lock_irqsave(&bp->lock, flags);
bdcba151 567
e86cd53a 568 /* Make sure nobody is trying to queue up new packets */
02c958dd 569 netif_tx_stop_all_queues(bp->dev);
d3e61457 570
64ec42fe 571 /* Stop transmission now
e86cd53a 572 * (in case we have just queued new packets)
02c958dd 573 * macb/gem must be halted to write TBQP register
e86cd53a
NF
574 */
575 if (macb_halt_tx(bp))
576 /* Just complain for now, reinitializing TX path can be good */
577 netdev_err(bp->dev, "BUG: halt tx timed out\n");
bdcba151 578
64ec42fe 579 /* Treat frames in TX queue including the ones that caused the error.
e86cd53a
NF
580 * Free transmit buffers in upper layer.
581 */
02c958dd
CP
582 for (tail = queue->tx_tail; tail != queue->tx_head; tail++) {
583 u32 ctrl;
55054a16 584
02c958dd 585 desc = macb_tx_desc(queue, tail);
e86cd53a 586 ctrl = desc->ctrl;
02c958dd 587 tx_skb = macb_tx_skb(queue, tail);
e86cd53a 588 skb = tx_skb->skb;
bdcba151 589
e86cd53a 590 if (ctrl & MACB_BIT(TX_USED)) {
a4c35ed3
CP
591 /* skb is set for the last buffer of the frame */
592 while (!skb) {
593 macb_tx_unmap(bp, tx_skb);
594 tail++;
02c958dd 595 tx_skb = macb_tx_skb(queue, tail);
a4c35ed3
CP
596 skb = tx_skb->skb;
597 }
598
599 /* ctrl still refers to the first buffer descriptor
600 * since it's the only one written back by the hardware
601 */
602 if (!(ctrl & MACB_BIT(TX_BUF_EXHAUSTED))) {
603 netdev_vdbg(bp->dev, "txerr skb %u (data %p) TX complete\n",
604 macb_tx_ring_wrap(tail), skb->data);
605 bp->stats.tx_packets++;
606 bp->stats.tx_bytes += skb->len;
607 }
e86cd53a 608 } else {
64ec42fe
MF
609 /* "Buffers exhausted mid-frame" errors may only happen
610 * if the driver is buggy, so complain loudly about
611 * those. Statistics are updated by hardware.
e86cd53a
NF
612 */
613 if (ctrl & MACB_BIT(TX_BUF_EXHAUSTED))
614 netdev_err(bp->dev,
615 "BUG: TX buffers exhausted mid-frame\n");
39eddb4c 616
e86cd53a
NF
617 desc->ctrl = ctrl | MACB_BIT(TX_USED);
618 }
619
a4c35ed3 620 macb_tx_unmap(bp, tx_skb);
89e5785f
HS
621 }
622
02c958dd
CP
623 /* Set end of TX queue */
624 desc = macb_tx_desc(queue, 0);
625 desc->addr = 0;
626 desc->ctrl = MACB_BIT(TX_USED);
627
e86cd53a
NF
628 /* Make descriptor updates visible to hardware */
629 wmb();
630
631 /* Reinitialize the TX desc queue */
02c958dd 632 queue_writel(queue, TBQP, queue->tx_ring_dma);
e86cd53a 633 /* Make TX ring reflect state of hardware */
02c958dd
CP
634 queue->tx_head = 0;
635 queue->tx_tail = 0;
e86cd53a
NF
636
637 /* Housework before enabling TX IRQ */
638 macb_writel(bp, TSR, macb_readl(bp, TSR));
02c958dd
CP
639 queue_writel(queue, IER, MACB_TX_INT_FLAGS);
640
641 /* Now we are ready to start transmission again */
642 netif_tx_start_all_queues(bp->dev);
643 macb_writel(bp, NCR, macb_readl(bp, NCR) | MACB_BIT(TSTART));
644
645 spin_unlock_irqrestore(&bp->lock, flags);
e86cd53a
NF
646}
647
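/* Reclaim completed TX descriptors for one queue, update the TX statistics
 * and wake the subqueue once enough ring space has been freed.
 */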
02c958dd 648static void macb_tx_interrupt(struct macb_queue *queue)
e86cd53a
NF
649{
650 unsigned int tail;
651 unsigned int head;
652 u32 status;
02c958dd
CP
653 struct macb *bp = queue->bp;
654 u16 queue_index = queue - bp->queues;
e86cd53a
NF
655
656 status = macb_readl(bp, TSR);
657 macb_writel(bp, TSR, status);
658
581df9e1 659 if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE)
02c958dd 660 queue_writel(queue, ISR, MACB_BIT(TCOMP));
749a2b66 661
e86cd53a 662 netdev_vdbg(bp->dev, "macb_tx_interrupt status = 0x%03lx\n",
aa50b552 663 (unsigned long)status);
89e5785f 664
02c958dd
CP
665 head = queue->tx_head;
666 for (tail = queue->tx_tail; tail != head; tail++) {
55054a16
HS
667 struct macb_tx_skb *tx_skb;
668 struct sk_buff *skb;
669 struct macb_dma_desc *desc;
670 u32 ctrl;
89e5785f 671
02c958dd 672 desc = macb_tx_desc(queue, tail);
89e5785f 673
03dbe05f 674 /* Make hw descriptor updates visible to CPU */
89e5785f 675 rmb();
03dbe05f 676
55054a16 677 ctrl = desc->ctrl;
89e5785f 678
a4c35ed3
CP
679 /* TX_USED bit is only set by hardware on the very first buffer
680 * descriptor of the transmitted frame.
681 */
55054a16 682 if (!(ctrl & MACB_BIT(TX_USED)))
89e5785f
HS
683 break;
684
a4c35ed3
CP
685 /* Process all buffers of the current transmitted frame */
686 for (;; tail++) {
02c958dd 687 tx_skb = macb_tx_skb(queue, tail);
a4c35ed3
CP
688 skb = tx_skb->skb;
689
690 /* First, update TX stats if needed */
691 if (skb) {
692 netdev_vdbg(bp->dev, "skb %u (data %p) TX complete\n",
693 macb_tx_ring_wrap(tail), skb->data);
694 bp->stats.tx_packets++;
695 bp->stats.tx_bytes += skb->len;
696 }
55054a16 697
a4c35ed3
CP
698 /* Now we can safely release resources */
699 macb_tx_unmap(bp, tx_skb);
700
701 /* skb is set only for the last buffer of the frame.
702 * WARNING: at this point skb has been freed by
703 * macb_tx_unmap().
704 */
705 if (skb)
706 break;
707 }
89e5785f
HS
708 }
709
02c958dd
CP
710 queue->tx_tail = tail;
711 if (__netif_subqueue_stopped(bp->dev, queue_index) &&
712 CIRC_CNT(queue->tx_head, queue->tx_tail,
713 TX_RING_SIZE) <= MACB_TX_WAKEUP_THRESH)
714 netif_wake_subqueue(bp->dev, queue_index);
89e5785f
HS
715}
716
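/* Refill the GEM RX ring: allocate and DMA-map one sk_buff for every free
 * descriptor between rx_prepared_head and rx_tail.
 */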
717static void gem_rx_refill(struct macb *bp)
718{
719 unsigned int entry;
720 struct sk_buff *skb;
4df95131
NF
721 dma_addr_t paddr;
722
64ec42fe
MF
723 while (CIRC_SPACE(bp->rx_prepared_head, bp->rx_tail,
724 RX_RING_SIZE) > 0) {
4df95131 725 entry = macb_rx_ring_wrap(bp->rx_prepared_head);
4df95131
NF
726
727 /* Make hw descriptor updates visible to CPU */
728 rmb();
729
4df95131
NF
730 bp->rx_prepared_head++;
731
aa50b552 732 if (!bp->rx_skbuff[entry]) {
4df95131
NF
733 /* allocate sk_buff for this free entry in ring */
734 skb = netdev_alloc_skb(bp->dev, bp->rx_buffer_size);
aa50b552 735 if (unlikely(!skb)) {
4df95131
NF
736 netdev_err(bp->dev,
737 "Unable to allocate sk_buff\n");
738 break;
739 }
4df95131
NF
740
741 /* now fill corresponding descriptor entry */
742 paddr = dma_map_single(&bp->pdev->dev, skb->data,
64ec42fe
MF
743 bp->rx_buffer_size,
744 DMA_FROM_DEVICE);
92030908
SB
745 if (dma_mapping_error(&bp->pdev->dev, paddr)) {
746 dev_kfree_skb(skb);
747 break;
748 }
749
750 bp->rx_skbuff[entry] = skb;
4df95131
NF
751
752 if (entry == RX_RING_SIZE - 1)
753 paddr |= MACB_BIT(RX_WRAP);
754 bp->rx_ring[entry].addr = paddr;
755 bp->rx_ring[entry].ctrl = 0;
756
757 /* properly align Ethernet header */
758 skb_reserve(skb, NET_IP_ALIGN);
d4c216c5
PCK
759 } else {
760 bp->rx_ring[entry].addr &= ~MACB_BIT(RX_USED);
761 bp->rx_ring[entry].ctrl = 0;
4df95131
NF
762 }
763 }
764
765 /* Make descriptor updates visible to hardware */
766 wmb();
767
768 netdev_vdbg(bp->dev, "rx ring: prepared head %d, tail %d\n",
aa50b552 769 bp->rx_prepared_head, bp->rx_tail);
4df95131
NF
770}
771
772/* Mark DMA descriptors from begin up to and not including end as unused */
773static void discard_partial_frame(struct macb *bp, unsigned int begin,
774 unsigned int end)
775{
776 unsigned int frag;
777
778 for (frag = begin; frag != end; frag++) {
779 struct macb_dma_desc *desc = macb_rx_desc(bp, frag);
64ec42fe 780
4df95131
NF
781 desc->addr &= ~MACB_BIT(RX_USED);
782 }
783
784 /* Make descriptor updates visible to hardware */
785 wmb();
786
64ec42fe 787 /* When this happens, the hardware stats registers for
4df95131
NF
788 * whatever caused this is updated, so we don't have to record
789 * anything.
790 */
791}
792
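/* GEM receive path: each descriptor carries a complete frame, so the
 * pre-allocated sk_buff is handed straight to the stack and the ring is
 * replenished by gem_rx_refill().
 */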
793static int gem_rx(struct macb *bp, int budget)
794{
795 unsigned int len;
796 unsigned int entry;
797 struct sk_buff *skb;
798 struct macb_dma_desc *desc;
799 int count = 0;
800
801 while (count < budget) {
802 u32 addr, ctrl;
803
804 entry = macb_rx_ring_wrap(bp->rx_tail);
805 desc = &bp->rx_ring[entry];
806
807 /* Make hw descriptor updates visible to CPU */
808 rmb();
809
810 addr = desc->addr;
811 ctrl = desc->ctrl;
812
813 if (!(addr & MACB_BIT(RX_USED)))
814 break;
815
4df95131
NF
816 bp->rx_tail++;
817 count++;
818
819 if (!(ctrl & MACB_BIT(RX_SOF) && ctrl & MACB_BIT(RX_EOF))) {
820 netdev_err(bp->dev,
821 "not whole frame pointed by descriptor\n");
822 bp->stats.rx_dropped++;
823 break;
824 }
825 skb = bp->rx_skbuff[entry];
826 if (unlikely(!skb)) {
827 netdev_err(bp->dev,
828 "inconsistent Rx descriptor chain\n");
829 bp->stats.rx_dropped++;
830 break;
831 }
832 /* now everything is ready for receiving packet */
833 bp->rx_skbuff[entry] = NULL;
98b5a0f4 834 len = ctrl & bp->rx_frm_len_mask;
4df95131
NF
835
836 netdev_vdbg(bp->dev, "gem_rx %u (len %u)\n", entry, len);
837
838 skb_put(skb, len);
839 addr = MACB_BF(RX_WADDR, MACB_BFEXT(RX_WADDR, addr));
840 dma_unmap_single(&bp->pdev->dev, addr,
48330e08 841 bp->rx_buffer_size, DMA_FROM_DEVICE);
4df95131
NF
842
843 skb->protocol = eth_type_trans(skb, bp->dev);
844 skb_checksum_none_assert(skb);
924ec53c
CP
845 if (bp->dev->features & NETIF_F_RXCSUM &&
846 !(bp->dev->flags & IFF_PROMISC) &&
847 GEM_BFEXT(RX_CSUM, ctrl) & GEM_RX_CSUM_CHECKED_MASK)
848 skb->ip_summed = CHECKSUM_UNNECESSARY;
4df95131
NF
849
850 bp->stats.rx_packets++;
851 bp->stats.rx_bytes += skb->len;
852
853#if defined(DEBUG) && defined(VERBOSE_DEBUG)
854 netdev_vdbg(bp->dev, "received skb of length %u, csum: %08x\n",
855 skb->len, skb->csum);
856 print_hex_dump(KERN_DEBUG, " mac: ", DUMP_PREFIX_ADDRESS, 16, 1,
51f83014 857 skb_mac_header(skb), 16, true);
4df95131
NF
858 print_hex_dump(KERN_DEBUG, "data: ", DUMP_PREFIX_ADDRESS, 16, 1,
859 skb->data, 32, true);
860#endif
861
862 netif_receive_skb(skb);
863 }
864
865 gem_rx_refill(bp);
866
867 return count;
868}
869
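/* Copy a frame that may span several small RX buffers into a freshly
 * allocated sk_buff and pass it up the stack.
 */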
870static int macb_rx_frame(struct macb *bp, unsigned int first_frag,
871 unsigned int last_frag)
872{
873 unsigned int len;
874 unsigned int frag;
29bc2e1e 875 unsigned int offset;
89e5785f 876 struct sk_buff *skb;
55054a16 877 struct macb_dma_desc *desc;
89e5785f 878
55054a16 879 desc = macb_rx_desc(bp, last_frag);
98b5a0f4 880 len = desc->ctrl & bp->rx_frm_len_mask;
89e5785f 881
a268adb1 882 netdev_vdbg(bp->dev, "macb_rx_frame frags %u - %u (len %u)\n",
aa50b552
MF
883 macb_rx_ring_wrap(first_frag),
884 macb_rx_ring_wrap(last_frag), len);
89e5785f 885
64ec42fe 886 /* The ethernet header starts NET_IP_ALIGN bytes into the
29bc2e1e
HS
887 * first buffer. Since the header is 14 bytes, this makes the
888 * payload word-aligned.
889 *
890 * Instead of calling skb_reserve(NET_IP_ALIGN), we just copy
891 * the two padding bytes into the skb so that we avoid hitting
892 * the slowpath in memcpy(), and pull them off afterwards.
893 */
894 skb = netdev_alloc_skb(bp->dev, len + NET_IP_ALIGN);
89e5785f
HS
895 if (!skb) {
896 bp->stats.rx_dropped++;
55054a16
HS
897 for (frag = first_frag; ; frag++) {
898 desc = macb_rx_desc(bp, frag);
899 desc->addr &= ~MACB_BIT(RX_USED);
89e5785f
HS
900 if (frag == last_frag)
901 break;
902 }
03dbe05f
HS
903
904 /* Make descriptor updates visible to hardware */
89e5785f 905 wmb();
03dbe05f 906
89e5785f
HS
907 return 1;
908 }
909
29bc2e1e
HS
910 offset = 0;
911 len += NET_IP_ALIGN;
bc8acf2c 912 skb_checksum_none_assert(skb);
89e5785f
HS
913 skb_put(skb, len);
914
55054a16 915 for (frag = first_frag; ; frag++) {
1b44791a 916 unsigned int frag_len = bp->rx_buffer_size;
89e5785f
HS
917
918 if (offset + frag_len > len) {
9ba723b0
CP
919 if (unlikely(frag != last_frag)) {
920 dev_kfree_skb_any(skb);
921 return -1;
922 }
89e5785f
HS
923 frag_len = len - offset;
924 }
27d7ff46 925 skb_copy_to_linear_data_offset(skb, offset,
aa50b552
MF
926 macb_rx_buffer(bp, frag),
927 frag_len);
1b44791a 928 offset += bp->rx_buffer_size;
55054a16
HS
929 desc = macb_rx_desc(bp, frag);
930 desc->addr &= ~MACB_BIT(RX_USED);
89e5785f
HS
931
932 if (frag == last_frag)
933 break;
934 }
935
03dbe05f
HS
936 /* Make descriptor updates visible to hardware */
937 wmb();
938
29bc2e1e 939 __skb_pull(skb, NET_IP_ALIGN);
89e5785f
HS
940 skb->protocol = eth_type_trans(skb, bp->dev);
941
942 bp->stats.rx_packets++;
29bc2e1e 943 bp->stats.rx_bytes += skb->len;
a268adb1 944 netdev_vdbg(bp->dev, "received skb of length %u, csum: %08x\n",
aa50b552 945 skb->len, skb->csum);
89e5785f
HS
946 netif_receive_skb(skb);
947
948 return 0;
949}
950
9ba723b0
CP
951static inline void macb_init_rx_ring(struct macb *bp)
952{
953 dma_addr_t addr;
954 int i;
955
956 addr = bp->rx_buffers_dma;
957 for (i = 0; i < RX_RING_SIZE; i++) {
958 bp->rx_ring[i].addr = addr;
959 bp->rx_ring[i].ctrl = 0;
960 addr += bp->rx_buffer_size;
961 }
962 bp->rx_ring[RX_RING_SIZE - 1].addr |= MACB_BIT(RX_WRAP);
963}
964
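/* MACB receive path: walk the descriptor ring, reassemble multi-buffer frames
 * with macb_rx_frame() and reset the RX queue if it ever looks corrupt.
 */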
965static int macb_rx(struct macb *bp, int budget)
966{
9ba723b0 967 bool reset_rx_queue = false;
89e5785f 968 int received = 0;
55054a16 969 unsigned int tail;
89e5785f
HS
970 int first_frag = -1;
971
55054a16
HS
972 for (tail = bp->rx_tail; budget > 0; tail++) {
973 struct macb_dma_desc *desc = macb_rx_desc(bp, tail);
89e5785f
HS
974 u32 addr, ctrl;
975
03dbe05f 976 /* Make hw descriptor updates visible to CPU */
89e5785f 977 rmb();
03dbe05f 978
55054a16
HS
979 addr = desc->addr;
980 ctrl = desc->ctrl;
89e5785f
HS
981
982 if (!(addr & MACB_BIT(RX_USED)))
983 break;
984
985 if (ctrl & MACB_BIT(RX_SOF)) {
986 if (first_frag != -1)
987 discard_partial_frame(bp, first_frag, tail);
988 first_frag = tail;
989 }
990
991 if (ctrl & MACB_BIT(RX_EOF)) {
992 int dropped;
9ba723b0
CP
993
994 if (unlikely(first_frag == -1)) {
995 reset_rx_queue = true;
996 continue;
997 }
89e5785f
HS
998
999 dropped = macb_rx_frame(bp, first_frag, tail);
1000 first_frag = -1;
9ba723b0
CP
1001 if (unlikely(dropped < 0)) {
1002 reset_rx_queue = true;
1003 continue;
1004 }
89e5785f
HS
1005 if (!dropped) {
1006 received++;
1007 budget--;
1008 }
1009 }
1010 }
1011
9ba723b0
CP
1012 if (unlikely(reset_rx_queue)) {
1013 unsigned long flags;
1014 u32 ctrl;
1015
1016 netdev_err(bp->dev, "RX queue corruption: reset it\n");
1017
1018 spin_lock_irqsave(&bp->lock, flags);
1019
1020 ctrl = macb_readl(bp, NCR);
1021 macb_writel(bp, NCR, ctrl & ~MACB_BIT(RE));
1022
1023 macb_init_rx_ring(bp);
1024 macb_writel(bp, RBQP, bp->rx_ring_dma);
1025
1026 macb_writel(bp, NCR, ctrl | MACB_BIT(RE));
1027
1028 spin_unlock_irqrestore(&bp->lock, flags);
1029 return received;
1030 }
1031
89e5785f
HS
1032 if (first_frag != -1)
1033 bp->rx_tail = first_frag;
1034 else
1035 bp->rx_tail = tail;
1036
1037 return received;
1038}
1039
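/* NAPI poll handler: process received frames and re-enable RX interrupts once
 * less than the full budget has been consumed.
 */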
bea3348e 1040static int macb_poll(struct napi_struct *napi, int budget)
89e5785f 1041{
bea3348e 1042 struct macb *bp = container_of(napi, struct macb, napi);
bea3348e 1043 int work_done;
89e5785f
HS
1044 u32 status;
1045
1046 status = macb_readl(bp, RSR);
1047 macb_writel(bp, RSR, status);
1048
bea3348e 1049 work_done = 0;
89e5785f 1050
a268adb1 1051 netdev_vdbg(bp->dev, "poll: status = %08lx, budget = %d\n",
aa50b552 1052 (unsigned long)status, budget);
89e5785f 1053
4df95131 1054 work_done = bp->macbgem_ops.mog_rx(bp, budget);
b336369c 1055 if (work_done < budget) {
288379f0 1056 napi_complete(napi);
89e5785f 1057
8770e91a
NF
1058 /* Packets received while interrupts were disabled */
1059 status = macb_readl(bp, RSR);
504ad98d 1060 if (status) {
02f7a34f
SB
1061 if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE)
1062 macb_writel(bp, ISR, MACB_BIT(RCOMP));
8770e91a 1063 napi_reschedule(napi);
02f7a34f
SB
1064 } else {
1065 macb_writel(bp, IER, MACB_RX_INT_FLAGS);
1066 }
b336369c 1067 }
89e5785f
HS
1068
1069 /* TODO: Handle errors */
1070
bea3348e 1071 return work_done;
89e5785f
HS
1072}
1073
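/* Per-queue interrupt handler: schedule NAPI for RX, defer TX errors to the
 * error task, complete TX, and work around the RXUBR DMA stall.
 */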
1074static irqreturn_t macb_interrupt(int irq, void *dev_id)
1075{
02c958dd
CP
1076 struct macb_queue *queue = dev_id;
1077 struct macb *bp = queue->bp;
1078 struct net_device *dev = bp->dev;
bfbb92c4 1079 u32 status, ctrl;
89e5785f 1080
02c958dd 1081 status = queue_readl(queue, ISR);
89e5785f
HS
1082
1083 if (unlikely(!status))
1084 return IRQ_NONE;
1085
1086 spin_lock(&bp->lock);
1087
1088 while (status) {
89e5785f
HS
1089 /* close possible race with dev_close */
1090 if (unlikely(!netif_running(dev))) {
02c958dd 1091 queue_writel(queue, IDR, -1);
24468374
NS
1092 if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE)
1093 queue_writel(queue, ISR, -1);
89e5785f
HS
1094 break;
1095 }
1096
02c958dd
CP
1097 netdev_vdbg(bp->dev, "queue = %u, isr = 0x%08lx\n",
1098 (unsigned int)(queue - bp->queues),
1099 (unsigned long)status);
a268adb1 1100
89e5785f 1101 if (status & MACB_RX_INT_FLAGS) {
64ec42fe 1102 /* There's no point taking any more interrupts
b336369c
JH
1103 * until we have processed the buffers. The
1104 * scheduling call may fail if the poll routine
1105 * is already scheduled, so disable interrupts
1106 * now.
1107 */
02c958dd 1108 queue_writel(queue, IDR, MACB_RX_INT_FLAGS);
581df9e1 1109 if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE)
02c958dd 1110 queue_writel(queue, ISR, MACB_BIT(RCOMP));
b336369c 1111
288379f0 1112 if (napi_schedule_prep(&bp->napi)) {
a268adb1 1113 netdev_vdbg(bp->dev, "scheduling RX softirq\n");
288379f0 1114 __napi_schedule(&bp->napi);
89e5785f
HS
1115 }
1116 }
1117
e86cd53a 1118 if (unlikely(status & (MACB_TX_ERR_FLAGS))) {
02c958dd
CP
1119 queue_writel(queue, IDR, MACB_TX_INT_FLAGS);
1120 schedule_work(&queue->tx_error_task);
6a027b70
SB
1121
1122 if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE)
02c958dd 1123 queue_writel(queue, ISR, MACB_TX_ERR_FLAGS);
6a027b70 1124
e86cd53a
NF
1125 break;
1126 }
1127
1128 if (status & MACB_BIT(TCOMP))
02c958dd 1129 macb_tx_interrupt(queue);
89e5785f 1130
64ec42fe 1131 /* Link change detection isn't possible with RMII, so we'll
89e5785f
HS
1132 * add that if/when we get our hands on a full-blown MII PHY.
1133 */
1134
		/* There is a hardware issue under heavy load where DMA can
		 * stop; this causes endless "used buffer descriptor read"
		 * interrupts, but it can be cleared by re-enabling RX. See
		 * the at91 manual, section 41.3.1, or the Zynq manual,
		 * section 16.7.4, for details.
		 */
bfbb92c4
NS
1141 if (status & MACB_BIT(RXUBR)) {
1142 ctrl = macb_readl(bp, NCR);
1143 macb_writel(bp, NCR, ctrl & ~MACB_BIT(RE));
1144 macb_writel(bp, NCR, ctrl | MACB_BIT(RE));
1145
1146 if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE)
ba504994 1147 queue_writel(queue, ISR, MACB_BIT(RXUBR));
bfbb92c4
NS
1148 }
1149
b19f7f71
AS
1150 if (status & MACB_BIT(ISR_ROVR)) {
1151 /* We missed at least one packet */
f75ba50b
JI
1152 if (macb_is_gem(bp))
1153 bp->hw_stats.gem.rx_overruns++;
1154 else
1155 bp->hw_stats.macb.rx_overruns++;
6a027b70
SB
1156
1157 if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE)
02c958dd 1158 queue_writel(queue, ISR, MACB_BIT(ISR_ROVR));
b19f7f71
AS
1159 }
1160
89e5785f 1161 if (status & MACB_BIT(HRESP)) {
64ec42fe 1162 /* TODO: Reset the hardware, and maybe move the
c220f8cd
JI
1163 * netdev_err to a lower-priority context as well
1164 * (work queue?)
89e5785f 1165 */
c220f8cd 1166 netdev_err(dev, "DMA bus error: HRESP not OK\n");
6a027b70
SB
1167
1168 if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE)
02c958dd 1169 queue_writel(queue, ISR, MACB_BIT(HRESP));
89e5785f
HS
1170 }
1171
02c958dd 1172 status = queue_readl(queue, ISR);
89e5785f
HS
1173 }
1174
1175 spin_unlock(&bp->lock);
1176
1177 return IRQ_HANDLED;
1178}
1179
6e8cf5c0 1180#ifdef CONFIG_NET_POLL_CONTROLLER
64ec42fe 1181/* Polling receive - used by netconsole and other diagnostic tools
6e8cf5c0
TP
1182 * to allow network i/o with interrupts disabled.
1183 */
1184static void macb_poll_controller(struct net_device *dev)
1185{
02c958dd
CP
1186 struct macb *bp = netdev_priv(dev);
1187 struct macb_queue *queue;
6e8cf5c0 1188 unsigned long flags;
02c958dd 1189 unsigned int q;
6e8cf5c0
TP
1190
1191 local_irq_save(flags);
02c958dd
CP
1192 for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue)
1193 macb_interrupt(dev->irq, queue);
6e8cf5c0
TP
1194 local_irq_restore(flags);
1195}
1196#endif
1197
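/* Map a socket buffer (linear data plus paged fragments) onto TX descriptors,
 * splitting it at bp->max_tx_length. Descriptors are written back to front so
 * the controller never sees a partially built frame.
 */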
a4c35ed3 1198static unsigned int macb_tx_map(struct macb *bp,
02c958dd 1199 struct macb_queue *queue,
a4c35ed3 1200 struct sk_buff *skb)
89e5785f 1201{
89e5785f 1202 dma_addr_t mapping;
02c958dd 1203 unsigned int len, entry, i, tx_head = queue->tx_head;
a4c35ed3 1204 struct macb_tx_skb *tx_skb = NULL;
55054a16 1205 struct macb_dma_desc *desc;
a4c35ed3
CP
1206 unsigned int offset, size, count = 0;
1207 unsigned int f, nr_frags = skb_shinfo(skb)->nr_frags;
1208 unsigned int eof = 1;
89e5785f 1209 u32 ctrl;
a4c35ed3
CP
1210
1211 /* First, map non-paged data */
1212 len = skb_headlen(skb);
1213 offset = 0;
1214 while (len) {
1215 size = min(len, bp->max_tx_length);
1216 entry = macb_tx_ring_wrap(tx_head);
02c958dd 1217 tx_skb = &queue->tx_skb[entry];
a4c35ed3
CP
1218
1219 mapping = dma_map_single(&bp->pdev->dev,
1220 skb->data + offset,
1221 size, DMA_TO_DEVICE);
1222 if (dma_mapping_error(&bp->pdev->dev, mapping))
1223 goto dma_error;
1224
1225 /* Save info to properly release resources */
1226 tx_skb->skb = NULL;
1227 tx_skb->mapping = mapping;
1228 tx_skb->size = size;
1229 tx_skb->mapped_as_page = false;
1230
1231 len -= size;
1232 offset += size;
1233 count++;
1234 tx_head++;
1235 }
1236
1237 /* Then, map paged data from fragments */
1238 for (f = 0; f < nr_frags; f++) {
1239 const skb_frag_t *frag = &skb_shinfo(skb)->frags[f];
1240
1241 len = skb_frag_size(frag);
1242 offset = 0;
1243 while (len) {
1244 size = min(len, bp->max_tx_length);
1245 entry = macb_tx_ring_wrap(tx_head);
02c958dd 1246 tx_skb = &queue->tx_skb[entry];
a4c35ed3
CP
1247
1248 mapping = skb_frag_dma_map(&bp->pdev->dev, frag,
1249 offset, size, DMA_TO_DEVICE);
1250 if (dma_mapping_error(&bp->pdev->dev, mapping))
1251 goto dma_error;
1252
1253 /* Save info to properly release resources */
1254 tx_skb->skb = NULL;
1255 tx_skb->mapping = mapping;
1256 tx_skb->size = size;
1257 tx_skb->mapped_as_page = true;
1258
1259 len -= size;
1260 offset += size;
1261 count++;
1262 tx_head++;
1263 }
1264 }
1265
1266 /* Should never happen */
aa50b552 1267 if (unlikely(!tx_skb)) {
a4c35ed3
CP
1268 netdev_err(bp->dev, "BUG! empty skb!\n");
1269 return 0;
1270 }
1271
1272 /* This is the last buffer of the frame: save socket buffer */
1273 tx_skb->skb = skb;
1274
1275 /* Update TX ring: update buffer descriptors in reverse order
1276 * to avoid race condition
1277 */
1278
1279 /* Set 'TX_USED' bit in buffer descriptor at tx_head position
1280 * to set the end of TX queue
1281 */
1282 i = tx_head;
1283 entry = macb_tx_ring_wrap(i);
1284 ctrl = MACB_BIT(TX_USED);
02c958dd 1285 desc = &queue->tx_ring[entry];
a4c35ed3
CP
1286 desc->ctrl = ctrl;
1287
1288 do {
1289 i--;
1290 entry = macb_tx_ring_wrap(i);
02c958dd
CP
1291 tx_skb = &queue->tx_skb[entry];
1292 desc = &queue->tx_ring[entry];
a4c35ed3
CP
1293
1294 ctrl = (u32)tx_skb->size;
1295 if (eof) {
1296 ctrl |= MACB_BIT(TX_LAST);
1297 eof = 0;
1298 }
1299 if (unlikely(entry == (TX_RING_SIZE - 1)))
1300 ctrl |= MACB_BIT(TX_WRAP);
1301
1302 /* Set TX buffer descriptor */
1303 desc->addr = tx_skb->mapping;
1304 /* desc->addr must be visible to hardware before clearing
1305 * 'TX_USED' bit in desc->ctrl.
1306 */
1307 wmb();
1308 desc->ctrl = ctrl;
02c958dd 1309 } while (i != queue->tx_head);
a4c35ed3 1310
02c958dd 1311 queue->tx_head = tx_head;
a4c35ed3
CP
1312
1313 return count;
1314
1315dma_error:
1316 netdev_err(bp->dev, "TX DMA map failed\n");
1317
02c958dd
CP
1318 for (i = queue->tx_head; i != tx_head; i++) {
1319 tx_skb = macb_tx_skb(queue, i);
a4c35ed3
CP
1320
1321 macb_tx_unmap(bp, tx_skb);
1322 }
1323
1324 return 0;
1325}
1326
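/* ndo_start_xmit: check that enough descriptors are free, map the frame with
 * macb_tx_map() and kick the transmitter by setting TSTART.
 */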
1327static int macb_start_xmit(struct sk_buff *skb, struct net_device *dev)
1328{
02c958dd 1329 u16 queue_index = skb_get_queue_mapping(skb);
a4c35ed3 1330 struct macb *bp = netdev_priv(dev);
02c958dd 1331 struct macb_queue *queue = &bp->queues[queue_index];
4871953c 1332 unsigned long flags;
a4c35ed3 1333 unsigned int count, nr_frags, frag_size, f;
89e5785f 1334
a268adb1
HS
1335#if defined(DEBUG) && defined(VERBOSE_DEBUG)
1336 netdev_vdbg(bp->dev,
aa50b552
MF
1337 "start_xmit: queue %hu len %u head %p data %p tail %p end %p\n",
1338 queue_index, skb->len, skb->head, skb->data,
1339 skb_tail_pointer(skb), skb_end_pointer(skb));
c220f8cd
JI
1340 print_hex_dump(KERN_DEBUG, "data: ", DUMP_PREFIX_OFFSET, 16, 1,
1341 skb->data, 16, true);
89e5785f
HS
1342#endif
1343
a4c35ed3
CP
1344 /* Count how many TX buffer descriptors are needed to send this
1345 * socket buffer: skb fragments of jumbo frames may need to be
aa50b552 1346 * split into many buffer descriptors.
a4c35ed3 1347 */
94b295ed 1348 count = DIV_ROUND_UP(skb_headlen(skb), bp->max_tx_length);
a4c35ed3
CP
1349 nr_frags = skb_shinfo(skb)->nr_frags;
1350 for (f = 0; f < nr_frags; f++) {
1351 frag_size = skb_frag_size(&skb_shinfo(skb)->frags[f]);
94b295ed 1352 count += DIV_ROUND_UP(frag_size, bp->max_tx_length);
a4c35ed3
CP
1353 }
1354
4871953c 1355 spin_lock_irqsave(&bp->lock, flags);
89e5785f
HS
1356
1357 /* This is a hard error, log it. */
02c958dd
CP
1358 if (CIRC_SPACE(queue->tx_head, queue->tx_tail, TX_RING_SIZE) < count) {
1359 netif_stop_subqueue(dev, queue_index);
4871953c 1360 spin_unlock_irqrestore(&bp->lock, flags);
c220f8cd 1361 netdev_dbg(bp->dev, "tx_head = %u, tx_tail = %u\n",
02c958dd 1362 queue->tx_head, queue->tx_tail);
5b548140 1363 return NETDEV_TX_BUSY;
89e5785f
HS
1364 }
1365
a4c35ed3 1366 /* Map socket buffer for DMA transfer */
02c958dd 1367 if (!macb_tx_map(bp, queue, skb)) {
c88b5b6a 1368 dev_kfree_skb_any(skb);
92030908
SB
1369 goto unlock;
1370 }
55054a16 1371
03dbe05f 1372 /* Make newly initialized descriptor visible to hardware */
89e5785f
HS
1373 wmb();
1374
e072092f
RC
1375 skb_tx_timestamp(skb);
1376
89e5785f
HS
1377 macb_writel(bp, NCR, macb_readl(bp, NCR) | MACB_BIT(TSTART));
1378
02c958dd
CP
1379 if (CIRC_SPACE(queue->tx_head, queue->tx_tail, TX_RING_SIZE) < 1)
1380 netif_stop_subqueue(dev, queue_index);
89e5785f 1381
92030908 1382unlock:
4871953c 1383 spin_unlock_irqrestore(&bp->lock, flags);
89e5785f 1384
6ed10654 1385 return NETDEV_TX_OK;
89e5785f
HS
1386}
1387
4df95131 1388static void macb_init_rx_buffer_size(struct macb *bp, size_t size)
1b44791a
NF
1389{
1390 if (!macb_is_gem(bp)) {
1391 bp->rx_buffer_size = MACB_RX_BUFFER_SIZE;
1392 } else {
4df95131 1393 bp->rx_buffer_size = size;
1b44791a 1394
1b44791a 1395 if (bp->rx_buffer_size % RX_BUFFER_MULTIPLE) {
4df95131 1396 netdev_dbg(bp->dev,
aa50b552
MF
1397 "RX buffer must be multiple of %d bytes, expanding\n",
1398 RX_BUFFER_MULTIPLE);
1b44791a 1399 bp->rx_buffer_size =
4df95131 1400 roundup(bp->rx_buffer_size, RX_BUFFER_MULTIPLE);
1b44791a 1401 }
1b44791a 1402 }
4df95131
NF
1403
	netdev_dbg(bp->dev, "mtu [%u] rx_buffer_size [%zu]\n",
		   bp->dev->mtu, bp->rx_buffer_size);
1b44791a
NF
1406}
1407
4df95131
NF
1408static void gem_free_rx_buffers(struct macb *bp)
1409{
1410 struct sk_buff *skb;
1411 struct macb_dma_desc *desc;
1412 dma_addr_t addr;
1413 int i;
1414
1415 if (!bp->rx_skbuff)
1416 return;
1417
1418 for (i = 0; i < RX_RING_SIZE; i++) {
1419 skb = bp->rx_skbuff[i];
1420
aa50b552 1421 if (!skb)
4df95131
NF
1422 continue;
1423
1424 desc = &bp->rx_ring[i];
1425 addr = MACB_BF(RX_WADDR, MACB_BFEXT(RX_WADDR, desc->addr));
ccd6d0a9 1426 dma_unmap_single(&bp->pdev->dev, addr, bp->rx_buffer_size,
4df95131
NF
1427 DMA_FROM_DEVICE);
1428 dev_kfree_skb_any(skb);
1429 skb = NULL;
1430 }
1431
1432 kfree(bp->rx_skbuff);
1433 bp->rx_skbuff = NULL;
1434}
1435
1436static void macb_free_rx_buffers(struct macb *bp)
1437{
1438 if (bp->rx_buffers) {
1439 dma_free_coherent(&bp->pdev->dev,
1440 RX_RING_SIZE * bp->rx_buffer_size,
1441 bp->rx_buffers, bp->rx_buffers_dma);
1442 bp->rx_buffers = NULL;
1443 }
1444}
1b44791a 1445
89e5785f
HS
1446static void macb_free_consistent(struct macb *bp)
1447{
02c958dd
CP
1448 struct macb_queue *queue;
1449 unsigned int q;
1450
4df95131 1451 bp->macbgem_ops.mog_free_rx_buffers(bp);
89e5785f
HS
1452 if (bp->rx_ring) {
1453 dma_free_coherent(&bp->pdev->dev, RX_RING_BYTES,
1454 bp->rx_ring, bp->rx_ring_dma);
1455 bp->rx_ring = NULL;
1456 }
02c958dd
CP
1457
1458 for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) {
1459 kfree(queue->tx_skb);
1460 queue->tx_skb = NULL;
1461 if (queue->tx_ring) {
1462 dma_free_coherent(&bp->pdev->dev, TX_RING_BYTES,
1463 queue->tx_ring, queue->tx_ring_dma);
1464 queue->tx_ring = NULL;
1465 }
89e5785f 1466 }
4df95131
NF
1467}
1468
1469static int gem_alloc_rx_buffers(struct macb *bp)
1470{
1471 int size;
1472
1473 size = RX_RING_SIZE * sizeof(struct sk_buff *);
1474 bp->rx_skbuff = kzalloc(size, GFP_KERNEL);
1475 if (!bp->rx_skbuff)
1476 return -ENOMEM;
64ec42fe
MF
1477
1478 netdev_dbg(bp->dev,
1479 "Allocated %d RX struct sk_buff entries at %p\n",
1480 RX_RING_SIZE, bp->rx_skbuff);
4df95131
NF
1481 return 0;
1482}
1483
1484static int macb_alloc_rx_buffers(struct macb *bp)
1485{
1486 int size;
1487
1488 size = RX_RING_SIZE * bp->rx_buffer_size;
1489 bp->rx_buffers = dma_alloc_coherent(&bp->pdev->dev, size,
1490 &bp->rx_buffers_dma, GFP_KERNEL);
1491 if (!bp->rx_buffers)
1492 return -ENOMEM;
64ec42fe
MF
1493
1494 netdev_dbg(bp->dev,
1495 "Allocated RX buffers of %d bytes at %08lx (mapped %p)\n",
1496 size, (unsigned long)bp->rx_buffers_dma, bp->rx_buffers);
4df95131 1497 return 0;
89e5785f
HS
1498}
1499
1500static int macb_alloc_consistent(struct macb *bp)
1501{
02c958dd
CP
1502 struct macb_queue *queue;
1503 unsigned int q;
89e5785f
HS
1504 int size;
1505
02c958dd
CP
1506 for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) {
1507 size = TX_RING_BYTES;
1508 queue->tx_ring = dma_alloc_coherent(&bp->pdev->dev, size,
1509 &queue->tx_ring_dma,
1510 GFP_KERNEL);
1511 if (!queue->tx_ring)
1512 goto out_err;
1513 netdev_dbg(bp->dev,
1514 "Allocated TX ring for queue %u of %d bytes at %08lx (mapped %p)\n",
1515 q, size, (unsigned long)queue->tx_ring_dma,
1516 queue->tx_ring);
1517
1518 size = TX_RING_SIZE * sizeof(struct macb_tx_skb);
1519 queue->tx_skb = kmalloc(size, GFP_KERNEL);
1520 if (!queue->tx_skb)
1521 goto out_err;
1522 }
89e5785f
HS
1523
1524 size = RX_RING_BYTES;
1525 bp->rx_ring = dma_alloc_coherent(&bp->pdev->dev, size,
1526 &bp->rx_ring_dma, GFP_KERNEL);
1527 if (!bp->rx_ring)
1528 goto out_err;
c220f8cd
JI
1529 netdev_dbg(bp->dev,
1530 "Allocated RX ring of %d bytes at %08lx (mapped %p)\n",
1531 size, (unsigned long)bp->rx_ring_dma, bp->rx_ring);
89e5785f 1532
4df95131 1533 if (bp->macbgem_ops.mog_alloc_rx_buffers(bp))
89e5785f 1534 goto out_err;
89e5785f
HS
1535
1536 return 0;
1537
1538out_err:
1539 macb_free_consistent(bp);
1540 return -ENOMEM;
1541}
1542
4df95131
NF
1543static void gem_init_rings(struct macb *bp)
1544{
02c958dd
CP
1545 struct macb_queue *queue;
1546 unsigned int q;
4df95131
NF
1547 int i;
1548
02c958dd
CP
1549 for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) {
1550 for (i = 0; i < TX_RING_SIZE; i++) {
1551 queue->tx_ring[i].addr = 0;
1552 queue->tx_ring[i].ctrl = MACB_BIT(TX_USED);
1553 }
1554 queue->tx_ring[TX_RING_SIZE - 1].ctrl |= MACB_BIT(TX_WRAP);
1555 queue->tx_head = 0;
1556 queue->tx_tail = 0;
4df95131 1557 }
4df95131 1558
02c958dd
CP
1559 bp->rx_tail = 0;
1560 bp->rx_prepared_head = 0;
4df95131
NF
1561
1562 gem_rx_refill(bp);
1563}
1564
89e5785f
HS
1565static void macb_init_rings(struct macb *bp)
1566{
1567 int i;
89e5785f 1568
9ba723b0 1569 macb_init_rx_ring(bp);
89e5785f
HS
1570
1571 for (i = 0; i < TX_RING_SIZE; i++) {
02c958dd
CP
1572 bp->queues[0].tx_ring[i].addr = 0;
1573 bp->queues[0].tx_ring[i].ctrl = MACB_BIT(TX_USED);
89e5785f 1574 }
21d3515c
BS
1575 bp->queues[0].tx_head = 0;
1576 bp->queues[0].tx_tail = 0;
02c958dd 1577 bp->queues[0].tx_ring[TX_RING_SIZE - 1].ctrl |= MACB_BIT(TX_WRAP);
89e5785f 1578
02c958dd 1579 bp->rx_tail = 0;
89e5785f
HS
1580}
1581
1582static void macb_reset_hw(struct macb *bp)
1583{
02c958dd
CP
1584 struct macb_queue *queue;
1585 unsigned int q;
1586
64ec42fe 1587 /* Disable RX and TX (XXX: Should we halt the transmission
89e5785f
HS
1588 * more gracefully?)
1589 */
1590 macb_writel(bp, NCR, 0);
1591
1592 /* Clear the stats registers (XXX: Update stats first?) */
1593 macb_writel(bp, NCR, MACB_BIT(CLRSTAT));
1594
1595 /* Clear all status flags */
95ebcea6
JE
1596 macb_writel(bp, TSR, -1);
1597 macb_writel(bp, RSR, -1);
89e5785f
HS
1598
1599 /* Disable all interrupts */
02c958dd
CP
1600 for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) {
1601 queue_writel(queue, IDR, -1);
1602 queue_readl(queue, ISR);
24468374
NS
1603 if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE)
1604 queue_writel(queue, ISR, -1);
02c958dd 1605 }
89e5785f
HS
1606}
1607
70c9f3d4
JI
1608static u32 gem_mdc_clk_div(struct macb *bp)
1609{
1610 u32 config;
1611 unsigned long pclk_hz = clk_get_rate(bp->pclk);
1612
1613 if (pclk_hz <= 20000000)
1614 config = GEM_BF(CLK, GEM_CLK_DIV8);
1615 else if (pclk_hz <= 40000000)
1616 config = GEM_BF(CLK, GEM_CLK_DIV16);
1617 else if (pclk_hz <= 80000000)
1618 config = GEM_BF(CLK, GEM_CLK_DIV32);
1619 else if (pclk_hz <= 120000000)
1620 config = GEM_BF(CLK, GEM_CLK_DIV48);
1621 else if (pclk_hz <= 160000000)
1622 config = GEM_BF(CLK, GEM_CLK_DIV64);
1623 else
1624 config = GEM_BF(CLK, GEM_CLK_DIV96);
1625
1626 return config;
1627}
1628
1629static u32 macb_mdc_clk_div(struct macb *bp)
1630{
1631 u32 config;
1632 unsigned long pclk_hz;
1633
1634 if (macb_is_gem(bp))
1635 return gem_mdc_clk_div(bp);
1636
1637 pclk_hz = clk_get_rate(bp->pclk);
1638 if (pclk_hz <= 20000000)
1639 config = MACB_BF(CLK, MACB_CLK_DIV8);
1640 else if (pclk_hz <= 40000000)
1641 config = MACB_BF(CLK, MACB_CLK_DIV16);
1642 else if (pclk_hz <= 80000000)
1643 config = MACB_BF(CLK, MACB_CLK_DIV32);
1644 else
1645 config = MACB_BF(CLK, MACB_CLK_DIV64);
1646
1647 return config;
1648}
1649
64ec42fe 1650/* Get the DMA bus width field of the network configuration register that we
757a03c6
JI
1651 * should program. We find the width from decoding the design configuration
1652 * register to find the maximum supported data bus width.
1653 */
1654static u32 macb_dbw(struct macb *bp)
1655{
1656 if (!macb_is_gem(bp))
1657 return 0;
1658
1659 switch (GEM_BFEXT(DBWDEF, gem_readl(bp, DCFG1))) {
1660 case 4:
1661 return GEM_BF(DBW, GEM_DBW128);
1662 case 2:
1663 return GEM_BF(DBW, GEM_DBW64);
1664 case 1:
1665 default:
1666 return GEM_BF(DBW, GEM_DBW32);
1667 }
1668}
1669
64ec42fe 1670/* Configure the receive DMA engine
b3e3bd71 1671 * - use the correct receive buffer size
e175587f 1672 * - set best burst length for DMA operations
b3e3bd71
NF
1673 * (if not supported by FIFO, it will fallback to default)
1674 * - set both rx/tx packet buffers to full memory size
1675 * These are configurable parameters for GEM.
0116da4f
JI
1676 */
1677static void macb_configure_dma(struct macb *bp)
1678{
1679 u32 dmacfg;
1680
1681 if (macb_is_gem(bp)) {
1682 dmacfg = gem_readl(bp, DMACFG) & ~GEM_BF(RXBS, -1L);
1b44791a 1683 dmacfg |= GEM_BF(RXBS, bp->rx_buffer_size / RX_BUFFER_MULTIPLE);
e175587f
NF
1684 if (bp->dma_burst_length)
1685 dmacfg = GEM_BFINS(FBLDO, bp->dma_burst_length, dmacfg);
b3e3bd71 1686 dmacfg |= GEM_BIT(TXPBMS) | GEM_BF(RXBMS, -1L);
a50dad35 1687 dmacfg &= ~GEM_BIT(ENDIA_PKT);
62f6924c 1688
f2ce8a9e 1689 if (bp->native_io)
62f6924c
AC
1690 dmacfg &= ~GEM_BIT(ENDIA_DESC);
1691 else
1692 dmacfg |= GEM_BIT(ENDIA_DESC); /* CPU in big endian */
1693
85ff3d87
CP
1694 if (bp->dev->features & NETIF_F_HW_CSUM)
1695 dmacfg |= GEM_BIT(TXCOEN);
1696 else
1697 dmacfg &= ~GEM_BIT(TXCOEN);
e175587f
NF
1698 netdev_dbg(bp->dev, "Cadence configure DMA with 0x%08x\n",
1699 dmacfg);
0116da4f
JI
1700 gem_writel(bp, DMACFG, dmacfg);
1701 }
1702}
1703
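/* Program the MAC address, NCFGR, the DMA configuration, the ring base
 * addresses and the interrupt masks, then enable the receiver and transmitter.
 */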
1704static void macb_init_hw(struct macb *bp)
1705{
02c958dd
CP
1706 struct macb_queue *queue;
1707 unsigned int q;
1708
89e5785f
HS
1709 u32 config;
1710
1711 macb_reset_hw(bp);
314bccc4 1712 macb_set_hwaddr(bp);
89e5785f 1713
70c9f3d4 1714 config = macb_mdc_clk_div(bp);
022be25c
PCK
1715 if (bp->phy_interface == PHY_INTERFACE_MODE_SGMII)
1716 config |= GEM_BIT(SGMIIEN) | GEM_BIT(PCSSEL);
29bc2e1e 1717 config |= MACB_BF(RBOF, NET_IP_ALIGN); /* Make eth data aligned */
89e5785f
HS
1718 config |= MACB_BIT(PAE); /* PAuse Enable */
1719 config |= MACB_BIT(DRFCS); /* Discard Rx FCS */
a104a6b3 1720 if (bp->caps & MACB_CAPS_JUMBO)
98b5a0f4
HK
1721 config |= MACB_BIT(JFRAME); /* Enable jumbo frames */
1722 else
1723 config |= MACB_BIT(BIG); /* Receive oversized frames */
89e5785f
HS
1724 if (bp->dev->flags & IFF_PROMISC)
1725 config |= MACB_BIT(CAF); /* Copy All Frames */
924ec53c
CP
1726 else if (macb_is_gem(bp) && bp->dev->features & NETIF_F_RXCSUM)
1727 config |= GEM_BIT(RXCOEN);
89e5785f
HS
1728 if (!(bp->dev->flags & IFF_BROADCAST))
1729 config |= MACB_BIT(NBC); /* No BroadCast */
757a03c6 1730 config |= macb_dbw(bp);
89e5785f 1731 macb_writel(bp, NCFGR, config);
a104a6b3 1732 if ((bp->caps & MACB_CAPS_JUMBO) && bp->jumbo_max_len)
98b5a0f4 1733 gem_writel(bp, JML, bp->jumbo_max_len);
26cdfb49
VD
1734 bp->speed = SPEED_10;
1735 bp->duplex = DUPLEX_HALF;
98b5a0f4 1736 bp->rx_frm_len_mask = MACB_RX_FRMLEN_MASK;
a104a6b3 1737 if (bp->caps & MACB_CAPS_JUMBO)
98b5a0f4 1738 bp->rx_frm_len_mask = MACB_RX_JFRMLEN_MASK;
89e5785f 1739
0116da4f
JI
1740 macb_configure_dma(bp);
1741
89e5785f
HS
1742 /* Initialize TX and RX buffers */
1743 macb_writel(bp, RBQP, bp->rx_ring_dma);
02c958dd
CP
1744 for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) {
1745 queue_writel(queue, TBQP, queue->tx_ring_dma);
1746
1747 /* Enable interrupts */
1748 queue_writel(queue, IER,
1749 MACB_RX_INT_FLAGS |
1750 MACB_TX_INT_FLAGS |
1751 MACB_BIT(HRESP));
1752 }
89e5785f
HS
1753
1754 /* Enable TX and RX */
6c36a707 1755 macb_writel(bp, NCR, MACB_BIT(RE) | MACB_BIT(TE) | MACB_BIT(MPE));
89e5785f
HS
1756}
1757
64ec42fe 1758/* The hash address register is 64 bits long and takes up two
446ebd01
PV
1759 * locations in the memory map. The least significant bits are stored
1760 * in EMAC_HSL and the most significant bits in EMAC_HSH.
1761 *
1762 * The unicast hash enable and the multicast hash enable bits in the
1763 * network configuration register enable the reception of hash matched
1764 * frames. The destination address is reduced to a 6 bit index into
1765 * the 64 bit hash register using the following hash function. The
1766 * hash function is an exclusive or of every sixth bit of the
1767 * destination address.
1768 *
1769 * hi[5] = da[5] ^ da[11] ^ da[17] ^ da[23] ^ da[29] ^ da[35] ^ da[41] ^ da[47]
1770 * hi[4] = da[4] ^ da[10] ^ da[16] ^ da[22] ^ da[28] ^ da[34] ^ da[40] ^ da[46]
1771 * hi[3] = da[3] ^ da[09] ^ da[15] ^ da[21] ^ da[27] ^ da[33] ^ da[39] ^ da[45]
1772 * hi[2] = da[2] ^ da[08] ^ da[14] ^ da[20] ^ da[26] ^ da[32] ^ da[38] ^ da[44]
1773 * hi[1] = da[1] ^ da[07] ^ da[13] ^ da[19] ^ da[25] ^ da[31] ^ da[37] ^ da[43]
1774 * hi[0] = da[0] ^ da[06] ^ da[12] ^ da[18] ^ da[24] ^ da[30] ^ da[36] ^ da[42]
1775 *
1776 * da[0] represents the least significant bit of the first byte
1777 * received, that is, the multicast/unicast indicator, and da[47]
1778 * represents the most significant bit of the last byte received. If
1779 * the hash index, hi[n], points to a bit that is set in the hash
1780 * register then the frame will be matched according to whether the
1781 * frame is multicast or unicast. A multicast match will be signalled
1782 * if the multicast hash enable bit is set, da[0] is 1 and the hash
1783 * index points to a bit set in the hash register. A unicast match
1784 * will be signalled if the unicast hash enable bit is set, da[0] is 0
1785 * and the hash index points to a bit set in the hash register. To
1786 * receive all multicast frames, the hash register should be set with
1787 * all ones and the multicast hash enable bit should be set in the
1788 * network configuration register.
1789 */
1790
1791static inline int hash_bit_value(int bitnr, __u8 *addr)
1792{
1793 if (addr[bitnr / 8] & (1 << (bitnr % 8)))
1794 return 1;
1795 return 0;
1796}
1797
64ec42fe 1798/* Return the hash index value for the specified address. */
446ebd01
PV
1799static int hash_get_index(__u8 *addr)
1800{
1801 int i, j, bitval;
1802 int hash_index = 0;
1803
1804 for (j = 0; j < 6; j++) {
1805 for (i = 0, bitval = 0; i < 8; i++)
2fa45e22 1806 bitval ^= hash_bit_value(i * 6 + j, addr);
446ebd01
PV
1807
1808 hash_index |= (bitval << j);
1809 }
1810
1811 return hash_index;
1812}
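
As an illustrative aside (a standalone user-space sketch, not part of macb.c), the folding described in the comment above can be checked by hand: for the all-ones address every hi[n] is the XOR of eight set bits, so the index comes out as 0.

/* Standalone illustration of the 6-bit hash folding; mirrors hash_get_index()
 * above but is not driver code. Build with: cc -o hashidx hashidx.c
 */
#include <stdio.h>

static int bit(const unsigned char *addr, int n)
{
	return (addr[n / 8] >> (n % 8)) & 1;
}

static int hash_index(const unsigned char *addr)
{
	int i, j, idx = 0;

	for (j = 0; j < 6; j++) {
		int v = 0;

		for (i = 0; i < 8; i++)
			v ^= bit(addr, i * 6 + j);
		idx |= v << j;
	}
	return idx;
}

int main(void)
{
	unsigned char allones[6] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };
	unsigned char mcast[6]   = { 0x01, 0x00, 0x5e, 0x00, 0x00, 0x01 };

	printf("ff:ff:ff:ff:ff:ff -> %d\n", hash_index(allones)); /* 0 */
	printf("01:00:5e:00:00:01 -> %d\n", hash_index(mcast));
	return 0;
}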
1813
64ec42fe 1814/* Add multicast addresses to the internal multicast-hash table. */
446ebd01
PV
1815static void macb_sethashtable(struct net_device *dev)
1816{
22bedad3 1817 struct netdev_hw_addr *ha;
446ebd01 1818 unsigned long mc_filter[2];
f9dcbcc9 1819 unsigned int bitnr;
446ebd01
PV
1820 struct macb *bp = netdev_priv(dev);
1821
aa50b552
MF
1822 mc_filter[0] = 0;
1823 mc_filter[1] = 0;
446ebd01 1824
22bedad3
JP
1825 netdev_for_each_mc_addr(ha, dev) {
1826 bitnr = hash_get_index(ha->addr);
446ebd01
PV
1827 mc_filter[bitnr >> 5] |= 1 << (bitnr & 31);
1828 }
1829
f75ba50b
JI
1830 macb_or_gem_writel(bp, HRB, mc_filter[0]);
1831 macb_or_gem_writel(bp, HRT, mc_filter[1]);
446ebd01
PV
1832}
1833
64ec42fe 1834/* Enable/Disable promiscuous and multicast modes. */
421d9df0 1835static void macb_set_rx_mode(struct net_device *dev)
446ebd01
PV
1836{
1837 unsigned long cfg;
1838 struct macb *bp = netdev_priv(dev);
1839
1840 cfg = macb_readl(bp, NCFGR);
1841
924ec53c 1842 if (dev->flags & IFF_PROMISC) {
446ebd01
PV
1843 /* Enable promiscuous mode */
1844 cfg |= MACB_BIT(CAF);
924ec53c
CP
1845
1846 /* Disable RX checksum offload */
1847 if (macb_is_gem(bp))
1848 cfg &= ~GEM_BIT(RXCOEN);
1849 } else {
1850 /* Disable promiscuous mode */
446ebd01
PV
1851 cfg &= ~MACB_BIT(CAF);
1852
924ec53c
CP
1853 /* Enable RX checksum offload only if requested */
1854 if (macb_is_gem(bp) && dev->features & NETIF_F_RXCSUM)
1855 cfg |= GEM_BIT(RXCOEN);
1856 }
1857
446ebd01
PV
1858 if (dev->flags & IFF_ALLMULTI) {
1859 /* Enable all multicast mode */
f75ba50b
JI
1860 macb_or_gem_writel(bp, HRB, -1);
1861 macb_or_gem_writel(bp, HRT, -1);
446ebd01 1862 cfg |= MACB_BIT(NCFGR_MTI);
4cd24eaf 1863 } else if (!netdev_mc_empty(dev)) {
446ebd01
PV
1864 /* Enable specific multicasts */
1865 macb_sethashtable(dev);
1866 cfg |= MACB_BIT(NCFGR_MTI);
1867 } else if (dev->flags & (~IFF_ALLMULTI)) {
1868 /* Disable all multicast mode */
f75ba50b
JI
1869 macb_or_gem_writel(bp, HRB, 0);
1870 macb_or_gem_writel(bp, HRT, 0);
446ebd01
PV
1871 cfg &= ~MACB_BIT(NCFGR_MTI);
1872 }
1873
1874 macb_writel(bp, NCFGR, cfg);
1875}
1876
89e5785f
HS
1877static int macb_open(struct net_device *dev)
1878{
1879 struct macb *bp = netdev_priv(dev);
4df95131 1880 size_t bufsz = dev->mtu + ETH_HLEN + ETH_FCS_LEN + NET_IP_ALIGN;
89e5785f
HS
1881 int err;
1882
c220f8cd 1883 netdev_dbg(bp->dev, "open\n");
89e5785f 1884
03fc4721
NF
1885 /* carrier starts down */
1886 netif_carrier_off(dev);
1887
6c36a707
R
1888 /* if the phy is not yet registered, retry later */
1889 if (!bp->phy_dev)
1890 return -EAGAIN;
1b44791a
NF
1891
1892 /* RX buffers initialization */
4df95131 1893 macb_init_rx_buffer_size(bp, bufsz);
6c36a707 1894
89e5785f
HS
1895 err = macb_alloc_consistent(bp);
1896 if (err) {
c220f8cd
JI
1897 netdev_err(dev, "Unable to allocate DMA memory (error %d)\n",
1898 err);
89e5785f
HS
1899 return err;
1900 }
1901
bea3348e
SH
1902 napi_enable(&bp->napi);
1903
4df95131 1904 bp->macbgem_ops.mog_init_rings(bp);
89e5785f 1905 macb_init_hw(bp);
89e5785f 1906
6c36a707
R
1907 /* schedule a link state check */
1908 phy_start(bp->phy_dev);
89e5785f 1909
02c958dd 1910 netif_tx_start_all_queues(dev);
89e5785f
HS
1911
1912 return 0;
1913}
1914
1915static int macb_close(struct net_device *dev)
1916{
1917 struct macb *bp = netdev_priv(dev);
1918 unsigned long flags;
1919
02c958dd 1920 netif_tx_stop_all_queues(dev);
bea3348e 1921 napi_disable(&bp->napi);
89e5785f 1922
6c36a707
R
1923 if (bp->phy_dev)
1924 phy_stop(bp->phy_dev);
1925
89e5785f
HS
1926 spin_lock_irqsave(&bp->lock, flags);
1927 macb_reset_hw(bp);
1928 netif_carrier_off(dev);
1929 spin_unlock_irqrestore(&bp->lock, flags);
1930
1931 macb_free_consistent(bp);
1932
1933 return 0;
1934}
1935
a5898ea0
HK
1936static int macb_change_mtu(struct net_device *dev, int new_mtu)
1937{
1938 struct macb *bp = netdev_priv(dev);
1939 u32 max_mtu;
1940
1941 if (netif_running(dev))
1942 return -EBUSY;
1943
1944 max_mtu = ETH_DATA_LEN;
a104a6b3 1945 if (bp->caps & MACB_CAPS_JUMBO)
a5898ea0
HK
1946 max_mtu = gem_readl(bp, JML) - ETH_HLEN - ETH_FCS_LEN;
1947
1948 if ((new_mtu > max_mtu) || (new_mtu < GEM_MTU_MIN_SIZE))
1949 return -EINVAL;
1950
1951 dev->mtu = new_mtu;
1952
1953 return 0;
1954}
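
On jumbo-capable GEMs the upper bound comes from the JML register rather than ETH_DATA_LEN; with the 10240-byte zynqmp default programmed into JML, the largest accepted MTU is 10240 - 14 (header) - 4 (FCS) = 10222, and anything below GEM_MTU_MIN_SIZE (68) is rejected. A quick arithmetic check (illustrative only, not driver code):

/* Illustrative only: the MTU bound computed by macb_change_mtu() for a
 * jumbo-capable GEM, assuming JML holds the zynqmp default of 10240.
 */
#include <stdio.h>

#define ETH_HLEN	 14
#define ETH_FCS_LEN	 4
#define GEM_MTU_MIN_SIZE 68

int main(void)
{
	unsigned int jml = 10240;	/* assumed JML value */
	unsigned int max_mtu = jml - ETH_HLEN - ETH_FCS_LEN;

	printf("accepted MTU range: %u..%u\n", GEM_MTU_MIN_SIZE, max_mtu); /* 68..10222 */
	return 0;
}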
1955
a494ed8e
JI
1956static void gem_update_stats(struct macb *bp)
1957{
8bcbf82f 1958 unsigned int i;
a494ed8e 1959 u32 *p = &bp->hw_stats.gem.tx_octets_31_0;
a494ed8e 1960
3ff13f1c
XH
1961 for (i = 0; i < GEM_STATS_LEN; ++i, ++p) {
1962 u32 offset = gem_statistics[i].offset;
7a6e0706 1963 u64 val = bp->macb_reg_readl(bp, offset);
3ff13f1c
XH
1964
1965 bp->ethtool_stats[i] += val;
1966 *p += val;
1967
1968 if (offset == GEM_OCTTXL || offset == GEM_OCTRXL) {
1969 /* Add GEM_OCTTXH, GEM_OCTRXH */
7a6e0706 1970 val = bp->macb_reg_readl(bp, offset + 4);
2fa45e22 1971 bp->ethtool_stats[i] += ((u64)val) << 32;
3ff13f1c
XH
1972 *(++p) += val;
1973 }
1974 }
a494ed8e
JI
1975}
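
Only the octet counters are wider than 32 bits: the low word is read from GEM_OCTTXL/GEM_OCTRXL and the high word from the register 4 bytes above it, and the loop folds that high half into the 64-bit running total. A minimal standalone sketch of the combination (example values, not driver code):

/* Illustrative only: combining a 32-bit low/high register pair into one
 * 64-bit octet counter, as gem_update_stats() does for GEM_OCTTXL/H.
 */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t lo = 0x89abcdef;	/* example value read at GEM_OCTTXL */
	uint32_t hi = 0x00000012;	/* example value read at GEM_OCTTXL + 4 */
	uint64_t octets = (uint64_t)lo | ((uint64_t)hi << 32);

	printf("tx octets = %llu\n", (unsigned long long)octets);
	return 0;
}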
1976
1977static struct net_device_stats *gem_get_stats(struct macb *bp)
1978{
1979 struct gem_stats *hwstat = &bp->hw_stats.gem;
1980 struct net_device_stats *nstat = &bp->stats;
1981
1982 gem_update_stats(bp);
1983
1984 nstat->rx_errors = (hwstat->rx_frame_check_sequence_errors +
1985 hwstat->rx_alignment_errors +
1986 hwstat->rx_resource_errors +
1987 hwstat->rx_overruns +
1988 hwstat->rx_oversize_frames +
1989 hwstat->rx_jabbers +
1990 hwstat->rx_undersized_frames +
1991 hwstat->rx_length_field_frame_errors);
1992 nstat->tx_errors = (hwstat->tx_late_collisions +
1993 hwstat->tx_excessive_collisions +
1994 hwstat->tx_underrun +
1995 hwstat->tx_carrier_sense_errors);
1996 nstat->multicast = hwstat->rx_multicast_frames;
1997 nstat->collisions = (hwstat->tx_single_collision_frames +
1998 hwstat->tx_multiple_collision_frames +
1999 hwstat->tx_excessive_collisions);
2000 nstat->rx_length_errors = (hwstat->rx_oversize_frames +
2001 hwstat->rx_jabbers +
2002 hwstat->rx_undersized_frames +
2003 hwstat->rx_length_field_frame_errors);
2004 nstat->rx_over_errors = hwstat->rx_resource_errors;
2005 nstat->rx_crc_errors = hwstat->rx_frame_check_sequence_errors;
2006 nstat->rx_frame_errors = hwstat->rx_alignment_errors;
2007 nstat->rx_fifo_errors = hwstat->rx_overruns;
2008 nstat->tx_aborted_errors = hwstat->tx_excessive_collisions;
2009 nstat->tx_carrier_errors = hwstat->tx_carrier_sense_errors;
2010 nstat->tx_fifo_errors = hwstat->tx_underrun;
2011
2012 return nstat;
2013}
2014
3ff13f1c
XH
2015static void gem_get_ethtool_stats(struct net_device *dev,
2016 struct ethtool_stats *stats, u64 *data)
2017{
2018 struct macb *bp;
2019
2020 bp = netdev_priv(dev);
2021 gem_update_stats(bp);
2fa45e22 2022 memcpy(data, &bp->ethtool_stats, sizeof(u64) * GEM_STATS_LEN);
3ff13f1c
XH
2023}
2024
2025static int gem_get_sset_count(struct net_device *dev, int sset)
2026{
2027 switch (sset) {
2028 case ETH_SS_STATS:
2029 return GEM_STATS_LEN;
2030 default:
2031 return -EOPNOTSUPP;
2032 }
2033}
2034
2035static void gem_get_ethtool_strings(struct net_device *dev, u32 sset, u8 *p)
2036{
8bcbf82f 2037 unsigned int i;
3ff13f1c
XH
2038
2039 switch (sset) {
2040 case ETH_SS_STATS:
2041 for (i = 0; i < GEM_STATS_LEN; i++, p += ETH_GSTRING_LEN)
2042 memcpy(p, gem_statistics[i].stat_string,
2043 ETH_GSTRING_LEN);
2044 break;
2045 }
2046}
2047
421d9df0 2048static struct net_device_stats *macb_get_stats(struct net_device *dev)
89e5785f
HS
2049{
2050 struct macb *bp = netdev_priv(dev);
2051 struct net_device_stats *nstat = &bp->stats;
a494ed8e
JI
2052 struct macb_stats *hwstat = &bp->hw_stats.macb;
2053
2054 if (macb_is_gem(bp))
2055 return gem_get_stats(bp);
89e5785f 2056
6c36a707
R
2057 /* read stats from hardware */
2058 macb_update_stats(bp);
2059
89e5785f
HS
2060 /* Convert HW stats into netdevice stats */
2061 nstat->rx_errors = (hwstat->rx_fcs_errors +
2062 hwstat->rx_align_errors +
2063 hwstat->rx_resource_errors +
2064 hwstat->rx_overruns +
2065 hwstat->rx_oversize_pkts +
2066 hwstat->rx_jabbers +
2067 hwstat->rx_undersize_pkts +
89e5785f
HS
2068 hwstat->rx_length_mismatch);
2069 nstat->tx_errors = (hwstat->tx_late_cols +
2070 hwstat->tx_excessive_cols +
2071 hwstat->tx_underruns +
716723c2
WS
2072 hwstat->tx_carrier_errors +
2073 hwstat->sqe_test_errors);
89e5785f
HS
2074 nstat->collisions = (hwstat->tx_single_cols +
2075 hwstat->tx_multiple_cols +
2076 hwstat->tx_excessive_cols);
2077 nstat->rx_length_errors = (hwstat->rx_oversize_pkts +
2078 hwstat->rx_jabbers +
2079 hwstat->rx_undersize_pkts +
2080 hwstat->rx_length_mismatch);
b19f7f71
AS
2081 nstat->rx_over_errors = hwstat->rx_resource_errors +
2082 hwstat->rx_overruns;
89e5785f
HS
2083 nstat->rx_crc_errors = hwstat->rx_fcs_errors;
2084 nstat->rx_frame_errors = hwstat->rx_align_errors;
2085 nstat->rx_fifo_errors = hwstat->rx_overruns;
2086 /* XXX: What does "missed" mean? */
2087 nstat->tx_aborted_errors = hwstat->tx_excessive_cols;
2088 nstat->tx_carrier_errors = hwstat->tx_carrier_errors;
2089 nstat->tx_fifo_errors = hwstat->tx_underruns;
2090 /* Don't know about heartbeat or window errors... */
2091
2092 return nstat;
2093}
2094
2095static int macb_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
2096{
2097 struct macb *bp = netdev_priv(dev);
6c36a707
R
2098 struct phy_device *phydev = bp->phy_dev;
2099
2100 if (!phydev)
2101 return -ENODEV;
89e5785f 2102
6c36a707 2103 return phy_ethtool_gset(phydev, cmd);
89e5785f
HS
2104}
2105
2106static int macb_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
2107{
2108 struct macb *bp = netdev_priv(dev);
6c36a707 2109 struct phy_device *phydev = bp->phy_dev;
89e5785f 2110
6c36a707
R
2111 if (!phydev)
2112 return -ENODEV;
2113
2114 return phy_ethtool_sset(phydev, cmd);
89e5785f
HS
2115}
2116
d1d1b53d
NF
2117static int macb_get_regs_len(struct net_device *netdev)
2118{
2119 return MACB_GREGS_NBR * sizeof(u32);
2120}
2121
2122static void macb_get_regs(struct net_device *dev, struct ethtool_regs *regs,
2123 void *p)
2124{
2125 struct macb *bp = netdev_priv(dev);
2126 unsigned int tail, head;
2127 u32 *regs_buff = p;
2128
2129 regs->version = (macb_readl(bp, MID) & ((1 << MACB_REV_SIZE) - 1))
2130 | MACB_GREGS_VERSION;
2131
02c958dd
CP
2132 tail = macb_tx_ring_wrap(bp->queues[0].tx_tail);
2133 head = macb_tx_ring_wrap(bp->queues[0].tx_head);
d1d1b53d
NF
2134
2135 regs_buff[0] = macb_readl(bp, NCR);
2136 regs_buff[1] = macb_or_gem_readl(bp, NCFGR);
2137 regs_buff[2] = macb_readl(bp, NSR);
2138 regs_buff[3] = macb_readl(bp, TSR);
2139 regs_buff[4] = macb_readl(bp, RBQP);
2140 regs_buff[5] = macb_readl(bp, TBQP);
2141 regs_buff[6] = macb_readl(bp, RSR);
2142 regs_buff[7] = macb_readl(bp, IMR);
2143
2144 regs_buff[8] = tail;
2145 regs_buff[9] = head;
02c958dd
CP
2146 regs_buff[10] = macb_tx_dma(&bp->queues[0], tail);
2147 regs_buff[11] = macb_tx_dma(&bp->queues[0], head);
d1d1b53d 2148
ce721a70
NA
2149 if (!(bp->caps & MACB_CAPS_USRIO_DISABLED))
2150 regs_buff[12] = macb_or_gem_readl(bp, USRIO);
64ec42fe 2151 if (macb_is_gem(bp))
d1d1b53d 2152 regs_buff[13] = gem_readl(bp, DMACFG);
d1d1b53d
NF
2153}
2154
3e2a5e15
SP
2155static void macb_get_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
2156{
2157 struct macb *bp = netdev_priv(netdev);
2158
2159 wol->supported = 0;
2160 wol->wolopts = 0;
2161
2162 if (bp->wol & MACB_WOL_HAS_MAGIC_PACKET) {
2163 wol->supported = WAKE_MAGIC;
2164
2165 if (bp->wol & MACB_WOL_ENABLED)
2166 wol->wolopts |= WAKE_MAGIC;
2167 }
2168}
2169
2170static int macb_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
2171{
2172 struct macb *bp = netdev_priv(netdev);
2173
2174 if (!(bp->wol & MACB_WOL_HAS_MAGIC_PACKET) ||
2175 (wol->wolopts & ~WAKE_MAGIC))
2176 return -EOPNOTSUPP;
2177
2178 if (wol->wolopts & WAKE_MAGIC)
2179 bp->wol |= MACB_WOL_ENABLED;
2180 else
2181 bp->wol &= ~MACB_WOL_ENABLED;
2182
2183 device_set_wakeup_enable(&bp->pdev->dev, bp->wol & MACB_WOL_ENABLED);
2184
2185 return 0;
2186}
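
From user space these two handlers are reached through the ETHTOOL_GWOL/ETHTOOL_SWOL ioctls (or simply "ethtool -s <iface> wol g"); note that macb_set_wol() rejects the request with -EOPNOTSUPP unless the device tree advertised magic-packet support. A hedged user-space sketch, assuming an interface named eth0:

/* Illustrative user-space sketch (not driver code): enable magic-packet
 * wake-up on an interface via the SIOCETHTOOL/ETHTOOL_SWOL ioctl.
 */
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>
#include <linux/ethtool.h>
#include <linux/sockios.h>

int main(void)
{
	struct ethtool_wolinfo wol = { .cmd = ETHTOOL_SWOL, .wolopts = WAKE_MAGIC };
	struct ifreq ifr;
	int fd = socket(AF_INET, SOCK_DGRAM, 0);

	if (fd < 0)
		return 1;
	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);	/* interface name is an assumption */
	ifr.ifr_data = (void *)&wol;
	if (ioctl(fd, SIOCETHTOOL, &ifr) < 0)
		perror("ETHTOOL_SWOL");
	close(fd);
	return 0;
}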
2187
421d9df0 2188static const struct ethtool_ops macb_ethtool_ops = {
89e5785f
HS
2189 .get_settings = macb_get_settings,
2190 .set_settings = macb_set_settings,
d1d1b53d
NF
2191 .get_regs_len = macb_get_regs_len,
2192 .get_regs = macb_get_regs,
89e5785f 2193 .get_link = ethtool_op_get_link,
17f393e8 2194 .get_ts_info = ethtool_op_get_ts_info,
3e2a5e15
SP
2195 .get_wol = macb_get_wol,
2196 .set_wol = macb_set_wol,
8cd5a56c 2197};
8cd5a56c 2198
8093b1c3 2199static const struct ethtool_ops gem_ethtool_ops = {
8cd5a56c
XH
2200 .get_settings = macb_get_settings,
2201 .set_settings = macb_set_settings,
2202 .get_regs_len = macb_get_regs_len,
2203 .get_regs = macb_get_regs,
2204 .get_link = ethtool_op_get_link,
2205 .get_ts_info = ethtool_op_get_ts_info,
3ff13f1c
XH
2206 .get_ethtool_stats = gem_get_ethtool_stats,
2207 .get_strings = gem_get_ethtool_strings,
2208 .get_sset_count = gem_get_sset_count,
89e5785f
HS
2209};
2210
421d9df0 2211static int macb_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
89e5785f
HS
2212{
2213 struct macb *bp = netdev_priv(dev);
6c36a707 2214 struct phy_device *phydev = bp->phy_dev;
89e5785f
HS
2215
2216 if (!netif_running(dev))
2217 return -EINVAL;
2218
6c36a707
R
2219 if (!phydev)
2220 return -ENODEV;
89e5785f 2221
28b04113 2222 return phy_mii_ioctl(phydev, rq, cmd);
89e5785f
HS
2223}
2224
85ff3d87
CP
2225static int macb_set_features(struct net_device *netdev,
2226 netdev_features_t features)
2227{
2228 struct macb *bp = netdev_priv(netdev);
2229 netdev_features_t changed = features ^ netdev->features;
2230
2231 /* TX checksum offload */
2232 if ((changed & NETIF_F_HW_CSUM) && macb_is_gem(bp)) {
2233 u32 dmacfg;
2234
2235 dmacfg = gem_readl(bp, DMACFG);
2236 if (features & NETIF_F_HW_CSUM)
2237 dmacfg |= GEM_BIT(TXCOEN);
2238 else
2239 dmacfg &= ~GEM_BIT(TXCOEN);
2240 gem_writel(bp, DMACFG, dmacfg);
2241 }
2242
924ec53c
CP
2243 /* RX checksum offload */
2244 if ((changed & NETIF_F_RXCSUM) && macb_is_gem(bp)) {
2245 u32 netcfg;
2246
2247 netcfg = gem_readl(bp, NCFGR);
2248 if (features & NETIF_F_RXCSUM &&
2249 !(netdev->flags & IFF_PROMISC))
2250 netcfg |= GEM_BIT(RXCOEN);
2251 else
2252 netcfg &= ~GEM_BIT(RXCOEN);
2253 gem_writel(bp, NCFGR, netcfg);
2254 }
2255
85ff3d87
CP
2256 return 0;
2257}
2258
5f1fa992
AB
2259static const struct net_device_ops macb_netdev_ops = {
2260 .ndo_open = macb_open,
2261 .ndo_stop = macb_close,
2262 .ndo_start_xmit = macb_start_xmit,
afc4b13d 2263 .ndo_set_rx_mode = macb_set_rx_mode,
5f1fa992
AB
2264 .ndo_get_stats = macb_get_stats,
2265 .ndo_do_ioctl = macb_ioctl,
2266 .ndo_validate_addr = eth_validate_addr,
a5898ea0 2267 .ndo_change_mtu = macb_change_mtu,
5f1fa992 2268 .ndo_set_mac_address = eth_mac_addr,
6e8cf5c0
TP
2269#ifdef CONFIG_NET_POLL_CONTROLLER
2270 .ndo_poll_controller = macb_poll_controller,
2271#endif
85ff3d87 2272 .ndo_set_features = macb_set_features,
5f1fa992
AB
2273};
2274
64ec42fe 2275/* Configure peripheral capabilities according to device tree
e175587f
NF
2276 * and integration options used
2277 */
64ec42fe
MF
2278static void macb_configure_caps(struct macb *bp,
2279 const struct macb_config *dt_conf)
e175587f
NF
2280{
2281 u32 dcfg;
e175587f 2282
f6970505
NF
2283 if (dt_conf)
2284 bp->caps = dt_conf->caps;
2285
f2ce8a9e 2286 if (hw_is_gem(bp->regs, bp->native_io)) {
e175587f
NF
2287 bp->caps |= MACB_CAPS_MACB_IS_GEM;
2288
e175587f
NF
2289 dcfg = gem_readl(bp, DCFG1);
2290 if (GEM_BFEXT(IRQCOR, dcfg) == 0)
2291 bp->caps |= MACB_CAPS_ISR_CLEAR_ON_WRITE;
2292 dcfg = gem_readl(bp, DCFG2);
2293 if ((dcfg & (GEM_BIT(RX_PKT_BUFF) | GEM_BIT(TX_PKT_BUFF))) == 0)
2294 bp->caps |= MACB_CAPS_FIFO_MODE;
2295 }
2296
a35919e1 2297 dev_dbg(&bp->pdev->dev, "Cadence caps 0x%08x\n", bp->caps);
e175587f
NF
2298}
2299
02c958dd 2300static void macb_probe_queues(void __iomem *mem,
f2ce8a9e 2301 bool native_io,
02c958dd
CP
2302 unsigned int *queue_mask,
2303 unsigned int *num_queues)
2304{
2305 unsigned int hw_q;
02c958dd
CP
2306
2307 *queue_mask = 0x1;
2308 *num_queues = 1;
2309
da120112
NF
2310 /* Is this a macb or a gem?
2311 *
2312 * We need to read directly from the hardware here because
2313 * we are early in the probe process and the
2314 * MACB_CAPS_MACB_IS_GEM capability flag is not set yet.
2315 */
f2ce8a9e 2316 if (!hw_is_gem(mem, native_io))
02c958dd
CP
2317 return;
2318
2319 /* bit 0 is never set but queue 0 always exists */
a50dad35
AC
2320 *queue_mask = readl_relaxed(mem + GEM_DCFG6) & 0xff;
2321
02c958dd
CP
2322 *queue_mask |= 0x1;
2323
2324 for (hw_q = 1; hw_q < MACB_MAX_QUEUES; ++hw_q)
2325 if (*queue_mask & (1 << hw_q))
2326 (*num_queues)++;
2327}
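
A worked example of the probing above: if GEM_DCFG6 reads back 0x0e, the low byte gives a raw mask of 0x0e, bit 0 is forced on to yield 0x0f, and four queues are counted. The same counting, as a standalone sketch outside the driver (MACB_MAX_QUEUES assumed to be 8 here):

/* Illustrative only: deriving the queue count from a DCFG6-style mask,
 * mirroring macb_probe_queues().
 */
#include <stdio.h>

#define MAX_QUEUES 8	/* stand-in for MACB_MAX_QUEUES */

int main(void)
{
	unsigned int dcfg6 = 0x0000000e;		/* hypothetical register value */
	unsigned int mask = (dcfg6 & 0xff) | 0x1;	/* queue 0 always exists */
	unsigned int q, num = 1;

	for (q = 1; q < MAX_QUEUES; q++)
		if (mask & (1u << q))
			num++;
	printf("queue_mask=0x%02x num_queues=%u\n", mask, num); /* 0x0f, 4 */
	return 0;
}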
2328
c69618b3
NF
2329static int macb_clk_init(struct platform_device *pdev, struct clk **pclk,
2330 struct clk **hclk, struct clk **tx_clk)
89e5785f 2331{
421d9df0 2332 int err;
89e5785f 2333
c69618b3
NF
2334 *pclk = devm_clk_get(&pdev->dev, "pclk");
2335 if (IS_ERR(*pclk)) {
2336 err = PTR_ERR(*pclk);
b48e0bab 2337 dev_err(&pdev->dev, "failed to get macb_clk (%u)\n", err);
421d9df0 2338 return err;
0cc8674f 2339 }
461845db 2340
c69618b3
NF
2341 *hclk = devm_clk_get(&pdev->dev, "hclk");
2342 if (IS_ERR(*hclk)) {
2343 err = PTR_ERR(*hclk);
b48e0bab 2344 dev_err(&pdev->dev, "failed to get hclk (%u)\n", err);
421d9df0 2345 return err;
b48e0bab
SB
2346 }
2347
c69618b3
NF
2348 *tx_clk = devm_clk_get(&pdev->dev, "tx_clk");
2349 if (IS_ERR(*tx_clk))
2350 *tx_clk = NULL;
e1824dfe 2351
c69618b3 2352 err = clk_prepare_enable(*pclk);
b48e0bab
SB
2353 if (err) {
2354 dev_err(&pdev->dev, "failed to enable pclk (%u)\n", err);
421d9df0 2355 return err;
b48e0bab
SB
2356 }
2357
c69618b3 2358 err = clk_prepare_enable(*hclk);
b48e0bab
SB
2359 if (err) {
2360 dev_err(&pdev->dev, "failed to enable hclk (%u)\n", err);
421d9df0 2361 goto err_disable_pclk;
89e5785f 2362 }
89e5785f 2363
c69618b3 2364 err = clk_prepare_enable(*tx_clk);
93b31f48
CP
2365 if (err) {
2366 dev_err(&pdev->dev, "failed to enable tx_clk (%u)\n", err);
421d9df0 2367 goto err_disable_hclk;
e1824dfe
SB
2368 }
2369
c69618b3
NF
2370 return 0;
2371
2372err_disable_hclk:
2373 clk_disable_unprepare(*hclk);
2374
2375err_disable_pclk:
2376 clk_disable_unprepare(*pclk);
2377
2378 return err;
2379}
2380
2381static int macb_init(struct platform_device *pdev)
2382{
2383 struct net_device *dev = platform_get_drvdata(pdev);
2384 unsigned int hw_q, q;
2385 struct macb *bp = netdev_priv(dev);
2386 struct macb_queue *queue;
2387 int err;
2388 u32 val;
2389
02c958dd
CP
2390 /* Set the queue register mapping once and for all: queue0 has a special
2391 * register mapping, but we don't want to test the queue index and then
2392 * compute the corresponding register offset at run time.
2393 */
cf250de0 2394 for (hw_q = 0, q = 0; hw_q < MACB_MAX_QUEUES; ++hw_q) {
bfa0914a 2395 if (!(bp->queue_mask & (1 << hw_q)))
02c958dd
CP
2396 continue;
2397
cf250de0 2398 queue = &bp->queues[q];
02c958dd
CP
2399 queue->bp = bp;
2400 if (hw_q) {
2401 queue->ISR = GEM_ISR(hw_q - 1);
2402 queue->IER = GEM_IER(hw_q - 1);
2403 queue->IDR = GEM_IDR(hw_q - 1);
2404 queue->IMR = GEM_IMR(hw_q - 1);
2405 queue->TBQP = GEM_TBQP(hw_q - 1);
2406 } else {
2407 /* queue0 uses legacy registers */
2408 queue->ISR = MACB_ISR;
2409 queue->IER = MACB_IER;
2410 queue->IDR = MACB_IDR;
2411 queue->IMR = MACB_IMR;
2412 queue->TBQP = MACB_TBQP;
2413 }
2414
2415 /* Get the IRQ: here we use the Linux queue index, not the hardware
2416 * queue index. The queue IRQ definitions in the device tree
2417 * must remove the optional gaps that could exist in the
2418 * hardware queue mask.
2419 */
cf250de0 2420 queue->irq = platform_get_irq(pdev, q);
02c958dd 2421 err = devm_request_irq(&pdev->dev, queue->irq, macb_interrupt,
20488239 2422 IRQF_SHARED, dev->name, queue);
02c958dd
CP
2423 if (err) {
2424 dev_err(&pdev->dev,
2425 "Unable to request IRQ %d (error %d)\n",
2426 queue->irq, err);
c69618b3 2427 return err;
02c958dd
CP
2428 }
2429
2430 INIT_WORK(&queue->tx_error_task, macb_tx_error_task);
cf250de0 2431 q++;
89e5785f
HS
2432 }
2433
5f1fa992 2434 dev->netdev_ops = &macb_netdev_ops;
bea3348e 2435 netif_napi_add(dev, &bp->napi, macb_poll, 64);
89e5785f 2436
4df95131
NF
2437 /* setup appropriate routines according to adapter type */
2438 if (macb_is_gem(bp)) {
a4c35ed3 2439 bp->max_tx_length = GEM_MAX_TX_LEN;
4df95131
NF
2440 bp->macbgem_ops.mog_alloc_rx_buffers = gem_alloc_rx_buffers;
2441 bp->macbgem_ops.mog_free_rx_buffers = gem_free_rx_buffers;
2442 bp->macbgem_ops.mog_init_rings = gem_init_rings;
2443 bp->macbgem_ops.mog_rx = gem_rx;
8cd5a56c 2444 dev->ethtool_ops = &gem_ethtool_ops;
4df95131 2445 } else {
a4c35ed3 2446 bp->max_tx_length = MACB_MAX_TX_LEN;
4df95131
NF
2447 bp->macbgem_ops.mog_alloc_rx_buffers = macb_alloc_rx_buffers;
2448 bp->macbgem_ops.mog_free_rx_buffers = macb_free_rx_buffers;
2449 bp->macbgem_ops.mog_init_rings = macb_init_rings;
2450 bp->macbgem_ops.mog_rx = macb_rx;
8cd5a56c 2451 dev->ethtool_ops = &macb_ethtool_ops;
4df95131
NF
2452 }
2453
a4c35ed3
CP
2454 /* Set features */
2455 dev->hw_features = NETIF_F_SG;
85ff3d87
CP
2456 /* Checksum offload is only available on gem with packet buffer */
2457 if (macb_is_gem(bp) && !(bp->caps & MACB_CAPS_FIFO_MODE))
924ec53c 2458 dev->hw_features |= NETIF_F_HW_CSUM | NETIF_F_RXCSUM;
a4c35ed3
CP
2459 if (bp->caps & MACB_CAPS_SG_DISABLED)
2460 dev->hw_features &= ~NETIF_F_SG;
2461 dev->features = dev->hw_features;
2462
ce721a70
NA
2463 if (!(bp->caps & MACB_CAPS_USRIO_DISABLED)) {
2464 val = 0;
2465 if (bp->phy_interface == PHY_INTERFACE_MODE_RGMII)
2466 val = GEM_BIT(RGMII);
2467 else if (bp->phy_interface == PHY_INTERFACE_MODE_RMII &&
6bdaa5e9 2468 (bp->caps & MACB_CAPS_USRIO_DEFAULT_IS_MII_GMII))
ce721a70 2469 val = MACB_BIT(RMII);
6bdaa5e9 2470 else if (!(bp->caps & MACB_CAPS_USRIO_DEFAULT_IS_MII_GMII))
ce721a70 2471 val = MACB_BIT(MII);
421d9df0 2472
ce721a70
NA
2473 if (bp->caps & MACB_CAPS_USRIO_HAS_CLKEN)
2474 val |= MACB_BIT(CLKEN);
421d9df0 2475
ce721a70
NA
2476 macb_or_gem_writel(bp, USRIO, val);
2477 }
421d9df0 2478
89e5785f 2479 /* Set MII management clock divider */
421d9df0
CP
2480 val = macb_mdc_clk_div(bp);
2481 val |= macb_dbw(bp);
022be25c
PCK
2482 if (bp->phy_interface == PHY_INTERFACE_MODE_SGMII)
2483 val |= GEM_BIT(SGMIIEN) | GEM_BIT(PCSSEL);
421d9df0
CP
2484 macb_writel(bp, NCFGR, val);
2485
2486 return 0;
421d9df0
CP
2487}
2488
2489#if defined(CONFIG_OF)
2490/* 1518 rounded up */
2491#define AT91ETHER_MAX_RBUFF_SZ 0x600
2492/* max number of receive buffers */
2493#define AT91ETHER_MAX_RX_DESCR 9
2494
2495/* Initialize and start the Receiver and Transmit subsystems */
2496static int at91ether_start(struct net_device *dev)
2497{
2498 struct macb *lp = netdev_priv(dev);
2499 dma_addr_t addr;
2500 u32 ctl;
2501 int i;
2502
2503 lp->rx_ring = dma_alloc_coherent(&lp->pdev->dev,
2504 (AT91ETHER_MAX_RX_DESCR *
2505 sizeof(struct macb_dma_desc)),
2506 &lp->rx_ring_dma, GFP_KERNEL);
2507 if (!lp->rx_ring)
2508 return -ENOMEM;
2509
2510 lp->rx_buffers = dma_alloc_coherent(&lp->pdev->dev,
2511 AT91ETHER_MAX_RX_DESCR *
2512 AT91ETHER_MAX_RBUFF_SZ,
2513 &lp->rx_buffers_dma, GFP_KERNEL);
2514 if (!lp->rx_buffers) {
2515 dma_free_coherent(&lp->pdev->dev,
2516 AT91ETHER_MAX_RX_DESCR *
2517 sizeof(struct macb_dma_desc),
2518 lp->rx_ring, lp->rx_ring_dma);
2519 lp->rx_ring = NULL;
2520 return -ENOMEM;
2521 }
2522
2523 addr = lp->rx_buffers_dma;
2524 for (i = 0; i < AT91ETHER_MAX_RX_DESCR; i++) {
2525 lp->rx_ring[i].addr = addr;
2526 lp->rx_ring[i].ctrl = 0;
2527 addr += AT91ETHER_MAX_RBUFF_SZ;
2528 }
2529
2530 /* Set the Wrap bit on the last descriptor */
2531 lp->rx_ring[AT91ETHER_MAX_RX_DESCR - 1].addr |= MACB_BIT(RX_WRAP);
2532
2533 /* Reset buffer index */
2534 lp->rx_tail = 0;
2535
2536 /* Program address of descriptor list in Rx Buffer Queue register */
2537 macb_writel(lp, RBQP, lp->rx_ring_dma);
2538
2539 /* Enable Receive and Transmit */
2540 ctl = macb_readl(lp, NCR);
2541 macb_writel(lp, NCR, ctl | MACB_BIT(RE) | MACB_BIT(TE));
2542
2543 return 0;
2544}
2545
2546/* Open the ethernet interface */
2547static int at91ether_open(struct net_device *dev)
2548{
2549 struct macb *lp = netdev_priv(dev);
2550 u32 ctl;
2551 int ret;
2552
2553 /* Clear internal statistics */
2554 ctl = macb_readl(lp, NCR);
2555 macb_writel(lp, NCR, ctl | MACB_BIT(CLRSTAT));
2556
2557 macb_set_hwaddr(lp);
2558
2559 ret = at91ether_start(dev);
2560 if (ret)
2561 return ret;
2562
2563 /* Enable MAC interrupts */
2564 macb_writel(lp, IER, MACB_BIT(RCOMP) |
2565 MACB_BIT(RXUBR) |
2566 MACB_BIT(ISR_TUND) |
2567 MACB_BIT(ISR_RLE) |
2568 MACB_BIT(TCOMP) |
2569 MACB_BIT(ISR_ROVR) |
2570 MACB_BIT(HRESP));
2571
2572 /* schedule a link state check */
2573 phy_start(lp->phy_dev);
2574
2575 netif_start_queue(dev);
2576
2577 return 0;
2578}
2579
2580/* Close the interface */
2581static int at91ether_close(struct net_device *dev)
2582{
2583 struct macb *lp = netdev_priv(dev);
2584 u32 ctl;
2585
2586 /* Disable Receiver and Transmitter */
2587 ctl = macb_readl(lp, NCR);
2588 macb_writel(lp, NCR, ctl & ~(MACB_BIT(TE) | MACB_BIT(RE)));
2589
2590 /* Disable MAC interrupts */
2591 macb_writel(lp, IDR, MACB_BIT(RCOMP) |
2592 MACB_BIT(RXUBR) |
2593 MACB_BIT(ISR_TUND) |
2594 MACB_BIT(ISR_RLE) |
2595 MACB_BIT(TCOMP) |
2596 MACB_BIT(ISR_ROVR) |
2597 MACB_BIT(HRESP));
2598
2599 netif_stop_queue(dev);
2600
2601 dma_free_coherent(&lp->pdev->dev,
2602 AT91ETHER_MAX_RX_DESCR *
2603 sizeof(struct macb_dma_desc),
2604 lp->rx_ring, lp->rx_ring_dma);
2605 lp->rx_ring = NULL;
2606
2607 dma_free_coherent(&lp->pdev->dev,
2608 AT91ETHER_MAX_RX_DESCR * AT91ETHER_MAX_RBUFF_SZ,
2609 lp->rx_buffers, lp->rx_buffers_dma);
2610 lp->rx_buffers = NULL;
2611
2612 return 0;
2613}
2614
2615/* Transmit packet */
2616static int at91ether_start_xmit(struct sk_buff *skb, struct net_device *dev)
2617{
2618 struct macb *lp = netdev_priv(dev);
2619
2620 if (macb_readl(lp, TSR) & MACB_BIT(RM9200_BNQ)) {
2621 netif_stop_queue(dev);
2622
2623 /* Store packet information (to free when Tx completed) */
2624 lp->skb = skb;
2625 lp->skb_length = skb->len;
2626 lp->skb_physaddr = dma_map_single(NULL, skb->data, skb->len,
2627 DMA_TO_DEVICE);
2628
2629 /* Set address of the data in the Transmit Address register */
2630 macb_writel(lp, TAR, lp->skb_physaddr);
2631 /* Set length of the packet in the Transmit Control register */
2632 macb_writel(lp, TCR, skb->len);
89e5785f 2633
421d9df0
CP
2634 } else {
2635 netdev_err(dev, "%s called, but device is busy!\n", __func__);
2636 return NETDEV_TX_BUSY;
2637 }
2638
2639 return NETDEV_TX_OK;
2640}
2641
2642 /* Extract received frames from the buffer descriptors and send them to the upper layers.
2643 * (Called from interrupt context)
2644 */
2645static void at91ether_rx(struct net_device *dev)
2646{
2647 struct macb *lp = netdev_priv(dev);
2648 unsigned char *p_recv;
2649 struct sk_buff *skb;
2650 unsigned int pktlen;
2651
2652 while (lp->rx_ring[lp->rx_tail].addr & MACB_BIT(RX_USED)) {
2653 p_recv = lp->rx_buffers + lp->rx_tail * AT91ETHER_MAX_RBUFF_SZ;
2654 pktlen = MACB_BF(RX_FRMLEN, lp->rx_ring[lp->rx_tail].ctrl);
2655 skb = netdev_alloc_skb(dev, pktlen + 2);
2656 if (skb) {
2657 skb_reserve(skb, 2);
2658 memcpy(skb_put(skb, pktlen), p_recv, pktlen);
2659
2660 skb->protocol = eth_type_trans(skb, dev);
2661 lp->stats.rx_packets++;
2662 lp->stats.rx_bytes += pktlen;
2663 netif_rx(skb);
2664 } else {
2665 lp->stats.rx_dropped++;
2666 }
2667
2668 if (lp->rx_ring[lp->rx_tail].ctrl & MACB_BIT(RX_MHASH_MATCH))
2669 lp->stats.multicast++;
2670
2671 /* reset ownership bit */
2672 lp->rx_ring[lp->rx_tail].addr &= ~MACB_BIT(RX_USED);
2673
2674 /* wrap after last buffer */
2675 if (lp->rx_tail == AT91ETHER_MAX_RX_DESCR - 1)
2676 lp->rx_tail = 0;
2677 else
2678 lp->rx_tail++;
2679 }
2680}
2681
2682/* MAC interrupt handler */
2683static irqreturn_t at91ether_interrupt(int irq, void *dev_id)
2684{
2685 struct net_device *dev = dev_id;
2686 struct macb *lp = netdev_priv(dev);
2687 u32 intstatus, ctl;
2688
2689 /* MAC Interrupt Status register indicates what interrupts are pending.
2690 * It is automatically cleared once read.
2691 */
2692 intstatus = macb_readl(lp, ISR);
2693
2694 /* Receive complete */
2695 if (intstatus & MACB_BIT(RCOMP))
2696 at91ether_rx(dev);
2697
2698 /* Transmit complete */
2699 if (intstatus & MACB_BIT(TCOMP)) {
2700 /* The TCOM bit is set even if the transmission failed */
2701 if (intstatus & (MACB_BIT(ISR_TUND) | MACB_BIT(ISR_RLE)))
2702 lp->stats.tx_errors++;
2703
2704 if (lp->skb) {
2705 dev_kfree_skb_irq(lp->skb);
2706 lp->skb = NULL;
2707 dma_unmap_single(NULL, lp->skb_physaddr,
2708 lp->skb_length, DMA_TO_DEVICE);
2709 lp->stats.tx_packets++;
2710 lp->stats.tx_bytes += lp->skb_length;
2711 }
2712 netif_wake_queue(dev);
2713 }
2714
2715 /* Work-around for EMAC Errata section 41.3.1 */
2716 if (intstatus & MACB_BIT(RXUBR)) {
2717 ctl = macb_readl(lp, NCR);
2718 macb_writel(lp, NCR, ctl & ~MACB_BIT(RE));
2719 macb_writel(lp, NCR, ctl | MACB_BIT(RE));
2720 }
2721
2722 if (intstatus & MACB_BIT(ISR_ROVR))
2723 netdev_err(dev, "ROVR error\n");
2724
2725 return IRQ_HANDLED;
2726}
2727
2728#ifdef CONFIG_NET_POLL_CONTROLLER
2729static void at91ether_poll_controller(struct net_device *dev)
2730{
2731 unsigned long flags;
2732
2733 local_irq_save(flags);
2734 at91ether_interrupt(dev->irq, dev);
2735 local_irq_restore(flags);
2736}
2737#endif
2738
2739static const struct net_device_ops at91ether_netdev_ops = {
2740 .ndo_open = at91ether_open,
2741 .ndo_stop = at91ether_close,
2742 .ndo_start_xmit = at91ether_start_xmit,
2743 .ndo_get_stats = macb_get_stats,
2744 .ndo_set_rx_mode = macb_set_rx_mode,
2745 .ndo_set_mac_address = eth_mac_addr,
2746 .ndo_do_ioctl = macb_ioctl,
2747 .ndo_validate_addr = eth_validate_addr,
2748 .ndo_change_mtu = eth_change_mtu,
2749#ifdef CONFIG_NET_POLL_CONTROLLER
2750 .ndo_poll_controller = at91ether_poll_controller,
2751#endif
2752};
2753
c69618b3
NF
2754static int at91ether_clk_init(struct platform_device *pdev, struct clk **pclk,
2755 struct clk **hclk, struct clk **tx_clk)
421d9df0 2756{
421d9df0 2757 int err;
421d9df0 2758
c69618b3
NF
2759 *hclk = NULL;
2760 *tx_clk = NULL;
2761
2762 *pclk = devm_clk_get(&pdev->dev, "ether_clk");
2763 if (IS_ERR(*pclk))
2764 return PTR_ERR(*pclk);
421d9df0 2765
c69618b3 2766 err = clk_prepare_enable(*pclk);
421d9df0
CP
2767 if (err) {
2768 dev_err(&pdev->dev, "failed to enable pclk (%u)\n", err);
2769 return err;
2770 }
2771
c69618b3
NF
2772 return 0;
2773}
2774
2775static int at91ether_init(struct platform_device *pdev)
2776{
2777 struct net_device *dev = platform_get_drvdata(pdev);
2778 struct macb *bp = netdev_priv(dev);
2779 int err;
2780 u32 reg;
2781
421d9df0
CP
2782 dev->netdev_ops = &at91ether_netdev_ops;
2783 dev->ethtool_ops = &macb_ethtool_ops;
2784
2785 err = devm_request_irq(&pdev->dev, dev->irq, at91ether_interrupt,
2786 0, dev->name, dev);
2787 if (err)
c69618b3 2788 return err;
421d9df0
CP
2789
2790 macb_writel(bp, NCR, 0);
2791
2792 reg = MACB_BF(CLK, MACB_CLK_DIV32) | MACB_BIT(BIG);
2793 if (bp->phy_interface == PHY_INTERFACE_MODE_RMII)
2794 reg |= MACB_BIT(RM9200_RMII);
2795
2796 macb_writel(bp, NCFGR, reg);
2797
2798 return 0;
421d9df0
CP
2799}
2800
3cef5c5b 2801static const struct macb_config at91sam9260_config = {
6bdaa5e9 2802 .caps = MACB_CAPS_USRIO_HAS_CLKEN | MACB_CAPS_USRIO_DEFAULT_IS_MII_GMII,
c69618b3 2803 .clk_init = macb_clk_init,
421d9df0
CP
2804 .init = macb_init,
2805};
2806
3cef5c5b 2807static const struct macb_config pc302gem_config = {
421d9df0
CP
2808 .caps = MACB_CAPS_SG_DISABLED | MACB_CAPS_GIGABIT_MODE_AVAILABLE,
2809 .dma_burst_length = 16,
c69618b3 2810 .clk_init = macb_clk_init,
421d9df0
CP
2811 .init = macb_init,
2812};
2813
5c8fe711 2814static const struct macb_config sama5d2_config = {
6bdaa5e9 2815 .caps = MACB_CAPS_USRIO_DEFAULT_IS_MII_GMII,
5c8fe711
CP
2816 .dma_burst_length = 16,
2817 .clk_init = macb_clk_init,
2818 .init = macb_init,
2819};
2820
3cef5c5b 2821static const struct macb_config sama5d3_config = {
6bdaa5e9
NF
2822 .caps = MACB_CAPS_SG_DISABLED | MACB_CAPS_GIGABIT_MODE_AVAILABLE
2823 | MACB_CAPS_USRIO_DEFAULT_IS_MII_GMII,
421d9df0 2824 .dma_burst_length = 16,
c69618b3 2825 .clk_init = macb_clk_init,
421d9df0
CP
2826 .init = macb_init,
2827};
2828
3cef5c5b 2829static const struct macb_config sama5d4_config = {
6bdaa5e9 2830 .caps = MACB_CAPS_USRIO_DEFAULT_IS_MII_GMII,
421d9df0 2831 .dma_burst_length = 4,
c69618b3 2832 .clk_init = macb_clk_init,
421d9df0
CP
2833 .init = macb_init,
2834};
2835
3cef5c5b 2836static const struct macb_config emac_config = {
c69618b3 2837 .clk_init = at91ether_clk_init,
421d9df0
CP
2838 .init = at91ether_init,
2839};
2840
e611b5b8
NA
2841static const struct macb_config np4_config = {
2842 .caps = MACB_CAPS_USRIO_DISABLED,
2843 .clk_init = macb_clk_init,
2844 .init = macb_init,
2845};
36583eb5 2846
7b61f9c1 2847static const struct macb_config zynqmp_config = {
7baaa909 2848 .caps = MACB_CAPS_GIGABIT_MODE_AVAILABLE | MACB_CAPS_JUMBO,
7b61f9c1
HK
2849 .dma_burst_length = 16,
2850 .clk_init = macb_clk_init,
2851 .init = macb_init,
98b5a0f4 2852 .jumbo_max_len = 10240,
7b61f9c1
HK
2853};
2854
222ca8e0 2855static const struct macb_config zynq_config = {
7baaa909 2856 .caps = MACB_CAPS_GIGABIT_MODE_AVAILABLE | MACB_CAPS_NO_GIGABIT_HALF,
222ca8e0
NS
2857 .dma_burst_length = 16,
2858 .clk_init = macb_clk_init,
2859 .init = macb_init,
2860};
2861
421d9df0
CP
2862static const struct of_device_id macb_dt_ids[] = {
2863 { .compatible = "cdns,at32ap7000-macb" },
2864 { .compatible = "cdns,at91sam9260-macb", .data = &at91sam9260_config },
2865 { .compatible = "cdns,macb" },
e611b5b8 2866 { .compatible = "cdns,np4-macb", .data = &np4_config },
421d9df0
CP
2867 { .compatible = "cdns,pc302-gem", .data = &pc302gem_config },
2868 { .compatible = "cdns,gem", .data = &pc302gem_config },
5c8fe711 2869 { .compatible = "atmel,sama5d2-gem", .data = &sama5d2_config },
421d9df0
CP
2870 { .compatible = "atmel,sama5d3-gem", .data = &sama5d3_config },
2871 { .compatible = "atmel,sama5d4-gem", .data = &sama5d4_config },
2872 { .compatible = "cdns,at91rm9200-emac", .data = &emac_config },
2873 { .compatible = "cdns,emac", .data = &emac_config },
7b61f9c1 2874 { .compatible = "cdns,zynqmp-gem", .data = &zynqmp_config},
222ca8e0 2875 { .compatible = "cdns,zynq-gem", .data = &zynq_config },
421d9df0
CP
2876 { /* sentinel */ }
2877};
2878MODULE_DEVICE_TABLE(of, macb_dt_ids);
2879#endif /* CONFIG_OF */
2880
2881static int macb_probe(struct platform_device *pdev)
2882{
c69618b3
NF
2883 int (*clk_init)(struct platform_device *, struct clk **,
2884 struct clk **, struct clk **)
2885 = macb_clk_init;
421d9df0
CP
2886 int (*init)(struct platform_device *) = macb_init;
2887 struct device_node *np = pdev->dev.of_node;
270c499f 2888 struct device_node *phy_node;
421d9df0 2889 const struct macb_config *macb_config = NULL;
36df7455 2890 struct clk *pclk, *hclk = NULL, *tx_clk = NULL;
421d9df0
CP
2891 unsigned int queue_mask, num_queues;
2892 struct macb_platform_data *pdata;
f2ce8a9e 2893 bool native_io;
421d9df0
CP
2894 struct phy_device *phydev;
2895 struct net_device *dev;
2896 struct resource *regs;
2897 void __iomem *mem;
2898 const char *mac;
2899 struct macb *bp;
2900 int err;
2901
f2ce8a9e
AS
2902 regs = platform_get_resource(pdev, IORESOURCE_MEM, 0);
2903 mem = devm_ioremap_resource(&pdev->dev, regs);
2904 if (IS_ERR(mem))
2905 return PTR_ERR(mem);
2906
c69618b3
NF
2907 if (np) {
2908 const struct of_device_id *match;
2909
2910 match = of_match_node(macb_dt_ids, np);
2911 if (match && match->data) {
2912 macb_config = match->data;
2913 clk_init = macb_config->clk_init;
2914 init = macb_config->init;
2915 }
2916 }
2917
2918 err = clk_init(pdev, &pclk, &hclk, &tx_clk);
2919 if (err)
2920 return err;
2921
f2ce8a9e 2922 native_io = hw_is_native_io(mem);
421d9df0 2923
f2ce8a9e 2924 macb_probe_queues(mem, native_io, &queue_mask, &num_queues);
421d9df0 2925 dev = alloc_etherdev_mq(sizeof(*bp), num_queues);
c69618b3
NF
2926 if (!dev) {
2927 err = -ENOMEM;
2928 goto err_disable_clocks;
2929 }
421d9df0
CP
2930
2931 dev->base_addr = regs->start;
2932
2933 SET_NETDEV_DEV(dev, &pdev->dev);
2934
2935 bp = netdev_priv(dev);
2936 bp->pdev = pdev;
2937 bp->dev = dev;
2938 bp->regs = mem;
f2ce8a9e
AS
2939 bp->native_io = native_io;
2940 if (native_io) {
7a6e0706
DM
2941 bp->macb_reg_readl = hw_readl_native;
2942 bp->macb_reg_writel = hw_writel_native;
f2ce8a9e 2943 } else {
7a6e0706
DM
2944 bp->macb_reg_readl = hw_readl;
2945 bp->macb_reg_writel = hw_writel;
f2ce8a9e 2946 }
421d9df0 2947 bp->num_queues = num_queues;
bfa0914a 2948 bp->queue_mask = queue_mask;
c69618b3
NF
2949 if (macb_config)
2950 bp->dma_burst_length = macb_config->dma_burst_length;
2951 bp->pclk = pclk;
2952 bp->hclk = hclk;
2953 bp->tx_clk = tx_clk;
f36dbe6a 2954 if (macb_config)
98b5a0f4 2955 bp->jumbo_max_len = macb_config->jumbo_max_len;
98b5a0f4 2956
3e2a5e15 2957 bp->wol = 0;
7c4a1d0c 2958 if (of_get_property(np, "magic-packet", NULL))
3e2a5e15
SP
2959 bp->wol |= MACB_WOL_HAS_MAGIC_PACKET;
2960 device_init_wakeup(&pdev->dev, bp->wol & MACB_WOL_HAS_MAGIC_PACKET);
2961
421d9df0
CP
2962 spin_lock_init(&bp->lock);
2963
ad78347f 2964 /* setup capabilities */
f6970505
NF
2965 macb_configure_caps(bp, macb_config);
2966
421d9df0
CP
2967 platform_set_drvdata(pdev, dev);
2968
2969 dev->irq = platform_get_irq(pdev, 0);
c69618b3
NF
2970 if (dev->irq < 0) {
2971 err = dev->irq;
2972 goto err_disable_clocks;
2973 }
421d9df0
CP
2974
2975 mac = of_get_mac_address(np);
50907043 2976 if (mac)
eefb52d1 2977 ether_addr_copy(bp->dev->dev_addr, mac);
50907043 2978 else
fb97a846
JCPV
2979 macb_get_hwaddr(bp);
2980
5833e052 2981 /* Power up the PHY if there is a GPIO reset */
270c499f
GC
2982 phy_node = of_get_next_available_child(np, NULL);
2983 if (phy_node) {
2984 int gpio = of_get_named_gpio(phy_node, "reset-gpios", 0);
64ec42fe 2985
0e3e7999 2986 if (gpio_is_valid(gpio)) {
270c499f 2987 bp->reset_gpio = gpio_to_desc(gpio);
0e3e7999
CK
2988 gpiod_direction_output(bp->reset_gpio, 1);
2989 }
270c499f
GC
2990 }
2991 of_node_put(phy_node);
5833e052 2992
421d9df0 2993 err = of_get_phy_mode(np);
fb97a846 2994 if (err < 0) {
c607a0d9 2995 pdata = dev_get_platdata(&pdev->dev);
fb97a846
JCPV
2996 if (pdata && pdata->is_rmii)
2997 bp->phy_interface = PHY_INTERFACE_MODE_RMII;
2998 else
2999 bp->phy_interface = PHY_INTERFACE_MODE_MII;
3000 } else {
3001 bp->phy_interface = err;
3002 }
6c36a707 3003
421d9df0
CP
3004 /* IP specific init */
3005 err = init(pdev);
3006 if (err)
3007 goto err_out_free_netdev;
89e5785f 3008
cf669660
FF
3009 err = macb_mii_init(bp);
3010 if (err)
3011 goto err_out_free_netdev;
3012
3013 phydev = bp->phy_dev;
3014
3015 netif_carrier_off(dev);
3016
89e5785f
HS
3017 err = register_netdev(dev);
3018 if (err) {
3019 dev_err(&pdev->dev, "Cannot register net device, aborting.\n");
cf669660 3020 goto err_out_unregister_mdio;
89e5785f
HS
3021 }
3022
cf669660 3023 phy_attached_info(phydev);
03fc4721 3024
5879823f
BS
3025 netdev_info(dev, "Cadence %s rev 0x%08x at 0x%08lx irq %d (%pM)\n",
3026 macb_is_gem(bp) ? "GEM" : "MACB", macb_readl(bp, MID),
3027 dev->base_addr, dev->irq, dev->dev_addr);
89e5785f
HS
3028
3029 return 0;
3030
cf669660
FF
3031err_out_unregister_mdio:
3032 phy_disconnect(bp->phy_dev);
3033 mdiobus_unregister(bp->mii_bus);
3034 mdiobus_free(bp->mii_bus);
3035
3036 /* Shutdown the PHY if there is a GPIO reset */
3037 if (bp->reset_gpio)
3038 gpiod_set_value(bp->reset_gpio, 0);
421d9df0 3039
cf250de0 3040err_out_free_netdev:
02c958dd 3041 free_netdev(dev);
421d9df0 3042
c69618b3
NF
3043err_disable_clocks:
3044 clk_disable_unprepare(tx_clk);
3045 clk_disable_unprepare(hclk);
3046 clk_disable_unprepare(pclk);
3047
89e5785f
HS
3048 return err;
3049}
3050
9e86d766 3051static int macb_remove(struct platform_device *pdev)
89e5785f
HS
3052{
3053 struct net_device *dev;
3054 struct macb *bp;
3055
3056 dev = platform_get_drvdata(pdev);
3057
3058 if (dev) {
3059 bp = netdev_priv(dev);
84b7901f
AN
3060 if (bp->phy_dev)
3061 phy_disconnect(bp->phy_dev);
298cf9be 3062 mdiobus_unregister(bp->mii_bus);
298cf9be 3063 mdiobus_free(bp->mii_bus);
5833e052
GC
3064
3065 /* Shutdown the PHY if there is a GPIO reset */
0e3e7999
CK
3066 if (bp->reset_gpio)
3067 gpiod_set_value(bp->reset_gpio, 0);
5833e052 3068
89e5785f 3069 unregister_netdev(dev);
93b31f48 3070 clk_disable_unprepare(bp->tx_clk);
ace58010 3071 clk_disable_unprepare(bp->hclk);
ace58010 3072 clk_disable_unprepare(bp->pclk);
e965be7d 3073 free_netdev(dev);
89e5785f
HS
3074 }
3075
3076 return 0;
3077}
3078
d23823dd 3079static int __maybe_unused macb_suspend(struct device *dev)
c1f598fd 3080{
0dfc3e18 3081 struct platform_device *pdev = to_platform_device(dev);
c1f598fd
HS
3082 struct net_device *netdev = platform_get_drvdata(pdev);
3083 struct macb *bp = netdev_priv(netdev);
3084
03fc4721 3085 netif_carrier_off(netdev);
c1f598fd
HS
3086 netif_device_detach(netdev);
3087
3e2a5e15
SP
3088 if (bp->wol & MACB_WOL_ENABLED) {
3089 macb_writel(bp, IER, MACB_BIT(WOL));
3090 macb_writel(bp, WOL, MACB_BIT(MAG));
3091 enable_irq_wake(bp->queues[0].irq);
3092 } else {
3093 clk_disable_unprepare(bp->tx_clk);
3094 clk_disable_unprepare(bp->hclk);
3095 clk_disable_unprepare(bp->pclk);
3096 }
c1f598fd
HS
3097
3098 return 0;
3099}
3100
d23823dd 3101static int __maybe_unused macb_resume(struct device *dev)
c1f598fd 3102{
0dfc3e18 3103 struct platform_device *pdev = to_platform_device(dev);
c1f598fd
HS
3104 struct net_device *netdev = platform_get_drvdata(pdev);
3105 struct macb *bp = netdev_priv(netdev);
3106
3e2a5e15
SP
3107 if (bp->wol & MACB_WOL_ENABLED) {
3108 macb_writel(bp, IDR, MACB_BIT(WOL));
3109 macb_writel(bp, WOL, 0);
3110 disable_irq_wake(bp->queues[0].irq);
3111 } else {
3112 clk_prepare_enable(bp->pclk);
3113 clk_prepare_enable(bp->hclk);
3114 clk_prepare_enable(bp->tx_clk);
3115 }
c1f598fd
HS
3116
3117 netif_device_attach(netdev);
3118
3119 return 0;
3120}
c1f598fd 3121
0dfc3e18
SB
3122static SIMPLE_DEV_PM_OPS(macb_pm_ops, macb_suspend, macb_resume);
3123
89e5785f 3124static struct platform_driver macb_driver = {
9e86d766
NR
3125 .probe = macb_probe,
3126 .remove = macb_remove,
89e5785f
HS
3127 .driver = {
3128 .name = "macb",
fb97a846 3129 .of_match_table = of_match_ptr(macb_dt_ids),
0dfc3e18 3130 .pm = &macb_pm_ops,
89e5785f
HS
3131 },
3132};
3133
9e86d766 3134module_platform_driver(macb_driver);
89e5785f
HS
3135
3136MODULE_LICENSE("GPL");
f75ba50b 3137MODULE_DESCRIPTION("Cadence MACB/GEM Ethernet driver");
e05503ef 3138MODULE_AUTHOR("Haavard Skinnemoen (Atmel)");
72abb461 3139MODULE_ALIAS("platform:macb");