drivers/net/ethernet/adi/bfin_mac.c
1/*
2 * Blackfin On-Chip MAC Driver
3 *
4 * Copyright 2004-2010 Analog Devices Inc.
5 *
6 * Enter bugs at http://blackfin.uclinux.org/
7 *
8 * Licensed under the GPL-2 or later.
9 */
10
11#define DRV_VERSION "1.1"
12#define DRV_DESC "Blackfin on-chip Ethernet MAC driver"
13
14#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
15
16#include <linux/init.h>
17#include <linux/module.h>
18#include <linux/kernel.h>
19#include <linux/sched.h>
20#include <linux/slab.h>
21#include <linux/delay.h>
22#include <linux/timer.h>
23#include <linux/errno.h>
24#include <linux/irq.h>
25#include <linux/io.h>
26#include <linux/ioport.h>
27#include <linux/crc32.h>
28#include <linux/device.h>
29#include <linux/spinlock.h>
30#include <linux/mii.h>
31#include <linux/netdevice.h>
32#include <linux/etherdevice.h>
33#include <linux/ethtool.h>
34#include <linux/skbuff.h>
35#include <linux/platform_device.h>
36
37#include <asm/dma.h>
38#include <linux/dma-mapping.h>
39
40#include <asm/div64.h>
41#include <asm/dpmc.h>
42#include <asm/blackfin.h>
43#include <asm/cacheflush.h>
44#include <asm/portmux.h>
45#include <mach/pll.h>
46
47#include "bfin_mac.h"
48
49MODULE_AUTHOR("Bryan Wu, Luke Yang");
50MODULE_LICENSE("GPL");
51MODULE_DESCRIPTION(DRV_DESC);
52MODULE_ALIAS("platform:bfin_mac");
53
54#if defined(CONFIG_BFIN_MAC_USE_L1)
55# define bfin_mac_alloc(dma_handle, size, num) l1_data_sram_zalloc(size*num)
56# define bfin_mac_free(dma_handle, ptr, num) l1_data_sram_free(ptr)
57#else
58# define bfin_mac_alloc(dma_handle, size, num) \
59 dma_alloc_coherent(NULL, size*num, dma_handle, GFP_KERNEL)
60# define bfin_mac_free(dma_handle, ptr, num) \
61 dma_free_coherent(NULL, sizeof(*ptr)*num, ptr, dma_handle)
62#endif
63
64#define PKT_BUF_SZ 1580
65
66#define MAX_TIMEOUT_CNT 500
67
68/* pointers to maintain transmit list */
69static struct net_dma_desc_tx *tx_list_head;
70static struct net_dma_desc_tx *tx_list_tail;
71static struct net_dma_desc_rx *rx_list_head;
72static struct net_dma_desc_rx *rx_list_tail;
73static struct net_dma_desc_rx *current_rx_ptr;
74static struct net_dma_desc_tx *current_tx_ptr;
75static struct net_dma_desc_tx *tx_desc;
76static struct net_dma_desc_rx *rx_desc;
77
78static void desc_list_free(void)
79{
80 struct net_dma_desc_rx *r;
81 struct net_dma_desc_tx *t;
82 int i;
83#if !defined(CONFIG_BFIN_MAC_USE_L1)
84 dma_addr_t dma_handle = 0;
85#endif
86
87 if (tx_desc) {
88 t = tx_list_head;
89 for (i = 0; i < CONFIG_BFIN_TX_DESC_NUM; i++) {
90 if (t) {
91 if (t->skb) {
92 dev_kfree_skb(t->skb);
93 t->skb = NULL;
94 }
95 t = t->next;
96 }
97 }
98 bfin_mac_free(dma_handle, tx_desc, CONFIG_BFIN_TX_DESC_NUM);
99 }
100
101 if (rx_desc) {
102 r = rx_list_head;
103 for (i = 0; i < CONFIG_BFIN_RX_DESC_NUM; i++) {
104 if (r) {
105 if (r->skb) {
106 dev_kfree_skb(r->skb);
107 r->skb = NULL;
108 }
109 r = r->next;
110 }
111 }
112 bfin_mac_free(dma_handle, rx_desc, CONFIG_BFIN_RX_DESC_NUM);
113 }
114}
115
116static int desc_list_init(struct net_device *dev)
117{
118 int i;
119 struct sk_buff *new_skb;
120#if !defined(CONFIG_BFIN_MAC_USE_L1)
121 /*
122 * This dma_handle is useless in Blackfin dma_alloc_coherent().
123 * The real DMA handle is the return value of dma_alloc_coherent().
124 */
125 dma_addr_t dma_handle;
126#endif
127
128 tx_desc = bfin_mac_alloc(&dma_handle,
129 sizeof(struct net_dma_desc_tx),
130 CONFIG_BFIN_TX_DESC_NUM);
131 if (tx_desc == NULL)
132 goto init_error;
133
134 rx_desc = bfin_mac_alloc(&dma_handle,
135 sizeof(struct net_dma_desc_rx),
136 CONFIG_BFIN_RX_DESC_NUM);
137 if (rx_desc == NULL)
138 goto init_error;
139
140 /* init tx_list */
141 tx_list_head = tx_list_tail = tx_desc;
142
143 for (i = 0; i < CONFIG_BFIN_TX_DESC_NUM; i++) {
144 struct net_dma_desc_tx *t = tx_desc + i;
145 struct dma_descriptor *a = &(t->desc_a);
146 struct dma_descriptor *b = &(t->desc_b);
147
148 /*
149 * disable DMA
150 * read from memory WNR = 0
151 * wordsize is 32 bits
152 * 6 half words is desc size
153 * large desc flow
154 */
155 a->config = WDSIZE_32 | NDSIZE_6 | DMAFLOW_LARGE;
156 a->start_addr = (unsigned long)t->packet;
157 a->x_count = 0;
158 a->next_dma_desc = b;
159
160 /*
161 * enable DMA
162 * write to memory WNR = 1
163 * wordsize is 32 bits
164 * disable interrupt
165 * 6 half words is desc size
166 * large desc flow
167 */
168 b->config = DMAEN | WNR | WDSIZE_32 | NDSIZE_6 | DMAFLOW_LARGE;
169 b->start_addr = (unsigned long)(&(t->status));
170 b->x_count = 0;
171
172 t->skb = NULL;
173 tx_list_tail->desc_b.next_dma_desc = a;
174 tx_list_tail->next = t;
175 tx_list_tail = t;
176 }
177 tx_list_tail->next = tx_list_head; /* tx_list is a circle */
178 tx_list_tail->desc_b.next_dma_desc = &(tx_list_head->desc_a);
179 current_tx_ptr = tx_list_head;
180
181 /* init rx_list */
182 rx_list_head = rx_list_tail = rx_desc;
183
184 for (i = 0; i < CONFIG_BFIN_RX_DESC_NUM; i++) {
185 struct net_dma_desc_rx *r = rx_desc + i;
186 struct dma_descriptor *a = &(r->desc_a);
187 struct dma_descriptor *b = &(r->desc_b);
188
189 /* allocate a new skb for the next receive */
190 new_skb = netdev_alloc_skb(dev, PKT_BUF_SZ + NET_IP_ALIGN);
191 if (!new_skb) {
192 pr_notice("init: low on mem - packet dropped\n");
193 goto init_error;
194 }
195 skb_reserve(new_skb, NET_IP_ALIGN);
196 /* Invalidate the data cache over the skb->data range when the cache
197 * is write-back, so stale lines don't overwrite the new data written by DMA
198 */
199 blackfin_dcache_invalidate_range((unsigned long)new_skb->head,
200 (unsigned long)new_skb->end);
201 r->skb = new_skb;
202
203 /*
204 * enable DMA
205 * write to memory WNR = 1
206 * wordsize is 32 bits
207 * disable interrupt
208 * 6 half words is desc size
209 * large desc flow
210 */
211 a->config = DMAEN | WNR | WDSIZE_32 | NDSIZE_6 | DMAFLOW_LARGE;
212 /* since RXDWA is enabled */
213 a->start_addr = (unsigned long)new_skb->data - 2;
214 a->x_count = 0;
215 a->next_dma_desc = b;
216
217 /*
218 * enable DMA
219 * write to memory WNR = 1
220 * wordsize is 32 bits
221 * enable interrupt
222 * 6 half words is desc size
223 * large desc flow
224 */
225 b->config = DMAEN | WNR | WDSIZE_32 | DI_EN |
226 NDSIZE_6 | DMAFLOW_LARGE;
227 b->start_addr = (unsigned long)(&(r->status));
228 b->x_count = 0;
229
230 rx_list_tail->desc_b.next_dma_desc = a;
231 rx_list_tail->next = r;
232 rx_list_tail = r;
233 }
234 rx_list_tail->next = rx_list_head; /* rx_list is a circle */
235 rx_list_tail->desc_b.next_dma_desc = &(rx_list_head->desc_a);
236 current_rx_ptr = rx_list_head;
237
238 return 0;
239
240init_error:
241 desc_list_free();
242 pr_err("kmalloc failed\n");
243 return -ENOMEM;
244}
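/*
 * Resulting descriptor layout (informal sketch, derived from the code above):
 * each list element chains its payload descriptor desc_a into its status
 * write-back descriptor desc_b, and desc_b points at the next element's
 * desc_a, with the tail wrapping back to the head:
 *
 *   [0].desc_a -> [0].desc_b -> [1].desc_a -> ... -> [N-1].desc_b -> [0].desc_a
 *
 * The TX ring starts with DMAEN clear in desc_a (it is armed per packet in
 * bfin_mac_hard_start_xmit()), while the RX ring is armed immediately.
 */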
245
246
247/*---PHY CONTROL AND CONFIGURATION-----------------------------------------*/
248
249/*
250 * MII operations
251 */
252/* Wait until the previous MDC/MDIO transaction has completed */
253static int bfin_mdio_poll(void)
254{
255 int timeout_cnt = MAX_TIMEOUT_CNT;
256
257 /* poll the STABUSY bit */
258 while ((bfin_read_EMAC_STAADD()) & STABUSY) {
259 udelay(1);
260 if (timeout_cnt-- < 0) {
261 pr_err("timed out waiting for MDC/MDIO transaction to complete\n");
262 return -ETIMEDOUT;
263 }
264 }
265
266 return 0;
267}
268
269/* Read an off-chip register in a PHY through the MDC/MDIO port */
270static int bfin_mdiobus_read(struct mii_bus *bus, int phy_addr, int regnum)
271{
272 int ret;
273
274 ret = bfin_mdio_poll();
275 if (ret)
276 return ret;
277
278 /* read mode */
279 bfin_write_EMAC_STAADD(SET_PHYAD((u16) phy_addr) |
280 SET_REGAD((u16) regnum) |
281 STABUSY);
282
283 ret = bfin_mdio_poll();
284 if (ret)
285 return ret;
286
287 return (int) bfin_read_EMAC_STADAT();
288}
289
290/* Write an off-chip register in a PHY through the MDC/MDIO port */
291static int bfin_mdiobus_write(struct mii_bus *bus, int phy_addr, int regnum,
292 u16 value)
293{
294 int ret;
295
296 ret = bfin_mdio_poll();
297 if (ret)
298 return ret;
299
300 bfin_write_EMAC_STADAT((u32) value);
301
302 /* write mode */
303 bfin_write_EMAC_STAADD(SET_PHYAD((u16) phy_addr) |
304 SET_REGAD((u16) regnum) |
305 STAOP |
306 STABUSY);
307
308 return bfin_mdio_poll();
309}
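/*
 * Usage sketch (illustrative, not part of the driver): the accessors above
 * can be exercised directly, e.g. to read the PHY ID registers of a PHY
 * assumed to sit at address 1 on this bus:
 *
 *   int id1 = bfin_mdiobus_read(bus, 1, MII_PHYSID1);
 *   int id2 = bfin_mdiobus_read(bus, 1, MII_PHYSID2);
 *   if (id1 >= 0 && id2 >= 0)
 *       pr_info("PHY id %04x%04x\n", id1, id2);
 *
 * A negative return value is the -ETIMEDOUT propagated from bfin_mdio_poll().
 */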
310
311static int bfin_mdiobus_reset(struct mii_bus *bus)
312{
313 return 0;
314}
315
316static void bfin_mac_adjust_link(struct net_device *dev)
317{
318 struct bfin_mac_local *lp = netdev_priv(dev);
319 struct phy_device *phydev = lp->phydev;
320 unsigned long flags;
321 int new_state = 0;
322
323 spin_lock_irqsave(&lp->lock, flags);
324 if (phydev->link) {
325 /* Now we make sure that we can be in full duplex mode.
326 * If not, we operate in half-duplex mode. */
327 if (phydev->duplex != lp->old_duplex) {
328 u32 opmode = bfin_read_EMAC_OPMODE();
329 new_state = 1;
330
331 if (phydev->duplex)
332 opmode |= FDMODE;
333 else
334 opmode &= ~(FDMODE);
335
336 bfin_write_EMAC_OPMODE(opmode);
337 lp->old_duplex = phydev->duplex;
338 }
339
340 if (phydev->speed != lp->old_speed) {
341 if (phydev->interface == PHY_INTERFACE_MODE_RMII) {
342 u32 opmode = bfin_read_EMAC_OPMODE();
343 switch (phydev->speed) {
344 case 10:
345 opmode |= RMII_10;
346 break;
347 case 100:
348 opmode &= ~RMII_10;
349 break;
350 default:
351 netdev_warn(dev,
352 "Ack! Speed (%d) is not 10/100!\n",
353 phydev->speed);
354 break;
355 }
356 bfin_write_EMAC_OPMODE(opmode);
357 }
358
359 new_state = 1;
360 lp->old_speed = phydev->speed;
361 }
362
363 if (!lp->old_link) {
364 new_state = 1;
365 lp->old_link = 1;
366 }
367 } else if (lp->old_link) {
368 new_state = 1;
369 lp->old_link = 0;
370 lp->old_speed = 0;
371 lp->old_duplex = -1;
372 }
373
374 if (new_state) {
375 u32 opmode = bfin_read_EMAC_OPMODE();
376 phy_print_status(phydev);
377 pr_debug("EMAC_OPMODE = 0x%08x\n", opmode);
378 }
379
380 spin_unlock_irqrestore(&lp->lock, flags);
381}
382
383/* MDC = 2.5 MHz */
384#define MDC_CLK 2500000
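/*
 * Worked example (assuming an illustrative 100 MHz system clock): mii_probe()
 * below computes mdc_div = ((sclk / MDC_CLK) / 2) - 1 = ((100000000 / 2500000)
 * / 2) - 1 = 19, giving an MDC frequency of sclk / (2 * (mdc_div + 1)) =
 * 100000000 / 40 = 2.5 MHz, i.e. the target defined above.
 */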
385
386static int mii_probe(struct net_device *dev, int phy_mode)
387{
388 struct bfin_mac_local *lp = netdev_priv(dev);
389 struct phy_device *phydev = NULL;
390 unsigned short sysctl;
391 int i;
392 u32 sclk, mdc_div;
393
394 /* Enable PHY output early */
395 if (!(bfin_read_VR_CTL() & CLKBUFOE))
396 bfin_write_VR_CTL(bfin_read_VR_CTL() | CLKBUFOE);
397
398 sclk = get_sclk();
399 mdc_div = ((sclk / MDC_CLK) / 2) - 1;
400
401 sysctl = bfin_read_EMAC_SYSCTL();
402 sysctl = (sysctl & ~MDCDIV) | SET_MDCDIV(mdc_div);
403 bfin_write_EMAC_SYSCTL(sysctl);
404
405 /* search for connected PHY device */
406 for (i = 0; i < PHY_MAX_ADDR; ++i) {
407 struct phy_device *const tmp_phydev = lp->mii_bus->phy_map[i];
408
409 if (!tmp_phydev)
410 continue; /* no PHY here... */
411
412 phydev = tmp_phydev;
413 break; /* found it */
414 }
415
416 /* now we are supposed to have a proper phydev to attach to... */
417 if (!phydev) {
418 netdev_err(dev, "no phy device found\n");
419 return -ENODEV;
420 }
421
422 if (phy_mode != PHY_INTERFACE_MODE_RMII &&
423 phy_mode != PHY_INTERFACE_MODE_MII) {
424 netdev_err(dev, "invalid phy interface mode\n");
425 return -EINVAL;
426 }
427
428 phydev = phy_connect(dev, dev_name(&phydev->dev), &bfin_mac_adjust_link,
429 0, phy_mode);
430
431 if (IS_ERR(phydev)) {
432 netdev_err(dev, "could not attach PHY\n");
433 return PTR_ERR(phydev);
434 }
435
436 /* mask with MAC supported features */
437 phydev->supported &= (SUPPORTED_10baseT_Half
438 | SUPPORTED_10baseT_Full
439 | SUPPORTED_100baseT_Half
440 | SUPPORTED_100baseT_Full
441 | SUPPORTED_Autoneg
442 | SUPPORTED_Pause | SUPPORTED_Asym_Pause
443 | SUPPORTED_MII
444 | SUPPORTED_TP);
445
446 phydev->advertising = phydev->supported;
447
448 lp->old_link = 0;
449 lp->old_speed = 0;
450 lp->old_duplex = -1;
451 lp->phydev = phydev;
452
453 pr_info("attached PHY driver [%s] "
454 "(mii_bus:phy_addr=%s, irq=%d, mdc_clk=%dHz(mdc_div=%d)@sclk=%dMHz)\n",
455 phydev->drv->name, dev_name(&phydev->dev), phydev->irq,
456 MDC_CLK, mdc_div, sclk/1000000);
457
458 return 0;
459}
460
461/*
462 * Ethtool support
463 */
464
465/*
466 * interrupt routine for magic packet wakeup
467 */
468static irqreturn_t bfin_mac_wake_interrupt(int irq, void *dev_id)
469{
470 return IRQ_HANDLED;
471}
472
473static int
474bfin_mac_ethtool_getsettings(struct net_device *dev, struct ethtool_cmd *cmd)
475{
476 struct bfin_mac_local *lp = netdev_priv(dev);
477
478 if (lp->phydev)
479 return phy_ethtool_gset(lp->phydev, cmd);
480
481 return -EINVAL;
482}
483
484static int
485bfin_mac_ethtool_setsettings(struct net_device *dev, struct ethtool_cmd *cmd)
486{
487 struct bfin_mac_local *lp = netdev_priv(dev);
488
489 if (!capable(CAP_NET_ADMIN))
490 return -EPERM;
491
492 if (lp->phydev)
493 return phy_ethtool_sset(lp->phydev, cmd);
494
495 return -EINVAL;
496}
497
498static void bfin_mac_ethtool_getdrvinfo(struct net_device *dev,
499 struct ethtool_drvinfo *info)
500{
501 strlcpy(info->driver, KBUILD_MODNAME, sizeof(info->driver));
502 strlcpy(info->version, DRV_VERSION, sizeof(info->version));
503 strlcpy(info->fw_version, "N/A", sizeof(info->fw_version));
504 strlcpy(info->bus_info, dev_name(&dev->dev), sizeof(info->bus_info));
505}
506
507static void bfin_mac_ethtool_getwol(struct net_device *dev,
508 struct ethtool_wolinfo *wolinfo)
509{
510 struct bfin_mac_local *lp = netdev_priv(dev);
511
512 wolinfo->supported = WAKE_MAGIC;
513 wolinfo->wolopts = lp->wol;
514}
515
516static int bfin_mac_ethtool_setwol(struct net_device *dev,
517 struct ethtool_wolinfo *wolinfo)
518{
519 struct bfin_mac_local *lp = netdev_priv(dev);
520 int rc;
521
522 if (wolinfo->wolopts & (WAKE_MAGICSECURE |
523 WAKE_UCAST |
524 WAKE_MCAST |
525 WAKE_BCAST |
526 WAKE_ARP))
527 return -EOPNOTSUPP;
528
529 lp->wol = wolinfo->wolopts;
530
531 if (lp->wol && !lp->irq_wake_requested) {
532 /* register wake irq handler */
533 rc = request_irq(IRQ_MAC_WAKEDET, bfin_mac_wake_interrupt,
534 IRQF_DISABLED, "EMAC_WAKE", dev);
535 if (rc)
536 return rc;
537 lp->irq_wake_requested = true;
538 }
539
540 if (!lp->wol && lp->irq_wake_requested) {
541 free_irq(IRQ_MAC_WAKEDET, dev);
542 lp->irq_wake_requested = false;
543 }
544
545 /* Make sure the PHY driver doesn't suspend */
546 device_init_wakeup(&dev->dev, lp->wol);
547
548 return 0;
549}
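/*
 * Usage note (sketch): from userspace, "ethtool -s ethX wol g" maps to
 * WAKE_MAGIC and ends up here, registering the EMAC_WAKE interrupt handler;
 * "ethtool -s ethX wol d" disables wake-on-LAN and releases it again.
 */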
550
551#ifdef CONFIG_BFIN_MAC_USE_HWSTAMP
552static int bfin_mac_ethtool_get_ts_info(struct net_device *dev,
553 struct ethtool_ts_info *info)
554{
555 struct bfin_mac_local *lp = netdev_priv(dev);
556
557 info->so_timestamping =
558 SOF_TIMESTAMPING_TX_HARDWARE |
559 SOF_TIMESTAMPING_RX_HARDWARE |
560 SOF_TIMESTAMPING_RAW_HARDWARE;
561 info->phc_index = lp->phc_index;
562 info->tx_types =
563 (1 << HWTSTAMP_TX_OFF) |
564 (1 << HWTSTAMP_TX_ON);
565 info->rx_filters =
566 (1 << HWTSTAMP_FILTER_NONE) |
567 (1 << HWTSTAMP_FILTER_PTP_V1_L4_EVENT) |
568 (1 << HWTSTAMP_FILTER_PTP_V2_L2_EVENT) |
569 (1 << HWTSTAMP_FILTER_PTP_V2_L4_EVENT);
570 return 0;
571}
572#endif
573
574static const struct ethtool_ops bfin_mac_ethtool_ops = {
575 .get_settings = bfin_mac_ethtool_getsettings,
576 .set_settings = bfin_mac_ethtool_setsettings,
577 .get_link = ethtool_op_get_link,
578 .get_drvinfo = bfin_mac_ethtool_getdrvinfo,
579 .get_wol = bfin_mac_ethtool_getwol,
580 .set_wol = bfin_mac_ethtool_setwol,
581#ifdef CONFIG_BFIN_MAC_USE_HWSTAMP
582 .get_ts_info = bfin_mac_ethtool_get_ts_info,
583#endif
584};
585
586/**************************************************************************/
587static void setup_system_regs(struct net_device *dev)
588{
589 struct bfin_mac_local *lp = netdev_priv(dev);
590 int i;
591 unsigned short sysctl;
592
593 /*
594 * Odd word alignment for Receive Frame DMA word
595 * Configure checksum support and receive frame word alignment
596 */
597 sysctl = bfin_read_EMAC_SYSCTL();
598 /*
599 * check if interrupt is requested for any PHY,
600 * enable PHY interrupt only if needed
601 */
602 for (i = 0; i < PHY_MAX_ADDR; ++i)
603 if (lp->mii_bus->irq[i] != PHY_POLL)
604 break;
605 if (i < PHY_MAX_ADDR)
606 sysctl |= PHYIE;
607 sysctl |= RXDWA;
608#if defined(BFIN_MAC_CSUM_OFFLOAD)
609 sysctl |= RXCKS;
610#else
611 sysctl &= ~RXCKS;
612#endif
613 bfin_write_EMAC_SYSCTL(sysctl);
614
615 bfin_write_EMAC_MMC_CTL(RSTC | CROLL);
616
617 /* Set vlan regs to let 1522 bytes long packets pass through */
618 bfin_write_EMAC_VLAN1(lp->vlan1_mask);
619 bfin_write_EMAC_VLAN2(lp->vlan2_mask);
620
621 /* Initialize the TX DMA channel registers */
622 bfin_write_DMA2_X_COUNT(0);
623 bfin_write_DMA2_X_MODIFY(4);
624 bfin_write_DMA2_Y_COUNT(0);
625 bfin_write_DMA2_Y_MODIFY(0);
626
627 /* Initialize the RX DMA channel registers */
628 bfin_write_DMA1_X_COUNT(0);
629 bfin_write_DMA1_X_MODIFY(4);
630 bfin_write_DMA1_Y_COUNT(0);
631 bfin_write_DMA1_Y_MODIFY(0);
632}
633
634static void setup_mac_addr(u8 *mac_addr)
635{
636 u32 addr_low = le32_to_cpu(*(__le32 *) & mac_addr[0]);
637 u16 addr_hi = le16_to_cpu(*(__le16 *) & mac_addr[4]);
638
639 /* this depends on a little-endian machine */
640 bfin_write_EMAC_ADDRLO(addr_low);
641 bfin_write_EMAC_ADDRHI(addr_hi);
642}
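/*
 * Worked example (hypothetical address 02:11:22:33:44:55): the little-endian
 * loads above yield EMAC_ADDRLO = 0x33221102 (bytes 0-3, with byte 0 in the
 * least significant position) and EMAC_ADDRHI = 0x5544 (bytes 4-5).
 */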
643
644static int bfin_mac_set_mac_address(struct net_device *dev, void *p)
645{
646 struct sockaddr *addr = p;
647 if (netif_running(dev))
648 return -EBUSY;
649 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
650 setup_mac_addr(dev->dev_addr);
651 return 0;
652}
653
654#ifdef CONFIG_BFIN_MAC_USE_HWSTAMP
655#define bfin_mac_hwtstamp_is_none(cfg) ((cfg) == HWTSTAMP_FILTER_NONE)
656
657static u32 bfin_select_phc_clock(u32 input_clk, unsigned int *shift_result)
658{
659 u32 ipn = 1000000000UL / input_clk;
660 u32 ppn = 1;
661 unsigned int shift = 0;
662
663 while (ppn <= ipn) {
664 ppn <<= 1;
665 shift++;
666 }
667 *shift_result = shift;
668 return 1000000000UL / ppn;
669}
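/*
 * Worked example (assuming input_clk = 100 MHz): ipn = 10 ns, so ppn doubles
 * 1 -> 2 -> 4 -> 8 -> 16 with shift ending at 4, and the function returns
 * 1000000000 / 16 = 62500000, i.e. a 62.5 MHz PHC counter whose raw value is
 * converted to nanoseconds by a left shift of 4.
 */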
670
671static int bfin_mac_hwtstamp_ioctl(struct net_device *netdev,
672 struct ifreq *ifr, int cmd)
673{
674 struct hwtstamp_config config;
675 struct bfin_mac_local *lp = netdev_priv(netdev);
676 u16 ptpctl;
677 u32 ptpfv1, ptpfv2, ptpfv3, ptpfoff;
678
679 if (copy_from_user(&config, ifr->ifr_data, sizeof(config)))
680 return -EFAULT;
681
682 pr_debug("%s config flag:0x%x, tx_type:0x%x, rx_filter:0x%x\n",
683 __func__, config.flags, config.tx_type, config.rx_filter);
684
685 /* reserved for future extensions */
686 if (config.flags)
687 return -EINVAL;
688
689 if ((config.tx_type != HWTSTAMP_TX_OFF) &&
690 (config.tx_type != HWTSTAMP_TX_ON))
691 return -ERANGE;
692
693 ptpctl = bfin_read_EMAC_PTP_CTL();
694
695 switch (config.rx_filter) {
696 case HWTSTAMP_FILTER_NONE:
697 /*
698 * Don't allow any timestamping
699 */
700 ptpfv3 = 0xFFFFFFFF;
701 bfin_write_EMAC_PTP_FV3(ptpfv3);
702 break;
703 case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
704 case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
705 case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
706 /*
707 * Clear the five comparison mask bits (bits[12:8]) in EMAC_PTP_CTL)
708 * to enable all the field matches.
709 */
710 ptpctl &= ~0x1F00;
711 bfin_write_EMAC_PTP_CTL(ptpctl);
712 /*
713 * Keep the default values of the EMAC_PTP_FOFF register.
714 */
715 ptpfoff = 0x4A24170C;
716 bfin_write_EMAC_PTP_FOFF(ptpfoff);
717 /*
718 * Keep the default values of the EMAC_PTP_FV1 and EMAC_PTP_FV2
719 * registers.
720 */
721 ptpfv1 = 0x11040800;
722 bfin_write_EMAC_PTP_FV1(ptpfv1);
723 ptpfv2 = 0x0140013F;
724 bfin_write_EMAC_PTP_FV2(ptpfv2);
725 /*
726 * The default value (0xFFFC) allows the timestamping of both
727 * received Sync messages and Delay_Req messages.
728 */
729 ptpfv3 = 0xFFFFFFFC;
730 bfin_write_EMAC_PTP_FV3(ptpfv3);
731
732 config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT;
733 break;
734 case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
735 case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
736 case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
737 /* Clear all five comparison mask bits (bits[12:8]) in the
738 * EMAC_PTP_CTL register to enable all the field matches.
739 */
740 ptpctl &= ~0x1F00;
741 bfin_write_EMAC_PTP_CTL(ptpctl);
742 /*
743 * Keep the default values of the EMAC_PTP_FOFF register, except set
744 * the PTPCOF field to 0x2A.
745 */
746 ptpfoff = 0x2A24170C;
747 bfin_write_EMAC_PTP_FOFF(ptpfoff);
748 /*
749 * Keep the default values of the EMAC_PTP_FV1 and EMAC_PTP_FV2
750 * registers.
751 */
752 ptpfv1 = 0x11040800;
753 bfin_write_EMAC_PTP_FV1(ptpfv1);
754 ptpfv2 = 0x0140013F;
755 bfin_write_EMAC_PTP_FV2(ptpfv2);
756 /*
757 * To allow the timestamping of Pdelay_Req and Pdelay_Resp, set
758 * the value to 0xFFF0.
759 */
760 ptpfv3 = 0xFFFFFFF0;
761 bfin_write_EMAC_PTP_FV3(ptpfv3);
762
763 config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_EVENT;
764 break;
765 case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
766 case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
767 case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
768 /*
769 * Clear bits 8 and 12 of the EMAC_PTP_CTL register to enable only the
770 * EFTM and PTPCM field comparison.
771 */
772 ptpctl &= ~0x1100;
773 bfin_write_EMAC_PTP_CTL(ptpctl);
774 /*
775 * Keep the default values of all the fields of the EMAC_PTP_FOFF
776 * register, except set the PTPCOF field to 0x0E.
777 */
778 ptpfoff = 0x0E24170C;
779 bfin_write_EMAC_PTP_FOFF(ptpfoff);
780 /*
781 * Program bits [15:0] of the EMAC_PTP_FV1 register to 0x88F7, which
782 * corresponds to PTP messages on the MAC layer.
783 */
784 ptpfv1 = 0x110488F7;
785 bfin_write_EMAC_PTP_FV1(ptpfv1);
786 ptpfv2 = 0x0140013F;
787 bfin_write_EMAC_PTP_FV2(ptpfv2);
788 /*
789 * To allow the timestamping of Pdelay_Req and Pdelay_Resp
790 * messages, set the value to 0xFFF0.
791 */
792 ptpfv3 = 0xFFFFFFF0;
793 bfin_write_EMAC_PTP_FV3(ptpfv3);
794
795 config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L2_EVENT;
796 break;
797 default:
798 return -ERANGE;
799 }
800
801 if (config.tx_type == HWTSTAMP_TX_OFF &&
802 bfin_mac_hwtstamp_is_none(config.rx_filter)) {
803 ptpctl &= ~PTP_EN;
804 bfin_write_EMAC_PTP_CTL(ptpctl);
805
806 SSYNC();
807 } else {
808 ptpctl |= PTP_EN;
809 bfin_write_EMAC_PTP_CTL(ptpctl);
810
811 /*
812 * clear any existing timestamp
813 */
814 bfin_read_EMAC_PTP_RXSNAPLO();
815 bfin_read_EMAC_PTP_RXSNAPHI();
816
817 bfin_read_EMAC_PTP_TXSNAPLO();
818 bfin_read_EMAC_PTP_TXSNAPHI();
819
820 SSYNC();
821 }
822
823 lp->stamp_cfg = config;
824 return copy_to_user(ifr->ifr_data, &config, sizeof(config)) ?
825 -EFAULT : 0;
826}
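/*
 * Userspace view (minimal sketch; "eth0", the socket fd and the chosen enum
 * values are illustrative): the filters handled above are selected with the
 * standard SIOCSHWTSTAMP ioctl:
 *
 *   struct hwtstamp_config cfg = {
 *       .tx_type   = HWTSTAMP_TX_ON,
 *       .rx_filter = HWTSTAMP_FILTER_PTP_V2_L2_EVENT,
 *   };
 *   struct ifreq ifr;
 *   memset(&ifr, 0, sizeof(ifr));
 *   strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);
 *   ifr.ifr_data = (void *)&cfg;
 *   ioctl(fd, SIOCSHWTSTAMP, &ifr);
 *
 * On return, cfg.rx_filter reports what was actually enabled; as the code
 * above shows, per-message filters are widened to the matching *_EVENT filter.
 */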
827
828static void bfin_tx_hwtstamp(struct net_device *netdev, struct sk_buff *skb)
829{
830 struct bfin_mac_local *lp = netdev_priv(netdev);
831
832 if (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) {
833 int timeout_cnt = MAX_TIMEOUT_CNT;
834
835 /* When doing time stamping, keep the connection to the socket
836 * a while longer
837 */
838 skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
839
840 /*
841 * The timestamping is done at the EMAC module's MII/RMII interface
842 * when the module sees the Start of Frame of an event message packet. This
843 * interface is the closest possible place to the physical Ethernet transmission
844 * medium, providing the best timing accuracy.
845 */
846 while ((!(bfin_read_EMAC_PTP_ISTAT() & TXTL)) && (--timeout_cnt))
847 udelay(1);
848 if (timeout_cnt == 0)
849 netdev_err(netdev, "timestamping the TX packet failed\n");
850 else {
851 struct skb_shared_hwtstamps shhwtstamps;
852 u64 ns;
853 u64 regval;
854
855 regval = bfin_read_EMAC_PTP_TXSNAPLO();
856 regval |= (u64)bfin_read_EMAC_PTP_TXSNAPHI() << 32;
857 memset(&shhwtstamps, 0, sizeof(shhwtstamps));
858 ns = regval << lp->shift;
859 shhwtstamps.hwtstamp = ns_to_ktime(ns);
860 skb_tstamp_tx(skb, &shhwtstamps);
861 }
862 }
863}
864
865static void bfin_rx_hwtstamp(struct net_device *netdev, struct sk_buff *skb)
866{
867 struct bfin_mac_local *lp = netdev_priv(netdev);
868 u32 valid;
869 u64 regval, ns;
870 struct skb_shared_hwtstamps *shhwtstamps;
871
872 if (bfin_mac_hwtstamp_is_none(lp->stamp_cfg.rx_filter))
873 return;
874
875 valid = bfin_read_EMAC_PTP_ISTAT() & RXEL;
876 if (!valid)
877 return;
878
879 shhwtstamps = skb_hwtstamps(skb);
880
881 regval = bfin_read_EMAC_PTP_RXSNAPLO();
882 regval |= (u64)bfin_read_EMAC_PTP_RXSNAPHI() << 32;
883 ns = regval << lp->shift;
884 memset(shhwtstamps, 0, sizeof(*shhwtstamps));
885 shhwtstamps->hwtstamp = ns_to_ktime(ns);
886}
887
888static void bfin_mac_hwtstamp_init(struct net_device *netdev)
889{
890 struct bfin_mac_local *lp = netdev_priv(netdev);
891 u64 addend, ppb;
892 u32 input_clk, phc_clk;
893
894 /* Initialize hardware timer */
895 input_clk = get_sclk();
896 phc_clk = bfin_select_phc_clock(input_clk, &lp->shift);
897 addend = phc_clk * (1ULL << 32);
898 do_div(addend, input_clk);
899 bfin_write_EMAC_PTP_ADDEND((u32)addend);
900
901 lp->addend = addend;
902 ppb = 1000000000ULL * input_clk;
903 do_div(ppb, phc_clk);
904 lp->max_ppb = ppb - 1000000000ULL - 1ULL;
905
906 /* Initialize hwstamp config */
907 lp->stamp_cfg.rx_filter = HWTSTAMP_FILTER_NONE;
908 lp->stamp_cfg.tx_type = HWTSTAMP_TX_OFF;
909}
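/*
 * Worked example, continuing the assumed 100 MHz input clock: with
 * phc_clk = 62500000, addend = 62500000 * 2^32 / 100000000 = 0xA0000000,
 * i.e. the ratio phc_clk/input_clk scaled by 2^32; ppb = 1000000000 *
 * 100000000 / 62500000 = 1600000000, so max_ppb = 599999999.
 */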
910
911static u64 bfin_ptp_time_read(struct bfin_mac_local *lp)
912{
913 u64 ns;
914 u32 lo, hi;
915
916 lo = bfin_read_EMAC_PTP_TIMELO();
917 hi = bfin_read_EMAC_PTP_TIMEHI();
918
919 ns = ((u64) hi) << 32;
920 ns |= lo;
921 ns <<= lp->shift;
922
923 return ns;
924}
925
926static void bfin_ptp_time_write(struct bfin_mac_local *lp, u64 ns)
927{
928 u32 hi, lo;
929
930 ns >>= lp->shift;
931 hi = ns >> 32;
932 lo = ns & 0xffffffff;
933
934 bfin_write_EMAC_PTP_TIMELO(lo);
935 bfin_write_EMAC_PTP_TIMEHI(hi);
936}
937
938/* PTP Hardware Clock operations */
939
940static int bfin_ptp_adjfreq(struct ptp_clock_info *ptp, s32 ppb)
941{
942 u64 adj;
943 u32 diff, addend;
944 int neg_adj = 0;
945 struct bfin_mac_local *lp =
946 container_of(ptp, struct bfin_mac_local, caps);
947
948 if (ppb < 0) {
949 neg_adj = 1;
950 ppb = -ppb;
951 }
952 addend = lp->addend;
953 adj = addend;
954 adj *= ppb;
955 diff = div_u64(adj, 1000000000ULL);
956
957 addend = neg_adj ? addend - diff : addend + diff;
958
959 bfin_write_EMAC_PTP_ADDEND(addend);
960
961 return 0;
962}
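/*
 * Worked example, continuing the numbers above: for a requested correction of
 * ppb = +1000 with lp->addend = 0xA0000000 (2684354560), diff =
 * 2684354560 * 1000 / 1000000000 = 2684, so the register is rewritten with
 * 2684354560 + 2684 = 2684357244, speeding the clock up by 1 ppm.
 */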
963
964static int bfin_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta)
965{
966 s64 now;
967 unsigned long flags;
968 struct bfin_mac_local *lp =
969 container_of(ptp, struct bfin_mac_local, caps);
970
971 spin_lock_irqsave(&lp->phc_lock, flags);
972
973 now = bfin_ptp_time_read(lp);
974 now += delta;
975 bfin_ptp_time_write(lp, now);
976
977 spin_unlock_irqrestore(&lp->phc_lock, flags);
978
979 return 0;
980}
981
982static int bfin_ptp_gettime(struct ptp_clock_info *ptp, struct timespec *ts)
983{
984 u64 ns;
985 u32 remainder;
986 unsigned long flags;
987 struct bfin_mac_local *lp =
988 container_of(ptp, struct bfin_mac_local, caps);
989
990 spin_lock_irqsave(&lp->phc_lock, flags);
991
992 ns = bfin_ptp_time_read(lp);
993
994 spin_unlock_irqrestore(&lp->phc_lock, flags);
995
996 ts->tv_sec = div_u64_rem(ns, 1000000000, &remainder);
997 ts->tv_nsec = remainder;
998 return 0;
999}
1000
1001static int bfin_ptp_settime(struct ptp_clock_info *ptp,
1002 const struct timespec *ts)
1003{
1004 u64 ns;
1005 unsigned long flags;
1006 struct bfin_mac_local *lp =
1007 container_of(ptp, struct bfin_mac_local, caps);
1008
1009 ns = ts->tv_sec * 1000000000ULL;
1010 ns += ts->tv_nsec;
1011
1012 spin_lock_irqsave(&lp->phc_lock, flags);
1013
1014 bfin_ptp_time_write(lp, ns);
1015
1016 spin_unlock_irqrestore(&lp->phc_lock, flags);
1017
1018 return 0;
1019}
1020
1021static int bfin_ptp_enable(struct ptp_clock_info *ptp,
1022 struct ptp_clock_request *rq, int on)
1023{
1024 return -EOPNOTSUPP;
1025}
1026
1027static struct ptp_clock_info bfin_ptp_caps = {
1028 .owner = THIS_MODULE,
1029 .name = "BF518 clock",
1030 .max_adj = 0,
1031 .n_alarm = 0,
1032 .n_ext_ts = 0,
1033 .n_per_out = 0,
1034 .pps = 0,
1035 .adjfreq = bfin_ptp_adjfreq,
1036 .adjtime = bfin_ptp_adjtime,
1037 .gettime = bfin_ptp_gettime,
1038 .settime = bfin_ptp_settime,
1039 .enable = bfin_ptp_enable,
1040};
1041
1042static int bfin_phc_init(struct net_device *netdev, struct device *dev)
1043{
1044 struct bfin_mac_local *lp = netdev_priv(netdev);
1045
1046 lp->caps = bfin_ptp_caps;
1047 lp->caps.max_adj = lp->max_ppb;
1048 lp->clock = ptp_clock_register(&lp->caps, dev);
1049 if (IS_ERR(lp->clock))
1050 return PTR_ERR(lp->clock);
1051
1052 lp->phc_index = ptp_clock_index(lp->clock);
1053 spin_lock_init(&lp->phc_lock);
1054
1055 return 0;
1056}
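/*
 * Usage note (sketch): once registered, the clock shows up as /dev/ptpN with
 * N = lp->phc_index (also reported through "ethtool -T ethX") and can be
 * exercised with the kernel's testptp tool, e.g. "testptp -d /dev/ptp0 -g".
 */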
1057
1058static void bfin_phc_release(struct bfin_mac_local *lp)
1059{
1060 ptp_clock_unregister(lp->clock);
1061}
1062
1063#else
1064# define bfin_mac_hwtstamp_is_none(cfg) 0
1065# define bfin_mac_hwtstamp_init(dev)
1066# define bfin_mac_hwtstamp_ioctl(dev, ifr, cmd) (-EOPNOTSUPP)
1067# define bfin_rx_hwtstamp(dev, skb)
1068# define bfin_tx_hwtstamp(dev, skb)
1069# define bfin_phc_init(netdev, dev) 0
1070# define bfin_phc_release(lp)
1071#endif
1072
1073static inline void _tx_reclaim_skb(void)
1074{
1075 do {
1076 tx_list_head->desc_a.config &= ~DMAEN;
1077 tx_list_head->status.status_word = 0;
1078 if (tx_list_head->skb) {
1079 dev_kfree_skb(tx_list_head->skb);
1080 tx_list_head->skb = NULL;
1081 }
1082 tx_list_head = tx_list_head->next;
1083
1084 } while (tx_list_head->status.status_word != 0);
1085}
1086
1087static void tx_reclaim_skb(struct bfin_mac_local *lp)
1088{
1089 int timeout_cnt = MAX_TIMEOUT_CNT;
1090
1091 if (tx_list_head->status.status_word != 0)
1092 _tx_reclaim_skb();
1093
1094 if (current_tx_ptr->next == tx_list_head) {
1095 while (tx_list_head->status.status_word == 0) {
1096 /* slow down polling to avoid stopping the queue too often. */
1097 udelay(10);
1098 /* reclaim skb if DMA is not running. */
1099 if (!(bfin_read_DMA2_IRQ_STATUS() & DMA_RUN))
1100 break;
1101 if (timeout_cnt-- < 0)
1102 break;
1103 }
1104
1105 if (timeout_cnt >= 0)
1106 _tx_reclaim_skb();
1107 else
1108 netif_stop_queue(lp->ndev);
1109 }
1110
1111 if (current_tx_ptr->next != tx_list_head &&
1112 netif_queue_stopped(lp->ndev))
1113 netif_wake_queue(lp->ndev);
1114
1115 if (tx_list_head != current_tx_ptr) {
1116 /* shorten the timer interval if tx queue is stopped */
1117 if (netif_queue_stopped(lp->ndev))
1118 lp->tx_reclaim_timer.expires =
1119 jiffies + (TX_RECLAIM_JIFFIES >> 4);
1120 else
1121 lp->tx_reclaim_timer.expires =
1122 jiffies + TX_RECLAIM_JIFFIES;
1123
1124 mod_timer(&lp->tx_reclaim_timer,
1125 lp->tx_reclaim_timer.expires);
1126 }
1127
1128 return;
1129}
1130
1131static void tx_reclaim_skb_timeout(unsigned long lp)
1132{
1133 tx_reclaim_skb((struct bfin_mac_local *)lp);
1134}
1135
1136static int bfin_mac_hard_start_xmit(struct sk_buff *skb,
1137 struct net_device *dev)
1138{
1139 struct bfin_mac_local *lp = netdev_priv(dev);
1140 u16 *data;
1141 u32 data_align = (unsigned long)(skb->data) & 0x3;
1142
1143 current_tx_ptr->skb = skb;
1144
1145 if (data_align == 0x2) {
1146 /* move skb->data to current_tx_ptr payload */
1147 data = (u16 *)(skb->data) - 1;
1148 *data = (u16)(skb->len);
1149 /*
1150 * When transmitting an Ethernet packet, the PTP_TSYNC module requires
1151 * a DMA_Length_Word field associated with the packet. The lower 12 bits
1152 * of this field are the length of the packet payload in bytes and the higher
1153 * 4 bits are the timestamping enable field.
1154 */
1155 if (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)
1156 *data |= 0x1000;
1157
1158 current_tx_ptr->desc_a.start_addr = (u32)data;
1159 /* this is important! */
1160 blackfin_dcache_flush_range((u32)data,
1161 (u32)((u8 *)data + skb->len + 4));
1162 } else {
1163 *((u16 *)(current_tx_ptr->packet)) = (u16)(skb->len);
1164 /* enable timestamping for the sent packet */
1165 if (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)
1166 *((u16 *)(current_tx_ptr->packet)) |= 0x1000;
1167 memcpy((u8 *)(current_tx_ptr->packet + 2), skb->data,
1168 skb->len);
1169 current_tx_ptr->desc_a.start_addr =
1170 (u32)current_tx_ptr->packet;
1171 blackfin_dcache_flush_range(
1172 (u32)current_tx_ptr->packet,
1173 (u32)(current_tx_ptr->packet + skb->len + 2));
1174 }
1175
1176 /* make sure the internal data buffers in the core are drained
1177 * so that the DMA descriptors are completely written when the
1178 * DMA engine goes to fetch them below
1179 */
1180 SSYNC();
1181
1182 /* always clear status buffer before start tx dma */
1183 current_tx_ptr->status.status_word = 0;
1184
1185 /* enable this packet's dma */
1186 current_tx_ptr->desc_a.config |= DMAEN;
1187
1188 /* tx dma is running, just return */
1189 if (bfin_read_DMA2_IRQ_STATUS() & DMA_RUN)
1190 goto out;
1191
1192 /* tx dma is not running */
1193 bfin_write_DMA2_NEXT_DESC_PTR(&(current_tx_ptr->desc_a));
1194 /* dma enabled, read from memory, size is 6 */
1195 bfin_write_DMA2_CONFIG(current_tx_ptr->desc_a.config);
1196 /* Turn on the EMAC tx */
1197 bfin_write_EMAC_OPMODE(bfin_read_EMAC_OPMODE() | TE);
1198
1199out:
1200 bfin_tx_hwtstamp(dev, skb);
1201
1202 current_tx_ptr = current_tx_ptr->next;
1203 dev->stats.tx_packets++;
1204 dev->stats.tx_bytes += (skb->len);
1205
1206 tx_reclaim_skb(lp);
1207
1208 return NETDEV_TX_OK;
1209}
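/*
 * Worked example of the DMA_Length_Word written above (values illustrative):
 * for a 60-byte frame the low 12 bits hold 60 (0x03C); if SKBTX_HW_TSTAMP is
 * set, bit 12 is ORed in and the word becomes 0x103C, asking the PTP_TSYNC
 * block to timestamp this frame on transmit.
 */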
1210
1211#define IP_HEADER_OFF 0
1212#define RX_ERROR_MASK (RX_LONG | RX_ALIGN | RX_CRC | RX_LEN | \
1213 RX_FRAG | RX_ADDR | RX_DMAO | RX_PHY | RX_LATE | RX_RANGE)
1214
1215static void bfin_mac_rx(struct net_device *dev)
1216{
1217 struct sk_buff *skb, *new_skb;
1218 unsigned short len;
1219 struct bfin_mac_local *lp __maybe_unused = netdev_priv(dev);
1220#if defined(BFIN_MAC_CSUM_OFFLOAD)
1221 unsigned int i;
1222 unsigned char fcs[ETH_FCS_LEN + 1];
1223#endif
1224
1225 /* check if frame status word reports an error condition
1226 * in which case we simply drop the packet
1227 */
1228 if (current_rx_ptr->status.status_word & RX_ERROR_MASK) {
1229 netdev_notice(dev, "rx: receive error - packet dropped\n");
1230 dev->stats.rx_dropped++;
1231 goto out;
1232 }
1233
1234 /* allocate a new skb for the next receive */
1235 skb = current_rx_ptr->skb;
1236
1237 new_skb = netdev_alloc_skb(dev, PKT_BUF_SZ + NET_IP_ALIGN);
1238 if (!new_skb) {
1239 netdev_notice(dev, "rx: low on mem - packet dropped\n");
1240 dev->stats.rx_dropped++;
1241 goto out;
1242 }
1243 /* reserve 2 bytes for RXDWA padding */
1244 skb_reserve(new_skb, NET_IP_ALIGN);
1245 /* Invalidate the data cache over the skb->data range when the cache
1246 * is write-back, so stale lines don't overwrite the new data written by DMA
1247 */
1248 blackfin_dcache_invalidate_range((unsigned long)new_skb->head,
1249 (unsigned long)new_skb->end);
1250
1251 current_rx_ptr->skb = new_skb;
1252 current_rx_ptr->desc_a.start_addr = (unsigned long)new_skb->data - 2;
1253
1254 len = (unsigned short)((current_rx_ptr->status.status_word) & RX_FRLEN);
1255 /* Deduce Ethernet FCS length from Ethernet payload length */
1256 len -= ETH_FCS_LEN;
1257 skb_put(skb, len);
1258
1259 skb->protocol = eth_type_trans(skb, dev);
1260
1261 bfin_rx_hwtstamp(dev, skb);
1262
1263#if defined(BFIN_MAC_CSUM_OFFLOAD)
1264 /* Checksum offloading only works for IPv4 packets with the standard IP header
1265 * length of 20 bytes, because the blackfin MAC checksum calculation is
1266 * based on that assumption. We must NOT use the calculated checksum if our
1267 * IP version or header length breaks that assumption.
1268 */
1269 if (skb->data[IP_HEADER_OFF] == 0x45) {
1270 skb->csum = current_rx_ptr->status.ip_payload_csum;
1271 /*
1272 * Deduce Ethernet FCS from hardware generated IP payload checksum.
1273 * IP checksum is based on 16-bit one's complement algorithm.
1274 * Removing a value from the checksum is equivalent to adding its inversion.
1275 * If the IP payload length is odd, the inverted FCS must also
1276 * start at an odd offset, with the first byte left zero.
1277 */
1278 if (skb->len % 2) {
1279 fcs[0] = 0;
1280 for (i = 0; i < ETH_FCS_LEN; i++)
1281 fcs[i + 1] = ~skb->data[skb->len + i];
1282 skb->csum = csum_partial(fcs, ETH_FCS_LEN + 1, skb->csum);
1283 } else {
1284 for (i = 0; i < ETH_FCS_LEN; i++)
1285 fcs[i] = ~skb->data[skb->len + i];
1286 skb->csum = csum_partial(fcs, ETH_FCS_LEN, skb->csum);
1287 }
1288 skb->ip_summed = CHECKSUM_COMPLETE;
1289 }
1290#endif
1291
1292 netif_rx(skb);
1293 dev->stats.rx_packets++;
1294 dev->stats.rx_bytes += len;
1295out:
1296 current_rx_ptr->status.status_word = 0x00000000;
1297 current_rx_ptr = current_rx_ptr->next;
1298}
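/*
 * Why adding the inverted FCS removes it (sketch): in 16-bit one's-complement
 * arithmetic a word x and its inversion ~x sum to 0xFFFF, the one's-complement
 * zero, so folding ~x into a checksum cancels an earlier contribution of x.
 * E.g. if 0x1234 of FCS data was summed by hardware, folding in 0xEDCB
 * neutralises it. The leading zero byte in the odd-length case keeps the
 * inverted FCS bytes on the same byte lanes as the originals.
 */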
1299
1300/* interrupt routine to handle rx and error signal */
1301static irqreturn_t bfin_mac_interrupt(int irq, void *dev_id)
1302{
1303 struct net_device *dev = dev_id;
1304 int number = 0;
1305
1306get_one_packet:
1307 if (current_rx_ptr->status.status_word == 0) {
1308 /* no more new packet received */
1309 if (number == 0) {
1310 if (current_rx_ptr->next->status.status_word != 0) {
1311 current_rx_ptr = current_rx_ptr->next;
1312 goto real_rx;
1313 }
1314 }
1315 bfin_write_DMA1_IRQ_STATUS(bfin_read_DMA1_IRQ_STATUS() |
1316 DMA_DONE | DMA_ERR);
1317 return IRQ_HANDLED;
1318 }
1319
1320real_rx:
1321 bfin_mac_rx(dev);
1322 number++;
1323 goto get_one_packet;
1324}
1325
1326#ifdef CONFIG_NET_POLL_CONTROLLER
1327static void bfin_mac_poll(struct net_device *dev)
1328{
1329 struct bfin_mac_local *lp = netdev_priv(dev);
1330
1331 disable_irq(IRQ_MAC_RX);
1332 bfin_mac_interrupt(IRQ_MAC_RX, dev);
1333 tx_reclaim_skb(lp);
1334 enable_irq(IRQ_MAC_RX);
1335}
1336#endif /* CONFIG_NET_POLL_CONTROLLER */
1337
1338static void bfin_mac_disable(void)
1339{
1340 unsigned int opmode;
1341
1342 opmode = bfin_read_EMAC_OPMODE();
1343 opmode &= (~RE);
1344 opmode &= (~TE);
1345 /* Turn off the EMAC */
1346 bfin_write_EMAC_OPMODE(opmode);
1347}
1348
1349/*
1350 * Enable Interrupts, Receive, and Transmit
1351 */
1352static int bfin_mac_enable(struct phy_device *phydev)
1353{
1354 int ret;
1355 u32 opmode;
1356
1357 pr_debug("%s\n", __func__);
1358
1359 /* Set RX DMA */
1360 bfin_write_DMA1_NEXT_DESC_PTR(&(rx_list_head->desc_a));
1361 bfin_write_DMA1_CONFIG(rx_list_head->desc_a.config);
1362
1363 /* Wait MII done */
1364 ret = bfin_mdio_poll();
1365 if (ret)
1366 return ret;
1367
1368 /* We enable only RX here */
1369 /* ASTP : Enable Automatic Pad Stripping
1370 PR : Promiscuous Mode for test
1371 PSF : Receive frames with total length less than 64 bytes.
1372 FDMODE : Full Duplex Mode
1373 LB : Internal Loopback for test
1374 RE : Receiver Enable */
1375 opmode = bfin_read_EMAC_OPMODE();
1376 if (opmode & FDMODE)
1377 opmode |= PSF;
1378 else
1379 opmode |= DRO | DC | PSF;
1380 opmode |= RE;
1381
1382 if (phydev->interface == PHY_INTERFACE_MODE_RMII) {
1383 opmode |= RMII; /* For Now only 100MBit are supported */
1384#if defined(CONFIG_BF537) || defined(CONFIG_BF536)
1385 if (__SILICON_REVISION__ < 3) {
1386 /*
1387 * This isn't publicly documented (fun times!), but in
1388 * silicon <=0.2, the RX and TX pins are clocked together.
1389 * So in order to recv, we must enable the transmit side
1390 * as well. This will cause a spurious TX interrupt too,
1391 * but we can easily consume that.
1392 */
1393 opmode |= TE;
1394 }
1395#endif
1396 }
1397
1398 /* Turn on the EMAC rx */
1399 bfin_write_EMAC_OPMODE(opmode);
1400
1401 return 0;
1402}
1403
1404/* Our watchdog timed out. Called by the networking layer */
1405static void bfin_mac_timeout(struct net_device *dev)
1406{
1407 struct bfin_mac_local *lp = netdev_priv(dev);
1408
1409 pr_debug("%s: %s\n", dev->name, __func__);
1410
1411 bfin_mac_disable();
1412
1413 del_timer(&lp->tx_reclaim_timer);
1414
1415 /* reset tx queue and free skb */
1416 while (tx_list_head != current_tx_ptr) {
1417 tx_list_head->desc_a.config &= ~DMAEN;
1418 tx_list_head->status.status_word = 0;
1419 if (tx_list_head->skb) {
1420 dev_kfree_skb(tx_list_head->skb);
1421 tx_list_head->skb = NULL;
1422 }
1423 tx_list_head = tx_list_head->next;
1424 }
1425
1426 if (netif_queue_stopped(lp->ndev))
1427 netif_wake_queue(lp->ndev);
1428
1429 bfin_mac_enable(lp->phydev);
1430
1431 /* We can accept TX packets again */
1432 dev->trans_start = jiffies; /* prevent tx timeout */
1433 netif_wake_queue(dev);
1434}
1435
1436static void bfin_mac_multicast_hash(struct net_device *dev)
1437{
1438 u32 emac_hashhi, emac_hashlo;
1439 struct netdev_hw_addr *ha;
1440 u32 crc;
1441
1442 emac_hashhi = emac_hashlo = 0;
1443
1444 netdev_for_each_mc_addr(ha, dev) {
1445 crc = ether_crc(ETH_ALEN, ha->addr);
1446 crc >>= 26;
1447
1448 if (crc & 0x20)
1449 emac_hashhi |= 1 << (crc & 0x1f);
1450 else
1451 emac_hashlo |= 1 << (crc & 0x1f);
1452 }
1453
1454 bfin_write_EMAC_HASHHI(emac_hashhi);
1455 bfin_write_EMAC_HASHLO(emac_hashlo);
1456}
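/*
 * Worked example (the CRC value is hypothetical): crc >> 26 leaves a 6-bit
 * index, say 0x2A; bit 5 is set, so the address lands in EMAC_HASHHI at bit
 * position 0x2A & 0x1f = 10. An index without bit 5, say 0x0A, would set bit
 * 10 of EMAC_HASHLO instead.
 */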
1457
1458/*
1459 * This routine will, depending on the values passed to it,
1460 * either make it accept multicast packets, go into
1461 * promiscuous mode (for TCPDUMP and cousins) or accept
1462 * a select set of multicast packets
1463 */
1464static void bfin_mac_set_multicast_list(struct net_device *dev)
1465{
1466 u32 sysctl;
1467
1468 if (dev->flags & IFF_PROMISC) {
1469 netdev_info(dev, "set promisc mode\n");
1470 sysctl = bfin_read_EMAC_OPMODE();
1471 sysctl |= PR;
1472 bfin_write_EMAC_OPMODE(sysctl);
1473 } else if (dev->flags & IFF_ALLMULTI) {
1474 /* accept all multicast */
1475 sysctl = bfin_read_EMAC_OPMODE();
1476 sysctl |= PAM;
1477 bfin_write_EMAC_OPMODE(sysctl);
1478 } else if (!netdev_mc_empty(dev)) {
1479 /* set up multicast hash table */
1480 sysctl = bfin_read_EMAC_OPMODE();
1481 sysctl |= HM;
1482 bfin_write_EMAC_OPMODE(sysctl);
1483 bfin_mac_multicast_hash(dev);
1484 } else {
1485 /* clear promisc or multicast mode */
1486 sysctl = bfin_read_EMAC_OPMODE();
1487 sysctl &= ~(RAF | PAM);
1488 bfin_write_EMAC_OPMODE(sysctl);
1489 }
1490}
1491
1492static int bfin_mac_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
1493{
1494 struct bfin_mac_local *lp = netdev_priv(netdev);
1495
1496 if (!netif_running(netdev))
1497 return -EINVAL;
1498
1499 switch (cmd) {
1500 case SIOCSHWTSTAMP:
1501 return bfin_mac_hwtstamp_ioctl(netdev, ifr, cmd);
1502 default:
1503 if (lp->phydev)
1504 return phy_mii_ioctl(lp->phydev, ifr, cmd);
1505 else
1506 return -EOPNOTSUPP;
1507 }
1508}
1509
1510/*
1511 * this puts the device in an inactive state
1512 */
1513static void bfin_mac_shutdown(struct net_device *dev)
1514{
1515 /* Turn off the EMAC */
1516 bfin_write_EMAC_OPMODE(0x00000000);
1517 /* Turn off the EMAC RX DMA */
1518 bfin_write_DMA1_CONFIG(0x0000);
1519 bfin_write_DMA2_CONFIG(0x0000);
1520}
1521
1522/*
1523 * Open and Initialize the interface
1524 *
1525 * Set up everything, reset the card, etc..
1526 */
1527static int bfin_mac_open(struct net_device *dev)
1528{
1529 struct bfin_mac_local *lp = netdev_priv(dev);
1530 int ret;
1531 pr_debug("%s: %s\n", dev->name, __func__);
1532
1533 /*
1534 * Check that the address is valid. If it's not, refuse
1535 * to bring the device up. The user must specify an
1536 * address using ifconfig eth0 hw ether xx:xx:xx:xx:xx:xx
1537 */
1538 if (!is_valid_ether_addr(dev->dev_addr)) {
1539 netdev_warn(dev, "no valid ethernet hw addr\n");
1540 return -EINVAL;
1541 }
1542
1543 /* initial rx and tx list */
1544 ret = desc_list_init(dev);
1545 if (ret)
1546 return ret;
1547
1548 phy_start(lp->phydev);
1549 phy_write(lp->phydev, MII_BMCR, BMCR_RESET);
1550 setup_system_regs(dev);
1551 setup_mac_addr(dev->dev_addr);
1552
1553 bfin_mac_disable();
1554 ret = bfin_mac_enable(lp->phydev);
1555 if (ret)
1556 return ret;
1557 pr_debug("hardware init finished\n");
1558
1559 netif_start_queue(dev);
1560 netif_carrier_on(dev);
1561
1562 return 0;
1563}
1564
1565/*
1566 * this makes the board clean up everything that it can
1567 * and not talk to the outside world. Caused by
1568 * an 'ifconfig ethX down'
1569 */
1570static int bfin_mac_close(struct net_device *dev)
1571{
1572 struct bfin_mac_local *lp = netdev_priv(dev);
1573 pr_debug("%s: %s\n", dev->name, __func__);
1574
1575 netif_stop_queue(dev);
1576 netif_carrier_off(dev);
1577
1578 phy_stop(lp->phydev);
1579 phy_write(lp->phydev, MII_BMCR, BMCR_PDOWN);
1580
1581 /* clear everything */
1582 bfin_mac_shutdown(dev);
1583
1584 /* free the rx/tx buffers */
1585 desc_list_free();
1586
1587 return 0;
1588}
1589
1590static const struct net_device_ops bfin_mac_netdev_ops = {
1591 .ndo_open = bfin_mac_open,
1592 .ndo_stop = bfin_mac_close,
1593 .ndo_start_xmit = bfin_mac_hard_start_xmit,
1594 .ndo_set_mac_address = bfin_mac_set_mac_address,
1595 .ndo_tx_timeout = bfin_mac_timeout,
1596 .ndo_set_rx_mode = bfin_mac_set_multicast_list,
1597 .ndo_do_ioctl = bfin_mac_ioctl,
1598 .ndo_validate_addr = eth_validate_addr,
1599 .ndo_change_mtu = eth_change_mtu,
1600#ifdef CONFIG_NET_POLL_CONTROLLER
1601 .ndo_poll_controller = bfin_mac_poll,
1602#endif
1603};
1604
1605static int bfin_mac_probe(struct platform_device *pdev)
1606{
1607 struct net_device *ndev;
1608 struct bfin_mac_local *lp;
1609 struct platform_device *pd;
1610 struct bfin_mii_bus_platform_data *mii_bus_data;
1611 int rc;
1612
1613 ndev = alloc_etherdev(sizeof(struct bfin_mac_local));
1614 if (!ndev)
1615 return -ENOMEM;
1616
1617 SET_NETDEV_DEV(ndev, &pdev->dev);
1618 platform_set_drvdata(pdev, ndev);
1619 lp = netdev_priv(ndev);
1620 lp->ndev = ndev;
1621
1622 /* Grab the MAC address in the MAC */
1623 *(__le32 *) (&(ndev->dev_addr[0])) = cpu_to_le32(bfin_read_EMAC_ADDRLO());
1624 *(__le16 *) (&(ndev->dev_addr[4])) = cpu_to_le16((u16) bfin_read_EMAC_ADDRHI());
1625
1626 /* probe mac */
1627 /* TODO: how to probe this properly? Which register holds the revision? */
1628 bfin_write_EMAC_ADDRLO(0x12345678);
1629 if (bfin_read_EMAC_ADDRLO() != 0x12345678) {
1630 dev_err(&pdev->dev, "Cannot detect Blackfin on-chip ethernet MAC controller!\n");
1631 rc = -ENODEV;
1632 goto out_err_probe_mac;
1633 }
1634
1635
1636 /*
1637 * Is it valid? (Did bootloader initialize it?)
1638 * Grab the MAC from the board somehow
1639 * this is done in the arch/blackfin/mach-bfxxx/boards/eth_mac.c
1640 */
1641 if (!is_valid_ether_addr(ndev->dev_addr)) {
1642 if (bfin_get_ether_addr(ndev->dev_addr) ||
1643 !is_valid_ether_addr(ndev->dev_addr)) {
1644 /* Still not valid, get a random one */
1645 netdev_warn(ndev, "Setting Ethernet MAC to a random one\n");
1646 eth_hw_addr_random(ndev);
1647 }
1648 }
1649
1650 setup_mac_addr(ndev->dev_addr);
1651
1652 if (!pdev->dev.platform_data) {
1653 dev_err(&pdev->dev, "Cannot get platform device bfin_mii_bus!\n");
1654 rc = -ENODEV;
1655 goto out_err_probe_mac;
1656 }
1657 pd = pdev->dev.platform_data;
1658 lp->mii_bus = platform_get_drvdata(pd);
1659 if (!lp->mii_bus) {
1660 dev_err(&pdev->dev, "Cannot get mii_bus!\n");
1661 rc = -ENODEV;
1662 goto out_err_probe_mac;
1663 }
1664 lp->mii_bus->priv = ndev;
1665 mii_bus_data = pd->dev.platform_data;
1666
1667 rc = mii_probe(ndev, mii_bus_data->phy_mode);
1668 if (rc) {
1669 dev_err(&pdev->dev, "MII Probe failed!\n");
1670 goto out_err_mii_probe;
1671 }
1672
1673 lp->vlan1_mask = ETH_P_8021Q | mii_bus_data->vlan1_mask;
1674 lp->vlan2_mask = ETH_P_8021Q | mii_bus_data->vlan2_mask;
1675
1676 /* Fill in the fields of the device structure with ethernet values. */
1677 ether_setup(ndev);
1678
1679 ndev->netdev_ops = &bfin_mac_netdev_ops;
1680 ndev->ethtool_ops = &bfin_mac_ethtool_ops;
1681
1682 init_timer(&lp->tx_reclaim_timer);
1683 lp->tx_reclaim_timer.data = (unsigned long)lp;
1684 lp->tx_reclaim_timer.function = tx_reclaim_skb_timeout;
1685
1686 spin_lock_init(&lp->lock);
1687
1688 /* now, enable interrupts */
1689 /* register irq handler */
1690 rc = request_irq(IRQ_MAC_RX, bfin_mac_interrupt,
1691 IRQF_DISABLED, "EMAC_RX", ndev);
1692 if (rc) {
1693 dev_err(&pdev->dev, "Cannot request Blackfin MAC RX IRQ!\n");
1694 rc = -EBUSY;
1695 goto out_err_request_irq;
1696 }
1697
1698 rc = register_netdev(ndev);
1699 if (rc) {
1700 dev_err(&pdev->dev, "Cannot register net device!\n");
1701 goto out_err_reg_ndev;
1702 }
1703
1704 bfin_mac_hwtstamp_init(ndev);
1705 if (bfin_phc_init(ndev, &pdev->dev)) {
1706 dev_err(&pdev->dev, "Cannot register PHC device!\n");
1707 goto out_err_phc;
1708 }
1709
1710 /* now, print out the card info, in a short format.. */
1711 netdev_info(ndev, "%s, Version %s\n", DRV_DESC, DRV_VERSION);
1712
1713 return 0;
1714
1715out_err_phc:
1716out_err_reg_ndev:
1717 free_irq(IRQ_MAC_RX, ndev);
1718out_err_request_irq:
1719out_err_mii_probe:
1720 mdiobus_unregister(lp->mii_bus);
1721 mdiobus_free(lp->mii_bus);
1722out_err_probe_mac:
1723 platform_set_drvdata(pdev, NULL);
1724 free_netdev(ndev);
1725
1726 return rc;
1727}
1728
1729static int bfin_mac_remove(struct platform_device *pdev)
1730{
1731 struct net_device *ndev = platform_get_drvdata(pdev);
1732 struct bfin_mac_local *lp = netdev_priv(ndev);
1733
1734 bfin_phc_release(lp);
1735
1736 platform_set_drvdata(pdev, NULL);
1737
1738 lp->mii_bus->priv = NULL;
1739
1740 unregister_netdev(ndev);
1741
1742 free_irq(IRQ_MAC_RX, ndev);
1743
1744 free_netdev(ndev);
1745
1746 return 0;
1747}
1748
1749#ifdef CONFIG_PM
1750static int bfin_mac_suspend(struct platform_device *pdev, pm_message_t mesg)
1751{
1752 struct net_device *net_dev = platform_get_drvdata(pdev);
1753 struct bfin_mac_local *lp = netdev_priv(net_dev);
1754
1755 if (lp->wol) {
1756 bfin_write_EMAC_OPMODE((bfin_read_EMAC_OPMODE() & ~TE) | RE);
1757 bfin_write_EMAC_WKUP_CTL(MPKE);
1758 enable_irq_wake(IRQ_MAC_WAKEDET);
1759 } else {
1760 if (netif_running(net_dev))
1761 bfin_mac_close(net_dev);
1762 }
1763
1764 return 0;
1765}
1766
1767static int bfin_mac_resume(struct platform_device *pdev)
1768{
1769 struct net_device *net_dev = platform_get_drvdata(pdev);
1770 struct bfin_mac_local *lp = netdev_priv(net_dev);
1771
1772 if (lp->wol) {
1773 bfin_write_EMAC_OPMODE(bfin_read_EMAC_OPMODE() | TE);
1774 bfin_write_EMAC_WKUP_CTL(0);
1775 disable_irq_wake(IRQ_MAC_WAKEDET);
1776 } else {
1777 if (netif_running(net_dev))
1778 bfin_mac_open(net_dev);
1779 }
1780
1781 return 0;
1782}
1783#else
1784#define bfin_mac_suspend NULL
1785#define bfin_mac_resume NULL
1786#endif /* CONFIG_PM */
1787
1788static int bfin_mii_bus_probe(struct platform_device *pdev)
1789{
1790 struct mii_bus *miibus;
1791 struct bfin_mii_bus_platform_data *mii_bus_pd;
1792 const unsigned short *pin_req;
1793 int rc, i;
1794
1795 mii_bus_pd = dev_get_platdata(&pdev->dev);
1796 if (!mii_bus_pd) {
1797 dev_err(&pdev->dev, "No peripherals in platform data!\n");
1798 return -EINVAL;
1799 }
1800
1801 /*
1802 * We are setting up a network card,
1803 * so set the GPIO pins to Ethernet mode
1804 */
1805 pin_req = mii_bus_pd->mac_peripherals;
1806 rc = peripheral_request_list(pin_req, KBUILD_MODNAME);
1807 if (rc) {
1808 dev_err(&pdev->dev, "Requesting peripherals failed!\n");
1809 return rc;
1810 }
1811
1812 rc = -ENOMEM;
1813 miibus = mdiobus_alloc();
1814 if (miibus == NULL)
1815 goto out_err_alloc;
1816 miibus->read = bfin_mdiobus_read;
1817 miibus->write = bfin_mdiobus_write;
1818 miibus->reset = bfin_mdiobus_reset;
1819
1820 miibus->parent = &pdev->dev;
1821 miibus->name = "bfin_mii_bus";
1822 miibus->phy_mask = mii_bus_pd->phy_mask;
1823
1824 snprintf(miibus->id, MII_BUS_ID_SIZE, "%s-%x",
1825 pdev->name, pdev->id);
1826 miibus->irq = kmalloc(sizeof(int)*PHY_MAX_ADDR, GFP_KERNEL);
1827 if (!miibus->irq)
1828 goto out_err_irq_alloc;
1829
1830 for (i = 0; i < PHY_MAX_ADDR; ++i)
1831 miibus->irq[i] = PHY_POLL;
1832
1833 rc = clamp(mii_bus_pd->phydev_number, 0, PHY_MAX_ADDR);
1834 if (rc != mii_bus_pd->phydev_number)
1835 dev_err(&pdev->dev, "Invalid number (%i) of phydevs\n",
1836 mii_bus_pd->phydev_number);
1837 for (i = 0; i < rc; ++i) {
1838 unsigned short phyaddr = mii_bus_pd->phydev_data[i].addr;
1839 if (phyaddr < PHY_MAX_ADDR)
1840 miibus->irq[phyaddr] = mii_bus_pd->phydev_data[i].irq;
1841 else
1842 dev_err(&pdev->dev,
1843 "Invalid PHY address %i for phydev %i\n",
1844 phyaddr, i);
1845 }
1846
1847 rc = mdiobus_register(miibus);
1848 if (rc) {
1849 dev_err(&pdev->dev, "Cannot register MDIO bus!\n");
1850 goto out_err_mdiobus_register;
1851 }
1852
1853 platform_set_drvdata(pdev, miibus);
1854 return 0;
1855
1856out_err_mdiobus_register:
1857 kfree(miibus->irq);
1858out_err_irq_alloc:
1859 mdiobus_free(miibus);
1860out_err_alloc:
1861 peripheral_free_list(pin_req);
1862
1863 return rc;
1864}
1865
1866static int bfin_mii_bus_remove(struct platform_device *pdev)
1867{
1868 struct mii_bus *miibus = platform_get_drvdata(pdev);
1869 struct bfin_mii_bus_platform_data *mii_bus_pd =
1870 dev_get_platdata(&pdev->dev);
1871
1872 platform_set_drvdata(pdev, NULL);
1873 mdiobus_unregister(miibus);
1874 kfree(miibus->irq);
1875 mdiobus_free(miibus);
1876 peripheral_free_list(mii_bus_pd->mac_peripherals);
1877
1878 return 0;
1879}
1880
1881static struct platform_driver bfin_mii_bus_driver = {
1882 .probe = bfin_mii_bus_probe,
1883 .remove = bfin_mii_bus_remove,
1884 .driver = {
1885 .name = "bfin_mii_bus",
1886 .owner = THIS_MODULE,
1887 },
1888};
1889
1890static struct platform_driver bfin_mac_driver = {
1891 .probe = bfin_mac_probe,
1892 .remove = bfin_mac_remove,
1893 .resume = bfin_mac_resume,
1894 .suspend = bfin_mac_suspend,
1895 .driver = {
1896 .name = KBUILD_MODNAME,
1897 .owner = THIS_MODULE,
1898 },
1899};
1900
1901static int __init bfin_mac_init(void)
1902{
1903 int ret;
1904 ret = platform_driver_register(&bfin_mii_bus_driver);
1905 if (!ret)
1906 return platform_driver_register(&bfin_mac_driver);
1907 return -ENODEV;
1908}
1909
1910module_init(bfin_mac_init);
1911
1912static void __exit bfin_mac_cleanup(void)
1913{
1914 platform_driver_unregister(&bfin_mac_driver);
1915 platform_driver_unregister(&bfin_mii_bus_driver);
1916}
1917
1918module_exit(bfin_mac_cleanup);
1919