Merge branch 'master' of git://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux...
[deliverable/linux.git] / drivers / net / bfin_mac.c
1 /*
2 * Blackfin On-Chip MAC Driver
3 *
4 * Copyright 2004-2007 Analog Devices Inc.
5 *
6 * Enter bugs at http://blackfin.uclinux.org/
7 *
8 * Licensed under the GPL-2 or later.
9 */
10
11 #include <linux/init.h>
12 #include <linux/module.h>
13 #include <linux/kernel.h>
14 #include <linux/sched.h>
15 #include <linux/slab.h>
16 #include <linux/delay.h>
17 #include <linux/timer.h>
18 #include <linux/errno.h>
19 #include <linux/irq.h>
20 #include <linux/io.h>
21 #include <linux/ioport.h>
22 #include <linux/crc32.h>
23 #include <linux/device.h>
24 #include <linux/spinlock.h>
25 #include <linux/mii.h>
26 #include <linux/phy.h>
27 #include <linux/netdevice.h>
28 #include <linux/etherdevice.h>
29 #include <linux/ethtool.h>
30 #include <linux/skbuff.h>
31 #include <linux/platform_device.h>
32
33 #include <asm/dma.h>
34 #include <linux/dma-mapping.h>
35
36 #include <asm/div64.h>
37 #include <asm/dpmc.h>
38 #include <asm/blackfin.h>
39 #include <asm/cacheflush.h>
40 #include <asm/portmux.h>
41 #include <mach/pll.h>
42
43 #include "bfin_mac.h"
44
45 #define DRV_NAME "bfin_mac"
46 #define DRV_VERSION "1.1"
47 #define DRV_AUTHOR "Bryan Wu, Luke Yang"
48 #define DRV_DESC "Blackfin on-chip Ethernet MAC driver"
49
50 MODULE_AUTHOR(DRV_AUTHOR);
51 MODULE_LICENSE("GPL");
52 MODULE_DESCRIPTION(DRV_DESC);
53 MODULE_ALIAS("platform:bfin_mac");
54
55 #if defined(CONFIG_BFIN_MAC_USE_L1)
56 # define bfin_mac_alloc(dma_handle, size) l1_data_sram_zalloc(size)
57 # define bfin_mac_free(dma_handle, ptr) l1_data_sram_free(ptr)
58 #else
59 # define bfin_mac_alloc(dma_handle, size) \
60 dma_alloc_coherent(NULL, size, dma_handle, GFP_KERNEL)
61 # define bfin_mac_free(dma_handle, ptr) \
62 dma_free_coherent(NULL, sizeof(*ptr), ptr, dma_handle)
63 #endif
64
65 #define PKT_BUF_SZ 1580
66
67 #define MAX_TIMEOUT_CNT 500
68
69 /* pointers to maintain transmit list */
70 static struct net_dma_desc_tx *tx_list_head;
71 static struct net_dma_desc_tx *tx_list_tail;
72 static struct net_dma_desc_rx *rx_list_head;
73 static struct net_dma_desc_rx *rx_list_tail;
74 static struct net_dma_desc_rx *current_rx_ptr;
75 static struct net_dma_desc_tx *current_tx_ptr;
76 static struct net_dma_desc_tx *tx_desc;
77 static struct net_dma_desc_rx *rx_desc;
78
79 #if defined(CONFIG_BFIN_MAC_RMII)
80 static u16 pin_req[] = P_RMII0;
81 #else
82 static u16 pin_req[] = P_MII0;
83 #endif
84
85 static void desc_list_free(void)
86 {
87 struct net_dma_desc_rx *r;
88 struct net_dma_desc_tx *t;
89 int i;
90 #if !defined(CONFIG_BFIN_MAC_USE_L1)
91 dma_addr_t dma_handle = 0;
92 #endif
93
94 if (tx_desc) {
95 t = tx_list_head;
96 for (i = 0; i < CONFIG_BFIN_TX_DESC_NUM; i++) {
97 if (t) {
98 if (t->skb) {
99 dev_kfree_skb(t->skb);
100 t->skb = NULL;
101 }
102 t = t->next;
103 }
104 }
105 bfin_mac_free(dma_handle, tx_desc);
106 }
107
108 if (rx_desc) {
109 r = rx_list_head;
110 for (i = 0; i < CONFIG_BFIN_RX_DESC_NUM; i++) {
111 if (r) {
112 if (r->skb) {
113 dev_kfree_skb(r->skb);
114 r->skb = NULL;
115 }
116 r = r->next;
117 }
118 }
119 bfin_mac_free(dma_handle, rx_desc);
120 }
121 }
122
/*
 * Allocate and wire up the circular TX and RX DMA descriptor lists.
 * Each list element chains two hardware DMA descriptors: desc_a moves the
 * packet payload, desc_b moves the frame status word (and, for RX, raises
 * the completion interrupt).  Returns 0 on success, -ENOMEM on failure
 * (partial allocations are torn down via desc_list_free()).
 */
static int desc_list_init(void)
{
	int i;
	struct sk_buff *new_skb;
#if !defined(CONFIG_BFIN_MAC_USE_L1)
	/*
	 * This dma_handle is useless in Blackfin dma_alloc_coherent().
	 * The real dma handler is the return value of dma_alloc_coherent().
	 */
	dma_addr_t dma_handle;
#endif

	tx_desc = bfin_mac_alloc(&dma_handle,
				sizeof(struct net_dma_desc_tx) *
				CONFIG_BFIN_TX_DESC_NUM);
	if (tx_desc == NULL)
		goto init_error;

	rx_desc = bfin_mac_alloc(&dma_handle,
				sizeof(struct net_dma_desc_rx) *
				CONFIG_BFIN_RX_DESC_NUM);
	if (rx_desc == NULL)
		goto init_error;

	/* init tx_list */
	tx_list_head = tx_list_tail = tx_desc;

	for (i = 0; i < CONFIG_BFIN_TX_DESC_NUM; i++) {
		struct net_dma_desc_tx *t = tx_desc + i;
		struct dma_descriptor *a = &(t->desc_a);
		struct dma_descriptor *b = &(t->desc_b);

		/*
		 * disable DMA (enabled per-packet at transmit time)
		 * read from memory WNR = 0
		 * wordsize is 32 bits
		 * 6 half words is desc size
		 * large desc flow
		 */
		a->config = WDSIZE_32 | NDSIZE_6 | DMAFLOW_LARGE;
		a->start_addr = (unsigned long)t->packet;
		a->x_count = 0;
		a->next_dma_desc = b;

		/*
		 * enabled DMA
		 * write to memory WNR = 1
		 * wordsize is 32 bits
		 * disable interrupt
		 * 6 half words is desc size
		 * large desc flow
		 */
		b->config = DMAEN | WNR | WDSIZE_32 | NDSIZE_6 | DMAFLOW_LARGE;
		b->start_addr = (unsigned long)(&(t->status));
		b->x_count = 0;

		t->skb = NULL;
		tx_list_tail->desc_b.next_dma_desc = a;
		tx_list_tail->next = t;
		tx_list_tail = t;
	}
	tx_list_tail->next = tx_list_head;	/* tx_list is a circle */
	tx_list_tail->desc_b.next_dma_desc = &(tx_list_head->desc_a);
	current_tx_ptr = tx_list_head;

	/* init rx_list */
	rx_list_head = rx_list_tail = rx_desc;

	for (i = 0; i < CONFIG_BFIN_RX_DESC_NUM; i++) {
		struct net_dma_desc_rx *r = rx_desc + i;
		struct dma_descriptor *a = &(r->desc_a);
		struct dma_descriptor *b = &(r->desc_b);

		/* allocate a new skb for next time receive */
		new_skb = dev_alloc_skb(PKT_BUF_SZ + NET_IP_ALIGN);
		if (!new_skb) {
			printk(KERN_NOTICE DRV_NAME
			       ": init: low on mem - packet dropped\n");
			goto init_error;
		}
		skb_reserve(new_skb, NET_IP_ALIGN);
		/* Invalidate the data cache of the skb->data range when it is
		 * write-back cache.  It will prevent overwriting the new data
		 * from DMA.
		 */
		blackfin_dcache_invalidate_range((unsigned long)new_skb->head,
					 (unsigned long)new_skb->end);
		r->skb = new_skb;

		/*
		 * enabled DMA
		 * write to memory WNR = 1
		 * wordsize is 32 bits
		 * disable interrupt
		 * 6 half words is desc size
		 * large desc flow
		 */
		a->config = DMAEN | WNR | WDSIZE_32 | NDSIZE_6 | DMAFLOW_LARGE;
		/* since RXDWA is enabled, DMA starts 2 bytes before the data */
		a->start_addr = (unsigned long)new_skb->data - 2;
		a->x_count = 0;
		a->next_dma_desc = b;

		/*
		 * enabled DMA
		 * write to memory WNR = 1
		 * wordsize is 32 bits
		 * enable interrupt
		 * 6 half words is desc size
		 * large desc flow
		 */
		b->config = DMAEN | WNR | WDSIZE_32 | DI_EN |
				NDSIZE_6 | DMAFLOW_LARGE;
		b->start_addr = (unsigned long)(&(r->status));
		b->x_count = 0;

		rx_list_tail->desc_b.next_dma_desc = a;
		rx_list_tail->next = r;
		rx_list_tail = r;
	}
	rx_list_tail->next = rx_list_head;	/* rx_list is a circle */
	rx_list_tail->desc_b.next_dma_desc = &(rx_list_head->desc_a);
	current_rx_ptr = rx_list_head;

	return 0;

init_error:
	desc_list_free();
	printk(KERN_ERR DRV_NAME ": kmalloc failed\n");
	return -ENOMEM;
}
253
254
255 /*---PHY CONTROL AND CONFIGURATION-----------------------------------------*/
256
257 /*
258 * MII operations
259 */
260 /* Wait until the previous MDC/MDIO transaction has completed */
261 static int bfin_mdio_poll(void)
262 {
263 int timeout_cnt = MAX_TIMEOUT_CNT;
264
265 /* poll the STABUSY bit */
266 while ((bfin_read_EMAC_STAADD()) & STABUSY) {
267 udelay(1);
268 if (timeout_cnt-- < 0) {
269 printk(KERN_ERR DRV_NAME
270 ": wait MDC/MDIO transaction to complete timeout\n");
271 return -ETIMEDOUT;
272 }
273 }
274
275 return 0;
276 }
277
278 /* Read an off-chip register in a PHY through the MDC/MDIO port */
279 static int bfin_mdiobus_read(struct mii_bus *bus, int phy_addr, int regnum)
280 {
281 int ret;
282
283 ret = bfin_mdio_poll();
284 if (ret)
285 return ret;
286
287 /* read mode */
288 bfin_write_EMAC_STAADD(SET_PHYAD((u16) phy_addr) |
289 SET_REGAD((u16) regnum) |
290 STABUSY);
291
292 ret = bfin_mdio_poll();
293 if (ret)
294 return ret;
295
296 return (int) bfin_read_EMAC_STADAT();
297 }
298
299 /* Write an off-chip register in a PHY through the MDC/MDIO port */
300 static int bfin_mdiobus_write(struct mii_bus *bus, int phy_addr, int regnum,
301 u16 value)
302 {
303 int ret;
304
305 ret = bfin_mdio_poll();
306 if (ret)
307 return ret;
308
309 bfin_write_EMAC_STADAT((u32) value);
310
311 /* write mode */
312 bfin_write_EMAC_STAADD(SET_PHYAD((u16) phy_addr) |
313 SET_REGAD((u16) regnum) |
314 STAOP |
315 STABUSY);
316
317 return bfin_mdio_poll();
318 }
319
/* MDIO bus reset hook: nothing to do here, always reports success. */
static int bfin_mdiobus_reset(struct mii_bus *bus)
{
	return 0;
}
324
/* phylib adjust_link callback: mirror PHY link/duplex/speed changes into
 * the EMAC_OPMODE register.  Runs under lp->lock with interrupts off so
 * the cached old_* state and the register stay consistent.
 */
static void bfin_mac_adjust_link(struct net_device *dev)
{
	struct bfin_mac_local *lp = netdev_priv(dev);
	struct phy_device *phydev = lp->phydev;
	unsigned long flags;
	int new_state = 0;

	spin_lock_irqsave(&lp->lock, flags);
	if (phydev->link) {
		/* Now we make sure that we can be in full duplex mode.
		 * If not, we operate in half-duplex mode. */
		if (phydev->duplex != lp->old_duplex) {
			u32 opmode = bfin_read_EMAC_OPMODE();
			new_state = 1;

			if (phydev->duplex)
				opmode |= FDMODE;
			else
				opmode &= ~(FDMODE);

			bfin_write_EMAC_OPMODE(opmode);
			lp->old_duplex = phydev->duplex;
		}

		if (phydev->speed != lp->old_speed) {
			/* only RMII mode has a 10/100 speed bit to program */
#if defined(CONFIG_BFIN_MAC_RMII)
			u32 opmode = bfin_read_EMAC_OPMODE();
			switch (phydev->speed) {
			case 10:
				opmode |= RMII_10;
				break;
			case 100:
				opmode &= ~(RMII_10);
				break;
			default:
				printk(KERN_WARNING
					"%s: Ack!  Speed (%d) is not 10/100!\n",
					DRV_NAME, phydev->speed);
				break;
			}
			bfin_write_EMAC_OPMODE(opmode);
#endif

			new_state = 1;
			lp->old_speed = phydev->speed;
		}

		if (!lp->old_link) {
			new_state = 1;
			lp->old_link = 1;
		}
	} else if (lp->old_link) {
		/* link just went down: reset the cached state */
		new_state = 1;
		lp->old_link = 0;
		lp->old_speed = 0;
		lp->old_duplex = -1;
	}

	if (new_state) {
		u32 opmode = bfin_read_EMAC_OPMODE();
		phy_print_status(phydev);
		pr_debug("EMAC_OPMODE = 0x%08x\n", opmode);
	}

	spin_unlock_irqrestore(&lp->lock, flags);
}
391
392 /* MDC = 2.5 MHz */
393 #define MDC_CLK 2500000
394
395 static int mii_probe(struct net_device *dev)
396 {
397 struct bfin_mac_local *lp = netdev_priv(dev);
398 struct phy_device *phydev = NULL;
399 unsigned short sysctl;
400 int i;
401 u32 sclk, mdc_div;
402
403 /* Enable PHY output early */
404 if (!(bfin_read_VR_CTL() & CLKBUFOE))
405 bfin_write_VR_CTL(bfin_read_VR_CTL() | CLKBUFOE);
406
407 sclk = get_sclk();
408 mdc_div = ((sclk / MDC_CLK) / 2) - 1;
409
410 sysctl = bfin_read_EMAC_SYSCTL();
411 sysctl = (sysctl & ~MDCDIV) | SET_MDCDIV(mdc_div);
412 bfin_write_EMAC_SYSCTL(sysctl);
413
414 /* search for connect PHY device */
415 for (i = 0; i < PHY_MAX_ADDR; i++) {
416 struct phy_device *const tmp_phydev = lp->mii_bus->phy_map[i];
417
418 if (!tmp_phydev)
419 continue; /* no PHY here... */
420
421 phydev = tmp_phydev;
422 break; /* found it */
423 }
424
425 /* now we are supposed to have a proper phydev, to attach to... */
426 if (!phydev) {
427 printk(KERN_INFO "%s: Don't found any phy device at all\n",
428 dev->name);
429 return -ENODEV;
430 }
431
432 #if defined(CONFIG_BFIN_MAC_RMII)
433 phydev = phy_connect(dev, dev_name(&phydev->dev), &bfin_mac_adjust_link,
434 0, PHY_INTERFACE_MODE_RMII);
435 #else
436 phydev = phy_connect(dev, dev_name(&phydev->dev), &bfin_mac_adjust_link,
437 0, PHY_INTERFACE_MODE_MII);
438 #endif
439
440 if (IS_ERR(phydev)) {
441 printk(KERN_ERR "%s: Could not attach to PHY\n", dev->name);
442 return PTR_ERR(phydev);
443 }
444
445 /* mask with MAC supported features */
446 phydev->supported &= (SUPPORTED_10baseT_Half
447 | SUPPORTED_10baseT_Full
448 | SUPPORTED_100baseT_Half
449 | SUPPORTED_100baseT_Full
450 | SUPPORTED_Autoneg
451 | SUPPORTED_Pause | SUPPORTED_Asym_Pause
452 | SUPPORTED_MII
453 | SUPPORTED_TP);
454
455 phydev->advertising = phydev->supported;
456
457 lp->old_link = 0;
458 lp->old_speed = 0;
459 lp->old_duplex = -1;
460 lp->phydev = phydev;
461
462 printk(KERN_INFO "%s: attached PHY driver [%s] "
463 "(mii_bus:phy_addr=%s, irq=%d, mdc_clk=%dHz(mdc_div=%d)"
464 "@sclk=%dMHz)\n",
465 DRV_NAME, phydev->drv->name, dev_name(&phydev->dev), phydev->irq,
466 MDC_CLK, mdc_div, sclk/1000000);
467
468 return 0;
469 }
470
471 /*
472 * Ethtool support
473 */
474
475 /*
476 * interrupt routine for magic packet wakeup
477 */
static irqreturn_t bfin_mac_wake_interrupt(int irq, void *dev_id)
{
	/* The interrupt only serves to wake the core from suspend, so
	 * there is nothing to do beyond acknowledging it as handled.
	 */
	return IRQ_HANDLED;
}
482
483 static int
484 bfin_mac_ethtool_getsettings(struct net_device *dev, struct ethtool_cmd *cmd)
485 {
486 struct bfin_mac_local *lp = netdev_priv(dev);
487
488 if (lp->phydev)
489 return phy_ethtool_gset(lp->phydev, cmd);
490
491 return -EINVAL;
492 }
493
494 static int
495 bfin_mac_ethtool_setsettings(struct net_device *dev, struct ethtool_cmd *cmd)
496 {
497 struct bfin_mac_local *lp = netdev_priv(dev);
498
499 if (!capable(CAP_NET_ADMIN))
500 return -EPERM;
501
502 if (lp->phydev)
503 return phy_ethtool_sset(lp->phydev, cmd);
504
505 return -EINVAL;
506 }
507
508 static void bfin_mac_ethtool_getdrvinfo(struct net_device *dev,
509 struct ethtool_drvinfo *info)
510 {
511 strcpy(info->driver, DRV_NAME);
512 strcpy(info->version, DRV_VERSION);
513 strcpy(info->fw_version, "N/A");
514 strcpy(info->bus_info, dev_name(&dev->dev));
515 }
516
517 static void bfin_mac_ethtool_getwol(struct net_device *dev,
518 struct ethtool_wolinfo *wolinfo)
519 {
520 struct bfin_mac_local *lp = netdev_priv(dev);
521
522 wolinfo->supported = WAKE_MAGIC;
523 wolinfo->wolopts = lp->wol;
524 }
525
/* ethtool set_wol: accept only WAKE_MAGIC, then request or release the
 * dedicated wake IRQ to match the new setting and flag the device as
 * wakeup-capable so the PHY is not suspended.
 */
static int bfin_mac_ethtool_setwol(struct net_device *dev,
				   struct ethtool_wolinfo *wolinfo)
{
	struct bfin_mac_local *lp = netdev_priv(dev);
	int rc;

	/* reject every wake mode the hardware setup here does not handle */
	if (wolinfo->wolopts & (WAKE_MAGICSECURE |
				WAKE_UCAST |
				WAKE_MCAST |
				WAKE_BCAST |
				WAKE_ARP))
		return -EOPNOTSUPP;

	lp->wol = wolinfo->wolopts;

	if (lp->wol && !lp->irq_wake_requested) {
		/* register wake irq handler */
		rc = request_irq(IRQ_MAC_WAKEDET, bfin_mac_wake_interrupt,
				 IRQF_DISABLED, "EMAC_WAKE", dev);
		if (rc)
			return rc;
		lp->irq_wake_requested = true;
	}

	if (!lp->wol && lp->irq_wake_requested) {
		/* wake disabled: give the IRQ back */
		free_irq(IRQ_MAC_WAKEDET, dev);
		lp->irq_wake_requested = false;
	}

	/* Make sure the PHY driver doesn't suspend */
	device_init_wakeup(&dev->dev, lp->wol);

	return 0;
}
560
/* ethtool operation table for this driver */
static const struct ethtool_ops bfin_mac_ethtool_ops = {
	.get_settings = bfin_mac_ethtool_getsettings,
	.set_settings = bfin_mac_ethtool_setsettings,
	.get_link = ethtool_op_get_link,
	.get_drvinfo = bfin_mac_ethtool_getdrvinfo,
	.get_wol = bfin_mac_ethtool_getwol,
	.set_wol = bfin_mac_ethtool_setwol,
};
569
570 /**************************************************************************/
/* Program the EMAC system registers and the RX/TX DMA channels before the
 * MAC is enabled: odd word alignment for RX frames (RXDWA), optional
 * hardware checksum (RXCKS), MMC counter reset/rollover, and 32-bit
 * one-dimensional transfers on DMA channels 1 (RX) and 2 (TX).
 */
void setup_system_regs(struct net_device *dev)
{
	unsigned short sysctl;

	/*
	 * Odd word alignment for Receive Frame DMA word
	 * Configure checksum support and rcve frame word alignment
	 */
	sysctl = bfin_read_EMAC_SYSCTL();
	sysctl |= RXDWA;
#if defined(BFIN_MAC_CSUM_OFFLOAD)
	sysctl |= RXCKS;
#else
	sysctl &= ~RXCKS;
#endif
	bfin_write_EMAC_SYSCTL(sysctl);

	/* reset the MMC statistics counters and let them roll over */
	bfin_write_EMAC_MMC_CTL(RSTC | CROLL);

	/* Initialize the TX DMA channel registers */
	bfin_write_DMA2_X_COUNT(0);
	bfin_write_DMA2_X_MODIFY(4);
	bfin_write_DMA2_Y_COUNT(0);
	bfin_write_DMA2_Y_MODIFY(0);

	/* Initialize the RX DMA channel registers */
	bfin_write_DMA1_X_COUNT(0);
	bfin_write_DMA1_X_MODIFY(4);
	bfin_write_DMA1_Y_COUNT(0);
	bfin_write_DMA1_Y_MODIFY(0);
}
602
/* Load the 6-byte station address into the EMAC address registers:
 * the low four bytes into ADDRLO, the high two into ADDRHI.
 */
static void setup_mac_addr(u8 *mac_addr)
{
	u32 addr_low = le32_to_cpu(*(__le32 *) & mac_addr[0]);
	u16 addr_hi = le16_to_cpu(*(__le16 *) & mac_addr[4]);

	/* this depends on a little-endian machine */
	bfin_write_EMAC_ADDRLO(addr_low);
	bfin_write_EMAC_ADDRHI(addr_hi);
}
612
613 static int bfin_mac_set_mac_address(struct net_device *dev, void *p)
614 {
615 struct sockaddr *addr = p;
616 if (netif_running(dev))
617 return -EBUSY;
618 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
619 setup_mac_addr(dev->dev_addr);
620 return 0;
621 }
622
623 #ifdef CONFIG_BFIN_MAC_USE_HWSTAMP
624 #define bfin_mac_hwtstamp_is_none(cfg) ((cfg) == HWTSTAMP_FILTER_NONE)
625
/* SIOCSHWTSTAMP handler: program the PTP_TSYNC filter registers
 * (EMAC_PTP_CTL / FOFF / FV1-3) for the requested rx_filter, enable or
 * disable the PTP engine accordingly, and echo the effective config back
 * to user space.  The register constants below follow the documented
 * Blackfin defaults — see the per-case comments.
 */
static int bfin_mac_hwtstamp_ioctl(struct net_device *netdev,
				   struct ifreq *ifr, int cmd)
{
	struct hwtstamp_config config;
	struct bfin_mac_local *lp = netdev_priv(netdev);
	u16 ptpctl;
	u32 ptpfv1, ptpfv2, ptpfv3, ptpfoff;

	if (copy_from_user(&config, ifr->ifr_data, sizeof(config)))
		return -EFAULT;

	pr_debug("%s config flag:0x%x, tx_type:0x%x, rx_filter:0x%x\n",
			__func__, config.flags, config.tx_type, config.rx_filter);

	/* reserved for future extensions */
	if (config.flags)
		return -EINVAL;

	if ((config.tx_type != HWTSTAMP_TX_OFF) &&
			(config.tx_type != HWTSTAMP_TX_ON))
		return -ERANGE;

	ptpctl = bfin_read_EMAC_PTP_CTL();

	switch (config.rx_filter) {
	case HWTSTAMP_FILTER_NONE:
		/*
		 * Dont allow any timestamping
		 */
		ptpfv3 = 0xFFFFFFFF;
		bfin_write_EMAC_PTP_FV3(ptpfv3);
		break;
	case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
	case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
	case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
		/*
		 * Clear the five comparison mask bits (bits[12:8]) in EMAC_PTP_CTL)
		 * to enable all the field matches.
		 */
		ptpctl &= ~0x1F00;
		bfin_write_EMAC_PTP_CTL(ptpctl);
		/*
		 * Keep the default values of the EMAC_PTP_FOFF register.
		 */
		ptpfoff = 0x4A24170C;
		bfin_write_EMAC_PTP_FOFF(ptpfoff);
		/*
		 * Keep the default values of the EMAC_PTP_FV1 and EMAC_PTP_FV2
		 * registers.
		 */
		ptpfv1 = 0x11040800;
		bfin_write_EMAC_PTP_FV1(ptpfv1);
		ptpfv2 = 0x0140013F;
		bfin_write_EMAC_PTP_FV2(ptpfv2);
		/*
		 * The default value (0xFFFC) allows the timestamping of both
		 * received Sync messages and Delay_Req messages.
		 */
		ptpfv3 = 0xFFFFFFFC;
		bfin_write_EMAC_PTP_FV3(ptpfv3);

		config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT;
		break;
	case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
	case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
	case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
		/* Clear all five comparison mask bits (bits[12:8]) in the
		 * EMAC_PTP_CTL register to enable all the field matches.
		 */
		ptpctl &= ~0x1F00;
		bfin_write_EMAC_PTP_CTL(ptpctl);
		/*
		 * Keep the default values of the EMAC_PTP_FOFF register, except set
		 * the PTPCOF field to 0x2A.
		 */
		ptpfoff = 0x2A24170C;
		bfin_write_EMAC_PTP_FOFF(ptpfoff);
		/*
		 * Keep the default values of the EMAC_PTP_FV1 and EMAC_PTP_FV2
		 * registers.
		 */
		ptpfv1 = 0x11040800;
		bfin_write_EMAC_PTP_FV1(ptpfv1);
		ptpfv2 = 0x0140013F;
		bfin_write_EMAC_PTP_FV2(ptpfv2);
		/*
		 * To allow the timestamping of Pdelay_Req and Pdelay_Resp, set
		 * the value to 0xFFF0.
		 */
		ptpfv3 = 0xFFFFFFF0;
		bfin_write_EMAC_PTP_FV3(ptpfv3);

		config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_EVENT;
		break;
	case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
	case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
	case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
		/*
		 * Clear bits 8 and 12 of the EMAC_PTP_CTL register to enable only the
		 * EFTM and PTPCM field comparison.
		 */
		ptpctl &= ~0x1100;
		bfin_write_EMAC_PTP_CTL(ptpctl);
		/*
		 * Keep the default values of all the fields of the EMAC_PTP_FOFF
		 * register, except set the PTPCOF field to 0x0E.
		 */
		ptpfoff = 0x0E24170C;
		bfin_write_EMAC_PTP_FOFF(ptpfoff);
		/*
		 * Program bits [15:0] of the EMAC_PTP_FV1 register to 0x88F7, which
		 * corresponds to PTP messages on the MAC layer.
		 */
		ptpfv1 = 0x110488F7;
		bfin_write_EMAC_PTP_FV1(ptpfv1);
		ptpfv2 = 0x0140013F;
		bfin_write_EMAC_PTP_FV2(ptpfv2);
		/*
		 * To allow the timestamping of Pdelay_Req and Pdelay_Resp
		 * messages, set the value to 0xFFF0.
		 */
		ptpfv3 = 0xFFFFFFF0;
		bfin_write_EMAC_PTP_FV3(ptpfv3);

		config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L2_EVENT;
		break;
	default:
		return -ERANGE;
	}

	if (config.tx_type == HWTSTAMP_TX_OFF &&
	    bfin_mac_hwtstamp_is_none(config.rx_filter)) {
		/* neither direction wants timestamps: stop the PTP engine */
		ptpctl &= ~PTP_EN;
		bfin_write_EMAC_PTP_CTL(ptpctl);

		SSYNC();
	} else {
		ptpctl |= PTP_EN;
		bfin_write_EMAC_PTP_CTL(ptpctl);

		/*
		 * clear any existing timestamp
		 */
		bfin_read_EMAC_PTP_RXSNAPLO();
		bfin_read_EMAC_PTP_RXSNAPHI();

		bfin_read_EMAC_PTP_TXSNAPLO();
		bfin_read_EMAC_PTP_TXSNAPHI();

		/*
		 * Set registers so that rollover occurs soon to test this.
		 */
		bfin_write_EMAC_PTP_TIMELO(0x00000000);
		bfin_write_EMAC_PTP_TIMEHI(0xFF800000);

		SSYNC();

		/* restart the software timecounter from "now" */
		lp->compare.last_update = 0;
		timecounter_init(&lp->clock,
				&lp->cycles,
				ktime_to_ns(ktime_get_real()));
		timecompare_update(&lp->compare, 0);
	}

	lp->stamp_cfg = config;
	return copy_to_user(ifr->ifr_data, &config, sizeof(config)) ?
		-EFAULT : 0;
}
794
/* Debug helper: dump a hardware timestamp, its transform into system
 * time, the current system time, and the timecompare offset/skew.
 */
static void bfin_dump_hwtamp(char *s, ktime_t *hw, ktime_t *ts, struct timecompare *cmp)
{
	ktime_t sys = ktime_get_real();

	pr_debug("%s %s hardware:%d,%d transform system:%d,%d system:%d,%d, cmp:%lld, %lld\n",
			__func__, s, hw->tv.sec, hw->tv.nsec, ts->tv.sec, ts->tv.nsec, sys.tv.sec,
			sys.tv.nsec, cmp->offset, cmp->skew);
}
803
/* If the skb asked for hardware TX timestamping, poll the PTP engine for
 * the transmit snapshot and deliver it to the stack via skb_tstamp_tx().
 */
static void bfin_tx_hwtstamp(struct net_device *netdev, struct sk_buff *skb)
{
	struct bfin_mac_local *lp = netdev_priv(netdev);

	if (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) {
		int timeout_cnt = MAX_TIMEOUT_CNT;

		/* When doing time stamping, keep the connection to the socket
		 * a while longer
		 */
		skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;

		/*
		 * The timestamping is done at the EMAC module's MII/RMII interface
		 * when the module sees the Start of Frame of an event message packet. This
		 * interface is the closest possible place to the physical Ethernet transmission
		 * medium, providing the best timing accuracy.
		 */
		while ((!(bfin_read_EMAC_PTP_ISTAT() & TXTL)) && (--timeout_cnt))
			udelay(1);
		if (timeout_cnt == 0)
			printk(KERN_ERR DRV_NAME
					": fails to timestamp the TX packet\n");
		else {
			struct skb_shared_hwtstamps shhwtstamps;
			u64 ns;
			u64 regval;

			/* read the 64-bit snapshot from the two 32-bit halves */
			regval = bfin_read_EMAC_PTP_TXSNAPLO();
			regval |= (u64)bfin_read_EMAC_PTP_TXSNAPHI() << 32;
			memset(&shhwtstamps, 0, sizeof(shhwtstamps));
			ns = timecounter_cyc2time(&lp->clock,
					regval);
			timecompare_update(&lp->compare, ns);
			shhwtstamps.hwtstamp = ns_to_ktime(ns);
			shhwtstamps.syststamp =
				timecompare_transform(&lp->compare, ns);
			skb_tstamp_tx(skb, &shhwtstamps);

			bfin_dump_hwtamp("TX", &shhwtstamps.hwtstamp, &shhwtstamps.syststamp, &lp->compare);
		}
	}
}
847
/* Attach the hardware RX timestamp to the skb, if RX timestamping is
 * enabled and the PTP engine has latched a snapshot for this frame.
 */
static void bfin_rx_hwtstamp(struct net_device *netdev, struct sk_buff *skb)
{
	struct bfin_mac_local *lp = netdev_priv(netdev);
	u32 valid;
	u64 regval, ns;
	struct skb_shared_hwtstamps *shhwtstamps;

	if (bfin_mac_hwtstamp_is_none(lp->stamp_cfg.rx_filter))
		return;

	/* no snapshot latched for this frame: nothing to report */
	valid = bfin_read_EMAC_PTP_ISTAT() & RXEL;
	if (!valid)
		return;

	shhwtstamps = skb_hwtstamps(skb);

	/* read the 64-bit snapshot from the two 32-bit halves */
	regval = bfin_read_EMAC_PTP_RXSNAPLO();
	regval |= (u64)bfin_read_EMAC_PTP_RXSNAPHI() << 32;
	ns = timecounter_cyc2time(&lp->clock, regval);
	timecompare_update(&lp->compare, ns);
	memset(shhwtstamps, 0, sizeof(*shhwtstamps));
	shhwtstamps->hwtstamp = ns_to_ktime(ns);
	shhwtstamps->syststamp = timecompare_transform(&lp->compare, ns);

	bfin_dump_hwtamp("RX", &shhwtstamps->hwtstamp, &shhwtstamps->syststamp, &lp->compare);
}
874
875 /*
876 * bfin_read_clock - read raw cycle counter (to be used by time counter)
877 */
878 static cycle_t bfin_read_clock(const struct cyclecounter *tc)
879 {
880 u64 stamp;
881
882 stamp = bfin_read_EMAC_PTP_TIMELO();
883 stamp |= (u64)bfin_read_EMAC_PTP_TIMEHI() << 32ULL;
884
885 return stamp;
886 }
887
888 #define PTP_CLK 25000000
889
/* One-time PTP setup: program the addend so the hardware counter ticks at
 * PTP_CLK, set up the cyclecounter/timecompare structures used to convert
 * raw snapshots to nanoseconds, and default timestamping to off.
 */
static void bfin_mac_hwtstamp_init(struct net_device *netdev)
{
	struct bfin_mac_local *lp = netdev_priv(netdev);
	u64 append;

	/* Initialize hardware timer: addend = PTP_CLK / sclk in 32.32
	 * fixed point
	 */
	append = PTP_CLK * (1ULL << 32);
	do_div(append, get_sclk());
	bfin_write_EMAC_PTP_ADDEND((u32)append);

	/* cycles->ns conversion: mult/shift chosen for a PTP_CLK tick rate */
	memset(&lp->cycles, 0, sizeof(lp->cycles));
	lp->cycles.read = bfin_read_clock;
	lp->cycles.mask = CLOCKSOURCE_MASK(64);
	lp->cycles.mult = 1000000000 / PTP_CLK;
	lp->cycles.shift = 0;

	/* Synchronize our NIC clock against system wall clock */
	memset(&lp->compare, 0, sizeof(lp->compare));
	lp->compare.source = &lp->clock;
	lp->compare.target = ktime_get_real;
	lp->compare.num_samples = 10;

	/* Initialize hwstamp config: timestamping disabled until requested */
	lp->stamp_cfg.rx_filter = HWTSTAMP_FILTER_NONE;
	lp->stamp_cfg.tx_type = HWTSTAMP_TX_OFF;
}
916
917 #else
918 # define bfin_mac_hwtstamp_is_none(cfg) 0
919 # define bfin_mac_hwtstamp_init(dev)
920 # define bfin_mac_hwtstamp_ioctl(dev, ifr, cmd) (-EOPNOTSUPP)
921 # define bfin_rx_hwtstamp(dev, skb)
922 # define bfin_tx_hwtstamp(dev, skb)
923 #endif
924
/* Walk the circular TX list from the head, recycling every descriptor
 * whose status word the DMA engine has written (non-zero): take it out of
 * DMA, clear its status, and free its skb.  Stops at the first descriptor
 * still owned by hardware.  Caller must know the head is completed.
 */
static inline void _tx_reclaim_skb(void)
{
	do {
		tx_list_head->desc_a.config &= ~DMAEN;
		tx_list_head->status.status_word = 0;
		if (tx_list_head->skb) {
			dev_kfree_skb(tx_list_head->skb);
			tx_list_head->skb = NULL;
		}
		tx_list_head = tx_list_head->next;

	} while (tx_list_head->status.status_word != 0);
}
938
/* Reclaim completed TX descriptors.  If the ring is about to wrap into
 * the un-reclaimed head, poll briefly for the head to complete; give up
 * and stop the queue if it doesn't.  Re-arms the reclaim timer (faster
 * while the queue is stopped) whenever work remains outstanding.
 */
static void tx_reclaim_skb(struct bfin_mac_local *lp)
{
	int timeout_cnt = MAX_TIMEOUT_CNT;

	/* fast path: the head is already done, reclaim what we can */
	if (tx_list_head->status.status_word != 0)
		_tx_reclaim_skb();

	/* ring nearly full: wait a bounded time for the head to complete */
	if (current_tx_ptr->next == tx_list_head) {
		while (tx_list_head->status.status_word == 0) {
			/* slow down polling to avoid too many queue stop. */
			udelay(10);
			/* reclaim skb if DMA is not running. */
			if (!(bfin_read_DMA2_IRQ_STATUS() & DMA_RUN))
				break;
			if (timeout_cnt-- < 0)
				break;
		}

		if (timeout_cnt >= 0)
			_tx_reclaim_skb();
		else
			netif_stop_queue(lp->ndev);
	}

	/* room opened up again: let the stack resume transmitting */
	if (current_tx_ptr->next != tx_list_head &&
		netif_queue_stopped(lp->ndev))
		netif_wake_queue(lp->ndev);

	if (tx_list_head != current_tx_ptr) {
		/* shorten the timer interval if tx queue is stopped */
		if (netif_queue_stopped(lp->ndev))
			lp->tx_reclaim_timer.expires =
				jiffies + (TX_RECLAIM_JIFFIES >> 4);
		else
			lp->tx_reclaim_timer.expires =
				jiffies + TX_RECLAIM_JIFFIES;

		mod_timer(&lp->tx_reclaim_timer,
			lp->tx_reclaim_timer.expires);
	}

	return;
}
982
/* Timer callback: forward the opaque timer argument to the reclaim
 * routine.
 */
static void tx_reclaim_skb_timeout(unsigned long data)
{
	tx_reclaim_skb((struct bfin_mac_local *)data);
}
987
/* ndo_start_xmit: stage the packet on the current TX descriptor and kick
 * the DMA engine if it is idle.  Two paths exist: when skb->data happens
 * to be 2-byte aligned the length word is written just before the payload
 * in place (zero copy); otherwise the payload is copied into the
 * descriptor's own packet buffer.
 */
static int bfin_mac_hard_start_xmit(struct sk_buff *skb,
				struct net_device *dev)
{
	struct bfin_mac_local *lp = netdev_priv(dev);
	u16 *data;
	u32 data_align = (unsigned long)(skb->data) & 0x3;

	current_tx_ptr->skb = skb;

	if (data_align == 0x2) {
		/* move skb->data to current_tx_ptr payload */
		data = (u16 *)(skb->data) - 1;
		*data = (u16)(skb->len);
		/*
		 * When transmitting an Ethernet packet, the PTP_TSYNC module requires
		 * a DMA_Length_Word field associated with the packet. The lower 12 bits
		 * of this field are the length of the packet payload in bytes and the higher
		 * 4 bits are the timestamping enable field.
		 */
		if (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)
			*data |= 0x1000;

		current_tx_ptr->desc_a.start_addr = (u32)data;
		/* this is important! Flush the length word and payload out
		 * of the data cache before DMA reads them.
		 */
		blackfin_dcache_flush_range((u32)data,
				(u32)((u8 *)data + skb->len + 4));
	} else {
		/* copy path: length word first, then the payload */
		*((u16 *)(current_tx_ptr->packet)) = (u16)(skb->len);
		/* enable timestamping for the sent packet */
		if (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)
			*((u16 *)(current_tx_ptr->packet)) |= 0x1000;
		memcpy((u8 *)(current_tx_ptr->packet + 2), skb->data,
			skb->len);
		current_tx_ptr->desc_a.start_addr =
			(u32)current_tx_ptr->packet;
		blackfin_dcache_flush_range(
			(u32)current_tx_ptr->packet,
			(u32)(current_tx_ptr->packet + skb->len + 2));
	}

	/* make sure the internal data buffers in the core are drained
	 * so that the DMA descriptors are completely written when the
	 * DMA engine goes to fetch them below
	 */
	SSYNC();

	/* always clear status buffer before start tx dma */
	current_tx_ptr->status.status_word = 0;

	/* enable this packet's dma */
	current_tx_ptr->desc_a.config |= DMAEN;

	/* tx dma is running, just return */
	if (bfin_read_DMA2_IRQ_STATUS() & DMA_RUN)
		goto out;

	/* tx dma is not running */
	bfin_write_DMA2_NEXT_DESC_PTR(&(current_tx_ptr->desc_a));
	/* dma enabled, read from memory, size is 6 */
	bfin_write_DMA2_CONFIG(current_tx_ptr->desc_a.config);
	/* Turn on the EMAC tx */
	bfin_write_EMAC_OPMODE(bfin_read_EMAC_OPMODE() | TE);

out:
	bfin_tx_hwtstamp(dev, skb);

	/* advance to the next ring slot and account the transmit */
	current_tx_ptr = current_tx_ptr->next;
	dev->stats.tx_packets++;
	dev->stats.tx_bytes += (skb->len);

	tx_reclaim_skb(lp);

	return NETDEV_TX_OK;
}
1062
1063 #define IP_HEADER_OFF 0
1064 #define RX_ERROR_MASK (RX_LONG | RX_ALIGN | RX_CRC | RX_LEN | \
1065 RX_FRAG | RX_ADDR | RX_DMAO | RX_PHY | RX_LATE | RX_RANGE)
1066
/* Hand one received frame up the stack.  Replaces the descriptor's skb
 * with a fresh one so the hardware can keep receiving, then (optionally)
 * folds the hardware IP-payload checksum into a CHECKSUM_COMPLETE value
 * and passes the old skb to netif_rx().
 */
static void bfin_mac_rx(struct net_device *dev)
{
	struct sk_buff *skb, *new_skb;
	unsigned short len;
	struct bfin_mac_local *lp __maybe_unused = netdev_priv(dev);
#if defined(BFIN_MAC_CSUM_OFFLOAD)
	unsigned int i;
	unsigned char fcs[ETH_FCS_LEN + 1];
#endif

	/* check if frame status word reports an error condition
	 * in which case we simply drop the packet
	 */
	if (current_rx_ptr->status.status_word & RX_ERROR_MASK) {
		printk(KERN_NOTICE DRV_NAME
		       ": rx: receive error - packet dropped\n");
		dev->stats.rx_dropped++;
		goto out;
	}

	/* allocate a new skb for next time receive */
	skb = current_rx_ptr->skb;

	new_skb = dev_alloc_skb(PKT_BUF_SZ + NET_IP_ALIGN);
	if (!new_skb) {
		printk(KERN_NOTICE DRV_NAME
		       ": rx: low on mem - packet dropped\n");
		dev->stats.rx_dropped++;
		goto out;
	}
	/* reserve 2 bytes for RXDWA padding */
	skb_reserve(new_skb, NET_IP_ALIGN);
	/* Invalidate the data cache of the skb->data range when it is
	 * write-back cache.  It will prevent overwriting the new data
	 * from DMA.
	 */
	blackfin_dcache_invalidate_range((unsigned long)new_skb->head,
					 (unsigned long)new_skb->end);

	/* swap the fresh skb into the descriptor */
	current_rx_ptr->skb = new_skb;
	current_rx_ptr->desc_a.start_addr = (unsigned long)new_skb->data - 2;

	len = (unsigned short)((current_rx_ptr->status.status_word) & RX_FRLEN);
	/* Deduce Ethernet FCS length from Ethernet payload length */
	len -= ETH_FCS_LEN;
	skb_put(skb, len);

	skb->protocol = eth_type_trans(skb, dev);

	bfin_rx_hwtstamp(dev, skb);

#if defined(BFIN_MAC_CSUM_OFFLOAD)
	/* Checksum offloading only works for IPv4 packets with the standard IP header
	 * length of 20 bytes, because the blackfin MAC checksum calculation is
	 * based on that assumption. We must NOT use the calculated checksum if our
	 * IP version or header break that assumption.
	 */
	if (skb->data[IP_HEADER_OFF] == 0x45) {
		skb->csum = current_rx_ptr->status.ip_payload_csum;
		/*
		 * Deduce Ethernet FCS from hardware generated IP payload checksum.
		 * IP checksum is based on 16-bit one's complement algorithm.
		 * To deduce a value from checksum is equal to add its inversion.
		 * If the IP payload len is odd, the inversed FCS should also
		 * begin from odd address and leave first byte zero.
		 */
		if (skb->len % 2) {
			fcs[0] = 0;
			for (i = 0; i < ETH_FCS_LEN; i++)
				fcs[i + 1] = ~skb->data[skb->len + i];
			skb->csum = csum_partial(fcs, ETH_FCS_LEN + 1, skb->csum);
		} else {
			for (i = 0; i < ETH_FCS_LEN; i++)
				fcs[i] = ~skb->data[skb->len + i];
			skb->csum = csum_partial(fcs, ETH_FCS_LEN, skb->csum);
		}
		skb->ip_summed = CHECKSUM_COMPLETE;
	}
#endif

	netif_rx(skb);
	dev->stats.rx_packets++;
	dev->stats.rx_bytes += len;
out:
	/* return the descriptor to the hardware and advance the ring */
	current_rx_ptr->status.status_word = 0x00000000;
	current_rx_ptr = current_rx_ptr->next;
}
1153
1154 /* interrupt routine to handle rx and error signal */
1155 static irqreturn_t bfin_mac_interrupt(int irq, void *dev_id)
1156 {
1157 struct net_device *dev = dev_id;
1158 int number = 0;
1159
1160 get_one_packet:
1161 if (current_rx_ptr->status.status_word == 0) {
1162 /* no more new packet received */
1163 if (number == 0) {
1164 if (current_rx_ptr->next->status.status_word != 0) {
1165 current_rx_ptr = current_rx_ptr->next;
1166 goto real_rx;
1167 }
1168 }
1169 bfin_write_DMA1_IRQ_STATUS(bfin_read_DMA1_IRQ_STATUS() |
1170 DMA_DONE | DMA_ERR);
1171 return IRQ_HANDLED;
1172 }
1173
1174 real_rx:
1175 bfin_mac_rx(dev);
1176 number++;
1177 goto get_one_packet;
1178 }
1179
#ifdef CONFIG_NET_POLL_CONTROLLER
/*
 * Polled receive path for netpoll/netconsole: run the RX interrupt
 * handler by hand and reclaim completed TX buffers, with the RX IRQ
 * masked so the real handler cannot race with us.
 */
static void bfin_mac_poll(struct net_device *dev)
{
	struct bfin_mac_local *lp = netdev_priv(dev);

	disable_irq(IRQ_MAC_RX);
	bfin_mac_interrupt(IRQ_MAC_RX, dev);
	tx_reclaim_skb(lp);
	enable_irq(IRQ_MAC_RX);
}
#endif				/* CONFIG_NET_POLL_CONTROLLER */
1191
1192 static void bfin_mac_disable(void)
1193 {
1194 unsigned int opmode;
1195
1196 opmode = bfin_read_EMAC_OPMODE();
1197 opmode &= (~RE);
1198 opmode &= (~TE);
1199 /* Turn off the EMAC */
1200 bfin_write_EMAC_OPMODE(opmode);
1201 }
1202
/*
 * Enable Interrupts, Receive, and Transmit
 *
 * Returns 0 on success, or the error from bfin_mdio_poll() if the MII
 * interface never becomes idle.
 */
static int bfin_mac_enable(void)
{
	int ret;
	u32 opmode;

	pr_debug("%s: %s\n", DRV_NAME, __func__);

	/* Point the RX DMA channel at the head of the descriptor ring
	 * and start it with the first descriptor's config word. */
	bfin_write_DMA1_NEXT_DESC_PTR(&(rx_list_head->desc_a));
	bfin_write_DMA1_CONFIG(rx_list_head->desc_a.config);

	/* Wait until any in-flight MII management transaction is done */
	ret = bfin_mdio_poll();
	if (ret)
		return ret;

	/* We enable only RX here */
	/* ASTP   : Enable Automatic Pad Stripping
	   PR     : Promiscuous Mode for test
	   PSF    : Receive frames with total length less than 64 bytes.
	   FDMODE : Full Duplex Mode
	   LB     : Internal Loopback for test
	   RE     : Receiver Enable */
	opmode = bfin_read_EMAC_OPMODE();
	/* NOTE(review): half duplex additionally sets DRO | DC — presumably
	 * "disable receive own frames" and "deferral check"; confirm against
	 * the Blackfin hardware reference manual. */
	if (opmode & FDMODE)
		opmode |= PSF;
	else
		opmode |= DRO | DC | PSF;
	opmode |= RE;

#if defined(CONFIG_BFIN_MAC_RMII)
	opmode |= RMII; /* For Now only 100MBit are supported */
#if (defined(CONFIG_BF537) || defined(CONFIG_BF536)) && CONFIG_BF_REV_0_2
	/* early silicon revisions need TE set here as a workaround */
	opmode |= TE;
#endif
#endif
	/* Turn on the EMAC rx */
	bfin_write_EMAC_OPMODE(opmode);

	return 0;
}
1247
/* Our watchdog timed out. Called by the networking layer */
static void bfin_mac_timeout(struct net_device *dev)
{
	struct bfin_mac_local *lp = netdev_priv(dev);

	pr_debug("%s: %s\n", dev->name, __func__);

	/* stop the MAC so the DMA engines are quiet while we scrub the ring */
	bfin_mac_disable();

	/* no point running the reclaim timer while we free everything here */
	del_timer(&lp->tx_reclaim_timer);

	/* reset tx queue and free skb */
	while (tx_list_head != current_tx_ptr) {
		tx_list_head->desc_a.config &= ~DMAEN;
		tx_list_head->status.status_word = 0;
		if (tx_list_head->skb) {
			dev_kfree_skb(tx_list_head->skb);
			tx_list_head->skb = NULL;
		}
		tx_list_head = tx_list_head->next;
	}

	/* NOTE(review): lp->ndev appears to be the same device as dev, which
	 * would make this wake redundant with the unconditional one below —
	 * confirm before simplifying. */
	if (netif_queue_stopped(lp->ndev))
		netif_wake_queue(lp->ndev);

	bfin_mac_enable();

	/* We can accept TX packets again */
	dev->trans_start = jiffies; /* prevent tx timeout */
	netif_wake_queue(dev);
}
1279
1280 static void bfin_mac_multicast_hash(struct net_device *dev)
1281 {
1282 u32 emac_hashhi, emac_hashlo;
1283 struct netdev_hw_addr *ha;
1284 char *addrs;
1285 u32 crc;
1286
1287 emac_hashhi = emac_hashlo = 0;
1288
1289 netdev_for_each_mc_addr(ha, dev) {
1290 addrs = ha->addr;
1291
1292 /* skip non-multicast addresses */
1293 if (!(*addrs & 1))
1294 continue;
1295
1296 crc = ether_crc(ETH_ALEN, addrs);
1297 crc >>= 26;
1298
1299 if (crc & 0x20)
1300 emac_hashhi |= 1 << (crc & 0x1f);
1301 else
1302 emac_hashlo |= 1 << (crc & 0x1f);
1303 }
1304
1305 bfin_write_EMAC_HASHHI(emac_hashhi);
1306 bfin_write_EMAC_HASHLO(emac_hashlo);
1307 }
1308
1309 /*
1310 * This routine will, depending on the values passed to it,
1311 * either make it accept multicast packets, go into
1312 * promiscuous mode (for TCPDUMP and cousins) or accept
1313 * a select set of multicast packets
1314 */
1315 static void bfin_mac_set_multicast_list(struct net_device *dev)
1316 {
1317 u32 sysctl;
1318
1319 if (dev->flags & IFF_PROMISC) {
1320 printk(KERN_INFO "%s: set to promisc mode\n", dev->name);
1321 sysctl = bfin_read_EMAC_OPMODE();
1322 sysctl |= PR;
1323 bfin_write_EMAC_OPMODE(sysctl);
1324 } else if (dev->flags & IFF_ALLMULTI) {
1325 /* accept all multicast */
1326 sysctl = bfin_read_EMAC_OPMODE();
1327 sysctl |= PAM;
1328 bfin_write_EMAC_OPMODE(sysctl);
1329 } else if (!netdev_mc_empty(dev)) {
1330 /* set up multicast hash table */
1331 sysctl = bfin_read_EMAC_OPMODE();
1332 sysctl |= HM;
1333 bfin_write_EMAC_OPMODE(sysctl);
1334 bfin_mac_multicast_hash(dev);
1335 } else {
1336 /* clear promisc or multicast mode */
1337 sysctl = bfin_read_EMAC_OPMODE();
1338 sysctl &= ~(RAF | PAM);
1339 bfin_write_EMAC_OPMODE(sysctl);
1340 }
1341 }
1342
1343 static int bfin_mac_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
1344 {
1345 switch (cmd) {
1346 case SIOCSHWTSTAMP:
1347 return bfin_mac_hwtstamp_ioctl(netdev, ifr, cmd);
1348 default:
1349 return -EOPNOTSUPP;
1350 }
1351 }
1352
/*
 * this puts the device in an inactive state
 */
static void bfin_mac_shutdown(struct net_device *dev)
{
	/* Turn off the EMAC */
	bfin_write_EMAC_OPMODE(0x00000000);
	/* Turn off the EMAC RX DMA */
	bfin_write_DMA1_CONFIG(0x0000);
	/* DMA2 — presumably the TX channel; confirm against the HRM */
	bfin_write_DMA2_CONFIG(0x0000);
}
1364
1365 /*
1366 * Open and Initialize the interface
1367 *
1368 * Set up everything, reset the card, etc..
1369 */
1370 static int bfin_mac_open(struct net_device *dev)
1371 {
1372 struct bfin_mac_local *lp = netdev_priv(dev);
1373 int ret;
1374 pr_debug("%s: %s\n", dev->name, __func__);
1375
1376 /*
1377 * Check that the address is valid. If its not, refuse
1378 * to bring the device up. The user must specify an
1379 * address using ifconfig eth0 hw ether xx:xx:xx:xx:xx:xx
1380 */
1381 if (!is_valid_ether_addr(dev->dev_addr)) {
1382 printk(KERN_WARNING DRV_NAME ": no valid ethernet hw addr\n");
1383 return -EINVAL;
1384 }
1385
1386 /* initial rx and tx list */
1387 ret = desc_list_init();
1388 if (ret)
1389 return ret;
1390
1391 phy_start(lp->phydev);
1392 phy_write(lp->phydev, MII_BMCR, BMCR_RESET);
1393 setup_system_regs(dev);
1394 setup_mac_addr(dev->dev_addr);
1395
1396 bfin_mac_disable();
1397 ret = bfin_mac_enable();
1398 if (ret)
1399 return ret;
1400 pr_debug("hardware init finished\n");
1401
1402 netif_start_queue(dev);
1403 netif_carrier_on(dev);
1404
1405 return 0;
1406 }
1407
/*
 * this makes the board clean up everything that it can
 * and not talk to the outside world. Caused by
 * an 'ifconfig ethX down'
 */
static int bfin_mac_close(struct net_device *dev)
{
	struct bfin_mac_local *lp = netdev_priv(dev);
	pr_debug("%s: %s\n", dev->name, __func__);

	/* stop handing packets to the hardware and drop the link state */
	netif_stop_queue(dev);
	netif_carrier_off(dev);

	/* stop the PHY state machine and power the PHY down */
	phy_stop(lp->phydev);
	phy_write(lp->phydev, MII_BMCR, BMCR_PDOWN);

	/* clear everything */
	bfin_mac_shutdown(dev);

	/* free the rx/tx buffers */
	desc_list_free();

	return 0;
}
1432
/* net_device callbacks wired up for the Blackfin on-chip MAC */
static const struct net_device_ops bfin_mac_netdev_ops = {
	.ndo_open		= bfin_mac_open,
	.ndo_stop		= bfin_mac_close,
	.ndo_start_xmit		= bfin_mac_hard_start_xmit,
	.ndo_set_mac_address	= bfin_mac_set_mac_address,
	.ndo_tx_timeout		= bfin_mac_timeout,
	.ndo_set_multicast_list	= bfin_mac_set_multicast_list,
	.ndo_do_ioctl		= bfin_mac_ioctl,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_change_mtu		= eth_change_mtu,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= bfin_mac_poll,
#endif
};
1447
/*
 * Probe the Blackfin on-chip MAC: allocate the net_device, determine a
 * usable MAC address, attach to the separately-probed MII bus, hook up
 * the RX interrupt and register the interface.
 */
static int __devinit bfin_mac_probe(struct platform_device *pdev)
{
	struct net_device *ndev;
	struct bfin_mac_local *lp;
	struct platform_device *pd;
	int rc;

	ndev = alloc_etherdev(sizeof(struct bfin_mac_local));
	if (!ndev) {
		dev_err(&pdev->dev, "Cannot allocate net device!\n");
		return -ENOMEM;
	}

	SET_NETDEV_DEV(ndev, &pdev->dev);
	platform_set_drvdata(pdev, ndev);
	lp = netdev_priv(ndev);
	lp->ndev = ndev;

	/* Grab the MAC address in the MAC */
	*(__le32 *) (&(ndev->dev_addr[0])) = cpu_to_le32(bfin_read_EMAC_ADDRLO());
	*(__le16 *) (&(ndev->dev_addr[4])) = cpu_to_le16((u16) bfin_read_EMAC_ADDRHI());

	/* probe mac */
	/* Sanity-check that the MAC block is present by writing a pattern
	 * to ADDRLO and reading it back — there is no dedicated revision
	 * register to probe instead. */
	bfin_write_EMAC_ADDRLO(0x12345678);
	if (bfin_read_EMAC_ADDRLO() != 0x12345678) {
		dev_err(&pdev->dev, "Cannot detect Blackfin on-chip ethernet MAC controller!\n");
		rc = -ENODEV;
		goto out_err_probe_mac;
	}


	/*
	 * Is it valid? (Did bootloader initialize it?)
	 * Grab the MAC from the board somehow
	 * this is done in the arch/blackfin/mach-bfxxx/boards/eth_mac.c
	 */
	if (!is_valid_ether_addr(ndev->dev_addr))
		bfin_get_ether_addr(ndev->dev_addr);

	/* If still not valid, get a random one */
	if (!is_valid_ether_addr(ndev->dev_addr))
		random_ether_addr(ndev->dev_addr);

	setup_mac_addr(ndev->dev_addr);

	/* the MII bus is a separate platform device, handed to us as
	 * platform data; its own probe must have run already */
	if (!pdev->dev.platform_data) {
		dev_err(&pdev->dev, "Cannot get platform device bfin_mii_bus!\n");
		rc = -ENODEV;
		goto out_err_probe_mac;
	}
	pd = pdev->dev.platform_data;
	lp->mii_bus = platform_get_drvdata(pd);
	if (!lp->mii_bus) {
		dev_err(&pdev->dev, "Cannot get mii_bus!\n");
		rc = -ENODEV;
		goto out_err_mii_bus_probe;
	}
	lp->mii_bus->priv = ndev;

	rc = mii_probe(ndev);
	if (rc) {
		dev_err(&pdev->dev, "MII Probe failed!\n");
		goto out_err_mii_probe;
	}

	/* Fill in the fields of the device structure with ethernet values. */
	ether_setup(ndev);

	ndev->netdev_ops = &bfin_mac_netdev_ops;
	ndev->ethtool_ops = &bfin_mac_ethtool_ops;

	/* timer used to lazily reclaim transmitted skbs */
	init_timer(&lp->tx_reclaim_timer);
	lp->tx_reclaim_timer.data = (unsigned long)lp;
	lp->tx_reclaim_timer.function = tx_reclaim_skb_timeout;

	spin_lock_init(&lp->lock);

	/* now, enable interrupts */
	/* register irq handler */
	rc = request_irq(IRQ_MAC_RX, bfin_mac_interrupt,
			IRQF_DISABLED, "EMAC_RX", ndev);
	if (rc) {
		dev_err(&pdev->dev, "Cannot request Blackfin MAC RX IRQ!\n");
		/* NOTE(review): this overwrites the request_irq() error
		 * code with -EBUSY — consider propagating rc as-is */
		rc = -EBUSY;
		goto out_err_request_irq;
	}

	rc = register_netdev(ndev);
	if (rc) {
		dev_err(&pdev->dev, "Cannot register net device!\n");
		goto out_err_reg_ndev;
	}

	bfin_mac_hwtstamp_init(ndev);

	/* now, print out the card info, in a short format.. */
	dev_info(&pdev->dev, "%s, Version %s\n", DRV_DESC, DRV_VERSION);

	return 0;

out_err_reg_ndev:
	free_irq(IRQ_MAC_RX, ndev);
out_err_request_irq:
out_err_mii_probe:
	/* NOTE(review): this tears down resources owned by the
	 * bfin_mii_bus platform driver — confirm the intended split of
	 * ownership between the two drivers */
	mdiobus_unregister(lp->mii_bus);
	mdiobus_free(lp->mii_bus);
out_err_mii_bus_probe:
	peripheral_free_list(pin_req);
out_err_probe_mac:
	platform_set_drvdata(pdev, NULL);
	free_netdev(ndev);

	return rc;
}
1563
/* Tear down the MAC: detach from the MII bus, unregister the netdev and
 * release the IRQ and pin resources acquired at probe time. */
static int __devexit bfin_mac_remove(struct platform_device *pdev)
{
	struct net_device *ndev = platform_get_drvdata(pdev);
	struct bfin_mac_local *lp = netdev_priv(ndev);

	platform_set_drvdata(pdev, NULL);

	/* drop the back-pointer the MII bus holds to this netdev */
	lp->mii_bus->priv = NULL;

	unregister_netdev(ndev);

	free_irq(IRQ_MAC_RX, ndev);

	free_netdev(ndev);

	peripheral_free_list(pin_req);

	return 0;
}
1583
1584 #ifdef CONFIG_PM
/* Suspend: either arm wake-on-LAN (receiver kept running) or shut the
 * interface down completely. */
static int bfin_mac_suspend(struct platform_device *pdev, pm_message_t mesg)
{
	struct net_device *net_dev = platform_get_drvdata(pdev);
	struct bfin_mac_local *lp = netdev_priv(net_dev);

	if (lp->wol) {
		/* keep RX enabled, TX off, and arm wakeup detection
		 * (MPKE — presumably magic-packet enable; confirm HRM) */
		bfin_write_EMAC_OPMODE((bfin_read_EMAC_OPMODE() & ~TE) | RE);
		bfin_write_EMAC_WKUP_CTL(MPKE);
		enable_irq_wake(IRQ_MAC_WAKEDET);
	} else {
		if (netif_running(net_dev))
			bfin_mac_close(net_dev);
	}

	return 0;
}
1601
/* Resume: undo whichever of the two suspend paths was taken. */
static int bfin_mac_resume(struct platform_device *pdev)
{
	struct net_device *net_dev = platform_get_drvdata(pdev);
	struct bfin_mac_local *lp = netdev_priv(net_dev);

	if (lp->wol) {
		/* re-enable TX and disarm wakeup detection */
		bfin_write_EMAC_OPMODE(bfin_read_EMAC_OPMODE() | TE);
		bfin_write_EMAC_WKUP_CTL(0);
		disable_irq_wake(IRQ_MAC_WAKEDET);
	} else {
		if (netif_running(net_dev))
			bfin_mac_open(net_dev);
	}

	return 0;
}
1618 #else
1619 #define bfin_mac_suspend NULL
1620 #define bfin_mac_resume NULL
1621 #endif /* CONFIG_PM */
1622
1623 static int __devinit bfin_mii_bus_probe(struct platform_device *pdev)
1624 {
1625 struct mii_bus *miibus;
1626 int rc, i;
1627
1628 /*
1629 * We are setting up a network card,
1630 * so set the GPIO pins to Ethernet mode
1631 */
1632 rc = peripheral_request_list(pin_req, DRV_NAME);
1633 if (rc) {
1634 dev_err(&pdev->dev, "Requesting peripherals failed!\n");
1635 return rc;
1636 }
1637
1638 rc = -ENOMEM;
1639 miibus = mdiobus_alloc();
1640 if (miibus == NULL)
1641 goto out_err_alloc;
1642 miibus->read = bfin_mdiobus_read;
1643 miibus->write = bfin_mdiobus_write;
1644 miibus->reset = bfin_mdiobus_reset;
1645
1646 miibus->parent = &pdev->dev;
1647 miibus->name = "bfin_mii_bus";
1648 snprintf(miibus->id, MII_BUS_ID_SIZE, "0");
1649 miibus->irq = kmalloc(sizeof(int)*PHY_MAX_ADDR, GFP_KERNEL);
1650 if (miibus->irq == NULL)
1651 goto out_err_alloc;
1652 for (i = 0; i < PHY_MAX_ADDR; ++i)
1653 miibus->irq[i] = PHY_POLL;
1654
1655 rc = mdiobus_register(miibus);
1656 if (rc) {
1657 dev_err(&pdev->dev, "Cannot register MDIO bus!\n");
1658 goto out_err_mdiobus_register;
1659 }
1660
1661 platform_set_drvdata(pdev, miibus);
1662 return 0;
1663
1664 out_err_mdiobus_register:
1665 kfree(miibus->irq);
1666 mdiobus_free(miibus);
1667 out_err_alloc:
1668 peripheral_free_list(pin_req);
1669
1670 return rc;
1671 }
1672
/* Tear down the MDIO bus in reverse order of bfin_mii_bus_probe(). */
static int __devexit bfin_mii_bus_remove(struct platform_device *pdev)
{
	struct mii_bus *miibus = platform_get_drvdata(pdev);
	platform_set_drvdata(pdev, NULL);
	mdiobus_unregister(miibus);
	kfree(miibus->irq);
	mdiobus_free(miibus);
	peripheral_free_list(pin_req);
	return 0;
}
1683
/* Platform driver for the MDIO bus; must be registered before the MAC
 * driver, which looks its mii_bus up through platform data. */
static struct platform_driver bfin_mii_bus_driver = {
	.probe = bfin_mii_bus_probe,
	.remove = __devexit_p(bfin_mii_bus_remove),
	.driver = {
		.name = "bfin_mii_bus",
		.owner	= THIS_MODULE,
	},
};
1692
/* Platform driver for the EMAC proper (suspend/resume are NULL stubs
 * when CONFIG_PM is not set). */
static struct platform_driver bfin_mac_driver = {
	.probe = bfin_mac_probe,
	.remove = __devexit_p(bfin_mac_remove),
	.resume = bfin_mac_resume,
	.suspend = bfin_mac_suspend,
	.driver = {
		.name = DRV_NAME,
		.owner	= THIS_MODULE,
	},
};
1703
1704 static int __init bfin_mac_init(void)
1705 {
1706 int ret;
1707 ret = platform_driver_register(&bfin_mii_bus_driver);
1708 if (!ret)
1709 return platform_driver_register(&bfin_mac_driver);
1710 return -ENODEV;
1711 }
1712
1713 module_init(bfin_mac_init);
1714
/* Module exit: unregister the drivers in reverse order of init. */
static void __exit bfin_mac_cleanup(void)
{
	platform_driver_unregister(&bfin_mac_driver);
	platform_driver_unregister(&bfin_mii_bus_driver);
}
1720
1721 module_exit(bfin_mac_cleanup);
1722
This page took 0.065149 seconds and 6 git commands to generate.