/* drivers/net/ethernet/freescale/gianfar.c
 *
 * Gianfar Ethernet Driver
 * This driver is designed for the non-CPM ethernet controllers
 * on the 85xx and 83xx family of integrated processors
 * Based on 8260_io/fcc_enet.c
 *
 * Maintainer: Kumar Gala
 * Modifier: Sandeep Gopalpet <sandeep.kumar@freescale.com>
 *
 * Copyright 2002-2009, 2011 Freescale Semiconductor, Inc.
 * Copyright 2007 MontaVista Software, Inc.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or (at your
 * option) any later version.
 *
 * Gianfar:  AKA Lambda Draconis, "Dragon"
 *
 * The driver is initialized through of_device.  Configuration information
 * is therefore conveyed through an OF-style device tree.
 *
 * The Gianfar Ethernet Controller uses a ring of buffer
 * descriptors.  The beginning is indicated by a register
 * pointing to the physical address of the start of the ring.
 * The end is determined by a "wrap" bit being set in the
 * last descriptor of the ring.
 *
 * When a packet is received, the RXF bit in the
 * IEVENT register is set, triggering an interrupt when the
 * corresponding bit in the IMASK register is also set (if
 * interrupt coalescing is active, then the interrupt may not
 * happen immediately, but will wait until either a set number
 * of frames or amount of time have passed).  In NAPI, the
 * interrupt handler will signal there is work to be done, and
 * exit.  This method will start at the last known empty
 * descriptor, and process every subsequent descriptor until there
 * are none left with data (NAPI will stop after a set number of
 * packets to give time to other tasks, but will eventually
 * process all the packets).  The data arrives inside a
 * pre-allocated skb, and so after the skb is passed up to the
 * stack, a new skb must be allocated, and the address field in
 * the buffer descriptor must be updated to indicate this new
 * skb.
 *
 * When the kernel requests that a packet be transmitted, the
 * driver starts where it left off last time, and points the
 * descriptor at the buffer which was passed in.  The driver
 * then informs the DMA engine that there are packets ready to
 * be transmitted.  Once the controller is finished transmitting
 * the packet, an interrupt may be triggered (under the same
 * conditions as for reception, but depending on the TXF bit).
 * The driver then cleans up the buffer.
 */
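/*
 * Illustrative sketch (not part of the driver): the RX flow described
 * above amounts to walking the descriptor ring until a descriptor the
 * hardware has not yet filled is found, re-arming each one behind us.
 * Field names follow the rxbd8/gfar_priv_rx_q definitions in gianfar.h:
 *
 *	struct rxbd8 *bdp = rx_queue->cur_rx;
 *
 *	while (!(bdp->status & RXBD_EMPTY)) {
 *		// pass bdp's skb up the stack, attach a fresh skb ...
 *		bdp->status |= RXBD_EMPTY;	// hand the BD back to HW
 *		if (bdp->status & RXBD_WRAP)	// last BD wraps to start
 *			bdp = rx_queue->rx_bd_base;
 *		else
 *			bdp++;
 *	}
 */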
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/unistd.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/if_vlan.h>
#include <linux/spinlock.h>
#include <linux/of_mdio.h>
#include <linux/of_platform.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/net_tstamp.h>

#include <asm/uaccess.h>
#include <linux/module.h>
#include <linux/dma-mapping.h>
#include <linux/crc32.h>
#include <linux/mii.h>
#include <linux/phy.h>
#include <linux/phy_fixed.h>
#include <linux/of.h>
#include <linux/of_net.h>

#include "gianfar.h"
#include "fsl_pq_mdio.h"
#define TX_TIMEOUT      (1*HZ)

const char gfar_driver_version[] = "1.3";
static int gfar_enet_open(struct net_device *dev);
static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev);
static void gfar_reset_task(struct work_struct *work);
static void gfar_timeout(struct net_device *dev);
static int gfar_close(struct net_device *dev);
struct sk_buff *gfar_new_skb(struct net_device *dev);
static void gfar_new_rxbdp(struct gfar_priv_rx_q *rx_queue, struct rxbd8 *bdp,
			   struct sk_buff *skb);
static int gfar_set_mac_address(struct net_device *dev);
static int gfar_change_mtu(struct net_device *dev, int new_mtu);
static irqreturn_t gfar_error(int irq, void *dev_id);
static irqreturn_t gfar_transmit(int irq, void *dev_id);
static irqreturn_t gfar_interrupt(int irq, void *dev_id);
static void adjust_link(struct net_device *dev);
static void init_registers(struct net_device *dev);
static int init_phy(struct net_device *dev);
static int gfar_probe(struct platform_device *ofdev);
static int gfar_remove(struct platform_device *ofdev);
static void free_skb_resources(struct gfar_private *priv);
static void gfar_set_multi(struct net_device *dev);
static void gfar_set_hash_for_addr(struct net_device *dev, u8 *addr);
static void gfar_configure_serdes(struct net_device *dev);
static int gfar_poll(struct napi_struct *napi, int budget);
#ifdef CONFIG_NET_POLL_CONTROLLER
static void gfar_netpoll(struct net_device *dev);
#endif
int gfar_clean_rx_ring(struct gfar_priv_rx_q *rx_queue, int rx_work_limit);
static int gfar_clean_tx_ring(struct gfar_priv_tx_q *tx_queue);
static int gfar_process_frame(struct net_device *dev, struct sk_buff *skb,
			      int amount_pull, struct napi_struct *napi);
void gfar_halt(struct net_device *dev);
static void gfar_halt_nodisable(struct net_device *dev);
void gfar_start(struct net_device *dev);
static void gfar_clear_exact_match(struct net_device *dev);
static void gfar_set_mac_for_addr(struct net_device *dev, int num,
				  const u8 *addr);
static int gfar_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);

MODULE_AUTHOR("Freescale Semiconductor, Inc");
MODULE_DESCRIPTION("Gianfar Ethernet Driver");
MODULE_LICENSE("GPL");
static void gfar_init_rxbdp(struct gfar_priv_rx_q *rx_queue, struct rxbd8 *bdp,
			    dma_addr_t buf)
{
	u32 lstatus;

	bdp->bufPtr = buf;

	lstatus = BD_LFLAG(RXBD_EMPTY | RXBD_INTERRUPT);
	if (bdp == rx_queue->rx_bd_base + rx_queue->rx_ring_size - 1)
		lstatus |= BD_LFLAG(RXBD_WRAP);

	bdp->lstatus = lstatus;
}
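/*
 * Note on the lstatus write above: each buffer descriptor packs its
 * 16-bit status flags and 16-bit data length into a single 32-bit word,
 * and BD_LFLAG() (from gianfar.h) simply shifts flag bits into the upper
 * half, e.g. BD_LFLAG(RXBD_EMPTY | RXBD_INTERRUPT) ==
 * ((RXBD_EMPTY | RXBD_INTERRUPT) << 16).  Writing lstatus as one word
 * lets the empty bit and the length reach the hardware in a single store.
 */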
static int gfar_init_bds(struct net_device *ndev)
{
	struct gfar_private *priv = netdev_priv(ndev);
	struct gfar_priv_tx_q *tx_queue = NULL;
	struct gfar_priv_rx_q *rx_queue = NULL;
	struct txbd8 *txbdp;
	struct rxbd8 *rxbdp;
	int i, j;

	for (i = 0; i < priv->num_tx_queues; i++) {
		tx_queue = priv->tx_queue[i];
		/* Initialize some variables in our dev structure */
		tx_queue->num_txbdfree = tx_queue->tx_ring_size;
		tx_queue->dirty_tx = tx_queue->tx_bd_base;
		tx_queue->cur_tx = tx_queue->tx_bd_base;
		tx_queue->skb_curtx = 0;
		tx_queue->skb_dirtytx = 0;

		/* Initialize Transmit Descriptor Ring */
		txbdp = tx_queue->tx_bd_base;
		for (j = 0; j < tx_queue->tx_ring_size; j++) {
			txbdp->lstatus = 0;
			txbdp->bufPtr = 0;
			txbdp++;
		}

		/* Set the last descriptor in the ring to indicate wrap */
		txbdp--;
		txbdp->status |= TXBD_WRAP;
	}

	for (i = 0; i < priv->num_rx_queues; i++) {
		rx_queue = priv->rx_queue[i];
		rx_queue->cur_rx = rx_queue->rx_bd_base;
		rx_queue->skb_currx = 0;
		rxbdp = rx_queue->rx_bd_base;

		for (j = 0; j < rx_queue->rx_ring_size; j++) {
			struct sk_buff *skb = rx_queue->rx_skbuff[j];

			if (skb) {
				gfar_init_rxbdp(rx_queue, rxbdp,
						rxbdp->bufPtr);
			} else {
				skb = gfar_new_skb(ndev);
				if (!skb) {
					netdev_err(ndev, "Can't allocate RX buffers\n");
					goto err_rxalloc_fail;
				}
				rx_queue->rx_skbuff[j] = skb;

				gfar_new_rxbdp(rx_queue, rxbdp, skb);
			}

			rxbdp++;
		}
	}

	return 0;

err_rxalloc_fail:
	free_skb_resources(priv);
	return -ENOMEM;
}
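/*
 * gfar_init_bds() runs both at first open and on resume (see
 * gfar_restore() below): RX entries that already own an skb are only
 * re-armed via gfar_init_rxbdp(), while empty slots get a fresh skb
 * from gfar_new_skb().  Only the latter can fail, which is why the
 * error path frees whatever was allocated and reports -ENOMEM.
 */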
static int gfar_alloc_skb_resources(struct net_device *ndev)
{
	void *vaddr;
	dma_addr_t addr;
	int i, j, k;
	struct gfar_private *priv = netdev_priv(ndev);
	struct device *dev = &priv->ofdev->dev;
	struct gfar_priv_tx_q *tx_queue = NULL;
	struct gfar_priv_rx_q *rx_queue = NULL;

	priv->total_tx_ring_size = 0;
	for (i = 0; i < priv->num_tx_queues; i++)
		priv->total_tx_ring_size += priv->tx_queue[i]->tx_ring_size;

	priv->total_rx_ring_size = 0;
	for (i = 0; i < priv->num_rx_queues; i++)
		priv->total_rx_ring_size += priv->rx_queue[i]->rx_ring_size;

	/* Allocate memory for the buffer descriptors */
	vaddr = dma_alloc_coherent(dev,
			sizeof(struct txbd8) * priv->total_tx_ring_size +
			sizeof(struct rxbd8) * priv->total_rx_ring_size,
			&addr, GFP_KERNEL);
	if (!vaddr) {
		netif_err(priv, ifup, ndev,
			  "Could not allocate buffer descriptors!\n");
		return -ENOMEM;
	}

	for (i = 0; i < priv->num_tx_queues; i++) {
		tx_queue = priv->tx_queue[i];
		tx_queue->tx_bd_base = vaddr;
		tx_queue->tx_bd_dma_base = addr;
		tx_queue->dev = ndev;
		/* enet DMA only understands physical addresses */
		addr  += sizeof(struct txbd8) * tx_queue->tx_ring_size;
		vaddr += sizeof(struct txbd8) * tx_queue->tx_ring_size;
	}

	/* Start the rx descriptor ring where the tx ring leaves off */
	for (i = 0; i < priv->num_rx_queues; i++) {
		rx_queue = priv->rx_queue[i];
		rx_queue->rx_bd_base = vaddr;
		rx_queue->rx_bd_dma_base = addr;
		rx_queue->dev = ndev;
		addr  += sizeof(struct rxbd8) * rx_queue->rx_ring_size;
		vaddr += sizeof(struct rxbd8) * rx_queue->rx_ring_size;
	}
	/* Setup the skbuff rings */
	for (i = 0; i < priv->num_tx_queues; i++) {
		tx_queue = priv->tx_queue[i];
		tx_queue->tx_skbuff = kmalloc(sizeof(*tx_queue->tx_skbuff) *
					      tx_queue->tx_ring_size, GFP_KERNEL);
		if (!tx_queue->tx_skbuff) {
			netif_err(priv, ifup, ndev,
				  "Could not allocate tx_skbuff\n");
			goto cleanup;
		}

		for (k = 0; k < tx_queue->tx_ring_size; k++)
			tx_queue->tx_skbuff[k] = NULL;
	}

	for (i = 0; i < priv->num_rx_queues; i++) {
		rx_queue = priv->rx_queue[i];
		rx_queue->rx_skbuff = kmalloc(sizeof(*rx_queue->rx_skbuff) *
					      rx_queue->rx_ring_size, GFP_KERNEL);
		if (!rx_queue->rx_skbuff) {
			netif_err(priv, ifup, ndev,
				  "Could not allocate rx_skbuff\n");
			goto cleanup;
		}

		for (j = 0; j < rx_queue->rx_ring_size; j++)
			rx_queue->rx_skbuff[j] = NULL;
	}

	if (gfar_init_bds(ndev))
		goto cleanup;

	return 0;

cleanup:
	free_skb_resources(priv);
	return -ENOMEM;
}
static void gfar_init_tx_rx_base(struct gfar_private *priv)
{
	struct gfar __iomem *regs = priv->gfargrp[0].regs;
	u32 __iomem *baddr;
	int i;

	baddr = &regs->tbase0;
	for (i = 0; i < priv->num_tx_queues; i++) {
		gfar_write(baddr, priv->tx_queue[i]->tx_bd_dma_base);
		baddr += 2;
	}

	baddr = &regs->rbase0;
	for (i = 0; i < priv->num_rx_queues; i++) {
		gfar_write(baddr, priv->rx_queue[i]->rx_bd_dma_base);
		baddr += 2;
	}
}
static void gfar_init_mac(struct net_device *ndev)
{
	struct gfar_private *priv = netdev_priv(ndev);
	struct gfar __iomem *regs = priv->gfargrp[0].regs;
	u32 rctrl = 0;
	u32 tctrl = 0;
	u32 attrs = 0;

	/* write the tx/rx base registers */
	gfar_init_tx_rx_base(priv);

	/* Configure the coalescing support */
	gfar_configure_coalescing(priv, 0xFF, 0xFF);

	if (priv->rx_filer_enable) {
		rctrl |= RCTRL_FILREN;
		/* Program the RIR0 reg with the required distribution */
		gfar_write(&regs->rir0, DEFAULT_RIR0);
	}

	if (ndev->features & NETIF_F_RXCSUM)
		rctrl |= RCTRL_CHECKSUMMING;

	if (priv->extended_hash) {
		rctrl |= RCTRL_EXTHASH;

		gfar_clear_exact_match(ndev);
	}

	if (priv->padding) {
		rctrl &= ~RCTRL_PAL_MASK;
		rctrl |= RCTRL_PADDING(priv->padding);
	}

	/* Insert receive time stamps into padding alignment bytes */
	if (priv->device_flags & FSL_GIANFAR_DEV_HAS_TIMER) {
		rctrl &= ~RCTRL_PAL_MASK;
		rctrl |= RCTRL_PADDING(8);
		priv->padding = 8;
	}

	/* Enable HW time stamping if requested from user space */
	if (priv->hwts_rx_en)
		rctrl |= RCTRL_PRSDEP_INIT | RCTRL_TS_ENABLE;

	if (ndev->features & NETIF_F_HW_VLAN_RX)
		rctrl |= RCTRL_VLEX | RCTRL_PRSDEP_INIT;

	/* Init rctrl based on our settings */
	gfar_write(&regs->rctrl, rctrl);

	if (ndev->features & NETIF_F_IP_CSUM)
		tctrl |= TCTRL_INIT_CSUM;

	tctrl |= TCTRL_TXSCHED_PRIO;

	gfar_write(&regs->tctrl, tctrl);

	/* Set the extraction length and index */
	attrs = ATTRELI_EL(priv->rx_stash_size) |
		ATTRELI_EI(priv->rx_stash_index);

	gfar_write(&regs->attreli, attrs);

	/* Start with defaults, and add stashing or locking
	 * depending on the appropriate variables */
	attrs = ATTR_INIT_SETTINGS;

	if (priv->bd_stash_en)
		attrs |= ATTR_BDSTASH;

	if (priv->rx_stash_size != 0)
		attrs |= ATTR_BUFSTASH;

	gfar_write(&regs->attr, attrs);

	gfar_write(&regs->fifo_tx_thr, priv->fifo_threshold);
	gfar_write(&regs->fifo_tx_starve, priv->fifo_starve);
	gfar_write(&regs->fifo_tx_starve_shutoff, priv->fifo_starve_off);
}
static struct net_device_stats *gfar_get_stats(struct net_device *dev)
{
	struct gfar_private *priv = netdev_priv(dev);
	unsigned long rx_packets = 0, rx_bytes = 0, rx_dropped = 0;
	unsigned long tx_packets = 0, tx_bytes = 0;
	int i;

	for (i = 0; i < priv->num_rx_queues; i++) {
		rx_packets += priv->rx_queue[i]->stats.rx_packets;
		rx_bytes   += priv->rx_queue[i]->stats.rx_bytes;
		rx_dropped += priv->rx_queue[i]->stats.rx_dropped;
	}

	dev->stats.rx_packets = rx_packets;
	dev->stats.rx_bytes   = rx_bytes;
	dev->stats.rx_dropped = rx_dropped;

	for (i = 0; i < priv->num_tx_queues; i++) {
		tx_bytes   += priv->tx_queue[i]->stats.tx_bytes;
		tx_packets += priv->tx_queue[i]->stats.tx_packets;
	}

	dev->stats.tx_bytes   = tx_bytes;
	dev->stats.tx_packets = tx_packets;

	return &dev->stats;
}
static const struct net_device_ops gfar_netdev_ops = {
	.ndo_open = gfar_enet_open,
	.ndo_start_xmit = gfar_start_xmit,
	.ndo_stop = gfar_close,
	.ndo_change_mtu = gfar_change_mtu,
	.ndo_set_features = gfar_set_features,
	.ndo_set_rx_mode = gfar_set_multi,
	.ndo_tx_timeout = gfar_timeout,
	.ndo_do_ioctl = gfar_ioctl,
	.ndo_get_stats = gfar_get_stats,
	.ndo_set_mac_address = eth_mac_addr,
	.ndo_validate_addr = eth_validate_addr,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller = gfar_netpoll,
#endif
};
void lock_rx_qs(struct gfar_private *priv)
{
	int i;

	for (i = 0; i < priv->num_rx_queues; i++)
		spin_lock(&priv->rx_queue[i]->rxlock);
}

void lock_tx_qs(struct gfar_private *priv)
{
	int i;

	for (i = 0; i < priv->num_tx_queues; i++)
		spin_lock(&priv->tx_queue[i]->txlock);
}

void unlock_rx_qs(struct gfar_private *priv)
{
	int i;

	for (i = 0; i < priv->num_rx_queues; i++)
		spin_unlock(&priv->rx_queue[i]->rxlock);
}

void unlock_tx_qs(struct gfar_private *priv)
{
	int i;

	for (i = 0; i < priv->num_tx_queues; i++)
		spin_unlock(&priv->tx_queue[i]->txlock);
}
static bool gfar_is_vlan_on(struct gfar_private *priv)
{
	return (priv->ndev->features & NETIF_F_HW_VLAN_RX) ||
	       (priv->ndev->features & NETIF_F_HW_VLAN_TX);
}

/* Returns 1 if incoming frames use an FCB */
static inline int gfar_uses_fcb(struct gfar_private *priv)
{
	return gfar_is_vlan_on(priv) ||
	       (priv->ndev->features & NETIF_F_RXCSUM) ||
	       (priv->device_flags & FSL_GIANFAR_DEV_HAS_TIMER);
}
static void free_tx_pointers(struct gfar_private *priv)
{
	int i;

	for (i = 0; i < priv->num_tx_queues; i++)
		kfree(priv->tx_queue[i]);
}

static void free_rx_pointers(struct gfar_private *priv)
{
	int i;

	for (i = 0; i < priv->num_rx_queues; i++)
		kfree(priv->rx_queue[i]);
}

static void unmap_group_regs(struct gfar_private *priv)
{
	int i;

	for (i = 0; i < MAXGROUPS; i++)
		if (priv->gfargrp[i].regs)
			iounmap(priv->gfargrp[i].regs);
}

static void disable_napi(struct gfar_private *priv)
{
	int i;

	for (i = 0; i < priv->num_grps; i++)
		napi_disable(&priv->gfargrp[i].napi);
}

static void enable_napi(struct gfar_private *priv)
{
	int i;

	for (i = 0; i < priv->num_grps; i++)
		napi_enable(&priv->gfargrp[i].napi);
}
static int gfar_parse_group(struct device_node *np,
			    struct gfar_private *priv, const char *model)
{
	u32 *queue_mask;

	priv->gfargrp[priv->num_grps].regs = of_iomap(np, 0);
	if (!priv->gfargrp[priv->num_grps].regs)
		return -ENOMEM;

	priv->gfargrp[priv->num_grps].interruptTransmit =
		irq_of_parse_and_map(np, 0);

	/* If we aren't the FEC we have multiple interrupts */
	if (model && strcasecmp(model, "FEC")) {
		priv->gfargrp[priv->num_grps].interruptReceive =
			irq_of_parse_and_map(np, 1);
		priv->gfargrp[priv->num_grps].interruptError =
			irq_of_parse_and_map(np, 2);
		if (priv->gfargrp[priv->num_grps].interruptTransmit == NO_IRQ ||
		    priv->gfargrp[priv->num_grps].interruptReceive  == NO_IRQ ||
		    priv->gfargrp[priv->num_grps].interruptError    == NO_IRQ)
			return -EINVAL;
	}

	priv->gfargrp[priv->num_grps].grp_id = priv->num_grps;
	priv->gfargrp[priv->num_grps].priv = priv;
	spin_lock_init(&priv->gfargrp[priv->num_grps].grplock);
	if (priv->mode == MQ_MG_MODE) {
		queue_mask = (u32 *)of_get_property(np,
				"fsl,rx-bit-map", NULL);
		priv->gfargrp[priv->num_grps].rx_bit_map =
			queue_mask ? *queue_mask : (DEFAULT_MAPPING >> priv->num_grps);
		queue_mask = (u32 *)of_get_property(np,
				"fsl,tx-bit-map", NULL);
		priv->gfargrp[priv->num_grps].tx_bit_map =
			queue_mask ? *queue_mask : (DEFAULT_MAPPING >> priv->num_grps);
	} else {
		priv->gfargrp[priv->num_grps].rx_bit_map = 0xFF;
		priv->gfargrp[priv->num_grps].tx_bit_map = 0xFF;
	}
	priv->num_grps++;

	return 0;
}
static int gfar_of_init(struct platform_device *ofdev, struct net_device **pdev)
{
	const char *model;
	const char *ctype;
	const void *mac_addr;
	int err = 0, i;
	struct net_device *dev = NULL;
	struct gfar_private *priv = NULL;
	struct device_node *np = ofdev->dev.of_node;
	struct device_node *child = NULL;
	const u32 *stash;
	const u32 *stash_len;
	const u32 *stash_idx;
	unsigned int num_tx_qs, num_rx_qs;
	u32 *tx_queues, *rx_queues;

	if (!np || !of_device_is_available(np))
		return -ENODEV;

	/* parse the num of tx and rx queues */
	tx_queues = (u32 *)of_get_property(np, "fsl,num_tx_queues", NULL);
	num_tx_qs = tx_queues ? *tx_queues : 1;

	if (num_tx_qs > MAX_TX_QS) {
		pr_err("num_tx_qs(=%d) greater than MAX_TX_QS(=%d)\n",
		       num_tx_qs, MAX_TX_QS);
		pr_err("Cannot do alloc_etherdev, aborting\n");
		return -EINVAL;
	}

	rx_queues = (u32 *)of_get_property(np, "fsl,num_rx_queues", NULL);
	num_rx_qs = rx_queues ? *rx_queues : 1;

	if (num_rx_qs > MAX_RX_QS) {
		pr_err("num_rx_qs(=%d) greater than MAX_RX_QS(=%d)\n",
		       num_rx_qs, MAX_RX_QS);
		pr_err("Cannot do alloc_etherdev, aborting\n");
		return -EINVAL;
	}

	*pdev = alloc_etherdev_mq(sizeof(*priv), num_tx_qs);
	dev = *pdev;
	if (NULL == dev)
		return -ENOMEM;

	priv = netdev_priv(dev);
	priv->node = ofdev->dev.of_node;
	priv->ndev = dev;

	priv->num_tx_queues = num_tx_qs;
	netif_set_real_num_rx_queues(dev, num_rx_qs);
	priv->num_rx_queues = num_rx_qs;
	priv->num_grps = 0x0;

	/* Init Rx queue filer rule set linked list */
	INIT_LIST_HEAD(&priv->rx_list.list);
	priv->rx_list.count = 0;
	mutex_init(&priv->rx_queue_access);

	model = of_get_property(np, "model", NULL);

	for (i = 0; i < MAXGROUPS; i++)
		priv->gfargrp[i].regs = NULL;

	/* Parse and initialize group specific information */
	if (of_device_is_compatible(np, "fsl,etsec2")) {
		priv->mode = MQ_MG_MODE;
		for_each_child_of_node(np, child) {
			err = gfar_parse_group(child, priv, model);
			if (err)
				goto err_grp_init;
		}
	} else {
		priv->mode = SQ_SG_MODE;
		err = gfar_parse_group(np, priv, model);
		if (err)
			goto err_grp_init;
	}

	for (i = 0; i < priv->num_tx_queues; i++)
		priv->tx_queue[i] = NULL;
	for (i = 0; i < priv->num_rx_queues; i++)
		priv->rx_queue[i] = NULL;

	for (i = 0; i < priv->num_tx_queues; i++) {
		priv->tx_queue[i] = kzalloc(sizeof(struct gfar_priv_tx_q),
					    GFP_KERNEL);
		if (!priv->tx_queue[i]) {
			err = -ENOMEM;
			goto tx_alloc_failed;
		}
		priv->tx_queue[i]->tx_skbuff = NULL;
		priv->tx_queue[i]->qindex = i;
		priv->tx_queue[i]->dev = dev;
		spin_lock_init(&(priv->tx_queue[i]->txlock));
	}

	for (i = 0; i < priv->num_rx_queues; i++) {
		priv->rx_queue[i] = kzalloc(sizeof(struct gfar_priv_rx_q),
					    GFP_KERNEL);
		if (!priv->rx_queue[i]) {
			err = -ENOMEM;
			goto rx_alloc_failed;
		}
		priv->rx_queue[i]->rx_skbuff = NULL;
		priv->rx_queue[i]->qindex = i;
		priv->rx_queue[i]->dev = dev;
		spin_lock_init(&(priv->rx_queue[i]->rxlock));
	}

	stash = of_get_property(np, "bd-stash", NULL);

	if (stash) {
		priv->device_flags |= FSL_GIANFAR_DEV_HAS_BD_STASHING;
		priv->bd_stash_en = 1;
	}

	stash_len = of_get_property(np, "rx-stash-len", NULL);

	if (stash_len)
		priv->rx_stash_size = *stash_len;

	stash_idx = of_get_property(np, "rx-stash-idx", NULL);

	if (stash_idx)
		priv->rx_stash_index = *stash_idx;

	if (stash_len || stash_idx)
		priv->device_flags |= FSL_GIANFAR_DEV_HAS_BUF_STASHING;

	mac_addr = of_get_mac_address(np);
	if (mac_addr)
		memcpy(dev->dev_addr, mac_addr, ETH_ALEN);

	if (model && !strcasecmp(model, "TSEC"))
		priv->device_flags =
			FSL_GIANFAR_DEV_HAS_GIGABIT |
			FSL_GIANFAR_DEV_HAS_COALESCE |
			FSL_GIANFAR_DEV_HAS_RMON |
			FSL_GIANFAR_DEV_HAS_MULTI_INTR;
	if (model && !strcasecmp(model, "eTSEC"))
		priv->device_flags =
			FSL_GIANFAR_DEV_HAS_GIGABIT |
			FSL_GIANFAR_DEV_HAS_COALESCE |
			FSL_GIANFAR_DEV_HAS_RMON |
			FSL_GIANFAR_DEV_HAS_MULTI_INTR |
			FSL_GIANFAR_DEV_HAS_PADDING |
			FSL_GIANFAR_DEV_HAS_CSUM |
			FSL_GIANFAR_DEV_HAS_VLAN |
			FSL_GIANFAR_DEV_HAS_MAGIC_PACKET |
			FSL_GIANFAR_DEV_HAS_EXTENDED_HASH |
			FSL_GIANFAR_DEV_HAS_TIMER;

	ctype = of_get_property(np, "phy-connection-type", NULL);

	/* We only care about rgmii-id.  The rest are autodetected */
	if (ctype && !strcmp(ctype, "rgmii-id"))
		priv->interface = PHY_INTERFACE_MODE_RGMII_ID;
	else
		priv->interface = PHY_INTERFACE_MODE_MII;

	if (of_get_property(np, "fsl,magic-packet", NULL))
		priv->device_flags |= FSL_GIANFAR_DEV_HAS_MAGIC_PACKET;

	priv->phy_node = of_parse_phandle(np, "phy-handle", 0);

	/* Find the TBI PHY.  If it's not there, we don't support SGMII */
	priv->tbi_node = of_parse_phandle(np, "tbi-handle", 0);

	return 0;

rx_alloc_failed:
	free_rx_pointers(priv);
tx_alloc_failed:
	free_tx_pointers(priv);
err_grp_init:
	unmap_group_regs(priv);
	return err;
}
static int gfar_hwtstamp_ioctl(struct net_device *netdev,
			       struct ifreq *ifr, int cmd)
{
	struct hwtstamp_config config;
	struct gfar_private *priv = netdev_priv(netdev);

	if (copy_from_user(&config, ifr->ifr_data, sizeof(config)))
		return -EFAULT;

	/* reserved for future extensions */
	if (config.flags)
		return -EINVAL;

	switch (config.tx_type) {
	case HWTSTAMP_TX_OFF:
		priv->hwts_tx_en = 0;
		break;
	case HWTSTAMP_TX_ON:
		if (!(priv->device_flags & FSL_GIANFAR_DEV_HAS_TIMER))
			return -ERANGE;
		priv->hwts_tx_en = 1;
		break;
	default:
		return -ERANGE;
	}

	switch (config.rx_filter) {
	case HWTSTAMP_FILTER_NONE:
		if (priv->hwts_rx_en) {
			stop_gfar(netdev);
			priv->hwts_rx_en = 0;
			startup_gfar(netdev);
		}
		break;
	default:
		if (!(priv->device_flags & FSL_GIANFAR_DEV_HAS_TIMER))
			return -ERANGE;
		if (!priv->hwts_rx_en) {
			stop_gfar(netdev);
			priv->hwts_rx_en = 1;
			startup_gfar(netdev);
		}
		config.rx_filter = HWTSTAMP_FILTER_ALL;
		break;
	}

	return copy_to_user(ifr->ifr_data, &config, sizeof(config)) ?
		-EFAULT : 0;
}
/* Ioctl MII Interface */
static int gfar_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
{
	struct gfar_private *priv = netdev_priv(dev);

	if (!netif_running(dev))
		return -EINVAL;

	if (cmd == SIOCSHWTSTAMP)
		return gfar_hwtstamp_ioctl(dev, rq, cmd);

	if (!priv->phydev)
		return -ENODEV;

	return phy_mii_ioctl(priv->phydev, rq, cmd);
}
static unsigned int reverse_bitmap(unsigned int bit_map, unsigned int max_qs)
{
	unsigned int new_bit_map = 0x0;
	int mask = 0x1 << (max_qs - 1), i;

	for (i = 0; i < max_qs; i++) {
		if (bit_map & mask)
			new_bit_map = new_bit_map + (1 << i);
		mask = mask >> 0x1;
	}
	return new_bit_map;
}
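/*
 * Example: reverse_bitmap(0x80, 8) returns 0x01 and vice versa -- bit
 * positions are mirrored within the max_qs-wide field, converting
 * between the hardware's "MSB is queue 0" convention and the
 * for_each_set_bit() "LSB is queue 0" convention used in gfar_probe().
 */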
static u32 cluster_entry_per_class(struct gfar_private *priv, u32 rqfar,
				   u32 class)
{
	u32 rqfpr = FPR_FILER_MASK;
	u32 rqfcr = 0x0;

	rqfar--;
	rqfcr = RQFCR_CLE | RQFCR_PID_MASK | RQFCR_CMP_EXACT;
	priv->ftp_rqfpr[rqfar] = rqfpr;
	priv->ftp_rqfcr[rqfar] = rqfcr;
	gfar_write_filer(priv, rqfar, rqfcr, rqfpr);

	rqfar--;
	rqfcr = RQFCR_CMP_NOMATCH;
	priv->ftp_rqfpr[rqfar] = rqfpr;
	priv->ftp_rqfcr[rqfar] = rqfcr;
	gfar_write_filer(priv, rqfar, rqfcr, rqfpr);

	rqfar--;
	rqfcr = RQFCR_CMP_EXACT | RQFCR_PID_PARSE | RQFCR_CLE | RQFCR_AND;
	rqfpr = class;
	priv->ftp_rqfcr[rqfar] = rqfcr;
	priv->ftp_rqfpr[rqfar] = rqfpr;
	gfar_write_filer(priv, rqfar, rqfcr, rqfpr);

	rqfar--;
	rqfcr = RQFCR_CMP_EXACT | RQFCR_PID_MASK | RQFCR_AND;
	rqfpr = class;
	priv->ftp_rqfcr[rqfar] = rqfcr;
	priv->ftp_rqfpr[rqfar] = rqfpr;
	gfar_write_filer(priv, rqfar, rqfcr, rqfpr);

	return rqfar;
}
static void gfar_init_filer_table(struct gfar_private *priv)
{
	int i = 0x0;
	u32 rqfar = MAX_FILER_IDX;
	u32 rqfcr = 0x0;
	u32 rqfpr = FPR_FILER_MASK;

	/* Default rule */
	rqfcr = RQFCR_CMP_MATCH;
	priv->ftp_rqfcr[rqfar] = rqfcr;
	priv->ftp_rqfpr[rqfar] = rqfpr;
	gfar_write_filer(priv, rqfar, rqfcr, rqfpr);

	rqfar = cluster_entry_per_class(priv, rqfar, RQFPR_IPV6);
	rqfar = cluster_entry_per_class(priv, rqfar, RQFPR_IPV6 | RQFPR_UDP);
	rqfar = cluster_entry_per_class(priv, rqfar, RQFPR_IPV6 | RQFPR_TCP);
	rqfar = cluster_entry_per_class(priv, rqfar, RQFPR_IPV4);
	rqfar = cluster_entry_per_class(priv, rqfar, RQFPR_IPV4 | RQFPR_UDP);
	rqfar = cluster_entry_per_class(priv, rqfar, RQFPR_IPV4 | RQFPR_TCP);

	/* cur_filer_idx indicates the first non-masked rule */
	priv->cur_filer_idx = rqfar;

	/* Rest are masked rules */
	rqfcr = RQFCR_CMP_NOMATCH;
	for (i = 0; i < rqfar; i++) {
		priv->ftp_rqfcr[i] = rqfcr;
		priv->ftp_rqfpr[i] = rqfpr;
		gfar_write_filer(priv, i, rqfcr, rqfpr);
	}
}
static void gfar_detect_errata(struct gfar_private *priv)
{
	struct device *dev = &priv->ofdev->dev;
	unsigned int pvr = mfspr(SPRN_PVR);
	unsigned int svr = mfspr(SPRN_SVR);
	unsigned int mod = (svr >> 16) & 0xfff6; /* w/o E suffix */
	unsigned int rev = svr & 0xffff;

	/* MPC8313 Rev 2.0 and higher; All MPC837x */
	if ((pvr == 0x80850010 && mod == 0x80b0 && rev >= 0x0020) ||
	    (pvr == 0x80861010 && (mod & 0xfff9) == 0x80c0))
		priv->errata |= GFAR_ERRATA_74;

	/* MPC8313 and MPC837x all rev */
	if ((pvr == 0x80850010 && mod == 0x80b0) ||
	    (pvr == 0x80861010 && (mod & 0xfff9) == 0x80c0))
		priv->errata |= GFAR_ERRATA_76;

	/* MPC8313 and MPC837x all rev */
	if ((pvr == 0x80850010 && mod == 0x80b0) ||
	    (pvr == 0x80861010 && (mod & 0xfff9) == 0x80c0))
		priv->errata |= GFAR_ERRATA_A002;

	/* MPC8313 Rev < 2.0, MPC8548 rev 2.0 */
	if ((pvr == 0x80850010 && mod == 0x80b0 && rev < 0x0020) ||
	    (pvr == 0x80210020 && mod == 0x8030 && rev == 0x0020))
		priv->errata |= GFAR_ERRATA_12;

	if (priv->errata)
		dev_info(dev, "enabled errata workarounds, flags: 0x%x\n",
			 priv->errata);
}
/* Set up the ethernet device structure, private data,
 * and anything else we need before we start */
static int gfar_probe(struct platform_device *ofdev)
{
	u32 tempval;
	struct net_device *dev = NULL;
	struct gfar_private *priv = NULL;
	struct gfar __iomem *regs = NULL;
	int err = 0, i, grp_idx = 0;
	u32 rstat = 0, tstat = 0, rqueue = 0, tqueue = 0;
	u32 isrg = 0;
	u32 __iomem *baddr;

	err = gfar_of_init(ofdev, &dev);

	if (err)
		return err;

	priv = netdev_priv(dev);
	priv->ndev = dev;
	priv->ofdev = ofdev;
	priv->node = ofdev->dev.of_node;
	SET_NETDEV_DEV(dev, &ofdev->dev);

	spin_lock_init(&priv->bflock);
	INIT_WORK(&priv->reset_task, gfar_reset_task);

	dev_set_drvdata(&ofdev->dev, priv);
	regs = priv->gfargrp[0].regs;

	gfar_detect_errata(priv);

	/* Stop the DMA engine now, in case it was running before */
	/* (The firmware could have used it, and left it running). */
	gfar_halt(dev);

	/* Reset MAC layer */
	gfar_write(&regs->maccfg1, MACCFG1_SOFT_RESET);

	/* We need to delay at least 3 TX clocks */
	udelay(2);

	tempval = (MACCFG1_TX_FLOW | MACCFG1_RX_FLOW);
	gfar_write(&regs->maccfg1, tempval);

	/* Initialize MACCFG2. */
	tempval = MACCFG2_INIT_SETTINGS;
	if (gfar_has_errata(priv, GFAR_ERRATA_74))
		tempval |= MACCFG2_HUGEFRAME | MACCFG2_LENGTHCHECK;
	gfar_write(&regs->maccfg2, tempval);

	/* Initialize ECNTRL */
	gfar_write(&regs->ecntrl, ECNTRL_INIT_SETTINGS);

	/* Set the dev->base_addr to the gfar reg region */
	dev->base_addr = (unsigned long) regs;

	SET_NETDEV_DEV(dev, &ofdev->dev);

	/* Fill in the dev structure */
	dev->watchdog_timeo = TX_TIMEOUT;
	dev->mtu = 1500;
	dev->netdev_ops = &gfar_netdev_ops;
	dev->ethtool_ops = &gfar_ethtool_ops;

	/* Register for napi ...We are registering NAPI for each grp */
	for (i = 0; i < priv->num_grps; i++)
		netif_napi_add(dev, &priv->gfargrp[i].napi, gfar_poll, GFAR_DEV_WEIGHT);

	if (priv->device_flags & FSL_GIANFAR_DEV_HAS_CSUM) {
		dev->hw_features = NETIF_F_IP_CSUM | NETIF_F_SG |
			NETIF_F_RXCSUM;
		dev->features |= NETIF_F_IP_CSUM | NETIF_F_SG |
			NETIF_F_RXCSUM | NETIF_F_HIGHDMA;
	}

	if (priv->device_flags & FSL_GIANFAR_DEV_HAS_VLAN) {
		dev->hw_features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
		dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
	}

	if (priv->device_flags & FSL_GIANFAR_DEV_HAS_EXTENDED_HASH) {
		priv->extended_hash = 1;
		priv->hash_width = 9;

		priv->hash_regs[0] = &regs->igaddr0;
		priv->hash_regs[1] = &regs->igaddr1;
		priv->hash_regs[2] = &regs->igaddr2;
		priv->hash_regs[3] = &regs->igaddr3;
		priv->hash_regs[4] = &regs->igaddr4;
		priv->hash_regs[5] = &regs->igaddr5;
		priv->hash_regs[6] = &regs->igaddr6;
		priv->hash_regs[7] = &regs->igaddr7;
		priv->hash_regs[8] = &regs->gaddr0;
		priv->hash_regs[9] = &regs->gaddr1;
		priv->hash_regs[10] = &regs->gaddr2;
		priv->hash_regs[11] = &regs->gaddr3;
		priv->hash_regs[12] = &regs->gaddr4;
		priv->hash_regs[13] = &regs->gaddr5;
		priv->hash_regs[14] = &regs->gaddr6;
		priv->hash_regs[15] = &regs->gaddr7;
	} else {
		priv->extended_hash = 0;
		priv->hash_width = 8;

		priv->hash_regs[0] = &regs->gaddr0;
		priv->hash_regs[1] = &regs->gaddr1;
		priv->hash_regs[2] = &regs->gaddr2;
		priv->hash_regs[3] = &regs->gaddr3;
		priv->hash_regs[4] = &regs->gaddr4;
		priv->hash_regs[5] = &regs->gaddr5;
		priv->hash_regs[6] = &regs->gaddr6;
		priv->hash_regs[7] = &regs->gaddr7;
	}

	if (priv->device_flags & FSL_GIANFAR_DEV_HAS_PADDING)
		priv->padding = DEFAULT_PADDING;
	else
		priv->padding = 0;

	if (dev->features & NETIF_F_IP_CSUM ||
	    priv->device_flags & FSL_GIANFAR_DEV_HAS_TIMER)
		dev->needed_headroom = GMAC_FCB_LEN;

	/* Program the isrg regs only if number of grps > 1 */
	if (priv->num_grps > 1) {
		baddr = &regs->isrg0;
		for (i = 0; i < priv->num_grps; i++) {
			isrg |= (priv->gfargrp[i].rx_bit_map << ISRG_SHIFT_RX);
			isrg |= (priv->gfargrp[i].tx_bit_map << ISRG_SHIFT_TX);
			gfar_write(baddr, isrg);
			baddr++;
			isrg = 0x0;
		}
	}

	/* Need to reverse the bit maps as bit_map's MSB is q0
	 * but, for_each_set_bit parses from right to left, which
	 * basically reverses the queue numbers */
	for (i = 0; i < priv->num_grps; i++) {
		priv->gfargrp[i].tx_bit_map = reverse_bitmap(
				priv->gfargrp[i].tx_bit_map, MAX_TX_QS);
		priv->gfargrp[i].rx_bit_map = reverse_bitmap(
				priv->gfargrp[i].rx_bit_map, MAX_RX_QS);
	}

	/* Calculate RSTAT, TSTAT, RQUEUE and TQUEUE values,
	 * also assign queues to groups */
	for (grp_idx = 0; grp_idx < priv->num_grps; grp_idx++) {
		priv->gfargrp[grp_idx].num_rx_queues = 0x0;
		for_each_set_bit(i, &priv->gfargrp[grp_idx].rx_bit_map,
				 priv->num_rx_queues) {
			priv->gfargrp[grp_idx].num_rx_queues++;
			priv->rx_queue[i]->grp = &priv->gfargrp[grp_idx];
			rstat = rstat | (RSTAT_CLEAR_RHALT >> i);
			rqueue = rqueue | ((RQUEUE_EN0 | RQUEUE_EX0) >> i);
		}
		priv->gfargrp[grp_idx].num_tx_queues = 0x0;
		for_each_set_bit(i, &priv->gfargrp[grp_idx].tx_bit_map,
				 priv->num_tx_queues) {
			priv->gfargrp[grp_idx].num_tx_queues++;
			priv->tx_queue[i]->grp = &priv->gfargrp[grp_idx];
			tstat = tstat | (TSTAT_CLEAR_THALT >> i);
			tqueue = tqueue | (TQUEUE_EN0 >> i);
		}
		priv->gfargrp[grp_idx].rstat = rstat;
		priv->gfargrp[grp_idx].tstat = tstat;
		rstat = tstat = 0;
	}

	gfar_write(&regs->rqueue, rqueue);
	gfar_write(&regs->tqueue, tqueue);

	priv->rx_buffer_size = DEFAULT_RX_BUFFER_SIZE;

	/* Initializing some of the rx/tx queue level parameters */
	for (i = 0; i < priv->num_tx_queues; i++) {
		priv->tx_queue[i]->tx_ring_size = DEFAULT_TX_RING_SIZE;
		priv->tx_queue[i]->num_txbdfree = DEFAULT_TX_RING_SIZE;
		priv->tx_queue[i]->txcoalescing = DEFAULT_TX_COALESCE;
		priv->tx_queue[i]->txic = DEFAULT_TXIC;
	}

	for (i = 0; i < priv->num_rx_queues; i++) {
		priv->rx_queue[i]->rx_ring_size = DEFAULT_RX_RING_SIZE;
		priv->rx_queue[i]->rxcoalescing = DEFAULT_RX_COALESCE;
		priv->rx_queue[i]->rxic = DEFAULT_RXIC;
	}

	/* always enable rx filer */
	priv->rx_filer_enable = 1;
	/* Enable most messages by default */
	priv->msg_enable = (NETIF_MSG_IFUP << 1) - 1;
	/* Carrier starts down, phylib will bring it up */
	netif_carrier_off(dev);

	err = register_netdev(dev);

	if (err) {
		pr_err("%s: Cannot register net device, aborting\n", dev->name);
		goto register_fail;
	}

	device_init_wakeup(&dev->dev,
		priv->device_flags & FSL_GIANFAR_DEV_HAS_MAGIC_PACKET);

	/* fill out IRQ number and name fields */
	for (i = 0; i < priv->num_grps; i++) {
		if (priv->device_flags & FSL_GIANFAR_DEV_HAS_MULTI_INTR) {
			sprintf(priv->gfargrp[i].int_name_tx, "%s%s%c%s",
				dev->name, "_g", '0' + i, "_tx");
			sprintf(priv->gfargrp[i].int_name_rx, "%s%s%c%s",
				dev->name, "_g", '0' + i, "_rx");
			sprintf(priv->gfargrp[i].int_name_er, "%s%s%c%s",
				dev->name, "_g", '0' + i, "_er");
		} else
			strcpy(priv->gfargrp[i].int_name_tx, dev->name);
	}

	/* Initialize the filer table */
	gfar_init_filer_table(priv);

	/* Create all the sysfs files */
	gfar_init_sysfs(dev);

	/* Print out the device info */
	netdev_info(dev, "mac: %pM\n", dev->dev_addr);

	/* Even more device info helps when determining which kernel */
	/* provided which set of benchmarks. */
	netdev_info(dev, "Running with NAPI enabled\n");
	for (i = 0; i < priv->num_rx_queues; i++)
		netdev_info(dev, "RX BD ring size for Q[%d]: %d\n",
			    i, priv->rx_queue[i]->rx_ring_size);
	for (i = 0; i < priv->num_tx_queues; i++)
		netdev_info(dev, "TX BD ring size for Q[%d]: %d\n",
			    i, priv->tx_queue[i]->tx_ring_size);

	return 0;

register_fail:
	unmap_group_regs(priv);
	free_tx_pointers(priv);
	free_rx_pointers(priv);
	if (priv->phy_node)
		of_node_put(priv->phy_node);
	if (priv->tbi_node)
		of_node_put(priv->tbi_node);
	free_netdev(dev);

	return err;
}
static int gfar_remove(struct platform_device *ofdev)
{
	struct gfar_private *priv = dev_get_drvdata(&ofdev->dev);

	if (priv->phy_node)
		of_node_put(priv->phy_node);
	if (priv->tbi_node)
		of_node_put(priv->tbi_node);

	dev_set_drvdata(&ofdev->dev, NULL);

	unregister_netdev(priv->ndev);
	unmap_group_regs(priv);
	free_netdev(priv->ndev);

	return 0;
}
#ifdef CONFIG_PM

static int gfar_suspend(struct device *dev)
{
	struct gfar_private *priv = dev_get_drvdata(dev);
	struct net_device *ndev = priv->ndev;
	struct gfar __iomem *regs = priv->gfargrp[0].regs;
	unsigned long flags;
	u32 tempval;

	int magic_packet = priv->wol_en &&
		(priv->device_flags & FSL_GIANFAR_DEV_HAS_MAGIC_PACKET);

	netif_device_detach(ndev);

	if (netif_running(ndev)) {

		local_irq_save(flags);
		lock_tx_qs(priv);
		lock_rx_qs(priv);

		gfar_halt_nodisable(ndev);

		/* Disable Tx, and Rx if wake-on-LAN is disabled. */
		tempval = gfar_read(&regs->maccfg1);

		tempval &= ~MACCFG1_TX_EN;

		if (!magic_packet)
			tempval &= ~MACCFG1_RX_EN;

		gfar_write(&regs->maccfg1, tempval);

		unlock_rx_qs(priv);
		unlock_tx_qs(priv);
		local_irq_restore(flags);

		disable_napi(priv);

		if (magic_packet) {
			/* Enable interrupt on Magic Packet */
			gfar_write(&regs->imask, IMASK_MAG);

			/* Enable Magic Packet mode */
			tempval = gfar_read(&regs->maccfg2);
			tempval |= MACCFG2_MPEN;
			gfar_write(&regs->maccfg2, tempval);
		} else {
			phy_stop(priv->phydev);
		}
	}

	return 0;
}
*dev
)
1291 struct gfar_private
*priv
= dev_get_drvdata(dev
);
1292 struct net_device
*ndev
= priv
->ndev
;
1293 struct gfar __iomem
*regs
= priv
->gfargrp
[0].regs
;
1294 unsigned long flags
;
1296 int magic_packet
= priv
->wol_en
&&
1297 (priv
->device_flags
& FSL_GIANFAR_DEV_HAS_MAGIC_PACKET
);
1299 if (!netif_running(ndev
)) {
1300 netif_device_attach(ndev
);
1304 if (!magic_packet
&& priv
->phydev
)
1305 phy_start(priv
->phydev
);
1307 /* Disable Magic Packet mode, in case something
1310 local_irq_save(flags
);
1314 tempval
= gfar_read(®s
->maccfg2
);
1315 tempval
&= ~MACCFG2_MPEN
;
1316 gfar_write(®s
->maccfg2
, tempval
);
1322 local_irq_restore(flags
);
1324 netif_device_attach(ndev
);
1331 static int gfar_restore(struct device
*dev
)
1333 struct gfar_private
*priv
= dev_get_drvdata(dev
);
1334 struct net_device
*ndev
= priv
->ndev
;
1336 if (!netif_running(ndev
))
1339 gfar_init_bds(ndev
);
1340 init_registers(ndev
);
1341 gfar_set_mac_address(ndev
);
1342 gfar_init_mac(ndev
);
1347 priv
->oldduplex
= -1;
1350 phy_start(priv
->phydev
);
1352 netif_device_attach(ndev
);
static struct dev_pm_ops gfar_pm_ops = {
	.suspend = gfar_suspend,
	.resume = gfar_resume,
	.freeze = gfar_suspend,
	.thaw = gfar_resume,
	.restore = gfar_restore,
};

#define GFAR_PM_OPS (&gfar_pm_ops)

#else

#define GFAR_PM_OPS NULL

#endif
/* Reads the controller's registers to determine what interface
 * connects it to the PHY.
 */
static phy_interface_t gfar_get_interface(struct net_device *dev)
{
	struct gfar_private *priv = netdev_priv(dev);
	struct gfar __iomem *regs = priv->gfargrp[0].regs;
	u32 ecntrl;

	ecntrl = gfar_read(&regs->ecntrl);

	if (ecntrl & ECNTRL_SGMII_MODE)
		return PHY_INTERFACE_MODE_SGMII;

	if (ecntrl & ECNTRL_TBI_MODE) {
		if (ecntrl & ECNTRL_REDUCED_MODE)
			return PHY_INTERFACE_MODE_RTBI;
		else
			return PHY_INTERFACE_MODE_TBI;
	}

	if (ecntrl & ECNTRL_REDUCED_MODE) {
		if (ecntrl & ECNTRL_REDUCED_MII_MODE)
			return PHY_INTERFACE_MODE_RMII;
		else {
			phy_interface_t interface = priv->interface;

			/*
			 * This isn't autodetected right now, so it must
			 * be set by the device tree or platform code.
			 */
			if (interface == PHY_INTERFACE_MODE_RGMII_ID)
				return PHY_INTERFACE_MODE_RGMII_ID;

			return PHY_INTERFACE_MODE_RGMII;
		}
	}

	if (priv->device_flags & FSL_GIANFAR_DEV_HAS_GIGABIT)
		return PHY_INTERFACE_MODE_GMII;

	return PHY_INTERFACE_MODE_MII;
}
/* Initializes driver's PHY state, and attaches to the PHY.
 * Returns 0 on success.
 */
static int init_phy(struct net_device *dev)
{
	struct gfar_private *priv = netdev_priv(dev);
	uint gigabit_support =
		priv->device_flags & FSL_GIANFAR_DEV_HAS_GIGABIT ?
		SUPPORTED_1000baseT_Full : 0;
	phy_interface_t interface;

	priv->oldlink = 0;
	priv->oldspeed = 0;
	priv->oldduplex = -1;

	interface = gfar_get_interface(dev);

	priv->phydev = of_phy_connect(dev, priv->phy_node, &adjust_link, 0,
				      interface);
	if (!priv->phydev)
		priv->phydev = of_phy_connect_fixed_link(dev, &adjust_link,
							 interface);
	if (!priv->phydev) {
		dev_err(&dev->dev, "could not attach to PHY\n");
		return -ENODEV;
	}

	if (interface == PHY_INTERFACE_MODE_SGMII)
		gfar_configure_serdes(dev);

	/* Remove any features not supported by the controller */
	priv->phydev->supported &= (GFAR_SUPPORTED | gigabit_support);
	priv->phydev->advertising = priv->phydev->supported;

	return 0;
}
/*
 * Initialize TBI PHY interface for communicating with the
 * SERDES lynx PHY on the chip.  We communicate with this PHY
 * through the MDIO bus on each controller, treating it as a
 * "normal" PHY at the address found in the TBIPA register.  We assume
 * that the TBIPA register is valid.  Either the MDIO bus code will set
 * it to a value that doesn't conflict with other PHYs on the bus, or the
 * value doesn't matter, as there are no other PHYs on the bus.
 */
static void gfar_configure_serdes(struct net_device *dev)
{
	struct gfar_private *priv = netdev_priv(dev);
	struct phy_device *tbiphy;

	if (!priv->tbi_node) {
		dev_warn(&dev->dev, "error: SGMII mode requires that the "
			 "device tree specify a tbi-handle\n");
		return;
	}

	tbiphy = of_phy_find_device(priv->tbi_node);
	if (!tbiphy) {
		dev_err(&dev->dev, "error: Could not get TBI device\n");
		return;
	}

	/*
	 * If the link is already up, we must already be ok, and don't need to
	 * configure and reset the TBI<->SerDes link.  Maybe U-Boot configured
	 * everything for us?  Resetting it takes the link down and requires
	 * several seconds for it to come back.
	 */
	if (phy_read(tbiphy, MII_BMSR) & BMSR_LSTATUS)
		return;

	/* Single clk mode, mii mode off(for serdes communication) */
	phy_write(tbiphy, MII_TBICON, TBICON_CLK_SELECT);

	phy_write(tbiphy, MII_ADVERTISE,
		  ADVERTISE_1000XFULL | ADVERTISE_1000XPAUSE |
		  ADVERTISE_1000XPSE_ASYM);

	phy_write(tbiphy, MII_BMCR, BMCR_ANENABLE |
		  BMCR_ANRESTART | BMCR_FULLDPLX | BMCR_SPEED1000);
}
static void init_registers(struct net_device *dev)
{
	struct gfar_private *priv = netdev_priv(dev);
	struct gfar __iomem *regs = NULL;
	int i;

	for (i = 0; i < priv->num_grps; i++) {
		regs = priv->gfargrp[i].regs;
		/* Clear IEVENT */
		gfar_write(&regs->ievent, IEVENT_INIT_CLEAR);

		/* Initialize IMASK */
		gfar_write(&regs->imask, IMASK_INIT_CLEAR);
	}

	regs = priv->gfargrp[0].regs;
	/* Init hash registers to zero */
	gfar_write(&regs->igaddr0, 0);
	gfar_write(&regs->igaddr1, 0);
	gfar_write(&regs->igaddr2, 0);
	gfar_write(&regs->igaddr3, 0);
	gfar_write(&regs->igaddr4, 0);
	gfar_write(&regs->igaddr5, 0);
	gfar_write(&regs->igaddr6, 0);
	gfar_write(&regs->igaddr7, 0);

	gfar_write(&regs->gaddr0, 0);
	gfar_write(&regs->gaddr1, 0);
	gfar_write(&regs->gaddr2, 0);
	gfar_write(&regs->gaddr3, 0);
	gfar_write(&regs->gaddr4, 0);
	gfar_write(&regs->gaddr5, 0);
	gfar_write(&regs->gaddr6, 0);
	gfar_write(&regs->gaddr7, 0);

	/* Zero out the rmon mib registers if it has them */
	if (priv->device_flags & FSL_GIANFAR_DEV_HAS_RMON) {
		memset_io(&(regs->rmon), 0, sizeof(struct rmon_mib));

		/* Mask off the CAM interrupts */
		gfar_write(&regs->rmon.cam1, 0xffffffff);
		gfar_write(&regs->rmon.cam2, 0xffffffff);
	}

	/* Initialize the max receive buffer length */
	gfar_write(&regs->mrblr, priv->rx_buffer_size);

	/* Initialize the Minimum Frame Length Register */
	gfar_write(&regs->minflr, MINFLR_INIT_SETTINGS);
}
static int __gfar_is_rx_idle(struct gfar_private *priv)
{
	u32 res;

	/*
	 * Normally TSEC should not hang on GRS commands, so we should
	 * actually wait for IEVENT_GRSC flag.
	 */
	if (likely(!gfar_has_errata(priv, GFAR_ERRATA_A002)))
		return 0;

	/*
	 * Read the eTSEC register at offset 0xD1C. If bits 7-14 are
	 * the same as bits 23-30, the eTSEC Rx is assumed to be idle
	 * and the Rx can be safely reset.
	 */
	res = gfar_read((void __iomem *)priv->gfargrp[0].regs + 0xd1c);
	if ((res & 0xffff) == (res >> 16))
		return 1;

	return 0;
}
/* Halt the receive and transmit queues */
static void gfar_halt_nodisable(struct net_device *dev)
{
	struct gfar_private *priv = netdev_priv(dev);
	struct gfar __iomem *regs = NULL;
	u32 tempval;
	int i;

	for (i = 0; i < priv->num_grps; i++) {
		regs = priv->gfargrp[i].regs;
		/* Mask all interrupts */
		gfar_write(&regs->imask, IMASK_INIT_CLEAR);

		/* Clear all interrupts */
		gfar_write(&regs->ievent, IEVENT_INIT_CLEAR);
	}

	regs = priv->gfargrp[0].regs;
	/* Stop the DMA, and wait for it to stop */
	tempval = gfar_read(&regs->dmactrl);
	if ((tempval & (DMACTRL_GRS | DMACTRL_GTS))
	    != (DMACTRL_GRS | DMACTRL_GTS)) {
		int ret;

		tempval |= (DMACTRL_GRS | DMACTRL_GTS);
		gfar_write(&regs->dmactrl, tempval);

		do {
			ret = spin_event_timeout(((gfar_read(&regs->ievent) &
				 (IEVENT_GRSC | IEVENT_GTSC)) ==
				 (IEVENT_GRSC | IEVENT_GTSC)), 1000000, 0);
			if (!ret && !(gfar_read(&regs->ievent) & IEVENT_GRSC))
				ret = __gfar_is_rx_idle(priv);
		} while (!ret);
	}
}
/* Halt the receive and transmit queues */
void gfar_halt(struct net_device *dev)
{
	struct gfar_private *priv = netdev_priv(dev);
	struct gfar __iomem *regs = priv->gfargrp[0].regs;
	u32 tempval;

	gfar_halt_nodisable(dev);

	/* Disable Rx and Tx */
	tempval = gfar_read(&regs->maccfg1);
	tempval &= ~(MACCFG1_RX_EN | MACCFG1_TX_EN);
	gfar_write(&regs->maccfg1, tempval);
}
static void free_grp_irqs(struct gfar_priv_grp *grp)
{
	free_irq(grp->interruptError, grp);
	free_irq(grp->interruptTransmit, grp);
	free_irq(grp->interruptReceive, grp);
}

void stop_gfar(struct net_device *dev)
{
	struct gfar_private *priv = netdev_priv(dev);
	unsigned long flags;
	int i;

	phy_stop(priv->phydev);

	/* Lock it down */
	local_irq_save(flags);
	lock_tx_qs(priv);
	lock_rx_qs(priv);

	gfar_halt(dev);

	unlock_rx_qs(priv);
	unlock_tx_qs(priv);
	local_irq_restore(flags);

	/* Free the IRQs */
	if (priv->device_flags & FSL_GIANFAR_DEV_HAS_MULTI_INTR) {
		for (i = 0; i < priv->num_grps; i++)
			free_grp_irqs(&priv->gfargrp[i]);
	} else {
		for (i = 0; i < priv->num_grps; i++)
			free_irq(priv->gfargrp[i].interruptTransmit,
				 &priv->gfargrp[i]);
	}

	free_skb_resources(priv);
}
static void free_skb_tx_queue(struct gfar_priv_tx_q *tx_queue)
{
	struct txbd8 *txbdp;
	struct gfar_private *priv = netdev_priv(tx_queue->dev);
	int i, j;

	txbdp = tx_queue->tx_bd_base;

	for (i = 0; i < tx_queue->tx_ring_size; i++) {
		if (!tx_queue->tx_skbuff[i])
			continue;

		dma_unmap_single(&priv->ofdev->dev, txbdp->bufPtr,
				 txbdp->length, DMA_TO_DEVICE);
		txbdp->lstatus = 0;
		for (j = 0; j < skb_shinfo(tx_queue->tx_skbuff[i])->nr_frags;
		     j++) {
			txbdp++;
			dma_unmap_page(&priv->ofdev->dev, txbdp->bufPtr,
				       txbdp->length, DMA_TO_DEVICE);
		}
		txbdp++;
		dev_kfree_skb_any(tx_queue->tx_skbuff[i]);
		tx_queue->tx_skbuff[i] = NULL;
	}
	kfree(tx_queue->tx_skbuff);
}
static void free_skb_rx_queue(struct gfar_priv_rx_q *rx_queue)
{
	struct rxbd8 *rxbdp;
	struct gfar_private *priv = netdev_priv(rx_queue->dev);
	int i;

	rxbdp = rx_queue->rx_bd_base;

	for (i = 0; i < rx_queue->rx_ring_size; i++) {
		if (rx_queue->rx_skbuff[i]) {
			dma_unmap_single(&priv->ofdev->dev,
					 rxbdp->bufPtr, priv->rx_buffer_size,
					 DMA_FROM_DEVICE);
			dev_kfree_skb_any(rx_queue->rx_skbuff[i]);
			rx_queue->rx_skbuff[i] = NULL;
		}
		rxbdp->lstatus = 0;
		rxbdp->bufPtr = 0;
		rxbdp++;
	}
	kfree(rx_queue->rx_skbuff);
}
/* If there are any tx skbs or rx skbs still around, free them.
 * Then free tx_skbuff and rx_skbuff */
static void free_skb_resources(struct gfar_private *priv)
{
	struct gfar_priv_tx_q *tx_queue = NULL;
	struct gfar_priv_rx_q *rx_queue = NULL;
	int i;

	/* Go through all the buffer descriptors and free their data buffers */
	for (i = 0; i < priv->num_tx_queues; i++) {
		struct netdev_queue *txq;
		tx_queue = priv->tx_queue[i];
		txq = netdev_get_tx_queue(tx_queue->dev, tx_queue->qindex);
		if (tx_queue->tx_skbuff)
			free_skb_tx_queue(tx_queue);
		netdev_tx_reset_queue(txq);
	}

	for (i = 0; i < priv->num_rx_queues; i++) {
		rx_queue = priv->rx_queue[i];
		if (rx_queue->rx_skbuff)
			free_skb_rx_queue(rx_queue);
	}

	dma_free_coherent(&priv->ofdev->dev,
			  sizeof(struct txbd8) * priv->total_tx_ring_size +
			  sizeof(struct rxbd8) * priv->total_rx_ring_size,
			  priv->tx_queue[0]->tx_bd_base,
			  priv->tx_queue[0]->tx_bd_dma_base);
	skb_queue_purge(&priv->rx_recycle);
}
void gfar_start(struct net_device *dev)
{
	struct gfar_private *priv = netdev_priv(dev);
	struct gfar __iomem *regs = priv->gfargrp[0].regs;
	u32 tempval;
	int i;

	/* Enable Rx and Tx in MACCFG1 */
	tempval = gfar_read(&regs->maccfg1);
	tempval |= (MACCFG1_RX_EN | MACCFG1_TX_EN);
	gfar_write(&regs->maccfg1, tempval);

	/* Initialize DMACTRL to have WWR and WOP */
	tempval = gfar_read(&regs->dmactrl);
	tempval |= DMACTRL_INIT_SETTINGS;
	gfar_write(&regs->dmactrl, tempval);

	/* Make sure we aren't stopped */
	tempval = gfar_read(&regs->dmactrl);
	tempval &= ~(DMACTRL_GRS | DMACTRL_GTS);
	gfar_write(&regs->dmactrl, tempval);

	for (i = 0; i < priv->num_grps; i++) {
		regs = priv->gfargrp[i].regs;
		/* Clear THLT/RHLT, so that the DMA starts polling now */
		gfar_write(&regs->tstat, priv->gfargrp[i].tstat);
		gfar_write(&regs->rstat, priv->gfargrp[i].rstat);
		/* Unmask the interrupts we look for */
		gfar_write(&regs->imask, IMASK_DEFAULT);
	}

	dev->trans_start = jiffies; /* prevent tx timeout */
}
void gfar_configure_coalescing(struct gfar_private *priv,
			       unsigned long tx_mask, unsigned long rx_mask)
{
	struct gfar __iomem *regs = priv->gfargrp[0].regs;
	u32 __iomem *baddr;
	int i;

	/* Backward compatible case ---- even if we enable
	 * multiple queues, there's only single reg to program
	 */
	gfar_write(&regs->txic, 0);
	if (likely(priv->tx_queue[0]->txcoalescing))
		gfar_write(&regs->txic, priv->tx_queue[0]->txic);

	gfar_write(&regs->rxic, 0);
	if (unlikely(priv->rx_queue[0]->rxcoalescing))
		gfar_write(&regs->rxic, priv->rx_queue[0]->rxic);

	if (priv->mode == MQ_MG_MODE) {
		baddr = &regs->txic0;
		for_each_set_bit(i, &tx_mask, priv->num_tx_queues) {
			gfar_write(baddr + i, 0);
			if (likely(priv->tx_queue[i]->txcoalescing))
				gfar_write(baddr + i, priv->tx_queue[i]->txic);
		}

		baddr = &regs->rxic0;
		for_each_set_bit(i, &rx_mask, priv->num_rx_queues) {
			gfar_write(baddr + i, 0);
			if (likely(priv->rx_queue[i]->rxcoalescing))
				gfar_write(baddr + i, priv->rx_queue[i]->rxic);
		}
	}
}
static int register_grp_irqs(struct gfar_priv_grp *grp)
{
	struct gfar_private *priv = grp->priv;
	struct net_device *dev = priv->ndev;
	int err;

	/* If the device has multiple interrupts, register for
	 * them.  Otherwise, only register for the one */
	if (priv->device_flags & FSL_GIANFAR_DEV_HAS_MULTI_INTR) {
		/* Install our interrupt handlers for Error,
		 * Transmit, and Receive */
		if ((err = request_irq(grp->interruptError, gfar_error, 0,
				       grp->int_name_er, grp)) < 0) {
			netif_err(priv, intr, dev, "Can't get IRQ %d\n",
				  grp->interruptError);

			goto err_irq_fail;
		}

		if ((err = request_irq(grp->interruptTransmit, gfar_transmit,
				       0, grp->int_name_tx, grp)) < 0) {
			netif_err(priv, intr, dev, "Can't get IRQ %d\n",
				  grp->interruptTransmit);
			goto tx_irq_fail;
		}

		if ((err = request_irq(grp->interruptReceive, gfar_receive, 0,
				       grp->int_name_rx, grp)) < 0) {
			netif_err(priv, intr, dev, "Can't get IRQ %d\n",
				  grp->interruptReceive);
			goto rx_irq_fail;
		}
	} else {
		if ((err = request_irq(grp->interruptTransmit, gfar_interrupt, 0,
				       grp->int_name_tx, grp)) < 0) {
			netif_err(priv, intr, dev, "Can't get IRQ %d\n",
				  grp->interruptTransmit);
			goto err_irq_fail;
		}
	}

	return 0;

rx_irq_fail:
	free_irq(grp->interruptTransmit, grp);
tx_irq_fail:
	free_irq(grp->interruptError, grp);
err_irq_fail:
	return err;
}
/* Bring the controller up and running */
int startup_gfar(struct net_device *ndev)
{
	struct gfar_private *priv = netdev_priv(ndev);
	struct gfar __iomem *regs = NULL;
	int err, i, j;

	for (i = 0; i < priv->num_grps; i++) {
		regs = priv->gfargrp[i].regs;
		gfar_write(&regs->imask, IMASK_INIT_CLEAR);
	}

	regs = priv->gfargrp[0].regs;
	err = gfar_alloc_skb_resources(ndev);
	if (err)
		return err;

	gfar_init_mac(ndev);

	for (i = 0; i < priv->num_grps; i++) {
		err = register_grp_irqs(&priv->gfargrp[i]);
		if (err) {
			for (j = 0; j < i; j++)
				free_grp_irqs(&priv->gfargrp[j]);
			goto irq_fail;
		}
	}

	/* Start the controller */
	gfar_start(ndev);

	phy_start(priv->phydev);

	gfar_configure_coalescing(priv, 0xFF, 0xFF);

	return 0;

irq_fail:
	free_skb_resources(priv);
	return err;
}
/* Called when something needs to use the ethernet device */
/* Returns 0 for success. */
static int gfar_enet_open(struct net_device *dev)
{
	struct gfar_private *priv = netdev_priv(dev);
	int err;

	enable_napi(priv);

	skb_queue_head_init(&priv->rx_recycle);

	/* Initialize a bunch of registers */
	init_registers(dev);

	gfar_set_mac_address(dev);

	err = init_phy(dev);
	if (err) {
		disable_napi(priv);
		return err;
	}

	err = startup_gfar(dev);
	if (err) {
		disable_napi(priv);
		return err;
	}

	netif_tx_start_all_queues(dev);

	device_set_wakeup_enable(&dev->dev, priv->wol_en);

	return err;
}
*gfar_add_fcb(struct sk_buff
*skb
)
1953 struct txfcb
*fcb
= (struct txfcb
*)skb_push(skb
, GMAC_FCB_LEN
);
1955 memset(fcb
, 0, GMAC_FCB_LEN
);
1960 static inline void gfar_tx_checksum(struct sk_buff
*skb
, struct txfcb
*fcb
,
1965 /* If we're here, it's a IP packet with a TCP or UDP
1966 * payload. We set it to checksum, using a pseudo-header
1969 flags
= TXFCB_DEFAULT
;
1971 /* Tell the controller what the protocol is */
1972 /* And provide the already calculated phcs */
1973 if (ip_hdr(skb
)->protocol
== IPPROTO_UDP
) {
1975 fcb
->phcs
= udp_hdr(skb
)->check
;
1977 fcb
->phcs
= tcp_hdr(skb
)->check
;
1979 /* l3os is the distance between the start of the
1980 * frame (skb->data) and the start of the IP hdr.
1981 * l4os is the distance between the start of the
1982 * l3 hdr and the l4 hdr */
1983 fcb
->l3os
= (u16
)(skb_network_offset(skb
) - fcb_length
);
1984 fcb
->l4os
= skb_network_header_len(skb
);
1989 void inline gfar_tx_vlan(struct sk_buff
*skb
, struct txfcb
*fcb
)
1991 fcb
->flags
|= TXFCB_VLN
;
1992 fcb
->vlctl
= vlan_tx_tag_get(skb
);
static inline struct txbd8 *skip_txbd(struct txbd8 *bdp, int stride,
				      struct txbd8 *base, int ring_size)
{
	struct txbd8 *new_bd = bdp + stride;

	return (new_bd >= (base + ring_size)) ? (new_bd - ring_size) : new_bd;
}

static inline struct txbd8 *next_txbd(struct txbd8 *bdp, struct txbd8 *base,
				      int ring_size)
{
	return skip_txbd(bdp, 1, base, ring_size);
}
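/*
 * Example of the wrap arithmetic above: with ring_size == 256 and
 * bdp == base + 255, next_txbd() computes base + 256, detects it is
 * past the end, and returns base -- i.e. the ring index advances
 * modulo ring_size without a divide.
 */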
/* This is called by the kernel when a frame is ready for transmission. */
/* It is pointed to by the dev->hard_start_xmit function pointer */
static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct gfar_private *priv = netdev_priv(dev);
	struct gfar_priv_tx_q *tx_queue = NULL;
	struct netdev_queue *txq;
	struct gfar __iomem *regs = NULL;
	struct txfcb *fcb = NULL;
	struct txbd8 *txbdp, *txbdp_start, *base, *txbdp_tstamp = NULL;
	u32 lstatus;
	int i, rq = 0, do_tstamp = 0;
	u32 bufaddr;
	unsigned long flags;
	unsigned int nr_frags, nr_txbds, length, fcb_length = GMAC_FCB_LEN;

	/*
	 * TOE=1 frames larger than 2500 bytes may see excess delays
	 * before start of transmission.
	 */
	if (unlikely(gfar_has_errata(priv, GFAR_ERRATA_76) &&
		     skb->ip_summed == CHECKSUM_PARTIAL &&
		     skb->len > 2500)) {
		int ret;

		ret = skb_checksum_help(skb);
		if (ret)
			return ret;
	}

	rq = skb->queue_mapping;
	tx_queue = priv->tx_queue[rq];
	txq = netdev_get_tx_queue(dev, rq);
	base = tx_queue->tx_bd_base;
	regs = tx_queue->grp->regs;

	/* check if time stamp should be generated */
	if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP &&
		     priv->hwts_tx_en)) {
		do_tstamp = 1;
		fcb_length = GMAC_FCB_LEN + GMAC_TXPAL_LEN;
	}

	/* make space for additional header when fcb is needed */
	if (((skb->ip_summed == CHECKSUM_PARTIAL) ||
	     vlan_tx_tag_present(skb) ||
	     unlikely(do_tstamp)) &&
	    (skb_headroom(skb) < fcb_length)) {
		struct sk_buff *skb_new;

		skb_new = skb_realloc_headroom(skb, fcb_length);
		if (!skb_new) {
			dev->stats.tx_errors++;
			kfree_skb(skb);
			return NETDEV_TX_OK;
		}

		/* Steal sock reference for processing TX time stamps */
		swap(skb_new->sk, skb->sk);
		swap(skb_new->destructor, skb->destructor);
		kfree_skb(skb);
		skb = skb_new;
	}

	/* total number of fragments in the SKB */
	nr_frags = skb_shinfo(skb)->nr_frags;

	/* calculate the required number of TxBDs for this skb */
	if (unlikely(do_tstamp))
		nr_txbds = nr_frags + 2;
	else
		nr_txbds = nr_frags + 1;
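	/*
	 * Example: a 3-fragment skb needs 4 BDs (head + 3 frags), or 5
	 * when do_tstamp is set, since timestamping splits the head into
	 * an FCB-only BD plus a data BD (see the comment before the
	 * do_tstamp block further down).
	 */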
	/* check if there is space to queue this packet */
	if (nr_txbds > tx_queue->num_txbdfree) {
		/* no space, stop the queue */
		netif_tx_stop_queue(txq);
		dev->stats.tx_fifo_errors++;
		return NETDEV_TX_BUSY;
	}

	/* Update transmit stats */
	tx_queue->stats.tx_bytes += skb->len;
	tx_queue->stats.tx_packets++;

	txbdp = txbdp_start = tx_queue->cur_tx;
	lstatus = txbdp->lstatus;

	/* Time stamp insertion requires one additional TxBD */
	if (unlikely(do_tstamp))
		txbdp_tstamp = txbdp = next_txbd(txbdp, base,
						 tx_queue->tx_ring_size);

	if (nr_frags == 0) {
		if (unlikely(do_tstamp))
			txbdp_tstamp->lstatus |= BD_LFLAG(TXBD_LAST |
							  TXBD_INTERRUPT);
		else
			lstatus |= BD_LFLAG(TXBD_LAST | TXBD_INTERRUPT);
	} else {
		/* Place the fragment addresses and lengths into the TxBDs */
		for (i = 0; i < nr_frags; i++) {
			/* Point at the next BD, wrapping as needed */
			txbdp = next_txbd(txbdp, base, tx_queue->tx_ring_size);

			length = skb_shinfo(skb)->frags[i].size;

			lstatus = txbdp->lstatus | length |
				  BD_LFLAG(TXBD_READY);

			/* Handle the last BD specially */
			if (i == nr_frags - 1)
				lstatus |= BD_LFLAG(TXBD_LAST | TXBD_INTERRUPT);

			bufaddr = skb_frag_dma_map(&priv->ofdev->dev,
						   &skb_shinfo(skb)->frags[i],
						   0, length, DMA_TO_DEVICE);

			/* set the TxBD length and buffer pointer */
			txbdp->bufPtr = bufaddr;
			txbdp->lstatus = lstatus;
		}

		lstatus = txbdp_start->lstatus;
	}

	/* Add TxPAL between FCB and frame if required */
	if (unlikely(do_tstamp)) {
		skb_push(skb, GMAC_TXPAL_LEN);
		memset(skb->data, 0, GMAC_TXPAL_LEN);
	}

	/* Set up checksumming */
	if (CHECKSUM_PARTIAL == skb->ip_summed) {
		fcb = gfar_add_fcb(skb);
		/* as specified by errata */
		if (unlikely(gfar_has_errata(priv, GFAR_ERRATA_12)
			     && ((unsigned long)fcb % 0x20) > 0x18)) {
			__skb_pull(skb, GMAC_FCB_LEN);
			skb_checksum_help(skb);
		} else {
			lstatus |= BD_LFLAG(TXBD_TOE);
			gfar_tx_checksum(skb, fcb, fcb_length);
		}
	}

	if (vlan_tx_tag_present(skb)) {
		if (unlikely(NULL == fcb)) {
			fcb = gfar_add_fcb(skb);
			lstatus |= BD_LFLAG(TXBD_TOE);
		}

		gfar_tx_vlan(skb, fcb);
	}

	/* Setup tx hardware time stamping if requested */
	if (unlikely(do_tstamp)) {
		skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
		if (fcb == NULL)
			fcb = gfar_add_fcb(skb);
		fcb->ptp = 1;
		lstatus |= BD_LFLAG(TXBD_TOE);
	}

	txbdp_start->bufPtr = dma_map_single(&priv->ofdev->dev, skb->data,
					     skb_headlen(skb), DMA_TO_DEVICE);

	/*
	 * If time stamping is requested one additional TxBD must be set up. The
	 * first TxBD points to the FCB and must have a data length of
	 * GMAC_FCB_LEN. The second TxBD points to the actual frame data with
	 * the full frame length.
	 */
	if (unlikely(do_tstamp)) {
		txbdp_tstamp->bufPtr = txbdp_start->bufPtr + fcb_length;
		txbdp_tstamp->lstatus |= BD_LFLAG(TXBD_READY) |
					 (skb_headlen(skb) - fcb_length);
		lstatus |= BD_LFLAG(TXBD_CRC | TXBD_READY) | GMAC_FCB_LEN;
	} else {
		lstatus |= BD_LFLAG(TXBD_CRC | TXBD_READY) | skb_headlen(skb);
	}
2193 netdev_tx_sent_queue(txq
, skb
->len
);
2196 * We can work in parallel with gfar_clean_tx_ring(), except
2197 * when modifying num_txbdfree. Note that we didn't grab the lock
2198 * when we were reading the num_txbdfree and checking for available
2199 * space, that's because outside of this function it can only grow,
2200 * and once we've got needed space, it cannot suddenly disappear.
2202 * The lock also protects us from gfar_error(), which can modify
2203 * regs->tstat and thus retrigger the transfers, which is why we
2204 * also must grab the lock before setting ready bit for the first
2205 * to be transmitted BD.
2207 spin_lock_irqsave(&tx_queue
->txlock
, flags
);
2210 * The powerpc-specific eieio() is used, as wmb() has too strong
2211 * semantics (it requires synchronization between cacheable and
2212 * uncacheable mappings, which eieio doesn't provide and which we
2213 * don't need), thus requiring a more expensive sync instruction. At
2214 * some point, the set of architecture-independent barrier functions
2215 * should be expanded to include weaker barriers.
2219 txbdp_start
->lstatus
= lstatus
;
2221 eieio(); /* force lstatus write before tx_skbuff */
2223 tx_queue
->tx_skbuff
[tx_queue
->skb_curtx
] = skb
;
2225 /* Update the current skb pointer to the next entry we will use
2226 * (wrapping if necessary) */
2227 tx_queue
->skb_curtx
= (tx_queue
->skb_curtx
+ 1) &
2228 TX_RING_MOD_MASK(tx_queue
->tx_ring_size
);
2230 tx_queue
->cur_tx
= next_txbd(txbdp
, base
, tx_queue
->tx_ring_size
);
2232 /* reduce TxBD free count */
2233 tx_queue
->num_txbdfree
-= (nr_txbds
);
2235 /* If the next BD still needs to be cleaned up, then the bds
2236 are full. We need to tell the kernel to stop sending us stuff. */
2237 if (!tx_queue
->num_txbdfree
) {
2238 netif_tx_stop_queue(txq
);
2240 dev
->stats
.tx_fifo_errors
++;
2243 /* Tell the DMA to go go go */
2244 gfar_write(®s
->tstat
, TSTAT_CLEAR_THALT
>> tx_queue
->qindex
);
2247 spin_unlock_irqrestore(&tx_queue
->txlock
, flags
);
2249 return NETDEV_TX_OK
;
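
/*
 * BD accounting, by the rules above: a linear skb needs
 * nr_frags + 1 = 1 TxBD; with hardware time stamping a second BD is
 * chained for the frame data (the first carries only the FCB), so
 * nr_txbds = 2.  An skb with three page fragments and no time stamp
 * needs 4 BDs, and that count is checked against num_txbdfree before
 * any descriptor is modified.
 */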

/* Stops the kernel queue, and halts the controller */
static int gfar_close(struct net_device *dev)
{
	struct gfar_private *priv = netdev_priv(dev);

	disable_napi(priv);

	cancel_work_sync(&priv->reset_task);
	stop_gfar(dev);

	/* Disconnect from the PHY */
	phy_disconnect(priv->phydev);
	priv->phydev = NULL;

	netif_tx_stop_all_queues(dev);

	return 0;
}

/* Changes the mac address if the controller is not running. */
static int gfar_set_mac_address(struct net_device *dev)
{
	gfar_set_mac_for_addr(dev, 0, dev->dev_addr);

	return 0;
}

/* Check if rx parser should be activated */
void gfar_check_rx_parser_mode(struct gfar_private *priv)
{
	struct gfar __iomem *regs;
	u32 tempval;

	regs = priv->gfargrp[0].regs;

	tempval = gfar_read(&regs->rctrl);
	/* If parse is no longer required, then disable parser */
	if (tempval & RCTRL_REQ_PARSER)
		tempval |= RCTRL_PRSDEP_INIT;
	else
		tempval &= ~RCTRL_PRSDEP_INIT;
	gfar_write(&regs->rctrl, tempval);
}

/* Enables and disables VLAN insertion/extraction */
void gfar_vlan_mode(struct net_device *dev, netdev_features_t features)
{
	struct gfar_private *priv = netdev_priv(dev);
	struct gfar __iomem *regs = NULL;
	unsigned long flags;
	u32 tempval;

	regs = priv->gfargrp[0].regs;
	local_irq_save(flags);
	lock_rx_qs(priv);

	if (features & NETIF_F_HW_VLAN_TX) {
		/* Enable VLAN tag insertion */
		tempval = gfar_read(&regs->tctrl);
		tempval |= TCTRL_VLINS;
		gfar_write(&regs->tctrl, tempval);
	} else {
		/* Disable VLAN tag insertion */
		tempval = gfar_read(&regs->tctrl);
		tempval &= ~TCTRL_VLINS;
		gfar_write(&regs->tctrl, tempval);
	}

	if (features & NETIF_F_HW_VLAN_RX) {
		/* Enable VLAN tag extraction */
		tempval = gfar_read(&regs->rctrl);
		tempval |= (RCTRL_VLEX | RCTRL_PRSDEP_INIT);
		gfar_write(&regs->rctrl, tempval);
	} else {
		/* Disable VLAN tag extraction */
		tempval = gfar_read(&regs->rctrl);
		tempval &= ~RCTRL_VLEX;
		gfar_write(&regs->rctrl, tempval);

		gfar_check_rx_parser_mode(priv);
	}

	gfar_change_mtu(dev, dev->mtu);

	unlock_rx_qs(priv);
	local_irq_restore(flags);
}

static int gfar_change_mtu(struct net_device *dev, int new_mtu)
{
	int tempsize, tempval;
	struct gfar_private *priv = netdev_priv(dev);
	struct gfar __iomem *regs = priv->gfargrp[0].regs;
	int oldsize = priv->rx_buffer_size;
	int frame_size = new_mtu + ETH_HLEN;

	if (gfar_is_vlan_on(priv))
		frame_size += VLAN_HLEN;

	if ((frame_size < 64) || (frame_size > JUMBO_FRAME_SIZE)) {
		netif_err(priv, drv, dev, "Invalid MTU setting\n");
		return -EINVAL;
	}

	if (gfar_uses_fcb(priv))
		frame_size += GMAC_FCB_LEN;

	frame_size += priv->padding;

	tempsize =
	    (frame_size & ~(INCREMENTAL_BUFFER_SIZE - 1)) +
	    INCREMENTAL_BUFFER_SIZE;

	/* Only stop and start the controller if it isn't already
	 * stopped, and we changed something */
	if ((oldsize != tempsize) && (dev->flags & IFF_UP))
		stop_gfar(dev);

	priv->rx_buffer_size = tempsize;

	dev->mtu = new_mtu;

	gfar_write(&regs->mrblr, priv->rx_buffer_size);
	gfar_write(&regs->maxfrm, priv->rx_buffer_size);

	/* If the mtu is larger than the max size for standard
	 * ethernet frames (ie, a jumbo frame), then set maccfg2
	 * to allow huge frames, and to check the length */
	tempval = gfar_read(&regs->maccfg2);

	if (priv->rx_buffer_size > DEFAULT_RX_BUFFER_SIZE ||
	    gfar_has_errata(priv, GFAR_ERRATA_74))
		tempval |= (MACCFG2_HUGEFRAME | MACCFG2_LENGTHCHECK);
	else
		tempval &= ~(MACCFG2_HUGEFRAME | MACCFG2_LENGTHCHECK);

	gfar_write(&regs->maccfg2, tempval);

	if ((oldsize != tempsize) && (dev->flags & IFF_UP))
		startup_gfar(dev);

	return 0;
}

/* gfar_reset_task gets scheduled when a packet has not been
 * transmitted after a set amount of time.
 * For now, assume that clearing out all the structures, and
 * starting over will fix the problem.
 */
static void gfar_reset_task(struct work_struct *work)
{
	struct gfar_private *priv = container_of(work, struct gfar_private,
			reset_task);
	struct net_device *dev = priv->ndev;

	if (dev->flags & IFF_UP) {
		netif_tx_stop_all_queues(dev);
		stop_gfar(dev);
		startup_gfar(dev);
		netif_tx_start_all_queues(dev);
	}

	netif_tx_schedule_all(dev);
}

static void gfar_timeout(struct net_device *dev)
{
	struct gfar_private *priv = netdev_priv(dev);

	dev->stats.tx_errors++;
	schedule_work(&priv->reset_task);
}

static void gfar_align_skb(struct sk_buff *skb)
{
	/* We need the data buffer to be aligned properly.  We will reserve
	 * as many bytes as needed to align the data properly
	 */
	skb_reserve(skb, RXBUF_ALIGNMENT -
		(((unsigned long) skb->data) & (RXBUF_ALIGNMENT - 1)));
}
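
/*
 * Example, assuming RXBUF_ALIGNMENT is 64 (gianfar.h): if skb->data
 * sits at ...0x28 (40 mod 64), 24 bytes are reserved and the data
 * pointer lands on a 64-byte boundary.  An already-aligned buffer
 * still has a full 64 bytes reserved, which gfar_alloc_skb() allows
 * for by over-allocating exactly RXBUF_ALIGNMENT bytes.
 */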

/* Interrupt Handler for Transmit complete */
static int gfar_clean_tx_ring(struct gfar_priv_tx_q *tx_queue)
{
	struct net_device *dev = tx_queue->dev;
	struct netdev_queue *txq;
	struct gfar_private *priv = netdev_priv(dev);
	struct gfar_priv_rx_q *rx_queue = NULL;
	struct txbd8 *bdp, *next = NULL;
	struct txbd8 *lbdp = NULL;
	struct txbd8 *base = tx_queue->tx_bd_base;
	struct sk_buff *skb;
	int skb_dirtytx;
	int tx_ring_size = tx_queue->tx_ring_size;
	int frags = 0, nr_txbds = 0;
	int i;
	int howmany = 0;
	int tqi = tx_queue->qindex;
	unsigned int bytes_sent = 0;
	u32 lstatus;
	size_t buflen;

	rx_queue = priv->rx_queue[tqi];
	txq = netdev_get_tx_queue(dev, tqi);
	bdp = tx_queue->dirty_tx;
	skb_dirtytx = tx_queue->skb_dirtytx;

	while ((skb = tx_queue->tx_skbuff[skb_dirtytx])) {
		unsigned long flags;

		frags = skb_shinfo(skb)->nr_frags;

		/*
		 * When time stamping, one additional TxBD must be freed.
		 * Also, we need to dma_unmap_single() the TxPAL.
		 */
		if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS))
			nr_txbds = frags + 2;
		else
			nr_txbds = frags + 1;

		lbdp = skip_txbd(bdp, nr_txbds - 1, base, tx_ring_size);

		lstatus = lbdp->lstatus;

		/* Only clean completed frames */
		if ((lstatus & BD_LFLAG(TXBD_READY)) &&
		    (lstatus & BD_LENGTH_MASK))
			break;

		if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS)) {
			next = next_txbd(bdp, base, tx_ring_size);
			buflen = next->length + GMAC_FCB_LEN + GMAC_TXPAL_LEN;
		} else
			buflen = bdp->length;

		dma_unmap_single(&priv->ofdev->dev, bdp->bufPtr,
				 buflen, DMA_TO_DEVICE);

		if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS)) {
			struct skb_shared_hwtstamps shhwtstamps;
			u64 *ns = (u64 *) (((u32)skb->data + 0x10) & ~0x7);
			memset(&shhwtstamps, 0, sizeof(shhwtstamps));
			shhwtstamps.hwtstamp = ns_to_ktime(*ns);
			skb_pull(skb, GMAC_FCB_LEN + GMAC_TXPAL_LEN);
			skb_tstamp_tx(skb, &shhwtstamps);
			bdp->lstatus &= BD_LFLAG(TXBD_WRAP);
			bdp = next;
		}

		bdp->lstatus &= BD_LFLAG(TXBD_WRAP);
		bdp = next_txbd(bdp, base, tx_ring_size);

		for (i = 0; i < frags; i++) {
			dma_unmap_page(&priv->ofdev->dev,
				       bdp->bufPtr,
				       bdp->length,
				       DMA_TO_DEVICE);
			bdp->lstatus &= BD_LFLAG(TXBD_WRAP);
			bdp = next_txbd(bdp, base, tx_ring_size);
		}

		bytes_sent += skb->len;

		/*
		 * If there's room in the queue (limit it to rx_buffer_size)
		 * we add this skb back into the pool, if it's the right size
		 */
		if (skb_queue_len(&priv->rx_recycle) < rx_queue->rx_ring_size &&
		    skb_recycle_check(skb, priv->rx_buffer_size +
					   RXBUF_ALIGNMENT)) {
			gfar_align_skb(skb);
			skb_queue_head(&priv->rx_recycle, skb);
		} else
			dev_kfree_skb_any(skb);

		tx_queue->tx_skbuff[skb_dirtytx] = NULL;

		skb_dirtytx = (skb_dirtytx + 1) &
			      TX_RING_MOD_MASK(tx_ring_size);

		howmany++;
		spin_lock_irqsave(&tx_queue->txlock, flags);
		tx_queue->num_txbdfree += nr_txbds;
		spin_unlock_irqrestore(&tx_queue->txlock, flags);
	}

	/* If we freed a buffer, we can restart transmission, if necessary */
	if (netif_tx_queue_stopped(txq) && tx_queue->num_txbdfree)
		netif_wake_subqueue(dev, tqi);

	/* Update dirty indicators */
	tx_queue->skb_dirtytx = skb_dirtytx;
	tx_queue->dirty_tx = bdp;

	netdev_tx_completed_queue(txq, howmany, bytes_sent);

	return howmany;
}

static void gfar_schedule_cleanup(struct gfar_priv_grp *gfargrp)
{
	unsigned long flags;

	spin_lock_irqsave(&gfargrp->grplock, flags);
	if (napi_schedule_prep(&gfargrp->napi)) {
		gfar_write(&gfargrp->regs->imask, IMASK_RTX_DISABLED);
		__napi_schedule(&gfargrp->napi);
	} else {
		/*
		 * Clear IEVENT, so interrupts aren't called again
		 * because of the packets that have already arrived.
		 */
		gfar_write(&gfargrp->regs->ievent, IEVENT_RTX_MASK);
	}
	spin_unlock_irqrestore(&gfargrp->grplock, flags);
}

/* Interrupt Handler for Transmit complete */
static irqreturn_t gfar_transmit(int irq, void *grp_id)
{
	gfar_schedule_cleanup((struct gfar_priv_grp *)grp_id);
	return IRQ_HANDLED;
}

static void gfar_new_rxbdp(struct gfar_priv_rx_q *rx_queue, struct rxbd8 *bdp,
		struct sk_buff *skb)
{
	struct net_device *dev = rx_queue->dev;
	struct gfar_private *priv = netdev_priv(dev);
	dma_addr_t buf;

	buf = dma_map_single(&priv->ofdev->dev, skb->data,
			     priv->rx_buffer_size, DMA_FROM_DEVICE);
	gfar_init_rxbdp(rx_queue, bdp, buf);
}

static struct sk_buff * gfar_alloc_skb(struct net_device *dev)
{
	struct gfar_private *priv = netdev_priv(dev);
	struct sk_buff *skb = NULL;

	skb = netdev_alloc_skb(dev, priv->rx_buffer_size + RXBUF_ALIGNMENT);
	if (!skb)
		return NULL;

	gfar_align_skb(skb);

	return skb;
}

struct sk_buff * gfar_new_skb(struct net_device *dev)
{
	struct gfar_private *priv = netdev_priv(dev);
	struct sk_buff *skb = NULL;

	skb = skb_dequeue(&priv->rx_recycle);
	if (!skb)
		skb = gfar_alloc_skb(dev);

	return skb;
}

static inline void count_errors(unsigned short status, struct net_device *dev)
{
	struct gfar_private *priv = netdev_priv(dev);
	struct net_device_stats *stats = &dev->stats;
	struct gfar_extra_stats *estats = &priv->extra_stats;

	/* If the packet was truncated, none of the other errors
	 * matter */
	if (status & RXBD_TRUNCATED) {
		stats->rx_length_errors++;

		estats->rx_trunc++;

		return;
	}
	/* Count the errors, if there were any */
	if (status & (RXBD_LARGE | RXBD_SHORT)) {
		stats->rx_length_errors++;

		if (status & RXBD_LARGE)
			estats->rx_large++;
		else
			estats->rx_short++;
	}
	if (status & RXBD_NONOCTET) {
		stats->rx_frame_errors++;
		estats->rx_nonoctet++;
	}
	if (status & RXBD_CRCERR) {
		estats->rx_crcerr++;
		stats->rx_crc_errors++;
	}
	if (status & RXBD_OVERRUN) {
		estats->rx_overrun++;
		stats->rx_crc_errors++;
	}
}

irqreturn_t gfar_receive(int irq, void *grp_id)
{
	gfar_schedule_cleanup((struct gfar_priv_grp *)grp_id);
	return IRQ_HANDLED;
}

static inline void gfar_rx_checksum(struct sk_buff *skb, struct rxfcb *fcb)
{
	/* If valid headers were found, and valid sums
	 * were verified, then we tell the kernel that no
	 * checksumming is necessary.  Otherwise, it is */
	if ((fcb->flags & RXFCB_CSUM_MASK) == (RXFCB_CIP | RXFCB_CTU))
		skb->ip_summed = CHECKSUM_UNNECESSARY;
	else
		skb_checksum_none_assert(skb);
}
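
/*
 * For example, a TCP/IPv4 frame whose IP and TCP checksums were both
 * verified in hardware arrives with RXFCB_CIP and RXFCB_CTU set and,
 * assuming RXFCB_CSUM_MASK also covers the checksum-error bits, the
 * equality test above rejects any frame flagged with an error, which
 * then takes the skb_checksum_none_assert() path instead.
 */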

/* gfar_process_frame() -- handle one incoming packet if skb
 * isn't NULL.  */
static int gfar_process_frame(struct net_device *dev, struct sk_buff *skb,
			      int amount_pull, struct napi_struct *napi)
{
	struct gfar_private *priv = netdev_priv(dev);
	struct rxfcb *fcb = NULL;

	int ret;

	/* fcb is at the beginning if exists */
	fcb = (struct rxfcb *)skb->data;

	/* Remove the FCB from the skb */
	/* Remove the padded bytes, if there are any */
	if (amount_pull) {
		skb_record_rx_queue(skb, fcb->rq);
		skb_pull(skb, amount_pull);
	}

	/* Get receive timestamp from the skb */
	if (priv->hwts_rx_en) {
		struct skb_shared_hwtstamps *shhwtstamps = skb_hwtstamps(skb);
		u64 *ns = (u64 *) skb->data;
		memset(shhwtstamps, 0, sizeof(*shhwtstamps));
		shhwtstamps->hwtstamp = ns_to_ktime(*ns);
	}

	if (priv->padding)
		skb_pull(skb, priv->padding);

	if (dev->features & NETIF_F_RXCSUM)
		gfar_rx_checksum(skb, fcb);

	/* Tell the skb what kind of packet this is */
	skb->protocol = eth_type_trans(skb, dev);

	/*
	 * There's need to check for NETIF_F_HW_VLAN_RX here.
	 * Even if vlan rx accel is disabled, on some chips
	 * RXFCB_VLN is pseudo randomly set.
	 */
	if (dev->features & NETIF_F_HW_VLAN_RX &&
	    fcb->flags & RXFCB_VLN)
		__vlan_hwaccel_put_tag(skb, fcb->vlctl);

	/* Send the packet up the stack */
	ret = napi_gro_receive(napi, skb);

	if (GRO_DROP == ret)
		priv->extra_stats.kernel_dropped++;

	return 0;
}

/* gfar_clean_rx_ring() -- Processes each frame in the rx ring
 * until the budget/quota has been reached. Returns the number
 * of frames handled
 */
int gfar_clean_rx_ring(struct gfar_priv_rx_q *rx_queue, int rx_work_limit)
{
	struct net_device *dev = rx_queue->dev;
	struct rxbd8 *bdp, *base;
	struct sk_buff *skb;
	int pkt_len;
	int amount_pull;
	int howmany = 0;
	struct gfar_private *priv = netdev_priv(dev);

	/* Get the first full descriptor */
	bdp = rx_queue->cur_rx;
	base = rx_queue->rx_bd_base;

	amount_pull = (gfar_uses_fcb(priv) ? GMAC_FCB_LEN : 0);

	while (!((bdp->status & RXBD_EMPTY) || (--rx_work_limit < 0))) {
		struct sk_buff *newskb;
		rmb();

		/* Add another skb for the future */
		newskb = gfar_new_skb(dev);

		skb = rx_queue->rx_skbuff[rx_queue->skb_currx];

		dma_unmap_single(&priv->ofdev->dev, bdp->bufPtr,
				 priv->rx_buffer_size, DMA_FROM_DEVICE);

		if (unlikely(!(bdp->status & RXBD_ERR) &&
			     bdp->length > priv->rx_buffer_size))
			bdp->status = RXBD_LARGE;

		/* We drop the frame if we failed to allocate a new buffer */
		if (unlikely(!newskb || !(bdp->status & RXBD_LAST) ||
			     bdp->status & RXBD_ERR)) {
			count_errors(bdp->status, dev);

			if (unlikely(!newskb))
				newskb = skb;
			else if (skb)
				skb_queue_head(&priv->rx_recycle, skb);
		} else {
			/* Increment the number of packets */
			rx_queue->stats.rx_packets++;
			howmany++;

			if (likely(skb)) {
				pkt_len = bdp->length - ETH_FCS_LEN;
				/* Remove the FCS from the packet length */
				skb_put(skb, pkt_len);
				rx_queue->stats.rx_bytes += pkt_len;
				skb_record_rx_queue(skb, rx_queue->qindex);
				gfar_process_frame(dev, skb, amount_pull,
						   &rx_queue->grp->napi);
			} else {
				netif_warn(priv, rx_err, dev, "Missing skb!\n");
				rx_queue->stats.rx_dropped++;
				priv->extra_stats.rx_skbmissing++;
			}
		}

		rx_queue->rx_skbuff[rx_queue->skb_currx] = newskb;

		/* Setup the new bdp */
		gfar_new_rxbdp(rx_queue, bdp, newskb);

		/* Update to the next pointer */
		bdp = next_bd(bdp, base, rx_queue->rx_ring_size);

		/* update to point at the next skb */
		rx_queue->skb_currx =
		    (rx_queue->skb_currx + 1) &
		    RX_RING_MOD_MASK(rx_queue->rx_ring_size);
	}

	/* Update the current rxbd pointer to be the next one */
	rx_queue->cur_rx = bdp;

	return howmany;
}

static int gfar_poll(struct napi_struct *napi, int budget)
{
	struct gfar_priv_grp *gfargrp = container_of(napi,
			struct gfar_priv_grp, napi);
	struct gfar_private *priv = gfargrp->priv;
	struct gfar __iomem *regs = gfargrp->regs;
	struct gfar_priv_tx_q *tx_queue = NULL;
	struct gfar_priv_rx_q *rx_queue = NULL;
	int rx_cleaned = 0, budget_per_queue = 0, rx_cleaned_per_queue = 0;
	int tx_cleaned = 0, i, left_over_budget = budget;
	unsigned long serviced_queues = 0;
	int num_queues = 0;

	num_queues = gfargrp->num_rx_queues;
	budget_per_queue = budget/num_queues;

	/* Clear IEVENT, so interrupts aren't called again
	 * because of the packets that have already arrived */
	gfar_write(&regs->ievent, IEVENT_RTX_MASK);

	while (num_queues && left_over_budget) {

		budget_per_queue = left_over_budget/num_queues;
		left_over_budget = 0;

		for_each_set_bit(i, &gfargrp->rx_bit_map, priv->num_rx_queues) {
			if (test_bit(i, &serviced_queues))
				continue;
			rx_queue = priv->rx_queue[i];
			tx_queue = priv->tx_queue[rx_queue->qindex];

			tx_cleaned += gfar_clean_tx_ring(tx_queue);
			rx_cleaned_per_queue = gfar_clean_rx_ring(rx_queue,
							budget_per_queue);
			rx_cleaned += rx_cleaned_per_queue;
			if (rx_cleaned_per_queue < budget_per_queue) {
				left_over_budget = left_over_budget +
					(budget_per_queue -
					 rx_cleaned_per_queue);
				set_bit(i, &serviced_queues);
				num_queues--;
			}
		}
	}

	if (tx_cleaned)
		return budget;

	if (rx_cleaned < budget) {
		napi_complete(napi);

		/* Clear the halt bit in RSTAT */
		gfar_write(&regs->rstat, gfargrp->rstat);

		gfar_write(&regs->imask, IMASK_DEFAULT);

		/* If we are coalescing interrupts, update the timer */
		/* Otherwise, clear it */
		gfar_configure_coalescing(priv,
				gfargrp->rx_bit_map, gfargrp->tx_bit_map);
	}

	return rx_cleaned;
}

#ifdef CONFIG_NET_POLL_CONTROLLER
/*
 * Polling 'interrupt' - used by things like netconsole to send skbs
 * without having to re-enable interrupts. It's not called while
 * the interrupt routine is executing.
 */
static void gfar_netpoll(struct net_device *dev)
{
	struct gfar_private *priv = netdev_priv(dev);
	int i = 0;

	/* If the device has multiple interrupts, run tx/rx */
	if (priv->device_flags & FSL_GIANFAR_DEV_HAS_MULTI_INTR) {
		for (i = 0; i < priv->num_grps; i++) {
			disable_irq(priv->gfargrp[i].interruptTransmit);
			disable_irq(priv->gfargrp[i].interruptReceive);
			disable_irq(priv->gfargrp[i].interruptError);
			gfar_interrupt(priv->gfargrp[i].interruptTransmit,
				       &priv->gfargrp[i]);
			enable_irq(priv->gfargrp[i].interruptError);
			enable_irq(priv->gfargrp[i].interruptReceive);
			enable_irq(priv->gfargrp[i].interruptTransmit);
		}
	} else {
		for (i = 0; i < priv->num_grps; i++) {
			disable_irq(priv->gfargrp[i].interruptTransmit);
			gfar_interrupt(priv->gfargrp[i].interruptTransmit,
				       &priv->gfargrp[i]);
			enable_irq(priv->gfargrp[i].interruptTransmit);
		}
	}
}
#endif

/* The interrupt handler for devices with one interrupt */
static irqreturn_t gfar_interrupt(int irq, void *grp_id)
{
	struct gfar_priv_grp *gfargrp = grp_id;

	/* Save ievent for future reference */
	u32 events = gfar_read(&gfargrp->regs->ievent);

	/* Check for reception */
	if (events & IEVENT_RX_MASK)
		gfar_receive(irq, grp_id);

	/* Check for transmit completion */
	if (events & IEVENT_TX_MASK)
		gfar_transmit(irq, grp_id);

	/* Check for errors */
	if (events & IEVENT_ERR_MASK)
		gfar_error(irq, grp_id);

	return IRQ_HANDLED;
}

/* Called every time the controller might need to be made
 * aware of new link state.  The PHY code conveys this
 * information through variables in the phydev structure, and this
 * function converts those variables into the appropriate
 * register values, and can bring down the device if needed.
 */
static void adjust_link(struct net_device *dev)
{
	struct gfar_private *priv = netdev_priv(dev);
	struct gfar __iomem *regs = priv->gfargrp[0].regs;
	unsigned long flags;
	struct phy_device *phydev = priv->phydev;
	int new_state = 0;

	local_irq_save(flags);
	lock_tx_qs(priv);

	if (phydev->link) {
		u32 tempval = gfar_read(&regs->maccfg2);
		u32 ecntrl = gfar_read(&regs->ecntrl);

		/* Now we make sure that we can be in full duplex mode.
		 * If not, we operate in half-duplex mode. */
		if (phydev->duplex != priv->oldduplex) {
			new_state = 1;
			if (!(phydev->duplex))
				tempval &= ~(MACCFG2_FULL_DUPLEX);
			else
				tempval |= MACCFG2_FULL_DUPLEX;

			priv->oldduplex = phydev->duplex;
		}

		if (phydev->speed != priv->oldspeed) {
			new_state = 1;
			switch (phydev->speed) {
			case 1000:
				tempval =
				    ((tempval & ~(MACCFG2_IF)) | MACCFG2_GMII);

				ecntrl &= ~(ECNTRL_R100);
				break;
			case 100:
			case 10:
				tempval =
				    ((tempval & ~(MACCFG2_IF)) | MACCFG2_MII);

				/* Reduced mode distinguishes
				 * between 10 and 100 */
				if (phydev->speed == SPEED_100)
					ecntrl |= ECNTRL_R100;
				else
					ecntrl &= ~(ECNTRL_R100);
				break;
			default:
				netif_warn(priv, link, dev,
					   "Ack! Speed (%d) is not 10/100/1000!\n",
					   phydev->speed);
				break;
			}

			priv->oldspeed = phydev->speed;
		}

		gfar_write(&regs->maccfg2, tempval);
		gfar_write(&regs->ecntrl, ecntrl);

		if (!priv->oldlink) {
			new_state = 1;
			priv->oldlink = 1;
		}
	} else if (priv->oldlink) {
		new_state = 1;
		priv->oldlink = 0;
		priv->oldspeed = 0;
		priv->oldduplex = -1;
	}

	if (new_state && netif_msg_link(priv))
		phy_print_status(phydev);
	unlock_tx_qs(priv);
	local_irq_restore(flags);
}

/* Update the hash table based on the current list of multicast
 * addresses we subscribe to.  Also, change the promiscuity of
 * the device based on the flags (this function is called
 * whenever dev->flags is changed) */
static void gfar_set_multi(struct net_device *dev)
{
	struct netdev_hw_addr *ha;
	struct gfar_private *priv = netdev_priv(dev);
	struct gfar __iomem *regs = priv->gfargrp[0].regs;
	u32 tempval;

	if (dev->flags & IFF_PROMISC) {
		/* Set RCTRL to PROM */
		tempval = gfar_read(&regs->rctrl);
		tempval |= RCTRL_PROM;
		gfar_write(&regs->rctrl, tempval);
	} else {
		/* Set RCTRL to not PROM */
		tempval = gfar_read(&regs->rctrl);
		tempval &= ~(RCTRL_PROM);
		gfar_write(&regs->rctrl, tempval);
	}

	if (dev->flags & IFF_ALLMULTI) {
		/* Set the hash to rx all multicast frames */
		gfar_write(&regs->igaddr0, 0xffffffff);
		gfar_write(&regs->igaddr1, 0xffffffff);
		gfar_write(&regs->igaddr2, 0xffffffff);
		gfar_write(&regs->igaddr3, 0xffffffff);
		gfar_write(&regs->igaddr4, 0xffffffff);
		gfar_write(&regs->igaddr5, 0xffffffff);
		gfar_write(&regs->igaddr6, 0xffffffff);
		gfar_write(&regs->igaddr7, 0xffffffff);
		gfar_write(&regs->gaddr0, 0xffffffff);
		gfar_write(&regs->gaddr1, 0xffffffff);
		gfar_write(&regs->gaddr2, 0xffffffff);
		gfar_write(&regs->gaddr3, 0xffffffff);
		gfar_write(&regs->gaddr4, 0xffffffff);
		gfar_write(&regs->gaddr5, 0xffffffff);
		gfar_write(&regs->gaddr6, 0xffffffff);
		gfar_write(&regs->gaddr7, 0xffffffff);
	} else {
		int em_num;
		int idx;

		/* zero out the hash */
		gfar_write(&regs->igaddr0, 0x0);
		gfar_write(&regs->igaddr1, 0x0);
		gfar_write(&regs->igaddr2, 0x0);
		gfar_write(&regs->igaddr3, 0x0);
		gfar_write(&regs->igaddr4, 0x0);
		gfar_write(&regs->igaddr5, 0x0);
		gfar_write(&regs->igaddr6, 0x0);
		gfar_write(&regs->igaddr7, 0x0);
		gfar_write(&regs->gaddr0, 0x0);
		gfar_write(&regs->gaddr1, 0x0);
		gfar_write(&regs->gaddr2, 0x0);
		gfar_write(&regs->gaddr3, 0x0);
		gfar_write(&regs->gaddr4, 0x0);
		gfar_write(&regs->gaddr5, 0x0);
		gfar_write(&regs->gaddr6, 0x0);
		gfar_write(&regs->gaddr7, 0x0);

		/* If we have extended hash tables, we need to
		 * clear the exact match registers to prepare for
		 * setting addresses */
		if (priv->extended_hash) {
			em_num = GFAR_EM_NUM + 1;
			gfar_clear_exact_match(dev);
			idx = 1;
		} else {
			idx = 0;
			em_num = 0;
		}

		if (netdev_mc_empty(dev))
			return;

		/* Parse the list, and set the appropriate bits */
		netdev_for_each_mc_addr(ha, dev) {
			if (idx < em_num) {
				gfar_set_mac_for_addr(dev, idx, ha->addr);
				idx++;
			} else
				gfar_set_hash_for_addr(dev, ha->addr);
		}
	}
}

/* Clears each of the exact match registers to zero, so they
 * don't interfere with normal reception */
static void gfar_clear_exact_match(struct net_device *dev)
{
	int idx;
	static const u8 zero_arr[ETH_ALEN] = {0, 0, 0, 0, 0, 0};

	for (idx = 1; idx < GFAR_EM_NUM + 1; idx++)
		gfar_set_mac_for_addr(dev, idx, zero_arr);
}

/* Set the appropriate hash bit for the given addr */
/* The algorithm works like so:
 * 1) Take the Destination Address (ie the multicast address), and
 * do a CRC on it (little endian), and reverse the bits of the
 * result.
 * 2) Use the 8 most significant bits as a hash into a 256-entry
 * table.  The table is controlled through 8 32-bit registers:
 * gaddr0-7.  gaddr0's MSB is entry 0, and gaddr7's LSB is
 * entry 255.  This means that the 3 most significant bits in the
 * hash index which gaddr register to use, and the 5 other bits
 * indicate which bit (assuming an IBM numbering scheme, which
 * for PowerPC (tm) is usually the case) in the register holds
 * the entry. */
static void gfar_set_hash_for_addr(struct net_device *dev, u8 *addr)
{
	u32 tempval;
	struct gfar_private *priv = netdev_priv(dev);
	u32 result = ether_crc(ETH_ALEN, addr);
	int width = priv->hash_width;
	u8 whichbit = (result >> (32 - width)) & 0x1f;
	u8 whichreg = result >> (32 - width + 5);
	u32 value = (1 << (31-whichbit));

	tempval = gfar_read(priv->hash_regs[whichreg]);
	tempval |= value;
	gfar_write(priv->hash_regs[whichreg], tempval);
}

/* There are multiple MAC Address register pairs on some controllers
 * This function sets the numth pair to a given address
 */
static void gfar_set_mac_for_addr(struct net_device *dev, int num,
				  const u8 *addr)
{
	struct gfar_private *priv = netdev_priv(dev);
	struct gfar __iomem *regs = priv->gfargrp[0].regs;
	int idx;
	char tmpbuf[ETH_ALEN];
	u32 tempval;
	u32 __iomem *macptr = &regs->macstnaddr1;

	macptr += num*2;

	/* Now copy it into the mac registers backwards, cuz */
	/* little endian is silly */
	for (idx = 0; idx < ETH_ALEN; idx++)
		tmpbuf[ETH_ALEN - 1 - idx] = addr[idx];

	gfar_write(macptr, *((u32 *) (tmpbuf)));

	tempval = *((u32 *) (tmpbuf + 4));

	gfar_write(macptr+1, tempval);
}

/* GFAR error interrupt handler */
static irqreturn_t gfar_error(int irq, void *grp_id)
{
	struct gfar_priv_grp *gfargrp = grp_id;
	struct gfar __iomem *regs = gfargrp->regs;
	struct gfar_private *priv = gfargrp->priv;
	struct net_device *dev = priv->ndev;

	/* Save ievent for future reference */
	u32 events = gfar_read(&regs->ievent);

	/* Clear IEVENT */
	gfar_write(&regs->ievent, events & IEVENT_ERR_MASK);

	/* Magic Packet is not an error. */
	if ((priv->device_flags & FSL_GIANFAR_DEV_HAS_MAGIC_PACKET) &&
	    (events & IEVENT_MAG))
		events &= ~IEVENT_MAG;

	if (netif_msg_rx_err(priv) || netif_msg_tx_err(priv))
		netdev_dbg(dev, "error interrupt (ievent=0x%08x imask=0x%08x)\n",
			   events, gfar_read(&regs->imask));

	/* Update the error counters */
	if (events & IEVENT_TXE) {
		dev->stats.tx_errors++;

		if (events & IEVENT_LC)
			dev->stats.tx_window_errors++;
		if (events & IEVENT_CRL)
			dev->stats.tx_aborted_errors++;
		if (events & IEVENT_XFUN) {
			unsigned long flags;

			netif_dbg(priv, tx_err, dev,
				  "TX FIFO underrun, packet dropped\n");
			dev->stats.tx_dropped++;
			priv->extra_stats.tx_underrun++;

			local_irq_save(flags);
			lock_tx_qs(priv);

			/* Reactivate the Tx Queues */
			gfar_write(&regs->tstat, gfargrp->tstat);

			unlock_tx_qs(priv);
			local_irq_restore(flags);
		}
		netif_dbg(priv, tx_err, dev, "Transmit Error\n");
	}
	if (events & IEVENT_BSY) {
		dev->stats.rx_errors++;
		priv->extra_stats.rx_bsy++;

		gfar_receive(irq, grp_id);

		netif_dbg(priv, rx_err, dev, "busy error (rstat: %x)\n",
			  gfar_read(&regs->rstat));
	}
	if (events & IEVENT_BABR) {
		dev->stats.rx_errors++;
		priv->extra_stats.rx_babr++;

		netif_dbg(priv, rx_err, dev, "babbling RX error\n");
	}
	if (events & IEVENT_EBERR) {
		priv->extra_stats.eberr++;
		netif_dbg(priv, rx_err, dev, "bus error\n");
	}
	if (events & IEVENT_RXC)
		netif_dbg(priv, rx_status, dev, "control frame\n");

	if (events & IEVENT_BABT) {
		priv->extra_stats.tx_babt++;
		netif_dbg(priv, tx_err, dev, "babbling TX error\n");
	}
	return IRQ_HANDLED;
}

static struct of_device_id gfar_match[] =
{
	{
		.type = "network",
		.compatible = "gianfar",
	},
	{
		.compatible = "fsl,etsec2",
	},
	{},
};
MODULE_DEVICE_TABLE(of, gfar_match);

/* Structure for a device driver */
static struct platform_driver gfar_driver = {
	.driver = {
		.name = "fsl-gianfar",
		.owner = THIS_MODULE,
		.pm = GFAR_PM_OPS,
		.of_match_table = gfar_match,
	},
	.probe = gfar_probe,
	.remove = gfar_remove,
};

module_platform_driver(gfar_driver);