/* drivers/net/ethernet/freescale/gianfar.c
 *
 * Gianfar Ethernet Driver
 * This driver is designed for the non-CPM ethernet controllers
 * on the 85xx and 83xx family of integrated processors
 * Based on 8260_io/fcc_enet.c
 *
 * Maintainer: Kumar Gala
 * Modifier: Sandeep Gopalpet <sandeep.kumar@freescale.com>
 *
 * Copyright 2002-2009, 2011-2013 Freescale Semiconductor, Inc.
 * Copyright 2007 MontaVista Software, Inc.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or (at your
 * option) any later version.
 *
 * Gianfar: AKA Lambda Draconis, "Dragon"
 *
 * The driver is initialized through of_device. Configuration information
 * is therefore conveyed through an OF-style device tree.
 *
 * The Gianfar Ethernet Controller uses a ring of buffer
 * descriptors. The beginning is indicated by a register
 * pointing to the physical address of the start of the ring.
 * The end is determined by a "wrap" bit being set in the
 * last descriptor of the ring.
 *
 * When a packet is received, the RXF bit in the
 * IEVENT register is set, triggering an interrupt when the
 * corresponding bit in the IMASK register is also set (if
 * interrupt coalescing is active, then the interrupt may not
 * happen immediately, but will wait until either a set number
 * of frames or amount of time have passed). In NAPI, the
 * interrupt handler will signal there is work to be done, and
 * exit. This method will start at the last known empty
 * descriptor, and process every subsequent descriptor until there
 * are none left with data (NAPI will stop after a set number of
 * packets to give time to other tasks, but will eventually
 * process all the packets). The data arrives inside a
 * pre-allocated skb, and so after the skb is passed up to the
 * stack, a new skb must be allocated, and the address field in
 * the buffer descriptor must be updated to indicate this new
 * skb.
 *
 * When the kernel requests that a packet be transmitted, the
 * driver starts where it left off last time, and points the
 * descriptor at the buffer which was passed in. The driver
 * then informs the DMA engine that there are packets ready to
 * be transmitted. Once the controller is finished transmitting
 * the packet, an interrupt may be triggered (under the same
 * conditions as for reception, but depending on the TXF bit).
 * The driver then cleans up the buffer.
 */
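/* Illustrative sketch only, not driver code: the consumer side of a BD
 * ring of the shape described above.  Field names are simplified and
 * shown host-endian; the real RX path (gfar_clean_rx_ring) layers skb
 * allocation, error handling and NAPI budgeting on top of this walk.
 *
 *	bdp = rx_queue->cur_rx;
 *	while (!(bdp->status & RXBD_EMPTY)) {
 *		consume(bdp);			// hypothetical helper
 *		bdp->status |= RXBD_EMPTY;	// hand the BD back to hw
 *		if (bdp->status & RXBD_WRAP)	// last BD in the ring?
 *			bdp = rx_queue->rx_bd_base;
 *		else
 *			bdp++;
 *	}
 *	rx_queue->cur_rx = bdp;
 */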
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/unistd.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/if_vlan.h>
#include <linux/spinlock.h>
#include <linux/mm.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/of_mdio.h>
#include <linux/of_platform.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/in.h>
#include <linux/net_tstamp.h>

#include <asm/io.h>
#ifdef CONFIG_PPC
#include <asm/reg.h>
#include <asm/mpc85xx.h>
#endif
#include <asm/uaccess.h>
#include <linux/module.h>
#include <linux/dma-mapping.h>
#include <linux/crc32.h>
#include <linux/mii.h>
#include <linux/phy.h>
#include <linux/phy_fixed.h>
#include <linux/of.h>
#include <linux/of_net.h>

#include "gianfar.h"
#define TX_TIMEOUT      (1*HZ)

const char gfar_driver_version[] = "1.3";

static int gfar_enet_open(struct net_device *dev);
static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev);
static void gfar_reset_task(struct work_struct *work);
static void gfar_timeout(struct net_device *dev);
static int gfar_close(struct net_device *dev);
static struct sk_buff *gfar_new_skb(struct net_device *dev,
				    dma_addr_t *bufaddr);
static int gfar_set_mac_address(struct net_device *dev);
static int gfar_change_mtu(struct net_device *dev, int new_mtu);
static irqreturn_t gfar_error(int irq, void *dev_id);
static irqreturn_t gfar_transmit(int irq, void *dev_id);
static irqreturn_t gfar_interrupt(int irq, void *dev_id);
static void adjust_link(struct net_device *dev);
static noinline void gfar_update_link_state(struct gfar_private *priv);
static int init_phy(struct net_device *dev);
static int gfar_probe(struct platform_device *ofdev);
static int gfar_remove(struct platform_device *ofdev);
static void free_skb_resources(struct gfar_private *priv);
static void gfar_set_multi(struct net_device *dev);
static void gfar_set_hash_for_addr(struct net_device *dev, u8 *addr);
static void gfar_configure_serdes(struct net_device *dev);
static int gfar_poll_rx(struct napi_struct *napi, int budget);
static int gfar_poll_tx(struct napi_struct *napi, int budget);
static int gfar_poll_rx_sq(struct napi_struct *napi, int budget);
static int gfar_poll_tx_sq(struct napi_struct *napi, int budget);
#ifdef CONFIG_NET_POLL_CONTROLLER
static void gfar_netpoll(struct net_device *dev);
#endif
int gfar_clean_rx_ring(struct gfar_priv_rx_q *rx_queue, int rx_work_limit);
static void gfar_clean_tx_ring(struct gfar_priv_tx_q *tx_queue);
static void gfar_process_frame(struct net_device *dev, struct sk_buff *skb,
			       int amount_pull, struct napi_struct *napi);
static void gfar_halt_nodisable(struct gfar_private *priv);
static void gfar_clear_exact_match(struct net_device *dev);
static void gfar_set_mac_for_addr(struct net_device *dev, int num,
				  const u8 *addr);
static int gfar_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
MODULE_AUTHOR("Freescale Semiconductor, Inc");
MODULE_DESCRIPTION("Gianfar Ethernet Driver");
MODULE_LICENSE("GPL");
static void gfar_init_rxbdp(struct gfar_priv_rx_q *rx_queue, struct rxbd8 *bdp,
			    dma_addr_t buf)
{
	u32 lstatus;

	bdp->bufPtr = cpu_to_be32(buf);

	lstatus = BD_LFLAG(RXBD_EMPTY | RXBD_INTERRUPT);
	if (bdp == rx_queue->rx_bd_base + rx_queue->rx_ring_size - 1)
		lstatus |= BD_LFLAG(RXBD_WRAP);

	gfar_wmb();

	bdp->lstatus = cpu_to_be32(lstatus);
}
static int gfar_init_bds(struct net_device *ndev)
{
	struct gfar_private *priv = netdev_priv(ndev);
	struct gfar __iomem *regs = priv->gfargrp[0].regs;
	struct gfar_priv_tx_q *tx_queue = NULL;
	struct gfar_priv_rx_q *rx_queue = NULL;
	struct txbd8 *txbdp;
	struct rxbd8 *rxbdp;
	u32 __iomem *rfbptr;
	int i, j;
	dma_addr_t bufaddr;

	for (i = 0; i < priv->num_tx_queues; i++) {
		tx_queue = priv->tx_queue[i];
		/* Initialize some variables in our dev structure */
		tx_queue->num_txbdfree = tx_queue->tx_ring_size;
		tx_queue->dirty_tx = tx_queue->tx_bd_base;
		tx_queue->cur_tx = tx_queue->tx_bd_base;
		tx_queue->skb_curtx = 0;
		tx_queue->skb_dirtytx = 0;

		/* Initialize Transmit Descriptor Ring */
		txbdp = tx_queue->tx_bd_base;
		for (j = 0; j < tx_queue->tx_ring_size; j++) {
			txbdp->lstatus = 0;
			txbdp->bufPtr = 0;
			txbdp++;
		}

		/* Set the last descriptor in the ring to indicate wrap */
		txbdp--;
		txbdp->status = cpu_to_be16(be16_to_cpu(txbdp->status) |
					    TXBD_WRAP);
	}

	rfbptr = &regs->rfbptr0;
	for (i = 0; i < priv->num_rx_queues; i++) {
		rx_queue = priv->rx_queue[i];
		rx_queue->cur_rx = rx_queue->rx_bd_base;
		rx_queue->skb_currx = 0;
		rxbdp = rx_queue->rx_bd_base;

		for (j = 0; j < rx_queue->rx_ring_size; j++) {
			struct sk_buff *skb = rx_queue->rx_skbuff[j];

			if (skb) {
				bufaddr = be32_to_cpu(rxbdp->bufPtr);
			} else {
				skb = gfar_new_skb(ndev, &bufaddr);
				if (!skb) {
					netdev_err(ndev, "Can't allocate RX buffers\n");
					return -ENOMEM;
				}
				rx_queue->rx_skbuff[j] = skb;
			}

			gfar_init_rxbdp(rx_queue, rxbdp, bufaddr);
			rxbdp++;
		}

		rx_queue->rfbptr = rfbptr;
		rfbptr += 2;
	}

	return 0;
}
static int gfar_alloc_skb_resources(struct net_device *ndev)
{
	void *vaddr;
	dma_addr_t addr;
	int i, j, k;
	struct gfar_private *priv = netdev_priv(ndev);
	struct device *dev = priv->dev;
	struct gfar_priv_tx_q *tx_queue = NULL;
	struct gfar_priv_rx_q *rx_queue = NULL;

	priv->total_tx_ring_size = 0;
	for (i = 0; i < priv->num_tx_queues; i++)
		priv->total_tx_ring_size += priv->tx_queue[i]->tx_ring_size;

	priv->total_rx_ring_size = 0;
	for (i = 0; i < priv->num_rx_queues; i++)
		priv->total_rx_ring_size += priv->rx_queue[i]->rx_ring_size;

	/* Allocate memory for the buffer descriptors */
	vaddr = dma_alloc_coherent(dev,
				   (priv->total_tx_ring_size *
				    sizeof(struct txbd8)) +
				   (priv->total_rx_ring_size *
				    sizeof(struct rxbd8)),
				   &addr, GFP_KERNEL);
	if (!vaddr)
		return -ENOMEM;

	for (i = 0; i < priv->num_tx_queues; i++) {
		tx_queue = priv->tx_queue[i];
		tx_queue->tx_bd_base = vaddr;
		tx_queue->tx_bd_dma_base = addr;
		tx_queue->dev = ndev;
		/* enet DMA only understands physical addresses */
		addr  += sizeof(struct txbd8) * tx_queue->tx_ring_size;
		vaddr += sizeof(struct txbd8) * tx_queue->tx_ring_size;
	}

	/* Start the rx descriptor ring where the tx ring leaves off */
	for (i = 0; i < priv->num_rx_queues; i++) {
		rx_queue = priv->rx_queue[i];
		rx_queue->rx_bd_base = vaddr;
		rx_queue->rx_bd_dma_base = addr;
		rx_queue->dev = ndev;
		addr  += sizeof(struct rxbd8) * rx_queue->rx_ring_size;
		vaddr += sizeof(struct rxbd8) * rx_queue->rx_ring_size;
	}

	/* Setup the skbuff rings */
	for (i = 0; i < priv->num_tx_queues; i++) {
		tx_queue = priv->tx_queue[i];
		tx_queue->tx_skbuff =
			kmalloc_array(tx_queue->tx_ring_size,
				      sizeof(*tx_queue->tx_skbuff),
				      GFP_KERNEL);
		if (!tx_queue->tx_skbuff)
			goto cleanup;

		for (k = 0; k < tx_queue->tx_ring_size; k++)
			tx_queue->tx_skbuff[k] = NULL;
	}

	for (i = 0; i < priv->num_rx_queues; i++) {
		rx_queue = priv->rx_queue[i];
		rx_queue->rx_skbuff =
			kmalloc_array(rx_queue->rx_ring_size,
				      sizeof(*rx_queue->rx_skbuff),
				      GFP_KERNEL);
		if (!rx_queue->rx_skbuff)
			goto cleanup;

		for (j = 0; j < rx_queue->rx_ring_size; j++)
			rx_queue->rx_skbuff[j] = NULL;
	}

	if (gfar_init_bds(ndev))
		goto cleanup;

	return 0;

cleanup:
	free_skb_resources(priv);
	return -ENOMEM;
}
static void gfar_init_tx_rx_base(struct gfar_private *priv)
{
	struct gfar __iomem *regs = priv->gfargrp[0].regs;
	u32 __iomem *baddr;
	int i;

	baddr = &regs->tbase0;
	for (i = 0; i < priv->num_tx_queues; i++) {
		gfar_write(baddr, priv->tx_queue[i]->tx_bd_dma_base);
		baddr += 2;
	}

	baddr = &regs->rbase0;
	for (i = 0; i < priv->num_rx_queues; i++) {
		gfar_write(baddr, priv->rx_queue[i]->rx_bd_dma_base);
		baddr += 2;
	}
}
static void gfar_init_rqprm(struct gfar_private *priv)
{
	struct gfar __iomem *regs = priv->gfargrp[0].regs;
	u32 __iomem *baddr;
	int i;

	baddr = &regs->rqprm0;
	for (i = 0; i < priv->num_rx_queues; i++) {
		gfar_write(baddr, priv->rx_queue[i]->rx_ring_size |
			   (DEFAULT_RX_LFC_THR << FBTHR_SHIFT));
		baddr++;
	}
}
static void gfar_rx_buff_size_config(struct gfar_private *priv)
{
	int frame_size = priv->ndev->mtu + ETH_HLEN + ETH_FCS_LEN;

	/* set this when rx hw offload (TOE) functions are being used */
	priv->uses_rxfcb = 0;

	if (priv->ndev->features & (NETIF_F_RXCSUM | NETIF_F_HW_VLAN_CTAG_RX))
		priv->uses_rxfcb = 1;

	if (priv->hwts_rx_en)
		priv->uses_rxfcb = 1;

	if (priv->uses_rxfcb)
		frame_size += GMAC_FCB_LEN;

	frame_size += priv->padding;

	frame_size = (frame_size & ~(INCREMENTAL_BUFFER_SIZE - 1)) +
		     INCREMENTAL_BUFFER_SIZE;

	priv->rx_buffer_size = frame_size;
}
static void gfar_mac_rx_config(struct gfar_private *priv)
{
	struct gfar __iomem *regs = priv->gfargrp[0].regs;
	u32 rctrl = 0;

	if (priv->rx_filer_enable) {
		rctrl |= RCTRL_FILREN;
		/* Program the RIR0 reg with the required distribution */
		if (priv->poll_mode == GFAR_SQ_POLLING)
			gfar_write(&regs->rir0, DEFAULT_2RXQ_RIR0);
		else /* GFAR_MQ_POLLING */
			gfar_write(&regs->rir0, DEFAULT_8RXQ_RIR0);
	}

	/* Restore PROMISC mode */
	if (priv->ndev->flags & IFF_PROMISC)
		rctrl |= RCTRL_PROM;

	if (priv->ndev->features & NETIF_F_RXCSUM)
		rctrl |= RCTRL_CHECKSUMMING;

	if (priv->extended_hash)
		rctrl |= RCTRL_EXTHASH | RCTRL_EMEN;

	if (priv->padding) {
		rctrl &= ~RCTRL_PAL_MASK;
		rctrl |= RCTRL_PADDING(priv->padding);
	}

	/* Enable HW time stamping if requested from user space */
	if (priv->hwts_rx_en)
		rctrl |= RCTRL_PRSDEP_INIT | RCTRL_TS_ENABLE;

	if (priv->ndev->features & NETIF_F_HW_VLAN_CTAG_RX)
		rctrl |= RCTRL_VLEX | RCTRL_PRSDEP_INIT;

	/* Clear the LFC bit */
	gfar_write(&regs->rctrl, rctrl);
	/* Init flow control threshold values */
	gfar_init_rqprm(priv);
	gfar_write(&regs->ptv, DEFAULT_LFC_PTVVAL);
	rctrl |= RCTRL_LFC;

	/* Init rctrl based on our settings */
	gfar_write(&regs->rctrl, rctrl);
}
static void gfar_mac_tx_config(struct gfar_private *priv)
{
	struct gfar __iomem *regs = priv->gfargrp[0].regs;
	u32 tctrl = 0;

	if (priv->ndev->features & NETIF_F_IP_CSUM)
		tctrl |= TCTRL_INIT_CSUM;

	if (priv->prio_sched_en)
		tctrl |= TCTRL_TXSCHED_PRIO;
	else {
		tctrl |= TCTRL_TXSCHED_WRRS;
		gfar_write(&regs->tr03wt, DEFAULT_WRRS_WEIGHT);
		gfar_write(&regs->tr47wt, DEFAULT_WRRS_WEIGHT);
	}

	if (priv->ndev->features & NETIF_F_HW_VLAN_CTAG_TX)
		tctrl |= TCTRL_VLINS;

	gfar_write(&regs->tctrl, tctrl);
}
static void gfar_configure_coalescing(struct gfar_private *priv,
				      unsigned long tx_mask, unsigned long rx_mask)
{
	struct gfar __iomem *regs = priv->gfargrp[0].regs;
	u32 __iomem *baddr;

	if (priv->mode == MQ_MG_MODE) {
		int i = 0;

		baddr = &regs->txic0;
		for_each_set_bit(i, &tx_mask, priv->num_tx_queues) {
			gfar_write(baddr + i, 0);
			if (likely(priv->tx_queue[i]->txcoalescing))
				gfar_write(baddr + i, priv->tx_queue[i]->txic);
		}

		baddr = &regs->rxic0;
		for_each_set_bit(i, &rx_mask, priv->num_rx_queues) {
			gfar_write(baddr + i, 0);
			if (likely(priv->rx_queue[i]->rxcoalescing))
				gfar_write(baddr + i, priv->rx_queue[i]->rxic);
		}
	} else {
		/* Backward compatible case -- even if we enable
		 * multiple queues, there's only single reg to program
		 */
		gfar_write(&regs->txic, 0);
		if (likely(priv->tx_queue[0]->txcoalescing))
			gfar_write(&regs->txic, priv->tx_queue[0]->txic);

		gfar_write(&regs->rxic, 0);
		if (unlikely(priv->rx_queue[0]->rxcoalescing))
			gfar_write(&regs->rxic, priv->rx_queue[0]->rxic);
	}
}

void gfar_configure_coalescing_all(struct gfar_private *priv)
{
	gfar_configure_coalescing(priv, 0xFF, 0xFF);
}
static struct net_device_stats *gfar_get_stats(struct net_device *dev)
{
	struct gfar_private *priv = netdev_priv(dev);
	unsigned long rx_packets = 0, rx_bytes = 0, rx_dropped = 0;
	unsigned long tx_packets = 0, tx_bytes = 0;
	int i;

	for (i = 0; i < priv->num_rx_queues; i++) {
		rx_packets += priv->rx_queue[i]->stats.rx_packets;
		rx_bytes   += priv->rx_queue[i]->stats.rx_bytes;
		rx_dropped += priv->rx_queue[i]->stats.rx_dropped;
	}

	dev->stats.rx_packets = rx_packets;
	dev->stats.rx_bytes   = rx_bytes;
	dev->stats.rx_dropped = rx_dropped;

	for (i = 0; i < priv->num_tx_queues; i++) {
		tx_bytes   += priv->tx_queue[i]->stats.tx_bytes;
		tx_packets += priv->tx_queue[i]->stats.tx_packets;
	}

	dev->stats.tx_bytes   = tx_bytes;
	dev->stats.tx_packets = tx_packets;

	return &dev->stats;
}
static const struct net_device_ops gfar_netdev_ops = {
	.ndo_open = gfar_enet_open,
	.ndo_start_xmit = gfar_start_xmit,
	.ndo_stop = gfar_close,
	.ndo_change_mtu = gfar_change_mtu,
	.ndo_set_features = gfar_set_features,
	.ndo_set_rx_mode = gfar_set_multi,
	.ndo_tx_timeout = gfar_timeout,
	.ndo_do_ioctl = gfar_ioctl,
	.ndo_get_stats = gfar_get_stats,
	.ndo_set_mac_address = eth_mac_addr,
	.ndo_validate_addr = eth_validate_addr,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller = gfar_netpoll,
#endif
};
static void gfar_ints_disable(struct gfar_private *priv)
{
	int i;

	for (i = 0; i < priv->num_grps; i++) {
		struct gfar __iomem *regs = priv->gfargrp[i].regs;

		/* Clear IEVENT */
		gfar_write(&regs->ievent, IEVENT_INIT_CLEAR);

		/* Initialize IMASK */
		gfar_write(&regs->imask, IMASK_INIT_CLEAR);
	}
}

static void gfar_ints_enable(struct gfar_private *priv)
{
	int i;

	for (i = 0; i < priv->num_grps; i++) {
		struct gfar __iomem *regs = priv->gfargrp[i].regs;

		/* Unmask the interrupts we look for */
		gfar_write(&regs->imask, IMASK_DEFAULT);
	}
}
static void lock_tx_qs(struct gfar_private *priv)
{
	int i;

	for (i = 0; i < priv->num_tx_queues; i++)
		spin_lock(&priv->tx_queue[i]->txlock);
}

static void unlock_tx_qs(struct gfar_private *priv)
{
	int i;

	for (i = 0; i < priv->num_tx_queues; i++)
		spin_unlock(&priv->tx_queue[i]->txlock);
}
static int gfar_alloc_tx_queues(struct gfar_private *priv)
{
	int i;

	for (i = 0; i < priv->num_tx_queues; i++) {
		priv->tx_queue[i] = kzalloc(sizeof(struct gfar_priv_tx_q),
					    GFP_KERNEL);
		if (!priv->tx_queue[i])
			return -ENOMEM;

		priv->tx_queue[i]->tx_skbuff = NULL;
		priv->tx_queue[i]->qindex = i;
		priv->tx_queue[i]->dev = priv->ndev;
		spin_lock_init(&(priv->tx_queue[i]->txlock));
	}
	return 0;
}
static int gfar_alloc_rx_queues(struct gfar_private *priv)
{
	int i;

	for (i = 0; i < priv->num_rx_queues; i++) {
		priv->rx_queue[i] = kzalloc(sizeof(struct gfar_priv_rx_q),
					    GFP_KERNEL);
		if (!priv->rx_queue[i])
			return -ENOMEM;

		priv->rx_queue[i]->rx_skbuff = NULL;
		priv->rx_queue[i]->qindex = i;
		priv->rx_queue[i]->dev = priv->ndev;
	}
	return 0;
}
static void gfar_free_tx_queues(struct gfar_private *priv)
{
	int i;

	for (i = 0; i < priv->num_tx_queues; i++)
		kfree(priv->tx_queue[i]);
}

static void gfar_free_rx_queues(struct gfar_private *priv)
{
	int i;

	for (i = 0; i < priv->num_rx_queues; i++)
		kfree(priv->rx_queue[i]);
}
static void unmap_group_regs(struct gfar_private *priv)
{
	int i;

	for (i = 0; i < MAXGROUPS; i++)
		if (priv->gfargrp[i].regs)
			iounmap(priv->gfargrp[i].regs);
}
static void free_gfar_dev(struct gfar_private *priv)
{
	int i, j;

	for (i = 0; i < priv->num_grps; i++)
		for (j = 0; j < GFAR_NUM_IRQS; j++) {
			kfree(priv->gfargrp[i].irqinfo[j]);
			priv->gfargrp[i].irqinfo[j] = NULL;
		}

	free_netdev(priv->ndev);
}
static void disable_napi(struct gfar_private *priv)
{
	int i;

	for (i = 0; i < priv->num_grps; i++) {
		napi_disable(&priv->gfargrp[i].napi_rx);
		napi_disable(&priv->gfargrp[i].napi_tx);
	}
}

static void enable_napi(struct gfar_private *priv)
{
	int i;

	for (i = 0; i < priv->num_grps; i++) {
		napi_enable(&priv->gfargrp[i].napi_rx);
		napi_enable(&priv->gfargrp[i].napi_tx);
	}
}
static int gfar_parse_group(struct device_node *np,
			    struct gfar_private *priv, const char *model)
{
	struct gfar_priv_grp *grp = &priv->gfargrp[priv->num_grps];
	int i;

	for (i = 0; i < GFAR_NUM_IRQS; i++) {
		grp->irqinfo[i] = kzalloc(sizeof(struct gfar_irqinfo),
					  GFP_KERNEL);
		if (!grp->irqinfo[i])
			return -ENOMEM;
	}

	grp->regs = of_iomap(np, 0);
	if (!grp->regs)
		return -ENOMEM;

	gfar_irq(grp, TX)->irq = irq_of_parse_and_map(np, 0);

	/* If we aren't the FEC we have multiple interrupts */
	if (model && strcasecmp(model, "FEC")) {
		gfar_irq(grp, RX)->irq = irq_of_parse_and_map(np, 1);
		gfar_irq(grp, ER)->irq = irq_of_parse_and_map(np, 2);
		if (gfar_irq(grp, TX)->irq == NO_IRQ ||
		    gfar_irq(grp, RX)->irq == NO_IRQ ||
		    gfar_irq(grp, ER)->irq == NO_IRQ)
			return -EINVAL;
	}

	grp->priv = priv;
	spin_lock_init(&grp->grplock);
	if (priv->mode == MQ_MG_MODE) {
		u32 rxq_mask, txq_mask;
		int ret;

		grp->rx_bit_map = (DEFAULT_MAPPING >> priv->num_grps);
		grp->tx_bit_map = (DEFAULT_MAPPING >> priv->num_grps);

		ret = of_property_read_u32(np, "fsl,rx-bit-map", &rxq_mask);
		if (!ret) {
			grp->rx_bit_map = rxq_mask ?
			rxq_mask : (DEFAULT_MAPPING >> priv->num_grps);
		}

		ret = of_property_read_u32(np, "fsl,tx-bit-map", &txq_mask);
		if (!ret) {
			grp->tx_bit_map = txq_mask ?
			txq_mask : (DEFAULT_MAPPING >> priv->num_grps);
		}

		if (priv->poll_mode == GFAR_SQ_POLLING) {
			/* One Q per interrupt group: Q0 to G0, Q1 to G1 */
			grp->rx_bit_map = (DEFAULT_MAPPING >> priv->num_grps);
			grp->tx_bit_map = (DEFAULT_MAPPING >> priv->num_grps);
		}
	} else {
		grp->rx_bit_map = 0xFF;
		grp->tx_bit_map = 0xFF;
	}

	/* bit_map's MSB is q0 (from q0 to q7) but, for_each_set_bit parses
	 * right to left, so we need to reverse the 8 bits to get the q index
	 */
	grp->rx_bit_map = bitrev8(grp->rx_bit_map);
	grp->tx_bit_map = bitrev8(grp->tx_bit_map);

	/* Calculate RSTAT, TSTAT, RQUEUE and TQUEUE values,
	 * also assign queues to groups
	 */
	for_each_set_bit(i, &grp->rx_bit_map, priv->num_rx_queues) {
		if (!grp->rx_queue)
			grp->rx_queue = priv->rx_queue[i];
		grp->num_rx_queues++;
		grp->rstat |= (RSTAT_CLEAR_RHALT >> i);
		priv->rqueue |= ((RQUEUE_EN0 | RQUEUE_EX0) >> i);
		priv->rx_queue[i]->grp = grp;
	}

	for_each_set_bit(i, &grp->tx_bit_map, priv->num_tx_queues) {
		if (!grp->tx_queue)
			grp->tx_queue = priv->tx_queue[i];
		grp->num_tx_queues++;
		grp->tstat |= (TSTAT_CLEAR_THALT >> i);
		priv->tqueue |= (TQUEUE_EN0 >> i);
		priv->tx_queue[i]->grp = grp;
	}

	priv->num_grps++;

	return 0;
}
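/* Worked example for the bit_map reversal above (illustrative only):
 * a mask of 0x80 means "queue 0", with q0 at the MSB.  Since
 * bitrev8(0x80) == 0x01, after the reversal for_each_set_bit()
 * reports bit 0 first, so the loop index i equals the queue index.
 */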
static int gfar_of_group_count(struct device_node *np)
{
	struct device_node *child;
	int num = 0;

	for_each_available_child_of_node(np, child)
		if (!of_node_cmp(child->name, "queue-group"))
			num++;

	return num;
}
static int gfar_of_init(struct platform_device *ofdev, struct net_device **pdev)
{
	const char *model;
	const char *ctype;
	const void *mac_addr;
	int err = 0, i;
	struct net_device *dev = NULL;
	struct gfar_private *priv = NULL;
	struct device_node *np = ofdev->dev.of_node;
	struct device_node *child = NULL;
	struct property *stash;
	u32 stash_len = 0;
	u32 stash_idx = 0;
	unsigned int num_tx_qs, num_rx_qs;
	unsigned short mode, poll_mode;

	if (!np)
		return -ENODEV;

	if (of_device_is_compatible(np, "fsl,etsec2")) {
		mode = MQ_MG_MODE;
		poll_mode = GFAR_SQ_POLLING;
	} else {
		mode = SQ_SG_MODE;
		poll_mode = GFAR_SQ_POLLING;
	}

	if (mode == SQ_SG_MODE) {
		num_tx_qs = 1;
		num_rx_qs = 1;
	} else { /* MQ_MG_MODE */
		/* get the actual number of supported groups */
		unsigned int num_grps = gfar_of_group_count(np);

		if (num_grps == 0 || num_grps > MAXGROUPS) {
			dev_err(&ofdev->dev, "Invalid # of int groups(%d)\n",
				num_grps);
			pr_err("Cannot do alloc_etherdev, aborting\n");
			return -EINVAL;
		}

		if (poll_mode == GFAR_SQ_POLLING) {
			num_tx_qs = num_grps; /* one txq per int group */
			num_rx_qs = num_grps; /* one rxq per int group */
		} else { /* GFAR_MQ_POLLING */
			u32 tx_queues, rx_queues;
			int ret;

			/* parse the num of HW tx and rx queues */
			ret = of_property_read_u32(np, "fsl,num_tx_queues",
						   &tx_queues);
			num_tx_qs = ret ? 1 : tx_queues;

			ret = of_property_read_u32(np, "fsl,num_rx_queues",
						   &rx_queues);
			num_rx_qs = ret ? 1 : rx_queues;
		}
	}

	if (num_tx_qs > MAX_TX_QS) {
		pr_err("num_tx_qs(=%d) greater than MAX_TX_QS(=%d)\n",
		       num_tx_qs, MAX_TX_QS);
		pr_err("Cannot do alloc_etherdev, aborting\n");
		return -EINVAL;
	}

	if (num_rx_qs > MAX_RX_QS) {
		pr_err("num_rx_qs(=%d) greater than MAX_RX_QS(=%d)\n",
		       num_rx_qs, MAX_RX_QS);
		pr_err("Cannot do alloc_etherdev, aborting\n");
		return -EINVAL;
	}

	*pdev = alloc_etherdev_mq(sizeof(*priv), num_tx_qs);
	dev = *pdev;
	if (NULL == dev)
		return -ENOMEM;

	priv = netdev_priv(dev);
	priv->ndev = dev;

	priv->mode = mode;
	priv->poll_mode = poll_mode;

	priv->num_tx_queues = num_tx_qs;
	netif_set_real_num_rx_queues(dev, num_rx_qs);
	priv->num_rx_queues = num_rx_qs;

	err = gfar_alloc_tx_queues(priv);
	if (err)
		goto tx_alloc_failed;

	err = gfar_alloc_rx_queues(priv);
	if (err)
		goto rx_alloc_failed;

	err = of_property_read_string(np, "model", &model);
	if (err) {
		pr_err("Device model property missing, aborting\n");
		goto rx_alloc_failed;
	}

	/* Init Rx queue filer rule set linked list */
	INIT_LIST_HEAD(&priv->rx_list.list);
	priv->rx_list.count = 0;
	mutex_init(&priv->rx_queue_access);

	for (i = 0; i < MAXGROUPS; i++)
		priv->gfargrp[i].regs = NULL;

	/* Parse and initialize group specific information */
	if (priv->mode == MQ_MG_MODE) {
		for_each_available_child_of_node(np, child) {
			if (of_node_cmp(child->name, "queue-group"))
				continue;

			err = gfar_parse_group(child, priv, model);
			if (err)
				goto err_grp_init;
		}
	} else { /* SQ_SG_MODE */
		err = gfar_parse_group(np, priv, model);
		if (err)
			goto err_grp_init;
	}

	stash = of_find_property(np, "bd-stash", NULL);

	if (stash) {
		priv->device_flags |= FSL_GIANFAR_DEV_HAS_BD_STASHING;
		priv->bd_stash_en = 1;
	}

	err = of_property_read_u32(np, "rx-stash-len", &stash_len);

	if (err == 0)
		priv->rx_stash_size = stash_len;

	err = of_property_read_u32(np, "rx-stash-idx", &stash_idx);

	if (err == 0)
		priv->rx_stash_index = stash_idx;

	if (stash_len || stash_idx)
		priv->device_flags |= FSL_GIANFAR_DEV_HAS_BUF_STASHING;

	mac_addr = of_get_mac_address(np);

	if (mac_addr)
		memcpy(dev->dev_addr, mac_addr, ETH_ALEN);

	if (model && !strcasecmp(model, "TSEC"))
		priv->device_flags |= FSL_GIANFAR_DEV_HAS_GIGABIT |
				      FSL_GIANFAR_DEV_HAS_COALESCE |
				      FSL_GIANFAR_DEV_HAS_RMON |
				      FSL_GIANFAR_DEV_HAS_MULTI_INTR;

	if (model && !strcasecmp(model, "eTSEC"))
		priv->device_flags |= FSL_GIANFAR_DEV_HAS_GIGABIT |
				      FSL_GIANFAR_DEV_HAS_COALESCE |
				      FSL_GIANFAR_DEV_HAS_RMON |
				      FSL_GIANFAR_DEV_HAS_MULTI_INTR |
				      FSL_GIANFAR_DEV_HAS_CSUM |
				      FSL_GIANFAR_DEV_HAS_VLAN |
				      FSL_GIANFAR_DEV_HAS_MAGIC_PACKET |
				      FSL_GIANFAR_DEV_HAS_EXTENDED_HASH |
				      FSL_GIANFAR_DEV_HAS_TIMER;

	err = of_property_read_string(np, "phy-connection-type", &ctype);

	/* We only care about rgmii-id.  The rest are autodetected */
	if (err == 0 && !strcmp(ctype, "rgmii-id"))
		priv->interface = PHY_INTERFACE_MODE_RGMII_ID;
	else
		priv->interface = PHY_INTERFACE_MODE_MII;

	if (of_find_property(np, "fsl,magic-packet", NULL))
		priv->device_flags |= FSL_GIANFAR_DEV_HAS_MAGIC_PACKET;

	priv->phy_node = of_parse_phandle(np, "phy-handle", 0);

	/* In the case of a fixed PHY, the DT node associated
	 * to the PHY is the Ethernet MAC DT node.
	 */
	if (!priv->phy_node && of_phy_is_fixed_link(np)) {
		err = of_phy_register_fixed_link(np);
		if (err)
			goto err_grp_init;

		priv->phy_node = of_node_get(np);
	}

	/* Find the TBI PHY.  If it's not there, we don't support SGMII */
	priv->tbi_node = of_parse_phandle(np, "tbi-handle", 0);

	return 0;

err_grp_init:
	unmap_group_regs(priv);
rx_alloc_failed:
	gfar_free_rx_queues(priv);
tx_alloc_failed:
	gfar_free_tx_queues(priv);
	return err;
}
static int gfar_hwtstamp_set(struct net_device *netdev, struct ifreq *ifr)
{
	struct hwtstamp_config config;
	struct gfar_private *priv = netdev_priv(netdev);

	if (copy_from_user(&config, ifr->ifr_data, sizeof(config)))
		return -EFAULT;

	/* reserved for future extensions */
	if (config.flags)
		return -EINVAL;

	switch (config.tx_type) {
	case HWTSTAMP_TX_OFF:
		priv->hwts_tx_en = 0;
		break;
	case HWTSTAMP_TX_ON:
		if (!(priv->device_flags & FSL_GIANFAR_DEV_HAS_TIMER))
			return -ERANGE;
		priv->hwts_tx_en = 1;
		break;
	default:
		return -ERANGE;
	}

	switch (config.rx_filter) {
	case HWTSTAMP_FILTER_NONE:
		if (priv->hwts_rx_en) {
			priv->hwts_rx_en = 0;
			reset_gfar(netdev);
		}
		break;
	default:
		if (!(priv->device_flags & FSL_GIANFAR_DEV_HAS_TIMER))
			return -ERANGE;
		if (!priv->hwts_rx_en) {
			priv->hwts_rx_en = 1;
			reset_gfar(netdev);
		}
		config.rx_filter = HWTSTAMP_FILTER_ALL;
		break;
	}

	return copy_to_user(ifr->ifr_data, &config, sizeof(config)) ?
		-EFAULT : 0;
}
static int gfar_hwtstamp_get(struct net_device *netdev, struct ifreq *ifr)
{
	struct hwtstamp_config config;
	struct gfar_private *priv = netdev_priv(netdev);

	config.flags = 0;
	config.tx_type = priv->hwts_tx_en ? HWTSTAMP_TX_ON : HWTSTAMP_TX_OFF;
	config.rx_filter = (priv->hwts_rx_en ?
			    HWTSTAMP_FILTER_ALL : HWTSTAMP_FILTER_NONE);

	return copy_to_user(ifr->ifr_data, &config, sizeof(config)) ?
		-EFAULT : 0;
}
static int gfar_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
{
	struct gfar_private *priv = netdev_priv(dev);

	if (!netif_running(dev))
		return -EINVAL;

	if (cmd == SIOCSHWTSTAMP)
		return gfar_hwtstamp_set(dev, rq);
	if (cmd == SIOCGHWTSTAMP)
		return gfar_hwtstamp_get(dev, rq);

	if (!priv->phydev)
		return -ENODEV;

	return phy_mii_ioctl(priv->phydev, rq, cmd);
}
static u32 cluster_entry_per_class(struct gfar_private *priv, u32 rqfar,
				   u32 class)
{
	u32 rqfpr = FPR_FILER_MASK;
	u32 rqfcr = 0x0;

	rqfar--;
	rqfcr = RQFCR_CLE | RQFCR_PID_MASK | RQFCR_CMP_EXACT;
	priv->ftp_rqfpr[rqfar] = rqfpr;
	priv->ftp_rqfcr[rqfar] = rqfcr;
	gfar_write_filer(priv, rqfar, rqfcr, rqfpr);

	rqfar--;
	rqfcr = RQFCR_CMP_NOMATCH;
	priv->ftp_rqfpr[rqfar] = rqfpr;
	priv->ftp_rqfcr[rqfar] = rqfcr;
	gfar_write_filer(priv, rqfar, rqfcr, rqfpr);

	rqfar--;
	rqfcr = RQFCR_CMP_EXACT | RQFCR_PID_PARSE | RQFCR_CLE | RQFCR_AND;
	rqfpr = class;
	priv->ftp_rqfcr[rqfar] = rqfcr;
	priv->ftp_rqfpr[rqfar] = rqfpr;
	gfar_write_filer(priv, rqfar, rqfcr, rqfpr);

	rqfar--;
	rqfcr = RQFCR_CMP_EXACT | RQFCR_PID_MASK | RQFCR_AND;
	rqfpr = class;
	priv->ftp_rqfcr[rqfar] = rqfcr;
	priv->ftp_rqfpr[rqfar] = rqfpr;
	gfar_write_filer(priv, rqfar, rqfcr, rqfpr);

	return rqfar;
}
static void gfar_init_filer_table(struct gfar_private *priv)
{
	int i = 0x0;
	u32 rqfar = MAX_FILER_IDX;
	u32 rqfcr = 0x0;
	u32 rqfpr = FPR_FILER_MASK;

	/* Default rule */
	rqfcr = RQFCR_CMP_MATCH;
	priv->ftp_rqfcr[rqfar] = rqfcr;
	priv->ftp_rqfpr[rqfar] = rqfpr;
	gfar_write_filer(priv, rqfar, rqfcr, rqfpr);

	rqfar = cluster_entry_per_class(priv, rqfar, RQFPR_IPV6);
	rqfar = cluster_entry_per_class(priv, rqfar, RQFPR_IPV6 | RQFPR_UDP);
	rqfar = cluster_entry_per_class(priv, rqfar, RQFPR_IPV6 | RQFPR_TCP);
	rqfar = cluster_entry_per_class(priv, rqfar, RQFPR_IPV4);
	rqfar = cluster_entry_per_class(priv, rqfar, RQFPR_IPV4 | RQFPR_UDP);
	rqfar = cluster_entry_per_class(priv, rqfar, RQFPR_IPV4 | RQFPR_TCP);

	/* cur_filer_idx indicates the first non-masked rule */
	priv->cur_filer_idx = rqfar;

	/* Rest are masked rules */
	rqfcr = RQFCR_CMP_NOMATCH;
	for (i = 0; i < rqfar; i++) {
		priv->ftp_rqfcr[i] = rqfcr;
		priv->ftp_rqfpr[i] = rqfpr;
		gfar_write_filer(priv, i, rqfcr, rqfpr);
	}
}
static void __gfar_detect_errata_83xx(struct gfar_private *priv)
{
	unsigned int pvr = mfspr(SPRN_PVR);
	unsigned int svr = mfspr(SPRN_SVR);
	unsigned int mod = (svr >> 16) & 0xfff6; /* w/o E suffix */
	unsigned int rev = svr & 0xffff;

	/* MPC8313 Rev 2.0 and higher; All MPC837x */
	if ((pvr == 0x80850010 && mod == 0x80b0 && rev >= 0x0020) ||
	    (pvr == 0x80861010 && (mod & 0xfff9) == 0x80c0))
		priv->errata |= GFAR_ERRATA_74;

	/* MPC8313 and MPC837x all rev */
	if ((pvr == 0x80850010 && mod == 0x80b0) ||
	    (pvr == 0x80861010 && (mod & 0xfff9) == 0x80c0))
		priv->errata |= GFAR_ERRATA_76;

	/* MPC8313 Rev < 2.0 */
	if (pvr == 0x80850010 && mod == 0x80b0 && rev < 0x0020)
		priv->errata |= GFAR_ERRATA_12;
}
static void __gfar_detect_errata_85xx(struct gfar_private *priv)
{
	unsigned int svr = mfspr(SPRN_SVR);

	if ((SVR_SOC_VER(svr) == SVR_8548) && (SVR_REV(svr) == 0x20))
		priv->errata |= GFAR_ERRATA_12;
	if (((SVR_SOC_VER(svr) == SVR_P2020) && (SVR_REV(svr) < 0x20)) ||
	    ((SVR_SOC_VER(svr) == SVR_P2010) && (SVR_REV(svr) < 0x20)))
		priv->errata |= GFAR_ERRATA_76; /* aka eTSEC 20 */
}
static void gfar_detect_errata(struct gfar_private *priv)
{
	struct device *dev = &priv->ofdev->dev;

	/* no plans to fix */
	priv->errata |= GFAR_ERRATA_A002;

#ifdef CONFIG_PPC
	if (pvr_version_is(PVR_VER_E500V1) || pvr_version_is(PVR_VER_E500V2))
		__gfar_detect_errata_85xx(priv);
	else /* non-mpc85xx parts, i.e. e300 core based */
		__gfar_detect_errata_83xx(priv);
#endif

	if (priv->errata)
		dev_info(dev, "enabled errata workarounds, flags: 0x%x\n",
			 priv->errata);
}
void gfar_mac_reset(struct gfar_private *priv)
{
	struct gfar __iomem *regs = priv->gfargrp[0].regs;
	u32 tempval;

	/* Reset MAC layer */
	gfar_write(&regs->maccfg1, MACCFG1_SOFT_RESET);

	/* We need to delay at least 3 TX clocks */
	udelay(3);

	/* the soft reset bit is not self-resetting, so we need to
	 * clear it before resuming normal operation
	 */
	gfar_write(&regs->maccfg1, 0);

	udelay(3);

	/* Compute rx_buff_size based on config flags */
	gfar_rx_buff_size_config(priv);

	/* Initialize the max receive frame/buffer lengths */
	gfar_write(&regs->maxfrm, priv->rx_buffer_size);
	gfar_write(&regs->mrblr, priv->rx_buffer_size);

	/* Initialize the Minimum Frame Length Register */
	gfar_write(&regs->minflr, MINFLR_INIT_SETTINGS);

	/* Initialize MACCFG2. */
	tempval = MACCFG2_INIT_SETTINGS;

	/* If the mtu is larger than the max size for standard
	 * ethernet frames (ie, a jumbo frame), then set maccfg2
	 * to allow huge frames, and to check the length
	 */
	if (priv->rx_buffer_size > DEFAULT_RX_BUFFER_SIZE ||
	    gfar_has_errata(priv, GFAR_ERRATA_74))
		tempval |= MACCFG2_HUGEFRAME | MACCFG2_LENGTHCHECK;

	gfar_write(&regs->maccfg2, tempval);

	/* Clear mac addr hash registers */
	gfar_write(&regs->igaddr0, 0);
	gfar_write(&regs->igaddr1, 0);
	gfar_write(&regs->igaddr2, 0);
	gfar_write(&regs->igaddr3, 0);
	gfar_write(&regs->igaddr4, 0);
	gfar_write(&regs->igaddr5, 0);
	gfar_write(&regs->igaddr6, 0);
	gfar_write(&regs->igaddr7, 0);

	gfar_write(&regs->gaddr0, 0);
	gfar_write(&regs->gaddr1, 0);
	gfar_write(&regs->gaddr2, 0);
	gfar_write(&regs->gaddr3, 0);
	gfar_write(&regs->gaddr4, 0);
	gfar_write(&regs->gaddr5, 0);
	gfar_write(&regs->gaddr6, 0);
	gfar_write(&regs->gaddr7, 0);

	if (priv->extended_hash)
		gfar_clear_exact_match(priv->ndev);

	gfar_mac_rx_config(priv);

	gfar_mac_tx_config(priv);

	gfar_set_mac_address(priv->ndev);

	gfar_set_multi(priv->ndev);

	/* clear ievent and imask before configuring coalescing */
	gfar_ints_disable(priv);

	/* Configure the coalescing support */
	gfar_configure_coalescing_all(priv);
}
static void gfar_hw_init(struct gfar_private *priv)
{
	struct gfar __iomem *regs = priv->gfargrp[0].regs;
	u32 attrs;

	/* Stop the DMA engine now, in case it was running before
	 * (The firmware could have used it, and left it running).
	 */
	gfar_halt(priv);

	gfar_mac_reset(priv);

	/* Zero out the rmon mib registers if it has them */
	if (priv->device_flags & FSL_GIANFAR_DEV_HAS_RMON) {
		memset_io(&(regs->rmon), 0, sizeof(struct rmon_mib));

		/* Mask off the CAM interrupts */
		gfar_write(&regs->rmon.cam1, 0xffffffff);
		gfar_write(&regs->rmon.cam2, 0xffffffff);
	}

	/* Initialize ECNTRL */
	gfar_write(&regs->ecntrl, ECNTRL_INIT_SETTINGS);

	/* Set the extraction length and index */
	attrs = ATTRELI_EL(priv->rx_stash_size) |
		ATTRELI_EI(priv->rx_stash_index);

	gfar_write(&regs->attreli, attrs);

	/* Start with defaults, and add stashing
	 * depending on driver parameters
	 */
	attrs = ATTR_INIT_SETTINGS;

	if (priv->bd_stash_en)
		attrs |= ATTR_BDSTASH;

	if (priv->rx_stash_size != 0)
		attrs |= ATTR_BUFSTASH;

	gfar_write(&regs->attr, attrs);

	/* FIFO configs */
	gfar_write(&regs->fifo_tx_thr, DEFAULT_FIFO_TX_THR);
	gfar_write(&regs->fifo_tx_starve, DEFAULT_FIFO_TX_STARVE);
	gfar_write(&regs->fifo_tx_starve_shutoff, DEFAULT_FIFO_TX_STARVE_OFF);

	/* Program the interrupt steering regs, only for MG devices */
	if (priv->num_grps > 1)
		gfar_write_isrg(priv);
}
static void gfar_init_addr_hash_table(struct gfar_private *priv)
{
	struct gfar __iomem *regs = priv->gfargrp[0].regs;

	if (priv->device_flags & FSL_GIANFAR_DEV_HAS_EXTENDED_HASH) {
		priv->extended_hash = 1;
		priv->hash_width = 9;

		priv->hash_regs[0] = &regs->igaddr0;
		priv->hash_regs[1] = &regs->igaddr1;
		priv->hash_regs[2] = &regs->igaddr2;
		priv->hash_regs[3] = &regs->igaddr3;
		priv->hash_regs[4] = &regs->igaddr4;
		priv->hash_regs[5] = &regs->igaddr5;
		priv->hash_regs[6] = &regs->igaddr6;
		priv->hash_regs[7] = &regs->igaddr7;
		priv->hash_regs[8] = &regs->gaddr0;
		priv->hash_regs[9] = &regs->gaddr1;
		priv->hash_regs[10] = &regs->gaddr2;
		priv->hash_regs[11] = &regs->gaddr3;
		priv->hash_regs[12] = &regs->gaddr4;
		priv->hash_regs[13] = &regs->gaddr5;
		priv->hash_regs[14] = &regs->gaddr6;
		priv->hash_regs[15] = &regs->gaddr7;

	} else {
		priv->extended_hash = 0;
		priv->hash_width = 8;

		priv->hash_regs[0] = &regs->gaddr0;
		priv->hash_regs[1] = &regs->gaddr1;
		priv->hash_regs[2] = &regs->gaddr2;
		priv->hash_regs[3] = &regs->gaddr3;
		priv->hash_regs[4] = &regs->gaddr4;
		priv->hash_regs[5] = &regs->gaddr5;
		priv->hash_regs[6] = &regs->gaddr6;
		priv->hash_regs[7] = &regs->gaddr7;
	}
}
/* Set up the ethernet device structure, private data,
 * and anything else we need before we start
 */
static int gfar_probe(struct platform_device *ofdev)
{
	struct net_device *dev = NULL;
	struct gfar_private *priv = NULL;
	int err = 0, i;

	err = gfar_of_init(ofdev, &dev);

	if (err)
		return err;

	priv = netdev_priv(dev);
	priv->ndev = dev;
	priv->ofdev = ofdev;
	priv->dev = &ofdev->dev;
	SET_NETDEV_DEV(dev, &ofdev->dev);

	spin_lock_init(&priv->bflock);
	INIT_WORK(&priv->reset_task, gfar_reset_task);

	platform_set_drvdata(ofdev, priv);

	gfar_detect_errata(priv);

	/* Set the dev->base_addr to the gfar reg region */
	dev->base_addr = (unsigned long) priv->gfargrp[0].regs;

	/* Fill in the dev structure */
	dev->watchdog_timeo = TX_TIMEOUT;
	dev->mtu = 1500;
	dev->netdev_ops = &gfar_netdev_ops;
	dev->ethtool_ops = &gfar_ethtool_ops;

	/* Register for napi ...We are registering NAPI for each grp */
	for (i = 0; i < priv->num_grps; i++) {
		if (priv->poll_mode == GFAR_SQ_POLLING) {
			netif_napi_add(dev, &priv->gfargrp[i].napi_rx,
				       gfar_poll_rx_sq, GFAR_DEV_WEIGHT);
			netif_napi_add(dev, &priv->gfargrp[i].napi_tx,
				       gfar_poll_tx_sq, 2);
		} else {
			netif_napi_add(dev, &priv->gfargrp[i].napi_rx,
				       gfar_poll_rx, GFAR_DEV_WEIGHT);
			netif_napi_add(dev, &priv->gfargrp[i].napi_tx,
				       gfar_poll_tx, 2);
		}
	}

	if (priv->device_flags & FSL_GIANFAR_DEV_HAS_CSUM) {
		dev->hw_features = NETIF_F_IP_CSUM | NETIF_F_SG |
				   NETIF_F_RXCSUM;
		dev->features |= NETIF_F_IP_CSUM | NETIF_F_SG |
				 NETIF_F_RXCSUM | NETIF_F_HIGHDMA;
	}

	if (priv->device_flags & FSL_GIANFAR_DEV_HAS_VLAN) {
		dev->hw_features |= NETIF_F_HW_VLAN_CTAG_TX |
				    NETIF_F_HW_VLAN_CTAG_RX;
		dev->features |= NETIF_F_HW_VLAN_CTAG_RX;
	}

	gfar_init_addr_hash_table(priv);

	/* Insert receive time stamps into padding alignment bytes */
	if (priv->device_flags & FSL_GIANFAR_DEV_HAS_TIMER)
		priv->padding = 8;

	if (dev->features & NETIF_F_IP_CSUM ||
	    priv->device_flags & FSL_GIANFAR_DEV_HAS_TIMER)
		dev->needed_headroom = GMAC_FCB_LEN;

	priv->rx_buffer_size = DEFAULT_RX_BUFFER_SIZE;

	/* Initializing some of the rx/tx queue level parameters */
	for (i = 0; i < priv->num_tx_queues; i++) {
		priv->tx_queue[i]->tx_ring_size = DEFAULT_TX_RING_SIZE;
		priv->tx_queue[i]->num_txbdfree = DEFAULT_TX_RING_SIZE;
		priv->tx_queue[i]->txcoalescing = DEFAULT_TX_COALESCE;
		priv->tx_queue[i]->txic = DEFAULT_TXIC;
	}

	for (i = 0; i < priv->num_rx_queues; i++) {
		priv->rx_queue[i]->rx_ring_size = DEFAULT_RX_RING_SIZE;
		priv->rx_queue[i]->rxcoalescing = DEFAULT_RX_COALESCE;
		priv->rx_queue[i]->rxic = DEFAULT_RXIC;
	}

	/* always enable rx filer */
	priv->rx_filer_enable = 1;
	/* Enable most messages by default */
	priv->msg_enable = (NETIF_MSG_IFUP << 1) - 1;
	/* use priority h/w tx queue scheduling for single queue devices */
	if (priv->num_tx_queues == 1)
		priv->prio_sched_en = 1;

	set_bit(GFAR_DOWN, &priv->state);

	gfar_hw_init(priv);

	/* Carrier starts down, phylib will bring it up */
	netif_carrier_off(dev);

	err = register_netdev(dev);

	if (err) {
		pr_err("%s: Cannot register net device, aborting\n", dev->name);
		goto register_fail;
	}

	device_init_wakeup(&dev->dev,
			   priv->device_flags &
			   FSL_GIANFAR_DEV_HAS_MAGIC_PACKET);

	/* fill out IRQ number and name fields */
	for (i = 0; i < priv->num_grps; i++) {
		struct gfar_priv_grp *grp = &priv->gfargrp[i];
		if (priv->device_flags & FSL_GIANFAR_DEV_HAS_MULTI_INTR) {
			sprintf(gfar_irq(grp, TX)->name, "%s%s%c%s",
				dev->name, "_g", '0' + i, "_tx");
			sprintf(gfar_irq(grp, RX)->name, "%s%s%c%s",
				dev->name, "_g", '0' + i, "_rx");
			sprintf(gfar_irq(grp, ER)->name, "%s%s%c%s",
				dev->name, "_g", '0' + i, "_er");
		} else
			strcpy(gfar_irq(grp, TX)->name, dev->name);
	}

	/* Initialize the filer table */
	gfar_init_filer_table(priv);

	/* Print out the device info */
	netdev_info(dev, "mac: %pM\n", dev->dev_addr);

	/* Even more device info helps when determining which kernel
	 * provided which set of benchmarks.
	 */
	netdev_info(dev, "Running with NAPI enabled\n");
	for (i = 0; i < priv->num_rx_queues; i++)
		netdev_info(dev, "RX BD ring size for Q[%d]: %d\n",
			    i, priv->rx_queue[i]->rx_ring_size);
	for (i = 0; i < priv->num_tx_queues; i++)
		netdev_info(dev, "TX BD ring size for Q[%d]: %d\n",
			    i, priv->tx_queue[i]->tx_ring_size);

	return 0;

register_fail:
	unmap_group_regs(priv);
	gfar_free_rx_queues(priv);
	gfar_free_tx_queues(priv);
	of_node_put(priv->phy_node);
	of_node_put(priv->tbi_node);
	free_gfar_dev(priv);
	return err;
}
static int gfar_remove(struct platform_device *ofdev)
{
	struct gfar_private *priv = platform_get_drvdata(ofdev);

	of_node_put(priv->phy_node);
	of_node_put(priv->tbi_node);

	unregister_netdev(priv->ndev);
	unmap_group_regs(priv);
	gfar_free_rx_queues(priv);
	gfar_free_tx_queues(priv);
	free_gfar_dev(priv);

	return 0;
}
#ifdef CONFIG_PM

static int gfar_suspend(struct device *dev)
{
	struct gfar_private *priv = dev_get_drvdata(dev);
	struct net_device *ndev = priv->ndev;
	struct gfar __iomem *regs = priv->gfargrp[0].regs;
	unsigned long flags;
	u32 tempval;

	int magic_packet = priv->wol_en &&
			   (priv->device_flags &
			    FSL_GIANFAR_DEV_HAS_MAGIC_PACKET);

	netif_device_detach(ndev);

	if (netif_running(ndev)) {

		local_irq_save(flags);
		lock_tx_qs(priv);

		gfar_halt_nodisable(priv);

		/* Disable Tx, and Rx if wake-on-LAN is disabled. */
		tempval = gfar_read(&regs->maccfg1);

		tempval &= ~MACCFG1_TX_EN;

		if (!magic_packet)
			tempval &= ~MACCFG1_RX_EN;

		gfar_write(&regs->maccfg1, tempval);

		unlock_tx_qs(priv);
		local_irq_restore(flags);

		disable_napi(priv);

		if (magic_packet) {
			/* Enable interrupt on Magic Packet */
			gfar_write(&regs->imask, IMASK_MAG);

			/* Enable Magic Packet mode */
			tempval = gfar_read(&regs->maccfg2);
			tempval |= MACCFG2_MPEN;
			gfar_write(&regs->maccfg2, tempval);
		} else {
			phy_stop(priv->phydev);
		}
	}

	return 0;
}
static int gfar_resume(struct device *dev)
{
	struct gfar_private *priv = dev_get_drvdata(dev);
	struct net_device *ndev = priv->ndev;
	struct gfar __iomem *regs = priv->gfargrp[0].regs;
	unsigned long flags;
	u32 tempval;
	int magic_packet = priv->wol_en &&
			   (priv->device_flags &
			    FSL_GIANFAR_DEV_HAS_MAGIC_PACKET);

	if (!netif_running(ndev)) {
		netif_device_attach(ndev);
		return 0;
	}

	if (!magic_packet && priv->phydev)
		phy_start(priv->phydev);

	/* Disable Magic Packet mode, in case something
	 * else woke us up.
	 */
	local_irq_save(flags);
	lock_tx_qs(priv);

	tempval = gfar_read(&regs->maccfg2);
	tempval &= ~MACCFG2_MPEN;
	gfar_write(&regs->maccfg2, tempval);

	gfar_start(priv);

	unlock_tx_qs(priv);
	local_irq_restore(flags);

	netif_device_attach(ndev);

	enable_napi(priv);

	return 0;
}
static int gfar_restore(struct device *dev)
{
	struct gfar_private *priv = dev_get_drvdata(dev);
	struct net_device *ndev = priv->ndev;

	if (!netif_running(ndev)) {
		netif_device_attach(ndev);

		return 0;
	}

	if (gfar_init_bds(ndev)) {
		free_skb_resources(priv);
		return -ENOMEM;
	}

	gfar_mac_reset(priv);

	gfar_init_tx_rx_base(priv);

	gfar_start(priv);

	priv->oldlink = 0;
	priv->oldspeed = 0;
	priv->oldduplex = -1;

	if (priv->phydev)
		phy_start(priv->phydev);

	netif_device_attach(ndev);
	enable_napi(priv);

	return 0;
}

static struct dev_pm_ops gfar_pm_ops = {
	.suspend = gfar_suspend,
	.resume = gfar_resume,
	.freeze = gfar_suspend,
	.thaw = gfar_resume,
	.restore = gfar_restore,
};

#define GFAR_PM_OPS (&gfar_pm_ops)

#else

#define GFAR_PM_OPS NULL

#endif
/* Reads the controller's registers to determine what interface
 * connects it to the PHY.
 */
static phy_interface_t gfar_get_interface(struct net_device *dev)
{
	struct gfar_private *priv = netdev_priv(dev);
	struct gfar __iomem *regs = priv->gfargrp[0].regs;
	u32 ecntrl;

	ecntrl = gfar_read(&regs->ecntrl);

	if (ecntrl & ECNTRL_SGMII_MODE)
		return PHY_INTERFACE_MODE_SGMII;

	if (ecntrl & ECNTRL_TBI_MODE) {
		if (ecntrl & ECNTRL_REDUCED_MODE)
			return PHY_INTERFACE_MODE_RTBI;
		else
			return PHY_INTERFACE_MODE_TBI;
	}

	if (ecntrl & ECNTRL_REDUCED_MODE) {
		if (ecntrl & ECNTRL_REDUCED_MII_MODE) {
			return PHY_INTERFACE_MODE_RMII;
		} else {
			phy_interface_t interface = priv->interface;

			/* This isn't autodetected right now, so it must
			 * be set by the device tree or platform code.
			 */
			if (interface == PHY_INTERFACE_MODE_RGMII_ID)
				return PHY_INTERFACE_MODE_RGMII_ID;

			return PHY_INTERFACE_MODE_RGMII;
		}
	}

	if (priv->device_flags & FSL_GIANFAR_DEV_HAS_GIGABIT)
		return PHY_INTERFACE_MODE_GMII;

	return PHY_INTERFACE_MODE_MII;
}
/* Initializes driver's PHY state, and attaches to the PHY.
 * Returns 0 on success.
 */
static int init_phy(struct net_device *dev)
{
	struct gfar_private *priv = netdev_priv(dev);
	uint gigabit_support =
		priv->device_flags & FSL_GIANFAR_DEV_HAS_GIGABIT ?
		GFAR_SUPPORTED_GBIT : 0;
	phy_interface_t interface;

	priv->oldlink = 0;
	priv->oldspeed = 0;
	priv->oldduplex = -1;

	interface = gfar_get_interface(dev);

	priv->phydev = of_phy_connect(dev, priv->phy_node, &adjust_link, 0,
				      interface);
	if (!priv->phydev) {
		dev_err(&dev->dev, "could not attach to PHY\n");
		return -ENODEV;
	}

	if (interface == PHY_INTERFACE_MODE_SGMII)
		gfar_configure_serdes(dev);

	/* Remove any features not supported by the controller */
	priv->phydev->supported &= (GFAR_SUPPORTED | gigabit_support);
	priv->phydev->advertising = priv->phydev->supported;

	/* Add support for flow control, but don't advertise it by default */
	priv->phydev->supported |= (SUPPORTED_Pause | SUPPORTED_Asym_Pause);

	return 0;
}
/* Initialize TBI PHY interface for communicating with the
 * SERDES lynx PHY on the chip.  We communicate with this PHY
 * through the MDIO bus on each controller, treating it as a
 * "normal" PHY at the address found in the TBIPA register.  We assume
 * that the TBIPA register is valid.  Either the MDIO bus code will set
 * it to a value that doesn't conflict with other PHYs on the bus, or the
 * value doesn't matter, as there are no other PHYs on the bus.
 */
static void gfar_configure_serdes(struct net_device *dev)
{
	struct gfar_private *priv = netdev_priv(dev);
	struct phy_device *tbiphy;

	if (!priv->tbi_node) {
		dev_warn(&dev->dev, "error: SGMII mode requires that the "
			 "device tree specify a tbi-handle\n");
		return;
	}

	tbiphy = of_phy_find_device(priv->tbi_node);
	if (!tbiphy) {
		dev_err(&dev->dev, "error: Could not get TBI device\n");
		return;
	}

	/* If the link is already up, we must already be ok, and don't need to
	 * configure and reset the TBI<->SerDes link.  Maybe U-Boot configured
	 * everything for us?  Resetting it takes the link down and requires
	 * several seconds for it to come back.
	 */
	if (phy_read(tbiphy, MII_BMSR) & BMSR_LSTATUS)
		return;

	/* Single clk mode, mii mode off(for serdes communication) */
	phy_write(tbiphy, MII_TBICON, TBICON_CLK_SELECT);

	phy_write(tbiphy, MII_ADVERTISE,
		  ADVERTISE_1000XFULL | ADVERTISE_1000XPAUSE |
		  ADVERTISE_1000XPSE_ASYM);

	phy_write(tbiphy, MII_BMCR,
		  BMCR_ANENABLE | BMCR_ANRESTART | BMCR_FULLDPLX |
		  BMCR_SPEED1000);
}
static int __gfar_is_rx_idle(struct gfar_private *priv)
{
	u32 res;

	/* Normally TSEC should not hang on GRS commands, so we should
	 * actually wait for IEVENT_GRSC flag.
	 */
	if (!gfar_has_errata(priv, GFAR_ERRATA_A002))
		return 0;

	/* Read the eTSEC register at offset 0xD1C. If bits 7-14 are
	 * the same as bits 23-30, the eTSEC Rx is assumed to be idle
	 * and the Rx can be safely reset.
	 */
	res = gfar_read((void __iomem *)priv->gfargrp[0].regs + 0xd1c);
	res &= 0x7f807f80;
	if ((res & 0xffff) == (res >> 16))
		return 1;

	return 0;
}
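/* Worked example for the idle check above (illustrative only): the mask
 * 0x7f807f80 keeps bits 7-14 of each 16-bit half of the register, so
 * comparing (res & 0xffff) with (res >> 16) compares exactly bits 7-14
 * against bits 23-30.  E.g. a raw value of 0x12c412c7 masks to
 * 0x12801280; the two halves match, so the Rx is treated as idle.
 */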
/* Halt the receive and transmit queues */
static void gfar_halt_nodisable(struct gfar_private *priv)
{
	struct gfar __iomem *regs = priv->gfargrp[0].regs;
	u32 tempval;
	unsigned int timeout;
	int stopped;

	gfar_ints_disable(priv);

	if (gfar_is_dma_stopped(priv))
		return;

	/* Stop the DMA, and wait for it to stop */
	tempval = gfar_read(&regs->dmactrl);
	tempval |= (DMACTRL_GRS | DMACTRL_GTS);
	gfar_write(&regs->dmactrl, tempval);

retry:
	timeout = 1000;
	while (!(stopped = gfar_is_dma_stopped(priv)) && timeout) {
		cpu_relax();
		timeout--;
	}

	if (!timeout)
		stopped = gfar_is_dma_stopped(priv);

	if (!stopped && !gfar_is_rx_dma_stopped(priv) &&
	    !__gfar_is_rx_idle(priv))
		goto retry;
}
/* Halt the receive and transmit queues */
void gfar_halt(struct gfar_private *priv)
{
	struct gfar __iomem *regs = priv->gfargrp[0].regs;
	u32 tempval;

	/* Disable the Rx/Tx hw queues */
	gfar_write(&regs->rqueue, 0);
	gfar_write(&regs->tqueue, 0);

	mdelay(10);

	gfar_halt_nodisable(priv);

	/* Disable Rx/Tx DMA */
	tempval = gfar_read(&regs->maccfg1);
	tempval &= ~(MACCFG1_RX_EN | MACCFG1_TX_EN);
	gfar_write(&regs->maccfg1, tempval);
}
void stop_gfar(struct net_device *dev)
{
	struct gfar_private *priv = netdev_priv(dev);

	netif_tx_stop_all_queues(dev);

	smp_mb__before_atomic();
	set_bit(GFAR_DOWN, &priv->state);
	smp_mb__after_atomic();

	disable_napi(priv);

	/* disable ints and gracefully shut down Rx/Tx DMA */
	gfar_halt(priv);

	phy_stop(priv->phydev);

	free_skb_resources(priv);
}
static void free_skb_tx_queue(struct gfar_priv_tx_q *tx_queue)
{
	struct txbd8 *txbdp;
	struct gfar_private *priv = netdev_priv(tx_queue->dev);
	int i, j;

	txbdp = tx_queue->tx_bd_base;

	for (i = 0; i < tx_queue->tx_ring_size; i++) {
		if (!tx_queue->tx_skbuff[i])
			continue;

		dma_unmap_single(priv->dev, be32_to_cpu(txbdp->bufPtr),
				 be16_to_cpu(txbdp->length), DMA_TO_DEVICE);
		txbdp->lstatus = 0;
		for (j = 0; j < skb_shinfo(tx_queue->tx_skbuff[i])->nr_frags;
		     j++) {
			txbdp++;
			dma_unmap_page(priv->dev, be32_to_cpu(txbdp->bufPtr),
				       be16_to_cpu(txbdp->length),
				       DMA_TO_DEVICE);
		}
		txbdp++;
		dev_kfree_skb_any(tx_queue->tx_skbuff[i]);
		tx_queue->tx_skbuff[i] = NULL;
	}
	kfree(tx_queue->tx_skbuff);
	tx_queue->tx_skbuff = NULL;
}
static void free_skb_rx_queue(struct gfar_priv_rx_q *rx_queue)
{
	struct rxbd8 *rxbdp;
	struct gfar_private *priv = netdev_priv(rx_queue->dev);
	int i;

	rxbdp = rx_queue->rx_bd_base;

	for (i = 0; i < rx_queue->rx_ring_size; i++) {
		if (rx_queue->rx_skbuff[i]) {
			dma_unmap_single(priv->dev, be32_to_cpu(rxbdp->bufPtr),
					 priv->rx_buffer_size,
					 DMA_FROM_DEVICE);
			dev_kfree_skb_any(rx_queue->rx_skbuff[i]);
			rx_queue->rx_skbuff[i] = NULL;
		}
		rxbdp->lstatus = 0;
		rxbdp->bufPtr = 0;
		rxbdp++;
	}
	kfree(rx_queue->rx_skbuff);
	rx_queue->rx_skbuff = NULL;
}
/* If there are any tx skbs or rx skbs still around, free them.
 * Then free tx_skbuff and rx_skbuff
 */
static void free_skb_resources(struct gfar_private *priv)
{
	struct gfar_priv_tx_q *tx_queue = NULL;
	struct gfar_priv_rx_q *rx_queue = NULL;
	int i;

	/* Go through all the buffer descriptors and free their data buffers */
	for (i = 0; i < priv->num_tx_queues; i++) {
		struct netdev_queue *txq;

		tx_queue = priv->tx_queue[i];
		txq = netdev_get_tx_queue(tx_queue->dev, tx_queue->qindex);
		if (tx_queue->tx_skbuff)
			free_skb_tx_queue(tx_queue);
		netdev_tx_reset_queue(txq);
	}

	for (i = 0; i < priv->num_rx_queues; i++) {
		rx_queue = priv->rx_queue[i];
		if (rx_queue->rx_skbuff)
			free_skb_rx_queue(rx_queue);
	}

	dma_free_coherent(priv->dev,
			  sizeof(struct txbd8) * priv->total_tx_ring_size +
			  sizeof(struct rxbd8) * priv->total_rx_ring_size,
			  priv->tx_queue[0]->tx_bd_base,
			  priv->tx_queue[0]->tx_bd_dma_base);
}
void gfar_start(struct gfar_private *priv)
{
	struct gfar __iomem *regs = priv->gfargrp[0].regs;
	u32 tempval;
	int i = 0;

	/* Enable Rx/Tx hw queues */
	gfar_write(&regs->rqueue, priv->rqueue);
	gfar_write(&regs->tqueue, priv->tqueue);

	/* Initialize DMACTRL to have WWR and WOP */
	tempval = gfar_read(&regs->dmactrl);
	tempval |= DMACTRL_INIT_SETTINGS;
	gfar_write(&regs->dmactrl, tempval);

	/* Make sure we aren't stopped */
	tempval = gfar_read(&regs->dmactrl);
	tempval &= ~(DMACTRL_GRS | DMACTRL_GTS);
	gfar_write(&regs->dmactrl, tempval);

	for (i = 0; i < priv->num_grps; i++) {
		regs = priv->gfargrp[i].regs;
		/* Clear THLT/RHLT, so that the DMA starts polling now */
		gfar_write(&regs->tstat, priv->gfargrp[i].tstat);
		gfar_write(&regs->rstat, priv->gfargrp[i].rstat);
	}

	/* Enable Rx/Tx DMA */
	tempval = gfar_read(&regs->maccfg1);
	tempval |= (MACCFG1_RX_EN | MACCFG1_TX_EN);
	gfar_write(&regs->maccfg1, tempval);

	gfar_ints_enable(priv);

	priv->ndev->trans_start = jiffies; /* prevent tx timeout */
}
static void free_grp_irqs(struct gfar_priv_grp *grp)
{
	free_irq(gfar_irq(grp, TX)->irq, grp);
	free_irq(gfar_irq(grp, RX)->irq, grp);
	free_irq(gfar_irq(grp, ER)->irq, grp);
}
*grp
)
2026 struct gfar_private
*priv
= grp
->priv
;
2027 struct net_device
*dev
= priv
->ndev
;
2030 /* If the device has multiple interrupts, register for
2031 * them. Otherwise, only register for the one
2033 if (priv
->device_flags
& FSL_GIANFAR_DEV_HAS_MULTI_INTR
) {
2034 /* Install our interrupt handlers for Error,
2035 * Transmit, and Receive
2037 err
= request_irq(gfar_irq(grp
, ER
)->irq
, gfar_error
, 0,
2038 gfar_irq(grp
, ER
)->name
, grp
);
2040 netif_err(priv
, intr
, dev
, "Can't get IRQ %d\n",
2041 gfar_irq(grp
, ER
)->irq
);
2045 err
= request_irq(gfar_irq(grp
, TX
)->irq
, gfar_transmit
, 0,
2046 gfar_irq(grp
, TX
)->name
, grp
);
2048 netif_err(priv
, intr
, dev
, "Can't get IRQ %d\n",
2049 gfar_irq(grp
, TX
)->irq
);
2052 err
= request_irq(gfar_irq(grp
, RX
)->irq
, gfar_receive
, 0,
2053 gfar_irq(grp
, RX
)->name
, grp
);
2055 netif_err(priv
, intr
, dev
, "Can't get IRQ %d\n",
2056 gfar_irq(grp
, RX
)->irq
);
2060 err
= request_irq(gfar_irq(grp
, TX
)->irq
, gfar_interrupt
, 0,
2061 gfar_irq(grp
, TX
)->name
, grp
);
2063 netif_err(priv
, intr
, dev
, "Can't get IRQ %d\n",
2064 gfar_irq(grp
, TX
)->irq
);
2072 free_irq(gfar_irq(grp
, TX
)->irq
, grp
);
2074 free_irq(gfar_irq(grp
, ER
)->irq
, grp
);
static void gfar_free_irq(struct gfar_private *priv)
{
	int i;

	/* Free the IRQs */
	if (priv->device_flags & FSL_GIANFAR_DEV_HAS_MULTI_INTR) {
		for (i = 0; i < priv->num_grps; i++)
			free_grp_irqs(&priv->gfargrp[i]);
	} else {
		for (i = 0; i < priv->num_grps; i++)
			free_irq(gfar_irq(&priv->gfargrp[i], TX)->irq,
				 &priv->gfargrp[i]);
	}
}
static int gfar_request_irq(struct gfar_private *priv)
{
	int err, i = 0, j = 0;

	for (i = 0; i < priv->num_grps; i++) {
		err = register_grp_irqs(&priv->gfargrp[i]);
		if (err) {
			for (j = 0; j < i; j++)
				free_grp_irqs(&priv->gfargrp[j]);
			return err;
		}
	}

	return 0;
}
2112 int startup_gfar(struct net_device
*ndev
)
2114 struct gfar_private
*priv
= netdev_priv(ndev
);
2117 gfar_mac_reset(priv
);
2119 err
= gfar_alloc_skb_resources(ndev
);
2123 gfar_init_tx_rx_base(priv
);
2125 smp_mb__before_atomic();
2126 clear_bit(GFAR_DOWN
, &priv
->state
);
2127 smp_mb__after_atomic();
2129 /* Start Rx/Tx DMA and enable the interrupts */
2132 phy_start(priv
->phydev
);
2136 netif_tx_wake_all_queues(ndev
);
/* Called when something needs to use the ethernet device
 * Returns 0 for success.
 */
static int gfar_enet_open(struct net_device *dev)
{
	struct gfar_private *priv = netdev_priv(dev);
	int err;

	err = init_phy(dev);
	if (err)
		return err;

	err = gfar_request_irq(priv);
	if (err)
		return err;

	err = startup_gfar(dev);
	if (err)
		return err;

	device_set_wakeup_enable(&dev->dev, priv->wol_en);

	return err;
}
static inline struct txfcb *gfar_add_fcb(struct sk_buff *skb)
{
	struct txfcb *fcb = (struct txfcb *)skb_push(skb, GMAC_FCB_LEN);

	memset(fcb, 0, GMAC_FCB_LEN);

	return fcb;
}
static inline void gfar_tx_checksum(struct sk_buff *skb, struct txfcb *fcb,
				    int fcb_length)
{
	/* If we're here, it's an IP packet with a TCP or UDP
	 * payload.  We set it to checksum, using a pseudo-header
	 * we provide
	 */
	u8 flags = TXFCB_DEFAULT;

	/* Tell the controller what the protocol is
	 * And provide the already calculated phcs
	 */
	if (ip_hdr(skb)->protocol == IPPROTO_UDP) {
		flags |= TXFCB_UDP;
		fcb->phcs = (__force __be16)(udp_hdr(skb)->check);
	} else
		fcb->phcs = (__force __be16)(tcp_hdr(skb)->check);

	/* l3os is the distance between the start of the
	 * frame (skb->data) and the start of the IP hdr.
	 * l4os is the distance between the start of the
	 * l3 hdr and the l4 hdr
	 */
	fcb->l3os = (u8)(skb_network_offset(skb) - fcb_length);
	fcb->l4os = skb_network_header_len(skb);

	fcb->flags = flags;
}
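/* Worked example for the offsets above (illustrative only): for an
 * untagged IPv4 frame, skb->data points at the FCB after gfar_add_fcb(),
 * so skb_network_offset() equals fcb_length + ETH_HLEN and l3os comes
 * out as ETH_HLEN (14).  l4os is the IP header length, i.e. 20 for an
 * IPv4 header with no options.
 */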
inline void gfar_tx_vlan(struct sk_buff *skb, struct txfcb *fcb)
{
	fcb->flags |= TXFCB_VLN;
	fcb->vlctl = cpu_to_be16(skb_vlan_tag_get(skb));
}
static inline struct txbd8 *skip_txbd(struct txbd8 *bdp, int stride,
				      struct txbd8 *base, int ring_size)
{
	struct txbd8 *new_bd = bdp + stride;

	return (new_bd >= (base + ring_size)) ? (new_bd - ring_size) : new_bd;
}

static inline struct txbd8 *next_txbd(struct txbd8 *bdp, struct txbd8 *base,
				      int ring_size)
{
	return skip_txbd(bdp, 1, base, ring_size);
}
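/* For example, with a 256-descriptor ring, skip_txbd(base + 255, 1,
 * base, 256) wraps and returns base.
 */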
/* eTSEC12: csum generation not supported for some fcb offsets */
static inline bool gfar_csum_errata_12(struct gfar_private *priv,
				       unsigned long fcb_addr)
{
	return (gfar_has_errata(priv, GFAR_ERRATA_12) &&
	       (fcb_addr % 0x20) > 0x18);
}

/* eTSEC76: csum generation for frames larger than 2500 may
 * cause excess delays before start of transmission
 */
static inline bool gfar_csum_errata_76(struct gfar_private *priv,
				       unsigned int len)
{
	return (gfar_has_errata(priv, GFAR_ERRATA_76) &&
	       (len > 2500));
}
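/* In other words, the eTSEC12 workaround triggers whenever the FCB
 * would start in the last 7 bytes of a 32-byte aligned window
 * (offsets 0x19-0x1f within the window); gfar_start_xmit() then falls
 * back to skb_checksum_help().
 */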
/* This is called by the kernel when a frame is ready for transmission.
 * It is pointed to by the dev->hard_start_xmit function pointer
 */
static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct gfar_private *priv = netdev_priv(dev);
	struct gfar_priv_tx_q *tx_queue = NULL;
	struct netdev_queue *txq;
	struct gfar __iomem *regs = NULL;
	struct txfcb *fcb = NULL;
	struct txbd8 *txbdp, *txbdp_start, *base, *txbdp_tstamp = NULL;
	u32 lstatus;
	int i, rq = 0;
	int do_tstamp, do_csum, do_vlan;
	dma_addr_t bufaddr;
	unsigned long flags;
	unsigned int nr_frags, nr_txbds, bytes_sent, fcb_len = 0;

	rq = skb->queue_mapping;
	tx_queue = priv->tx_queue[rq];
	txq = netdev_get_tx_queue(dev, rq);
	base = tx_queue->tx_bd_base;
	regs = tx_queue->grp->regs;

	do_csum = (CHECKSUM_PARTIAL == skb->ip_summed);
	do_vlan = skb_vlan_tag_present(skb);
	do_tstamp = (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
		    priv->hwts_tx_en;

	if (do_csum || do_vlan)
		fcb_len = GMAC_FCB_LEN;

	/* check if time stamp should be generated */
	if (unlikely(do_tstamp))
		fcb_len = GMAC_FCB_LEN + GMAC_TXPAL_LEN;

	/* make space for additional header when fcb is needed */
	if (fcb_len && unlikely(skb_headroom(skb) < fcb_len)) {
		struct sk_buff *skb_new;

		skb_new = skb_realloc_headroom(skb, fcb_len);
		if (!skb_new) {
			dev->stats.tx_errors++;
			dev_kfree_skb_any(skb);
			return NETDEV_TX_OK;
		}

		if (skb->sk)
			skb_set_owner_w(skb_new, skb->sk);
		dev_consume_skb_any(skb);
		skb = skb_new;
	}

	/* total number of fragments in the SKB */
	nr_frags = skb_shinfo(skb)->nr_frags;

	/* calculate the required number of TxBDs for this skb */
	if (unlikely(do_tstamp))
		nr_txbds = nr_frags + 2;
	else
		nr_txbds = nr_frags + 1;

	/* check if there is space to queue this packet */
	if (nr_txbds > tx_queue->num_txbdfree) {
		/* no space, stop the queue */
		netif_tx_stop_queue(txq);
		dev->stats.tx_fifo_errors++;
		return NETDEV_TX_BUSY;
	}

	/* Update transmit stats */
	bytes_sent = skb->len;
	tx_queue->stats.tx_bytes += bytes_sent;
	/* keep Tx bytes on wire for BQL accounting */
	GFAR_CB(skb)->bytes_sent = bytes_sent;
	tx_queue->stats.tx_packets++;

	txbdp = txbdp_start = tx_queue->cur_tx;
	lstatus = be32_to_cpu(txbdp->lstatus);

	/* Time stamp insertion requires one additional TxBD */
	if (unlikely(do_tstamp))
		txbdp_tstamp = txbdp = next_txbd(txbdp, base,
						 tx_queue->tx_ring_size);

	if (nr_frags == 0) {
		if (unlikely(do_tstamp)) {
			u32 lstatus_ts = be32_to_cpu(txbdp_tstamp->lstatus);

			lstatus_ts |= BD_LFLAG(TXBD_LAST | TXBD_INTERRUPT);
			txbdp_tstamp->lstatus = cpu_to_be32(lstatus_ts);
		} else {
			lstatus |= BD_LFLAG(TXBD_LAST | TXBD_INTERRUPT);
		}
	} else {
		/* Place the fragment addresses and lengths into the TxBDs */
		for (i = 0; i < nr_frags; i++) {
			unsigned int frag_len;
			/* Point at the next BD, wrapping as needed */
			txbdp = next_txbd(txbdp, base, tx_queue->tx_ring_size);

			frag_len = skb_shinfo(skb)->frags[i].size;

			lstatus = be32_to_cpu(txbdp->lstatus) | frag_len |
				  BD_LFLAG(TXBD_READY);

			/* Handle the last BD specially */
			if (i == nr_frags - 1)
				lstatus |= BD_LFLAG(TXBD_LAST | TXBD_INTERRUPT);

			bufaddr = skb_frag_dma_map(priv->dev,
						   &skb_shinfo(skb)->frags[i],
						   0, frag_len,
						   DMA_TO_DEVICE);
			if (unlikely(dma_mapping_error(priv->dev, bufaddr)))
				goto dma_map_err;

			/* set the TxBD length and buffer pointer */
			txbdp->bufPtr = cpu_to_be32(bufaddr);
			txbdp->lstatus = cpu_to_be32(lstatus);
		}

		lstatus = be32_to_cpu(txbdp_start->lstatus);
	}

	/* Add TxPAL between FCB and frame if required */
	if (unlikely(do_tstamp)) {
		skb_push(skb, GMAC_TXPAL_LEN);
		memset(skb->data, 0, GMAC_TXPAL_LEN);
	}

	/* Add TxFCB if required */
	if (fcb_len) {
		fcb = gfar_add_fcb(skb);
		lstatus |= BD_LFLAG(TXBD_TOE);
	}

	/* Set up checksumming */
	if (do_csum) {
		gfar_tx_checksum(skb, fcb, fcb_len);

		if (unlikely(gfar_csum_errata_12(priv, (unsigned long)fcb)) ||
		    unlikely(gfar_csum_errata_76(priv, skb->len))) {
			__skb_pull(skb, GMAC_FCB_LEN);
			skb_checksum_help(skb);
			if (do_vlan || do_tstamp) {
				/* put back a new fcb for vlan/tstamp TOE */
				fcb = gfar_add_fcb(skb);
			} else {
				/* Tx TOE not used */
				lstatus &= ~(BD_LFLAG(TXBD_TOE));
				fcb = NULL;
			}
		}
	}

	if (do_vlan)
		gfar_tx_vlan(skb, fcb);

	/* Setup tx hardware time stamping if requested */
	if (unlikely(do_tstamp)) {
		skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
		fcb->ptp = 1;
	}

	bufaddr = dma_map_single(priv->dev, skb->data, skb_headlen(skb),
				 DMA_TO_DEVICE);
	if (unlikely(dma_mapping_error(priv->dev, bufaddr)))
		goto dma_map_err;

	txbdp_start->bufPtr = cpu_to_be32(bufaddr);

	/* If time stamping is requested one additional TxBD must be set up. The
	 * first TxBD points to the FCB and must have a data length of
	 * GMAC_FCB_LEN. The second TxBD points to the actual frame data with
	 * the full frame length.
	 */
	if (unlikely(do_tstamp)) {
		u32 lstatus_ts = be32_to_cpu(txbdp_tstamp->lstatus);

		bufaddr = be32_to_cpu(txbdp_start->bufPtr);
		bufaddr += fcb_len;

		lstatus_ts |= BD_LFLAG(TXBD_READY) |
			      (skb_headlen(skb) - fcb_len);

		txbdp_tstamp->bufPtr = cpu_to_be32(bufaddr);
		txbdp_tstamp->lstatus = cpu_to_be32(lstatus_ts);
		lstatus |= BD_LFLAG(TXBD_CRC | TXBD_READY) | GMAC_FCB_LEN;
	} else {
		lstatus |= BD_LFLAG(TXBD_CRC | TXBD_READY) | skb_headlen(skb);
	}

	netdev_tx_sent_queue(txq, bytes_sent);

	/* We can work in parallel with gfar_clean_tx_ring(), except
	 * when modifying num_txbdfree. Note that we didn't grab the lock
	 * when we were reading the num_txbdfree and checking for available
	 * space, that's because outside of this function it can only grow,
	 * and once we've got needed space, it cannot suddenly disappear.
	 *
	 * The lock also protects us from gfar_error(), which can modify
	 * regs->tstat and thus retrigger the transfers, which is why we
	 * also must grab the lock before setting ready bit for the first
	 * to be transmitted BD.
	 */
	spin_lock_irqsave(&tx_queue->txlock, flags);

	gfar_wmb();

	txbdp_start->lstatus = cpu_to_be32(lstatus);

	gfar_wmb(); /* force lstatus write before tx_skbuff */

	tx_queue->tx_skbuff[tx_queue->skb_curtx] = skb;

	/* Update the current skb pointer to the next entry we will use
	 * (wrapping if necessary)
	 */
	tx_queue->skb_curtx = (tx_queue->skb_curtx + 1) &
			      TX_RING_MOD_MASK(tx_queue->tx_ring_size);

	tx_queue->cur_tx = next_txbd(txbdp, base, tx_queue->tx_ring_size);

	/* reduce TxBD free count */
	tx_queue->num_txbdfree -= (nr_txbds);

	/* If the next BD still needs to be cleaned up, then the bds
	 * are full.  We need to tell the kernel to stop sending us stuff.
	 */
	if (!tx_queue->num_txbdfree) {
		netif_tx_stop_queue(txq);

		dev->stats.tx_fifo_errors++;
	}

	/* Tell the DMA to go go go */
	gfar_write(&regs->tstat, TSTAT_CLEAR_THALT >> tx_queue->qindex);

	/* Unlock priv */
	spin_unlock_irqrestore(&tx_queue->txlock, flags);

	return NETDEV_TX_OK;

dma_map_err:
	txbdp = next_txbd(txbdp_start, base, tx_queue->tx_ring_size);
	if (do_tstamp)
		txbdp = next_txbd(txbdp, base, tx_queue->tx_ring_size);
	for (i = 0; i < nr_frags; i++) {
		lstatus = be32_to_cpu(txbdp->lstatus);
		if (!(lstatus & BD_LFLAG(TXBD_READY)))
			break;

		lstatus &= ~BD_LFLAG(TXBD_READY);
		txbdp->lstatus = cpu_to_be32(lstatus);
		bufaddr = be32_to_cpu(txbdp->bufPtr);
		dma_unmap_page(priv->dev, bufaddr, be16_to_cpu(txbdp->length),
			       DMA_TO_DEVICE);
		txbdp = next_txbd(txbdp, base, tx_queue->tx_ring_size);
	}

	dev_kfree_skb_any(skb);
	return NETDEV_TX_OK;
}
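/* Note that gfar_start_xmit() stops the subqueue only after the frame
 * that consumed the last free TxBDs has been queued; the queue is
 * restarted from gfar_clean_tx_ring() once descriptors are reclaimed.
 */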
/* Stops the kernel queue, and halts the controller */
static int gfar_close(struct net_device *dev)
{
	struct gfar_private *priv = netdev_priv(dev);

	cancel_work_sync(&priv->reset_task);
	stop_gfar(dev);

	/* Disconnect from the PHY */
	phy_disconnect(priv->phydev);
	priv->phydev = NULL;

	gfar_free_irq(priv);

	return 0;
}
/* Changes the mac address if the controller is not running. */
static int gfar_set_mac_address(struct net_device *dev)
{
	gfar_set_mac_for_addr(dev, 0, dev->dev_addr);

	return 0;
}
static int gfar_change_mtu(struct net_device *dev, int new_mtu)
{
	struct gfar_private *priv = netdev_priv(dev);
	int frame_size = new_mtu + ETH_HLEN;

	if ((frame_size < 64) || (frame_size > JUMBO_FRAME_SIZE)) {
		netif_err(priv, drv, dev, "Invalid MTU setting\n");
		return -EINVAL;
	}

	while (test_and_set_bit_lock(GFAR_RESETTING, &priv->state))
		cpu_relax();

	if (dev->flags & IFF_UP)
		stop_gfar(dev);

	dev->mtu = new_mtu;

	if (dev->flags & IFF_UP)
		startup_gfar(dev);

	clear_bit_unlock(GFAR_RESETTING, &priv->state);

	return 0;
}
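/* The check above bounds frame_size = new_mtu + ETH_HLEN, so the
 * accepted MTU range runs from 64 - ETH_HLEN up to
 * JUMBO_FRAME_SIZE - ETH_HLEN.
 */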
void reset_gfar(struct net_device *ndev)
{
	struct gfar_private *priv = netdev_priv(ndev);

	while (test_and_set_bit_lock(GFAR_RESETTING, &priv->state))
		cpu_relax();

	stop_gfar(ndev);
	startup_gfar(ndev);

	clear_bit_unlock(GFAR_RESETTING, &priv->state);
}
/* gfar_reset_task gets scheduled when a packet has not been
 * transmitted after a set amount of time.
 * For now, assume that clearing out all the structures, and
 * starting over will fix the problem.
 */
static void gfar_reset_task(struct work_struct *work)
{
	struct gfar_private *priv = container_of(work, struct gfar_private,
						 reset_task);
	reset_gfar(priv->ndev);
}

static void gfar_timeout(struct net_device *dev)
{
	struct gfar_private *priv = netdev_priv(dev);

	dev->stats.tx_errors++;
	schedule_work(&priv->reset_task);
}
static void gfar_align_skb(struct sk_buff *skb)
{
	/* We need the data buffer to be aligned properly.  We will reserve
	 * as many bytes as needed to align the data properly
	 */
	skb_reserve(skb, RXBUF_ALIGNMENT -
		    (((unsigned long) skb->data) & (RXBUF_ALIGNMENT - 1)));
}
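/* Example: a data pointer ending in 0x04 gets RXBUF_ALIGNMENT - 4 bytes
 * reserved, while an already aligned one gets a full RXBUF_ALIGNMENT;
 * either way the resulting pointer is aligned.
 */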
/* Interrupt Handler for Transmit complete */
static void gfar_clean_tx_ring(struct gfar_priv_tx_q *tx_queue)
{
	struct net_device *dev = tx_queue->dev;
	struct netdev_queue *txq;
	struct gfar_private *priv = netdev_priv(dev);
	struct txbd8 *bdp, *next = NULL;
	struct txbd8 *lbdp = NULL;
	struct txbd8 *base = tx_queue->tx_bd_base;
	struct sk_buff *skb;
	int skb_dirtytx;
	int tx_ring_size = tx_queue->tx_ring_size;
	int frags = 0, nr_txbds = 0;
	int i;
	int howmany = 0;
	int tqi = tx_queue->qindex;
	unsigned int bytes_sent = 0;
	u32 lstatus;
	size_t buflen;

	txq = netdev_get_tx_queue(dev, tqi);
	bdp = tx_queue->dirty_tx;
	skb_dirtytx = tx_queue->skb_dirtytx;

	while ((skb = tx_queue->tx_skbuff[skb_dirtytx])) {
		unsigned long flags;

		frags = skb_shinfo(skb)->nr_frags;

		/* When time stamping, one additional TxBD must be freed.
		 * Also, we need to dma_unmap_single() the TxPAL.
		 */
		if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS))
			nr_txbds = frags + 2;
		else
			nr_txbds = frags + 1;

		lbdp = skip_txbd(bdp, nr_txbds - 1, base, tx_ring_size);

		lstatus = be32_to_cpu(lbdp->lstatus);

		/* Only clean completed frames */
		if ((lstatus & BD_LFLAG(TXBD_READY)) &&
		    (lstatus & BD_LENGTH_MASK))
			break;

		if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS)) {
			next = next_txbd(bdp, base, tx_ring_size);
			buflen = be16_to_cpu(next->length) +
				 GMAC_FCB_LEN + GMAC_TXPAL_LEN;
		} else
			buflen = be16_to_cpu(bdp->length);

		dma_unmap_single(priv->dev, be32_to_cpu(bdp->bufPtr),
				 buflen, DMA_TO_DEVICE);

		if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS)) {
			struct skb_shared_hwtstamps shhwtstamps;
			u64 *ns = (u64 *)(((u32)skb->data + 0x10) & ~0x7);

			memset(&shhwtstamps, 0, sizeof(shhwtstamps));
			shhwtstamps.hwtstamp = ns_to_ktime(*ns);
			skb_pull(skb, GMAC_FCB_LEN + GMAC_TXPAL_LEN);
			skb_tstamp_tx(skb, &shhwtstamps);
			gfar_clear_txbd_status(bdp);
			bdp = next;
		}

		gfar_clear_txbd_status(bdp);
		bdp = next_txbd(bdp, base, tx_ring_size);

		for (i = 0; i < frags; i++) {
			dma_unmap_page(priv->dev, be32_to_cpu(bdp->bufPtr),
				       be16_to_cpu(bdp->length),
				       DMA_TO_DEVICE);
			gfar_clear_txbd_status(bdp);
			bdp = next_txbd(bdp, base, tx_ring_size);
		}

		bytes_sent += GFAR_CB(skb)->bytes_sent;

		dev_kfree_skb_any(skb);

		tx_queue->tx_skbuff[skb_dirtytx] = NULL;

		skb_dirtytx = (skb_dirtytx + 1) &
			      TX_RING_MOD_MASK(tx_ring_size);

		howmany++;
		spin_lock_irqsave(&tx_queue->txlock, flags);
		tx_queue->num_txbdfree += nr_txbds;
		spin_unlock_irqrestore(&tx_queue->txlock, flags);
	}

	/* If we freed a buffer, we can restart transmission, if necessary */
	if (tx_queue->num_txbdfree &&
	    netif_tx_queue_stopped(txq) &&
	    !(test_bit(GFAR_DOWN, &priv->state)))
		netif_wake_subqueue(priv->ndev, tqi);

	/* Update dirty indicators */
	tx_queue->skb_dirtytx = skb_dirtytx;
	tx_queue->dirty_tx = bdp;

	netdev_tx_completed_queue(txq, howmany, bytes_sent);
}
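/* The netdev_tx_completed_queue() call above pairs with
 * netdev_tx_sent_queue() in gfar_start_xmit(); both sides use the
 * on-wire byte count stashed in GFAR_CB(skb)->bytes_sent (recorded
 * before any FCB/TxPAL push), keeping the BQL accounting consistent.
 */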
static struct sk_buff *gfar_alloc_skb(struct net_device *dev)
{
	struct gfar_private *priv = netdev_priv(dev);
	struct sk_buff *skb;

	skb = netdev_alloc_skb(dev, priv->rx_buffer_size + RXBUF_ALIGNMENT);
	if (!skb)
		return NULL;

	gfar_align_skb(skb);

	return skb;
}

static struct sk_buff *gfar_new_skb(struct net_device *dev, dma_addr_t *bufaddr)
{
	struct gfar_private *priv = netdev_priv(dev);
	struct sk_buff *skb;
	dma_addr_t addr;

	skb = gfar_alloc_skb(dev);
	if (!skb)
		return NULL;

	addr = dma_map_single(priv->dev, skb->data,
			      priv->rx_buffer_size, DMA_FROM_DEVICE);
	if (unlikely(dma_mapping_error(priv->dev, addr))) {
		dev_kfree_skb_any(skb);
		return NULL;
	}

	*bufaddr = addr;
	return skb;
}
static inline void count_errors(unsigned short status, struct net_device *dev)
{
	struct gfar_private *priv = netdev_priv(dev);
	struct net_device_stats *stats = &dev->stats;
	struct gfar_extra_stats *estats = &priv->extra_stats;

	/* If the packet was truncated, none of the other errors matter */
	if (status & RXBD_TRUNCATED) {
		stats->rx_length_errors++;

		atomic64_inc(&estats->rx_trunc);

		return;
	}
	/* Count the errors, if there were any */
	if (status & (RXBD_LARGE | RXBD_SHORT)) {
		stats->rx_length_errors++;

		if (status & RXBD_LARGE)
			atomic64_inc(&estats->rx_large);
		else
			atomic64_inc(&estats->rx_short);
	}
	if (status & RXBD_NONOCTET) {
		stats->rx_frame_errors++;
		atomic64_inc(&estats->rx_nonoctet);
	}
	if (status & RXBD_CRCERR) {
		atomic64_inc(&estats->rx_crcerr);
		stats->rx_crc_errors++;
	}
	if (status & RXBD_OVERRUN) {
		atomic64_inc(&estats->rx_overrun);
		stats->rx_crc_errors++;
	}
}
irqreturn_t gfar_receive(int irq, void *grp_id)
{
	struct gfar_priv_grp *grp = (struct gfar_priv_grp *)grp_id;
	unsigned long flags;
	u32 imask;

	if (likely(napi_schedule_prep(&grp->napi_rx))) {
		spin_lock_irqsave(&grp->grplock, flags);
		imask = gfar_read(&grp->regs->imask);
		imask &= IMASK_RX_DISABLED;
		gfar_write(&grp->regs->imask, imask);
		spin_unlock_irqrestore(&grp->grplock, flags);
		__napi_schedule(&grp->napi_rx);
	} else {
		/* Clear IEVENT, so interrupts aren't called again
		 * because of the packets that have already arrived.
		 */
		gfar_write(&grp->regs->ievent, IEVENT_RX_MASK);
	}

	return IRQ_HANDLED;
}
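/* While the poll is pending, the RX sources remain masked in IMASK and
 * are only re-enabled at the end of the poll routine; the else branch
 * merely acks IEVENT, since the already scheduled poll will pick up
 * the frames anyway.
 */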
/* Interrupt Handler for Transmit complete */
static irqreturn_t gfar_transmit(int irq, void *grp_id)
{
	struct gfar_priv_grp *grp = (struct gfar_priv_grp *)grp_id;
	unsigned long flags;
	u32 imask;

	if (likely(napi_schedule_prep(&grp->napi_tx))) {
		spin_lock_irqsave(&grp->grplock, flags);
		imask = gfar_read(&grp->regs->imask);
		imask &= IMASK_TX_DISABLED;
		gfar_write(&grp->regs->imask, imask);
		spin_unlock_irqrestore(&grp->grplock, flags);
		__napi_schedule(&grp->napi_tx);
	} else {
		/* Clear IEVENT, so interrupts aren't called again
		 * because of the packets that have already arrived.
		 */
		gfar_write(&grp->regs->ievent, IEVENT_TX_MASK);
	}

	return IRQ_HANDLED;
}
static inline void gfar_rx_checksum(struct sk_buff *skb, struct rxfcb *fcb)
{
	/* If valid headers were found, and valid sums
	 * were verified, then we tell the kernel that no
	 * checksumming is necessary.  Otherwise, we leave the
	 * checksum unverified and let the stack check it.
	 */
	if ((be16_to_cpu(fcb->flags) & RXFCB_CSUM_MASK) ==
	    (RXFCB_CIP | RXFCB_CTU))
		skb->ip_summed = CHECKSUM_UNNECESSARY;
	else
		skb_checksum_none_assert(skb);
}
/* gfar_process_frame() -- handle one incoming packet if skb isn't NULL. */
static void gfar_process_frame(struct net_device *dev, struct sk_buff *skb,
			       int amount_pull, struct napi_struct *napi)
{
	struct gfar_private *priv = netdev_priv(dev);
	struct rxfcb *fcb = NULL;

	/* fcb is at the beginning if exists */
	fcb = (struct rxfcb *)skb->data;

	/* Remove the FCB from the skb
	 * Remove the padded bytes, if there are any
	 */
	if (amount_pull) {
		skb_record_rx_queue(skb, fcb->rq);
		skb_pull(skb, amount_pull);
	}

	/* Get receive timestamp from the skb */
	if (priv->hwts_rx_en) {
		struct skb_shared_hwtstamps *shhwtstamps = skb_hwtstamps(skb);
		u64 *ns = (u64 *) skb->data;

		memset(shhwtstamps, 0, sizeof(*shhwtstamps));
		shhwtstamps->hwtstamp = ns_to_ktime(*ns);
	}

	if (priv->padding)
		skb_pull(skb, priv->padding);

	if (dev->features & NETIF_F_RXCSUM)
		gfar_rx_checksum(skb, fcb);

	/* Tell the skb what kind of packet this is */
	skb->protocol = eth_type_trans(skb, dev);

	/* We need to check for NETIF_F_HW_VLAN_CTAG_RX here.
	 * Even if vlan rx accel is disabled, on some chips
	 * RXFCB_VLN is pseudo randomly set.
	 */
	if (dev->features & NETIF_F_HW_VLAN_CTAG_RX &&
	    be16_to_cpu(fcb->flags) & RXFCB_VLN)
		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
				       be16_to_cpu(fcb->vlctl));

	/* Send the packet up the stack */
	napi_gro_receive(napi, skb);
}
/* gfar_clean_rx_ring() -- Processes each frame in the rx ring
 * until the budget/quota has been reached. Returns the number
 * of frames handled
 */
int gfar_clean_rx_ring(struct gfar_priv_rx_q *rx_queue, int rx_work_limit)
{
	struct net_device *dev = rx_queue->dev;
	struct rxbd8 *bdp, *base;
	struct sk_buff *skb;
	int pkt_len;
	int amount_pull;
	int howmany = 0;
	struct gfar_private *priv = netdev_priv(dev);

	/* Get the first full descriptor */
	bdp = rx_queue->cur_rx;
	base = rx_queue->rx_bd_base;

	amount_pull = priv->uses_rxfcb ? GMAC_FCB_LEN : 0;

	while (!(be16_to_cpu(bdp->status) & RXBD_EMPTY) && rx_work_limit--) {
		struct sk_buff *newskb;
		dma_addr_t bufaddr;

		rmb();

		/* Add another skb for the future */
		newskb = gfar_new_skb(dev, &bufaddr);

		skb = rx_queue->rx_skbuff[rx_queue->skb_currx];

		dma_unmap_single(priv->dev, be32_to_cpu(bdp->bufPtr),
				 priv->rx_buffer_size, DMA_FROM_DEVICE);

		if (unlikely(!(be16_to_cpu(bdp->status) & RXBD_ERR) &&
			     be16_to_cpu(bdp->length) > priv->rx_buffer_size))
			bdp->status = cpu_to_be16(RXBD_LARGE);

		/* We drop the frame if we failed to allocate a new buffer */
		if (unlikely(!newskb ||
			     !(be16_to_cpu(bdp->status) & RXBD_LAST) ||
			     be16_to_cpu(bdp->status) & RXBD_ERR)) {
			count_errors(be16_to_cpu(bdp->status), dev);

			if (unlikely(!newskb)) {
				newskb = skb;
				bufaddr = be32_to_cpu(bdp->bufPtr);
			} else if (skb)
				dev_kfree_skb(skb);
		} else {
			/* Increment the number of packets */
			rx_queue->stats.rx_packets++;
			howmany++;

			if (likely(skb)) {
				pkt_len = be16_to_cpu(bdp->length) -
					  ETH_FCS_LEN;
				/* Remove the FCS from the packet length */
				skb_put(skb, pkt_len);
				rx_queue->stats.rx_bytes += pkt_len;
				skb_record_rx_queue(skb, rx_queue->qindex);
				gfar_process_frame(dev, skb, amount_pull,
						   &rx_queue->grp->napi_rx);
			} else {
				netif_warn(priv, rx_err, dev, "Missing skb!\n");
				rx_queue->stats.rx_dropped++;
				atomic64_inc(&priv->extra_stats.rx_skbmissing);
			}
		}

		rx_queue->rx_skbuff[rx_queue->skb_currx] = newskb;

		/* Setup the new bdp */
		gfar_init_rxbdp(rx_queue, bdp, bufaddr);

		/* Update Last Free RxBD pointer for LFC */
		if (unlikely(rx_queue->rfbptr && priv->tx_actual_en))
			gfar_write(rx_queue->rfbptr, (u32)bdp);

		/* Update to the next pointer */
		bdp = next_bd(bdp, base, rx_queue->rx_ring_size);

		/* update to point at the next skb */
		rx_queue->skb_currx = (rx_queue->skb_currx + 1) &
				      RX_RING_MOD_MASK(rx_queue->rx_ring_size);
	}

	/* Update the current rxbd pointer to be the next one */
	rx_queue->cur_rx = bdp;

	return howmany;
}
static int gfar_poll_rx_sq(struct napi_struct *napi, int budget)
{
	struct gfar_priv_grp *gfargrp =
		container_of(napi, struct gfar_priv_grp, napi_rx);
	struct gfar __iomem *regs = gfargrp->regs;
	struct gfar_priv_rx_q *rx_queue = gfargrp->rx_queue;
	int work_done = 0;

	/* Clear IEVENT, so interrupts aren't called again
	 * because of the packets that have already arrived
	 */
	gfar_write(&regs->ievent, IEVENT_RX_MASK);

	work_done = gfar_clean_rx_ring(rx_queue, budget);

	if (work_done < budget) {
		u32 imask;
		napi_complete(napi);
		/* Clear the halt bit in RSTAT */
		gfar_write(&regs->rstat, gfargrp->rstat);

		spin_lock_irq(&gfargrp->grplock);
		imask = gfar_read(&regs->imask);
		imask |= IMASK_RX_DEFAULT;
		gfar_write(&regs->imask, imask);
		spin_unlock_irq(&gfargrp->grplock);
	}

	return work_done;
}
static int gfar_poll_tx_sq(struct napi_struct *napi, int budget)
{
	struct gfar_priv_grp *gfargrp =
		container_of(napi, struct gfar_priv_grp, napi_tx);
	struct gfar __iomem *regs = gfargrp->regs;
	struct gfar_priv_tx_q *tx_queue = gfargrp->tx_queue;
	u32 imask;

	/* Clear IEVENT, so interrupts aren't called again
	 * because of the packets that have already arrived
	 */
	gfar_write(&regs->ievent, IEVENT_TX_MASK);

	/* run Tx cleanup to completion */
	if (tx_queue->tx_skbuff[tx_queue->skb_dirtytx])
		gfar_clean_tx_ring(tx_queue);

	napi_complete(napi);

	spin_lock_irq(&gfargrp->grplock);
	imask = gfar_read(&regs->imask);
	imask |= IMASK_TX_DEFAULT;
	gfar_write(&regs->imask, imask);
	spin_unlock_irq(&gfargrp->grplock);

	return 0;
}
static int gfar_poll_rx(struct napi_struct *napi, int budget)
{
	struct gfar_priv_grp *gfargrp =
		container_of(napi, struct gfar_priv_grp, napi_rx);
	struct gfar_private *priv = gfargrp->priv;
	struct gfar __iomem *regs = gfargrp->regs;
	struct gfar_priv_rx_q *rx_queue = NULL;
	int work_done = 0, work_done_per_q = 0;
	int i, budget_per_q = 0;
	unsigned long rstat_rxf;
	int num_act_queues;

	/* Clear IEVENT, so interrupts aren't called again
	 * because of the packets that have already arrived
	 */
	gfar_write(&regs->ievent, IEVENT_RX_MASK);

	rstat_rxf = gfar_read(&regs->rstat) & RSTAT_RXF_MASK;

	num_act_queues = bitmap_weight(&rstat_rxf, MAX_RX_QS);
	if (num_act_queues)
		budget_per_q = budget/num_act_queues;

	for_each_set_bit(i, &gfargrp->rx_bit_map, priv->num_rx_queues) {
		/* skip queue if not active */
		if (!(rstat_rxf & (RSTAT_CLEAR_RXF0 >> i)))
			continue;

		rx_queue = priv->rx_queue[i];
		work_done_per_q =
			gfar_clean_rx_ring(rx_queue, budget_per_q);
		work_done += work_done_per_q;

		/* finished processing this queue */
		if (work_done_per_q < budget_per_q) {
			/* clear active queue hw indication */
			gfar_write(&regs->rstat,
				   RSTAT_CLEAR_RXF0 >> i);
			num_act_queues--;

			if (!num_act_queues)
				break;
		}
	}

	if (!num_act_queues) {
		u32 imask;
		napi_complete(napi);

		/* Clear the halt bit in RSTAT */
		gfar_write(&regs->rstat, gfargrp->rstat);

		spin_lock_irq(&gfargrp->grplock);
		imask = gfar_read(&regs->imask);
		imask |= IMASK_RX_DEFAULT;
		gfar_write(&regs->imask, imask);
		spin_unlock_irq(&gfargrp->grplock);
	}

	return work_done;
}
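/* The budget is split evenly over the rings that currently have frames
 * pending: e.g. a budget of 64 with two active queues lets each ring
 * clean up to 32 frames in this pass.
 */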
static int gfar_poll_tx(struct napi_struct *napi, int budget)
{
	struct gfar_priv_grp *gfargrp =
		container_of(napi, struct gfar_priv_grp, napi_tx);
	struct gfar_private *priv = gfargrp->priv;
	struct gfar __iomem *regs = gfargrp->regs;
	struct gfar_priv_tx_q *tx_queue = NULL;
	int has_tx_work = 0;
	int i;

	/* Clear IEVENT, so interrupts aren't called again
	 * because of the packets that have already arrived
	 */
	gfar_write(&regs->ievent, IEVENT_TX_MASK);

	for_each_set_bit(i, &gfargrp->tx_bit_map, priv->num_tx_queues) {
		tx_queue = priv->tx_queue[i];
		/* run Tx cleanup to completion */
		if (tx_queue->tx_skbuff[tx_queue->skb_dirtytx]) {
			gfar_clean_tx_ring(tx_queue);
			has_tx_work = 1;
		}
	}

	if (has_tx_work) {
		u32 imask;
		napi_complete(napi);

		spin_lock_irq(&gfargrp->grplock);
		imask = gfar_read(&regs->imask);
		imask |= IMASK_TX_DEFAULT;
		gfar_write(&regs->imask, imask);
		spin_unlock_irq(&gfargrp->grplock);
	}

	return 0;
}
#ifdef CONFIG_NET_POLL_CONTROLLER
/* Polling 'interrupt' - used by things like netconsole to send skbs
 * without having to re-enable interrupts. It's not called while
 * the interrupt routine is executing.
 */
static void gfar_netpoll(struct net_device *dev)
{
	struct gfar_private *priv = netdev_priv(dev);
	int i;

	/* If the device has multiple interrupts, run tx/rx */
	if (priv->device_flags & FSL_GIANFAR_DEV_HAS_MULTI_INTR) {
		for (i = 0; i < priv->num_grps; i++) {
			struct gfar_priv_grp *grp = &priv->gfargrp[i];

			disable_irq(gfar_irq(grp, TX)->irq);
			disable_irq(gfar_irq(grp, RX)->irq);
			disable_irq(gfar_irq(grp, ER)->irq);
			gfar_interrupt(gfar_irq(grp, TX)->irq, grp);
			enable_irq(gfar_irq(grp, ER)->irq);
			enable_irq(gfar_irq(grp, RX)->irq);
			enable_irq(gfar_irq(grp, TX)->irq);
		}
	} else {
		for (i = 0; i < priv->num_grps; i++) {
			struct gfar_priv_grp *grp = &priv->gfargrp[i];

			disable_irq(gfar_irq(grp, TX)->irq);
			gfar_interrupt(gfar_irq(grp, TX)->irq, grp);
			enable_irq(gfar_irq(grp, TX)->irq);
		}
	}
}
#endif
/* The interrupt handler for devices with one interrupt */
static irqreturn_t gfar_interrupt(int irq, void *grp_id)
{
	struct gfar_priv_grp *gfargrp = grp_id;

	/* Save ievent for future reference */
	u32 events = gfar_read(&gfargrp->regs->ievent);

	/* Check for reception */
	if (events & IEVENT_RX_MASK)
		gfar_receive(irq, grp_id);

	/* Check for transmit completion */
	if (events & IEVENT_TX_MASK)
		gfar_transmit(irq, grp_id);

	/* Check for errors */
	if (events & IEVENT_ERR_MASK)
		gfar_error(irq, grp_id);

	return IRQ_HANDLED;
}
/* Called every time the controller might need to be made
 * aware of new link state.  The PHY code conveys this
 * information through variables in the phydev structure, and this
 * function converts those variables into the appropriate
 * register values, and can bring down the device if needed.
 */
static void adjust_link(struct net_device *dev)
{
	struct gfar_private *priv = netdev_priv(dev);
	struct phy_device *phydev = priv->phydev;

	if (unlikely(phydev->link != priv->oldlink ||
		     (phydev->link && (phydev->duplex != priv->oldduplex ||
				       phydev->speed != priv->oldspeed))))
		gfar_update_link_state(priv);
}
/* Update the hash table based on the current list of multicast
 * addresses we subscribe to.  Also, change the promiscuity of
 * the device based on the flags (this function is called
 * whenever dev->flags is changed)
 */
static void gfar_set_multi(struct net_device *dev)
{
	struct netdev_hw_addr *ha;
	struct gfar_private *priv = netdev_priv(dev);
	struct gfar __iomem *regs = priv->gfargrp[0].regs;
	u32 tempval;

	if (dev->flags & IFF_PROMISC) {
		/* Set RCTRL to PROM */
		tempval = gfar_read(&regs->rctrl);
		tempval |= RCTRL_PROM;
		gfar_write(&regs->rctrl, tempval);
	} else {
		/* Set RCTRL to not PROM */
		tempval = gfar_read(&regs->rctrl);
		tempval &= ~(RCTRL_PROM);
		gfar_write(&regs->rctrl, tempval);
	}

	if (dev->flags & IFF_ALLMULTI) {
		/* Set the hash to rx all multicast frames */
		gfar_write(&regs->igaddr0, 0xffffffff);
		gfar_write(&regs->igaddr1, 0xffffffff);
		gfar_write(&regs->igaddr2, 0xffffffff);
		gfar_write(&regs->igaddr3, 0xffffffff);
		gfar_write(&regs->igaddr4, 0xffffffff);
		gfar_write(&regs->igaddr5, 0xffffffff);
		gfar_write(&regs->igaddr6, 0xffffffff);
		gfar_write(&regs->igaddr7, 0xffffffff);
		gfar_write(&regs->gaddr0, 0xffffffff);
		gfar_write(&regs->gaddr1, 0xffffffff);
		gfar_write(&regs->gaddr2, 0xffffffff);
		gfar_write(&regs->gaddr3, 0xffffffff);
		gfar_write(&regs->gaddr4, 0xffffffff);
		gfar_write(&regs->gaddr5, 0xffffffff);
		gfar_write(&regs->gaddr6, 0xffffffff);
		gfar_write(&regs->gaddr7, 0xffffffff);
	} else {
		int em_num;
		int idx;

		/* zero out the hash */
		gfar_write(&regs->igaddr0, 0x0);
		gfar_write(&regs->igaddr1, 0x0);
		gfar_write(&regs->igaddr2, 0x0);
		gfar_write(&regs->igaddr3, 0x0);
		gfar_write(&regs->igaddr4, 0x0);
		gfar_write(&regs->igaddr5, 0x0);
		gfar_write(&regs->igaddr6, 0x0);
		gfar_write(&regs->igaddr7, 0x0);
		gfar_write(&regs->gaddr0, 0x0);
		gfar_write(&regs->gaddr1, 0x0);
		gfar_write(&regs->gaddr2, 0x0);
		gfar_write(&regs->gaddr3, 0x0);
		gfar_write(&regs->gaddr4, 0x0);
		gfar_write(&regs->gaddr5, 0x0);
		gfar_write(&regs->gaddr6, 0x0);
		gfar_write(&regs->gaddr7, 0x0);

		/* If we have extended hash tables, we need to
		 * clear the exact match registers to prepare for
		 * setting them
		 */
		if (priv->extended_hash) {
			em_num = GFAR_EM_NUM + 1;
			gfar_clear_exact_match(dev);
			idx = 1;
		} else {
			idx = 0;
			em_num = 0;
		}

		if (netdev_mc_empty(dev))
			return;

		/* Parse the list, and set the appropriate bits */
		netdev_for_each_mc_addr(ha, dev) {
			if (idx < em_num) {
				gfar_set_mac_for_addr(dev, idx, ha->addr);
				idx++;
			} else
				gfar_set_hash_for_addr(dev, ha->addr);
		}
	}
}
/* Clears each of the exact match registers to zero, so they
 * don't interfere with normal reception
 */
static void gfar_clear_exact_match(struct net_device *dev)
{
	int idx;
	static const u8 zero_arr[ETH_ALEN] = {0, 0, 0, 0, 0, 0};

	for (idx = 1; idx < GFAR_EM_NUM + 1; idx++)
		gfar_set_mac_for_addr(dev, idx, zero_arr);
}
/* Set the appropriate hash bit for the given addr */
/* The algorithm works like so:
 * 1) Take the Destination Address (ie the multicast address), and
 * do a CRC on it (little endian), and reverse the bits of the
 * result.
 * 2) Use the 8 most significant bits as a hash into a 256-entry
 * table.  The table is controlled through 8 32-bit registers:
 * gaddr0-7.  gaddr0's MSB is entry 0, and gaddr7's LSB is
 * entry 255.  This means that the 3 most significant bits in the
 * hash index which gaddr register to use, and the 5 other bits
 * indicate which bit (assuming an IBM numbering scheme, which
 * for PowerPC (tm) is usually the case) in the register holds
 * the entry.
 */
static void gfar_set_hash_for_addr(struct net_device *dev, u8 *addr)
{
	u32 tempval;
	struct gfar_private *priv = netdev_priv(dev);
	u32 result = ether_crc(ETH_ALEN, addr);
	int width = priv->hash_width;
	u8 whichbit = (result >> (32 - width)) & 0x1f;
	u8 whichreg = result >> (32 - width + 5);
	u32 value = (1 << (31-whichbit));

	tempval = gfar_read(priv->hash_regs[whichreg]);
	tempval |= value;
	gfar_write(priv->hash_regs[whichreg], tempval);
}
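/* Worked example, assuming hash_width == 8: a CRC whose top byte is
 * 0xa7 (binary 101 00111) yields whichreg = 5 and whichbit = 7, i.e.
 * bit 7 counted from the MSB of gaddr5 (value 1 << 24).
 */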
/* There are multiple MAC Address register pairs on some controllers
 * This function sets the numth pair to a given address
 */
static void gfar_set_mac_for_addr(struct net_device *dev, int num,
				  const u8 *addr)
{
	struct gfar_private *priv = netdev_priv(dev);
	struct gfar __iomem *regs = priv->gfargrp[0].regs;
	u32 tempval;
	u32 __iomem *macptr = &regs->macstnaddr1;

	macptr += num*2;

	/* For a station address of 0x12345678ABCD in transmission
	 * order (BE), MACnADDR1 is set to 0xCDAB7856 and
	 * MACnADDR2 is set to 0x34120000.
	 */
	tempval = (addr[5] << 24) | (addr[4] << 16) |
		  (addr[3] << 8)  |  addr[2];

	gfar_write(macptr, tempval);

	tempval = (addr[1] << 24) | (addr[0] << 16);

	gfar_write(macptr+1, tempval);
}
/* GFAR error interrupt handler */
static irqreturn_t gfar_error(int irq, void *grp_id)
{
	struct gfar_priv_grp *gfargrp = grp_id;
	struct gfar __iomem *regs = gfargrp->regs;
	struct gfar_private *priv = gfargrp->priv;
	struct net_device *dev = priv->ndev;

	/* Save ievent for future reference */
	u32 events = gfar_read(&regs->ievent);

	/* Clear IEVENT */
	gfar_write(&regs->ievent, events & IEVENT_ERR_MASK);

	/* Magic Packet is not an error. */
	if ((priv->device_flags & FSL_GIANFAR_DEV_HAS_MAGIC_PACKET) &&
	    (events & IEVENT_MAG))
		events &= ~IEVENT_MAG;

	if (netif_msg_rx_err(priv) || netif_msg_tx_err(priv))
		netdev_dbg(dev,
			   "error interrupt (ievent=0x%08x imask=0x%08x)\n",
			   events, gfar_read(&regs->imask));

	/* Update the error counters */
	if (events & IEVENT_TXE) {
		dev->stats.tx_errors++;

		if (events & IEVENT_LC)
			dev->stats.tx_window_errors++;
		if (events & IEVENT_CRL)
			dev->stats.tx_aborted_errors++;
		if (events & IEVENT_XFUN) {
			unsigned long flags;

			netif_dbg(priv, tx_err, dev,
				  "TX FIFO underrun, packet dropped\n");
			dev->stats.tx_dropped++;
			atomic64_inc(&priv->extra_stats.tx_underrun);

			local_irq_save(flags);
			lock_tx_qs(priv);

			/* Reactivate the Tx Queues */
			gfar_write(&regs->tstat, gfargrp->tstat);

			unlock_tx_qs(priv);
			local_irq_restore(flags);
		}
		netif_dbg(priv, tx_err, dev, "Transmit Error\n");
	}
	if (events & IEVENT_BSY) {
		dev->stats.rx_errors++;
		atomic64_inc(&priv->extra_stats.rx_bsy);

		gfar_receive(irq, grp_id);

		netif_dbg(priv, rx_err, dev, "busy error (rstat: %x)\n",
			  gfar_read(&regs->rstat));
	}
	if (events & IEVENT_BABR) {
		dev->stats.rx_errors++;
		atomic64_inc(&priv->extra_stats.rx_babr);

		netif_dbg(priv, rx_err, dev, "babbling RX error\n");
	}
	if (events & IEVENT_EBERR) {
		atomic64_inc(&priv->extra_stats.eberr);
		netif_dbg(priv, rx_err, dev, "bus error\n");
	}
	if (events & IEVENT_RXC)
		netif_dbg(priv, rx_status, dev, "control frame\n");

	if (events & IEVENT_BABT) {
		atomic64_inc(&priv->extra_stats.tx_babt);
		netif_dbg(priv, tx_err, dev, "babbling TX error\n");
	}
	return IRQ_HANDLED;
}
static u32 gfar_get_flowctrl_cfg(struct gfar_private *priv)
{
	struct phy_device *phydev = priv->phydev;
	u32 val = 0;

	if (!phydev->duplex)
		return val;

	if (!priv->pause_aneg_en) {
		if (priv->tx_pause_en)
			val |= MACCFG1_TX_FLOW;
		if (priv->rx_pause_en)
			val |= MACCFG1_RX_FLOW;
	} else {
		u16 lcl_adv, rmt_adv;
		u8 flowctrl;

		/* get link partner capabilities */
		rmt_adv = 0;
		if (phydev->pause)
			rmt_adv = LPA_PAUSE_CAP;
		if (phydev->asym_pause)
			rmt_adv |= LPA_PAUSE_ASYM;

		lcl_adv = 0;
		if (phydev->advertising & ADVERTISED_Pause)
			lcl_adv |= ADVERTISE_PAUSE_CAP;
		if (phydev->advertising & ADVERTISED_Asym_Pause)
			lcl_adv |= ADVERTISE_PAUSE_ASYM;

		flowctrl = mii_resolve_flowctrl_fdx(lcl_adv, rmt_adv);
		if (flowctrl & FLOW_CTRL_TX)
			val |= MACCFG1_TX_FLOW;
		if (flowctrl & FLOW_CTRL_RX)
			val |= MACCFG1_RX_FLOW;
	}

	return val;
}
static noinline void gfar_update_link_state(struct gfar_private *priv)
{
	struct gfar __iomem *regs = priv->gfargrp[0].regs;
	struct phy_device *phydev = priv->phydev;
	struct gfar_priv_rx_q *rx_queue = NULL;
	int i;
	struct rxbd8 *bdp;

	if (unlikely(test_bit(GFAR_RESETTING, &priv->state)))
		return;

	if (phydev->link) {
		u32 tempval1 = gfar_read(&regs->maccfg1);
		u32 tempval = gfar_read(&regs->maccfg2);
		u32 ecntrl = gfar_read(&regs->ecntrl);
		/* the Tx flow control bit lives in maccfg1 */
		u32 tx_flow_oldval = (tempval1 & MACCFG1_TX_FLOW);

		if (phydev->duplex != priv->oldduplex) {
			if (!(phydev->duplex))
				tempval &= ~(MACCFG2_FULL_DUPLEX);
			else
				tempval |= MACCFG2_FULL_DUPLEX;

			priv->oldduplex = phydev->duplex;
		}

		if (phydev->speed != priv->oldspeed) {
			switch (phydev->speed) {
			case 1000:
				tempval =
				    ((tempval & ~(MACCFG2_IF)) | MACCFG2_GMII);

				ecntrl &= ~(ECNTRL_R100);
				break;
			case 100:
			case 10:
				tempval =
				    ((tempval & ~(MACCFG2_IF)) | MACCFG2_MII);

				/* Reduced mode distinguishes
				 * between 10 and 100
				 */
				if (phydev->speed == SPEED_100)
					ecntrl |= ECNTRL_R100;
				else
					ecntrl &= ~(ECNTRL_R100);
				break;
			default:
				netif_warn(priv, link, priv->ndev,
					   "Ack!  Speed (%d) is not 10/100/1000!\n",
					   phydev->speed);
				break;
			}

			priv->oldspeed = phydev->speed;
		}

		tempval1 &= ~(MACCFG1_TX_FLOW | MACCFG1_RX_FLOW);
		tempval1 |= gfar_get_flowctrl_cfg(priv);

		/* Turn last free buffer recording on */
		if ((tempval1 & MACCFG1_TX_FLOW) && !tx_flow_oldval) {
			for (i = 0; i < priv->num_rx_queues; i++) {
				rx_queue = priv->rx_queue[i];
				bdp = rx_queue->cur_rx;
				/* skip to previous bd */
				bdp = skip_bd(bdp, rx_queue->rx_ring_size - 1,
					      rx_queue->rx_bd_base,
					      rx_queue->rx_ring_size);

				if (rx_queue->rfbptr)
					gfar_write(rx_queue->rfbptr, (u32)bdp);
			}

			priv->tx_actual_en = 1;
		}

		if (unlikely(!(tempval1 & MACCFG1_TX_FLOW) && tx_flow_oldval))
			priv->tx_actual_en = 0;

		gfar_write(&regs->maccfg1, tempval1);
		gfar_write(&regs->maccfg2, tempval);
		gfar_write(&regs->ecntrl, ecntrl);

		if (!priv->oldlink)
			priv->oldlink = 1;

	} else if (priv->oldlink) {
		priv->oldlink = 0;
		priv->oldspeed = 0;
		priv->oldduplex = -1;
	}

	if (netif_msg_link(priv))
		phy_print_status(phydev);
}
static const struct of_device_id gfar_match[] =
{
	{
		.type = "network",
		.compatible = "gianfar",
	},
	{
		.compatible = "fsl,etsec2",
	},
	{},
};
MODULE_DEVICE_TABLE(of, gfar_match);

/* Structure for a device driver */
static struct platform_driver gfar_driver = {
	.driver = {
		.name = "fsl-gianfar",
		.pm = GFAR_PM_OPS,
		.of_match_table = gfar_match,
	},
	.probe = gfar_probe,
	.remove = gfar_remove,
};

module_platform_driver(gfar_driver);