Commit | Line | Data |
---|---|---|
0bbaf069 | 1 | /* |
1da177e4 LT |
2 | * drivers/net/gianfar.c |
3 | * | |
4 | * Gianfar Ethernet Driver | |
7f7f5316 AF |
5 | * This driver is designed for the non-CPM ethernet controllers |
6 | * on the 85xx and 83xx family of integrated processors | |
1da177e4 LT |
7 | * Based on 8260_io/fcc_enet.c |
8 | * | |
9 | * Author: Andy Fleming | |
4c8d3d99 | 10 | * Maintainer: Kumar Gala |
a12f801d | 11 | * Modifier: Sandeep Gopalpet <sandeep.kumar@freescale.com> |
1da177e4 | 12 | * |
a12f801d SG |
13 | * Copyright 2002-2009 Freescale Semiconductor, Inc. |
14 | * Copyright 2007 MontaVista Software, Inc. | |
1da177e4 LT |
15 | * |
16 | * This program is free software; you can redistribute it and/or modify it | |
17 | * under the terms of the GNU General Public License as published by the | |
18 | * Free Software Foundation; either version 2 of the License, or (at your | |
19 | * option) any later version. | |
20 | * | |
21 | * Gianfar: AKA Lambda Draconis, "Dragon" | |
22 | * RA 11 31 24.2 | |
23 | * Dec +69 19 52 | |
24 | * V 3.84 | |
25 | * B-V +1.62 | |
26 | * | |
27 | * Theory of operation | |
0bbaf069 | 28 | * |
b31a1d8b AF |
29 | * The driver is initialized through of_device. Configuration information |
30 | * is therefore conveyed through an OF-style device tree. | |
1da177e4 LT |
31 | * |
32 | * The Gianfar Ethernet Controller uses a ring of buffer | |
33 | * descriptors. The beginning is indicated by a register | |
0bbaf069 KG |
34 | * pointing to the physical address of the start of the ring. |
35 | * The end is determined by a "wrap" bit being set in the | |
1da177e4 LT |
36 | * last descriptor of the ring. |
37 | * | |
38 | * When a packet is received, the RXF bit in the | |
0bbaf069 | 39 | * IEVENT register is set, triggering an interrupt when the |
1da177e4 LT |
40 | * corresponding bit in the IMASK register is also set (if |
41 | * interrupt coalescing is active, then the interrupt may not | |
42 | * happen immediately, but will wait until either a set number | |
bb40dcbb | 43 | * of frames or amount of time have passed). In NAPI, the |
1da177e4 | 44 | * interrupt handler will signal there is work to be done, and |
0aa1538f | 45 | * exit. This method will start at the last known empty |
0bbaf069 | 46 | * descriptor, and process every subsequent descriptor until there |
1da177e4 LT |
47 | * are none left with data (NAPI will stop after a set number of |
48 | * packets to give time to other tasks, but will eventually | |
49 | * process all the packets). The data arrives inside a | |
50 | * pre-allocated skb, and so after the skb is passed up to the | |
51 | * stack, a new skb must be allocated, and the address field in | |
52 | * the buffer descriptor must be updated to indicate this new | |
53 | * skb. | |
54 | * | |
55 | * When the kernel requests that a packet be transmitted, the | |
56 | * driver starts where it left off last time, and points the | |
57 | * descriptor at the buffer which was passed in. The driver | |
58 | * then informs the DMA engine that there are packets ready to | |
59 | * be transmitted. Once the controller is finished transmitting | |
60 | * the packet, an interrupt may be triggered (under the same | |
61 | * conditions as for reception, but depending on the TXF bit). | |
62 | * The driver then cleans up the buffer. | |
63 | */ | |
64 | ||
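The descriptor-ring walk described above can be sketched as follows. This is an illustrative outline only, not driver code; `deliver_to_stack()` and `alloc_new_buffer()` are placeholder names, and only the EMPTY/WRAP handling mirrors what the hardware-facing code below actually does.

```c
/* Illustrative sketch of the RX descriptor-ring walk described above. */
struct bd {
	unsigned short flags;	/* RXBD_EMPTY, RXBD_WRAP, ... */
	void *buf;		/* pre-allocated skb data */
};

static void rx_ring_walk(struct bd *ring, int *cur)
{
	int i = *cur;

	/* Stop at the first descriptor still owned by hardware (EMPTY). */
	while (!(ring[i].flags & RXBD_EMPTY)) {
		deliver_to_stack(ring[i].buf);		/* pass the filled skb up */
		ring[i].buf = alloc_new_buffer();	/* refill before reuse */
		ring[i].flags |= RXBD_EMPTY;		/* hand it back to the DMA engine */

		/* The WRAP bit in the last descriptor closes the ring. */
		i = (ring[i].flags & RXBD_WRAP) ? 0 : i + 1;
	}
	*cur = i;	/* remember the last known empty descriptor */
}
```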
1da177e4 | 65 | #include <linux/kernel.h> |
1da177e4 LT |
66 | #include <linux/string.h> |
67 | #include <linux/errno.h> | |
bb40dcbb | 68 | #include <linux/unistd.h> |
1da177e4 LT |
69 | #include <linux/slab.h> |
70 | #include <linux/interrupt.h> | |
71 | #include <linux/init.h> | |
72 | #include <linux/delay.h> | |
73 | #include <linux/netdevice.h> | |
74 | #include <linux/etherdevice.h> | |
75 | #include <linux/skbuff.h> | |
0bbaf069 | 76 | #include <linux/if_vlan.h> |
1da177e4 LT |
77 | #include <linux/spinlock.h> |
78 | #include <linux/mm.h> | |
fe192a49 | 79 | #include <linux/of_mdio.h> |
b31a1d8b | 80 | #include <linux/of_platform.h> |
0bbaf069 KG |
81 | #include <linux/ip.h> |
82 | #include <linux/tcp.h> | |
83 | #include <linux/udp.h> | |
9c07b884 | 84 | #include <linux/in.h> |
1da177e4 LT |
85 | |
86 | #include <asm/io.h> | |
87 | #include <asm/irq.h> | |
88 | #include <asm/uaccess.h> | |
89 | #include <linux/module.h> | |
1da177e4 LT |
90 | #include <linux/dma-mapping.h> |
91 | #include <linux/crc32.h> | |
bb40dcbb AF |
92 | #include <linux/mii.h> |
93 | #include <linux/phy.h> | |
b31a1d8b AF |
94 | #include <linux/phy_fixed.h> |
95 | #include <linux/of.h> | |
1da177e4 LT |
96 | |
97 | #include "gianfar.h" | |
1577ecef | 98 | #include "fsl_pq_mdio.h" |
1da177e4 LT |
99 | |
100 | #define TX_TIMEOUT (1*HZ) | |
1da177e4 LT |
101 | #undef BRIEF_GFAR_ERRORS |
102 | #undef VERBOSE_GFAR_ERRORS | |
103 | ||
1da177e4 | 104 | const char gfar_driver_name[] = "Gianfar Ethernet"; |
7f7f5316 | 105 | const char gfar_driver_version[] = "1.3"; |
1da177e4 | 106 | |
1da177e4 LT |
107 | static int gfar_enet_open(struct net_device *dev); |
108 | static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev); | |
ab939905 | 109 | static void gfar_reset_task(struct work_struct *work); |
1da177e4 LT |
110 | static void gfar_timeout(struct net_device *dev); |
111 | static int gfar_close(struct net_device *dev); | |
815b97c6 | 112 | struct sk_buff *gfar_new_skb(struct net_device *dev); |
a12f801d | 113 | static void gfar_new_rxbdp(struct gfar_priv_rx_q *rx_queue, struct rxbd8 *bdp, |
815b97c6 | 114 | struct sk_buff *skb); |
1da177e4 LT |
115 | static int gfar_set_mac_address(struct net_device *dev); |
116 | static int gfar_change_mtu(struct net_device *dev, int new_mtu); | |
7d12e780 DH |
117 | static irqreturn_t gfar_error(int irq, void *dev_id); |
118 | static irqreturn_t gfar_transmit(int irq, void *dev_id); | |
119 | static irqreturn_t gfar_interrupt(int irq, void *dev_id); | |
1da177e4 LT |
120 | static void adjust_link(struct net_device *dev); |
121 | static void init_registers(struct net_device *dev); | |
122 | static int init_phy(struct net_device *dev); | |
b31a1d8b AF |
123 | static int gfar_probe(struct of_device *ofdev, |
124 | const struct of_device_id *match); | |
125 | static int gfar_remove(struct of_device *ofdev); | |
bb40dcbb | 126 | static void free_skb_resources(struct gfar_private *priv); |
1da177e4 LT |
127 | static void gfar_set_multi(struct net_device *dev); |
128 | static void gfar_set_hash_for_addr(struct net_device *dev, u8 *addr); | |
d3c12873 | 129 | static void gfar_configure_serdes(struct net_device *dev); |
bea3348e | 130 | static int gfar_poll(struct napi_struct *napi, int budget); |
f2d71c2d VW |
131 | #ifdef CONFIG_NET_POLL_CONTROLLER |
132 | static void gfar_netpoll(struct net_device *dev); | |
133 | #endif | |
a12f801d SG |
134 | int gfar_clean_rx_ring(struct gfar_priv_rx_q *rx_queue, int rx_work_limit); |
135 | static int gfar_clean_tx_ring(struct gfar_priv_tx_q *tx_queue); | |
2c2db48a DH |
136 | static int gfar_process_frame(struct net_device *dev, struct sk_buff *skb, |
137 | int amount_pull); | |
0bbaf069 KG |
138 | static void gfar_vlan_rx_register(struct net_device *netdev, |
139 | struct vlan_group *grp); | |
7f7f5316 | 140 | void gfar_halt(struct net_device *dev); |
d87eb127 | 141 | static void gfar_halt_nodisable(struct net_device *dev); |
7f7f5316 AF |
142 | void gfar_start(struct net_device *dev); |
143 | static void gfar_clear_exact_match(struct net_device *dev); | |
144 | static void gfar_set_mac_for_addr(struct net_device *dev, int num, u8 *addr); | |
26ccfc37 | 145 | static int gfar_ioctl(struct net_device *dev, struct ifreq *rq, int cmd); |
1da177e4 | 146 | |
1da177e4 LT |
147 | MODULE_AUTHOR("Freescale Semiconductor, Inc"); |
148 | MODULE_DESCRIPTION("Gianfar Ethernet Driver"); | |
149 | MODULE_LICENSE("GPL"); | |
150 | ||
a12f801d | 151 | static void gfar_init_rxbdp(struct gfar_priv_rx_q *rx_queue, struct rxbd8 *bdp, |
8a102fe0 AV |
152 | dma_addr_t buf) |
153 | { | |
8a102fe0 AV |
154 | u32 lstatus; |
155 | ||
156 | bdp->bufPtr = buf; | |
157 | ||
158 | lstatus = BD_LFLAG(RXBD_EMPTY | RXBD_INTERRUPT); | |
a12f801d | 159 | if (bdp == rx_queue->rx_bd_base + rx_queue->rx_ring_size - 1) |
8a102fe0 AV |
160 | lstatus |= BD_LFLAG(RXBD_WRAP); |
161 | ||
162 | eieio(); | |
163 | ||
164 | bdp->lstatus = lstatus; | |
165 | } | |
166 | ||
8728327e | 167 | static int gfar_init_bds(struct net_device *ndev) |
826aa4a0 | 168 | { |
8728327e | 169 | struct gfar_private *priv = netdev_priv(ndev); |
a12f801d SG |
170 | struct gfar_priv_tx_q *tx_queue = NULL; |
171 | struct gfar_priv_rx_q *rx_queue = NULL; | |
826aa4a0 AV |
172 | struct txbd8 *txbdp; |
173 | struct rxbd8 *rxbdp; | |
fba4ed03 | 174 | int i, j; |
a12f801d | 175 | |
fba4ed03 SG |
176 | for (i = 0; i < priv->num_tx_queues; i++) { |
177 | tx_queue = priv->tx_queue[i]; | |
178 | /* Initialize some variables in our dev structure */ | |
179 | tx_queue->num_txbdfree = tx_queue->tx_ring_size; | |
180 | tx_queue->dirty_tx = tx_queue->tx_bd_base; | |
181 | tx_queue->cur_tx = tx_queue->tx_bd_base; | |
182 | tx_queue->skb_curtx = 0; | |
183 | tx_queue->skb_dirtytx = 0; | |
184 | ||
185 | /* Initialize Transmit Descriptor Ring */ | |
186 | txbdp = tx_queue->tx_bd_base; | |
187 | for (j = 0; j < tx_queue->tx_ring_size; j++) { | |
188 | txbdp->lstatus = 0; | |
189 | txbdp->bufPtr = 0; | |
190 | txbdp++; | |
191 | } | |
8728327e | 192 | |
fba4ed03 SG |
193 | /* Set the last descriptor in the ring to indicate wrap */ |
194 | txbdp--; | |
195 | txbdp->status |= TXBD_WRAP; | |
8728327e AV |
196 | } |
197 | ||
fba4ed03 SG |
198 | for (i = 0; i < priv->num_rx_queues; i++) { |
199 | rx_queue = priv->rx_queue[i]; | |
200 | rx_queue->cur_rx = rx_queue->rx_bd_base; | |
201 | rx_queue->skb_currx = 0; | |
202 | rxbdp = rx_queue->rx_bd_base; | |
8728327e | 203 | |
fba4ed03 SG |
204 | for (j = 0; j < rx_queue->rx_ring_size; j++) { |
205 | struct sk_buff *skb = rx_queue->rx_skbuff[j]; | |
8728327e | 206 | |
fba4ed03 SG |
207 | if (skb) { |
208 | gfar_init_rxbdp(rx_queue, rxbdp, | |
209 | rxbdp->bufPtr); | |
210 | } else { | |
211 | skb = gfar_new_skb(ndev); | |
212 | if (!skb) { | |
213 | pr_err("%s: Can't allocate RX buffers\n", | |
214 | ndev->name); | |
215 | goto err_rxalloc_fail; | |
216 | } | |
217 | rx_queue->rx_skbuff[j] = skb; | |
218 | ||
219 | gfar_new_rxbdp(rx_queue, rxbdp, skb); | |
8728327e | 220 | } |
8728327e | 221 | |
fba4ed03 | 222 | rxbdp++; |
8728327e AV |
223 | } |
224 | ||
8728327e AV |
225 | } |
226 | ||
227 | return 0; | |
fba4ed03 SG |
228 | |
229 | err_rxalloc_fail: | |
230 | free_skb_resources(priv); | |
231 | return -ENOMEM; | |
8728327e AV |
232 | } |
233 | ||
234 | static int gfar_alloc_skb_resources(struct net_device *ndev) | |
235 | { | |
826aa4a0 | 236 | void *vaddr; |
fba4ed03 SG |
237 | dma_addr_t addr; |
238 | int i, j, k; | |
826aa4a0 AV |
239 | struct gfar_private *priv = netdev_priv(ndev); |
240 | struct device *dev = &priv->ofdev->dev; | |
a12f801d SG |
241 | struct gfar_priv_tx_q *tx_queue = NULL; |
242 | struct gfar_priv_rx_q *rx_queue = NULL; | |
243 | ||
fba4ed03 SG |
244 | priv->total_tx_ring_size = 0; |
245 | for (i = 0; i < priv->num_tx_queues; i++) | |
246 | priv->total_tx_ring_size += priv->tx_queue[i]->tx_ring_size; | |
247 | ||
248 | priv->total_rx_ring_size = 0; | |
249 | for (i = 0; i < priv->num_rx_queues; i++) | |
250 | priv->total_rx_ring_size += priv->rx_queue[i]->rx_ring_size; | |
826aa4a0 AV |
251 | |
252 | /* Allocate memory for the buffer descriptors */ | |
8728327e | 253 | vaddr = dma_alloc_coherent(dev, |
fba4ed03 SG |
254 | sizeof(struct txbd8) * priv->total_tx_ring_size + |
255 | sizeof(struct rxbd8) * priv->total_rx_ring_size, | |
256 | &addr, GFP_KERNEL); | |
826aa4a0 AV |
257 | if (!vaddr) { |
258 | if (netif_msg_ifup(priv)) | |
259 | pr_err("%s: Could not allocate buffer descriptors!\n", | |
260 | ndev->name); | |
261 | return -ENOMEM; | |
262 | } | |
263 | ||
fba4ed03 SG |
264 | for (i = 0; i < priv->num_tx_queues; i++) { |
265 | tx_queue = priv->tx_queue[i]; | |
266 | tx_queue->tx_bd_base = (struct txbd8 *) vaddr; | |
267 | tx_queue->tx_bd_dma_base = addr; | |
268 | tx_queue->dev = ndev; | |
269 | /* enet DMA only understands physical addresses */ | |
270 | addr += sizeof(struct txbd8) *tx_queue->tx_ring_size; | |
271 | vaddr += sizeof(struct txbd8) *tx_queue->tx_ring_size; | |
272 | } | |
826aa4a0 | 273 | |
826aa4a0 | 274 | /* Start the rx descriptor ring where the tx ring leaves off */ |
fba4ed03 SG |
275 | for (i = 0; i < priv->num_rx_queues; i++) { |
276 | rx_queue = priv->rx_queue[i]; | |
277 | rx_queue->rx_bd_base = (struct rxbd8 *) vaddr; | |
278 | rx_queue->rx_bd_dma_base = addr; | |
279 | rx_queue->dev = ndev; | |
280 | addr += sizeof (struct rxbd8) * rx_queue->rx_ring_size; | |
281 | vaddr += sizeof (struct rxbd8) * rx_queue->rx_ring_size; | |
282 | } | |
826aa4a0 AV |
283 | |
284 | /* Setup the skbuff rings */ | |
fba4ed03 SG |
285 | for (i = 0; i < priv->num_tx_queues; i++) { |
286 | tx_queue = priv->tx_queue[i]; | |
287 | tx_queue->tx_skbuff = kmalloc(sizeof(*tx_queue->tx_skbuff) * | |
a12f801d | 288 | tx_queue->tx_ring_size, GFP_KERNEL); |
fba4ed03 SG |
289 | if (!tx_queue->tx_skbuff) { |
290 | if (netif_msg_ifup(priv)) | |
291 | pr_err("%s: Could not allocate tx_skbuff\n", | |
292 | ndev->name); | |
293 | goto cleanup; | |
294 | } | |
826aa4a0 | 295 | |
fba4ed03 SG |
296 | for (k = 0; k < tx_queue->tx_ring_size; k++) |
297 | tx_queue->tx_skbuff[k] = NULL; | |
298 | } | |
826aa4a0 | 299 | |
fba4ed03 SG |
300 | for (i = 0; i < priv->num_rx_queues; i++) { |
301 | rx_queue = priv->rx_queue[i]; | |
302 | rx_queue->rx_skbuff = kmalloc(sizeof(*rx_queue->rx_skbuff) * | |
a12f801d | 303 | rx_queue->rx_ring_size, GFP_KERNEL); |
826aa4a0 | 304 | |
fba4ed03 SG |
305 | if (!rx_queue->rx_skbuff) { |
306 | if (netif_msg_ifup(priv)) | |
307 | pr_err("%s: Could not allocate rx_skbuff\n", | |
308 | ndev->name); | |
309 | goto cleanup; | |
310 | } | |
311 | ||
312 | for (j = 0; j < rx_queue->rx_ring_size; j++) | |
313 | rx_queue->rx_skbuff[j] = NULL; | |
314 | } | |
826aa4a0 | 315 | |
8728327e AV |
316 | if (gfar_init_bds(ndev)) |
317 | goto cleanup; | |
826aa4a0 AV |
318 | |
319 | return 0; | |
320 | ||
321 | cleanup: | |
322 | free_skb_resources(priv); | |
323 | return -ENOMEM; | |
324 | } | |
325 | ||
fba4ed03 SG |
326 | static void gfar_init_tx_rx_base(struct gfar_private *priv) |
327 | { | |
46ceb60c | 328 | struct gfar __iomem *regs = priv->gfargrp[0].regs; |
18294ad1 | 329 | u32 __iomem *baddr; |
fba4ed03 SG |
330 | int i; |
331 | ||
332 | baddr = &regs->tbase0; |
333 | for(i = 0; i < priv->num_tx_queues; i++) { | |
334 | gfar_write(baddr, priv->tx_queue[i]->tx_bd_dma_base); | |
335 | baddr += 2; | |
336 | } | |
337 | ||
338 | baddr = &regs->rbase0; |
339 | for(i = 0; i < priv->num_rx_queues; i++) { | |
340 | gfar_write(baddr, priv->rx_queue[i]->rx_bd_dma_base); | |
341 | baddr += 2; | |
342 | } | |
343 | } | |
344 | ||
826aa4a0 AV |
345 | static void gfar_init_mac(struct net_device *ndev) |
346 | { | |
347 | struct gfar_private *priv = netdev_priv(ndev); | |
46ceb60c | 348 | struct gfar __iomem *regs = priv->gfargrp[0].regs; |
826aa4a0 AV |
349 | u32 rctrl = 0; |
350 | u32 tctrl = 0; | |
351 | u32 attrs = 0; | |
352 | ||
fba4ed03 SG |
353 | /* write the tx/rx base registers */ |
354 | gfar_init_tx_rx_base(priv); | |
32c513bc | 355 | |
826aa4a0 | 356 | /* Configure the coalescing support */ |
46ceb60c | 357 | gfar_configure_coalescing(priv, 0xFF, 0xFF); |
fba4ed03 | 358 | |
1ccb8389 | 359 | if (priv->rx_filer_enable) { |
fba4ed03 | 360 | rctrl |= RCTRL_FILREN; |
1ccb8389 SG |
361 | /* Program the RIR0 reg with the required distribution */ |
362 | gfar_write(&regs->rir0, DEFAULT_RIR0); |
363 | } | |
826aa4a0 AV |
364 | |
365 | if (priv->rx_csum_enable) | |
366 | rctrl |= RCTRL_CHECKSUMMING; | |
367 | ||
368 | if (priv->extended_hash) { | |
369 | rctrl |= RCTRL_EXTHASH; | |
370 | ||
371 | gfar_clear_exact_match(ndev); | |
372 | rctrl |= RCTRL_EMEN; | |
373 | } | |
374 | ||
375 | if (priv->padding) { | |
376 | rctrl &= ~RCTRL_PAL_MASK; | |
377 | rctrl |= RCTRL_PADDING(priv->padding); | |
378 | } | |
379 | ||
380 | /* keep vlan related bits if it's enabled */ | |
381 | if (priv->vlgrp) { | |
382 | rctrl |= RCTRL_VLEX | RCTRL_PRSDEP_INIT; | |
383 | tctrl |= TCTRL_VLINS; | |
384 | } | |
385 | ||
386 | /* Init rctrl based on our settings */ | |
387 | gfar_write(&regs->rctrl, rctrl); |
388 | ||
389 | if (ndev->features & NETIF_F_IP_CSUM) | |
390 | tctrl |= TCTRL_INIT_CSUM; | |
391 | ||
fba4ed03 SG |
392 | tctrl |= TCTRL_TXSCHED_PRIO; |
393 | ||
826aa4a0 AV |
394 | gfar_write(&regs->tctrl, tctrl); |
395 | ||
396 | /* Set the extraction length and index */ | |
397 | attrs = ATTRELI_EL(priv->rx_stash_size) | | |
398 | ATTRELI_EI(priv->rx_stash_index); | |
399 | ||
400 | gfar_write(&regs->attreli, attrs); |
401 | ||
402 | /* Start with defaults, and add stashing or locking | |
403 | * depending on the appropriate variables */ |
404 | attrs = ATTR_INIT_SETTINGS; | |
405 | ||
406 | if (priv->bd_stash_en) | |
407 | attrs |= ATTR_BDSTASH; | |
408 | ||
409 | if (priv->rx_stash_size != 0) | |
410 | attrs |= ATTR_BUFSTASH; | |
411 | ||
412 | gfar_write(&regs->attr, attrs); |
413 | ||
414 | gfar_write(&regs->fifo_tx_thr, priv->fifo_threshold); |
415 | gfar_write(&regs->fifo_tx_starve, priv->fifo_starve); |
416 | gfar_write(&regs->fifo_tx_starve_shutoff, priv->fifo_starve_off); |
417 | } | |
418 | ||
a7f38041 SG |
419 | static struct net_device_stats *gfar_get_stats(struct net_device *dev) |
420 | { | |
421 | struct gfar_private *priv = netdev_priv(dev); | |
422 | struct netdev_queue *txq; | |
423 | unsigned long rx_packets = 0, rx_bytes = 0, rx_dropped = 0; | |
424 | unsigned long tx_packets = 0, tx_bytes = 0; | |
425 | int i = 0; | |
426 | ||
427 | for (i = 0; i < priv->num_rx_queues; i++) { | |
428 | rx_packets += priv->rx_queue[i]->stats.rx_packets; | |
429 | rx_bytes += priv->rx_queue[i]->stats.rx_bytes; | |
430 | rx_dropped += priv->rx_queue[i]->stats.rx_dropped; | |
431 | } | |
432 | ||
433 | dev->stats.rx_packets = rx_packets; | |
434 | dev->stats.rx_bytes = rx_bytes; | |
435 | dev->stats.rx_dropped = rx_dropped; | |
436 | ||
437 | for (i = 0; i < priv->num_tx_queues; i++) { | |
438 | txq = netdev_get_tx_queue(dev, i); | |
439 | tx_bytes += txq->tx_bytes; | |
440 | tx_packets += txq->tx_packets; | |
441 | } | |
442 | ||
443 | dev->stats.tx_bytes = tx_bytes; | |
444 | dev->stats.tx_packets = tx_packets; | |
445 | ||
446 | return &dev->stats; | |
447 | } | |
448 | ||
26ccfc37 AF |
449 | static const struct net_device_ops gfar_netdev_ops = { |
450 | .ndo_open = gfar_enet_open, | |
451 | .ndo_start_xmit = gfar_start_xmit, | |
452 | .ndo_stop = gfar_close, | |
453 | .ndo_change_mtu = gfar_change_mtu, | |
454 | .ndo_set_multicast_list = gfar_set_multi, | |
455 | .ndo_tx_timeout = gfar_timeout, | |
456 | .ndo_do_ioctl = gfar_ioctl, | |
a7f38041 | 457 | .ndo_get_stats = gfar_get_stats, |
26ccfc37 | 458 | .ndo_vlan_rx_register = gfar_vlan_rx_register, |
240c102d BH |
459 | .ndo_set_mac_address = eth_mac_addr, |
460 | .ndo_validate_addr = eth_validate_addr, | |
26ccfc37 AF |
461 | #ifdef CONFIG_NET_POLL_CONTROLLER |
462 | .ndo_poll_controller = gfar_netpoll, | |
463 | #endif | |
464 | }; | |
465 | ||
7a8b3372 SG |
466 | unsigned int ftp_rqfpr[MAX_FILER_IDX + 1]; |
467 | unsigned int ftp_rqfcr[MAX_FILER_IDX + 1]; | |
468 | ||
fba4ed03 SG |
469 | void lock_rx_qs(struct gfar_private *priv) |
470 | { | |
471 | int i = 0x0; | |
472 | ||
473 | for (i = 0; i < priv->num_rx_queues; i++) | |
474 | spin_lock(&priv->rx_queue[i]->rxlock); | |
475 | } | |
476 | ||
477 | void lock_tx_qs(struct gfar_private *priv) | |
478 | { | |
479 | int i = 0x0; | |
480 | ||
481 | for (i = 0; i < priv->num_tx_queues; i++) | |
482 | spin_lock(&priv->tx_queue[i]->txlock); | |
483 | } | |
484 | ||
485 | void unlock_rx_qs(struct gfar_private *priv) | |
486 | { | |
487 | int i = 0x0; | |
488 | ||
489 | for (i = 0; i < priv->num_rx_queues; i++) | |
490 | spin_unlock(&priv->rx_queue[i]->rxlock); | |
491 | } | |
492 | ||
493 | void unlock_tx_qs(struct gfar_private *priv) | |
494 | { | |
495 | int i = 0x0; | |
496 | ||
497 | for (i = 0; i < priv->num_tx_queues; i++) | |
498 | spin_unlock(&priv->tx_queue[i]->txlock); | |
499 | } | |
500 | ||
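These helpers are always used in the same order elsewhere in this file (see gfar_suspend()/gfar_resume() below): interrupts are disabled locally first, then every TX queue lock is taken, then every RX queue lock, and they are released in reverse. A condensed sketch of that pattern:

```c
unsigned long flags;

local_irq_save(flags);
lock_tx_qs(priv);
lock_rx_qs(priv);

/* ... touch per-queue state while the queues are quiesced ... */

unlock_rx_qs(priv);
unlock_tx_qs(priv);
local_irq_restore(flags);
```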
7f7f5316 AF |
501 | /* Returns 1 if incoming frames use an FCB */ |
502 | static inline int gfar_uses_fcb(struct gfar_private *priv) | |
0bbaf069 | 503 | { |
77ecaf2d | 504 | return priv->vlgrp || priv->rx_csum_enable; |
0bbaf069 | 505 | } |
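The FCB check above feeds the receive path's pull length: when the controller prepends a Frame Control Block to each incoming frame, those GMAC_FCB_LEN bytes (plus any configured padding) must be skipped before the packet reaches the stack. Roughly, the cleanup code later in this file consumes it like this (a sketch, not a verbatim excerpt):

```c
/* Sketch: how gfar_uses_fcb() is typically consumed on the RX path. */
int amount_pull = (gfar_uses_fcb(priv) ? GMAC_FCB_LEN : 0) + priv->padding;

/* gfar_process_frame(dev, skb, amount_pull) then strips these bytes. */
```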
bb40dcbb | 506 | |
fba4ed03 SG |
507 | static void free_tx_pointers(struct gfar_private *priv) |
508 | { | |
509 | int i = 0; | |
510 | ||
511 | for (i = 0; i < priv->num_tx_queues; i++) | |
512 | kfree(priv->tx_queue[i]); | |
513 | } | |
514 | ||
515 | static void free_rx_pointers(struct gfar_private *priv) | |
516 | { | |
517 | int i = 0; | |
518 | ||
519 | for (i = 0; i < priv->num_rx_queues; i++) | |
520 | kfree(priv->rx_queue[i]); | |
521 | } | |
522 | ||
46ceb60c SG |
523 | static void unmap_group_regs(struct gfar_private *priv) |
524 | { | |
525 | int i = 0; | |
526 | ||
527 | for (i = 0; i < MAXGROUPS; i++) | |
528 | if (priv->gfargrp[i].regs) | |
529 | iounmap(priv->gfargrp[i].regs); | |
530 | } | |
531 | ||
532 | static void disable_napi(struct gfar_private *priv) | |
533 | { | |
534 | int i = 0; | |
535 | ||
536 | for (i = 0; i < priv->num_grps; i++) | |
537 | napi_disable(&priv->gfargrp[i].napi); | |
538 | } | |
539 | ||
540 | static void enable_napi(struct gfar_private *priv) | |
541 | { | |
542 | int i = 0; | |
543 | ||
544 | for (i = 0; i < priv->num_grps; i++) | |
545 | napi_enable(&priv->gfargrp[i].napi); | |
546 | } | |
547 | ||
548 | static int gfar_parse_group(struct device_node *np, | |
549 | struct gfar_private *priv, const char *model) | |
550 | { | |
551 | u32 *queue_mask; | |
552 | u64 addr, size; | |
553 | ||
554 | addr = of_translate_address(np, | |
555 | of_get_address(np, 0, &size, NULL)); | |
556 | priv->gfargrp[priv->num_grps].regs = ioremap(addr, size); | |
557 | ||
558 | if (!priv->gfargrp[priv->num_grps].regs) | |
559 | return -ENOMEM; | |
560 | ||
561 | priv->gfargrp[priv->num_grps].interruptTransmit = | |
562 | irq_of_parse_and_map(np, 0); | |
563 | ||
564 | /* If we aren't the FEC we have multiple interrupts */ | |
565 | if (model && strcasecmp(model, "FEC")) { | |
566 | priv->gfargrp[priv->num_grps].interruptReceive = | |
567 | irq_of_parse_and_map(np, 1); | |
568 | priv->gfargrp[priv->num_grps].interruptError = | |
569 | irq_of_parse_and_map(np,2); | |
570 | if (priv->gfargrp[priv->num_grps].interruptTransmit < 0 || | |
571 | priv->gfargrp[priv->num_grps].interruptReceive < 0 || | |
572 | priv->gfargrp[priv->num_grps].interruptError < 0) { | |
573 | return -EINVAL; | |
574 | } | |
575 | } | |
576 | ||
577 | priv->gfargrp[priv->num_grps].grp_id = priv->num_grps; | |
578 | priv->gfargrp[priv->num_grps].priv = priv; | |
579 | spin_lock_init(&priv->gfargrp[priv->num_grps].grplock); | |
580 | if(priv->mode == MQ_MG_MODE) { | |
581 | queue_mask = (u32 *)of_get_property(np, | |
582 | "fsl,rx-bit-map", NULL); | |
583 | priv->gfargrp[priv->num_grps].rx_bit_map = | |
584 | queue_mask ? *queue_mask :(DEFAULT_MAPPING >> priv->num_grps); | |
585 | queue_mask = (u32 *)of_get_property(np, | |
586 | "fsl,tx-bit-map", NULL); | |
587 | priv->gfargrp[priv->num_grps].tx_bit_map = | |
588 | queue_mask ? *queue_mask : (DEFAULT_MAPPING >> priv->num_grps); | |
589 | } else { | |
590 | priv->gfargrp[priv->num_grps].rx_bit_map = 0xFF; | |
591 | priv->gfargrp[priv->num_grps].tx_bit_map = 0xFF; | |
592 | } | |
593 | priv->num_grps++; | |
594 | ||
595 | return 0; | |
596 | } | |
597 | ||
fba4ed03 | 598 | static int gfar_of_init(struct of_device *ofdev, struct net_device **pdev) |
b31a1d8b | 599 | { |
b31a1d8b AF |
600 | const char *model; |
601 | const char *ctype; | |
602 | const void *mac_addr; | |
fba4ed03 SG |
603 | int err = 0, i; |
604 | struct net_device *dev = NULL; | |
605 | struct gfar_private *priv = NULL; | |
606 | struct device_node *np = ofdev->node; | |
46ceb60c | 607 | struct device_node *child = NULL; |
4d7902f2 AF |
608 | const u32 *stash; |
609 | const u32 *stash_len; | |
610 | const u32 *stash_idx; | |
fba4ed03 SG |
611 | unsigned int num_tx_qs, num_rx_qs; |
612 | u32 *tx_queues, *rx_queues; | |
b31a1d8b AF |
613 | |
614 | if (!np || !of_device_is_available(np)) | |
615 | return -ENODEV; | |
616 | ||
fba4ed03 SG |
617 | /* parse the num of tx and rx queues */ |
618 | tx_queues = (u32 *)of_get_property(np, "fsl,num_tx_queues", NULL); | |
619 | num_tx_qs = tx_queues ? *tx_queues : 1; | |
620 | ||
621 | if (num_tx_qs > MAX_TX_QS) { | |
622 | printk(KERN_ERR "num_tx_qs(=%d) greater than MAX_TX_QS(=%d)\n", | |
623 | num_tx_qs, MAX_TX_QS); | |
624 | printk(KERN_ERR "Cannot do alloc_etherdev, aborting\n"); | |
625 | return -EINVAL; | |
626 | } | |
627 | ||
628 | rx_queues = (u32 *)of_get_property(np, "fsl,num_rx_queues", NULL); | |
629 | num_rx_qs = rx_queues ? *rx_queues : 1; | |
630 | ||
631 | if (num_rx_qs > MAX_RX_QS) { | |
632 | printk(KERN_ERR "num_rx_qs(=%d) greater than MAX_RX_QS(=%d)\n", | |
633 | num_rx_qs, MAX_RX_QS); |
634 | printk(KERN_ERR "Cannot do alloc_etherdev, aborting\n"); | |
635 | return -EINVAL; | |
636 | } | |
637 | ||
638 | *pdev = alloc_etherdev_mq(sizeof(*priv), num_tx_qs); | |
639 | dev = *pdev; | |
640 | if (NULL == dev) | |
641 | return -ENOMEM; | |
642 | ||
643 | priv = netdev_priv(dev); | |
644 | priv->node = ofdev->node; | |
645 | priv->ndev = dev; | |
646 | ||
647 | dev->num_tx_queues = num_tx_qs; | |
648 | dev->real_num_tx_queues = num_tx_qs; | |
649 | priv->num_tx_queues = num_tx_qs; | |
650 | priv->num_rx_queues = num_rx_qs; | |
46ceb60c | 651 | priv->num_grps = 0x0; |
b31a1d8b AF |
652 | |
653 | model = of_get_property(np, "model", NULL); | |
654 | ||
46ceb60c SG |
655 | for (i = 0; i < MAXGROUPS; i++) |
656 | priv->gfargrp[i].regs = NULL; | |
b31a1d8b | 657 | |
46ceb60c SG |
658 | /* Parse and initialize group specific information */ |
659 | if (of_device_is_compatible(np, "fsl,etsec2")) { | |
660 | priv->mode = MQ_MG_MODE; | |
661 | for_each_child_of_node(np, child) { | |
662 | err = gfar_parse_group(child, priv, model); | |
663 | if (err) | |
664 | goto err_grp_init; | |
b31a1d8b | 665 | } |
46ceb60c SG |
666 | } else { |
667 | priv->mode = SQ_SG_MODE; | |
668 | err = gfar_parse_group(np, priv, model); | |
669 | if(err) | |
670 | goto err_grp_init; | |
b31a1d8b AF |
671 | } |
672 | ||
fba4ed03 SG |
673 | for (i = 0; i < priv->num_tx_queues; i++) |
674 | priv->tx_queue[i] = NULL; | |
675 | for (i = 0; i < priv->num_rx_queues; i++) | |
676 | priv->rx_queue[i] = NULL; | |
677 | ||
678 | for (i = 0; i < priv->num_tx_queues; i++) { | |
679 | priv->tx_queue[i] = (struct gfar_priv_tx_q *)kmalloc( | |
680 | sizeof (struct gfar_priv_tx_q), GFP_KERNEL); | |
681 | if (!priv->tx_queue[i]) { | |
682 | err = -ENOMEM; | |
683 | goto tx_alloc_failed; | |
684 | } | |
685 | priv->tx_queue[i]->tx_skbuff = NULL; | |
686 | priv->tx_queue[i]->qindex = i; | |
687 | priv->tx_queue[i]->dev = dev; | |
688 | spin_lock_init(&(priv->tx_queue[i]->txlock)); | |
689 | } | |
690 | ||
691 | for (i = 0; i < priv->num_rx_queues; i++) { | |
692 | priv->rx_queue[i] = (struct gfar_priv_rx_q *)kmalloc( | |
693 | sizeof (struct gfar_priv_rx_q), GFP_KERNEL); | |
694 | if (!priv->rx_queue[i]) { | |
695 | err = -ENOMEM; | |
696 | goto rx_alloc_failed; | |
697 | } | |
698 | priv->rx_queue[i]->rx_skbuff = NULL; | |
699 | priv->rx_queue[i]->qindex = i; | |
700 | priv->rx_queue[i]->dev = dev; | |
701 | spin_lock_init(&(priv->rx_queue[i]->rxlock)); | |
702 | } | |
703 | ||
704 | ||
4d7902f2 AF |
705 | stash = of_get_property(np, "bd-stash", NULL); |
706 | ||
a12f801d | 707 | if (stash) { |
4d7902f2 AF |
708 | priv->device_flags |= FSL_GIANFAR_DEV_HAS_BD_STASHING; |
709 | priv->bd_stash_en = 1; | |
710 | } | |
711 | ||
712 | stash_len = of_get_property(np, "rx-stash-len", NULL); | |
713 | ||
714 | if (stash_len) | |
715 | priv->rx_stash_size = *stash_len; | |
716 | ||
717 | stash_idx = of_get_property(np, "rx-stash-idx", NULL); | |
718 | ||
719 | if (stash_idx) | |
720 | priv->rx_stash_index = *stash_idx; | |
721 | ||
722 | if (stash_len || stash_idx) | |
723 | priv->device_flags |= FSL_GIANFAR_DEV_HAS_BUF_STASHING; | |
724 | ||
b31a1d8b AF |
725 | mac_addr = of_get_mac_address(np); |
726 | if (mac_addr) | |
727 | memcpy(dev->dev_addr, mac_addr, MAC_ADDR_LEN); | |
728 | ||
729 | if (model && !strcasecmp(model, "TSEC")) | |
730 | priv->device_flags = | |
731 | FSL_GIANFAR_DEV_HAS_GIGABIT | | |
732 | FSL_GIANFAR_DEV_HAS_COALESCE | | |
733 | FSL_GIANFAR_DEV_HAS_RMON | | |
734 | FSL_GIANFAR_DEV_HAS_MULTI_INTR; | |
735 | if (model && !strcasecmp(model, "eTSEC")) | |
736 | priv->device_flags = | |
737 | FSL_GIANFAR_DEV_HAS_GIGABIT | | |
738 | FSL_GIANFAR_DEV_HAS_COALESCE | | |
739 | FSL_GIANFAR_DEV_HAS_RMON | | |
740 | FSL_GIANFAR_DEV_HAS_MULTI_INTR | | |
2c2db48a | 741 | FSL_GIANFAR_DEV_HAS_PADDING | |
b31a1d8b AF |
742 | FSL_GIANFAR_DEV_HAS_CSUM | |
743 | FSL_GIANFAR_DEV_HAS_VLAN | | |
744 | FSL_GIANFAR_DEV_HAS_MAGIC_PACKET | | |
745 | FSL_GIANFAR_DEV_HAS_EXTENDED_HASH; | |
746 | ||
747 | ctype = of_get_property(np, "phy-connection-type", NULL); | |
748 | ||
749 | /* We only care about rgmii-id. The rest are autodetected */ | |
750 | if (ctype && !strcmp(ctype, "rgmii-id")) | |
751 | priv->interface = PHY_INTERFACE_MODE_RGMII_ID; | |
752 | else | |
753 | priv->interface = PHY_INTERFACE_MODE_MII; | |
754 | ||
755 | if (of_get_property(np, "fsl,magic-packet", NULL)) | |
756 | priv->device_flags |= FSL_GIANFAR_DEV_HAS_MAGIC_PACKET; | |
757 | ||
fe192a49 | 758 | priv->phy_node = of_parse_phandle(np, "phy-handle", 0); |
b31a1d8b AF |
759 | |
760 | /* Find the TBI PHY. If it's not there, we don't support SGMII */ | |
fe192a49 | 761 | priv->tbi_node = of_parse_phandle(np, "tbi-handle", 0); |
b31a1d8b AF |
762 | |
763 | return 0; | |
764 | ||
fba4ed03 SG |
765 | rx_alloc_failed: |
766 | free_rx_pointers(priv); | |
767 | tx_alloc_failed: | |
768 | free_tx_pointers(priv); | |
46ceb60c SG |
769 | err_grp_init: |
770 | unmap_group_regs(priv); | |
fba4ed03 | 771 | free_netdev(dev); |
b31a1d8b AF |
772 | return err; |
773 | } | |
774 | ||
0faac9f7 CW |
775 | /* Ioctl MII Interface */ |
776 | static int gfar_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) | |
777 | { | |
778 | struct gfar_private *priv = netdev_priv(dev); | |
779 | ||
780 | if (!netif_running(dev)) | |
781 | return -EINVAL; | |
782 | ||
783 | if (!priv->phydev) | |
784 | return -ENODEV; | |
785 | ||
786 | return phy_mii_ioctl(priv->phydev, if_mii(rq), cmd); | |
787 | } | |
788 | ||
fba4ed03 SG |
789 | static unsigned int reverse_bitmap(unsigned int bit_map, unsigned int max_qs) |
790 | { | |
791 | unsigned int new_bit_map = 0x0; | |
792 | int mask = 0x1 << (max_qs - 1), i; | |
793 | for (i = 0; i < max_qs; i++) { | |
794 | if (bit_map & mask) | |
795 | new_bit_map = new_bit_map + (1 << i); | |
796 | mask = mask >> 0x1; | |
797 | } | |
798 | return new_bit_map; | |
799 | } | |
7a8b3372 | 800 | |
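A couple of worked examples (illustrative checks, not driver code): because the hardware bit map puts queue 0 in the most significant bit while for_each_set_bit() scans from bit 0 upward, the map has to be mirrored before it can be walked.

```c
/* With max_qs = 8: */
BUG_ON(reverse_bitmap(0x80, 8) != 0x01);	/* MSB (queue 0) -> bit 0 */
BUG_ON(reverse_bitmap(0xC0, 8) != 0x03);	/* queues 0 and 1 -> bits 0 and 1 */
```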
18294ad1 AV |
801 | static u32 cluster_entry_per_class(struct gfar_private *priv, u32 rqfar, |
802 | u32 class) | |
7a8b3372 SG |
803 | { |
804 | u32 rqfpr = FPR_FILER_MASK; | |
805 | u32 rqfcr = 0x0; | |
806 | ||
807 | rqfar--; | |
808 | rqfcr = RQFCR_CLE | RQFCR_PID_MASK | RQFCR_CMP_EXACT; | |
809 | ftp_rqfpr[rqfar] = rqfpr; | |
810 | ftp_rqfcr[rqfar] = rqfcr; | |
811 | gfar_write_filer(priv, rqfar, rqfcr, rqfpr); | |
812 | ||
813 | rqfar--; | |
814 | rqfcr = RQFCR_CMP_NOMATCH; | |
815 | ftp_rqfpr[rqfar] = rqfpr; | |
816 | ftp_rqfcr[rqfar] = rqfcr; | |
817 | gfar_write_filer(priv, rqfar, rqfcr, rqfpr); | |
818 | ||
819 | rqfar--; | |
820 | rqfcr = RQFCR_CMP_EXACT | RQFCR_PID_PARSE | RQFCR_CLE | RQFCR_AND; | |
821 | rqfpr = class; | |
822 | ftp_rqfcr[rqfar] = rqfcr; | |
823 | ftp_rqfpr[rqfar] = rqfpr; | |
824 | gfar_write_filer(priv, rqfar, rqfcr, rqfpr); | |
825 | ||
826 | rqfar--; | |
827 | rqfcr = RQFCR_CMP_EXACT | RQFCR_PID_MASK | RQFCR_AND; | |
828 | rqfpr = class; | |
829 | ftp_rqfcr[rqfar] = rqfcr; | |
830 | ftp_rqfpr[rqfar] = rqfpr; | |
831 | gfar_write_filer(priv, rqfar, rqfcr, rqfpr); | |
832 | ||
833 | return rqfar; | |
834 | } | |
835 | ||
836 | static void gfar_init_filer_table(struct gfar_private *priv) | |
837 | { | |
838 | int i = 0x0; | |
839 | u32 rqfar = MAX_FILER_IDX; | |
840 | u32 rqfcr = 0x0; | |
841 | u32 rqfpr = FPR_FILER_MASK; | |
842 | ||
843 | /* Default rule */ | |
844 | rqfcr = RQFCR_CMP_MATCH; | |
845 | ftp_rqfcr[rqfar] = rqfcr; | |
846 | ftp_rqfpr[rqfar] = rqfpr; | |
847 | gfar_write_filer(priv, rqfar, rqfcr, rqfpr); | |
848 | ||
849 | rqfar = cluster_entry_per_class(priv, rqfar, RQFPR_IPV6); | |
850 | rqfar = cluster_entry_per_class(priv, rqfar, RQFPR_IPV6 | RQFPR_UDP); | |
851 | rqfar = cluster_entry_per_class(priv, rqfar, RQFPR_IPV6 | RQFPR_TCP); | |
852 | rqfar = cluster_entry_per_class(priv, rqfar, RQFPR_IPV4); | |
853 | rqfar = cluster_entry_per_class(priv, rqfar, RQFPR_IPV4 | RQFPR_UDP); | |
854 | rqfar = cluster_entry_per_class(priv, rqfar, RQFPR_IPV4 | RQFPR_TCP); | |
855 | ||
856 | /* cur_filer_idx indicates the first non-masked rule */ |
857 | priv->cur_filer_idx = rqfar; | |
858 | ||
859 | /* Rest are masked rules */ | |
860 | rqfcr = RQFCR_CMP_NOMATCH; | |
861 | for (i = 0; i < rqfar; i++) { | |
862 | ftp_rqfcr[i] = rqfcr; | |
863 | ftp_rqfpr[i] = rqfpr; | |
864 | gfar_write_filer(priv, i, rqfcr, rqfpr); | |
865 | } | |
866 | } | |
867 | ||
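For reference, the arithmetic behind cur_filer_idx (assuming only the six traffic classes programmed above): the default rule sits at MAX_FILER_IDX, and each cluster_entry_per_class() call consumes four entries below it.

```c
/* Illustrative arithmetic, not driver code:
 * cur_filer_idx = MAX_FILER_IDX - 6 * 4;
 * i.e. 24 class entries below the default rule; every index below
 * cur_filer_idx is then programmed as an RQFCR_CMP_NOMATCH rule. */
```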
bb40dcbb AF |
868 | /* Set up the ethernet device structure, private data, |
869 | * and anything else we need before we start */ | |
b31a1d8b AF |
870 | static int gfar_probe(struct of_device *ofdev, |
871 | const struct of_device_id *match) | |
1da177e4 LT |
872 | { |
873 | u32 tempval; | |
874 | struct net_device *dev = NULL; | |
875 | struct gfar_private *priv = NULL; | |
f4983704 | 876 | struct gfar __iomem *regs = NULL; |
46ceb60c | 877 | int err = 0, i, grp_idx = 0; |
c50a5d9a | 878 | int len_devname; |
fba4ed03 | 879 | u32 rstat = 0, tstat = 0, rqueue = 0, tqueue = 0; |
46ceb60c | 880 | u32 isrg = 0; |
18294ad1 | 881 | u32 __iomem *baddr; |
1da177e4 | 882 | |
fba4ed03 | 883 | err = gfar_of_init(ofdev, &dev); |
1da177e4 | 884 | |
fba4ed03 SG |
885 | if (err) |
886 | return err; | |
1da177e4 LT |
887 | |
888 | priv = netdev_priv(dev); | |
4826857f KG |
889 | priv->ndev = dev; |
890 | priv->ofdev = ofdev; | |
b31a1d8b | 891 | priv->node = ofdev->node; |
4826857f | 892 | SET_NETDEV_DEV(dev, &ofdev->dev); |
1da177e4 | 893 | |
d87eb127 | 894 | spin_lock_init(&priv->bflock); |
ab939905 | 895 | INIT_WORK(&priv->reset_task, gfar_reset_task); |
1da177e4 | 896 | |
b31a1d8b | 897 | dev_set_drvdata(&ofdev->dev, priv); |
46ceb60c | 898 | regs = priv->gfargrp[0].regs; |
1da177e4 LT |
899 | |
900 | /* Stop the DMA engine now, in case it was running before */ | |
901 | /* (The firmware could have used it, and left it running). */ | |
257d938a | 902 | gfar_halt(dev); |
1da177e4 LT |
903 | |
904 | /* Reset MAC layer */ | |
f4983704 | 905 | gfar_write(&regs->maccfg1, MACCFG1_SOFT_RESET); |
1da177e4 | 906 | |
b98ac702 AF |
907 | /* We need to delay at least 3 TX clocks */ |
908 | udelay(2); | |
909 | ||
1da177e4 | 910 | tempval = (MACCFG1_TX_FLOW | MACCFG1_RX_FLOW); |
f4983704 | 911 | gfar_write(&regs->maccfg1, tempval); |
1da177e4 LT |
912 | |
913 | /* Initialize MACCFG2. */ | |
f4983704 | 914 | gfar_write(&regs->maccfg2, MACCFG2_INIT_SETTINGS); |
1da177e4 LT |
915 | |
916 | /* Initialize ECNTRL */ | |
f4983704 | 917 | gfar_write(&regs->ecntrl, ECNTRL_INIT_SETTINGS); |
1da177e4 | 918 | |
1da177e4 | 919 | /* Set the dev->base_addr to the gfar reg region */ |
f4983704 | 920 | dev->base_addr = (unsigned long) regs; |
1da177e4 | 921 | |
b31a1d8b | 922 | SET_NETDEV_DEV(dev, &ofdev->dev); |
1da177e4 LT |
923 | |
924 | /* Fill in the dev structure */ | |
1da177e4 | 925 | dev->watchdog_timeo = TX_TIMEOUT; |
1da177e4 | 926 | dev->mtu = 1500; |
26ccfc37 | 927 | dev->netdev_ops = &gfar_netdev_ops; |
0bbaf069 KG |
928 | dev->ethtool_ops = &gfar_ethtool_ops; |
929 | ||
fba4ed03 | 930 | /* Register for NAPI; we register one NAPI instance per group */ |
46ceb60c SG |
931 | for (i = 0; i < priv->num_grps; i++) |
932 | netif_napi_add(dev, &priv->gfargrp[i].napi, gfar_poll, GFAR_DEV_WEIGHT); | |
a12f801d | 933 | |
b31a1d8b | 934 | if (priv->device_flags & FSL_GIANFAR_DEV_HAS_CSUM) { |
0bbaf069 | 935 | priv->rx_csum_enable = 1; |
4669bc90 | 936 | dev->features |= NETIF_F_IP_CSUM | NETIF_F_SG | NETIF_F_HIGHDMA; |
0bbaf069 KG |
937 | } else |
938 | priv->rx_csum_enable = 0; | |
939 | ||
940 | priv->vlgrp = NULL; | |
1da177e4 | 941 | |
26ccfc37 | 942 | if (priv->device_flags & FSL_GIANFAR_DEV_HAS_VLAN) |
0bbaf069 | 943 | dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX; |
0bbaf069 | 944 | |
b31a1d8b | 945 | if (priv->device_flags & FSL_GIANFAR_DEV_HAS_EXTENDED_HASH) { |
0bbaf069 KG |
946 | priv->extended_hash = 1; |
947 | priv->hash_width = 9; | |
948 | ||
f4983704 SG |
949 | priv->hash_regs[0] = &regs->igaddr0; |
950 | priv->hash_regs[1] = &regs->igaddr1; |
951 | priv->hash_regs[2] = &regs->igaddr2; |
952 | priv->hash_regs[3] = &regs->igaddr3; |
953 | priv->hash_regs[4] = &regs->igaddr4; |
954 | priv->hash_regs[5] = &regs->igaddr5; |
955 | priv->hash_regs[6] = &regs->igaddr6; |
956 | priv->hash_regs[7] = &regs->igaddr7; |
957 | priv->hash_regs[8] = &regs->gaddr0; |
958 | priv->hash_regs[9] = &regs->gaddr1; |
959 | priv->hash_regs[10] = &regs->gaddr2; |
960 | priv->hash_regs[11] = &regs->gaddr3; |
961 | priv->hash_regs[12] = &regs->gaddr4; |
962 | priv->hash_regs[13] = &regs->gaddr5; |
963 | priv->hash_regs[14] = &regs->gaddr6; |
964 | priv->hash_regs[15] = &regs->gaddr7; |
0bbaf069 KG |
965 | |
966 | } else { | |
967 | priv->extended_hash = 0; | |
968 | priv->hash_width = 8; | |
969 | ||
f4983704 SG |
970 | priv->hash_regs[0] = &regs->gaddr0; |
971 | priv->hash_regs[1] = &regs->gaddr1; |
972 | priv->hash_regs[2] = &regs->gaddr2; |
973 | priv->hash_regs[3] = &regs->gaddr3; |
974 | priv->hash_regs[4] = &regs->gaddr4; |
975 | priv->hash_regs[5] = &regs->gaddr5; |
976 | priv->hash_regs[6] = &regs->gaddr6; |
977 | priv->hash_regs[7] = &regs->gaddr7; |
0bbaf069 KG |
978 | } |
979 | ||
b31a1d8b | 980 | if (priv->device_flags & FSL_GIANFAR_DEV_HAS_PADDING) |
0bbaf069 KG |
981 | priv->padding = DEFAULT_PADDING; |
982 | else | |
983 | priv->padding = 0; | |
984 | ||
0bbaf069 KG |
985 | if (dev->features & NETIF_F_IP_CSUM) |
986 | dev->hard_header_len += GMAC_FCB_LEN; | |
1da177e4 | 987 | |
46ceb60c SG |
988 | /* Program the isrg regs only if number of grps > 1 */ |
989 | if (priv->num_grps > 1) { | |
990 | baddr = &regs->isrg0; |
991 | for (i = 0; i < priv->num_grps; i++) { | |
992 | isrg |= (priv->gfargrp[i].rx_bit_map << ISRG_SHIFT_RX); | |
993 | isrg |= (priv->gfargrp[i].tx_bit_map << ISRG_SHIFT_TX); | |
994 | gfar_write(baddr, isrg); | |
995 | baddr++; | |
996 | isrg = 0x0; | |
997 | } | |
998 | } | |
999 | ||
fba4ed03 | 1000 | /* Need to reverse the bit maps as bit_map's MSB is q0 |
984b3f57 | 1001 | * but for_each_set_bit parses from right to left, which |
fba4ed03 | 1002 | * basically reverses the queue numbers */ |
46ceb60c SG |
1003 | for (i = 0; i< priv->num_grps; i++) { |
1004 | priv->gfargrp[i].tx_bit_map = reverse_bitmap( | |
1005 | priv->gfargrp[i].tx_bit_map, MAX_TX_QS); | |
1006 | priv->gfargrp[i].rx_bit_map = reverse_bitmap( | |
1007 | priv->gfargrp[i].rx_bit_map, MAX_RX_QS); | |
1008 | } | |
1009 | ||
1010 | /* Calculate RSTAT, TSTAT, RQUEUE and TQUEUE values, | |
1011 | * also assign queues to groups */ | |
1012 | for (grp_idx = 0; grp_idx < priv->num_grps; grp_idx++) { | |
1013 | priv->gfargrp[grp_idx].num_rx_queues = 0x0; | |
984b3f57 | 1014 | for_each_set_bit(i, &priv->gfargrp[grp_idx].rx_bit_map, |
46ceb60c SG |
1015 | priv->num_rx_queues) { |
1016 | priv->gfargrp[grp_idx].num_rx_queues++; | |
1017 | priv->rx_queue[i]->grp = &priv->gfargrp[grp_idx]; | |
1018 | rstat = rstat | (RSTAT_CLEAR_RHALT >> i); | |
1019 | rqueue = rqueue | ((RQUEUE_EN0 | RQUEUE_EX0) >> i); | |
1020 | } | |
1021 | priv->gfargrp[grp_idx].num_tx_queues = 0x0; | |
984b3f57 | 1022 | for_each_set_bit(i, &priv->gfargrp[grp_idx].tx_bit_map, |
46ceb60c SG |
1023 | priv->num_tx_queues) { |
1024 | priv->gfargrp[grp_idx].num_tx_queues++; | |
1025 | priv->tx_queue[i]->grp = &priv->gfargrp[grp_idx]; | |
1026 | tstat = tstat | (TSTAT_CLEAR_THALT >> i); | |
1027 | tqueue = tqueue | (TQUEUE_EN0 >> i); | |
1028 | } | |
1029 | priv->gfargrp[grp_idx].rstat = rstat; | |
1030 | priv->gfargrp[grp_idx].tstat = tstat; | |
1031 | rstat = tstat =0; | |
fba4ed03 | 1032 | } |
fba4ed03 SG |
1033 | |
1034 | gfar_write(&regs->rqueue, rqueue); |
1035 | gfar_write(&regs->tqueue, tqueue); |
1036 | ||
1da177e4 | 1037 | priv->rx_buffer_size = DEFAULT_RX_BUFFER_SIZE; |
1da177e4 | 1038 | |
a12f801d | 1039 | /* Initializing some of the rx/tx queue level parameters */ |
fba4ed03 SG |
1040 | for (i = 0; i < priv->num_tx_queues; i++) { |
1041 | priv->tx_queue[i]->tx_ring_size = DEFAULT_TX_RING_SIZE; | |
1042 | priv->tx_queue[i]->num_txbdfree = DEFAULT_TX_RING_SIZE; | |
1043 | priv->tx_queue[i]->txcoalescing = DEFAULT_TX_COALESCE; | |
1044 | priv->tx_queue[i]->txic = DEFAULT_TXIC; | |
1045 | } | |
a12f801d | 1046 | |
fba4ed03 SG |
1047 | for (i = 0; i < priv->num_rx_queues; i++) { |
1048 | priv->rx_queue[i]->rx_ring_size = DEFAULT_RX_RING_SIZE; | |
1049 | priv->rx_queue[i]->rxcoalescing = DEFAULT_RX_COALESCE; | |
1050 | priv->rx_queue[i]->rxic = DEFAULT_RXIC; | |
1051 | } | |
1da177e4 | 1052 | |
1ccb8389 SG |
1053 | /* enable filer if using multiple RX queues*/ |
1054 | if(priv->num_rx_queues > 1) | |
1055 | priv->rx_filer_enable = 1; | |
0bbaf069 KG |
1056 | /* Enable most messages by default */ |
1057 | priv->msg_enable = (NETIF_MSG_IFUP << 1 ) - 1; | |
1058 | ||
d3eab82b TP |
1059 | /* Carrier starts down, phylib will bring it up */ |
1060 | netif_carrier_off(dev); | |
1061 | ||
1da177e4 LT |
1062 | err = register_netdev(dev); |
1063 | ||
1064 | if (err) { | |
1065 | printk(KERN_ERR "%s: Cannot register net device, aborting.\n", | |
1066 | dev->name); | |
1067 | goto register_fail; | |
1068 | } | |
1069 | ||
2884e5cc AV |
1070 | device_init_wakeup(&dev->dev, |
1071 | priv->device_flags & FSL_GIANFAR_DEV_HAS_MAGIC_PACKET); | |
1072 | ||
c50a5d9a DH |
1073 | /* fill out IRQ number and name fields */ |
1074 | len_devname = strlen(dev->name); | |
46ceb60c SG |
1075 | for (i = 0; i < priv->num_grps; i++) { |
1076 | strncpy(&priv->gfargrp[i].int_name_tx[0], dev->name, | |
1077 | len_devname); | |
1078 | if (priv->device_flags & FSL_GIANFAR_DEV_HAS_MULTI_INTR) { | |
1079 | strncpy(&priv->gfargrp[i].int_name_tx[len_devname], | |
1080 | "_g", sizeof("_g")); | |
1081 | priv->gfargrp[i].int_name_tx[ | |
1082 | strlen(priv->gfargrp[i].int_name_tx)] = i+48; | |
1083 | strncpy(&priv->gfargrp[i].int_name_tx[strlen( | |
1084 | priv->gfargrp[i].int_name_tx)], | |
1085 | "_tx", sizeof("_tx") + 1); | |
1086 | ||
1087 | strncpy(&priv->gfargrp[i].int_name_rx[0], dev->name, | |
1088 | len_devname); | |
1089 | strncpy(&priv->gfargrp[i].int_name_rx[len_devname], | |
1090 | "_g", sizeof("_g")); | |
1091 | priv->gfargrp[i].int_name_rx[ | |
1092 | strlen(priv->gfargrp[i].int_name_rx)] = i+48; | |
1093 | strncpy(&priv->gfargrp[i].int_name_rx[strlen( | |
1094 | priv->gfargrp[i].int_name_rx)], | |
1095 | "_rx", sizeof("_rx") + 1); | |
1096 | ||
1097 | strncpy(&priv->gfargrp[i].int_name_er[0], dev->name, | |
1098 | len_devname); | |
1099 | strncpy(&priv->gfargrp[i].int_name_er[len_devname], | |
1100 | "_g", sizeof("_g")); | |
1101 | priv->gfargrp[i].int_name_er[strlen( | |
1102 | priv->gfargrp[i].int_name_er)] = i+48; | |
1103 | strncpy(&priv->gfargrp[i].int_name_er[strlen(\ | |
1104 | priv->gfargrp[i].int_name_er)], | |
1105 | "_er", sizeof("_er") + 1); | |
1106 | } else | |
1107 | priv->gfargrp[i].int_name_tx[len_devname] = '\0'; | |
1108 | } | |
c50a5d9a | 1109 | |
7a8b3372 SG |
1110 | /* Initialize the filer table */ |
1111 | gfar_init_filer_table(priv); | |
1112 | ||
7f7f5316 AF |
1113 | /* Create all the sysfs files */ |
1114 | gfar_init_sysfs(dev); | |
1115 | ||
1da177e4 | 1116 | /* Print out the device info */ |
e174961c | 1117 | printk(KERN_INFO DEVICE_NAME "%pM\n", dev->name, dev->dev_addr); |
1da177e4 LT |
1118 | |
1119 | /* Even more device info helps when determining which kernel */ | |
7f7f5316 | 1120 | /* provided which set of benchmarks. */ |
1da177e4 | 1121 | printk(KERN_INFO "%s: Running with NAPI enabled\n", dev->name); |
fba4ed03 SG |
1122 | for (i = 0; i < priv->num_rx_queues; i++) |
1123 | printk(KERN_INFO "%s: :RX BD ring size for Q[%d]: %d\n", | |
1124 | dev->name, i, priv->rx_queue[i]->rx_ring_size); | |
1125 | for(i = 0; i < priv->num_tx_queues; i++) | |
1126 | printk(KERN_INFO "%s: TX BD ring size for Q[%d]: %d\n", |
1127 | dev->name, i, priv->tx_queue[i]->tx_ring_size); | |
1da177e4 LT |
1128 | |
1129 | return 0; | |
1130 | ||
1131 | register_fail: | |
46ceb60c | 1132 | unmap_group_regs(priv); |
fba4ed03 SG |
1133 | free_tx_pointers(priv); |
1134 | free_rx_pointers(priv); | |
fe192a49 GL |
1135 | if (priv->phy_node) |
1136 | of_node_put(priv->phy_node); | |
1137 | if (priv->tbi_node) | |
1138 | of_node_put(priv->tbi_node); | |
1da177e4 | 1139 | free_netdev(dev); |
bb40dcbb | 1140 | return err; |
1da177e4 LT |
1141 | } |
1142 | ||
b31a1d8b | 1143 | static int gfar_remove(struct of_device *ofdev) |
1da177e4 | 1144 | { |
b31a1d8b | 1145 | struct gfar_private *priv = dev_get_drvdata(&ofdev->dev); |
1da177e4 | 1146 | |
fe192a49 GL |
1147 | if (priv->phy_node) |
1148 | of_node_put(priv->phy_node); | |
1149 | if (priv->tbi_node) | |
1150 | of_node_put(priv->tbi_node); | |
1151 | ||
b31a1d8b | 1152 | dev_set_drvdata(&ofdev->dev, NULL); |
1da177e4 | 1153 | |
d9d8e041 | 1154 | unregister_netdev(priv->ndev); |
46ceb60c | 1155 | unmap_group_regs(priv); |
4826857f | 1156 | free_netdev(priv->ndev); |
1da177e4 LT |
1157 | |
1158 | return 0; | |
1159 | } | |
1160 | ||
d87eb127 | 1161 | #ifdef CONFIG_PM |
be926fc4 AV |
1162 | |
1163 | static int gfar_suspend(struct device *dev) | |
d87eb127 | 1164 | { |
be926fc4 AV |
1165 | struct gfar_private *priv = dev_get_drvdata(dev); |
1166 | struct net_device *ndev = priv->ndev; | |
46ceb60c | 1167 | struct gfar __iomem *regs = priv->gfargrp[0].regs; |
d87eb127 SW |
1168 | unsigned long flags; |
1169 | u32 tempval; | |
1170 | ||
1171 | int magic_packet = priv->wol_en && | |
b31a1d8b | 1172 | (priv->device_flags & FSL_GIANFAR_DEV_HAS_MAGIC_PACKET); |
d87eb127 | 1173 | |
be926fc4 | 1174 | netif_device_detach(ndev); |
d87eb127 | 1175 | |
be926fc4 | 1176 | if (netif_running(ndev)) { |
fba4ed03 SG |
1177 | |
1178 | local_irq_save(flags); | |
1179 | lock_tx_qs(priv); | |
1180 | lock_rx_qs(priv); | |
d87eb127 | 1181 | |
be926fc4 | 1182 | gfar_halt_nodisable(ndev); |
d87eb127 SW |
1183 | |
1184 | /* Disable Tx, and Rx if wake-on-LAN is disabled. */ | |
f4983704 | 1185 | tempval = gfar_read(&regs->maccfg1); |
d87eb127 SW |
1186 | |
1187 | tempval &= ~MACCFG1_TX_EN; | |
1188 | ||
1189 | if (!magic_packet) | |
1190 | tempval &= ~MACCFG1_RX_EN; | |
1191 | ||
f4983704 | 1192 | gfar_write(&regs->maccfg1, tempval); |
d87eb127 | 1193 | |
fba4ed03 SG |
1194 | unlock_rx_qs(priv); |
1195 | unlock_tx_qs(priv); | |
1196 | local_irq_restore(flags); | |
d87eb127 | 1197 | |
46ceb60c | 1198 | disable_napi(priv); |
d87eb127 SW |
1199 | |
1200 | if (magic_packet) { | |
1201 | /* Enable interrupt on Magic Packet */ | |
f4983704 | 1202 | gfar_write(&regs->imask, IMASK_MAG); |
d87eb127 SW |
1203 | |
1204 | /* Enable Magic Packet mode */ | |
f4983704 | 1205 | tempval = gfar_read(&regs->maccfg2); |
d87eb127 | 1206 | tempval |= MACCFG2_MPEN; |
f4983704 | 1207 | gfar_write(&regs->maccfg2, tempval); |
d87eb127 SW |
1208 | } else { |
1209 | phy_stop(priv->phydev); | |
1210 | } | |
1211 | } | |
1212 | ||
1213 | return 0; | |
1214 | } | |
1215 | ||
be926fc4 | 1216 | static int gfar_resume(struct device *dev) |
d87eb127 | 1217 | { |
be926fc4 AV |
1218 | struct gfar_private *priv = dev_get_drvdata(dev); |
1219 | struct net_device *ndev = priv->ndev; | |
46ceb60c | 1220 | struct gfar __iomem *regs = priv->gfargrp[0].regs; |
d87eb127 SW |
1221 | unsigned long flags; |
1222 | u32 tempval; | |
1223 | int magic_packet = priv->wol_en && | |
b31a1d8b | 1224 | (priv->device_flags & FSL_GIANFAR_DEV_HAS_MAGIC_PACKET); |
d87eb127 | 1225 | |
be926fc4 AV |
1226 | if (!netif_running(ndev)) { |
1227 | netif_device_attach(ndev); | |
d87eb127 SW |
1228 | return 0; |
1229 | } | |
1230 | ||
1231 | if (!magic_packet && priv->phydev) | |
1232 | phy_start(priv->phydev); | |
1233 | ||
1234 | /* Disable Magic Packet mode, in case something | |
1235 | * else woke us up. | |
1236 | */ | |
fba4ed03 SG |
1237 | local_irq_save(flags); |
1238 | lock_tx_qs(priv); | |
1239 | lock_rx_qs(priv); | |
d87eb127 | 1240 | |
f4983704 | 1241 | tempval = gfar_read(&regs->maccfg2); |
d87eb127 | 1242 | tempval &= ~MACCFG2_MPEN; |
f4983704 | 1243 | gfar_write(&regs->maccfg2, tempval); |
d87eb127 | 1244 | |
be926fc4 | 1245 | gfar_start(ndev); |
d87eb127 | 1246 | |
fba4ed03 SG |
1247 | unlock_rx_qs(priv); |
1248 | unlock_tx_qs(priv); | |
1249 | local_irq_restore(flags); | |
d87eb127 | 1250 | |
be926fc4 AV |
1251 | netif_device_attach(ndev); |
1252 | ||
46ceb60c | 1253 | enable_napi(priv); |
be926fc4 AV |
1254 | |
1255 | return 0; | |
1256 | } | |
1257 | ||
1258 | static int gfar_restore(struct device *dev) | |
1259 | { | |
1260 | struct gfar_private *priv = dev_get_drvdata(dev); | |
1261 | struct net_device *ndev = priv->ndev; | |
1262 | ||
1263 | if (!netif_running(ndev)) | |
1264 | return 0; | |
1265 | ||
1266 | gfar_init_bds(ndev); | |
1267 | init_registers(ndev); | |
1268 | gfar_set_mac_address(ndev); | |
1269 | gfar_init_mac(ndev); | |
1270 | gfar_start(ndev); | |
1271 | ||
1272 | priv->oldlink = 0; | |
1273 | priv->oldspeed = 0; | |
1274 | priv->oldduplex = -1; | |
1275 | ||
1276 | if (priv->phydev) | |
1277 | phy_start(priv->phydev); | |
d87eb127 | 1278 | |
be926fc4 | 1279 | netif_device_attach(ndev); |
5ea681d4 | 1280 | enable_napi(priv); |
d87eb127 SW |
1281 | |
1282 | return 0; | |
1283 | } | |
be926fc4 AV |
1284 | |
1285 | static struct dev_pm_ops gfar_pm_ops = { | |
1286 | .suspend = gfar_suspend, | |
1287 | .resume = gfar_resume, | |
1288 | .freeze = gfar_suspend, | |
1289 | .thaw = gfar_resume, | |
1290 | .restore = gfar_restore, | |
1291 | }; | |
1292 | ||
1293 | #define GFAR_PM_OPS (&gfar_pm_ops) | |
1294 | ||
1295 | static int gfar_legacy_suspend(struct of_device *ofdev, pm_message_t state) | |
1296 | { | |
1297 | return gfar_suspend(&ofdev->dev); | |
1298 | } | |
1299 | ||
1300 | static int gfar_legacy_resume(struct of_device *ofdev) | |
1301 | { | |
1302 | return gfar_resume(&ofdev->dev); | |
1303 | } | |
1304 | ||
d87eb127 | 1305 | #else |
be926fc4 AV |
1306 | |
1307 | #define GFAR_PM_OPS NULL | |
1308 | #define gfar_legacy_suspend NULL | |
1309 | #define gfar_legacy_resume NULL | |
1310 | ||
d87eb127 | 1311 | #endif |
1da177e4 | 1312 | |
e8a2b6a4 AF |
1313 | /* Reads the controller's registers to determine what interface |
1314 | * connects it to the PHY. | |
1315 | */ | |
1316 | static phy_interface_t gfar_get_interface(struct net_device *dev) | |
1317 | { | |
1318 | struct gfar_private *priv = netdev_priv(dev); | |
46ceb60c | 1319 | struct gfar __iomem *regs = priv->gfargrp[0].regs; |
f4983704 SG |
1320 | u32 ecntrl; |
1321 | ||
f4983704 | 1322 | ecntrl = gfar_read(&regs->ecntrl); |
e8a2b6a4 AF |
1323 | |
1324 | if (ecntrl & ECNTRL_SGMII_MODE) | |
1325 | return PHY_INTERFACE_MODE_SGMII; | |
1326 | ||
1327 | if (ecntrl & ECNTRL_TBI_MODE) { | |
1328 | if (ecntrl & ECNTRL_REDUCED_MODE) | |
1329 | return PHY_INTERFACE_MODE_RTBI; | |
1330 | else | |
1331 | return PHY_INTERFACE_MODE_TBI; | |
1332 | } | |
1333 | ||
1334 | if (ecntrl & ECNTRL_REDUCED_MODE) { | |
1335 | if (ecntrl & ECNTRL_REDUCED_MII_MODE) | |
1336 | return PHY_INTERFACE_MODE_RMII; | |
7132ab7f | 1337 | else { |
b31a1d8b | 1338 | phy_interface_t interface = priv->interface; |
7132ab7f AF |
1339 | |
1340 | /* | |
1341 | * This isn't autodetected right now, so it must | |
1342 | * be set by the device tree or platform code. | |
1343 | */ | |
1344 | if (interface == PHY_INTERFACE_MODE_RGMII_ID) | |
1345 | return PHY_INTERFACE_MODE_RGMII_ID; | |
1346 | ||
e8a2b6a4 | 1347 | return PHY_INTERFACE_MODE_RGMII; |
7132ab7f | 1348 | } |
e8a2b6a4 AF |
1349 | } |
1350 | ||
b31a1d8b | 1351 | if (priv->device_flags & FSL_GIANFAR_DEV_HAS_GIGABIT) |
e8a2b6a4 AF |
1352 | return PHY_INTERFACE_MODE_GMII; |
1353 | ||
1354 | return PHY_INTERFACE_MODE_MII; | |
1355 | } | |
1356 | ||
1357 | ||
bb40dcbb AF |
1358 | /* Initializes driver's PHY state, and attaches to the PHY. |
1359 | * Returns 0 on success. | |
1da177e4 LT |
1360 | */ |
1361 | static int init_phy(struct net_device *dev) | |
1362 | { | |
1363 | struct gfar_private *priv = netdev_priv(dev); | |
bb40dcbb | 1364 | uint gigabit_support = |
b31a1d8b | 1365 | priv->device_flags & FSL_GIANFAR_DEV_HAS_GIGABIT ? |
bb40dcbb | 1366 | SUPPORTED_1000baseT_Full : 0; |
e8a2b6a4 | 1367 | phy_interface_t interface; |
1da177e4 LT |
1368 | |
1369 | priv->oldlink = 0; | |
1370 | priv->oldspeed = 0; | |
1371 | priv->oldduplex = -1; | |
1372 | ||
e8a2b6a4 AF |
1373 | interface = gfar_get_interface(dev); |
1374 | ||
1db780f8 AV |
1375 | priv->phydev = of_phy_connect(dev, priv->phy_node, &adjust_link, 0, |
1376 | interface); | |
1377 | if (!priv->phydev) | |
1378 | priv->phydev = of_phy_connect_fixed_link(dev, &adjust_link, | |
1379 | interface); | |
1380 | if (!priv->phydev) { | |
1381 | dev_err(&dev->dev, "could not attach to PHY\n"); | |
1382 | return -ENODEV; | |
fe192a49 | 1383 | } |
1da177e4 | 1384 | |
d3c12873 KJ |
1385 | if (interface == PHY_INTERFACE_MODE_SGMII) |
1386 | gfar_configure_serdes(dev); | |
1387 | ||
bb40dcbb | 1388 | /* Remove any features not supported by the controller */ |
fe192a49 GL |
1389 | priv->phydev->supported &= (GFAR_SUPPORTED | gigabit_support); |
1390 | priv->phydev->advertising = priv->phydev->supported; | |
1da177e4 LT |
1391 | |
1392 | return 0; | |
1da177e4 LT |
1393 | } |
1394 | ||
d0313587 PG |
1395 | /* |
1396 | * Initialize TBI PHY interface for communicating with the | |
1397 | * SERDES lynx PHY on the chip. We communicate with this PHY | |
1398 | * through the MDIO bus on each controller, treating it as a | |
1399 | * "normal" PHY at the address found in the TBIPA register. We assume | |
1400 | * that the TBIPA register is valid. Either the MDIO bus code will set | |
1401 | * it to a value that doesn't conflict with other PHYs on the bus, or the | |
1402 | * value doesn't matter, as there are no other PHYs on the bus. | |
1403 | */ | |
d3c12873 KJ |
1404 | static void gfar_configure_serdes(struct net_device *dev) |
1405 | { | |
1406 | struct gfar_private *priv = netdev_priv(dev); | |
fe192a49 GL |
1407 | struct phy_device *tbiphy; |
1408 | ||
1409 | if (!priv->tbi_node) { | |
1410 | dev_warn(&dev->dev, "error: SGMII mode requires that the " | |
1411 | "device tree specify a tbi-handle\n"); | |
1412 | return; | |
1413 | } | |
c132419e | 1414 | |
fe192a49 GL |
1415 | tbiphy = of_phy_find_device(priv->tbi_node); |
1416 | if (!tbiphy) { | |
1417 | dev_err(&dev->dev, "error: Could not get TBI device\n"); | |
b31a1d8b AF |
1418 | return; |
1419 | } | |
d3c12873 | 1420 | |
b31a1d8b AF |
1421 | /* |
1422 | * If the link is already up, we must already be ok, and don't need to | |
bdb59f94 TP |
1423 | * configure and reset the TBI<->SerDes link. Maybe U-Boot configured |
1424 | * everything for us? Resetting it takes the link down and requires | |
1425 | * several seconds for it to come back. | |
1426 | */ | |
fe192a49 | 1427 | if (phy_read(tbiphy, MII_BMSR) & BMSR_LSTATUS) |
b31a1d8b | 1428 | return; |
d3c12873 | 1429 | |
d0313587 | 1430 | /* Single clk mode, mii mode off(for serdes communication) */ |
fe192a49 | 1431 | phy_write(tbiphy, MII_TBICON, TBICON_CLK_SELECT); |
d3c12873 | 1432 | |
fe192a49 | 1433 | phy_write(tbiphy, MII_ADVERTISE, |
d3c12873 KJ |
1434 | ADVERTISE_1000XFULL | ADVERTISE_1000XPAUSE | |
1435 | ADVERTISE_1000XPSE_ASYM); | |
1436 | ||
fe192a49 | 1437 | phy_write(tbiphy, MII_BMCR, BMCR_ANENABLE | |
d3c12873 KJ |
1438 | BMCR_ANRESTART | BMCR_FULLDPLX | BMCR_SPEED1000); |
1439 | } | |
1440 | ||
1da177e4 LT |
1441 | static void init_registers(struct net_device *dev) |
1442 | { | |
1443 | struct gfar_private *priv = netdev_priv(dev); | |
f4983704 | 1444 | struct gfar __iomem *regs = NULL; |
46ceb60c | 1445 | int i = 0; |
1da177e4 | 1446 | |
46ceb60c SG |
1447 | for (i = 0; i < priv->num_grps; i++) { |
1448 | regs = priv->gfargrp[i].regs; | |
1449 | /* Clear IEVENT */ | |
1450 | gfar_write(&regs->ievent, IEVENT_INIT_CLEAR); |
1da177e4 | 1451 | |
46ceb60c SG |
1452 | /* Initialize IMASK */ |
1453 | gfar_write(&regs->imask, IMASK_INIT_CLEAR); |
1454 | } | |
1da177e4 | 1455 | |
46ceb60c | 1456 | regs = priv->gfargrp[0].regs; |
1da177e4 | 1457 | /* Init hash registers to zero */ |
f4983704 SG |
1458 | gfar_write(®s->igaddr0, 0); |
1459 | gfar_write(®s->igaddr1, 0); | |
1460 | gfar_write(®s->igaddr2, 0); | |
1461 | gfar_write(®s->igaddr3, 0); | |
1462 | gfar_write(®s->igaddr4, 0); | |
1463 | gfar_write(®s->igaddr5, 0); | |
1464 | gfar_write(®s->igaddr6, 0); | |
1465 | gfar_write(®s->igaddr7, 0); | |
1466 | ||
1467 | gfar_write(®s->gaddr0, 0); | |
1468 | gfar_write(®s->gaddr1, 0); | |
1469 | gfar_write(®s->gaddr2, 0); | |
1470 | gfar_write(®s->gaddr3, 0); | |
1471 | gfar_write(®s->gaddr4, 0); | |
1472 | gfar_write(®s->gaddr5, 0); | |
1473 | gfar_write(®s->gaddr6, 0); | |
1474 | gfar_write(®s->gaddr7, 0); | |
1da177e4 | 1475 | |
1da177e4 | 1476 | /* Zero out the rmon mib registers if it has them */ |
b31a1d8b | 1477 | if (priv->device_flags & FSL_GIANFAR_DEV_HAS_RMON) { |
f4983704 | 1478 | memset_io(&(regs->rmon), 0, sizeof (struct rmon_mib)); |
1da177e4 LT |
1479 | |
1480 | /* Mask off the CAM interrupts */ | |
f4983704 SG |
1481 | gfar_write(®s->rmon.cam1, 0xffffffff); |
1482 | gfar_write(®s->rmon.cam2, 0xffffffff); | |
1da177e4 LT |
1483 | } |
1484 | ||
1485 | /* Initialize the max receive buffer length */ | |
f4983704 | 1486 | gfar_write(®s->mrblr, priv->rx_buffer_size); |
1da177e4 | 1487 | |
1da177e4 | 1488 | /* Initialize the Minimum Frame Length Register */ |
f4983704 | 1489 | gfar_write(®s->minflr, MINFLR_INIT_SETTINGS); |
1da177e4 LT |
1490 | } |
1491 | ||
0bbaf069 KG |
1492 | |
1493 | /* Halt the receive and transmit queues */ | |
d87eb127 | 1494 | static void gfar_halt_nodisable(struct net_device *dev) |
1da177e4 LT |
1495 | { |
1496 | struct gfar_private *priv = netdev_priv(dev); | |
46ceb60c | 1497 | struct gfar __iomem *regs = NULL; |
1da177e4 | 1498 | u32 tempval; |
46ceb60c | 1499 | int i = 0; |
1da177e4 | 1500 | |
46ceb60c SG |
1501 | for (i = 0; i < priv->num_grps; i++) { |
1502 | regs = priv->gfargrp[i].regs; | |
1503 | /* Mask all interrupts */ | |
1504 | gfar_write(®s->imask, IMASK_INIT_CLEAR); | |
1da177e4 | 1505 | |
46ceb60c SG |
1506 | /* Clear all interrupts */ |
1507 | gfar_write(®s->ievent, IEVENT_INIT_CLEAR); | |
1508 | } | |
1da177e4 | 1509 | |
46ceb60c | 1510 | regs = priv->gfargrp[0].regs; |
1da177e4 | 1511 | /* Stop the DMA, and wait for it to stop */ |
f4983704 | 1512 | tempval = gfar_read(®s->dmactrl); |
1da177e4 LT |
1513 | if ((tempval & (DMACTRL_GRS | DMACTRL_GTS)) |
1514 | != (DMACTRL_GRS | DMACTRL_GTS)) { | |
1515 | tempval |= (DMACTRL_GRS | DMACTRL_GTS); | |
f4983704 | 1516 | gfar_write(®s->dmactrl, tempval); |
1da177e4 | 1517 | |
f4983704 | 1518 | while (!(gfar_read(®s->ievent) & |
1da177e4 LT |
1519 | (IEVENT_GRSC | IEVENT_GTSC))) |
1520 | cpu_relax(); | |
1521 | } | |
d87eb127 | 1522 | } |
d87eb127 SW |
1523 | |
1524 | /* Halt the receive and transmit queues */ | |
1525 | void gfar_halt(struct net_device *dev) | |
1526 | { | |
1527 | struct gfar_private *priv = netdev_priv(dev); | |
46ceb60c | 1528 | struct gfar __iomem *regs = priv->gfargrp[0].regs; |
d87eb127 | 1529 | u32 tempval; |
1da177e4 | 1530 | |
2a54adc3 SW |
1531 | gfar_halt_nodisable(dev); |
1532 | ||
1da177e4 LT |
1533 | /* Disable Rx and Tx */ |
1534 | tempval = gfar_read(®s->maccfg1); | |
1535 | tempval &= ~(MACCFG1_RX_EN | MACCFG1_TX_EN); | |
1536 | gfar_write(®s->maccfg1, tempval); | |
0bbaf069 KG |
1537 | } |
1538 | ||
46ceb60c SG |
1539 | static void free_grp_irqs(struct gfar_priv_grp *grp) |
1540 | { | |
1541 | free_irq(grp->interruptError, grp); | |
1542 | free_irq(grp->interruptTransmit, grp); | |
1543 | free_irq(grp->interruptReceive, grp); | |
1544 | } | |
1545 | ||
0bbaf069 KG |
1546 | void stop_gfar(struct net_device *dev) |
1547 | { | |
1548 | struct gfar_private *priv = netdev_priv(dev); | |
0bbaf069 | 1549 | unsigned long flags; |
46ceb60c | 1550 | int i; |
0bbaf069 | 1551 | |
bb40dcbb AF |
1552 | phy_stop(priv->phydev); |
1553 | ||
a12f801d | 1554 | |
0bbaf069 | 1555 | /* Lock it down */ |
fba4ed03 SG |
1556 | local_irq_save(flags); |
1557 | lock_tx_qs(priv); | |
1558 | lock_rx_qs(priv); | |
0bbaf069 | 1559 | |
0bbaf069 | 1560 | gfar_halt(dev); |
1da177e4 | 1561 | |
fba4ed03 SG |
1562 | unlock_rx_qs(priv); |
1563 | unlock_tx_qs(priv); | |
1564 | local_irq_restore(flags); | |
1da177e4 LT |
1565 | |
1566 | /* Free the IRQs */ | |
b31a1d8b | 1567 | if (priv->device_flags & FSL_GIANFAR_DEV_HAS_MULTI_INTR) { |
46ceb60c SG |
1568 | for (i = 0; i < priv->num_grps; i++) |
1569 | free_grp_irqs(&priv->gfargrp[i]); | |
1da177e4 | 1570 | } else { |
46ceb60c SG |
1571 | for (i = 0; i < priv->num_grps; i++) |
1572 | free_irq(priv->gfargrp[i].interruptTransmit, | |
1573 | &priv->gfargrp[i]); | |
1da177e4 LT |
1574 | } |
1575 | ||
1576 | free_skb_resources(priv); | |
1da177e4 LT |
1577 | } |
1578 | ||
fba4ed03 | 1579 | static void free_skb_tx_queue(struct gfar_priv_tx_q *tx_queue) |
1da177e4 | 1580 | { |
1da177e4 | 1581 | struct txbd8 *txbdp; |
fba4ed03 | 1582 | struct gfar_private *priv = netdev_priv(tx_queue->dev); |
4669bc90 | 1583 | int i, j; |
1da177e4 | 1584 | |
a12f801d | 1585 | txbdp = tx_queue->tx_bd_base; |
1da177e4 | 1586 | |
a12f801d SG |
1587 | for (i = 0; i < tx_queue->tx_ring_size; i++) { |
1588 | if (!tx_queue->tx_skbuff[i]) | |
4669bc90 | 1589 | continue; |
1da177e4 | 1590 | |
4826857f | 1591 | dma_unmap_single(&priv->ofdev->dev, txbdp->bufPtr, |
4669bc90 DH |
1592 | txbdp->length, DMA_TO_DEVICE); |
1593 | txbdp->lstatus = 0; | |
fba4ed03 SG |
1594 | for (j = 0; j < skb_shinfo(tx_queue->tx_skbuff[i])->nr_frags; |
1595 | j++) { | |
4669bc90 | 1596 | txbdp++; |
4826857f | 1597 | dma_unmap_page(&priv->ofdev->dev, txbdp->bufPtr, |
4669bc90 | 1598 | txbdp->length, DMA_TO_DEVICE); |
1da177e4 | 1599 | } |
ad5da7ab | 1600 | txbdp++; |
a12f801d SG |
1601 | dev_kfree_skb_any(tx_queue->tx_skbuff[i]); |
1602 | tx_queue->tx_skbuff[i] = NULL; | |
1da177e4 | 1603 | } |
a12f801d | 1604 | kfree(tx_queue->tx_skbuff); |
fba4ed03 | 1605 | } |
1da177e4 | 1606 | |
fba4ed03 SG |
1607 | static void free_skb_rx_queue(struct gfar_priv_rx_q *rx_queue) |
1608 | { | |
1609 | struct rxbd8 *rxbdp; | |
1610 | struct gfar_private *priv = netdev_priv(rx_queue->dev); | |
1611 | int i; | |
1da177e4 | 1612 | |
fba4ed03 | 1613 | rxbdp = rx_queue->rx_bd_base; |
1da177e4 | 1614 | |
a12f801d SG |
1615 | for (i = 0; i < rx_queue->rx_ring_size; i++) { |
1616 | if (rx_queue->rx_skbuff[i]) { | |
fba4ed03 SG |
1617 | dma_unmap_single(&priv->ofdev->dev, |
1618 | rxbdp->bufPtr, priv->rx_buffer_size, | |
e69edd21 | 1619 | DMA_FROM_DEVICE); |
a12f801d SG |
1620 | dev_kfree_skb_any(rx_queue->rx_skbuff[i]); |
1621 | rx_queue->rx_skbuff[i] = NULL; | |
1da177e4 | 1622 | } |
e69edd21 AV |
1623 | rxbdp->lstatus = 0; |
1624 | rxbdp->bufPtr = 0; | |
1625 | rxbdp++; | |
1da177e4 | 1626 | } |
a12f801d | 1627 | kfree(rx_queue->rx_skbuff); |
fba4ed03 | 1628 | } |
e69edd21 | 1629 | |
fba4ed03 SG |
1630 | /* If there are any tx skbs or rx skbs still around, free them. |
1631 | * Then free tx_skbuff and rx_skbuff */ | |
1632 | static void free_skb_resources(struct gfar_private *priv) | |
1633 | { | |
1634 | struct gfar_priv_tx_q *tx_queue = NULL; | |
1635 | struct gfar_priv_rx_q *rx_queue = NULL; | |
1636 | int i; | |
1637 | ||
1638 | /* Go through all the buffer descriptors and free their data buffers */ | |
1639 | for (i = 0; i < priv->num_tx_queues; i++) { | |
1640 | tx_queue = priv->tx_queue[i]; | |
1641 | if (tx_queue->tx_skbuff) |
1642 | free_skb_tx_queue(tx_queue); | |
1643 | } | |
1644 | ||
1645 | for (i = 0; i < priv->num_rx_queues; i++) { | |
1646 | rx_queue = priv->rx_queue[i]; | |
1647 | if (rx_queue->rx_skbuff) |
1648 | free_skb_rx_queue(rx_queue); | |
1649 | } | |
1650 | ||
1651 | dma_free_coherent(&priv->ofdev->dev, | |
1652 | sizeof(struct txbd8) * priv->total_tx_ring_size + | |
1653 | sizeof(struct rxbd8) * priv->total_rx_ring_size, | |
1654 | priv->tx_queue[0]->tx_bd_base, | |
1655 | priv->tx_queue[0]->tx_bd_dma_base); | |
1da177e4 LT |
1656 | } |
1657 | ||
0bbaf069 KG |
1658 | void gfar_start(struct net_device *dev) |
1659 | { | |
1660 | struct gfar_private *priv = netdev_priv(dev); | |
46ceb60c | 1661 | struct gfar __iomem *regs = priv->gfargrp[0].regs; |
0bbaf069 | 1662 | u32 tempval; |
46ceb60c | 1663 | int i = 0; |
0bbaf069 KG |
1664 | |
1665 | /* Enable Rx and Tx in MACCFG1 */ | |
1666 | tempval = gfar_read(®s->maccfg1); | |
1667 | tempval |= (MACCFG1_RX_EN | MACCFG1_TX_EN); | |
1668 | gfar_write(®s->maccfg1, tempval); | |
1669 | ||
1670 | /* Initialize DMACTRL to have WWR and WOP */ | |
f4983704 | 1671 | tempval = gfar_read(®s->dmactrl); |
0bbaf069 | 1672 | tempval |= DMACTRL_INIT_SETTINGS; |
f4983704 | 1673 | gfar_write(®s->dmactrl, tempval); |
0bbaf069 | 1674 | |
0bbaf069 | 1675 | /* Make sure we aren't stopped */ |
f4983704 | 1676 | tempval = gfar_read(®s->dmactrl); |
0bbaf069 | 1677 | tempval &= ~(DMACTRL_GRS | DMACTRL_GTS); |
f4983704 | 1678 | gfar_write(®s->dmactrl, tempval); |
0bbaf069 | 1679 | |
46ceb60c SG |
1680 | for (i = 0; i < priv->num_grps; i++) { |
1681 | regs = priv->gfargrp[i].regs; | |
1682 | /* Clear THLT/RHLT, so that the DMA starts polling now */ | |
1683 | gfar_write(®s->tstat, priv->gfargrp[i].tstat); | |
1684 | gfar_write(®s->rstat, priv->gfargrp[i].rstat); | |
1685 | /* Unmask the interrupts we look for */ | |
1686 | gfar_write(®s->imask, IMASK_DEFAULT); | |
1687 | } | |
12dea57b DH |
1688 | |
1689 | dev->trans_start = jiffies; | |
0bbaf069 KG |
1690 | } |
1691 | ||
46ceb60c | 1692 | void gfar_configure_coalescing(struct gfar_private *priv, |
18294ad1 | 1693 | unsigned long tx_mask, unsigned long rx_mask) |
1da177e4 | 1694 | { |
46ceb60c | 1695 | struct gfar __iomem *regs = priv->gfargrp[0].regs; |
18294ad1 | 1696 | u32 __iomem *baddr; |
46ceb60c | 1697 | int i = 0; |
1da177e4 | 1698 | |
46ceb60c SG |
1699 | /* Backward compatible case ---- even if we enable |
1700 | * multiple queues, there's only a single reg to program |
1701 | */ | |
1702 | gfar_write(®s->txic, 0); | |
1703 | if(likely(priv->tx_queue[0]->txcoalescing)) | |
1704 | gfar_write(®s->txic, priv->tx_queue[0]->txic); | |
1da177e4 | 1705 | |
46ceb60c SG |
1706 | gfar_write(®s->rxic, 0); |
1707 | if(unlikely(priv->rx_queue[0]->rxcoalescing)) | |
1708 | gfar_write(®s->rxic, priv->rx_queue[0]->rxic); | |
815b97c6 | 1709 | |
46ceb60c SG |
1710 | if (priv->mode == MQ_MG_MODE) { |
1711 | baddr = ®s->txic0; | |
984b3f57 | 1712 | for_each_set_bit(i, &tx_mask, priv->num_tx_queues) { |
46ceb60c SG |
1713 | if (likely(priv->tx_queue[i]->txcoalescing)) { |
1714 | gfar_write(baddr + i, 0); | |
1715 | gfar_write(baddr + i, priv->tx_queue[i]->txic); | |
1716 | } | |
1717 | } | |
1718 | ||
1719 | baddr = ®s->rxic0; | |
984b3f57 | 1720 | for_each_set_bit(i, &rx_mask, priv->num_rx_queues) { |
46ceb60c SG |
1721 | if (likely(priv->rx_queue[i]->rxcoalescing)) { |
1722 | gfar_write(baddr + i, 0); | |
1723 | gfar_write(baddr + i, priv->rx_queue[i]->rxic); | |
1724 | } | |
1725 | } | |
1726 | } | |
1727 | } | |
1728 | ||
1729 | static int register_grp_irqs(struct gfar_priv_grp *grp) | |
1730 | { | |
1731 | struct gfar_private *priv = grp->priv; | |
1732 | struct net_device *dev = priv->ndev; | |
1733 | int err; | |
1da177e4 | 1734 | |
1da177e4 LT |
1735 | /* If the device has multiple interrupts, register for |
1736 | * them. Otherwise, only register for the one */ | |
b31a1d8b | 1737 | if (priv->device_flags & FSL_GIANFAR_DEV_HAS_MULTI_INTR) { |
0bbaf069 | 1738 | /* Install our interrupt handlers for Error, |
1da177e4 | 1739 | * Transmit, and Receive */ |
46ceb60c SG |
1740 | if ((err = request_irq(grp->interruptError, gfar_error, 0, |
1741 | grp->int_name_er,grp)) < 0) { | |
0bbaf069 | 1742 | if (netif_msg_intr(priv)) |
46ceb60c SG |
1743 | printk(KERN_ERR "%s: Can't get IRQ %d\n", |
1744 | dev->name, grp->interruptError); | |
1745 | ||
1746 | goto err_irq_fail; | |
1da177e4 LT |
1747 | } |
1748 | ||
46ceb60c SG |
1749 | if ((err = request_irq(grp->interruptTransmit, gfar_transmit, |
1750 | 0, grp->int_name_tx, grp)) < 0) { | |
0bbaf069 | 1751 | if (netif_msg_intr(priv)) |
46ceb60c SG |
1752 | printk(KERN_ERR "%s: Can't get IRQ %d\n", |
1753 | dev->name, grp->interruptTransmit); | |
1da177e4 LT |
1754 | goto tx_irq_fail; |
1755 | } | |
1756 | ||
46ceb60c SG |
1757 | if ((err = request_irq(grp->interruptReceive, gfar_receive, 0, |
1758 | grp->int_name_rx, grp)) < 0) { | |
0bbaf069 | 1759 | if (netif_msg_intr(priv)) |
46ceb60c SG |
1760 | printk(KERN_ERR "%s: Can't get IRQ %d\n", |
1761 | dev->name, grp->interruptReceive); | |
1da177e4 LT |
1762 | goto rx_irq_fail; |
1763 | } | |
1764 | } else { | |
46ceb60c SG |
1765 | if ((err = request_irq(grp->interruptTransmit, gfar_interrupt, 0, |
1766 | grp->int_name_tx, grp)) < 0) { | |
0bbaf069 | 1767 | if (netif_msg_intr(priv)) |
46ceb60c SG |
1768 | printk(KERN_ERR "%s: Can't get IRQ %d\n", |
1769 | dev->name, grp->interruptTransmit); | |
1da177e4 LT |
1770 | goto err_irq_fail; |
1771 | } | |
1772 | } | |
1773 | ||
46ceb60c SG |
1774 | return 0; |
1775 | ||
1776 | rx_irq_fail: | |
1777 | free_irq(grp->interruptTransmit, grp); | |
1778 | tx_irq_fail: | |
1779 | free_irq(grp->interruptError, grp); | |
1780 | err_irq_fail: | |
1781 | return err; | |
1782 | ||
1783 | } | |
1784 | ||
1785 | /* Bring the controller up and running */ | |
1786 | int startup_gfar(struct net_device *ndev) | |
1787 | { | |
1788 | struct gfar_private *priv = netdev_priv(ndev); | |
1789 | struct gfar __iomem *regs = NULL; | |
1790 | int err, i, j; | |
1791 | ||
1792 | for (i = 0; i < priv->num_grps; i++) { | |
1793 | regs= priv->gfargrp[i].regs; | |
1794 | gfar_write(®s->imask, IMASK_INIT_CLEAR); | |
1795 | } | |
1796 | ||
1797 | regs= priv->gfargrp[0].regs; | |
1798 | err = gfar_alloc_skb_resources(ndev); | |
1799 | if (err) | |
1800 | return err; | |
1801 | ||
1802 | gfar_init_mac(ndev); | |
1803 | ||
1804 | for (i = 0; i < priv->num_grps; i++) { | |
1805 | err = register_grp_irqs(&priv->gfargrp[i]); | |
1806 | if (err) { | |
1807 | for (j = 0; j < i; j++) | |
1808 | free_grp_irqs(&priv->gfargrp[j]); | |
1809 | goto irq_fail; | |
1810 | } | |
1811 | } | |
1812 | ||
7f7f5316 | 1813 | /* Start the controller */ |
ccc05c6e | 1814 | gfar_start(ndev); |
1da177e4 | 1815 | |
826aa4a0 AV |
1816 | phy_start(priv->phydev); |
1817 | ||
46ceb60c SG |
1818 | gfar_configure_coalescing(priv, 0xFF, 0xFF); |
1819 | ||
1da177e4 LT |
1820 | return 0; |
1821 | ||
46ceb60c | 1822 | irq_fail: |
e69edd21 | 1823 | free_skb_resources(priv); |
1da177e4 LT |
1824 | return err; |
1825 | } | |
1826 | ||
1827 | /* Called when something needs to use the ethernet device */ | |
1828 | /* Returns 0 for success. */ | |
1829 | static int gfar_enet_open(struct net_device *dev) | |
1830 | { | |
94e8cc35 | 1831 | struct gfar_private *priv = netdev_priv(dev); |
1da177e4 LT |
1832 | int err; |
1833 | ||
46ceb60c | 1834 | enable_napi(priv); |
bea3348e | 1835 | |
0fd56bb5 AF |
1836 | skb_queue_head_init(&priv->rx_recycle); |
1837 | ||
1da177e4 LT |
1838 | /* Initialize a bunch of registers */ |
1839 | init_registers(dev); | |
1840 | ||
1841 | gfar_set_mac_address(dev); | |
1842 | ||
1843 | err = init_phy(dev); | |
1844 | ||
a12f801d | 1845 | if (err) { |
46ceb60c | 1846 | disable_napi(priv); |
1da177e4 | 1847 | return err; |
bea3348e | 1848 | } |
1da177e4 LT |
1849 | |
1850 | err = startup_gfar(dev); | |
db0e8e3f | 1851 | if (err) { |
46ceb60c | 1852 | disable_napi(priv); |
db0e8e3f AV |
1853 | return err; |
1854 | } | |
1da177e4 | 1855 | |
fba4ed03 | 1856 | netif_tx_start_all_queues(dev); |
1da177e4 | 1857 | |
2884e5cc AV |
1858 | device_set_wakeup_enable(&dev->dev, priv->wol_en); |
1859 | ||
1da177e4 LT |
1860 | return err; |
1861 | } | |
1862 | ||
54dc79fe | 1863 | static inline struct txfcb *gfar_add_fcb(struct sk_buff *skb) |
0bbaf069 | 1864 | { |
54dc79fe | 1865 | struct txfcb *fcb = (struct txfcb *)skb_push(skb, GMAC_FCB_LEN); |
6c31d55f KG |
1866 | |
1867 | memset(fcb, 0, GMAC_FCB_LEN); | |
0bbaf069 | 1868 | |
0bbaf069 KG |
1869 | return fcb; |
1870 | } | |
1871 | ||
1872 | static inline void gfar_tx_checksum(struct sk_buff *skb, struct txfcb *fcb) | |
1873 | { | |
7f7f5316 | 1874 | u8 flags = 0; |
0bbaf069 KG |
1875 | |
1876 | /* If we're here, it's an IP packet with a TCP or UDP |
1877 | * payload. We set it up to be checksummed, using a pseudo-header |
1878 | * we provide |
1879 | */ | |
7f7f5316 | 1880 | flags = TXFCB_DEFAULT; |
0bbaf069 | 1881 | |
7f7f5316 AF |
1882 | /* Tell the controller what the protocol is */ |
1883 | /* And provide the already calculated phcs */ | |
eddc9ec5 | 1884 | if (ip_hdr(skb)->protocol == IPPROTO_UDP) { |
7f7f5316 | 1885 | flags |= TXFCB_UDP; |
4bedb452 | 1886 | fcb->phcs = udp_hdr(skb)->check; |
7f7f5316 | 1887 | } else |
8da32de5 | 1888 | fcb->phcs = tcp_hdr(skb)->check; |
0bbaf069 KG |
1889 | |
1890 | /* l3os is the distance between the start of the | |
1891 | * frame (skb->data) and the start of the IP hdr. | |
1892 | * l4os is the distance between the start of the | |
1893 | * l3 hdr and the l4 hdr */ | |
bbe735e4 | 1894 | fcb->l3os = (u16)(skb_network_offset(skb) - GMAC_FCB_LEN); |
cfe1fc77 | 1895 | fcb->l4os = skb_network_header_len(skb); |
0bbaf069 | 1896 | |
7f7f5316 | 1897 | fcb->flags = flags; |
0bbaf069 KG |
1898 | } |
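/* A rough worked example of the offsets set above (illustrative only,
 * assuming an untagged IPv4 frame with no IP options): gfar_add_fcb()
 * has already pushed the 8-byte FCB in front of the Ethernet header,
 * so skb_network_offset() is 14 + 8 = 22, giving l3os = 22 - GMAC_FCB_LEN
 * = 14 (the Ethernet header), and l4os = skb_network_header_len() = 20
 * (the IPv4 header length). */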
1899 | ||
7f7f5316 | 1900 | void inline gfar_tx_vlan(struct sk_buff *skb, struct txfcb *fcb) |
0bbaf069 | 1901 | { |
7f7f5316 | 1902 | fcb->flags |= TXFCB_VLN; |
0bbaf069 KG |
1903 | fcb->vlctl = vlan_tx_tag_get(skb); |
1904 | } | |
1905 | ||
4669bc90 DH |
1906 | static inline struct txbd8 *skip_txbd(struct txbd8 *bdp, int stride, |
1907 | struct txbd8 *base, int ring_size) | |
1908 | { | |
1909 | struct txbd8 *new_bd = bdp + stride; | |
1910 | ||
1911 | return (new_bd >= (base + ring_size)) ? (new_bd - ring_size) : new_bd; | |
1912 | } | |
1913 | ||
1914 | static inline struct txbd8 *next_txbd(struct txbd8 *bdp, struct txbd8 *base, | |
1915 | int ring_size) | |
1916 | { | |
1917 | return skip_txbd(bdp, 1, base, ring_size); | |
1918 | } | |
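/* Illustrative example of the wrap logic above: with a ring of, say,
 * 8 descriptors, skip_txbd(base + 7, 1, base, 8) computes base + 8,
 * which is one past the end of the ring, so it wraps back to base.
 * next_txbd() is simply the stride-1 case. */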
1919 | ||
1da177e4 LT |
1920 | /* This is called by the kernel when a frame is ready for transmission. */ |
1921 | /* It is pointed to by the dev->hard_start_xmit function pointer */ | |
1922 | static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev) | |
1923 | { | |
1924 | struct gfar_private *priv = netdev_priv(dev); | |
a12f801d | 1925 | struct gfar_priv_tx_q *tx_queue = NULL; |
fba4ed03 | 1926 | struct netdev_queue *txq; |
f4983704 | 1927 | struct gfar __iomem *regs = NULL; |
0bbaf069 | 1928 | struct txfcb *fcb = NULL; |
4669bc90 | 1929 | struct txbd8 *txbdp, *txbdp_start, *base; |
5a5efed4 | 1930 | u32 lstatus; |
fba4ed03 | 1931 | int i, rq = 0; |
4669bc90 | 1932 | u32 bufaddr; |
fef6108d | 1933 | unsigned long flags; |
4669bc90 DH |
1934 | unsigned int nr_frags, length; |
1935 | ||
fba4ed03 SG |
1936 | |
1937 | rq = skb->queue_mapping; | |
1938 | tx_queue = priv->tx_queue[rq]; | |
1939 | txq = netdev_get_tx_queue(dev, rq); | |
a12f801d | 1940 | base = tx_queue->tx_bd_base; |
46ceb60c | 1941 | regs = tx_queue->grp->regs; |
4669bc90 | 1942 | |
5b28beaf LY |
1943 | /* make space for additional header when fcb is needed */ |
1944 | if (((skb->ip_summed == CHECKSUM_PARTIAL) || | |
1945 | (priv->vlgrp && vlan_tx_tag_present(skb))) && | |
1946 | (skb_headroom(skb) < GMAC_FCB_LEN)) { | |
54dc79fe SH |
1947 | struct sk_buff *skb_new; |
1948 | ||
1949 | skb_new = skb_realloc_headroom(skb, GMAC_FCB_LEN); | |
1950 | if (!skb_new) { | |
1951 | dev->stats.tx_errors++; | |
bd14ba84 | 1952 | kfree_skb(skb); |
54dc79fe SH |
1953 | return NETDEV_TX_OK; |
1954 | } | |
1955 | kfree_skb(skb); | |
1956 | skb = skb_new; | |
1957 | } | |
1958 | ||
4669bc90 DH |
1959 | /* total number of fragments in the SKB */ |
1960 | nr_frags = skb_shinfo(skb)->nr_frags; | |
1961 | ||
4669bc90 | 1962 | /* check if there is space to queue this packet */ |
a12f801d | 1963 | if ((nr_frags+1) > tx_queue->num_txbdfree) { |
4669bc90 | 1964 | /* no space, stop the queue */ |
fba4ed03 | 1965 | netif_tx_stop_queue(txq); |
4669bc90 | 1966 | dev->stats.tx_fifo_errors++; |
4669bc90 DH |
1967 | return NETDEV_TX_BUSY; |
1968 | } | |
1da177e4 LT |
1969 | |
1970 | /* Update transmit stats */ | |
a7f38041 SG |
1971 | txq->tx_bytes += skb->len; |
1972 | txq->tx_packets ++; | |
1da177e4 | 1973 | |
a12f801d | 1974 | txbdp = txbdp_start = tx_queue->cur_tx; |
1da177e4 | 1975 | |
4669bc90 DH |
1976 | if (nr_frags == 0) { |
1977 | lstatus = txbdp->lstatus | BD_LFLAG(TXBD_LAST | TXBD_INTERRUPT); | |
1978 | } else { | |
1979 | /* Place the fragment addresses and lengths into the TxBDs */ | |
1980 | for (i = 0; i < nr_frags; i++) { | |
1981 | /* Point at the next BD, wrapping as needed */ | |
a12f801d | 1982 | txbdp = next_txbd(txbdp, base, tx_queue->tx_ring_size); |
4669bc90 DH |
1983 | |
1984 | length = skb_shinfo(skb)->frags[i].size; | |
1985 | ||
1986 | lstatus = txbdp->lstatus | length | | |
1987 | BD_LFLAG(TXBD_READY); | |
1988 | ||
1989 | /* Handle the last BD specially */ | |
1990 | if (i == nr_frags - 1) | |
1991 | lstatus |= BD_LFLAG(TXBD_LAST | TXBD_INTERRUPT); | |
1da177e4 | 1992 | |
4826857f | 1993 | bufaddr = dma_map_page(&priv->ofdev->dev, |
4669bc90 DH |
1994 | skb_shinfo(skb)->frags[i].page, |
1995 | skb_shinfo(skb)->frags[i].page_offset, | |
1996 | length, | |
1997 | DMA_TO_DEVICE); | |
1998 | ||
1999 | /* set the TxBD length and buffer pointer */ | |
2000 | txbdp->bufPtr = bufaddr; | |
2001 | txbdp->lstatus = lstatus; | |
2002 | } | |
2003 | ||
2004 | lstatus = txbdp_start->lstatus; | |
2005 | } | |
1da177e4 | 2006 | |
0bbaf069 | 2007 | /* Set up checksumming */ |
12dea57b | 2008 | if (CHECKSUM_PARTIAL == skb->ip_summed) { |
54dc79fe SH |
2009 | fcb = gfar_add_fcb(skb); |
2010 | lstatus |= BD_LFLAG(TXBD_TOE); | |
2011 | gfar_tx_checksum(skb, fcb); | |
0bbaf069 KG |
2012 | } |
2013 | ||
77ecaf2d | 2014 | if (priv->vlgrp && vlan_tx_tag_present(skb)) { |
54dc79fe SH |
2015 | if (unlikely(NULL == fcb)) { |
2016 | fcb = gfar_add_fcb(skb); | |
5a5efed4 | 2017 | lstatus |= BD_LFLAG(TXBD_TOE); |
7f7f5316 | 2018 | } |
54dc79fe SH |
2019 | |
2020 | gfar_tx_vlan(skb, fcb); | |
0bbaf069 KG |
2021 | } |
2022 | ||
4669bc90 | 2023 | /* setup the TxBD length and buffer pointer for the first BD */ |
4826857f | 2024 | txbdp_start->bufPtr = dma_map_single(&priv->ofdev->dev, skb->data, |
4669bc90 | 2025 | skb_headlen(skb), DMA_TO_DEVICE); |
1da177e4 | 2026 | |
4669bc90 | 2027 | lstatus |= BD_LFLAG(TXBD_CRC | TXBD_READY) | skb_headlen(skb); |
1da177e4 | 2028 | |
a3bc1f11 AV |
2029 | /* |
2030 | * We can work in parallel with gfar_clean_tx_ring(), except | |
2031 | * when modifying num_txbdfree. Note that we didn't grab the lock | |
2032 | * when we were reading the num_txbdfree and checking for available | |
2033 | * space, that's because outside of this function it can only grow, | |
2034 | * and once we've got needed space, it cannot suddenly disappear. | |
2035 | * | |
2036 | * The lock also protects us from gfar_error(), which can modify | |
2037 | * regs->tstat and thus retrigger the transfers, which is why we | |
2038 | * also must grab the lock before setting ready bit for the first | |
2039 | * to be transmitted BD. | |
2040 | */ | |
2041 | spin_lock_irqsave(&tx_queue->txlock, flags); | |
2042 | ||
4669bc90 DH |
2043 | /* |
2044 | * The powerpc-specific eieio() is used, as wmb() has too strong | |
3b6330ce SW |
2045 | * semantics (it requires synchronization between cacheable and |
2046 | * uncacheable mappings, which eieio doesn't provide and which we | |
2047 | * don't need), thus requiring a more expensive sync instruction. At | |
2048 | * some point, the set of architecture-independent barrier functions | |
2049 | * should be expanded to include weaker barriers. | |
2050 | */ | |
3b6330ce | 2051 | eieio(); |
7f7f5316 | 2052 | |
4669bc90 DH |
2053 | txbdp_start->lstatus = lstatus; |
2054 | ||
0eddba52 AV |
2055 | eieio(); /* force lstatus write before tx_skbuff */ |
2056 | ||
2057 | tx_queue->tx_skbuff[tx_queue->skb_curtx] = skb; | |
2058 | ||
4669bc90 DH |
2059 | /* Update the current skb pointer to the next entry we will use |
2060 | * (wrapping if necessary) */ | |
a12f801d SG |
2061 | tx_queue->skb_curtx = (tx_queue->skb_curtx + 1) & |
2062 | TX_RING_MOD_MASK(tx_queue->tx_ring_size); | |
4669bc90 | 2063 | |
a12f801d | 2064 | tx_queue->cur_tx = next_txbd(txbdp, base, tx_queue->tx_ring_size); |
4669bc90 DH |
2065 | |
2066 | /* reduce TxBD free count */ | |
a12f801d | 2067 | tx_queue->num_txbdfree -= (nr_frags + 1); |
4669bc90 DH |
2068 | |
2069 | dev->trans_start = jiffies; | |
1da177e4 LT |
2070 | |
2071 | /* If the next BD still needs to be cleaned up, then the bds | |
2072 | are full. We need to tell the kernel to stop sending us stuff. */ | |
a12f801d | 2073 | if (!tx_queue->num_txbdfree) { |
fba4ed03 | 2074 | netif_tx_stop_queue(txq); |
1da177e4 | 2075 | |
09f75cd7 | 2076 | dev->stats.tx_fifo_errors++; |
1da177e4 LT |
2077 | } |
2078 | ||
1da177e4 | 2079 | /* Tell the DMA to go go go */ |
fba4ed03 | 2080 | gfar_write(®s->tstat, TSTAT_CLEAR_THALT >> tx_queue->qindex); |
1da177e4 LT |
2081 | |
2082 | /* Unlock priv */ | |
a12f801d | 2083 | spin_unlock_irqrestore(&tx_queue->txlock, flags); |
1da177e4 | 2084 | |
54dc79fe | 2085 | return NETDEV_TX_OK; |
1da177e4 LT |
2086 | } |
2087 | ||
2088 | /* Stops the kernel queue, and halts the controller */ | |
2089 | static int gfar_close(struct net_device *dev) | |
2090 | { | |
2091 | struct gfar_private *priv = netdev_priv(dev); | |
bea3348e | 2092 | |
46ceb60c | 2093 | disable_napi(priv); |
bea3348e | 2094 | |
0fd56bb5 | 2095 | skb_queue_purge(&priv->rx_recycle); |
ab939905 | 2096 | cancel_work_sync(&priv->reset_task); |
1da177e4 LT |
2097 | stop_gfar(dev); |
2098 | ||
bb40dcbb AF |
2099 | /* Disconnect from the PHY */ |
2100 | phy_disconnect(priv->phydev); | |
2101 | priv->phydev = NULL; | |
1da177e4 | 2102 | |
fba4ed03 | 2103 | netif_tx_stop_all_queues(dev); |
1da177e4 LT |
2104 | |
2105 | return 0; | |
2106 | } | |
2107 | ||
1da177e4 | 2108 | /* Changes the mac address if the controller is not running. */ |
f162b9d5 | 2109 | static int gfar_set_mac_address(struct net_device *dev) |
1da177e4 | 2110 | { |
7f7f5316 | 2111 | gfar_set_mac_for_addr(dev, 0, dev->dev_addr); |
1da177e4 LT |
2112 | |
2113 | return 0; | |
2114 | } | |
2115 | ||
2116 | ||
0bbaf069 KG |
2117 | /* Enables and disables VLAN insertion/extraction */ |
2118 | static void gfar_vlan_rx_register(struct net_device *dev, | |
2119 | struct vlan_group *grp) | |
2120 | { | |
2121 | struct gfar_private *priv = netdev_priv(dev); | |
f4983704 | 2122 | struct gfar __iomem *regs = NULL; |
0bbaf069 KG |
2123 | unsigned long flags; |
2124 | u32 tempval; | |
2125 | ||
46ceb60c | 2126 | regs = priv->gfargrp[0].regs; |
fba4ed03 SG |
2127 | local_irq_save(flags); |
2128 | lock_rx_qs(priv); | |
0bbaf069 | 2129 | |
cd1f55a5 | 2130 | priv->vlgrp = grp; |
0bbaf069 KG |
2131 | |
2132 | if (grp) { | |
2133 | /* Enable VLAN tag insertion */ | |
f4983704 | 2134 | tempval = gfar_read(®s->tctrl); |
0bbaf069 KG |
2135 | tempval |= TCTRL_VLINS; |
2136 | ||
f4983704 | 2137 | gfar_write(®s->tctrl, tempval); |
6aa20a22 | 2138 | |
0bbaf069 | 2139 | /* Enable VLAN tag extraction */ |
f4983704 | 2140 | tempval = gfar_read(®s->rctrl); |
77ecaf2d | 2141 | tempval |= (RCTRL_VLEX | RCTRL_PRSDEP_INIT); |
f4983704 | 2142 | gfar_write(®s->rctrl, tempval); |
0bbaf069 KG |
2143 | } else { |
2144 | /* Disable VLAN tag insertion */ | |
f4983704 | 2145 | tempval = gfar_read(®s->tctrl); |
0bbaf069 | 2146 | tempval &= ~TCTRL_VLINS; |
f4983704 | 2147 | gfar_write(®s->tctrl, tempval); |
0bbaf069 KG |
2148 | |
2149 | /* Disable VLAN tag extraction */ | |
f4983704 | 2150 | tempval = gfar_read(®s->rctrl); |
0bbaf069 | 2151 | tempval &= ~RCTRL_VLEX; |
77ecaf2d DH |
2152 | /* If parse is no longer required, then disable parser */ |
2153 | if (tempval & RCTRL_REQ_PARSER) | |
2154 | tempval |= RCTRL_PRSDEP_INIT; | |
2155 | else | |
2156 | tempval &= ~RCTRL_PRSDEP_INIT; | |
f4983704 | 2157 | gfar_write(®s->rctrl, tempval); |
0bbaf069 KG |
2158 | } |
2159 | ||
77ecaf2d DH |
2160 | gfar_change_mtu(dev, dev->mtu); |
2161 | ||
fba4ed03 SG |
2162 | unlock_rx_qs(priv); |
2163 | local_irq_restore(flags); | |
0bbaf069 KG |
2164 | } |
2165 | ||
1da177e4 LT |
2166 | static int gfar_change_mtu(struct net_device *dev, int new_mtu) |
2167 | { | |
2168 | int tempsize, tempval; | |
2169 | struct gfar_private *priv = netdev_priv(dev); | |
46ceb60c | 2170 | struct gfar __iomem *regs = priv->gfargrp[0].regs; |
1da177e4 | 2171 | int oldsize = priv->rx_buffer_size; |
0bbaf069 KG |
2172 | int frame_size = new_mtu + ETH_HLEN; |
2173 | ||
77ecaf2d | 2174 | if (priv->vlgrp) |
faa89577 | 2175 | frame_size += VLAN_HLEN; |
0bbaf069 | 2176 | |
1da177e4 | 2177 | if ((frame_size < 64) || (frame_size > JUMBO_FRAME_SIZE)) { |
0bbaf069 KG |
2178 | if (netif_msg_drv(priv)) |
2179 | printk(KERN_ERR "%s: Invalid MTU setting\n", | |
2180 | dev->name); | |
1da177e4 LT |
2181 | return -EINVAL; |
2182 | } | |
2183 | ||
77ecaf2d DH |
2184 | if (gfar_uses_fcb(priv)) |
2185 | frame_size += GMAC_FCB_LEN; | |
2186 | ||
2187 | frame_size += priv->padding; | |
2188 | ||
1da177e4 LT |
2189 | tempsize = |
2190 | (frame_size & ~(INCREMENTAL_BUFFER_SIZE - 1)) + | |
2191 | INCREMENTAL_BUFFER_SIZE; | |
2192 | ||
2193 | /* Only stop and start the controller if it isn't already | |
7f7f5316 | 2194 | * stopped, and we changed something */ |
1da177e4 LT |
2195 | if ((oldsize != tempsize) && (dev->flags & IFF_UP)) |
2196 | stop_gfar(dev); | |
2197 | ||
2198 | priv->rx_buffer_size = tempsize; | |
2199 | ||
2200 | dev->mtu = new_mtu; | |
2201 | ||
f4983704 SG |
2202 | gfar_write(®s->mrblr, priv->rx_buffer_size); |
2203 | gfar_write(®s->maxfrm, priv->rx_buffer_size); | |
1da177e4 LT |
2204 | |
2205 | /* If the mtu is larger than the max size for standard | |
2206 | * ethernet frames (ie, a jumbo frame), then set maccfg2 | |
2207 | * to allow huge frames, and to check the length */ | |
f4983704 | 2208 | tempval = gfar_read(®s->maccfg2); |
1da177e4 LT |
2209 | |
2210 | if (priv->rx_buffer_size > DEFAULT_RX_BUFFER_SIZE) | |
2211 | tempval |= (MACCFG2_HUGEFRAME | MACCFG2_LENGTHCHECK); | |
2212 | else | |
2213 | tempval &= ~(MACCFG2_HUGEFRAME | MACCFG2_LENGTHCHECK); | |
2214 | ||
f4983704 | 2215 | gfar_write(®s->maccfg2, tempval); |
1da177e4 LT |
2216 | |
2217 | if ((oldsize != tempsize) && (dev->flags & IFF_UP)) | |
2218 | startup_gfar(dev); | |
2219 | ||
2220 | return 0; | |
2221 | } | |
2222 | ||
ab939905 | 2223 | /* gfar_reset_task gets scheduled when a packet has not been |
1da177e4 LT |
2224 | * transmitted after a set amount of time. |
2225 | * For now, assume that clearing out all the structures, and | |
ab939905 SS |
2226 | * starting over will fix the problem. |
2227 | */ | |
2228 | static void gfar_reset_task(struct work_struct *work) | |
1da177e4 | 2229 | { |
ab939905 SS |
2230 | struct gfar_private *priv = container_of(work, struct gfar_private, |
2231 | reset_task); | |
4826857f | 2232 | struct net_device *dev = priv->ndev; |
1da177e4 LT |
2233 | |
2234 | if (dev->flags & IFF_UP) { | |
fba4ed03 | 2235 | netif_tx_stop_all_queues(dev); |
1da177e4 LT |
2236 | stop_gfar(dev); |
2237 | startup_gfar(dev); | |
fba4ed03 | 2238 | netif_tx_start_all_queues(dev); |
1da177e4 LT |
2239 | } |
2240 | ||
263ba320 | 2241 | netif_tx_schedule_all(dev); |
1da177e4 LT |
2242 | } |
2243 | ||
ab939905 SS |
2244 | static void gfar_timeout(struct net_device *dev) |
2245 | { | |
2246 | struct gfar_private *priv = netdev_priv(dev); | |
2247 | ||
2248 | dev->stats.tx_errors++; | |
2249 | schedule_work(&priv->reset_task); | |
2250 | } | |
2251 | ||
1da177e4 | 2252 | /* Interrupt Handler for Transmit complete */ |
a12f801d | 2253 | static int gfar_clean_tx_ring(struct gfar_priv_tx_q *tx_queue) |
1da177e4 | 2254 | { |
a12f801d | 2255 | struct net_device *dev = tx_queue->dev; |
d080cd63 | 2256 | struct gfar_private *priv = netdev_priv(dev); |
a12f801d | 2257 | struct gfar_priv_rx_q *rx_queue = NULL; |
4669bc90 DH |
2258 | struct txbd8 *bdp; |
2259 | struct txbd8 *lbdp = NULL; | |
a12f801d | 2260 | struct txbd8 *base = tx_queue->tx_bd_base; |
4669bc90 DH |
2261 | struct sk_buff *skb; |
2262 | int skb_dirtytx; | |
a12f801d | 2263 | int tx_ring_size = tx_queue->tx_ring_size; |
4669bc90 DH |
2264 | int frags = 0; |
2265 | int i; | |
d080cd63 | 2266 | int howmany = 0; |
4669bc90 | 2267 | u32 lstatus; |
1da177e4 | 2268 | |
fba4ed03 | 2269 | rx_queue = priv->rx_queue[tx_queue->qindex]; |
a12f801d SG |
2270 | bdp = tx_queue->dirty_tx; |
2271 | skb_dirtytx = tx_queue->skb_dirtytx; | |
1da177e4 | 2272 | |
a12f801d | 2273 | while ((skb = tx_queue->tx_skbuff[skb_dirtytx])) { |
a3bc1f11 AV |
2274 | unsigned long flags; |
2275 | ||
4669bc90 DH |
2276 | frags = skb_shinfo(skb)->nr_frags; |
2277 | lbdp = skip_txbd(bdp, frags, base, tx_ring_size); | |
1da177e4 | 2278 | |
4669bc90 | 2279 | lstatus = lbdp->lstatus; |
1da177e4 | 2280 | |
4669bc90 DH |
2281 | /* Only clean completed frames */ |
2282 | if ((lstatus & BD_LFLAG(TXBD_READY)) && | |
2283 | (lstatus & BD_LENGTH_MASK)) | |
2284 | break; | |
2285 | ||
4826857f | 2286 | dma_unmap_single(&priv->ofdev->dev, |
4669bc90 DH |
2287 | bdp->bufPtr, |
2288 | bdp->length, | |
2289 | DMA_TO_DEVICE); | |
81183059 | 2290 | |
4669bc90 DH |
2291 | bdp->lstatus &= BD_LFLAG(TXBD_WRAP); |
2292 | bdp = next_txbd(bdp, base, tx_ring_size); | |
d080cd63 | 2293 | |
4669bc90 | 2294 | for (i = 0; i < frags; i++) { |
4826857f | 2295 | dma_unmap_page(&priv->ofdev->dev, |
4669bc90 DH |
2296 | bdp->bufPtr, |
2297 | bdp->length, | |
2298 | DMA_TO_DEVICE); | |
2299 | bdp->lstatus &= BD_LFLAG(TXBD_WRAP); | |
2300 | bdp = next_txbd(bdp, base, tx_ring_size); | |
2301 | } | |
1da177e4 | 2302 | |
0fd56bb5 AF |
2303 | /* |
2304 | * If there's room in the queue (its length is capped at rx_ring_size) |
2305 | * we add this skb back into the pool, if it's the right size |
2306 | */ | |
a12f801d | 2307 | if (skb_queue_len(&priv->rx_recycle) < rx_queue->rx_ring_size && |
0fd56bb5 AF |
2308 | skb_recycle_check(skb, priv->rx_buffer_size + |
2309 | RXBUF_ALIGNMENT)) | |
2310 | __skb_queue_head(&priv->rx_recycle, skb); | |
2311 | else | |
2312 | dev_kfree_skb_any(skb); | |
2313 | ||
a12f801d | 2314 | tx_queue->tx_skbuff[skb_dirtytx] = NULL; |
d080cd63 | 2315 | |
4669bc90 DH |
2316 | skb_dirtytx = (skb_dirtytx + 1) & |
2317 | TX_RING_MOD_MASK(tx_ring_size); | |
2318 | ||
2319 | howmany++; | |
a3bc1f11 | 2320 | spin_lock_irqsave(&tx_queue->txlock, flags); |
a12f801d | 2321 | tx_queue->num_txbdfree += frags + 1; |
a3bc1f11 | 2322 | spin_unlock_irqrestore(&tx_queue->txlock, flags); |
4669bc90 | 2323 | } |
1da177e4 | 2324 | |
4669bc90 | 2325 | /* If we freed a buffer, we can restart transmission, if necessary */ |
fba4ed03 SG |
2326 | if (__netif_subqueue_stopped(dev, tx_queue->qindex) && tx_queue->num_txbdfree) |
2327 | netif_wake_subqueue(dev, tx_queue->qindex); | |
1da177e4 | 2328 | |
4669bc90 | 2329 | /* Update dirty indicators */ |
a12f801d SG |
2330 | tx_queue->skb_dirtytx = skb_dirtytx; |
2331 | tx_queue->dirty_tx = bdp; | |
1da177e4 | 2332 | |
d080cd63 DH |
2333 | return howmany; |
2334 | } | |
2335 | ||
f4983704 | 2336 | static void gfar_schedule_cleanup(struct gfar_priv_grp *gfargrp) |
d080cd63 | 2337 | { |
a6d0b91a AV |
2338 | unsigned long flags; |
2339 | ||
fba4ed03 SG |
2340 | spin_lock_irqsave(&gfargrp->grplock, flags); |
2341 | if (napi_schedule_prep(&gfargrp->napi)) { | |
f4983704 | 2342 | gfar_write(&gfargrp->regs->imask, IMASK_RTX_DISABLED); |
fba4ed03 | 2343 | __napi_schedule(&gfargrp->napi); |
8707bdd4 JP |
2344 | } else { |
2345 | /* | |
2346 | * Clear IEVENT, so interrupts aren't called again | |
2347 | * because of the packets that have already arrived. | |
2348 | */ | |
f4983704 | 2349 | gfar_write(&gfargrp->regs->ievent, IEVENT_RTX_MASK); |
2f448911 | 2350 | } |
fba4ed03 | 2351 | spin_unlock_irqrestore(&gfargrp->grplock, flags); |
a6d0b91a | 2352 | |
8c7396ae | 2353 | } |
1da177e4 | 2354 | |
8c7396ae | 2355 | /* Interrupt Handler for Transmit complete */ |
f4983704 | 2356 | static irqreturn_t gfar_transmit(int irq, void *grp_id) |
8c7396ae | 2357 | { |
f4983704 | 2358 | gfar_schedule_cleanup((struct gfar_priv_grp *)grp_id); |
1da177e4 LT |
2359 | return IRQ_HANDLED; |
2360 | } | |
2361 | ||
a12f801d | 2362 | static void gfar_new_rxbdp(struct gfar_priv_rx_q *rx_queue, struct rxbd8 *bdp, |
815b97c6 AF |
2363 | struct sk_buff *skb) |
2364 | { | |
a12f801d | 2365 | struct net_device *dev = rx_queue->dev; |
815b97c6 | 2366 | struct gfar_private *priv = netdev_priv(dev); |
8a102fe0 | 2367 | dma_addr_t buf; |
815b97c6 | 2368 | |
8a102fe0 AV |
2369 | buf = dma_map_single(&priv->ofdev->dev, skb->data, |
2370 | priv->rx_buffer_size, DMA_FROM_DEVICE); | |
a12f801d | 2371 | gfar_init_rxbdp(rx_queue, bdp, buf); |
815b97c6 AF |
2372 | } |
2373 | ||
2374 | ||
2375 | struct sk_buff * gfar_new_skb(struct net_device *dev) | |
1da177e4 | 2376 | { |
7f7f5316 | 2377 | unsigned int alignamount; |
1da177e4 LT |
2378 | struct gfar_private *priv = netdev_priv(dev); |
2379 | struct sk_buff *skb = NULL; | |
1da177e4 | 2380 | |
0fd56bb5 AF |
2381 | skb = __skb_dequeue(&priv->rx_recycle); |
2382 | if (!skb) | |
2383 | skb = netdev_alloc_skb(dev, | |
2384 | priv->rx_buffer_size + RXBUF_ALIGNMENT); | |
1da177e4 | 2385 | |
815b97c6 | 2386 | if (!skb) |
1da177e4 LT |
2387 | return NULL; |
2388 | ||
7f7f5316 | 2389 | alignamount = RXBUF_ALIGNMENT - |
bea3348e | 2390 | (((unsigned long) skb->data) & (RXBUF_ALIGNMENT - 1)); |
7f7f5316 | 2391 | |
1da177e4 LT |
2392 | /* We need the data buffer to be aligned to an RXBUF_ALIGNMENT |
2393 | * boundary, so we reserve as many bytes as needed to get there |
2394 | */ | |
7f7f5316 | 2395 | skb_reserve(skb, alignamount); |
a6d36d56 | 2396 | GFAR_CB(skb)->alignamount = alignamount; |
1da177e4 | 2397 | |
1da177e4 LT |
2398 | return skb; |
2399 | } | |
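/* Illustrative example of the alignment math above, assuming
 * RXBUF_ALIGNMENT is 64: if skb->data lands 40 bytes into a 64-byte
 * block, alignamount = 64 - 40 = 24, and the reserve moves the data up
 * to the next 64-byte boundary. If the data is already aligned, a full
 * RXBUF_ALIGNMENT bytes get reserved, which is why that much slack is
 * allocated above; the amount is stashed in GFAR_CB(skb)->alignamount
 * so it can be undone before the skb is recycled. */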
2400 | ||
298e1a9e | 2401 | static inline void count_errors(unsigned short status, struct net_device *dev) |
1da177e4 | 2402 | { |
298e1a9e | 2403 | struct gfar_private *priv = netdev_priv(dev); |
09f75cd7 | 2404 | struct net_device_stats *stats = &dev->stats; |
1da177e4 LT |
2405 | struct gfar_extra_stats *estats = &priv->extra_stats; |
2406 | ||
2407 | /* If the packet was truncated, none of the other errors | |
2408 | * matter */ | |
2409 | if (status & RXBD_TRUNCATED) { | |
2410 | stats->rx_length_errors++; | |
2411 | ||
2412 | estats->rx_trunc++; | |
2413 | ||
2414 | return; | |
2415 | } | |
2416 | /* Count the errors, if there were any */ | |
2417 | if (status & (RXBD_LARGE | RXBD_SHORT)) { | |
2418 | stats->rx_length_errors++; | |
2419 | ||
2420 | if (status & RXBD_LARGE) | |
2421 | estats->rx_large++; | |
2422 | else | |
2423 | estats->rx_short++; | |
2424 | } | |
2425 | if (status & RXBD_NONOCTET) { | |
2426 | stats->rx_frame_errors++; | |
2427 | estats->rx_nonoctet++; | |
2428 | } | |
2429 | if (status & RXBD_CRCERR) { | |
2430 | estats->rx_crcerr++; | |
2431 | stats->rx_crc_errors++; | |
2432 | } | |
2433 | if (status & RXBD_OVERRUN) { | |
2434 | estats->rx_overrun++; | |
2435 | stats->rx_crc_errors++; | |
2436 | } | |
2437 | } | |
2438 | ||
f4983704 | 2439 | irqreturn_t gfar_receive(int irq, void *grp_id) |
1da177e4 | 2440 | { |
f4983704 | 2441 | gfar_schedule_cleanup((struct gfar_priv_grp *)grp_id); |
1da177e4 LT |
2442 | return IRQ_HANDLED; |
2443 | } | |
2444 | ||
0bbaf069 KG |
2445 | static inline void gfar_rx_checksum(struct sk_buff *skb, struct rxfcb *fcb) |
2446 | { | |
2447 | /* If valid headers were found, and valid sums | |
2448 | * were verified, then we tell the kernel that no | |
2449 | * checksumming is necessary. Otherwise, it is */ | |
7f7f5316 | 2450 | if ((fcb->flags & RXFCB_CSUM_MASK) == (RXFCB_CIP | RXFCB_CTU)) |
0bbaf069 KG |
2451 | skb->ip_summed = CHECKSUM_UNNECESSARY; |
2452 | else | |
2453 | skb->ip_summed = CHECKSUM_NONE; | |
2454 | } | |
2455 | ||
2456 | ||
1da177e4 LT |
2457 | /* gfar_process_frame() -- handle one incoming packet if skb |
2458 | * isn't NULL. */ | |
2459 | static int gfar_process_frame(struct net_device *dev, struct sk_buff *skb, | |
2c2db48a | 2460 | int amount_pull) |
1da177e4 LT |
2461 | { |
2462 | struct gfar_private *priv = netdev_priv(dev); | |
0bbaf069 | 2463 | struct rxfcb *fcb = NULL; |
1da177e4 | 2464 | |
2c2db48a | 2465 | int ret; |
1da177e4 | 2466 | |
2c2db48a DH |
2467 | /* The FCB is at the beginning of the frame, if it exists */ |
2468 | fcb = (struct rxfcb *)skb->data; | |
0bbaf069 | 2469 | |
2c2db48a DH |
2470 | /* Remove the FCB from the skb */ |
2471 | /* Remove the padded bytes, if there are any */ | |
f74dac08 SG |
2472 | if (amount_pull) { |
2473 | skb_record_rx_queue(skb, fcb->rq); | |
2c2db48a | 2474 | skb_pull(skb, amount_pull); |
f74dac08 | 2475 | } |
0bbaf069 | 2476 | |
2c2db48a DH |
2477 | if (priv->rx_csum_enable) |
2478 | gfar_rx_checksum(skb, fcb); | |
0bbaf069 | 2479 | |
2c2db48a DH |
2480 | /* Tell the skb what kind of packet this is */ |
2481 | skb->protocol = eth_type_trans(skb, dev); | |
1da177e4 | 2482 | |
2c2db48a DH |
2483 | /* Send the packet up the stack */ |
2484 | if (unlikely(priv->vlgrp && (fcb->flags & RXFCB_VLN))) | |
2485 | ret = vlan_hwaccel_receive_skb(skb, priv->vlgrp, fcb->vlctl); | |
2486 | else | |
2487 | ret = netif_receive_skb(skb); | |
0bbaf069 | 2488 | |
2c2db48a DH |
2489 | if (NET_RX_DROP == ret) |
2490 | priv->extra_stats.kernel_dropped++; | |
1da177e4 LT |
2491 | |
2492 | return 0; | |
2493 | } | |
2494 | ||
2495 | /* gfar_clean_rx_ring() -- Processes each frame in the rx ring | |
0bbaf069 | 2496 | * until the budget/quota has been reached. Returns the number |
1da177e4 LT |
2497 | * of frames handled |
2498 | */ | |
a12f801d | 2499 | int gfar_clean_rx_ring(struct gfar_priv_rx_q *rx_queue, int rx_work_limit) |
1da177e4 | 2500 | { |
a12f801d | 2501 | struct net_device *dev = rx_queue->dev; |
31de198b | 2502 | struct rxbd8 *bdp, *base; |
1da177e4 | 2503 | struct sk_buff *skb; |
2c2db48a DH |
2504 | int pkt_len; |
2505 | int amount_pull; | |
1da177e4 LT |
2506 | int howmany = 0; |
2507 | struct gfar_private *priv = netdev_priv(dev); | |
2508 | ||
2509 | /* Get the first full descriptor */ | |
a12f801d SG |
2510 | bdp = rx_queue->cur_rx; |
2511 | base = rx_queue->rx_bd_base; | |
1da177e4 | 2512 | |
2c2db48a DH |
2513 | amount_pull = (gfar_uses_fcb(priv) ? GMAC_FCB_LEN : 0) + |
2514 | priv->padding; | |
2515 | ||
1da177e4 | 2516 | while (!((bdp->status & RXBD_EMPTY) || (--rx_work_limit < 0))) { |
815b97c6 | 2517 | struct sk_buff *newskb; |
3b6330ce | 2518 | rmb(); |
815b97c6 AF |
2519 | |
2520 | /* Add another skb for the future */ | |
2521 | newskb = gfar_new_skb(dev); | |
2522 | ||
a12f801d | 2523 | skb = rx_queue->rx_skbuff[rx_queue->skb_currx]; |
1da177e4 | 2524 | |
4826857f | 2525 | dma_unmap_single(&priv->ofdev->dev, bdp->bufPtr, |
81183059 AF |
2526 | priv->rx_buffer_size, DMA_FROM_DEVICE); |
2527 | ||
815b97c6 AF |
2528 | /* We drop the frame if we failed to allocate a new buffer */ |
2529 | if (unlikely(!newskb || !(bdp->status & RXBD_LAST) || | |
2530 | bdp->status & RXBD_ERR)) { | |
2531 | count_errors(bdp->status, dev); | |
2532 | ||
2533 | if (unlikely(!newskb)) | |
2534 | newskb = skb; | |
4e2fd555 LB |
2535 | else if (skb) { |
2536 | /* | |
a6d36d56 | 2537 | * We need to un-reserve() the skb to what it |
4e2fd555 LB |
2538 | * was before gfar_new_skb() re-aligned |
2539 | * it to an RXBUF_ALIGNMENT boundary | |
2540 | * before we put the skb back on the | |
2541 | * recycle list. | |
2542 | */ | |
a6d36d56 | 2543 | skb_reserve(skb, -GFAR_CB(skb)->alignamount); |
0fd56bb5 | 2544 | __skb_queue_head(&priv->rx_recycle, skb); |
4e2fd555 | 2545 | } |
815b97c6 | 2546 | } else { |
1da177e4 | 2547 | /* Increment the number of packets */ |
a7f38041 | 2548 | rx_queue->stats.rx_packets++; |
1da177e4 LT |
2549 | howmany++; |
2550 | ||
2c2db48a DH |
2551 | if (likely(skb)) { |
2552 | pkt_len = bdp->length - ETH_FCS_LEN; | |
2553 | /* Remove the FCS from the packet length */ | |
2554 | skb_put(skb, pkt_len); | |
a7f38041 | 2555 | rx_queue->stats.rx_bytes += pkt_len; |
f74dac08 | 2556 | skb_record_rx_queue(skb, rx_queue->qindex); |
2c2db48a DH |
2557 | gfar_process_frame(dev, skb, amount_pull); |
2558 | ||
2559 | } else { | |
2560 | if (netif_msg_rx_err(priv)) | |
2561 | printk(KERN_WARNING | |
2562 | "%s: Missing skb!\n", dev->name); | |
a7f38041 | 2563 | rx_queue->stats.rx_dropped++; |
2c2db48a DH |
2564 | priv->extra_stats.rx_skbmissing++; |
2565 | } | |
1da177e4 | 2566 | |
1da177e4 LT |
2567 | } |
2568 | ||
a12f801d | 2569 | rx_queue->rx_skbuff[rx_queue->skb_currx] = newskb; |
1da177e4 | 2570 | |
815b97c6 | 2571 | /* Setup the new bdp */ |
a12f801d | 2572 | gfar_new_rxbdp(rx_queue, bdp, newskb); |
1da177e4 LT |
2573 | |
2574 | /* Update to the next pointer */ | |
a12f801d | 2575 | bdp = next_bd(bdp, base, rx_queue->rx_ring_size); |
1da177e4 LT |
2576 | |
2577 | /* update to point at the next skb */ | |
a12f801d SG |
2578 | rx_queue->skb_currx = |
2579 | (rx_queue->skb_currx + 1) & | |
2580 | RX_RING_MOD_MASK(rx_queue->rx_ring_size); | |
1da177e4 LT |
2581 | } |
2582 | ||
2583 | /* Update the current rxbd pointer to be the next one */ | |
a12f801d | 2584 | rx_queue->cur_rx = bdp; |
1da177e4 | 2585 | |
1da177e4 LT |
2586 | return howmany; |
2587 | } | |
2588 | ||
bea3348e | 2589 | static int gfar_poll(struct napi_struct *napi, int budget) |
1da177e4 | 2590 | { |
fba4ed03 SG |
2591 | struct gfar_priv_grp *gfargrp = container_of(napi, |
2592 | struct gfar_priv_grp, napi); | |
2593 | struct gfar_private *priv = gfargrp->priv; | |
46ceb60c | 2594 | struct gfar __iomem *regs = gfargrp->regs; |
a12f801d | 2595 | struct gfar_priv_tx_q *tx_queue = NULL; |
fba4ed03 SG |
2596 | struct gfar_priv_rx_q *rx_queue = NULL; |
2597 | int rx_cleaned = 0, budget_per_queue = 0, rx_cleaned_per_queue = 0; | |
18294ad1 AV |
2598 | int tx_cleaned = 0, i, left_over_budget = budget; |
2599 | unsigned long serviced_queues = 0; | |
fba4ed03 | 2600 | int num_queues = 0; |
d080cd63 | 2601 | |
fba4ed03 SG |
2602 | num_queues = gfargrp->num_rx_queues; |
2603 | budget_per_queue = budget/num_queues; | |
2604 | ||
8c7396ae DH |
2605 | /* Clear IEVENT, so interrupts aren't called again |
2606 | * because of the packets that have already arrived */ | |
f4983704 | 2607 | gfar_write(®s->ievent, IEVENT_RTX_MASK); |
8c7396ae | 2608 | |
fba4ed03 | 2609 | while (num_queues && left_over_budget) { |
1da177e4 | 2610 | |
fba4ed03 SG |
2611 | budget_per_queue = left_over_budget/num_queues; |
2612 | left_over_budget = 0; | |
2613 | ||
984b3f57 | 2614 | for_each_set_bit(i, &gfargrp->rx_bit_map, priv->num_rx_queues) { |
fba4ed03 SG |
2615 | if (test_bit(i, &serviced_queues)) |
2616 | continue; | |
2617 | rx_queue = priv->rx_queue[i]; | |
2618 | tx_queue = priv->tx_queue[rx_queue->qindex]; | |
2619 | ||
a3bc1f11 | 2620 | tx_cleaned += gfar_clean_tx_ring(tx_queue); |
fba4ed03 SG |
2621 | rx_cleaned_per_queue = gfar_clean_rx_ring(rx_queue, |
2622 | budget_per_queue); | |
2623 | rx_cleaned += rx_cleaned_per_queue; | |
2624 | if(rx_cleaned_per_queue < budget_per_queue) { | |
2625 | left_over_budget = left_over_budget + | |
2626 | (budget_per_queue - rx_cleaned_per_queue); | |
2627 | set_bit(i, &serviced_queues); | |
2628 | num_queues--; | |
2629 | } | |
2630 | } | |
2631 | } | |
1da177e4 | 2632 | |
42199884 AF |
2633 | if (tx_cleaned) |
2634 | return budget; | |
2635 | ||
2636 | if (rx_cleaned < budget) { | |
288379f0 | 2637 | napi_complete(napi); |
1da177e4 LT |
2638 | |
2639 | /* Clear the halt bit in RSTAT */ | |
fba4ed03 | 2640 | gfar_write(®s->rstat, gfargrp->rstat); |
1da177e4 | 2641 | |
f4983704 | 2642 | gfar_write(®s->imask, IMASK_DEFAULT); |
1da177e4 LT |
2643 | |
2644 | /* If we are coalescing interrupts, update the timer */ | |
2645 | /* Otherwise, clear it */ | |
46ceb60c SG |
2646 | gfar_configure_coalescing(priv, |
2647 | gfargrp->rx_bit_map, gfargrp->tx_bit_map); | |
1da177e4 LT |
2648 | } |
2649 | ||
42199884 | 2650 | return rx_cleaned; |
1da177e4 | 2651 | } |
1da177e4 | 2652 | |
f2d71c2d VW |
2653 | #ifdef CONFIG_NET_POLL_CONTROLLER |
2654 | /* | |
2655 | * Polling 'interrupt' - used by things like netconsole to send skbs | |
2656 | * without having to re-enable interrupts. It's not called while | |
2657 | * the interrupt routine is executing. | |
2658 | */ | |
2659 | static void gfar_netpoll(struct net_device *dev) | |
2660 | { | |
2661 | struct gfar_private *priv = netdev_priv(dev); | |
46ceb60c | 2662 | int i = 0; |
f2d71c2d VW |
2663 | |
2664 | /* If the device has multiple interrupts, run tx/rx */ | |
b31a1d8b | 2665 | if (priv->device_flags & FSL_GIANFAR_DEV_HAS_MULTI_INTR) { |
46ceb60c SG |
2666 | for (i = 0; i < priv->num_grps; i++) { |
2667 | disable_irq(priv->gfargrp[i].interruptTransmit); | |
2668 | disable_irq(priv->gfargrp[i].interruptReceive); | |
2669 | disable_irq(priv->gfargrp[i].interruptError); | |
2670 | gfar_interrupt(priv->gfargrp[i].interruptTransmit, | |
2671 | &priv->gfargrp[i]); | |
2672 | enable_irq(priv->gfargrp[i].interruptError); | |
2673 | enable_irq(priv->gfargrp[i].interruptReceive); | |
2674 | enable_irq(priv->gfargrp[i].interruptTransmit); | |
2675 | } | |
f2d71c2d | 2676 | } else { |
46ceb60c SG |
2677 | for (i = 0; i < priv->num_grps; i++) { |
2678 | disable_irq(priv->gfargrp[i].interruptTransmit); | |
2679 | gfar_interrupt(priv->gfargrp[i].interruptTransmit, | |
2680 | &priv->gfargrp[i]); | |
2681 | enable_irq(priv->gfargrp[i].interruptTransmit); | |
43de004b | 2682 | } |
f2d71c2d VW |
2683 | } |
2684 | } | |
2685 | #endif | |
2686 | ||
1da177e4 | 2687 | /* The interrupt handler for devices with one interrupt */ |
f4983704 | 2688 | static irqreturn_t gfar_interrupt(int irq, void *grp_id) |
1da177e4 | 2689 | { |
f4983704 | 2690 | struct gfar_priv_grp *gfargrp = grp_id; |
1da177e4 LT |
2691 | |
2692 | /* Save ievent for future reference */ | |
f4983704 | 2693 | u32 events = gfar_read(&gfargrp->regs->ievent); |
1da177e4 | 2694 | |
1da177e4 | 2695 | /* Check for reception */ |
538cc7ee | 2696 | if (events & IEVENT_RX_MASK) |
f4983704 | 2697 | gfar_receive(irq, grp_id); |
1da177e4 LT |
2698 | |
2699 | /* Check for transmit completion */ | |
538cc7ee | 2700 | if (events & IEVENT_TX_MASK) |
f4983704 | 2701 | gfar_transmit(irq, grp_id); |
1da177e4 | 2702 | |
538cc7ee SS |
2703 | /* Check for errors */ |
2704 | if (events & IEVENT_ERR_MASK) | |
f4983704 | 2705 | gfar_error(irq, grp_id); |
1da177e4 LT |
2706 | |
2707 | return IRQ_HANDLED; | |
2708 | } | |
2709 | ||
1da177e4 LT |
2710 | /* Called every time the controller might need to be made |
2711 | * aware of new link state. The PHY code conveys this | |
bb40dcbb | 2712 | * information through variables in the phydev structure, and this |
1da177e4 LT |
2713 | * function converts those variables into the appropriate |
2714 | * register values, and can bring down the device if needed. | |
2715 | */ | |
2716 | static void adjust_link(struct net_device *dev) | |
2717 | { | |
2718 | struct gfar_private *priv = netdev_priv(dev); | |
46ceb60c | 2719 | struct gfar __iomem *regs = priv->gfargrp[0].regs; |
bb40dcbb AF |
2720 | unsigned long flags; |
2721 | struct phy_device *phydev = priv->phydev; | |
2722 | int new_state = 0; | |
2723 | ||
fba4ed03 SG |
2724 | local_irq_save(flags); |
2725 | lock_tx_qs(priv); | |
2726 | ||
bb40dcbb AF |
2727 | if (phydev->link) { |
2728 | u32 tempval = gfar_read(®s->maccfg2); | |
7f7f5316 | 2729 | u32 ecntrl = gfar_read(®s->ecntrl); |
1da177e4 | 2730 | |
1da177e4 LT |
2731 | /* Now we make sure that we can be in full duplex mode. |
2732 | * If not, we operate in half-duplex mode. */ | |
bb40dcbb AF |
2733 | if (phydev->duplex != priv->oldduplex) { |
2734 | new_state = 1; | |
2735 | if (!(phydev->duplex)) | |
1da177e4 | 2736 | tempval &= ~(MACCFG2_FULL_DUPLEX); |
bb40dcbb | 2737 | else |
1da177e4 | 2738 | tempval |= MACCFG2_FULL_DUPLEX; |
1da177e4 | 2739 | |
bb40dcbb | 2740 | priv->oldduplex = phydev->duplex; |
1da177e4 LT |
2741 | } |
2742 | ||
bb40dcbb AF |
2743 | if (phydev->speed != priv->oldspeed) { |
2744 | new_state = 1; | |
2745 | switch (phydev->speed) { | |
1da177e4 | 2746 | case 1000: |
1da177e4 LT |
2747 | tempval = |
2748 | ((tempval & ~(MACCFG2_IF)) | MACCFG2_GMII); | |
f430e49e LY |
2749 | |
2750 | ecntrl &= ~(ECNTRL_R100); | |
1da177e4 LT |
2751 | break; |
2752 | case 100: | |
2753 | case 10: | |
1da177e4 LT |
2754 | tempval = |
2755 | ((tempval & ~(MACCFG2_IF)) | MACCFG2_MII); | |
7f7f5316 AF |
2756 | |
2757 | /* Reduced mode distinguishes | |
2758 | * between 10 and 100 */ | |
2759 | if (phydev->speed == SPEED_100) | |
2760 | ecntrl |= ECNTRL_R100; | |
2761 | else | |
2762 | ecntrl &= ~(ECNTRL_R100); | |
1da177e4 LT |
2763 | break; |
2764 | default: | |
0bbaf069 KG |
2765 | if (netif_msg_link(priv)) |
2766 | printk(KERN_WARNING | |
bb40dcbb AF |
2767 | "%s: Ack! Speed (%d) is not 10/100/1000!\n", |
2768 | dev->name, phydev->speed); | |
1da177e4 LT |
2769 | break; |
2770 | } | |
2771 | ||
bb40dcbb | 2772 | priv->oldspeed = phydev->speed; |
1da177e4 LT |
2773 | } |
2774 | ||
bb40dcbb | 2775 | gfar_write(®s->maccfg2, tempval); |
7f7f5316 | 2776 | gfar_write(®s->ecntrl, ecntrl); |
bb40dcbb | 2777 | |
1da177e4 | 2778 | if (!priv->oldlink) { |
bb40dcbb | 2779 | new_state = 1; |
1da177e4 | 2780 | priv->oldlink = 1; |
1da177e4 | 2781 | } |
bb40dcbb AF |
2782 | } else if (priv->oldlink) { |
2783 | new_state = 1; | |
2784 | priv->oldlink = 0; | |
2785 | priv->oldspeed = 0; | |
2786 | priv->oldduplex = -1; | |
1da177e4 | 2787 | } |
1da177e4 | 2788 | |
bb40dcbb AF |
2789 | if (new_state && netif_msg_link(priv)) |
2790 | phy_print_status(phydev); | |
fba4ed03 SG |
2791 | unlock_tx_qs(priv); |
2792 | local_irq_restore(flags); | |
bb40dcbb | 2793 | } |
1da177e4 LT |
2794 | |
2795 | /* Update the hash table based on the current list of multicast | |
2796 | * addresses we subscribe to. Also, change the promiscuity of | |
2797 | * the device based on the flags (this function is called | |
2798 | * whenever dev->flags is changed) */ |
2799 | static void gfar_set_multi(struct net_device *dev) | |
2800 | { | |
2801 | struct dev_mc_list *mc_ptr; | |
2802 | struct gfar_private *priv = netdev_priv(dev); | |
46ceb60c | 2803 | struct gfar __iomem *regs = priv->gfargrp[0].regs; |
1da177e4 LT |
2804 | u32 tempval; |
2805 | ||
a12f801d | 2806 | if (dev->flags & IFF_PROMISC) { |
1da177e4 LT |
2807 | /* Set RCTRL to PROM */ |
2808 | tempval = gfar_read(®s->rctrl); | |
2809 | tempval |= RCTRL_PROM; | |
2810 | gfar_write(®s->rctrl, tempval); | |
2811 | } else { | |
2812 | /* Set RCTRL to not PROM */ | |
2813 | tempval = gfar_read(®s->rctrl); | |
2814 | tempval &= ~(RCTRL_PROM); | |
2815 | gfar_write(®s->rctrl, tempval); | |
2816 | } | |
6aa20a22 | 2817 | |
a12f801d | 2818 | if (dev->flags & IFF_ALLMULTI) { |
1da177e4 | 2819 | /* Set the hash to rx all multicast frames */ |
0bbaf069 KG |
2820 | gfar_write(®s->igaddr0, 0xffffffff); |
2821 | gfar_write(®s->igaddr1, 0xffffffff); | |
2822 | gfar_write(®s->igaddr2, 0xffffffff); | |
2823 | gfar_write(®s->igaddr3, 0xffffffff); | |
2824 | gfar_write(®s->igaddr4, 0xffffffff); | |
2825 | gfar_write(®s->igaddr5, 0xffffffff); | |
2826 | gfar_write(®s->igaddr6, 0xffffffff); | |
2827 | gfar_write(®s->igaddr7, 0xffffffff); | |
1da177e4 LT |
2828 | gfar_write(®s->gaddr0, 0xffffffff); |
2829 | gfar_write(®s->gaddr1, 0xffffffff); | |
2830 | gfar_write(®s->gaddr2, 0xffffffff); | |
2831 | gfar_write(®s->gaddr3, 0xffffffff); | |
2832 | gfar_write(®s->gaddr4, 0xffffffff); | |
2833 | gfar_write(®s->gaddr5, 0xffffffff); | |
2834 | gfar_write(®s->gaddr6, 0xffffffff); | |
2835 | gfar_write(®s->gaddr7, 0xffffffff); | |
2836 | } else { | |
7f7f5316 AF |
2837 | int em_num; |
2838 | int idx; | |
2839 | ||
1da177e4 | 2840 | /* zero out the hash */ |
0bbaf069 KG |
2841 | gfar_write(®s->igaddr0, 0x0); |
2842 | gfar_write(®s->igaddr1, 0x0); | |
2843 | gfar_write(®s->igaddr2, 0x0); | |
2844 | gfar_write(®s->igaddr3, 0x0); | |
2845 | gfar_write(®s->igaddr4, 0x0); | |
2846 | gfar_write(®s->igaddr5, 0x0); | |
2847 | gfar_write(®s->igaddr6, 0x0); | |
2848 | gfar_write(®s->igaddr7, 0x0); | |
1da177e4 LT |
2849 | gfar_write(®s->gaddr0, 0x0); |
2850 | gfar_write(®s->gaddr1, 0x0); | |
2851 | gfar_write(®s->gaddr2, 0x0); | |
2852 | gfar_write(®s->gaddr3, 0x0); | |
2853 | gfar_write(®s->gaddr4, 0x0); | |
2854 | gfar_write(®s->gaddr5, 0x0); | |
2855 | gfar_write(®s->gaddr6, 0x0); | |
2856 | gfar_write(®s->gaddr7, 0x0); | |
2857 | ||
7f7f5316 AF |
2858 | /* If we have extended hash tables, we need to |
2859 | * clear the exact match registers to prepare for | |
2860 | * setting them */ | |
2861 | if (priv->extended_hash) { | |
2862 | em_num = GFAR_EM_NUM + 1; | |
2863 | gfar_clear_exact_match(dev); | |
2864 | idx = 1; | |
2865 | } else { | |
2866 | idx = 0; | |
2867 | em_num = 0; | |
2868 | } | |
2869 | ||
4cd24eaf | 2870 | if (netdev_mc_empty(dev)) |
1da177e4 LT |
2871 | return; |
2872 | ||
2873 | /* Parse the list, and set the appropriate bits */ | |
48e2f183 | 2874 | netdev_for_each_mc_addr(mc_ptr, dev) { |
7f7f5316 AF |
2875 | if (idx < em_num) { |
2876 | gfar_set_mac_for_addr(dev, idx, | |
2877 | mc_ptr->dmi_addr); | |
2878 | idx++; | |
2879 | } else | |
2880 | gfar_set_hash_for_addr(dev, mc_ptr->dmi_addr); | |
1da177e4 LT |
2881 | } |
2882 | } | |
2883 | ||
2884 | return; | |
2885 | } | |
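As a compressed restatement of the policy gfar_set_multi() implements, the sketch below is not driver code; the enum and function names are made up, and only the IFF_PROMISC/IFF_ALLMULTI flags from <linux/if.h> are real. It maps dev->flags onto the three receive-filter modes used above:

	enum example_rx_mode { RX_PROMISC, RX_ALL_MULTI, RX_FILTERED };

	static enum example_rx_mode example_classify_rx_mode(unsigned int flags)
	{
		if (flags & IFF_PROMISC)
			return RX_PROMISC;	/* RCTRL_PROM set, filters ignored */
		if (flags & IFF_ALLMULTI)
			return RX_ALL_MULTI;	/* every igaddr/gaddr bit set */
		return RX_FILTERED;		/* exact-match slots, then group hash */
	}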
2886 | ||
7f7f5316 AF |
2887 | |
2888 | /* Clears each of the exact match registers to zero, so they | |
2889 | * don't interfere with normal reception */ | |
2890 | static void gfar_clear_exact_match(struct net_device *dev) | |
2891 | { | |
2892 | int idx; | |
2893 | u8 zero_arr[MAC_ADDR_LEN] = {0, 0, 0, 0, 0, 0}; |
2894 | /* entry 0 holds the station address, so start clearing at entry 1 */ |
2895 | for (idx = 1; idx < GFAR_EM_NUM + 1; idx++) |
2896 | gfar_set_mac_for_addr(dev, idx, (u8 *)zero_arr); | |
2897 | } | |
2898 | ||
1da177e4 LT |
2899 | /* Set the appropriate hash bit for the given addr */ |
2900 | /* The algorithm works like so: | |
2901 | * 1) Take the Destination Address (ie the multicast address), and | |
2902 | * do a CRC on it (little endian), and reverse the bits of the | |
2903 | * result. | |
2904 | * 2) Use the 8 most significant bits as a hash into a 256-entry | |
2905 | * table. The table is controlled through 8 32-bit registers: | |
2906 | * gaddr0-7. gaddr0's MSB is entry 0, and gaddr7's LSB is |
2907 | * entry 255. This means that the 3 most significant bits of the |
2908 | * hash index select which gaddr register to use, and the 5 other bits |
2909 | * indicate which bit (assuming an IBM numbering scheme, which | |
2910 | * for PowerPC (tm) is usually the case) in the register holds | |
2911 | * the entry. */ | |
2912 | static void gfar_set_hash_for_addr(struct net_device *dev, u8 *addr) | |
2913 | { | |
2914 | u32 tempval; | |
2915 | struct gfar_private *priv = netdev_priv(dev); | |
1da177e4 | 2916 | u32 result = ether_crc(MAC_ADDR_LEN, addr); |
0bbaf069 KG |
2917 | int width = priv->hash_width; |
2918 | u8 whichbit = (result >> (32 - width)) & 0x1f; | |
2919 | u8 whichreg = result >> (32 - width + 5); | |
1da177e4 LT |
2920 | u32 value = (1 << (31-whichbit)); |
2921 | ||
0bbaf069 | 2922 | tempval = gfar_read(priv->hash_regs[whichreg]); |
1da177e4 | 2923 | tempval |= value; |
0bbaf069 | 2924 | gfar_write(priv->hash_regs[whichreg], tempval); |
1da177e4 LT |
2925 | |
2926 | return; | |
2927 | } | |
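A worked example of the index split above may help. The CRC value here is made up purely for illustration, and hash_width is assumed to be 8 (the non-extended case, where hash_regs[] points at gaddr0-7):

	/* Suppose ether_crc() returned 0xd3000000 for some address. */
	u32 result = 0xd3000000;
	int width = 8;					/* non-extended hash */
	u8 whichbit = (result >> (32 - width)) & 0x1f;	/* 0x13 -> bit 19 (IBM numbering) */
	u8 whichreg = result >> (32 - width + 5);	/* 0x06 -> gaddr6 */
	u32 value = 1 << (31 - whichbit);		/* 0x00001000 */

	/* gfar_set_hash_for_addr() then ORs value into hash_regs[whichreg]. */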
2928 | ||
7f7f5316 AF |
2929 | |
2930 | /* There are multiple MAC Address register pairs on some controllers | |
2931 | * This function sets the num'th pair to a given address |
2932 | */ | |
2933 | static void gfar_set_mac_for_addr(struct net_device *dev, int num, u8 *addr) | |
2934 | { | |
2935 | struct gfar_private *priv = netdev_priv(dev); | |
46ceb60c | 2936 | struct gfar __iomem *regs = priv->gfargrp[0].regs; |
7f7f5316 AF |
2937 | int idx; |
2938 | char tmpbuf[2 * sizeof(u32)] = { 0 }; /* padded: both 32-bit reads below stay in bounds */ |
2939 | u32 tempval; | |
f4983704 | 2940 | u32 __iomem *macptr = &regs->macstnaddr1; |
7f7f5316 AF |
2941 | |
2942 | macptr += num*2; | |
2943 | ||
2944 | /* Copy the address into the MAC registers in reverse byte order, */ |
2945 | /* as the hardware expects the address bytes stored that way */ |
2946 | for (idx = 0; idx < MAC_ADDR_LEN; idx++) | |
2947 | tmpbuf[MAC_ADDR_LEN - 1 - idx] = addr[idx]; | |
2948 | ||
2949 | gfar_write(macptr, *((u32 *) (tmpbuf))); | |
2950 | ||
2951 | tempval = *((u32 *) (tmpbuf + 4)); | |
2952 | ||
2953 | gfar_write(macptr+1, tempval); | |
2954 | } | |
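To make the byte reversal concrete, here is a worked example; the address is hypothetical and a big-endian CPU (the usual case for these parts) is assumed:

	/*
	 * addr = 00:04:9f:01:02:03  (made-up address)
	 * tmpbuf after the copy loop: 03 02 01 9f 04 00 00 00
	 *
	 * so the two writes above become:
	 *	gfar_write(macptr,     0x0302019f);	// MACnADDR1
	 *	gfar_write(macptr + 1, 0x04000000);	// MACnADDR2 (low 16 bits are padding)
	 */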
2955 | ||
1da177e4 | 2956 | /* GFAR error interrupt handler */ |
f4983704 | 2957 | static irqreturn_t gfar_error(int irq, void *grp_id) |
1da177e4 | 2958 | { |
f4983704 SG |
2959 | struct gfar_priv_grp *gfargrp = grp_id; |
2960 | struct gfar __iomem *regs = gfargrp->regs; | |
2961 | struct gfar_private *priv = gfargrp->priv; |
2962 | struct net_device *dev = priv->ndev; | |
1da177e4 LT |
2963 | |
2964 | /* Save ievent for future reference */ | |
f4983704 | 2965 | u32 events = gfar_read(&regs->ievent); |
1da177e4 LT |
2966 | |
2967 | /* Clear IEVENT: the error bits are write-1-to-clear */ |
f4983704 | 2968 | gfar_write(&regs->ievent, events & IEVENT_ERR_MASK); |
d87eb127 SW |
2969 | |
2970 | /* Magic Packet is not an error. */ | |
b31a1d8b | 2971 | if ((priv->device_flags & FSL_GIANFAR_DEV_HAS_MAGIC_PACKET) && |
d87eb127 SW |
2972 | (events & IEVENT_MAG)) |
2973 | events &= ~IEVENT_MAG; | |
1da177e4 LT |
2974 | |
2975 | /* Log the raw event and mask state for debugging */ |
0bbaf069 KG |
2976 | if (netif_msg_rx_err(priv) || netif_msg_tx_err(priv)) |
2977 | printk(KERN_DEBUG "%s: error interrupt (ievent=0x%08x imask=0x%08x)\n", | |
f4983704 | 2978 | dev->name, events, gfar_read(&regs->imask)); |
1da177e4 LT |
2979 | |
2980 | /* Update the error counters */ | |
2981 | if (events & IEVENT_TXE) { | |
09f75cd7 | 2982 | dev->stats.tx_errors++; |
1da177e4 LT |
2983 | |
2984 | if (events & IEVENT_LC) | |
09f75cd7 | 2985 | dev->stats.tx_window_errors++; |
1da177e4 | 2986 | if (events & IEVENT_CRL) |
09f75cd7 | 2987 | dev->stats.tx_aborted_errors++; |
1da177e4 | 2988 | if (events & IEVENT_XFUN) { |
836cf7fa AV |
2989 | unsigned long flags; |
2990 | ||
0bbaf069 | 2991 | if (netif_msg_tx_err(priv)) |
538cc7ee SS |
2992 | printk(KERN_DEBUG "%s: TX FIFO underrun, " |
2993 | "packet dropped.\n", dev->name); | |
09f75cd7 | 2994 | dev->stats.tx_dropped++; |
1da177e4 LT |
2995 | priv->extra_stats.tx_underrun++; |
2996 | ||
836cf7fa AV |
2997 | local_irq_save(flags); |
2998 | lock_tx_qs(priv); | |
2999 | ||
1da177e4 | 3000 | /* Reactivate the Tx Queues */ |
fba4ed03 | 3001 | gfar_write(&regs->tstat, gfargrp->tstat); |
836cf7fa AV |
3002 | |
3003 | unlock_tx_qs(priv); | |
3004 | local_irq_restore(flags); | |
1da177e4 | 3005 | } |
0bbaf069 KG |
3006 | if (netif_msg_tx_err(priv)) |
3007 | printk(KERN_DEBUG "%s: Transmit Error\n", dev->name); | |
1da177e4 LT |
3008 | } |
3009 | if (events & IEVENT_BSY) { | |
09f75cd7 | 3010 | dev->stats.rx_errors++; |
1da177e4 LT |
3011 | priv->extra_stats.rx_bsy++; |
3012 | ||
f4983704 | 3013 | gfar_receive(irq, grp_id); |
1da177e4 | 3014 | |
0bbaf069 | 3015 | if (netif_msg_rx_err(priv)) |
538cc7ee | 3016 | printk(KERN_DEBUG "%s: busy error (rstat: %x)\n", |
f4983704 | 3017 | dev->name, gfar_read(&regs->rstat)); |
1da177e4 LT |
3018 | } |
3019 | if (events & IEVENT_BABR) { | |
09f75cd7 | 3020 | dev->stats.rx_errors++; |
1da177e4 LT |
3021 | priv->extra_stats.rx_babr++; |
3022 | ||
0bbaf069 | 3023 | if (netif_msg_rx_err(priv)) |
538cc7ee | 3024 | printk(KERN_DEBUG "%s: babbling RX error\n", dev->name); |
1da177e4 LT |
3025 | } |
3026 | if (events & IEVENT_EBERR) { | |
3027 | priv->extra_stats.eberr++; | |
0bbaf069 | 3028 | if (netif_msg_rx_err(priv)) |
538cc7ee | 3029 | printk(KERN_DEBUG "%s: bus error\n", dev->name); |
1da177e4 | 3030 | } |
0bbaf069 | 3031 | if ((events & IEVENT_RXC) && netif_msg_rx_status(priv)) |
538cc7ee | 3032 | printk(KERN_DEBUG "%s: control frame\n", dev->name); |
1da177e4 LT |
3033 | |
3034 | if (events & IEVENT_BABT) { | |
3035 | priv->extra_stats.tx_babt++; | |
0bbaf069 | 3036 | if (netif_msg_tx_err(priv)) |
538cc7ee | 3037 | printk(KERN_DEBUG "%s: babbling TX error\n", dev->name); |
1da177e4 LT |
3038 | } |
3039 | return IRQ_HANDLED; | |
3040 | } | |
3041 | ||
b31a1d8b AF |
3042 | static struct of_device_id gfar_match[] = |
3043 | { | |
3044 | { | |
3045 | .type = "network", | |
3046 | .compatible = "gianfar", | |
3047 | }, | |
46ceb60c SG |
3048 | { |
3049 | .compatible = "fsl,etsec2", | |
3050 | }, | |
b31a1d8b AF |
3051 | {}, |
3052 | }; | |
e72701ac | 3053 | MODULE_DEVICE_TABLE(of, gfar_match); |
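For reference, a representative device-tree node that this match table would bind against; every value here is board-specific and purely illustrative (the real node comes from the board's .dts):

	/*
	 *	ethernet@24000 {
	 *		device_type = "network";	<- matches .type
	 *		model = "eTSEC";
	 *		compatible = "gianfar";		<- matches .compatible
	 *		reg = <0x24000 0x1000>;
	 *		local-mac-address = [ 00 00 00 00 00 00 ];
	 *		interrupts = <32 0x8 33 0x8 34 0x8>;
	 *		interrupt-parent = <&ipic>;
	 *		phy-handle = <&phy0>;
	 *	};
	 */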
b31a1d8b | 3054 | |
1da177e4 | 3055 | /* Structure for a device driver */ |
b31a1d8b AF |
3056 | static struct of_platform_driver gfar_driver = { |
3057 | .name = "fsl-gianfar", | |
3058 | .match_table = gfar_match, | |
3059 | ||
1da177e4 LT |
3060 | .probe = gfar_probe, |
3061 | .remove = gfar_remove, | |
be926fc4 AV |
3062 | .suspend = gfar_legacy_suspend, |
3063 | .resume = gfar_legacy_resume, | |
3064 | .driver.pm = GFAR_PM_OPS, | |
1da177e4 LT |
3065 | }; |
3066 | ||
3067 | static int __init gfar_init(void) | |
3068 | { | |
1577ecef | 3069 | return of_register_platform_driver(&gfar_driver); |
1da177e4 LT |
3070 | } |
3071 | ||
3072 | static void __exit gfar_exit(void) | |
3073 | { | |
b31a1d8b | 3074 | of_unregister_platform_driver(&gfar_driver); |
1da177e4 LT |
3075 | } |
3076 | ||
3077 | module_init(gfar_init); | |
3078 | module_exit(gfar_exit); | |
3079 |