pasemi_mac: add local skb alignment
[deliverable/linux.git] / drivers / net / pasemi_mac.c
CommitLineData
f5cd7872
OJ
1/*
2 * Copyright (C) 2006-2007 PA Semi, Inc
3 *
4 * Driver for the PA Semi PWRficient onchip 1G/10G Ethernet MACs
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 as
8 * published by the Free Software Foundation.
9 *
10 * This program is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details.
14 *
15 * You should have received a copy of the GNU General Public License
16 * along with this program; if not, write to the Free Software
17 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
18 */
19
20#include <linux/init.h>
21#include <linux/module.h>
22#include <linux/pci.h>
23#include <linux/interrupt.h>
24#include <linux/dmaengine.h>
25#include <linux/delay.h>
26#include <linux/netdevice.h>
27#include <linux/etherdevice.h>
28#include <asm/dma-mapping.h>
29#include <linux/in.h>
30#include <linux/skbuff.h>
31
32#include <linux/ip.h>
33#include <linux/tcp.h>
34#include <net/checksum.h>
35
771f7404
OJ
36#include <asm/irq.h>
37
f5cd7872
OJ
38#include "pasemi_mac.h"
39
8dc121a4
OJ
40/* We have our own align, since ppc64 in general has it at 0 because
41 * of design flaws in some of the server bridge chips. However, for
42 * PWRficient doing the unaligned copies is more expensive than doing
43 * unaligned DMA, so make sure the data is aligned instead.
44 */
45#define LOCAL_SKB_ALIGN 2
f5cd7872
OJ
46
47/* TODO list
48 *
49 * - Get rid of pci_{read,write}_config(), map registers with ioremap
50 * for performance
51 * - PHY support
52 * - Multicast support
53 * - Large MTU support
54 * - Other performance improvements
55 */
56
57
58/* Must be a power of two */
59#define RX_RING_SIZE 512
60#define TX_RING_SIZE 512
61
ceb51361
OJ
62#define DEFAULT_MSG_ENABLE \
63 (NETIF_MSG_DRV | \
64 NETIF_MSG_PROBE | \
65 NETIF_MSG_LINK | \
66 NETIF_MSG_TIMER | \
67 NETIF_MSG_IFDOWN | \
68 NETIF_MSG_IFUP | \
69 NETIF_MSG_RX_ERR | \
70 NETIF_MSG_TX_ERR)
71
fc9e4d2a
OJ
72#define TX_RING(mac, num) ((mac)->tx->ring[(num) & (TX_RING_SIZE-1)])
73#define TX_RING_INFO(mac, num) ((mac)->tx->ring_info[(num) & (TX_RING_SIZE-1)])
74#define RX_RING(mac, num) ((mac)->rx->ring[(num) & (RX_RING_SIZE-1)])
75#define RX_RING_INFO(mac, num) ((mac)->rx->ring_info[(num) & (RX_RING_SIZE-1)])
f5cd7872
OJ
76#define RX_BUFF(mac, num) ((mac)->rx->buffers[(num) & (RX_RING_SIZE-1)])
77
021fa22e
OJ
78#define RING_USED(ring) (((ring)->next_to_fill - (ring)->next_to_clean) \
79 & ((ring)->size - 1))
80#define RING_AVAIL(ring) ((ring->size) - RING_USED(ring))
81
f5cd7872
OJ
82#define BUF_SIZE 1646 /* 1500 MTU + ETH_HLEN + VLAN_HLEN + 2 64B cachelines */
83
ceb51361
OJ
84MODULE_LICENSE("GPL");
85MODULE_AUTHOR ("Olof Johansson <olof@lixom.net>");
86MODULE_DESCRIPTION("PA Semi PWRficient Ethernet driver");
87
88static int debug = -1; /* -1 == use DEFAULT_MSG_ENABLE as value */
89module_param(debug, int, 0);
90MODULE_PARM_DESC(debug, "PA Semi MAC bitmapped debugging message enable value");
f5cd7872
OJ
91
92static struct pasdma_status *dma_status;
93
a85b9422
OJ
94static void write_iob_reg(struct pasemi_mac *mac, unsigned int reg,
95 unsigned int val)
96{
b6e05a1b 97 out_le32(mac->iob_regs+reg, val);
a85b9422
OJ
98}
99
100static unsigned int read_mac_reg(struct pasemi_mac *mac, unsigned int reg)
101{
b6e05a1b 102 return in_le32(mac->regs+reg);
a85b9422
OJ
103}
104
105static void write_mac_reg(struct pasemi_mac *mac, unsigned int reg,
106 unsigned int val)
107{
b6e05a1b 108 out_le32(mac->regs+reg, val);
a85b9422
OJ
109}
110
111static unsigned int read_dma_reg(struct pasemi_mac *mac, unsigned int reg)
112{
b6e05a1b 113 return in_le32(mac->dma_regs+reg);
a85b9422
OJ
114}
115
116static void write_dma_reg(struct pasemi_mac *mac, unsigned int reg,
117 unsigned int val)
118{
b6e05a1b 119 out_le32(mac->dma_regs+reg, val);
a85b9422
OJ
120}
121
f5cd7872
OJ
122static int pasemi_get_mac_addr(struct pasemi_mac *mac)
123{
124 struct pci_dev *pdev = mac->pdev;
125 struct device_node *dn = pci_device_to_OF_node(pdev);
1af7f056 126 int len;
f5cd7872
OJ
127 const u8 *maddr;
128 u8 addr[6];
129
130 if (!dn) {
131 dev_dbg(&pdev->dev,
132 "No device node for mac, not configuring\n");
133 return -ENOENT;
134 }
135
1af7f056 136 maddr = of_get_property(dn, "local-mac-address", &len);
137
138 if (maddr && len == 6) {
139 memcpy(mac->mac_addr, maddr, 6);
140 return 0;
141 }
142
143 /* Some old versions of firmware mistakenly uses mac-address
144 * (and as a string) instead of a byte array in local-mac-address.
145 */
a5fd22eb 146
a5fd22eb 147 if (maddr == NULL)
9028780a 148 maddr = of_get_property(dn, "mac-address", NULL);
a5fd22eb 149
f5cd7872
OJ
150 if (maddr == NULL) {
151 dev_warn(&pdev->dev,
152 "no mac address in device tree, not configuring\n");
153 return -ENOENT;
154 }
155
1af7f056 156
f5cd7872
OJ
157 if (sscanf(maddr, "%hhx:%hhx:%hhx:%hhx:%hhx:%hhx", &addr[0],
158 &addr[1], &addr[2], &addr[3], &addr[4], &addr[5]) != 6) {
159 dev_warn(&pdev->dev,
160 "can't parse mac address, not configuring\n");
161 return -EINVAL;
162 }
163
1af7f056 164 memcpy(mac->mac_addr, addr, 6);
165
f5cd7872
OJ
166 return 0;
167}
168
ad3c20d1
OJ
169static int pasemi_mac_unmap_tx_skb(struct pasemi_mac *mac,
170 struct sk_buff *skb,
171 dma_addr_t *dmas)
172{
173 int f;
174 int nfrags = skb_shinfo(skb)->nr_frags;
175
176 pci_unmap_single(mac->dma_pdev, dmas[0], skb_headlen(skb),
177 PCI_DMA_TODEVICE);
178
179 for (f = 0; f < nfrags; f++) {
180 skb_frag_t *frag = &skb_shinfo(skb)->frags[f];
181
182 pci_unmap_page(mac->dma_pdev, dmas[f+1], frag->size,
183 PCI_DMA_TODEVICE);
184 }
185 dev_kfree_skb_irq(skb);
186
187 /* Freed descriptor slot + main SKB ptr + nfrags additional ptrs,
188 * aligned up to a power of 2
189 */
190 return (nfrags + 3) & ~1;
191}
192
f5cd7872
OJ
193static int pasemi_mac_setup_rx_resources(struct net_device *dev)
194{
195 struct pasemi_mac_rxring *ring;
196 struct pasemi_mac *mac = netdev_priv(dev);
197 int chan_id = mac->dma_rxch;
198
199 ring = kzalloc(sizeof(*ring), GFP_KERNEL);
200
201 if (!ring)
202 goto out_ring;
203
204 spin_lock_init(&ring->lock);
205
021fa22e 206 ring->size = RX_RING_SIZE;
fc9e4d2a 207 ring->ring_info = kzalloc(sizeof(struct pasemi_mac_buffer) *
f5cd7872
OJ
208 RX_RING_SIZE, GFP_KERNEL);
209
fc9e4d2a
OJ
210 if (!ring->ring_info)
211 goto out_ring_info;
f5cd7872
OJ
212
213 /* Allocate descriptors */
fc9e4d2a
OJ
214 ring->ring = dma_alloc_coherent(&mac->dma_pdev->dev,
215 RX_RING_SIZE * sizeof(u64),
f5cd7872
OJ
216 &ring->dma, GFP_KERNEL);
217
fc9e4d2a
OJ
218 if (!ring->ring)
219 goto out_ring_desc;
f5cd7872 220
fc9e4d2a 221 memset(ring->ring, 0, RX_RING_SIZE * sizeof(u64));
f5cd7872
OJ
222
223 ring->buffers = dma_alloc_coherent(&mac->dma_pdev->dev,
224 RX_RING_SIZE * sizeof(u64),
225 &ring->buf_dma, GFP_KERNEL);
226 if (!ring->buffers)
227 goto out_buffers;
228
229 memset(ring->buffers, 0, RX_RING_SIZE * sizeof(u64));
230
a85b9422 231 write_dma_reg(mac, PAS_DMA_RXCHAN_BASEL(chan_id), PAS_DMA_RXCHAN_BASEL_BRBL(ring->dma));
f5cd7872 232
a85b9422
OJ
233 write_dma_reg(mac, PAS_DMA_RXCHAN_BASEU(chan_id),
234 PAS_DMA_RXCHAN_BASEU_BRBH(ring->dma >> 32) |
fc9e4d2a 235 PAS_DMA_RXCHAN_BASEU_SIZ(RX_RING_SIZE >> 3));
f5cd7872 236
a85b9422 237 write_dma_reg(mac, PAS_DMA_RXCHAN_CFG(chan_id),
c0efd52b 238 PAS_DMA_RXCHAN_CFG_HBU(2));
f5cd7872 239
a85b9422
OJ
240 write_dma_reg(mac, PAS_DMA_RXINT_BASEL(mac->dma_if),
241 PAS_DMA_RXINT_BASEL_BRBL(__pa(ring->buffers)));
f5cd7872 242
a85b9422
OJ
243 write_dma_reg(mac, PAS_DMA_RXINT_BASEU(mac->dma_if),
244 PAS_DMA_RXINT_BASEU_BRBH(__pa(ring->buffers) >> 32) |
245 PAS_DMA_RXINT_BASEU_SIZ(RX_RING_SIZE >> 3));
f5cd7872 246
c0efd52b 247 write_dma_reg(mac, PAS_DMA_RXINT_CFG(mac->dma_if),
9a50bebd
OJ
248 PAS_DMA_RXINT_CFG_DHL(3) |
249 PAS_DMA_RXINT_CFG_L2 |
250 PAS_DMA_RXINT_CFG_LW);
c0efd52b 251
f5cd7872
OJ
252 ring->next_to_fill = 0;
253 ring->next_to_clean = 0;
254
255 snprintf(ring->irq_name, sizeof(ring->irq_name),
256 "%s rx", dev->name);
257 mac->rx = ring;
258
259 return 0;
260
261out_buffers:
262 dma_free_coherent(&mac->dma_pdev->dev,
fc9e4d2a
OJ
263 RX_RING_SIZE * sizeof(u64),
264 mac->rx->ring, mac->rx->dma);
265out_ring_desc:
266 kfree(ring->ring_info);
267out_ring_info:
f5cd7872
OJ
268 kfree(ring);
269out_ring:
270 return -ENOMEM;
271}
272
273
274static int pasemi_mac_setup_tx_resources(struct net_device *dev)
275{
276 struct pasemi_mac *mac = netdev_priv(dev);
277 u32 val;
278 int chan_id = mac->dma_txch;
279 struct pasemi_mac_txring *ring;
280
281 ring = kzalloc(sizeof(*ring), GFP_KERNEL);
282 if (!ring)
283 goto out_ring;
284
285 spin_lock_init(&ring->lock);
286
021fa22e 287 ring->size = TX_RING_SIZE;
fc9e4d2a 288 ring->ring_info = kzalloc(sizeof(struct pasemi_mac_buffer) *
f5cd7872 289 TX_RING_SIZE, GFP_KERNEL);
fc9e4d2a
OJ
290 if (!ring->ring_info)
291 goto out_ring_info;
f5cd7872
OJ
292
293 /* Allocate descriptors */
fc9e4d2a
OJ
294 ring->ring = dma_alloc_coherent(&mac->dma_pdev->dev,
295 TX_RING_SIZE * sizeof(u64),
f5cd7872 296 &ring->dma, GFP_KERNEL);
fc9e4d2a
OJ
297 if (!ring->ring)
298 goto out_ring_desc;
f5cd7872 299
fc9e4d2a 300 memset(ring->ring, 0, TX_RING_SIZE * sizeof(u64));
f5cd7872 301
a85b9422
OJ
302 write_dma_reg(mac, PAS_DMA_TXCHAN_BASEL(chan_id),
303 PAS_DMA_TXCHAN_BASEL_BRBL(ring->dma));
f5cd7872 304 val = PAS_DMA_TXCHAN_BASEU_BRBH(ring->dma >> 32);
fc9e4d2a 305 val |= PAS_DMA_TXCHAN_BASEU_SIZ(TX_RING_SIZE >> 3);
f5cd7872 306
a85b9422 307 write_dma_reg(mac, PAS_DMA_TXCHAN_BASEU(chan_id), val);
f5cd7872 308
a85b9422
OJ
309 write_dma_reg(mac, PAS_DMA_TXCHAN_CFG(chan_id),
310 PAS_DMA_TXCHAN_CFG_TY_IFACE |
311 PAS_DMA_TXCHAN_CFG_TATTR(mac->dma_if) |
312 PAS_DMA_TXCHAN_CFG_UP |
313 PAS_DMA_TXCHAN_CFG_WT(2));
f5cd7872 314
021fa22e 315 ring->next_to_fill = 0;
f5cd7872
OJ
316 ring->next_to_clean = 0;
317
318 snprintf(ring->irq_name, sizeof(ring->irq_name),
319 "%s tx", dev->name);
320 mac->tx = ring;
321
322 return 0;
323
fc9e4d2a
OJ
324out_ring_desc:
325 kfree(ring->ring_info);
326out_ring_info:
f5cd7872
OJ
327 kfree(ring);
328out_ring:
329 return -ENOMEM;
330}
331
332static void pasemi_mac_free_tx_resources(struct net_device *dev)
333{
334 struct pasemi_mac *mac = netdev_priv(dev);
ad3c20d1 335 unsigned int i, j;
f5cd7872 336 struct pasemi_mac_buffer *info;
ad3c20d1
OJ
337 dma_addr_t dmas[MAX_SKB_FRAGS+1];
338 int freed;
fc9e4d2a 339
ad3c20d1 340 for (i = 0; i < TX_RING_SIZE; i += freed) {
fc9e4d2a
OJ
341 info = &TX_RING_INFO(mac, i+1);
342 if (info->dma && info->skb) {
ad3c20d1
OJ
343 for (j = 0; j <= skb_shinfo(info->skb)->nr_frags; j++)
344 dmas[j] = TX_RING_INFO(mac, i+1+j).dma;
345 freed = pasemi_mac_unmap_tx_skb(mac, info->skb, dmas);
346 } else
347 freed = 2;
f5cd7872
OJ
348 }
349
ad3c20d1
OJ
350 for (i = 0; i < TX_RING_SIZE; i++)
351 TX_RING(mac, i) = 0;
352
f5cd7872 353 dma_free_coherent(&mac->dma_pdev->dev,
fc9e4d2a
OJ
354 TX_RING_SIZE * sizeof(u64),
355 mac->tx->ring, mac->tx->dma);
f5cd7872 356
fc9e4d2a 357 kfree(mac->tx->ring_info);
f5cd7872
OJ
358 kfree(mac->tx);
359 mac->tx = NULL;
360}
361
362static void pasemi_mac_free_rx_resources(struct net_device *dev)
363{
364 struct pasemi_mac *mac = netdev_priv(dev);
365 unsigned int i;
366 struct pasemi_mac_buffer *info;
f5cd7872
OJ
367
368 for (i = 0; i < RX_RING_SIZE; i++) {
fc9e4d2a
OJ
369 info = &RX_RING_INFO(mac, i);
370 if (info->skb && info->dma) {
371 pci_unmap_single(mac->dma_pdev,
372 info->dma,
373 info->skb->len,
374 PCI_DMA_FROMDEVICE);
375 dev_kfree_skb_any(info->skb);
f5cd7872 376 }
fc9e4d2a
OJ
377 info->dma = 0;
378 info->skb = NULL;
f5cd7872
OJ
379 }
380
fc9e4d2a
OJ
381 for (i = 0; i < RX_RING_SIZE; i++)
382 RX_RING(mac, i) = 0;
383
f5cd7872 384 dma_free_coherent(&mac->dma_pdev->dev,
fc9e4d2a
OJ
385 RX_RING_SIZE * sizeof(u64),
386 mac->rx->ring, mac->rx->dma);
f5cd7872
OJ
387
388 dma_free_coherent(&mac->dma_pdev->dev, RX_RING_SIZE * sizeof(u64),
389 mac->rx->buffers, mac->rx->buf_dma);
390
fc9e4d2a 391 kfree(mac->rx->ring_info);
f5cd7872
OJ
392 kfree(mac->rx);
393 mac->rx = NULL;
394}
395
928773c2 396static void pasemi_mac_replenish_rx_ring(struct net_device *dev, int limit)
f5cd7872
OJ
397{
398 struct pasemi_mac *mac = netdev_priv(dev);
f5cd7872 399 int start = mac->rx->next_to_fill;
fc9e4d2a 400 unsigned int fill, count;
f5cd7872 401
cd4ceb24 402 if (limit <= 0)
f5cd7872
OJ
403 return;
404
fc9e4d2a 405 fill = start;
928773c2 406 for (count = 0; count < limit; count++) {
fc9e4d2a
OJ
407 struct pasemi_mac_buffer *info = &RX_RING_INFO(mac, fill);
408 u64 *buff = &RX_BUFF(mac, fill);
f5cd7872
OJ
409 struct sk_buff *skb;
410 dma_addr_t dma;
411
fc9e4d2a
OJ
412 /* Entry in use? */
413 WARN_ON(*buff);
414
9f05cfe2
OJ
415 /* skb might still be in there for recycle on short receives */
416 if (info->skb)
417 skb = info->skb;
8dc121a4 418 else {
9f05cfe2 419 skb = dev_alloc_skb(BUF_SIZE);
8dc121a4
OJ
420 skb_reserve(skb, LOCAL_SKB_ALIGN);
421 }
f5cd7872 422
9f05cfe2 423 if (unlikely(!skb))
f5cd7872 424 break;
f5cd7872 425
8dc121a4
OJ
426 dma = pci_map_single(mac->dma_pdev, skb->data,
427 BUF_SIZE - LOCAL_SKB_ALIGN,
f5cd7872
OJ
428 PCI_DMA_FROMDEVICE);
429
cd4ceb24 430 if (unlikely(dma_mapping_error(dma))) {
f5cd7872 431 dev_kfree_skb_irq(info->skb);
f5cd7872
OJ
432 break;
433 }
434
435 info->skb = skb;
436 info->dma = dma;
437 *buff = XCT_RXB_LEN(BUF_SIZE) | XCT_RXB_ADDR(dma);
fc9e4d2a 438 fill++;
f5cd7872
OJ
439 }
440
441 wmb();
442
928773c2
OJ
443 write_dma_reg(mac, PAS_DMA_RXCHAN_INCR(mac->dma_rxch), count);
444 write_dma_reg(mac, PAS_DMA_RXINT_INCR(mac->dma_if), count);
f5cd7872 445
928773c2 446 mac->rx->next_to_fill += count;
f5cd7872
OJ
447}
448
1b0335ea
OJ
449static void pasemi_mac_restart_rx_intr(struct pasemi_mac *mac)
450{
52a94351 451 unsigned int reg, pcnt;
1b0335ea
OJ
452 /* Re-enable packet count interrupts: finally
453 * ack the packet count interrupt we got in rx_intr.
454 */
455
52a94351 456 pcnt = *mac->rx_status & PAS_STATUS_PCNT_M;
1b0335ea 457
52a94351 458 reg = PAS_IOB_DMA_RXCH_RESET_PCNT(pcnt) | PAS_IOB_DMA_RXCH_RESET_PINTC;
1b0335ea 459
a85b9422 460 write_iob_reg(mac, PAS_IOB_DMA_RXCH_RESET(mac->dma_rxch), reg);
1b0335ea
OJ
461}
462
463static void pasemi_mac_restart_tx_intr(struct pasemi_mac *mac)
464{
52a94351 465 unsigned int reg, pcnt;
1b0335ea
OJ
466
467 /* Re-enable packet count interrupts */
52a94351 468 pcnt = *mac->tx_status & PAS_STATUS_PCNT_M;
1b0335ea 469
52a94351 470 reg = PAS_IOB_DMA_TXCH_RESET_PCNT(pcnt) | PAS_IOB_DMA_TXCH_RESET_PINTC;
1b0335ea 471
a85b9422 472 write_iob_reg(mac, PAS_IOB_DMA_TXCH_RESET(mac->dma_txch), reg);
1b0335ea
OJ
473}
474
475
69c29d89
OJ
476static inline void pasemi_mac_rx_error(struct pasemi_mac *mac, u64 macrx)
477{
478 unsigned int rcmdsta, ccmdsta;
479
480 if (!netif_msg_rx_err(mac))
481 return;
482
483 rcmdsta = read_dma_reg(mac, PAS_DMA_RXINT_RCMDSTA(mac->dma_if));
484 ccmdsta = read_dma_reg(mac, PAS_DMA_RXCHAN_CCMDSTA(mac->dma_rxch));
485
486 printk(KERN_ERR "pasemi_mac: rx error. macrx %016lx, rx status %lx\n",
487 macrx, *mac->rx_status);
488
489 printk(KERN_ERR "pasemi_mac: rcmdsta %08x ccmdsta %08x\n",
490 rcmdsta, ccmdsta);
491}
492
493static inline void pasemi_mac_tx_error(struct pasemi_mac *mac, u64 mactx)
494{
495 unsigned int cmdsta;
496
497 if (!netif_msg_tx_err(mac))
498 return;
499
500 cmdsta = read_dma_reg(mac, PAS_DMA_TXCHAN_TCMDSTA(mac->dma_txch));
501
502 printk(KERN_ERR "pasemi_mac: tx error. mactx 0x%016lx, "\
503 "tx status 0x%016lx\n", mactx, *mac->tx_status);
504
505 printk(KERN_ERR "pasemi_mac: tcmdsta 0x%08x\n", cmdsta);
506}
507
f5cd7872
OJ
508static int pasemi_mac_clean_rx(struct pasemi_mac *mac, int limit)
509{
cd4ceb24
OJ
510 unsigned int n;
511 int count;
cd4ceb24
OJ
512 struct pasemi_mac_buffer *info;
513 struct sk_buff *skb;
514 unsigned int i, len;
515 u64 macrx;
516 dma_addr_t dma;
f5cd7872
OJ
517
518 spin_lock(&mac->rx->lock);
519
cd4ceb24 520 n = mac->rx->next_to_clean;
f5cd7872 521
cd4ceb24 522 for (count = limit; count; count--) {
f5cd7872
OJ
523
524 rmb();
525
fc9e4d2a 526 macrx = RX_RING(mac, n);
f5cd7872 527
69c29d89
OJ
528 if ((macrx & XCT_MACRX_E) ||
529 (*mac->rx_status & PAS_STATUS_ERROR))
530 pasemi_mac_rx_error(mac, macrx);
531
cd4ceb24 532 if (!(macrx & XCT_MACRX_O))
f5cd7872
OJ
533 break;
534
f5cd7872
OJ
535 info = NULL;
536
537 /* We have to scan for our skb since there's no way
538 * to back-map them from the descriptor, and if we
539 * have several receive channels then they might not
540 * show up in the same order as they were put on the
541 * interface ring.
542 */
543
fc9e4d2a
OJ
544 dma = (RX_RING(mac, n+1) & XCT_PTR_ADDR_M);
545 for (i = mac->rx->next_to_fill;
546 i < (mac->rx->next_to_fill + RX_RING_SIZE);
547 i++) {
548 info = &RX_RING_INFO(mac, i);
f5cd7872
OJ
549 if (info->dma == dma)
550 break;
551 }
fc9e4d2a 552
26fcfa95 553 prefetchw(info);
f5cd7872 554
9f05cfe2 555 skb = info->skb;
26fcfa95 556 prefetchw(skb);
cd4ceb24 557 info->dma = 0;
f5cd7872 558
cd4ceb24 559 pci_unmap_single(mac->dma_pdev, dma, skb->len,
f5cd7872
OJ
560 PCI_DMA_FROMDEVICE);
561
cd4ceb24 562 len = (macrx & XCT_MACRX_LLEN_M) >> XCT_MACRX_LLEN_S;
f5cd7872 563
9f05cfe2 564 if (len < 256) {
8dc121a4
OJ
565 struct sk_buff *new_skb;
566
567 new_skb = netdev_alloc_skb(mac->netdev,
568 len + LOCAL_SKB_ALIGN);
9f05cfe2 569 if (new_skb) {
8dc121a4 570 skb_reserve(new_skb, LOCAL_SKB_ALIGN);
73344863 571 memcpy(new_skb->data, skb->data, len);
9f05cfe2
OJ
572 /* save the skb in buffer_info as good */
573 skb = new_skb;
574 }
575 /* else just continue with the old one */
576 } else
577 info->skb = NULL;
f5cd7872 578
fc9e4d2a
OJ
579 /* Need to zero it out since hardware doesn't, since the
580 * replenish loop uses it to tell when it's done.
581 */
582 RX_BUFF(mac, i) = 0;
583
f5cd7872
OJ
584 skb_put(skb, len);
585
26fcfa95 586 if (likely((macrx & XCT_MACRX_HTY_M) == XCT_MACRX_HTY_IPV4_OK)) {
38bf3184 587 skb->ip_summed = CHECKSUM_UNNECESSARY;
cd4ceb24 588 skb->csum = (macrx & XCT_MACRX_CSUM_M) >>
f5cd7872
OJ
589 XCT_MACRX_CSUM_S;
590 } else
591 skb->ip_summed = CHECKSUM_NONE;
592
09f75cd7
JG
593 mac->netdev->stats.rx_bytes += len;
594 mac->netdev->stats.rx_packets++;
f5cd7872 595
26fcfa95 596 skb->protocol = eth_type_trans(skb, mac->netdev);
f5cd7872
OJ
597 netif_receive_skb(skb);
598
fc9e4d2a
OJ
599 RX_RING(mac, n) = 0;
600 RX_RING(mac, n+1) = 0;
cd4ceb24 601
fc9e4d2a 602 n += 2;
f5cd7872
OJ
603 }
604
9a50bebd
OJ
605 if (n > RX_RING_SIZE) {
606 /* Errata 5971 workaround: L2 target of headers */
607 write_iob_reg(mac, PAS_IOB_COM_PKTHDRCNT, 0);
608 n &= (RX_RING_SIZE-1);
609 }
fc9e4d2a 610 mac->rx->next_to_clean = n;
928773c2 611 pasemi_mac_replenish_rx_ring(mac->netdev, limit-count);
f5cd7872
OJ
612
613 spin_unlock(&mac->rx->lock);
614
615 return count;
616}
617
ad3c20d1
OJ
618/* Can't make this too large or we blow the kernel stack limits */
619#define TX_CLEAN_BATCHSIZE (128/MAX_SKB_FRAGS)
620
f5cd7872
OJ
621static int pasemi_mac_clean_tx(struct pasemi_mac *mac)
622{
ad3c20d1 623 int i, j;
f5cd7872 624 struct pasemi_mac_buffer *info;
ad3c20d1 625 unsigned int start, descr_count, buf_count, limit;
02df6cfa 626 unsigned int total_count;
ca7e235f 627 unsigned long flags;
ad3c20d1
OJ
628 struct sk_buff *skbs[TX_CLEAN_BATCHSIZE];
629 dma_addr_t dmas[TX_CLEAN_BATCHSIZE][MAX_SKB_FRAGS+1];
f5cd7872 630
02df6cfa 631 total_count = 0;
ad3c20d1 632 limit = TX_CLEAN_BATCHSIZE;
02df6cfa 633restart:
f5cd7872
OJ
634 spin_lock_irqsave(&mac->tx->lock, flags);
635
636 start = mac->tx->next_to_clean;
02df6cfa 637
ad3c20d1
OJ
638 buf_count = 0;
639 descr_count = 0;
f5cd7872 640
ad3c20d1
OJ
641 for (i = start;
642 descr_count < limit && i < mac->tx->next_to_fill;
643 i += buf_count) {
fc9e4d2a 644 u64 mactx = TX_RING(mac, i);
ad3c20d1 645
fc9e4d2a 646 if ((mactx & XCT_MACTX_E) ||
69c29d89 647 (*mac->tx_status & PAS_STATUS_ERROR))
fc9e4d2a 648 pasemi_mac_tx_error(mac, mactx);
69c29d89 649
fc9e4d2a 650 if (unlikely(mactx & XCT_MACTX_O))
02df6cfa 651 /* Not yet transmitted */
f5cd7872
OJ
652 break;
653
fc9e4d2a 654 info = &TX_RING_INFO(mac, i+1);
ad3c20d1
OJ
655 skbs[descr_count] = info->skb;
656
657 buf_count = 2 + skb_shinfo(info->skb)->nr_frags;
658 for (j = 0; j <= skb_shinfo(info->skb)->nr_frags; j++)
659 dmas[descr_count][j] = TX_RING_INFO(mac, i+1+j).dma;
660
f5cd7872 661
f5cd7872 662 info->dma = 0;
fc9e4d2a
OJ
663 TX_RING(mac, i) = 0;
664 TX_RING(mac, i+1) = 0;
ad3c20d1
OJ
665 TX_RING_INFO(mac, i+1).skb = 0;
666 TX_RING_INFO(mac, i+1).dma = 0;
fc9e4d2a 667
ad3c20d1
OJ
668 /* Since we always fill with an even number of entries, make
669 * sure we skip any unused one at the end as well.
670 */
671 if (buf_count & 1)
672 buf_count++;
673 descr_count++;
f5cd7872 674 }
ad3c20d1
OJ
675 mac->tx->next_to_clean = i;
676
f5cd7872 677 spin_unlock_irqrestore(&mac->tx->lock, flags);
0ce68c74
OJ
678 netif_wake_queue(mac->netdev);
679
ad3c20d1
OJ
680 for (i = 0; i < descr_count; i++)
681 pasemi_mac_unmap_tx_skb(mac, skbs[i], dmas[i]);
02df6cfa 682
ad3c20d1 683 total_count += descr_count;
02df6cfa
OJ
684
685 /* If the batch was full, try to clean more */
ad3c20d1 686 if (descr_count == limit)
02df6cfa
OJ
687 goto restart;
688
689 return total_count;
f5cd7872
OJ
690}
691
692
693static irqreturn_t pasemi_mac_rx_intr(int irq, void *data)
694{
695 struct net_device *dev = data;
696 struct pasemi_mac *mac = netdev_priv(dev);
697 unsigned int reg;
698
6dfa7522 699 if (!(*mac->rx_status & PAS_STATUS_CAUSE_M))
f5cd7872
OJ
700 return IRQ_NONE;
701
6dfa7522
OJ
702 /* Don't reset packet count so it won't fire again but clear
703 * all others.
704 */
705
6dfa7522
OJ
706 reg = 0;
707 if (*mac->rx_status & PAS_STATUS_SOFT)
708 reg |= PAS_IOB_DMA_RXCH_RESET_SINTC;
709 if (*mac->rx_status & PAS_STATUS_ERROR)
710 reg |= PAS_IOB_DMA_RXCH_RESET_DINTC;
f5cd7872
OJ
711 if (*mac->rx_status & PAS_STATUS_TIMER)
712 reg |= PAS_IOB_DMA_RXCH_RESET_TINTC;
713
bea3348e 714 netif_rx_schedule(dev, &mac->napi);
6dfa7522 715
a85b9422 716 write_iob_reg(mac, PAS_IOB_DMA_RXCH_RESET(mac->dma_rxch), reg);
f5cd7872
OJ
717
718 return IRQ_HANDLED;
719}
720
721static irqreturn_t pasemi_mac_tx_intr(int irq, void *data)
722{
723 struct net_device *dev = data;
724 struct pasemi_mac *mac = netdev_priv(dev);
52a94351 725 unsigned int reg, pcnt;
f5cd7872 726
6dfa7522 727 if (!(*mac->tx_status & PAS_STATUS_CAUSE_M))
f5cd7872
OJ
728 return IRQ_NONE;
729
730 pasemi_mac_clean_tx(mac);
731
52a94351
OJ
732 pcnt = *mac->tx_status & PAS_STATUS_PCNT_M;
733
734 reg = PAS_IOB_DMA_TXCH_RESET_PCNT(pcnt) | PAS_IOB_DMA_TXCH_RESET_PINTC;
6dfa7522
OJ
735
736 if (*mac->tx_status & PAS_STATUS_SOFT)
737 reg |= PAS_IOB_DMA_TXCH_RESET_SINTC;
738 if (*mac->tx_status & PAS_STATUS_ERROR)
739 reg |= PAS_IOB_DMA_TXCH_RESET_DINTC;
f5cd7872 740
a85b9422 741 write_iob_reg(mac, PAS_IOB_DMA_TXCH_RESET(mac->dma_txch), reg);
f5cd7872 742
f5cd7872
OJ
743 return IRQ_HANDLED;
744}
745
bb6e9590
OJ
746static void pasemi_adjust_link(struct net_device *dev)
747{
748 struct pasemi_mac *mac = netdev_priv(dev);
749 int msg;
750 unsigned int flags;
751 unsigned int new_flags;
752
753 if (!mac->phydev->link) {
754 /* If no link, MAC speed settings don't matter. Just report
755 * link down and return.
756 */
757 if (mac->link && netif_msg_link(mac))
758 printk(KERN_INFO "%s: Link is down.\n", dev->name);
759
760 netif_carrier_off(dev);
761 mac->link = 0;
762
763 return;
764 } else
765 netif_carrier_on(dev);
766
a85b9422 767 flags = read_mac_reg(mac, PAS_MAC_CFG_PCFG);
bb6e9590
OJ
768 new_flags = flags & ~(PAS_MAC_CFG_PCFG_HD | PAS_MAC_CFG_PCFG_SPD_M |
769 PAS_MAC_CFG_PCFG_TSR_M);
770
771 if (!mac->phydev->duplex)
772 new_flags |= PAS_MAC_CFG_PCFG_HD;
773
774 switch (mac->phydev->speed) {
775 case 1000:
776 new_flags |= PAS_MAC_CFG_PCFG_SPD_1G |
777 PAS_MAC_CFG_PCFG_TSR_1G;
778 break;
779 case 100:
780 new_flags |= PAS_MAC_CFG_PCFG_SPD_100M |
781 PAS_MAC_CFG_PCFG_TSR_100M;
782 break;
783 case 10:
784 new_flags |= PAS_MAC_CFG_PCFG_SPD_10M |
785 PAS_MAC_CFG_PCFG_TSR_10M;
786 break;
787 default:
788 printk("Unsupported speed %d\n", mac->phydev->speed);
789 }
790
791 /* Print on link or speed/duplex change */
792 msg = mac->link != mac->phydev->link || flags != new_flags;
793
794 mac->duplex = mac->phydev->duplex;
795 mac->speed = mac->phydev->speed;
796 mac->link = mac->phydev->link;
797
798 if (new_flags != flags)
a85b9422 799 write_mac_reg(mac, PAS_MAC_CFG_PCFG, new_flags);
bb6e9590
OJ
800
801 if (msg && netif_msg_link(mac))
802 printk(KERN_INFO "%s: Link is up at %d Mbps, %s duplex.\n",
803 dev->name, mac->speed, mac->duplex ? "full" : "half");
804}
805
806static int pasemi_mac_phy_init(struct net_device *dev)
807{
808 struct pasemi_mac *mac = netdev_priv(dev);
809 struct device_node *dn, *phy_dn;
810 struct phy_device *phydev;
811 unsigned int phy_id;
812 const phandle *ph;
813 const unsigned int *prop;
814 struct resource r;
815 int ret;
816
817 dn = pci_device_to_OF_node(mac->pdev);
9028780a 818 ph = of_get_property(dn, "phy-handle", NULL);
bb6e9590
OJ
819 if (!ph)
820 return -ENODEV;
821 phy_dn = of_find_node_by_phandle(*ph);
822
9028780a 823 prop = of_get_property(phy_dn, "reg", NULL);
bb6e9590
OJ
824 ret = of_address_to_resource(phy_dn->parent, 0, &r);
825 if (ret)
826 goto err;
827
828 phy_id = *prop;
829 snprintf(mac->phy_id, BUS_ID_SIZE, PHY_ID_FMT, (int)r.start, phy_id);
830
831 of_node_put(phy_dn);
832
833 mac->link = 0;
834 mac->speed = 0;
835 mac->duplex = -1;
836
837 phydev = phy_connect(dev, mac->phy_id, &pasemi_adjust_link, 0, PHY_INTERFACE_MODE_SGMII);
838
839 if (IS_ERR(phydev)) {
840 printk(KERN_ERR "%s: Could not attach to phy\n", dev->name);
841 return PTR_ERR(phydev);
842 }
843
844 mac->phydev = phydev;
845
846 return 0;
847
848err:
849 of_node_put(phy_dn);
850 return -ENODEV;
851}
852
853
f5cd7872
OJ
854static int pasemi_mac_open(struct net_device *dev)
855{
856 struct pasemi_mac *mac = netdev_priv(dev);
771f7404 857 int base_irq;
f5cd7872
OJ
858 unsigned int flags;
859 int ret;
860
861 /* enable rx section */
a85b9422 862 write_dma_reg(mac, PAS_DMA_COM_RXCMD, PAS_DMA_COM_RXCMD_EN);
f5cd7872
OJ
863
864 /* enable tx section */
a85b9422 865 write_dma_reg(mac, PAS_DMA_COM_TXCMD, PAS_DMA_COM_TXCMD_EN);
f5cd7872
OJ
866
867 flags = PAS_MAC_CFG_TXP_FCE | PAS_MAC_CFG_TXP_FPC(3) |
868 PAS_MAC_CFG_TXP_SL(3) | PAS_MAC_CFG_TXP_COB(0xf) |
869 PAS_MAC_CFG_TXP_TIFT(8) | PAS_MAC_CFG_TXP_TIFG(12);
870
a85b9422 871 write_mac_reg(mac, PAS_MAC_CFG_TXP, flags);
f5cd7872 872
a85b9422
OJ
873 write_iob_reg(mac, PAS_IOB_DMA_RXCH_CFG(mac->dma_rxch),
874 PAS_IOB_DMA_RXCH_CFG_CNTTH(0));
f5cd7872 875
a85b9422 876 write_iob_reg(mac, PAS_IOB_DMA_TXCH_CFG(mac->dma_txch),
02df6cfa 877 PAS_IOB_DMA_TXCH_CFG_CNTTH(128));
f5cd7872 878
1b0335ea
OJ
879 /* Clear out any residual packet count state from firmware */
880 pasemi_mac_restart_rx_intr(mac);
881 pasemi_mac_restart_tx_intr(mac);
882
6dfa7522 883 /* 0xffffff is max value, about 16ms */
a85b9422
OJ
884 write_iob_reg(mac, PAS_IOB_DMA_COM_TIMEOUTCFG,
885 PAS_IOB_DMA_COM_TIMEOUTCFG_TCNT(0xffffff));
f5cd7872 886
f5cd7872
OJ
887 ret = pasemi_mac_setup_rx_resources(dev);
888 if (ret)
889 goto out_rx_resources;
890
891 ret = pasemi_mac_setup_tx_resources(dev);
892 if (ret)
893 goto out_tx_resources;
894
a85b9422
OJ
895 write_mac_reg(mac, PAS_MAC_IPC_CHNL,
896 PAS_MAC_IPC_CHNL_DCHNO(mac->dma_rxch) |
897 PAS_MAC_IPC_CHNL_BCH(mac->dma_rxch));
f5cd7872
OJ
898
899 /* enable rx if */
a85b9422
OJ
900 write_dma_reg(mac, PAS_DMA_RXINT_RCMDSTA(mac->dma_if),
901 PAS_DMA_RXINT_RCMDSTA_EN);
f5cd7872
OJ
902
903 /* enable rx channel */
a85b9422
OJ
904 write_dma_reg(mac, PAS_DMA_RXCHAN_CCMDSTA(mac->dma_rxch),
905 PAS_DMA_RXCHAN_CCMDSTA_EN |
906 PAS_DMA_RXCHAN_CCMDSTA_DU);
f5cd7872
OJ
907
908 /* enable tx channel */
a85b9422
OJ
909 write_dma_reg(mac, PAS_DMA_TXCHAN_TCMDSTA(mac->dma_txch),
910 PAS_DMA_TXCHAN_TCMDSTA_EN);
f5cd7872 911
928773c2 912 pasemi_mac_replenish_rx_ring(dev, RX_RING_SIZE);
f5cd7872 913
36033766
OJ
914 flags = PAS_MAC_CFG_PCFG_S1 | PAS_MAC_CFG_PCFG_PE |
915 PAS_MAC_CFG_PCFG_PR | PAS_MAC_CFG_PCFG_CE;
916
917 if (mac->type == MAC_TYPE_GMAC)
918 flags |= PAS_MAC_CFG_PCFG_TSR_1G | PAS_MAC_CFG_PCFG_SPD_1G;
919 else
920 flags |= PAS_MAC_CFG_PCFG_TSR_10G | PAS_MAC_CFG_PCFG_SPD_10G;
921
922 /* Enable interface in MAC */
923 write_mac_reg(mac, PAS_MAC_CFG_PCFG, flags);
924
bb6e9590
OJ
925 ret = pasemi_mac_phy_init(dev);
926 /* Some configs don't have PHYs (XAUI etc), so don't complain about
927 * failed init due to -ENODEV.
928 */
929 if (ret && ret != -ENODEV)
930 dev_warn(&mac->pdev->dev, "phy init failed: %d\n", ret);
931
f5cd7872 932 netif_start_queue(dev);
bea3348e 933 napi_enable(&mac->napi);
f5cd7872 934
771f7404
OJ
935 /* Interrupts are a bit different for our DMA controller: While
936 * it's got one a regular PCI device header, the interrupt there
937 * is really the base of the range it's using. Each tx and rx
938 * channel has it's own interrupt source.
939 */
940
941 base_irq = virq_to_hw(mac->dma_pdev->irq);
942
943 mac->tx_irq = irq_create_mapping(NULL, base_irq + mac->dma_txch);
944 mac->rx_irq = irq_create_mapping(NULL, base_irq + 20 + mac->dma_txch);
945
946 ret = request_irq(mac->tx_irq, &pasemi_mac_tx_intr, IRQF_DISABLED,
f5cd7872
OJ
947 mac->tx->irq_name, dev);
948 if (ret) {
949 dev_err(&mac->pdev->dev, "request_irq of irq %d failed: %d\n",
771f7404 950 base_irq + mac->dma_txch, ret);
f5cd7872
OJ
951 goto out_tx_int;
952 }
953
771f7404 954 ret = request_irq(mac->rx_irq, &pasemi_mac_rx_intr, IRQF_DISABLED,
f5cd7872
OJ
955 mac->rx->irq_name, dev);
956 if (ret) {
957 dev_err(&mac->pdev->dev, "request_irq of irq %d failed: %d\n",
771f7404 958 base_irq + 20 + mac->dma_rxch, ret);
f5cd7872
OJ
959 goto out_rx_int;
960 }
961
bb6e9590
OJ
962 if (mac->phydev)
963 phy_start(mac->phydev);
964
f5cd7872
OJ
965 return 0;
966
967out_rx_int:
771f7404 968 free_irq(mac->tx_irq, dev);
f5cd7872 969out_tx_int:
bea3348e 970 napi_disable(&mac->napi);
f5cd7872
OJ
971 netif_stop_queue(dev);
972 pasemi_mac_free_tx_resources(dev);
973out_tx_resources:
974 pasemi_mac_free_rx_resources(dev);
975out_rx_resources:
976
977 return ret;
978}
979
/* Maximum spins waiting for a DMA channel/interface to report idle */
#define MAX_RETRIES 5000

/* pasemi_mac_close - ndo stop hook: quiesce the interface.
 *
 * Ordering matters here: stop the stack and NAPI first, reclaim any
 * in-flight buffers, command the DMA engines to stop (ST bit), poll each
 * until it drops its ACT bit, and only then clear the command/status
 * registers entirely -- the hardware cannot be disabled while active.
 * Returns 0 (failures to stop a channel are logged, not fatal).
 */
static int pasemi_mac_close(struct net_device *dev)
{
	struct pasemi_mac *mac = netdev_priv(dev);
	unsigned int stat;
	int retries;

	if (mac->phydev) {
		phy_stop(mac->phydev);
		phy_disconnect(mac->phydev);
	}

	netif_stop_queue(dev);
	napi_disable(&mac->napi);

	/* Clean out any pending buffers */
	pasemi_mac_clean_tx(mac);
	pasemi_mac_clean_rx(mac, RX_RING_SIZE);

	/* Disable interface */
	write_dma_reg(mac, PAS_DMA_TXCHAN_TCMDSTA(mac->dma_txch), PAS_DMA_TXCHAN_TCMDSTA_ST);
	write_dma_reg(mac, PAS_DMA_RXINT_RCMDSTA(mac->dma_if), PAS_DMA_RXINT_RCMDSTA_ST);
	write_dma_reg(mac, PAS_DMA_RXCHAN_CCMDSTA(mac->dma_rxch), PAS_DMA_RXCHAN_CCMDSTA_ST);

	/* Wait for the tx channel to drain; stat is always written at least
	 * once since MAX_RETRIES > 0.
	 */
	for (retries = 0; retries < MAX_RETRIES; retries++) {
		stat = read_dma_reg(mac, PAS_DMA_TXCHAN_TCMDSTA(mac->dma_txch));
		if (!(stat & PAS_DMA_TXCHAN_TCMDSTA_ACT))
			break;
		cond_resched();
	}

	if (stat & PAS_DMA_TXCHAN_TCMDSTA_ACT)
		dev_err(&mac->dma_pdev->dev, "Failed to stop tx channel\n");

	/* Same drain/poll dance for the rx channel... */
	for (retries = 0; retries < MAX_RETRIES; retries++) {
		stat = read_dma_reg(mac, PAS_DMA_RXCHAN_CCMDSTA(mac->dma_rxch));
		if (!(stat & PAS_DMA_RXCHAN_CCMDSTA_ACT))
			break;
		cond_resched();
	}

	if (stat & PAS_DMA_RXCHAN_CCMDSTA_ACT)
		dev_err(&mac->dma_pdev->dev, "Failed to stop rx channel\n");

	/* ...and for the rx interface */
	for (retries = 0; retries < MAX_RETRIES; retries++) {
		stat = read_dma_reg(mac, PAS_DMA_RXINT_RCMDSTA(mac->dma_if));
		if (!(stat & PAS_DMA_RXINT_RCMDSTA_ACT))
			break;
		cond_resched();
	}

	if (stat & PAS_DMA_RXINT_RCMDSTA_ACT)
		dev_err(&mac->dma_pdev->dev, "Failed to stop rx interface\n");

	/* Then, disable the channel. This must be done separately from
	 * stopping, since you can't disable when active.
	 */

	write_dma_reg(mac, PAS_DMA_TXCHAN_TCMDSTA(mac->dma_txch), 0);
	write_dma_reg(mac, PAS_DMA_RXCHAN_CCMDSTA(mac->dma_rxch), 0);
	write_dma_reg(mac, PAS_DMA_RXINT_RCMDSTA(mac->dma_if), 0);

	free_irq(mac->tx_irq, dev);
	free_irq(mac->rx_irq, dev);

	/* Free resources */
	pasemi_mac_free_rx_resources(dev);
	pasemi_mac_free_tx_resources(dev);

	return 0;
}
1052
1053static int pasemi_mac_start_tx(struct sk_buff *skb, struct net_device *dev)
1054{
1055 struct pasemi_mac *mac = netdev_priv(dev);
1056 struct pasemi_mac_txring *txring;
ad3c20d1
OJ
1057 u64 dflags, mactx;
1058 dma_addr_t map[MAX_SKB_FRAGS+1];
1059 unsigned int map_size[MAX_SKB_FRAGS+1];
ca7e235f 1060 unsigned long flags;
ad3c20d1 1061 int i, nfrags;
f5cd7872
OJ
1062
1063 dflags = XCT_MACTX_O | XCT_MACTX_ST | XCT_MACTX_SS | XCT_MACTX_CRC_PAD;
1064
1065 if (skb->ip_summed == CHECKSUM_PARTIAL) {
d56f90a7
ACM
1066 const unsigned char *nh = skb_network_header(skb);
1067
eddc9ec5 1068 switch (ip_hdr(skb)->protocol) {
f5cd7872
OJ
1069 case IPPROTO_TCP:
1070 dflags |= XCT_MACTX_CSUM_TCP;
cfe1fc77 1071 dflags |= XCT_MACTX_IPH(skb_network_header_len(skb) >> 2);
d56f90a7 1072 dflags |= XCT_MACTX_IPO(nh - skb->data);
f5cd7872
OJ
1073 break;
1074 case IPPROTO_UDP:
1075 dflags |= XCT_MACTX_CSUM_UDP;
cfe1fc77 1076 dflags |= XCT_MACTX_IPH(skb_network_header_len(skb) >> 2);
d56f90a7 1077 dflags |= XCT_MACTX_IPO(nh - skb->data);
f5cd7872
OJ
1078 break;
1079 }
1080 }
1081
ad3c20d1
OJ
1082 nfrags = skb_shinfo(skb)->nr_frags;
1083
1084 map[0] = pci_map_single(mac->dma_pdev, skb->data, skb_headlen(skb),
1085 PCI_DMA_TODEVICE);
1086 map_size[0] = skb_headlen(skb);
1087 if (dma_mapping_error(map[0]))
1088 goto out_err_nolock;
1089
1090 for (i = 0; i < nfrags; i++) {
1091 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
f5cd7872 1092
ad3c20d1
OJ
1093 map[i+1] = pci_map_page(mac->dma_pdev, frag->page,
1094 frag->page_offset, frag->size,
1095 PCI_DMA_TODEVICE);
1096 map_size[i+1] = frag->size;
1097 if (dma_mapping_error(map[i+1])) {
1098 nfrags = i;
1099 goto out_err_nolock;
1100 }
1101 }
f5cd7872 1102
26fcfa95 1103 mactx = dflags | XCT_MACTX_LLEN(skb->len);
26fcfa95 1104
f5cd7872
OJ
1105 txring = mac->tx;
1106
1107 spin_lock_irqsave(&txring->lock, flags);
1108
ad3c20d1 1109 if (RING_AVAIL(txring) <= nfrags+3) {
f5cd7872
OJ
1110 spin_unlock_irqrestore(&txring->lock, flags);
1111 pasemi_mac_clean_tx(mac);
52a94351 1112 pasemi_mac_restart_tx_intr(mac);
f5cd7872
OJ
1113 spin_lock_irqsave(&txring->lock, flags);
1114
ad3c20d1 1115 if (RING_AVAIL(txring) <= nfrags+3) {
f5cd7872
OJ
1116 /* Still no room -- stop the queue and wait for tx
1117 * intr when there's room.
1118 */
1119 netif_stop_queue(dev);
1120 goto out_err;
1121 }
1122 }
1123
fc9e4d2a 1124 TX_RING(mac, txring->next_to_fill) = mactx;
ad3c20d1
OJ
1125 txring->next_to_fill++;
1126 TX_RING_INFO(mac, txring->next_to_fill).skb = skb;
1127 for (i = 0; i <= nfrags; i++) {
1128 TX_RING(mac, txring->next_to_fill+i) =
1129 XCT_PTR_LEN(map_size[i]) | XCT_PTR_ADDR(map[i]);
1130 TX_RING_INFO(mac, txring->next_to_fill+i).dma = map[i];
1131 }
1132
1133 /* We have to add an even number of 8-byte entries to the ring
1134 * even if the last one is unused. That means always an odd number
1135 * of pointers + one mactx descriptor.
1136 */
1137 if (nfrags & 1)
1138 nfrags++;
fc9e4d2a 1139
ad3c20d1 1140 txring->next_to_fill += nfrags + 1;
f5cd7872 1141
f5cd7872 1142
09f75cd7
JG
1143 dev->stats.tx_packets++;
1144 dev->stats.tx_bytes += skb->len;
f5cd7872
OJ
1145
1146 spin_unlock_irqrestore(&txring->lock, flags);
1147
ad3c20d1 1148 write_dma_reg(mac, PAS_DMA_TXCHAN_INCR(mac->dma_txch), (nfrags+2) >> 1);
f5cd7872
OJ
1149
1150 return NETDEV_TX_OK;
1151
1152out_err:
1153 spin_unlock_irqrestore(&txring->lock, flags);
ad3c20d1
OJ
1154out_err_nolock:
1155 while (nfrags--)
1156 pci_unmap_single(mac->dma_pdev, map[nfrags], map_size[nfrags],
1157 PCI_DMA_TODEVICE);
1158
f5cd7872
OJ
1159 return NETDEV_TX_BUSY;
1160}
1161
f5cd7872
OJ
1162static void pasemi_mac_set_rx_mode(struct net_device *dev)
1163{
1164 struct pasemi_mac *mac = netdev_priv(dev);
1165 unsigned int flags;
1166
a85b9422 1167 flags = read_mac_reg(mac, PAS_MAC_CFG_PCFG);
f5cd7872
OJ
1168
1169 /* Set promiscuous */
1170 if (dev->flags & IFF_PROMISC)
1171 flags |= PAS_MAC_CFG_PCFG_PR;
1172 else
1173 flags &= ~PAS_MAC_CFG_PCFG_PR;
1174
a85b9422 1175 write_mac_reg(mac, PAS_MAC_CFG_PCFG, flags);
f5cd7872
OJ
1176}
1177
1178
bea3348e 1179static int pasemi_mac_poll(struct napi_struct *napi, int budget)
f5cd7872 1180{
bea3348e
SH
1181 struct pasemi_mac *mac = container_of(napi, struct pasemi_mac, napi);
1182 struct net_device *dev = mac->netdev;
1183 int pkts;
f5cd7872 1184
829185e9 1185 pasemi_mac_clean_tx(mac);
bea3348e
SH
1186 pkts = pasemi_mac_clean_rx(mac, budget);
1187 if (pkts < budget) {
f5cd7872 1188 /* all done, no more packets present */
bea3348e 1189 netif_rx_complete(dev, napi);
f5cd7872 1190
1b0335ea 1191 pasemi_mac_restart_rx_intr(mac);
f5cd7872 1192 }
bea3348e 1193 return pkts;
f5cd7872
OJ
1194}
1195
b6e05a1b
OJ
1196static void __iomem * __devinit map_onedev(struct pci_dev *p, int index)
1197{
1198 struct device_node *dn;
1199 void __iomem *ret;
1200
1201 dn = pci_device_to_OF_node(p);
1202 if (!dn)
1203 goto fallback;
1204
1205 ret = of_iomap(dn, index);
1206 if (!ret)
1207 goto fallback;
1208
1209 return ret;
1210fallback:
1211 /* This is hardcoded and ugly, but we have some firmware versions
1212 * that don't provide the register space in the device tree. Luckily
1213 * they are at well-known locations so we can just do the math here.
1214 */
1215 return ioremap(0xe0000000 + (p->devfn << 12), 0x2000);
1216}
1217
/* pasemi_mac_map_regs - locate companion devices and map register spaces.
 *
 * Takes references on the shared DMA controller (0xa007) and I/O bridge
 * (0xa001) PCI devices and ioremaps the MAC, DMA, and IOB register
 * windows. Also maps the global, cache-coherent dma_status block (once,
 * shared across ports). Returns 0 or -ENODEV.
 *
 * NOTE(review): on failure the pci_dev references taken here are not
 * dropped locally; pasemi_mac_probe's "out:" path releases them.
 */
static int __devinit pasemi_mac_map_regs(struct pasemi_mac *mac)
{
	struct resource res;
	struct device_node *dn;
	int err;

	mac->dma_pdev = pci_get_device(PCI_VENDOR_ID_PASEMI, 0xa007, NULL);
	if (!mac->dma_pdev) {
		dev_err(&mac->pdev->dev, "Can't find DMA Controller\n");
		return -ENODEV;
	}

	mac->iob_pdev = pci_get_device(PCI_VENDOR_ID_PASEMI, 0xa001, NULL);
	if (!mac->iob_pdev) {
		dev_err(&mac->pdev->dev, "Can't find I/O Bridge\n");
		return -ENODEV;
	}

	mac->regs = map_onedev(mac->pdev, 0);
	mac->dma_regs = map_onedev(mac->dma_pdev, 0);
	mac->iob_regs = map_onedev(mac->iob_pdev, 0);

	if (!mac->regs || !mac->dma_regs || !mac->iob_regs) {
		dev_err(&mac->pdev->dev, "Can't map registers\n");
		return -ENODEV;
	}

	/* The dma status structure is located in the I/O bridge, and
	 * is cache coherent.
	 */
	if (!dma_status) {
		dn = pci_device_to_OF_node(mac->iob_pdev);
		if (dn)
			err = of_address_to_resource(dn, 1, &res);
		/* err is only read when dn != NULL (short-circuit), so it
		 * is never used uninitialized here.
		 */
		if (!dn || err) {
			/* Fallback for old firmware */
			res.start = 0xfd800000;
			res.end = res.start + 0x1000;
		}
		dma_status = __ioremap(res.start, res.end-res.start, 0);
	}

	return 0;
}
1262
f5cd7872
OJ
1263static int __devinit
1264pasemi_mac_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
1265{
1266 static int index = 0;
1267 struct net_device *dev;
1268 struct pasemi_mac *mac;
1269 int err;
0795af57 1270 DECLARE_MAC_BUF(mac_buf);
f5cd7872
OJ
1271
1272 err = pci_enable_device(pdev);
1273 if (err)
1274 return err;
1275
1276 dev = alloc_etherdev(sizeof(struct pasemi_mac));
1277 if (dev == NULL) {
1278 dev_err(&pdev->dev,
1279 "pasemi_mac: Could not allocate ethernet device.\n");
1280 err = -ENOMEM;
1281 goto out_disable_device;
1282 }
1283
f5cd7872
OJ
1284 pci_set_drvdata(pdev, dev);
1285 SET_NETDEV_DEV(dev, &pdev->dev);
1286
1287 mac = netdev_priv(dev);
1288
1289 mac->pdev = pdev;
1290 mac->netdev = dev;
f5cd7872 1291
bea3348e
SH
1292 netif_napi_add(dev, &mac->napi, pasemi_mac_poll, 64);
1293
ad3c20d1 1294 dev->features = NETIF_F_HW_CSUM | NETIF_F_LLTX | NETIF_F_SG;
bea3348e 1295
f5cd7872
OJ
1296 /* These should come out of the device tree eventually */
1297 mac->dma_txch = index;
1298 mac->dma_rxch = index;
1299
1300 /* We probe GMAC before XAUI, but the DMA interfaces are
1301 * in XAUI, GMAC order.
1302 */
1303 if (index < 4)
1304 mac->dma_if = index + 2;
1305 else
1306 mac->dma_if = index - 4;
1307 index++;
1308
1309 switch (pdev->device) {
1310 case 0xa005:
1311 mac->type = MAC_TYPE_GMAC;
1312 break;
1313 case 0xa006:
1314 mac->type = MAC_TYPE_XAUI;
1315 break;
1316 default:
1317 err = -ENODEV;
1318 goto out;
1319 }
1320
1321 /* get mac addr from device tree */
1322 if (pasemi_get_mac_addr(mac) || !is_valid_ether_addr(mac->mac_addr)) {
1323 err = -ENODEV;
1324 goto out;
1325 }
1326 memcpy(dev->dev_addr, mac->mac_addr, sizeof(mac->mac_addr));
1327
1328 dev->open = pasemi_mac_open;
1329 dev->stop = pasemi_mac_close;
1330 dev->hard_start_xmit = pasemi_mac_start_tx;
f5cd7872 1331 dev->set_multicast_list = pasemi_mac_set_rx_mode;
f5cd7872 1332
b6e05a1b
OJ
1333 err = pasemi_mac_map_regs(mac);
1334 if (err)
1335 goto out;
f5cd7872
OJ
1336
1337 mac->rx_status = &dma_status->rx_sta[mac->dma_rxch];
1338 mac->tx_status = &dma_status->tx_sta[mac->dma_txch];
1339
ceb51361
OJ
1340 mac->msg_enable = netif_msg_init(debug, DEFAULT_MSG_ENABLE);
1341
bb6e9590
OJ
1342 /* Enable most messages by default */
1343 mac->msg_enable = (NETIF_MSG_IFUP << 1 ) - 1;
1344
f5cd7872
OJ
1345 err = register_netdev(dev);
1346
1347 if (err) {
1348 dev_err(&mac->pdev->dev, "register_netdev failed with error %d\n",
1349 err);
1350 goto out;
69c29d89 1351 } else if netif_msg_probe(mac)
f5cd7872 1352 printk(KERN_INFO "%s: PA Semi %s: intf %d, txch %d, rxch %d, "
0795af57 1353 "hw addr %s\n",
f5cd7872
OJ
1354 dev->name, mac->type == MAC_TYPE_GMAC ? "GMAC" : "XAUI",
1355 mac->dma_if, mac->dma_txch, mac->dma_rxch,
0795af57 1356 print_mac(mac_buf, dev->dev_addr));
f5cd7872
OJ
1357
1358 return err;
1359
1360out:
b6e05a1b
OJ
1361 if (mac->iob_pdev)
1362 pci_dev_put(mac->iob_pdev);
1363 if (mac->dma_pdev)
1364 pci_dev_put(mac->dma_pdev);
1365 if (mac->dma_regs)
1366 iounmap(mac->dma_regs);
1367 if (mac->iob_regs)
1368 iounmap(mac->iob_regs);
1369 if (mac->regs)
1370 iounmap(mac->regs);
1371
f5cd7872
OJ
1372 free_netdev(dev);
1373out_disable_device:
1374 pci_disable_device(pdev);
1375 return err;
1376
1377}
1378
/* pasemi_mac_remove - PCI remove hook: tear down one MAC.
 *
 * Unregisters the netdev first (so no new ndo callbacks can run), then
 * releases the companion-device references, register mappings, and the
 * netdev itself. The shared dma_status mapping is deliberately left
 * alone here; it is unmapped once at module exit.
 */
static void __devexit pasemi_mac_remove(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct pasemi_mac *mac;

	if (!netdev)
		return;

	mac = netdev_priv(netdev);

	unregister_netdev(netdev);

	pci_disable_device(pdev);
	pci_dev_put(mac->dma_pdev);
	pci_dev_put(mac->iob_pdev);

	iounmap(mac->regs);
	iounmap(mac->dma_regs);
	iounmap(mac->iob_regs);

	pci_set_drvdata(pdev, NULL);
	free_netdev(netdev);
}
1402
/* PCI IDs this driver binds to: 0xa005 = GMAC port, 0xa006 = XAUI port
 * (matching the type switch in pasemi_mac_probe).
 */
static struct pci_device_id pasemi_mac_pci_tbl[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_PASEMI, 0xa005) },
	{ PCI_DEVICE(PCI_VENDOR_ID_PASEMI, 0xa006) },
	{ },
};

MODULE_DEVICE_TABLE(pci, pasemi_mac_pci_tbl);
1410
/* PCI driver glue tying the ID table to probe/remove */
static struct pci_driver pasemi_mac_driver = {
	.name		= "pasemi_mac",
	.id_table	= pasemi_mac_pci_tbl,
	.probe		= pasemi_mac_probe,
	.remove		= __devexit_p(pasemi_mac_remove),
};
1417
/* Module exit: unregister the driver, then drop the shared dma_status
 * mapping that pasemi_mac_map_regs created on first probe.
 */
static void __exit pasemi_mac_cleanup_module(void)
{
	pci_unregister_driver(&pasemi_mac_driver);
	__iounmap(dma_status);
	dma_status = NULL;
}
1424
/* Module init: just register the PCI driver; all per-port setup happens
 * in pasemi_mac_probe.
 * NOTE(review): non-static and without __init -- presumably historical;
 * confirm no out-of-file caller before tightening the linkage.
 */
int pasemi_mac_init_module(void)
{
	return pci_register_driver(&pasemi_mac_driver);
}

module_init(pasemi_mac_init_module);
module_exit(pasemi_mac_cleanup_module);
This page took 0.260518 seconds and 5 git commands to generate.