/* b44.c: Broadcom 44xx/47xx Fast Ethernet device driver.
 *
 * Copyright (C) 2002 David S. Miller (davem@redhat.com)
 * Copyright (C) 2004 Pekka Pietikainen (pp@ee.oulu.fi)
 * Copyright (C) 2004 Florian Schirmer (jolt@tuxbox.org)
 * Copyright (C) 2006 Felix Fietkau (nbd@openwrt.org)
 * Copyright (C) 2006 Broadcom Corporation.
 * Copyright (C) 2007 Michael Buesch <mb@bu3sch.de>
 *
 * Distribute under GPL.
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/types.h>
#include <linux/netdevice.h>
#include <linux/ethtool.h>
#include <linux/mii.h>
#include <linux/if_ether.h>
#include <linux/if_vlan.h>
#include <linux/etherdevice.h>
#include <linux/pci.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/dma-mapping.h>
#include <linux/ssb/ssb.h>

#include <asm/uaccess.h>
#include <asm/io.h>
#include <asm/irq.h>


#include "b44.h"

#define DRV_MODULE_NAME		"b44"
#define PFX DRV_MODULE_NAME	": "
#define DRV_MODULE_VERSION	"2.0"

#define B44_DEF_MSG_ENABLE	  \
	(NETIF_MSG_DRV		| \
	 NETIF_MSG_PROBE	| \
	 NETIF_MSG_LINK		| \
	 NETIF_MSG_TIMER	| \
	 NETIF_MSG_IFDOWN	| \
	 NETIF_MSG_IFUP		| \
	 NETIF_MSG_RX_ERR	| \
	 NETIF_MSG_TX_ERR)

/* length of time before we decide the hardware is borked,
 * and dev->tx_timeout() should be called to fix the problem
 */
#define B44_TX_TIMEOUT			(5 * HZ)

/* hardware minimum and maximum for a single frame's data payload */
#define B44_MIN_MTU			60
#define B44_MAX_MTU			1500

#define B44_RX_RING_SIZE		512
#define B44_DEF_RX_RING_PENDING		200
#define B44_RX_RING_BYTES	(sizeof(struct dma_desc) * \
				 B44_RX_RING_SIZE)
#define B44_TX_RING_SIZE		512
#define B44_DEF_TX_RING_PENDING		(B44_TX_RING_SIZE - 1)
#define B44_TX_RING_BYTES	(sizeof(struct dma_desc) * \
				 B44_TX_RING_SIZE)

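/* The ring sizes are powers of two, so producer and consumer indices
 * wrap with a simple mask (see NEXT_TX).  TX_BUFFS_AVAIL accounts for
 * the gap TX_RING_GAP leaves unused when tx_pending is smaller than
 * the full ring.
 */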
#define TX_RING_GAP(BP)	\
	(B44_TX_RING_SIZE - (BP)->tx_pending)
#define TX_BUFFS_AVAIL(BP)						\
	(((BP)->tx_cons <= (BP)->tx_prod) ?				\
	  (BP)->tx_cons + (BP)->tx_pending - (BP)->tx_prod :		\
	  (BP)->tx_cons - (BP)->tx_prod - TX_RING_GAP(BP))
#define NEXT_TX(N)		(((N) + 1) & (B44_TX_RING_SIZE - 1))

#define RX_PKT_OFFSET		(RX_HEADER_LEN + 2)
#define RX_PKT_BUF_SZ		(1536 + RX_PKT_OFFSET)

/* minimum number of free TX descriptors required to wake up TX process */
#define B44_TX_WAKEUP_THRESH		(B44_TX_RING_SIZE / 4)

/* b44 internal pattern match filter info */
#define B44_PATTERN_BASE	0x400
#define B44_PATTERN_SIZE	0x80
#define B44_PMASK_BASE		0x600
#define B44_PMASK_SIZE		0x10
#define B44_MAX_PATTERNS	16
#define B44_ETHIPV6UDP_HLEN	62
#define B44_ETHIPV4UDP_HLEN	42

static char version[] __devinitdata =
	DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION "\n";

MODULE_AUTHOR("Felix Fietkau, Florian Schirmer, Pekka Pietikainen, David S. Miller");
MODULE_DESCRIPTION("Broadcom 44xx/47xx 10/100 PCI ethernet driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);

static int b44_debug = -1;	/* -1 == use B44_DEF_MSG_ENABLE as value */
module_param(b44_debug, int, 0);
MODULE_PARM_DESC(b44_debug, "B44 bitmapped debugging message enable value");


#ifdef CONFIG_B44_PCI
static const struct pci_device_id b44_pci_tbl[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_BCM4401) },
	{ PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_BCM4401B0) },
	{ PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_BCM4401B1) },
	{ 0 } /* terminate list with empty entry */
};
MODULE_DEVICE_TABLE(pci, b44_pci_tbl);

static struct pci_driver b44_pci_driver = {
	.name		= DRV_MODULE_NAME,
	.id_table	= b44_pci_tbl,
};
#endif /* CONFIG_B44_PCI */

static const struct ssb_device_id b44_ssb_tbl[] = {
	SSB_DEVICE(SSB_VENDOR_BROADCOM, SSB_DEV_ETHERNET, SSB_ANY_REV),
	SSB_DEVTABLE_END
};
MODULE_DEVICE_TABLE(ssb, b44_ssb_tbl);

static void b44_halt(struct b44 *);
static void b44_init_rings(struct b44 *);

#define B44_FULL_RESET		1
#define B44_FULL_RESET_SKIP_PHY	2
#define B44_PARTIAL_RESET	3
#define B44_CHIP_RESET_FULL	4
#define B44_CHIP_RESET_PARTIAL	5

static void b44_init_hw(struct b44 *, int);

static int dma_desc_align_mask;
static int dma_desc_sync_size;
static int instance;

static const char b44_gstrings[][ETH_GSTRING_LEN] = {
#define _B44(x...)	# x,
B44_STAT_REG_DECLARE
#undef _B44
};

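/* The descriptor rings normally come from ssb_dma_alloc_consistent().
 * When that fails and a kmalloc'ed, streaming-mapped table is used
 * instead (the *_RING_HACK flags), individual descriptors have to be
 * synced by hand around chip accesses; these helpers sync the region
 * holding the descriptor at @offset.
 */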
static inline void b44_sync_dma_desc_for_device(struct ssb_device *sdev,
						dma_addr_t dma_base,
						unsigned long offset,
						enum dma_data_direction dir)
{
	ssb_dma_sync_single_range_for_device(sdev, dma_base,
					     offset & dma_desc_align_mask,
					     dma_desc_sync_size, dir);
}

static inline void b44_sync_dma_desc_for_cpu(struct ssb_device *sdev,
					     dma_addr_t dma_base,
					     unsigned long offset,
					     enum dma_data_direction dir)
{
	ssb_dma_sync_single_range_for_cpu(sdev, dma_base,
					  offset & dma_desc_align_mask,
					  dma_desc_sync_size, dir);
}

static inline unsigned long br32(const struct b44 *bp, unsigned long reg)
{
	return ssb_read32(bp->sdev, reg);
}

static inline void bw32(const struct b44 *bp,
			unsigned long reg, unsigned long val)
{
	ssb_write32(bp->sdev, reg, val);
}

static int b44_wait_bit(struct b44 *bp, unsigned long reg,
			u32 bit, unsigned long timeout, const int clear)
{
	unsigned long i;

	for (i = 0; i < timeout; i++) {
		u32 val = br32(bp, reg);

		if (clear && !(val & bit))
			break;
		if (!clear && (val & bit))
			break;
		udelay(10);
	}
	if (i == timeout) {
		printk(KERN_ERR PFX "%s: BUG! Timeout waiting for bit %08x of register "
		       "%lx to %s.\n",
		       bp->dev->name,
		       bit, reg,
		       (clear ? "clear" : "set"));
		return -ENODEV;
	}
	return 0;
}

static inline void __b44_cam_read(struct b44 *bp, unsigned char *data, int index)
{
	u32 val;

	bw32(bp, B44_CAM_CTRL, (CAM_CTRL_READ |
			    (index << CAM_CTRL_INDEX_SHIFT)));

	b44_wait_bit(bp, B44_CAM_CTRL, CAM_CTRL_BUSY, 100, 1);

	val = br32(bp, B44_CAM_DATA_LO);

	data[2] = (val >> 24) & 0xFF;
	data[3] = (val >> 16) & 0xFF;
	data[4] = (val >> 8) & 0xFF;
	data[5] = (val >> 0) & 0xFF;

	val = br32(bp, B44_CAM_DATA_HI);

	data[0] = (val >> 8) & 0xFF;
	data[1] = (val >> 0) & 0xFF;
}

static inline void __b44_cam_write(struct b44 *bp, unsigned char *data, int index)
{
	u32 val;

	val  = ((u32) data[2]) << 24;
	val |= ((u32) data[3]) << 16;
	val |= ((u32) data[4]) << 8;
	val |= ((u32) data[5]) << 0;
	bw32(bp, B44_CAM_DATA_LO, val);
	val = (CAM_DATA_HI_VALID |
	       (((u32) data[0]) << 8) |
	       (((u32) data[1]) << 0));
	bw32(bp, B44_CAM_DATA_HI, val);
	bw32(bp, B44_CAM_CTRL, (CAM_CTRL_WRITE |
			    (index << CAM_CTRL_INDEX_SHIFT)));
	b44_wait_bit(bp, B44_CAM_CTRL, CAM_CTRL_BUSY, 100, 1);
}

static inline void __b44_disable_ints(struct b44 *bp)
{
	bw32(bp, B44_IMASK, 0);
}

static void b44_disable_ints(struct b44 *bp)
{
	__b44_disable_ints(bp);

	/* Flush posted writes. */
	br32(bp, B44_IMASK);
}

static void b44_enable_ints(struct b44 *bp)
{
	bw32(bp, B44_IMASK, bp->imask);
}

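/* MDIO (IEEE 802.3 clause 22) access: assemble a management frame from
 * the start bits, opcode, PHY address, register address and turnaround
 * bits, then wait for the EMAC to signal completion via EMAC_INT_MII.
 */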
static int __b44_readphy(struct b44 *bp, int phy_addr, int reg, u32 *val)
{
	int err;

	bw32(bp, B44_EMAC_ISTAT, EMAC_INT_MII);
	bw32(bp, B44_MDIO_DATA, (MDIO_DATA_SB_START |
			     (MDIO_OP_READ << MDIO_DATA_OP_SHIFT) |
			     (phy_addr << MDIO_DATA_PMD_SHIFT) |
			     (reg << MDIO_DATA_RA_SHIFT) |
			     (MDIO_TA_VALID << MDIO_DATA_TA_SHIFT)));
	err = b44_wait_bit(bp, B44_EMAC_ISTAT, EMAC_INT_MII, 100, 0);
	*val = br32(bp, B44_MDIO_DATA) & MDIO_DATA_DATA;

	return err;
}

static int __b44_writephy(struct b44 *bp, int phy_addr, int reg, u32 val)
{
	bw32(bp, B44_EMAC_ISTAT, EMAC_INT_MII);
	bw32(bp, B44_MDIO_DATA, (MDIO_DATA_SB_START |
			     (MDIO_OP_WRITE << MDIO_DATA_OP_SHIFT) |
			     (phy_addr << MDIO_DATA_PMD_SHIFT) |
			     (reg << MDIO_DATA_RA_SHIFT) |
			     (MDIO_TA_VALID << MDIO_DATA_TA_SHIFT) |
			     (val & MDIO_DATA_DATA)));
	return b44_wait_bit(bp, B44_EMAC_ISTAT, EMAC_INT_MII, 100, 0);
}

static inline int b44_readphy(struct b44 *bp, int reg, u32 *val)
{
	if (bp->phy_addr == B44_PHY_ADDR_NO_PHY)
		return 0;

	return __b44_readphy(bp, bp->phy_addr, reg, val);
}

static inline int b44_writephy(struct b44 *bp, int reg, u32 val)
{
	if (bp->phy_addr == B44_PHY_ADDR_NO_PHY)
		return 0;

	return __b44_writephy(bp, bp->phy_addr, reg, val);
}

/* miilib interface */
static int b44_mii_read(struct net_device *dev, int phy_id, int location)
{
	u32 val;
	struct b44 *bp = netdev_priv(dev);
	int rc = __b44_readphy(bp, phy_id, location, &val);
	if (rc)
		return 0xffffffff;
	return val;
}

static void b44_mii_write(struct net_device *dev, int phy_id, int location,
			  int val)
{
	struct b44 *bp = netdev_priv(dev);
	__b44_writephy(bp, phy_id, location, val);
}

static int b44_phy_reset(struct b44 *bp)
{
	u32 val;
	int err;

	if (bp->phy_addr == B44_PHY_ADDR_NO_PHY)
		return 0;
	err = b44_writephy(bp, MII_BMCR, BMCR_RESET);
	if (err)
		return err;
	udelay(100);
	err = b44_readphy(bp, MII_BMCR, &val);
	if (!err) {
		if (val & BMCR_RESET) {
			printk(KERN_ERR PFX "%s: PHY Reset would not complete.\n",
			       bp->dev->name);
			err = -ENODEV;
		}
	}

	return err;
}

static void __b44_set_flow_ctrl(struct b44 *bp, u32 pause_flags)
{
	u32 val;

	bp->flags &= ~(B44_FLAG_TX_PAUSE | B44_FLAG_RX_PAUSE);
	bp->flags |= pause_flags;

	val = br32(bp, B44_RXCONFIG);
	if (pause_flags & B44_FLAG_RX_PAUSE)
		val |= RXCONFIG_FLOW;
	else
		val &= ~RXCONFIG_FLOW;
	bw32(bp, B44_RXCONFIG, val);

	val = br32(bp, B44_MAC_FLOW);
	if (pause_flags & B44_FLAG_TX_PAUSE)
		val |= (MAC_FLOW_PAUSE_ENAB |
			(0xc0 & MAC_FLOW_RX_HI_WATER));
	else
		val &= ~MAC_FLOW_PAUSE_ENAB;
	bw32(bp, B44_MAC_FLOW, val);
}

static void b44_set_flow_ctrl(struct b44 *bp, u32 local, u32 remote)
{
	u32 pause_enab = 0;

	/* The driver supports only rx pause by default because
	   the b44 mac tx pause mechanism generates excessive
	   pause frames.
	   Use ethtool to turn on b44 tx pause if necessary.
	 */
	if ((local & ADVERTISE_PAUSE_CAP) &&
	    (local & ADVERTISE_PAUSE_ASYM)) {
		if ((remote & LPA_PAUSE_ASYM) &&
		    !(remote & LPA_PAUSE_CAP))
			pause_enab |= B44_FLAG_RX_PAUSE;
	}

	__b44_set_flow_ctrl(bp, pause_enab);
}

#ifdef SSB_DRIVER_MIPS
extern char *nvram_get(char *name);
static void b44_wap54g10_workaround(struct b44 *bp)
{
	const char *str;
	u32 val;
	int err;

	/*
	 * workaround for bad hardware design in Linksys WAP54G v1.0
	 * see https://dev.openwrt.org/ticket/146
	 * check and reset bit "isolate"
	 */
	str = nvram_get("boardnum");
	if (!str)
		return;
	if (simple_strtoul(str, NULL, 0) == 2) {
		err = __b44_readphy(bp, 0, MII_BMCR, &val);
		if (err)
			goto error;
		if (!(val & BMCR_ISOLATE))
			return;
		val &= ~BMCR_ISOLATE;
		err = __b44_writephy(bp, 0, MII_BMCR, val);
		if (err)
			goto error;
	}
	return;
error:
	printk(KERN_WARNING PFX "PHY: cannot reset MII transceiver isolate bit.\n");
}
#else
static inline void b44_wap54g10_workaround(struct b44 *bp)
{
}
#endif

static int b44_setup_phy(struct b44 *bp)
{
	u32 val;
	int err;

	b44_wap54g10_workaround(bp);

	if (bp->phy_addr == B44_PHY_ADDR_NO_PHY)
		return 0;
	if ((err = b44_readphy(bp, B44_MII_ALEDCTRL, &val)) != 0)
		goto out;
	if ((err = b44_writephy(bp, B44_MII_ALEDCTRL,
				val & MII_ALEDCTRL_ALLMSK)) != 0)
		goto out;
	if ((err = b44_readphy(bp, B44_MII_TLEDCTRL, &val)) != 0)
		goto out;
	if ((err = b44_writephy(bp, B44_MII_TLEDCTRL,
				val | MII_TLEDCTRL_ENABLE)) != 0)
		goto out;

	if (!(bp->flags & B44_FLAG_FORCE_LINK)) {
		u32 adv = ADVERTISE_CSMA;

		if (bp->flags & B44_FLAG_ADV_10HALF)
			adv |= ADVERTISE_10HALF;
		if (bp->flags & B44_FLAG_ADV_10FULL)
			adv |= ADVERTISE_10FULL;
		if (bp->flags & B44_FLAG_ADV_100HALF)
			adv |= ADVERTISE_100HALF;
		if (bp->flags & B44_FLAG_ADV_100FULL)
			adv |= ADVERTISE_100FULL;

		if (bp->flags & B44_FLAG_PAUSE_AUTO)
			adv |= ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;

		if ((err = b44_writephy(bp, MII_ADVERTISE, adv)) != 0)
			goto out;
		if ((err = b44_writephy(bp, MII_BMCR, (BMCR_ANENABLE |
						       BMCR_ANRESTART))) != 0)
			goto out;
	} else {
		u32 bmcr;

		if ((err = b44_readphy(bp, MII_BMCR, &bmcr)) != 0)
			goto out;
		bmcr &= ~(BMCR_FULLDPLX | BMCR_ANENABLE | BMCR_SPEED100);
		if (bp->flags & B44_FLAG_100_BASE_T)
			bmcr |= BMCR_SPEED100;
		if (bp->flags & B44_FLAG_FULL_DUPLEX)
			bmcr |= BMCR_FULLDPLX;
		if ((err = b44_writephy(bp, MII_BMCR, bmcr)) != 0)
			goto out;

		/* Since we will not be negotiating there is no safe way
		 * to determine if the link partner supports flow control
		 * or not.  So just disable it completely in this case.
		 */
		b44_set_flow_ctrl(bp, 0, 0);
	}

out:
	return err;
}

static void b44_stats_update(struct b44 *bp)
{
	unsigned long reg;
	u32 *val;

	val = &bp->hw_stats.tx_good_octets;
	for (reg = B44_TX_GOOD_O; reg <= B44_TX_PAUSE; reg += 4UL) {
		*val++ += br32(bp, reg);
	}

	/* Pad */
	reg += 8*4UL;

	for (reg = B44_RX_GOOD_O; reg <= B44_RX_NPAUSE; reg += 4UL) {
		*val++ += br32(bp, reg);
	}
}

static void b44_link_report(struct b44 *bp)
{
	if (!netif_carrier_ok(bp->dev)) {
		printk(KERN_INFO PFX "%s: Link is down.\n", bp->dev->name);
	} else {
		printk(KERN_INFO PFX "%s: Link is up at %d Mbps, %s duplex.\n",
		       bp->dev->name,
		       (bp->flags & B44_FLAG_100_BASE_T) ? 100 : 10,
		       (bp->flags & B44_FLAG_FULL_DUPLEX) ? "full" : "half");

		printk(KERN_INFO PFX "%s: Flow control is %s for TX and "
		       "%s for RX.\n",
		       bp->dev->name,
		       (bp->flags & B44_FLAG_TX_PAUSE) ? "on" : "off",
		       (bp->flags & B44_FLAG_RX_PAUSE) ? "on" : "off");
	}
}

static void b44_check_phy(struct b44 *bp)
{
	u32 bmsr, aux;

	if (bp->phy_addr == B44_PHY_ADDR_NO_PHY) {
		bp->flags |= B44_FLAG_100_BASE_T;
		bp->flags |= B44_FLAG_FULL_DUPLEX;
		if (!netif_carrier_ok(bp->dev)) {
			u32 val = br32(bp, B44_TX_CTRL);
			val |= TX_CTRL_DUPLEX;
			bw32(bp, B44_TX_CTRL, val);
			netif_carrier_on(bp->dev);
			b44_link_report(bp);
		}
		return;
	}

	if (!b44_readphy(bp, MII_BMSR, &bmsr) &&
	    !b44_readphy(bp, B44_MII_AUXCTRL, &aux) &&
	    (bmsr != 0xffff)) {
		if (aux & MII_AUXCTRL_SPEED)
			bp->flags |= B44_FLAG_100_BASE_T;
		else
			bp->flags &= ~B44_FLAG_100_BASE_T;
		if (aux & MII_AUXCTRL_DUPLEX)
			bp->flags |= B44_FLAG_FULL_DUPLEX;
		else
			bp->flags &= ~B44_FLAG_FULL_DUPLEX;

		if (!netif_carrier_ok(bp->dev) &&
		    (bmsr & BMSR_LSTATUS)) {
			u32 val = br32(bp, B44_TX_CTRL);
			u32 local_adv, remote_adv;

			if (bp->flags & B44_FLAG_FULL_DUPLEX)
				val |= TX_CTRL_DUPLEX;
			else
				val &= ~TX_CTRL_DUPLEX;
			bw32(bp, B44_TX_CTRL, val);

			if (!(bp->flags & B44_FLAG_FORCE_LINK) &&
			    !b44_readphy(bp, MII_ADVERTISE, &local_adv) &&
			    !b44_readphy(bp, MII_LPA, &remote_adv))
				b44_set_flow_ctrl(bp, local_adv, remote_adv);

			/* Link now up */
			netif_carrier_on(bp->dev);
			b44_link_report(bp);
		} else if (netif_carrier_ok(bp->dev) && !(bmsr & BMSR_LSTATUS)) {
			/* Link now down */
			netif_carrier_off(bp->dev);
			b44_link_report(bp);
		}

		if (bmsr & BMSR_RFAULT)
			printk(KERN_WARNING PFX "%s: Remote fault detected in PHY\n",
			       bp->dev->name);
		if (bmsr & BMSR_JCD)
			printk(KERN_WARNING PFX "%s: Jabber detected in PHY\n",
			       bp->dev->name);
	}
}

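/* Once-a-second timer: track PHY/link state and accumulate the on-chip
 * MIB counters into hw_stats; the counters are configured to clear on
 * read (MIB_CTRL_CLR_ON_READ), so they must be harvested regularly.
 */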
static void b44_timer(unsigned long __opaque)
{
	struct b44 *bp = (struct b44 *) __opaque;

	spin_lock_irq(&bp->lock);

	b44_check_phy(bp);

	b44_stats_update(bp);

	spin_unlock_irq(&bp->lock);

	mod_timer(&bp->timer, round_jiffies(jiffies + HZ));
}

static void b44_tx(struct b44 *bp)
{
	u32 cur, cons;

	cur  = br32(bp, B44_DMATX_STAT) & DMATX_STAT_CDMASK;
	cur /= sizeof(struct dma_desc);

	/* XXX needs updating when NETIF_F_SG is supported */
	for (cons = bp->tx_cons; cons != cur; cons = NEXT_TX(cons)) {
		struct ring_info *rp = &bp->tx_buffers[cons];
		struct sk_buff *skb = rp->skb;

		BUG_ON(skb == NULL);

		ssb_dma_unmap_single(bp->sdev,
				     rp->mapping,
				     skb->len,
				     DMA_TO_DEVICE);
		rp->skb = NULL;
		dev_kfree_skb_irq(skb);
	}

	bp->tx_cons = cons;
	if (netif_queue_stopped(bp->dev) &&
	    TX_BUFFS_AVAIL(bp) > B44_TX_WAKEUP_THRESH)
		netif_wake_queue(bp->dev);

	bw32(bp, B44_GPTIMER, 0);
}

/* Works like this.  This chip writes a "struct rx_header" 30 bytes
 * before the DMA address you give it.  So we allocate 30 more bytes
 * for the RX buffer, DMA map all of it, skb_reserve the 30 bytes, then
 * point the chip at 30 bytes past where the rx_header will go.
 */
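/* Resulting buffer layout (RX_PKT_OFFSET == RX_HEADER_LEN + 2):
 *
 *   skb->data -> | struct rx_header | pad | received frame ... |
 *                                         ^ data at RX_PKT_OFFSET
 *
 * The receive offset is programmed into the chip via
 * (RX_PKT_OFFSET << DMARX_CTRL_ROSHIFT) in b44_init_hw().
 */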
static int b44_alloc_rx_skb(struct b44 *bp, int src_idx, u32 dest_idx_unmasked)
{
	struct dma_desc *dp;
	struct ring_info *src_map, *map;
	struct rx_header *rh;
	struct sk_buff *skb;
	dma_addr_t mapping;
	int dest_idx;
	u32 ctrl;

	src_map = NULL;
	if (src_idx >= 0)
		src_map = &bp->rx_buffers[src_idx];
	dest_idx = dest_idx_unmasked & (B44_RX_RING_SIZE - 1);
	map = &bp->rx_buffers[dest_idx];
	skb = netdev_alloc_skb(bp->dev, RX_PKT_BUF_SZ);
	if (skb == NULL)
		return -ENOMEM;

	mapping = ssb_dma_map_single(bp->sdev, skb->data,
				     RX_PKT_BUF_SZ,
				     DMA_FROM_DEVICE);

	/* Hardware bug work-around, the chip is unable to do PCI DMA
	   to/from anything above 1GB :-( */
	if (ssb_dma_mapping_error(bp->sdev, mapping) ||
	    mapping + RX_PKT_BUF_SZ > DMA_BIT_MASK(30)) {
		/* Sigh... */
		if (!ssb_dma_mapping_error(bp->sdev, mapping))
			ssb_dma_unmap_single(bp->sdev, mapping,
					     RX_PKT_BUF_SZ, DMA_FROM_DEVICE);
		dev_kfree_skb_any(skb);
		skb = __netdev_alloc_skb(bp->dev, RX_PKT_BUF_SZ, GFP_ATOMIC | GFP_DMA);
		if (skb == NULL)
			return -ENOMEM;
		mapping = ssb_dma_map_single(bp->sdev, skb->data,
					     RX_PKT_BUF_SZ,
					     DMA_FROM_DEVICE);
		if (ssb_dma_mapping_error(bp->sdev, mapping) ||
		    mapping + RX_PKT_BUF_SZ > DMA_BIT_MASK(30)) {
			if (!ssb_dma_mapping_error(bp->sdev, mapping))
				ssb_dma_unmap_single(bp->sdev, mapping,
						     RX_PKT_BUF_SZ, DMA_FROM_DEVICE);
			dev_kfree_skb_any(skb);
			return -ENOMEM;
		}
		bp->force_copybreak = 1;
	}

	rh = (struct rx_header *) skb->data;

	rh->len = 0;
	rh->flags = 0;

	map->skb = skb;
	map->mapping = mapping;

	if (src_map != NULL)
		src_map->skb = NULL;

	ctrl = (DESC_CTRL_LEN & RX_PKT_BUF_SZ);
	if (dest_idx == (B44_RX_RING_SIZE - 1))
		ctrl |= DESC_CTRL_EOT;

	dp = &bp->rx_ring[dest_idx];
	dp->ctrl = cpu_to_le32(ctrl);
	dp->addr = cpu_to_le32((u32) mapping + bp->dma_offset);

	if (bp->flags & B44_FLAG_RX_RING_HACK)
		b44_sync_dma_desc_for_device(bp->sdev, bp->rx_ring_dma,
					     dest_idx * sizeof(*dp),
					     DMA_BIDIRECTIONAL);

	return RX_PKT_BUF_SZ;
}

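/* Recycle a receive buffer in place: move the skb and its DMA mapping
 * from src_idx to dest_idx and rebuild the descriptor there, fixing up
 * the end-of-table (DESC_CTRL_EOT) bit for the new ring position.
 */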
static void b44_recycle_rx(struct b44 *bp, int src_idx, u32 dest_idx_unmasked)
{
	struct dma_desc *src_desc, *dest_desc;
	struct ring_info *src_map, *dest_map;
	struct rx_header *rh;
	int dest_idx;
	__le32 ctrl;

	dest_idx = dest_idx_unmasked & (B44_RX_RING_SIZE - 1);
	dest_desc = &bp->rx_ring[dest_idx];
	dest_map = &bp->rx_buffers[dest_idx];
	src_desc = &bp->rx_ring[src_idx];
	src_map = &bp->rx_buffers[src_idx];

	dest_map->skb = src_map->skb;
	rh = (struct rx_header *) src_map->skb->data;
	rh->len = 0;
	rh->flags = 0;
	dest_map->mapping = src_map->mapping;

	if (bp->flags & B44_FLAG_RX_RING_HACK)
		b44_sync_dma_desc_for_cpu(bp->sdev, bp->rx_ring_dma,
					  src_idx * sizeof(*src_desc),
					  DMA_BIDIRECTIONAL);

	ctrl = src_desc->ctrl;
	if (dest_idx == (B44_RX_RING_SIZE - 1))
		ctrl |= cpu_to_le32(DESC_CTRL_EOT);
	else
		ctrl &= cpu_to_le32(~DESC_CTRL_EOT);

	dest_desc->ctrl = ctrl;
	dest_desc->addr = src_desc->addr;

	src_map->skb = NULL;

	if (bp->flags & B44_FLAG_RX_RING_HACK)
		b44_sync_dma_desc_for_device(bp->sdev, bp->rx_ring_dma,
					     dest_idx * sizeof(*dest_desc),
					     DMA_BIDIRECTIONAL);

	ssb_dma_sync_single_for_device(bp->sdev, dest_map->mapping,
				       RX_PKT_BUF_SZ,
				       DMA_FROM_DEVICE);
}

static int b44_rx(struct b44 *bp, int budget)
{
	int received;
	u32 cons, prod;

	received = 0;
	prod  = br32(bp, B44_DMARX_STAT) & DMARX_STAT_CDMASK;
	prod /= sizeof(struct dma_desc);
	cons = bp->rx_cons;

	while (cons != prod && budget > 0) {
		struct ring_info *rp = &bp->rx_buffers[cons];
		struct sk_buff *skb = rp->skb;
		dma_addr_t map = rp->mapping;
		struct rx_header *rh;
		u16 len;

		ssb_dma_sync_single_for_cpu(bp->sdev, map,
					    RX_PKT_BUF_SZ,
					    DMA_FROM_DEVICE);
		rh = (struct rx_header *) skb->data;
		len = le16_to_cpu(rh->len);
		if ((len > (RX_PKT_BUF_SZ - RX_PKT_OFFSET)) ||
		    (rh->flags & cpu_to_le16(RX_FLAG_ERRORS))) {
		drop_it:
			b44_recycle_rx(bp, cons, bp->rx_prod);
		drop_it_no_recycle:
			bp->dev->stats.rx_dropped++;
			goto next_pkt;
		}

		if (len == 0) {
			int i = 0;

			do {
				udelay(2);
				barrier();
				len = le16_to_cpu(rh->len);
			} while (len == 0 && i++ < 5);
			if (len == 0)
				goto drop_it;
		}

		/* Omit CRC. */
		len -= 4;

		if (!bp->force_copybreak && len > RX_COPY_THRESHOLD) {
			int skb_size;
			skb_size = b44_alloc_rx_skb(bp, cons, bp->rx_prod);
			if (skb_size < 0)
				goto drop_it;
			ssb_dma_unmap_single(bp->sdev, map,
					     skb_size, DMA_FROM_DEVICE);
			/* Leave out rx_header */
			skb_put(skb, len + RX_PKT_OFFSET);
			skb_pull(skb, RX_PKT_OFFSET);
		} else {
			struct sk_buff *copy_skb;

			b44_recycle_rx(bp, cons, bp->rx_prod);
			copy_skb = dev_alloc_skb(len + 2);
			if (copy_skb == NULL)
				goto drop_it_no_recycle;

			skb_reserve(copy_skb, 2);
			skb_put(copy_skb, len);
			/* DMA sync done above, copy just the actual packet */
			skb_copy_from_linear_data_offset(skb, RX_PKT_OFFSET,
							 copy_skb->data, len);
			skb = copy_skb;
		}
		skb->ip_summed = CHECKSUM_NONE;
		skb->protocol = eth_type_trans(skb, bp->dev);
		netif_receive_skb(skb);
		received++;
		budget--;
	next_pkt:
		bp->rx_prod = (bp->rx_prod + 1) &
			(B44_RX_RING_SIZE - 1);
		cons = (cons + 1) & (B44_RX_RING_SIZE - 1);
	}

	bp->rx_cons = cons;
	bw32(bp, B44_DMARX_PTR, cons * sizeof(struct dma_desc));

	return received;
}

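/* NAPI poll handler: reclaim completed TX buffers, receive up to
 * @budget packets, and recover from error interrupts (ISTAT_ERRORS)
 * with a full chip reinit.  Interrupts are re-enabled once the budget
 * is not exhausted.
 */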
static int b44_poll(struct napi_struct *napi, int budget)
{
	struct b44 *bp = container_of(napi, struct b44, napi);
	int work_done;

	spin_lock_irq(&bp->lock);

	if (bp->istat & (ISTAT_TX | ISTAT_TO)) {
		/* spin_lock(&bp->tx_lock); */
		b44_tx(bp);
		/* spin_unlock(&bp->tx_lock); */
	}
	spin_unlock_irq(&bp->lock);

	work_done = 0;
	if (bp->istat & ISTAT_RX)
		work_done += b44_rx(bp, budget);

	if (bp->istat & ISTAT_ERRORS) {
		unsigned long flags;

		spin_lock_irqsave(&bp->lock, flags);
		b44_halt(bp);
		b44_init_rings(bp);
		b44_init_hw(bp, B44_FULL_RESET_SKIP_PHY);
		netif_wake_queue(bp->dev);
		spin_unlock_irqrestore(&bp->lock, flags);
		work_done = 0;
	}

	if (work_done < budget) {
		napi_complete(napi);
		b44_enable_ints(bp);
	}

	return work_done;
}

static irqreturn_t b44_interrupt(int irq, void *dev_id)
{
	struct net_device *dev = dev_id;
	struct b44 *bp = netdev_priv(dev);
	u32 istat, imask;
	int handled = 0;

	spin_lock(&bp->lock);

	istat = br32(bp, B44_ISTAT);
	imask = br32(bp, B44_IMASK);

	/* The interrupt mask register controls which interrupt bits
	 * will actually raise an interrupt to the CPU when set by hw/firmware,
	 * but it doesn't keep the status bits from being set, so they
	 * have to be masked off by hand here.
	 */
	istat &= imask;
	if (istat) {
		handled = 1;

		if (unlikely(!netif_running(dev))) {
			printk(KERN_INFO "%s: late interrupt.\n", dev->name);
			goto irq_ack;
		}

		if (napi_schedule_prep(&bp->napi)) {
			/* NOTE: These writes are posted by the readback of
			 *       the ISTAT register below.
			 */
			bp->istat = istat;
			__b44_disable_ints(bp);
			__napi_schedule(&bp->napi);
		} else {
			printk(KERN_ERR PFX "%s: Error, poll already scheduled\n",
			       dev->name);
		}

irq_ack:
		bw32(bp, B44_ISTAT, istat);
		br32(bp, B44_ISTAT);
	}
	spin_unlock(&bp->lock);
	return IRQ_RETVAL(handled);
}

static void b44_tx_timeout(struct net_device *dev)
{
	struct b44 *bp = netdev_priv(dev);

	printk(KERN_ERR PFX "%s: transmit timed out, resetting\n",
	       dev->name);

	spin_lock_irq(&bp->lock);

	b44_halt(bp);
	b44_init_rings(bp);
	b44_init_hw(bp, B44_FULL_RESET);

	spin_unlock_irq(&bp->lock);

	b44_enable_ints(bp);

	netif_wake_queue(dev);
}

static int b44_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct b44 *bp = netdev_priv(dev);
	int rc = NETDEV_TX_OK;
	dma_addr_t mapping;
	u32 len, entry, ctrl;

	len = skb->len;
	spin_lock_irq(&bp->lock);

	/* This is a hard error, log it. */
	if (unlikely(TX_BUFFS_AVAIL(bp) < 1)) {
		netif_stop_queue(dev);
		printk(KERN_ERR PFX "%s: BUG! Tx Ring full when queue awake!\n",
		       dev->name);
		goto err_out;
	}

	mapping = ssb_dma_map_single(bp->sdev, skb->data, len, DMA_TO_DEVICE);
	if (ssb_dma_mapping_error(bp->sdev, mapping) || mapping + len > DMA_BIT_MASK(30)) {
		struct sk_buff *bounce_skb;

		/* Chip can't handle DMA to/from >1GB, use bounce buffer */
		if (!ssb_dma_mapping_error(bp->sdev, mapping))
			ssb_dma_unmap_single(bp->sdev, mapping, len,
					     DMA_TO_DEVICE);

		bounce_skb = __netdev_alloc_skb(dev, len, GFP_ATOMIC | GFP_DMA);
		if (!bounce_skb)
			goto err_out;

		mapping = ssb_dma_map_single(bp->sdev, bounce_skb->data,
					     len, DMA_TO_DEVICE);
		if (ssb_dma_mapping_error(bp->sdev, mapping) || mapping + len > DMA_BIT_MASK(30)) {
			if (!ssb_dma_mapping_error(bp->sdev, mapping))
				ssb_dma_unmap_single(bp->sdev, mapping,
						     len, DMA_TO_DEVICE);
			dev_kfree_skb_any(bounce_skb);
			goto err_out;
		}

		skb_copy_from_linear_data(skb, skb_put(bounce_skb, len), len);
		dev_kfree_skb_any(skb);
		skb = bounce_skb;
	}

	entry = bp->tx_prod;
	bp->tx_buffers[entry].skb = skb;
	bp->tx_buffers[entry].mapping = mapping;

	ctrl  = (len & DESC_CTRL_LEN);
	ctrl |= DESC_CTRL_IOC | DESC_CTRL_SOF | DESC_CTRL_EOF;
	if (entry == (B44_TX_RING_SIZE - 1))
		ctrl |= DESC_CTRL_EOT;

	bp->tx_ring[entry].ctrl = cpu_to_le32(ctrl);
	bp->tx_ring[entry].addr = cpu_to_le32((u32) mapping + bp->dma_offset);

	if (bp->flags & B44_FLAG_TX_RING_HACK)
		b44_sync_dma_desc_for_device(bp->sdev, bp->tx_ring_dma,
					     entry * sizeof(bp->tx_ring[0]),
					     DMA_TO_DEVICE);

	entry = NEXT_TX(entry);

	bp->tx_prod = entry;

	wmb();

	bw32(bp, B44_DMATX_PTR, entry * sizeof(struct dma_desc));
	if (bp->flags & B44_FLAG_BUGGY_TXPTR)
		bw32(bp, B44_DMATX_PTR, entry * sizeof(struct dma_desc));
	if (bp->flags & B44_FLAG_REORDER_BUG)
		br32(bp, B44_DMATX_PTR);

	if (TX_BUFFS_AVAIL(bp) < 1)
		netif_stop_queue(dev);

	dev->trans_start = jiffies;

out_unlock:
	spin_unlock_irq(&bp->lock);

	return rc;

err_out:
	rc = NETDEV_TX_BUSY;
	goto out_unlock;
}

static int b44_change_mtu(struct net_device *dev, int new_mtu)
{
	struct b44 *bp = netdev_priv(dev);

	if (new_mtu < B44_MIN_MTU || new_mtu > B44_MAX_MTU)
		return -EINVAL;

	if (!netif_running(dev)) {
		/* We'll just catch it later when the
		 * device is up'd.
		 */
		dev->mtu = new_mtu;
		return 0;
	}

	spin_lock_irq(&bp->lock);
	b44_halt(bp);
	dev->mtu = new_mtu;
	b44_init_rings(bp);
	b44_init_hw(bp, B44_FULL_RESET);
	spin_unlock_irq(&bp->lock);

	b44_enable_ints(bp);

	return 0;
}

/* Free up pending packets in all rx/tx rings.
 *
 * The chip has been shut down and the driver detached from
 * the networking, so no interrupts or new tx packets will
 * end up in the driver.  bp->lock is not held and we are not
 * in an interrupt context and thus may sleep.
 */
static void b44_free_rings(struct b44 *bp)
{
	struct ring_info *rp;
	int i;

	for (i = 0; i < B44_RX_RING_SIZE; i++) {
		rp = &bp->rx_buffers[i];

		if (rp->skb == NULL)
			continue;
		ssb_dma_unmap_single(bp->sdev, rp->mapping, RX_PKT_BUF_SZ,
				     DMA_FROM_DEVICE);
		dev_kfree_skb_any(rp->skb);
		rp->skb = NULL;
	}

	/* XXX needs changes once NETIF_F_SG is set... */
	for (i = 0; i < B44_TX_RING_SIZE; i++) {
		rp = &bp->tx_buffers[i];

		if (rp->skb == NULL)
			continue;
		ssb_dma_unmap_single(bp->sdev, rp->mapping, rp->skb->len,
				     DMA_TO_DEVICE);
		dev_kfree_skb_any(rp->skb);
		rp->skb = NULL;
	}
}

/* Initialize tx/rx rings for packet processing.
 *
 * The chip has been shut down and the driver detached from
 * the networking, so no interrupts or new tx packets will
 * end up in the driver.
 */
static void b44_init_rings(struct b44 *bp)
{
	int i;

	b44_free_rings(bp);

	memset(bp->rx_ring, 0, B44_RX_RING_BYTES);
	memset(bp->tx_ring, 0, B44_TX_RING_BYTES);

	if (bp->flags & B44_FLAG_RX_RING_HACK)
		ssb_dma_sync_single_for_device(bp->sdev, bp->rx_ring_dma,
					       DMA_TABLE_BYTES,
					       DMA_BIDIRECTIONAL);

	if (bp->flags & B44_FLAG_TX_RING_HACK)
		ssb_dma_sync_single_for_device(bp->sdev, bp->tx_ring_dma,
					       DMA_TABLE_BYTES,
					       DMA_TO_DEVICE);

	for (i = 0; i < bp->rx_pending; i++) {
		if (b44_alloc_rx_skb(bp, -1, i) < 0)
			break;
	}
}

/*
 * Must not be invoked with interrupt sources disabled and
 * the hardware shut down.
 */
static void b44_free_consistent(struct b44 *bp)
{
	kfree(bp->rx_buffers);
	bp->rx_buffers = NULL;
	kfree(bp->tx_buffers);
	bp->tx_buffers = NULL;
	if (bp->rx_ring) {
		if (bp->flags & B44_FLAG_RX_RING_HACK) {
			ssb_dma_unmap_single(bp->sdev, bp->rx_ring_dma,
					     DMA_TABLE_BYTES,
					     DMA_BIDIRECTIONAL);
			kfree(bp->rx_ring);
		} else
			ssb_dma_free_consistent(bp->sdev, DMA_TABLE_BYTES,
						bp->rx_ring, bp->rx_ring_dma,
						GFP_KERNEL);
		bp->rx_ring = NULL;
		bp->flags &= ~B44_FLAG_RX_RING_HACK;
	}
	if (bp->tx_ring) {
		if (bp->flags & B44_FLAG_TX_RING_HACK) {
			ssb_dma_unmap_single(bp->sdev, bp->tx_ring_dma,
					     DMA_TABLE_BYTES,
					     DMA_TO_DEVICE);
			kfree(bp->tx_ring);
		} else
			ssb_dma_free_consistent(bp->sdev, DMA_TABLE_BYTES,
						bp->tx_ring, bp->tx_ring_dma,
						GFP_KERNEL);
		bp->tx_ring = NULL;
		bp->flags &= ~B44_FLAG_TX_RING_HACK;
	}
}

/*
 * Must not be invoked with interrupt sources disabled and
 * the hardware shut down.  Can sleep.
 */
static int b44_alloc_consistent(struct b44 *bp, gfp_t gfp)
{
	int size;

	size = B44_RX_RING_SIZE * sizeof(struct ring_info);
	bp->rx_buffers = kzalloc(size, gfp);
	if (!bp->rx_buffers)
		goto out_err;

	size = B44_TX_RING_SIZE * sizeof(struct ring_info);
	bp->tx_buffers = kzalloc(size, gfp);
	if (!bp->tx_buffers)
		goto out_err;

	size = DMA_TABLE_BYTES;
	bp->rx_ring = ssb_dma_alloc_consistent(bp->sdev, size, &bp->rx_ring_dma, gfp);
	if (!bp->rx_ring) {
		/* Allocation may have failed due to ssb_dma_alloc_consistent
		   insisting on use of GFP_DMA, which is more restrictive
		   than necessary... */
		struct dma_desc *rx_ring;
		dma_addr_t rx_ring_dma;

		rx_ring = kzalloc(size, gfp);
		if (!rx_ring)
			goto out_err;

		rx_ring_dma = ssb_dma_map_single(bp->sdev, rx_ring,
						 DMA_TABLE_BYTES,
						 DMA_BIDIRECTIONAL);

		if (ssb_dma_mapping_error(bp->sdev, rx_ring_dma) ||
		    rx_ring_dma + size > DMA_BIT_MASK(30)) {
			kfree(rx_ring);
			goto out_err;
		}

		bp->rx_ring = rx_ring;
		bp->rx_ring_dma = rx_ring_dma;
		bp->flags |= B44_FLAG_RX_RING_HACK;
	}

	bp->tx_ring = ssb_dma_alloc_consistent(bp->sdev, size, &bp->tx_ring_dma, gfp);
	if (!bp->tx_ring) {
		/* Allocation may have failed due to ssb_dma_alloc_consistent
		   insisting on use of GFP_DMA, which is more restrictive
		   than necessary... */
		struct dma_desc *tx_ring;
		dma_addr_t tx_ring_dma;

		tx_ring = kzalloc(size, gfp);
		if (!tx_ring)
			goto out_err;

		tx_ring_dma = ssb_dma_map_single(bp->sdev, tx_ring,
						 DMA_TABLE_BYTES,
						 DMA_TO_DEVICE);

		if (ssb_dma_mapping_error(bp->sdev, tx_ring_dma) ||
		    tx_ring_dma + size > DMA_BIT_MASK(30)) {
			kfree(tx_ring);
			goto out_err;
		}

		bp->tx_ring = tx_ring;
		bp->tx_ring_dma = tx_ring_dma;
		bp->flags |= B44_FLAG_TX_RING_HACK;
	}

	return 0;

out_err:
	b44_free_consistent(bp);
	return -ENOMEM;
}

/* bp->lock is held. */
static void b44_clear_stats(struct b44 *bp)
{
	unsigned long reg;

	bw32(bp, B44_MIB_CTRL, MIB_CTRL_CLR_ON_READ);
	for (reg = B44_TX_GOOD_O; reg <= B44_TX_PAUSE; reg += 4UL)
		br32(bp, reg);
	for (reg = B44_RX_GOOD_O; reg <= B44_RX_NPAUSE; reg += 4UL)
		br32(bp, reg);
}

/* bp->lock is held. */
static void b44_chip_reset(struct b44 *bp, int reset_kind)
{
	struct ssb_device *sdev = bp->sdev;
	bool was_enabled;

	was_enabled = ssb_device_is_enabled(bp->sdev);

	ssb_device_enable(bp->sdev, 0);
	ssb_pcicore_dev_irqvecs_enable(&sdev->bus->pcicore, sdev);

	if (was_enabled) {
		bw32(bp, B44_RCV_LAZY, 0);
		bw32(bp, B44_ENET_CTRL, ENET_CTRL_DISABLE);
		b44_wait_bit(bp, B44_ENET_CTRL, ENET_CTRL_DISABLE, 200, 1);
		bw32(bp, B44_DMATX_CTRL, 0);
		bp->tx_prod = bp->tx_cons = 0;
		if (br32(bp, B44_DMARX_STAT) & DMARX_STAT_EMASK) {
			b44_wait_bit(bp, B44_DMARX_STAT, DMARX_STAT_SIDLE,
				     100, 0);
		}
		bw32(bp, B44_DMARX_CTRL, 0);
		bp->rx_prod = bp->rx_cons = 0;
	}

	b44_clear_stats(bp);

	/*
	 * Don't enable PHY if we are doing a partial reset
	 * we are probably going to power down
	 */
	if (reset_kind == B44_CHIP_RESET_PARTIAL)
		return;

	switch (sdev->bus->bustype) {
	case SSB_BUSTYPE_SSB:
		bw32(bp, B44_MDIO_CTRL, (MDIO_CTRL_PREAMBLE |
		     (DIV_ROUND_CLOSEST(ssb_clockspeed(sdev->bus),
					B44_MDC_RATIO)
		     & MDIO_CTRL_MAXF_MASK)));
		break;
	case SSB_BUSTYPE_PCI:
	case SSB_BUSTYPE_PCMCIA:
		bw32(bp, B44_MDIO_CTRL, (MDIO_CTRL_PREAMBLE |
		     (0x0d & MDIO_CTRL_MAXF_MASK)));
		break;
	}

	br32(bp, B44_MDIO_CTRL);

	if (!(br32(bp, B44_DEVCTRL) & DEVCTRL_IPP)) {
		bw32(bp, B44_ENET_CTRL, ENET_CTRL_EPSEL);
		br32(bp, B44_ENET_CTRL);
		bp->flags &= ~B44_FLAG_INTERNAL_PHY;
	} else {
		u32 val = br32(bp, B44_DEVCTRL);

		if (val & DEVCTRL_EPR) {
			bw32(bp, B44_DEVCTRL, (val & ~DEVCTRL_EPR));
			br32(bp, B44_DEVCTRL);
			udelay(100);
		}
		bp->flags |= B44_FLAG_INTERNAL_PHY;
	}
}

/* bp->lock is held. */
static void b44_halt(struct b44 *bp)
{
	b44_disable_ints(bp);
	/* reset PHY */
	b44_phy_reset(bp);
	/* power down PHY */
	printk(KERN_INFO PFX "%s: powering down PHY\n", bp->dev->name);
	bw32(bp, B44_MAC_CTRL, MAC_CTRL_PHY_PDOWN);
	/* now reset the chip, but without enabling the MAC&PHY
	 * part of it.  This has to be done _after_ we shut down the PHY */
	b44_chip_reset(bp, B44_CHIP_RESET_PARTIAL);
}

/* bp->lock is held. */
static void __b44_set_mac_addr(struct b44 *bp)
{
	bw32(bp, B44_CAM_CTRL, 0);
	if (!(bp->dev->flags & IFF_PROMISC)) {
		u32 val;

		__b44_cam_write(bp, bp->dev->dev_addr, 0);
		val = br32(bp, B44_CAM_CTRL);
		bw32(bp, B44_CAM_CTRL, val | CAM_CTRL_ENABLE);
	}
}

static int b44_set_mac_addr(struct net_device *dev, void *p)
{
	struct b44 *bp = netdev_priv(dev);
	struct sockaddr *addr = p;
	u32 val;

	if (netif_running(dev))
		return -EBUSY;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EINVAL;

	memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);

	spin_lock_irq(&bp->lock);

	val = br32(bp, B44_RXCONFIG);
	if (!(val & RXCONFIG_CAM_ABSENT))
		__b44_set_mac_addr(bp);

	spin_unlock_irq(&bp->lock);

	return 0;
}

/* Called at device open time to get the chip ready for
 * packet processing.  Invoked with bp->lock held.
 */
static void __b44_set_rx_mode(struct net_device *);
static void b44_init_hw(struct b44 *bp, int reset_kind)
{
	u32 val;

	b44_chip_reset(bp, B44_CHIP_RESET_FULL);
	if (reset_kind == B44_FULL_RESET) {
		b44_phy_reset(bp);
		b44_setup_phy(bp);
	}

	/* Enable CRC32, set proper LED modes and power on PHY */
	bw32(bp, B44_MAC_CTRL, MAC_CTRL_CRC32_ENAB | MAC_CTRL_PHY_LEDCTRL);
	bw32(bp, B44_RCV_LAZY, (1 << RCV_LAZY_FC_SHIFT));

	/* This sets the MAC address too. */
	__b44_set_rx_mode(bp->dev);

	/* MTU + eth header + possible VLAN tag + struct rx_header */
	bw32(bp, B44_RXMAXLEN, bp->dev->mtu + ETH_HLEN + 8 + RX_HEADER_LEN);
	bw32(bp, B44_TXMAXLEN, bp->dev->mtu + ETH_HLEN + 8 + RX_HEADER_LEN);

	bw32(bp, B44_TX_WMARK, 56); /* XXX magic */
	if (reset_kind == B44_PARTIAL_RESET) {
		bw32(bp, B44_DMARX_CTRL, (DMARX_CTRL_ENABLE |
					  (RX_PKT_OFFSET << DMARX_CTRL_ROSHIFT)));
	} else {
		bw32(bp, B44_DMATX_CTRL, DMATX_CTRL_ENABLE);
		bw32(bp, B44_DMATX_ADDR, bp->tx_ring_dma + bp->dma_offset);
		bw32(bp, B44_DMARX_CTRL, (DMARX_CTRL_ENABLE |
					  (RX_PKT_OFFSET << DMARX_CTRL_ROSHIFT)));
		bw32(bp, B44_DMARX_ADDR, bp->rx_ring_dma + bp->dma_offset);

		bw32(bp, B44_DMARX_PTR, bp->rx_pending);
		bp->rx_prod = bp->rx_pending;

		bw32(bp, B44_MIB_CTRL, MIB_CTRL_CLR_ON_READ);
	}

	val = br32(bp, B44_ENET_CTRL);
	bw32(bp, B44_ENET_CTRL, (val | ENET_CTRL_ENABLE));
}

static int b44_open(struct net_device *dev)
{
	struct b44 *bp = netdev_priv(dev);
	int err;

	err = b44_alloc_consistent(bp, GFP_KERNEL);
	if (err)
		goto out;

	napi_enable(&bp->napi);

	b44_init_rings(bp);
	b44_init_hw(bp, B44_FULL_RESET);

	b44_check_phy(bp);

	err = request_irq(dev->irq, b44_interrupt, IRQF_SHARED, dev->name, dev);
	if (unlikely(err < 0)) {
		napi_disable(&bp->napi);
		b44_chip_reset(bp, B44_CHIP_RESET_PARTIAL);
		b44_free_rings(bp);
		b44_free_consistent(bp);
		goto out;
	}

	init_timer(&bp->timer);
	bp->timer.expires = jiffies + HZ;
	bp->timer.data = (unsigned long) bp;
	bp->timer.function = b44_timer;
	add_timer(&bp->timer);

	b44_enable_ints(bp);
	netif_start_queue(dev);
out:
	return err;
}

#ifdef CONFIG_NET_POLL_CONTROLLER
/*
 * Polling receive - used by netconsole and other diagnostic tools
 * to allow network i/o with interrupts disabled.
 */
static void b44_poll_controller(struct net_device *dev)
{
	disable_irq(dev->irq);
	b44_interrupt(dev->irq, dev);
	enable_irq(dev->irq);
}
#endif

static void bwfilter_table(struct b44 *bp, u8 *pp, u32 bytes, u32 table_offset)
{
	u32 i;
	u32 *pattern = (u32 *) pp;

	for (i = 0; i < bytes; i += sizeof(u32)) {
		bw32(bp, B44_FILT_ADDR, table_offset + i);
		bw32(bp, B44_FILT_DATA, pattern[i / sizeof(u32)]);
	}
}

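/* Build a Wake-on-LAN magic packet pattern: six 0xff synchronization
 * bytes followed by repetitions of the MAC address, as many as fit in
 * the B44_PATTERN_SIZE window starting at @offset.  Bits set in @pmask
 * mark which pattern bytes must match.  Returns the pattern length
 * minus one, the form the WKUP_LEN register expects.
 */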
static int b44_magic_pattern(u8 *macaddr, u8 *ppattern, u8 *pmask, int offset)
{
	int magicsync = 6;
	int k, j, len = offset;
	int ethaddr_bytes = ETH_ALEN;

	memset(ppattern + offset, 0xff, magicsync);
	for (j = 0; j < magicsync; j++)
		set_bit(len++, (unsigned long *) pmask);

	for (j = 0; j < B44_MAX_PATTERNS; j++) {
		if ((B44_PATTERN_SIZE - len) >= ETH_ALEN)
			ethaddr_bytes = ETH_ALEN;
		else
			ethaddr_bytes = B44_PATTERN_SIZE - len;
		if (ethaddr_bytes <= 0)
			break;
		for (k = 0; k < ethaddr_bytes; k++) {
			ppattern[offset + magicsync +
				(j * ETH_ALEN) + k] = macaddr[k];
			set_bit(len++, (unsigned long *) pmask);
		}
	}
	return len - 1;
}

/* Setup magic packet patterns in the b44 WOL
 * pattern matching filter.
 */
static void b44_setup_pseudo_magicp(struct b44 *bp)
{

	u32 val;
	int plen0, plen1, plen2;
	u8 *pwol_pattern;
	u8 pwol_mask[B44_PMASK_SIZE];

	pwol_pattern = kzalloc(B44_PATTERN_SIZE, GFP_KERNEL);
	if (!pwol_pattern) {
		printk(KERN_ERR PFX "Memory not available for WOL\n");
		return;
	}

	/* Ipv4 magic packet pattern - pattern 0.*/
	memset(pwol_mask, 0, B44_PMASK_SIZE);
	plen0 = b44_magic_pattern(bp->dev->dev_addr, pwol_pattern, pwol_mask,
				  B44_ETHIPV4UDP_HLEN);

	bwfilter_table(bp, pwol_pattern, B44_PATTERN_SIZE, B44_PATTERN_BASE);
	bwfilter_table(bp, pwol_mask, B44_PMASK_SIZE, B44_PMASK_BASE);

	/* Raw ethernet II magic packet pattern - pattern 1 */
	memset(pwol_pattern, 0, B44_PATTERN_SIZE);
	memset(pwol_mask, 0, B44_PMASK_SIZE);
	plen1 = b44_magic_pattern(bp->dev->dev_addr, pwol_pattern, pwol_mask,
				  ETH_HLEN);

	bwfilter_table(bp, pwol_pattern, B44_PATTERN_SIZE,
		       B44_PATTERN_BASE + B44_PATTERN_SIZE);
	bwfilter_table(bp, pwol_mask, B44_PMASK_SIZE,
		       B44_PMASK_BASE + B44_PMASK_SIZE);

	/* Ipv6 magic packet pattern - pattern 2 */
	memset(pwol_pattern, 0, B44_PATTERN_SIZE);
	memset(pwol_mask, 0, B44_PMASK_SIZE);
	plen2 = b44_magic_pattern(bp->dev->dev_addr, pwol_pattern, pwol_mask,
				  B44_ETHIPV6UDP_HLEN);

	bwfilter_table(bp, pwol_pattern, B44_PATTERN_SIZE,
		       B44_PATTERN_BASE + B44_PATTERN_SIZE + B44_PATTERN_SIZE);
	bwfilter_table(bp, pwol_mask, B44_PMASK_SIZE,
		       B44_PMASK_BASE + B44_PMASK_SIZE + B44_PMASK_SIZE);

	kfree(pwol_pattern);

	/* set these pattern's lengths: one less than each real length */
	val = plen0 | (plen1 << 8) | (plen2 << 16) | WKUP_LEN_ENABLE_THREE;
	bw32(bp, B44_WKUP_LEN, val);

	/* enable wakeup pattern matching */
	val = br32(bp, B44_DEVCTRL);
	bw32(bp, B44_DEVCTRL, val | DEVCTRL_PFE);

}

#ifdef CONFIG_B44_PCI
static void b44_setup_wol_pci(struct b44 *bp)
{
	u16 val;

	if (bp->sdev->bus->bustype != SSB_BUSTYPE_SSB) {
		bw32(bp, SSB_TMSLOW, br32(bp, SSB_TMSLOW) | SSB_TMSLOW_PE);
		pci_read_config_word(bp->sdev->bus->host_pci, SSB_PMCSR, &val);
		pci_write_config_word(bp->sdev->bus->host_pci, SSB_PMCSR, val | SSB_PE);
	}
}
#else
static inline void b44_setup_wol_pci(struct b44 *bp) { }
#endif /* CONFIG_B44_PCI */

static void b44_setup_wol(struct b44 *bp)
{
	u32 val;

	bw32(bp, B44_RXCONFIG, RXCONFIG_ALLMULTI);

	if (bp->flags & B44_FLAG_B0_ANDLATER) {

		bw32(bp, B44_WKUP_LEN, WKUP_LEN_DISABLE);

		val = bp->dev->dev_addr[2] << 24 |
			bp->dev->dev_addr[3] << 16 |
			bp->dev->dev_addr[4] << 8 |
			bp->dev->dev_addr[5];
		bw32(bp, B44_ADDR_LO, val);

		val = bp->dev->dev_addr[0] << 8 |
			bp->dev->dev_addr[1];
		bw32(bp, B44_ADDR_HI, val);

		val = br32(bp, B44_DEVCTRL);
		bw32(bp, B44_DEVCTRL, val | DEVCTRL_MPM | DEVCTRL_PFE);

	} else {
		b44_setup_pseudo_magicp(bp);
	}
	b44_setup_wol_pci(bp);
}

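/* Mirror image of b44_open(): quiesce NAPI and the timer, halt the
 * chip under the lock, then optionally rearm it in the low-power
 * Wake-on-LAN configuration before the DMA resources are freed.
 */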
static int b44_close(struct net_device *dev)
{
	struct b44 *bp = netdev_priv(dev);

	netif_stop_queue(dev);

	napi_disable(&bp->napi);

	del_timer_sync(&bp->timer);

	spin_lock_irq(&bp->lock);

	b44_halt(bp);
	b44_free_rings(bp);
	netif_carrier_off(dev);

	spin_unlock_irq(&bp->lock);

	free_irq(dev->irq, dev);

	if (bp->flags & B44_FLAG_WOL_ENABLE) {
		b44_init_hw(bp, B44_PARTIAL_RESET);
		b44_setup_wol(bp);
	}

	b44_free_consistent(bp);

	return 0;
}

static struct net_device_stats *b44_get_stats(struct net_device *dev)
{
	struct b44 *bp = netdev_priv(dev);
	struct net_device_stats *nstat = &dev->stats;
	struct b44_hw_stats *hwstat = &bp->hw_stats;

	/* Convert HW stats into netdevice stats. */
	nstat->rx_packets = hwstat->rx_pkts;
	nstat->tx_packets = hwstat->tx_pkts;
	nstat->rx_bytes   = hwstat->rx_octets;
	nstat->tx_bytes   = hwstat->tx_octets;
	nstat->tx_errors  = (hwstat->tx_jabber_pkts +
			     hwstat->tx_oversize_pkts +
			     hwstat->tx_underruns +
			     hwstat->tx_excessive_cols +
			     hwstat->tx_late_cols);
	nstat->multicast  = hwstat->tx_multicast_pkts;
	nstat->collisions = hwstat->tx_total_cols;

	nstat->rx_length_errors = (hwstat->rx_oversize_pkts +
				   hwstat->rx_undersize);
	nstat->rx_over_errors   = hwstat->rx_missed_pkts;
	nstat->rx_frame_errors  = hwstat->rx_align_errs;
	nstat->rx_crc_errors    = hwstat->rx_crc_errs;
	nstat->rx_errors        = (hwstat->rx_jabber_pkts +
				   hwstat->rx_oversize_pkts +
				   hwstat->rx_missed_pkts +
				   hwstat->rx_crc_align_errs +
				   hwstat->rx_undersize +
				   hwstat->rx_crc_errs +
				   hwstat->rx_align_errs +
				   hwstat->rx_symbol_errs);

	nstat->tx_aborted_errors = hwstat->tx_underruns;
#if 0
	/* Carrier lost counter seems to be broken for some devices */
	nstat->tx_carrier_errors = hwstat->tx_carrier_lost;
#endif

	return nstat;
}

static int __b44_load_mcast(struct b44 *bp, struct net_device *dev)
{
	struct dev_mc_list *mclist;
	int i, num_ents;

	num_ents = min_t(int, dev->mc_count, B44_MCAST_TABLE_SIZE);
	mclist = dev->mc_list;
	for (i = 0; mclist && i < num_ents; i++, mclist = mclist->next) {
		__b44_cam_write(bp, mclist->dmi_addr, i + 1);
	}
	return i + 1;
}

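/* The receive filter CAM has 64 entries: entry 0 holds the unicast MAC
 * address, multicast addresses fill entries from index 1 up, and the
 * remaining entries are cleared to the all-zero address.  Without a
 * CAM (RXCONFIG_CAM_ABSENT), or in promiscuous mode, filtering is
 * bypassed entirely.
 */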
1703 static void __b44_set_rx_mode(struct net_device *dev)
1704 {
1705 struct b44 *bp = netdev_priv(dev);
1706 u32 val;
1707
1708 val = br32(bp, B44_RXCONFIG);
1709 val &= ~(RXCONFIG_PROMISC | RXCONFIG_ALLMULTI);
1710 if ((dev->flags & IFF_PROMISC) || (val & RXCONFIG_CAM_ABSENT)) {
1711 val |= RXCONFIG_PROMISC;
1712 bw32(bp, B44_RXCONFIG, val);
1713 } else {
1714 unsigned char zero[6] = {0, 0, 0, 0, 0, 0};
1715 int i = 1;
1716
1717 __b44_set_mac_addr(bp);
1718
1719 if ((dev->flags & IFF_ALLMULTI) ||
1720 (dev->mc_count > B44_MCAST_TABLE_SIZE))
1721 val |= RXCONFIG_ALLMULTI;
1722 else
1723 i = __b44_load_mcast(bp, dev);
1724
1725 for (; i < 64; i++)
1726 __b44_cam_write(bp, zero, i);
1727
1728 bw32(bp, B44_RXCONFIG, val);
1729 val = br32(bp, B44_CAM_CTRL);
1730 bw32(bp, B44_CAM_CTRL, val | CAM_CTRL_ENABLE);
1731 }
1732 }
1733
1734 static void b44_set_rx_mode(struct net_device *dev)
1735 {
1736 struct b44 *bp = netdev_priv(dev);
1737
1738 spin_lock_irq(&bp->lock);
1739 __b44_set_rx_mode(dev);
1740 spin_unlock_irq(&bp->lock);
1741 }
1742
1743 static u32 b44_get_msglevel(struct net_device *dev)
1744 {
1745 struct b44 *bp = netdev_priv(dev);
1746 return bp->msg_enable;
1747 }
1748
1749 static void b44_set_msglevel(struct net_device *dev, u32 value)
1750 {
1751 struct b44 *bp = netdev_priv(dev);
1752 bp->msg_enable = value;
1753 }
1754
1755 static void b44_get_drvinfo (struct net_device *dev, struct ethtool_drvinfo *info)
1756 {
1757 struct b44 *bp = netdev_priv(dev);
1758 struct ssb_bus *bus = bp->sdev->bus;
1759
1760 strlcpy(info->driver, DRV_MODULE_NAME, sizeof(info->driver));
1761 strlcpy(info->version, DRV_MODULE_VERSION, sizeof(info->version));
1762 switch (bus->bustype) {
1763 case SSB_BUSTYPE_PCI:
1764 strlcpy(info->bus_info, pci_name(bus->host_pci), sizeof(info->bus_info));
1765 break;
1766 case SSB_BUSTYPE_PCMCIA:
1767 case SSB_BUSTYPE_SSB:
1768 strlcpy(info->bus_info, "SSB", sizeof(info->bus_info));
1769 break;
1770 }
1771 }
1772
1773 static int b44_nway_reset(struct net_device *dev)
1774 {
1775 struct b44 *bp = netdev_priv(dev);
1776 u32 bmcr;
1777 int r;
1778
1779 spin_lock_irq(&bp->lock);
1780 b44_readphy(bp, MII_BMCR, &bmcr);
1781 b44_readphy(bp, MII_BMCR, &bmcr);
1782 r = -EINVAL;
1783 if (bmcr & BMCR_ANENABLE) {
1784 b44_writephy(bp, MII_BMCR,
1785 bmcr | BMCR_ANRESTART);
1786 r = 0;
1787 }
1788 spin_unlock_irq(&bp->lock);
1789
1790 return r;
1791 }
1792
1793 static int b44_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
1794 {
1795 struct b44 *bp = netdev_priv(dev);
1796
1797 cmd->supported = (SUPPORTED_Autoneg);
1798 cmd->supported |= (SUPPORTED_100baseT_Half |
1799 SUPPORTED_100baseT_Full |
1800 SUPPORTED_10baseT_Half |
1801 SUPPORTED_10baseT_Full |
1802 SUPPORTED_MII);
1803
1804 cmd->advertising = 0;
1805 if (bp->flags & B44_FLAG_ADV_10HALF)
1806 cmd->advertising |= ADVERTISED_10baseT_Half;
1807 if (bp->flags & B44_FLAG_ADV_10FULL)
1808 cmd->advertising |= ADVERTISED_10baseT_Full;
1809 if (bp->flags & B44_FLAG_ADV_100HALF)
1810 cmd->advertising |= ADVERTISED_100baseT_Half;
1811 if (bp->flags & B44_FLAG_ADV_100FULL)
1812 cmd->advertising |= ADVERTISED_100baseT_Full;
1813 cmd->advertising |= ADVERTISED_Pause | ADVERTISED_Asym_Pause;
1814 cmd->speed = (bp->flags & B44_FLAG_100_BASE_T) ?
1815 SPEED_100 : SPEED_10;
1816 cmd->duplex = (bp->flags & B44_FLAG_FULL_DUPLEX) ?
1817 DUPLEX_FULL : DUPLEX_HALF;
1818 cmd->port = 0;
1819 cmd->phy_address = bp->phy_addr;
1820 cmd->transceiver = (bp->flags & B44_FLAG_INTERNAL_PHY) ?
1821 XCVR_INTERNAL : XCVR_EXTERNAL;
1822 cmd->autoneg = (bp->flags & B44_FLAG_FORCE_LINK) ?
1823 AUTONEG_DISABLE : AUTONEG_ENABLE;
1824 if (cmd->autoneg == AUTONEG_ENABLE)
1825 cmd->advertising |= ADVERTISED_Autoneg;
1826 if (!netif_running(dev)){
1827 cmd->speed = 0;
1828 cmd->duplex = 0xff;
1829 }
1830 cmd->maxtxpkt = 0;
1831 cmd->maxrxpkt = 0;
1832 return 0;
1833 }
1834
1835 static int b44_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
1836 {
1837 struct b44 *bp = netdev_priv(dev);
1838
1839 /* We do not support gigabit. */
1840 if (cmd->autoneg == AUTONEG_ENABLE) {
1841 if (cmd->advertising &
1842 (ADVERTISED_1000baseT_Half |
1843 ADVERTISED_1000baseT_Full))
1844 return -EINVAL;
1845 } else if ((cmd->speed != SPEED_100 &&
1846 cmd->speed != SPEED_10) ||
1847 (cmd->duplex != DUPLEX_HALF &&
1848 cmd->duplex != DUPLEX_FULL)) {
1849 return -EINVAL;
1850 }
1851
1852 spin_lock_irq(&bp->lock);
1853
1854 if (cmd->autoneg == AUTONEG_ENABLE) {
1855 bp->flags &= ~(B44_FLAG_FORCE_LINK |
1856 B44_FLAG_100_BASE_T |
1857 B44_FLAG_FULL_DUPLEX |
1858 B44_FLAG_ADV_10HALF |
1859 B44_FLAG_ADV_10FULL |
1860 B44_FLAG_ADV_100HALF |
1861 B44_FLAG_ADV_100FULL);
1862 if (cmd->advertising == 0) {
1863 bp->flags |= (B44_FLAG_ADV_10HALF |
1864 B44_FLAG_ADV_10FULL |
1865 B44_FLAG_ADV_100HALF |
1866 B44_FLAG_ADV_100FULL);
1867 } else {
1868 if (cmd->advertising & ADVERTISED_10baseT_Half)
1869 bp->flags |= B44_FLAG_ADV_10HALF;
1870 if (cmd->advertising & ADVERTISED_10baseT_Full)
1871 bp->flags |= B44_FLAG_ADV_10FULL;
1872 if (cmd->advertising & ADVERTISED_100baseT_Half)
1873 bp->flags |= B44_FLAG_ADV_100HALF;
1874 if (cmd->advertising & ADVERTISED_100baseT_Full)
1875 bp->flags |= B44_FLAG_ADV_100FULL;
1876 }
1877 } else {
1878 bp->flags |= B44_FLAG_FORCE_LINK;
1879 bp->flags &= ~(B44_FLAG_100_BASE_T | B44_FLAG_FULL_DUPLEX);
1880 if (cmd->speed == SPEED_100)
1881 bp->flags |= B44_FLAG_100_BASE_T;
1882 if (cmd->duplex == DUPLEX_FULL)
1883 bp->flags |= B44_FLAG_FULL_DUPLEX;
1884 }
1885
1886 if (netif_running(dev))
1887 b44_setup_phy(bp);
1888
1889 spin_unlock_irq(&bp->lock);
1890
1891 return 0;
1892 }
1893
1894 static void b44_get_ringparam(struct net_device *dev,
1895 struct ethtool_ringparam *ering)
1896 {
1897 struct b44 *bp = netdev_priv(dev);
1898
1899 ering->rx_max_pending = B44_RX_RING_SIZE - 1;
1900 ering->rx_pending = bp->rx_pending;
1901
1902 /* XXX ethtool lacks a tx_max_pending, oops... */
1903 }
1904
1905 static int b44_set_ringparam(struct net_device *dev,
1906 struct ethtool_ringparam *ering)
1907 {
1908 struct b44 *bp = netdev_priv(dev);
1909
1910 if ((ering->rx_pending > B44_RX_RING_SIZE - 1) ||
1911 (ering->rx_mini_pending != 0) ||
1912 (ering->rx_jumbo_pending != 0) ||
1913 (ering->tx_pending > B44_TX_RING_SIZE - 1))
1914 return -EINVAL;
1915
1916 spin_lock_irq(&bp->lock);
1917
1918 bp->rx_pending = ering->rx_pending;
1919 bp->tx_pending = ering->tx_pending;
1920
1921 b44_halt(bp);
1922 b44_init_rings(bp);
1923 b44_init_hw(bp, B44_FULL_RESET);
1924 netif_wake_queue(bp->dev);
1925 spin_unlock_irq(&bp->lock);
1926
1927 b44_enable_ints(bp);
1928
1929 return 0;
1930 }
1931
1932 static void b44_get_pauseparam(struct net_device *dev,
1933 struct ethtool_pauseparam *epause)
1934 {
1935 struct b44 *bp = netdev_priv(dev);
1936
1937 epause->autoneg =
1938 (bp->flags & B44_FLAG_PAUSE_AUTO) != 0;
1939 epause->rx_pause =
1940 (bp->flags & B44_FLAG_RX_PAUSE) != 0;
1941 epause->tx_pause =
1942 (bp->flags & B44_FLAG_TX_PAUSE) != 0;
1943 }
1944
1945 static int b44_set_pauseparam(struct net_device *dev,
1946 struct ethtool_pauseparam *epause)
1947 {
1948 struct b44 *bp = netdev_priv(dev);
1949
1950 spin_lock_irq(&bp->lock);
1951 if (epause->autoneg)
1952 bp->flags |= B44_FLAG_PAUSE_AUTO;
1953 else
1954 bp->flags &= ~B44_FLAG_PAUSE_AUTO;
1955 if (epause->rx_pause)
1956 bp->flags |= B44_FLAG_RX_PAUSE;
1957 else
1958 bp->flags &= ~B44_FLAG_RX_PAUSE;
1959 if (epause->tx_pause)
1960 bp->flags |= B44_FLAG_TX_PAUSE;
1961 else
1962 bp->flags &= ~B44_FLAG_TX_PAUSE;
1963 if (bp->flags & B44_FLAG_PAUSE_AUTO) {
1964 b44_halt(bp);
1965 b44_init_rings(bp);
1966 b44_init_hw(bp, B44_FULL_RESET);
1967 } else {
1968 __b44_set_flow_ctrl(bp, bp->flags);
1969 }
1970 spin_unlock_irq(&bp->lock);
1971
1972 b44_enable_ints(bp);
1973
1974 return 0;
1975 }
1976
1977 static void b44_get_strings(struct net_device *dev, u32 stringset, u8 *data)
1978 {
1979 switch(stringset) {
1980 case ETH_SS_STATS:
1981 memcpy(data, *b44_gstrings, sizeof(b44_gstrings));
1982 break;
1983 }
1984 }
1985
1986 static int b44_get_sset_count(struct net_device *dev, int sset)
1987 {
1988 switch (sset) {
1989 case ETH_SS_STATS:
1990 return ARRAY_SIZE(b44_gstrings);
1991 default:
1992 return -EOPNOTSUPP;
1993 }
1994 }
1995
static void b44_get_ethtool_stats(struct net_device *dev,
				  struct ethtool_stats *stats, u64 *data)
{
	struct b44 *bp = netdev_priv(dev);
	u32 *val = &bp->hw_stats.tx_good_octets;
	u32 i;

	spin_lock_irq(&bp->lock);

	b44_stats_update(bp);

	for (i = 0; i < ARRAY_SIZE(b44_gstrings); i++)
		*data++ = *val++;

	spin_unlock_irq(&bp->lock);
}

static void b44_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
{
	struct b44 *bp = netdev_priv(dev);

	wol->supported = WAKE_MAGIC;
	if (bp->flags & B44_FLAG_WOL_ENABLE)
		wol->wolopts = WAKE_MAGIC;
	else
		wol->wolopts = 0;
	memset(&wol->sopass, 0, sizeof(wol->sopass));
}

static int b44_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
{
	struct b44 *bp = netdev_priv(dev);

	spin_lock_irq(&bp->lock);
	if (wol->wolopts & WAKE_MAGIC)
		bp->flags |= B44_FLAG_WOL_ENABLE;
	else
		bp->flags &= ~B44_FLAG_WOL_ENABLE;
	spin_unlock_irq(&bp->lock);

	return 0;
}

static const struct ethtool_ops b44_ethtool_ops = {
	.get_drvinfo		= b44_get_drvinfo,
	.get_settings		= b44_get_settings,
	.set_settings		= b44_set_settings,
	.nway_reset		= b44_nway_reset,
	.get_link		= ethtool_op_get_link,
	.get_wol		= b44_get_wol,
	.set_wol		= b44_set_wol,
	.get_ringparam		= b44_get_ringparam,
	.set_ringparam		= b44_set_ringparam,
	.get_pauseparam		= b44_get_pauseparam,
	.set_pauseparam		= b44_set_pauseparam,
	.get_msglevel		= b44_get_msglevel,
	.set_msglevel		= b44_set_msglevel,
	.get_strings		= b44_get_strings,
	.get_sset_count		= b44_get_sset_count,
	.get_ethtool_stats	= b44_get_ethtool_stats,
};

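/* Editor's note: the MII ioctls (SIOCGMIIPHY, SIOCGMIIREG, SIOCSMIIREG)
 * are handled entirely by generic_mii_ioctl() against bp->mii_if; the
 * lock only serializes PHY register access with the rest of the driver.
 */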
static int b44_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
	struct mii_ioctl_data *data = if_mii(ifr);
	struct b44 *bp = netdev_priv(dev);
	int err = -EINVAL;

	if (!netif_running(dev))
		goto out;

	spin_lock_irq(&bp->lock);
	err = generic_mii_ioctl(&bp->mii_if, data, cmd, NULL);
	spin_unlock_irq(&bp->lock);
out:
	return err;
}

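/* Editor's note: the "invariants" are the per-device constants pulled
 * out of the SSB SPROM: the MAC and PHY addresses come from the et1*
 * fields for a second Ethernet core on a native SSB bus, and from the
 * et0* fields otherwise.
 */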
static int __devinit b44_get_invariants(struct b44 *bp)
{
	struct ssb_device *sdev = bp->sdev;
	int err = 0;
	u8 *addr;

	bp->dma_offset = ssb_dma_translation(sdev);

	if (sdev->bus->bustype == SSB_BUSTYPE_SSB &&
	    instance > 1) {
		addr = sdev->bus->sprom.et1mac;
		bp->phy_addr = sdev->bus->sprom.et1phyaddr;
	} else {
		addr = sdev->bus->sprom.et0mac;
		bp->phy_addr = sdev->bus->sprom.et0phyaddr;
	}
	/* Some ROMs have buggy PHY addresses with the high
	 * bits set (sign extension?). Truncate them to a
	 * valid PHY address. */
	bp->phy_addr &= 0x1F;

	memcpy(bp->dev->dev_addr, addr, 6);

	if (!is_valid_ether_addr(&bp->dev->dev_addr[0])) {
		printk(KERN_ERR PFX "Invalid MAC address found in EEPROM\n");
		return -EINVAL;
	}

	memcpy(bp->dev->perm_addr, bp->dev->dev_addr, bp->dev->addr_len);

	bp->imask = IMASK_DEF;

	/* XXX - really required?
	   bp->flags |= B44_FLAG_BUGGY_TXPTR;
	*/

	if (bp->sdev->id.revision >= 7)
		bp->flags |= B44_FLAG_B0_ANDLATER;

	return err;
}

static const struct net_device_ops b44_netdev_ops = {
	.ndo_open		= b44_open,
	.ndo_stop		= b44_close,
	.ndo_start_xmit		= b44_start_xmit,
	.ndo_get_stats		= b44_get_stats,
	.ndo_set_multicast_list	= b44_set_rx_mode,
	.ndo_set_mac_address	= b44_set_mac_addr,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_do_ioctl		= b44_ioctl,
	.ndo_tx_timeout		= b44_tx_timeout,
	.ndo_change_mtu		= b44_change_mtu,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= b44_poll_controller,
#endif
};

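/* Probe path.  Editor's note: the DMA_BIT_MASK(30) below reflects the
 * core's 1 GB DMA addressing limit (bus addresses are translated via
 * bp->dma_offset), which is why a 30-bit mask is demanded rather than
 * the usual 32-bit one.
 */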
static int __devinit b44_init_one(struct ssb_device *sdev,
				  const struct ssb_device_id *ent)
{
	static int b44_version_printed = 0;
	struct net_device *dev;
	struct b44 *bp;
	int err;

	instance++;

	if (b44_version_printed++ == 0)
		printk(KERN_INFO "%s", version);

	dev = alloc_etherdev(sizeof(*bp));
	if (!dev) {
		dev_err(sdev->dev, "Etherdev alloc failed, aborting.\n");
		err = -ENOMEM;
		goto out;
	}

	SET_NETDEV_DEV(dev, sdev->dev);

	/* No interesting netdevice features in this card... */
	dev->features |= 0;

	bp = netdev_priv(dev);
	bp->sdev = sdev;
	bp->dev = dev;
	bp->force_copybreak = 0;

	bp->msg_enable = netif_msg_init(b44_debug, B44_DEF_MSG_ENABLE);

	spin_lock_init(&bp->lock);

	bp->rx_pending = B44_DEF_RX_RING_PENDING;
	bp->tx_pending = B44_DEF_TX_RING_PENDING;

	dev->netdev_ops = &b44_netdev_ops;
	netif_napi_add(dev, &bp->napi, b44_poll, 64);
	dev->watchdog_timeo = B44_TX_TIMEOUT;
	dev->irq = sdev->irq;
	SET_ETHTOOL_OPS(dev, &b44_ethtool_ops);

	netif_carrier_off(dev);

	err = ssb_bus_powerup(sdev->bus, 0);
	if (err) {
		dev_err(sdev->dev,
			"Failed to powerup the bus\n");
		goto err_out_free_dev;
	}
	err = ssb_dma_set_mask(sdev, DMA_BIT_MASK(30));
	if (err) {
		dev_err(sdev->dev,
			"Required 30BIT DMA mask unsupported by the system.\n");
		goto err_out_powerdown;
	}
	err = b44_get_invariants(bp);
	if (err) {
		dev_err(sdev->dev,
			"Problem fetching invariants of chip, aborting.\n");
		goto err_out_powerdown;
	}

	bp->mii_if.dev = dev;
	bp->mii_if.mdio_read = b44_mii_read;
	bp->mii_if.mdio_write = b44_mii_write;
	bp->mii_if.phy_id = bp->phy_addr;
	bp->mii_if.phy_id_mask = 0x1f;
	bp->mii_if.reg_num_mask = 0x1f;

	/* By default, advertise all speed/duplex settings. */
	bp->flags |= (B44_FLAG_ADV_10HALF | B44_FLAG_ADV_10FULL |
		      B44_FLAG_ADV_100HALF | B44_FLAG_ADV_100FULL);

	/* By default, auto-negotiate PAUSE. */
	bp->flags |= B44_FLAG_PAUSE_AUTO;

	err = register_netdev(dev);
	if (err) {
		dev_err(sdev->dev, "Cannot register net device, aborting.\n");
		goto err_out_powerdown;
	}

	ssb_set_drvdata(sdev, dev);

	/* Chip reset provides power to the b44 MAC & PCI cores, which
	 * is necessary for MAC register access.
	 */
	b44_chip_reset(bp, B44_CHIP_RESET_FULL);

	printk(KERN_INFO "%s: Broadcom 44xx/47xx 10/100BaseT Ethernet %pM\n",
	       dev->name, dev->dev_addr);

	return 0;

err_out_powerdown:
	ssb_bus_may_powerdown(sdev->bus);

err_out_free_dev:
	free_netdev(dev);

out:
	return err;
}

static void __devexit b44_remove_one(struct ssb_device *sdev)
{
	struct net_device *dev = ssb_get_drvdata(sdev);

	unregister_netdev(dev);
	ssb_device_disable(sdev, 0);
	ssb_bus_may_powerdown(sdev->bus);
	free_netdev(dev);
	ssb_pcihost_set_power_state(sdev, PCI_D3hot);
	ssb_set_drvdata(sdev, NULL);
}

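/* Editor's note: suspend frees the IRQ and tears the rings down, but
 * when Wake-on-LAN is armed it first performs a partial re-init so the
 * MAC stays powered just enough to match the magic packet pattern.
 */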
static int b44_suspend(struct ssb_device *sdev, pm_message_t state)
{
	struct net_device *dev = ssb_get_drvdata(sdev);
	struct b44 *bp = netdev_priv(dev);

	if (!netif_running(dev))
		return 0;

	del_timer_sync(&bp->timer);

	spin_lock_irq(&bp->lock);

	b44_halt(bp);
	netif_carrier_off(bp->dev);
	netif_device_detach(bp->dev);
	b44_free_rings(bp);

	spin_unlock_irq(&bp->lock);

	free_irq(dev->irq, dev);
	if (bp->flags & B44_FLAG_WOL_ENABLE) {
		b44_init_hw(bp, B44_PARTIAL_RESET);
		b44_setup_wol(bp);
	}

	ssb_pcihost_set_power_state(sdev, PCI_D3hot);
	return 0;
}

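/* Editor's note: resume mirrors suspend in reverse; the IRQ must be
 * re-requested here because suspend released it, and the periodic link
 * timer is rearmed to fire on the next jiffy.
 */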
static int b44_resume(struct ssb_device *sdev)
{
	struct net_device *dev = ssb_get_drvdata(sdev);
	struct b44 *bp = netdev_priv(dev);
	int rc = 0;

	rc = ssb_bus_powerup(sdev->bus, 0);
	if (rc) {
		dev_err(sdev->dev,
			"Failed to powerup the bus\n");
		return rc;
	}

	if (!netif_running(dev))
		return 0;

	rc = request_irq(dev->irq, b44_interrupt, IRQF_SHARED, dev->name, dev);
	if (rc) {
		printk(KERN_ERR PFX "%s: request_irq failed\n", dev->name);
		return rc;
	}

	spin_lock_irq(&bp->lock);

	b44_init_rings(bp);
	b44_init_hw(bp, B44_FULL_RESET);
	netif_device_attach(bp->dev);
	spin_unlock_irq(&bp->lock);

	b44_enable_ints(bp);
	netif_wake_queue(dev);

	mod_timer(&bp->timer, jiffies + 1);

	return 0;
}

static struct ssb_driver b44_ssb_driver = {
	.name		= DRV_MODULE_NAME,
	.id_table	= b44_ssb_tbl,
	.probe		= b44_init_one,
	.remove		= __devexit_p(b44_remove_one),
	.suspend	= b44_suspend,
	.resume		= b44_resume,
};

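/* Editor's note: even on PCI hosts the chip is driven through the SSB
 * layer; ssb_pcihost_register() wraps the PCI driver, and these helpers
 * compile away to no-ops when CONFIG_B44_PCI is not set.
 */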
static inline int b44_pci_init(void)
{
	int err = 0;
#ifdef CONFIG_B44_PCI
	err = ssb_pcihost_register(&b44_pci_driver);
#endif
	return err;
}

static inline void b44_pci_exit(void)
{
#ifdef CONFIG_B44_PCI
	ssb_pcihost_unregister(&b44_pci_driver);
#endif
}

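/* Editor's note (worked example, assuming a 32-byte cache line):
 * dma_get_cache_alignment() returns 32, so dma_desc_align_mask becomes
 * ~31 == 0xffffffe0 and dma_desc_sync_size is max(32, sizeof(struct
 * dma_desc)); descriptors are then synced in whole cache-line units,
 * avoiding partial-line coherency problems on non-snooping systems.
 */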
static int __init b44_init(void)
{
	unsigned int dma_desc_align_size = dma_get_cache_alignment();
	int err;

	/* Setup parameters for syncing RX/TX DMA descriptors */
	dma_desc_align_mask = ~(dma_desc_align_size - 1);
	dma_desc_sync_size = max_t(unsigned int, dma_desc_align_size, sizeof(struct dma_desc));

	err = b44_pci_init();
	if (err)
		return err;
	err = ssb_driver_register(&b44_ssb_driver);
	if (err)
		b44_pci_exit();
	return err;
}

static void __exit b44_cleanup(void)
{
	ssb_driver_unregister(&b44_ssb_driver);
	b44_pci_exit();
}

module_init(b44_init);
module_exit(b44_cleanup);
