/* $Id: sunqe.c,v 1.55 2002/01/15 06:48:55 davem Exp $
 * sunqe.c: Sparc QuadEthernet 10baseT SBUS card driver.
 *          Once again I am out to prove that every ethernet
 *          controller out there can be most efficiently programmed
 *          if you make it look like a LANCE.
 *
 * Copyright (C) 1996, 1999, 2003 David S. Miller (davem@redhat.com)
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/fcntl.h>
#include <linux/interrupt.h>
#include <linux/ioport.h>
#include <linux/in.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/crc32.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/ethtool.h>
#include <linux/bitops.h>

#include <asm/system.h>
#include <asm/io.h>
#include <asm/dma.h>
#include <asm/byteorder.h>
#include <asm/idprom.h>
#include <asm/sbus.h>
#include <asm/openprom.h>
#include <asm/oplib.h>
#include <asm/auxio.h>
#include <asm/pgtable.h>
#include <asm/irq.h>

#include "sunqe.h"

#define DRV_NAME	"sunqe"
#define DRV_VERSION	"3.0"
#define DRV_RELDATE	"8/24/03"
#define DRV_AUTHOR	"David S. Miller (davem@redhat.com)"

static char version[] =
	DRV_NAME ".c:v" DRV_VERSION " " DRV_RELDATE " " DRV_AUTHOR "\n";

MODULE_VERSION(DRV_VERSION);
MODULE_AUTHOR(DRV_AUTHOR);
MODULE_DESCRIPTION("Sun QuadEthernet 10baseT SBUS card driver");
MODULE_LICENSE("GPL");

static struct sunqec *root_qec_dev;

static void qe_set_multicast(struct net_device *dev);

#define QEC_RESET_TRIES 200

static inline int qec_global_reset(void __iomem *gregs)
{
	int tries = QEC_RESET_TRIES;

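	/* GLOB_CTRL_RESET is self-clearing: write it, then poll for up
	 * to QEC_RESET_TRIES * 20us for the hardware to drop the bit.
	 */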
	sbus_writel(GLOB_CTRL_RESET, gregs + GLOB_CTRL);
	while (--tries) {
		u32 tmp = sbus_readl(gregs + GLOB_CTRL);
		if (tmp & GLOB_CTRL_RESET) {
			udelay(20);
			continue;
		}
		break;
	}
	if (tries)
		return 0;
	printk(KERN_ERR "QuadEther: AIEEE cannot reset the QEC!\n");
	return -1;
}

#define MACE_RESET_RETRIES 200
#define QE_RESET_RETRIES   200

static inline int qe_stop(struct sunqe *qep)
{
	void __iomem *cregs = qep->qcregs;
	void __iomem *mregs = qep->mregs;
	int tries;

	/* Reset the MACE, then the QEC channel. */
	sbus_writeb(MREGS_BCONFIG_RESET, mregs + MREGS_BCONFIG);
	tries = MACE_RESET_RETRIES;
	while (--tries) {
		u8 tmp = sbus_readb(mregs + MREGS_BCONFIG);
		if (tmp & MREGS_BCONFIG_RESET) {
			udelay(20);
			continue;
		}
		break;
	}
	if (!tries) {
		printk(KERN_ERR "QuadEther: AIEEE cannot reset the MACE!\n");
		return -1;
	}

	sbus_writel(CREG_CTRL_RESET, cregs + CREG_CTRL);
	tries = QE_RESET_RETRIES;
	while (--tries) {
		u32 tmp = sbus_readl(cregs + CREG_CTRL);
		if (tmp & CREG_CTRL_RESET) {
			udelay(20);
			continue;
		}
		break;
	}
	if (!tries) {
		printk(KERN_ERR "QuadEther: Cannot reset QE channel!\n");
		return -1;
	}
	return 0;
}

static void qe_init_rings(struct sunqe *qep)
{
	struct qe_init_block *qb = qep->qe_block;
	struct sunqe_buffers *qbufs = qep->buffers;
	__u32 qbufs_dvma = qep->buffers_dvma;
	int i;

	qep->rx_new = qep->rx_old = qep->tx_new = qep->tx_old = 0;
	memset(qb, 0, sizeof(struct qe_init_block));
	memset(qbufs, 0, sizeof(struct sunqe_buffers));
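	/* Hand every rx descriptor to the chip: RXD_OWN passes ownership
	 * and the length field advertises the full RXD_PKT_SZ buffer.
	 */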
	for (i = 0; i < RX_RING_SIZE; i++) {
		qb->qe_rxd[i].rx_addr = qbufs_dvma + qebuf_offset(rx_buf, i);
		qb->qe_rxd[i].rx_flags =
			(RXD_OWN | ((RXD_PKT_SZ) & RXD_LENGTH));
	}
}

static int qe_init(struct sunqe *qep, int from_irq)
{
	struct sunqec *qecp = qep->parent;
	void __iomem *cregs = qep->qcregs;
	void __iomem *mregs = qep->mregs;
	void __iomem *gregs = qecp->gregs;
	unsigned char *e = &qep->dev->dev_addr[0];
	u32 tmp;
	int i;

	/* Shut it up. */
	if (qe_stop(qep))
		return -EAGAIN;

	/* Setup initial rx/tx init block pointers. */
	sbus_writel(qep->qblock_dvma + qib_offset(qe_rxd, 0), cregs + CREG_RXDS);
	sbus_writel(qep->qblock_dvma + qib_offset(qe_txd, 0), cregs + CREG_TXDS);

	/* Enable/mask the various irq's. */
	sbus_writel(0, cregs + CREG_RIMASK);
	sbus_writel(1, cregs + CREG_TIMASK);
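	/* rx irqs stay unmasked; tx irqs remain masked until the ring
	 * fills up and qe_start_xmit() unmasks them (lazy tx reclaim).
	 */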

	sbus_writel(0, cregs + CREG_QMASK);
	sbus_writel(CREG_MMASK_RXCOLL, cregs + CREG_MMASK);

	/* Setup the FIFO pointers into QEC local memory. */
	tmp = qep->channel * sbus_readl(gregs + GLOB_MSIZE);
	sbus_writel(tmp, cregs + CREG_RXRBUFPTR);
	sbus_writel(tmp, cregs + CREG_RXWBUFPTR);

	tmp = sbus_readl(cregs + CREG_RXRBUFPTR) +
		sbus_readl(gregs + GLOB_RSIZE);
	sbus_writel(tmp, cregs + CREG_TXRBUFPTR);
	sbus_writel(tmp, cregs + CREG_TXWBUFPTR);

	/* Clear the channel collision counter. */
	sbus_writel(0, cregs + CREG_CCNT);

	/* For 10baseT, neither inter-frame spacing nor throttling seems
	 * to be necessary.
	 */
	sbus_writel(0, cregs + CREG_PIPG);

	/* Now dork with the AMD MACE. */
	sbus_writeb(MREGS_PHYCONFIG_AUTO, mregs + MREGS_PHYCONFIG);
	sbus_writeb(MREGS_TXFCNTL_AUTOPAD, mregs + MREGS_TXFCNTL);
	sbus_writeb(0, mregs + MREGS_RXFCNTL);

	/* The QEC dma's the rx'd packets from local memory out to main memory,
	 * and therefore it interrupts when the packet reception is "complete".
	 * So don't listen for the MACE talking about it.
	 */
	sbus_writeb(MREGS_IMASK_COLL | MREGS_IMASK_RXIRQ, mregs + MREGS_IMASK);
	sbus_writeb(MREGS_BCONFIG_BSWAP | MREGS_BCONFIG_64TS, mregs + MREGS_BCONFIG);
	sbus_writeb((MREGS_FCONFIG_TXF16 | MREGS_FCONFIG_RXF32 |
		     MREGS_FCONFIG_RFWU | MREGS_FCONFIG_TFWU),
		    mregs + MREGS_FCONFIG);

	/* Only usable interface on QuadEther is twisted pair. */
	sbus_writeb(MREGS_PLSCONFIG_TP, mregs + MREGS_PLSCONFIG);

	/* Tell MACE we are changing the ether address. */
	sbus_writeb(MREGS_IACONFIG_ACHNGE | MREGS_IACONFIG_PARESET,
		    mregs + MREGS_IACONFIG);
	while ((sbus_readb(mregs + MREGS_IACONFIG) & MREGS_IACONFIG_ACHNGE) != 0)
		barrier();
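	/* After the address-change handshake the MACE apparently steps
	 * through its six address bytes internally, which is why all
	 * six writes below target the same register offset.
	 */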
	sbus_writeb(e[0], mregs + MREGS_ETHADDR);
	sbus_writeb(e[1], mregs + MREGS_ETHADDR);
	sbus_writeb(e[2], mregs + MREGS_ETHADDR);
	sbus_writeb(e[3], mregs + MREGS_ETHADDR);
	sbus_writeb(e[4], mregs + MREGS_ETHADDR);
	sbus_writeb(e[5], mregs + MREGS_ETHADDR);

	/* Clear out the address filter. */
	sbus_writeb(MREGS_IACONFIG_ACHNGE | MREGS_IACONFIG_LARESET,
		    mregs + MREGS_IACONFIG);
	while ((sbus_readb(mregs + MREGS_IACONFIG) & MREGS_IACONFIG_ACHNGE) != 0)
		barrier();
	for (i = 0; i < 8; i++)
		sbus_writeb(0, mregs + MREGS_FILTER);

	/* Address changes are now complete. */
	sbus_writeb(0, mregs + MREGS_IACONFIG);

	qe_init_rings(qep);

	/* Wait a little bit for the link to come up... */
	mdelay(5);
	if (!(sbus_readb(mregs + MREGS_PHYCONFIG) & MREGS_PHYCONFIG_LTESTDIS)) {
		int tries = 50;

		while (tries--) {
			u8 tmp;

			mdelay(5);
			barrier();
			tmp = sbus_readb(mregs + MREGS_PHYCONFIG);
			if ((tmp & MREGS_PHYCONFIG_LSTAT) != 0)
				break;
		}
		if (tries < 0)
			printk(KERN_NOTICE "%s: Warning, link state is down.\n", qep->dev->name);
	}

	/* Missed packet counter is cleared on a read. */
	sbus_readb(mregs + MREGS_MPCNT);

	/* Reload multicast information, this will enable the receiver
	 * and transmitter.
	 */
	qe_set_multicast(qep->dev);

	/* QEC should now start to show interrupts. */
	return 0;
}

/* Grrr, certain error conditions completely lock up the AMD MACE,
 * so when we get these we _must_ reset the chip.
 */
static int qe_is_bolixed(struct sunqe *qep, u32 qe_status)
{
	struct net_device *dev = qep->dev;
	int mace_hwbug_workaround = 0;

	if (qe_status & CREG_STAT_EDEFER) {
		printk(KERN_ERR "%s: Excessive transmit defers.\n", dev->name);
		qep->net_stats.tx_errors++;
	}

	if (qe_status & CREG_STAT_CLOSS) {
		printk(KERN_ERR "%s: Carrier lost, link down?\n", dev->name);
		qep->net_stats.tx_errors++;
		qep->net_stats.tx_carrier_errors++;
	}

	if (qe_status & CREG_STAT_ERETRIES) {
		printk(KERN_ERR "%s: Excessive transmit retries (more than 16).\n", dev->name);
		qep->net_stats.tx_errors++;
		mace_hwbug_workaround = 1;
	}

	if (qe_status & CREG_STAT_LCOLL) {
		printk(KERN_ERR "%s: Late transmit collision.\n", dev->name);
		qep->net_stats.tx_errors++;
		qep->net_stats.collisions++;
		mace_hwbug_workaround = 1;
	}

	if (qe_status & CREG_STAT_FUFLOW) {
		printk(KERN_ERR "%s: Transmit fifo underflow, driver bug.\n", dev->name);
		qep->net_stats.tx_errors++;
		mace_hwbug_workaround = 1;
	}

	if (qe_status & CREG_STAT_JERROR) {
		printk(KERN_ERR "%s: Jabber error.\n", dev->name);
	}

	if (qe_status & CREG_STAT_BERROR) {
		printk(KERN_ERR "%s: Babble error.\n", dev->name);
	}

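	/* Each *COFLOW status bit means one of the chip's 8-bit event
	 * counters wrapped around, hence the bumps of 256 below.
	 */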
	if (qe_status & CREG_STAT_CCOFLOW) {
		qep->net_stats.tx_errors += 256;
		qep->net_stats.collisions += 256;
	}

	if (qe_status & CREG_STAT_TXDERROR) {
		printk(KERN_ERR "%s: Transmit descriptor is bogus, driver bug.\n", dev->name);
		qep->net_stats.tx_errors++;
		qep->net_stats.tx_aborted_errors++;
		mace_hwbug_workaround = 1;
	}

	if (qe_status & CREG_STAT_TXLERR) {
		printk(KERN_ERR "%s: Transmit late error.\n", dev->name);
		qep->net_stats.tx_errors++;
		mace_hwbug_workaround = 1;
	}

	if (qe_status & CREG_STAT_TXPERR) {
		printk(KERN_ERR "%s: Transmit DMA parity error.\n", dev->name);
		qep->net_stats.tx_errors++;
		qep->net_stats.tx_aborted_errors++;
		mace_hwbug_workaround = 1;
	}

	if (qe_status & CREG_STAT_TXSERR) {
		printk(KERN_ERR "%s: Transmit DMA sbus error ack.\n", dev->name);
		qep->net_stats.tx_errors++;
		qep->net_stats.tx_aborted_errors++;
		mace_hwbug_workaround = 1;
	}

	if (qe_status & CREG_STAT_RCCOFLOW) {
		qep->net_stats.rx_errors += 256;
		qep->net_stats.collisions += 256;
	}

	if (qe_status & CREG_STAT_RUOFLOW) {
		qep->net_stats.rx_errors += 256;
		qep->net_stats.rx_over_errors += 256;
	}

	if (qe_status & CREG_STAT_MCOFLOW) {
		qep->net_stats.rx_errors += 256;
		qep->net_stats.rx_missed_errors += 256;
	}

	if (qe_status & CREG_STAT_RXFOFLOW) {
		printk(KERN_ERR "%s: Receive fifo overflow.\n", dev->name);
		qep->net_stats.rx_errors++;
		qep->net_stats.rx_over_errors++;
	}

	if (qe_status & CREG_STAT_RLCOLL) {
		printk(KERN_ERR "%s: Late receive collision.\n", dev->name);
		qep->net_stats.rx_errors++;
		qep->net_stats.collisions++;
	}

	if (qe_status & CREG_STAT_FCOFLOW) {
		qep->net_stats.rx_errors += 256;
		qep->net_stats.rx_frame_errors += 256;
	}

	if (qe_status & CREG_STAT_CECOFLOW) {
		qep->net_stats.rx_errors += 256;
		qep->net_stats.rx_crc_errors += 256;
	}

	if (qe_status & CREG_STAT_RXDROP) {
		printk(KERN_ERR "%s: Receive packet dropped.\n", dev->name);
		qep->net_stats.rx_errors++;
		qep->net_stats.rx_dropped++;
		qep->net_stats.rx_missed_errors++;
	}

	if (qe_status & CREG_STAT_RXSMALL) {
		printk(KERN_ERR "%s: Receive buffer too small, driver bug.\n", dev->name);
		qep->net_stats.rx_errors++;
		qep->net_stats.rx_length_errors++;
	}

	if (qe_status & CREG_STAT_RXLERR) {
		printk(KERN_ERR "%s: Receive late error.\n", dev->name);
		qep->net_stats.rx_errors++;
		mace_hwbug_workaround = 1;
	}

	if (qe_status & CREG_STAT_RXPERR) {
		printk(KERN_ERR "%s: Receive DMA parity error.\n", dev->name);
		qep->net_stats.rx_errors++;
		qep->net_stats.rx_missed_errors++;
		mace_hwbug_workaround = 1;
	}

	if (qe_status & CREG_STAT_RXSERR) {
		printk(KERN_ERR "%s: Receive DMA sbus error ack.\n", dev->name);
		qep->net_stats.rx_errors++;
		qep->net_stats.rx_missed_errors++;
		mace_hwbug_workaround = 1;
	}

	if (mace_hwbug_workaround)
		qe_init(qep, 1);
	return mace_hwbug_workaround;
}

/* Per-QE receive interrupt service routine. Unlike the happy meal,
 * packets are always copied out of the fixed DVMA ring buffers into
 * freshly allocated skb's.
 */
static void qe_rx(struct sunqe *qep)
{
	struct qe_rxd *rxbase = &qep->qe_block->qe_rxd[0];
	struct qe_rxd *this;
	struct sunqe_buffers *qbufs = qep->buffers;
	__u32 qbufs_dvma = qep->buffers_dvma;
	int elem = qep->rx_new, drops = 0;
	u32 flags;

	this = &rxbase[elem];
	while (!((flags = this->rx_flags) & RXD_OWN)) {
		struct sk_buff *skb;
		unsigned char *this_qbuf =
			&qbufs->rx_buf[elem & (RX_RING_SIZE - 1)][0];
		__u32 this_qbuf_dvma = qbufs_dvma +
			qebuf_offset(rx_buf, (elem & (RX_RING_SIZE - 1)));
		struct qe_rxd *end_rxd =
			&rxbase[(elem + RX_RING_SIZE) & (RX_RING_MAXSIZE - 1)];
		int len = (flags & RXD_LENGTH) - 4;  /* QE adds ether FCS size to len */

		/* Check for errors. */
		if (len < ETH_ZLEN) {
			qep->net_stats.rx_errors++;
			qep->net_stats.rx_length_errors++;
			qep->net_stats.rx_dropped++;
		} else {
			skb = dev_alloc_skb(len + 2);
			if (skb == NULL) {
				drops++;
				qep->net_stats.rx_dropped++;
			} else {
				skb->dev = qep->dev;
				skb_reserve(skb, 2);
				skb_put(skb, len);
				eth_copy_and_sum(skb, (unsigned char *) this_qbuf,
						 len, 0);
				skb->protocol = eth_type_trans(skb, qep->dev);
				netif_rx(skb);
				qep->dev->last_rx = jiffies;
				qep->net_stats.rx_packets++;
				qep->net_stats.rx_bytes += len;
			}
		}
		end_rxd->rx_addr = this_qbuf_dvma;
		end_rxd->rx_flags = (RXD_OWN | ((RXD_PKT_SZ) & RXD_LENGTH));

		elem = NEXT_RX(elem);
		this = &rxbase[elem];
	}
	qep->rx_new = elem;
	if (drops)
		printk(KERN_NOTICE "%s: Memory squeeze, deferring packet.\n", qep->dev->name);
}

static void qe_tx_reclaim(struct sunqe *qep);

/* Interrupts for all QE's get filtered out via the QEC master controller,
 * so we just run through each qe and check to see who is signaling
 * and thus needs to be serviced.
 */
static irqreturn_t qec_interrupt(int irq, void *dev_id, struct pt_regs *regs)
{
	struct sunqec *qecp = (struct sunqec *) dev_id;
	u32 qec_status;
	int channel = 0;

	/* Latch the status now. */
	qec_status = sbus_readl(qecp->gregs + GLOB_STAT);
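	/* GLOB_STAT packs four status bits per channel, with channel 0
	 * in the lowest nibble; shift right by 4 as each is serviced.
	 */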
	while (channel < 4) {
		if (qec_status & 0xf) {
			struct sunqe *qep = qecp->qes[channel];
			u32 qe_status;

			qe_status = sbus_readl(qep->qcregs + CREG_STAT);
			if (qe_status & CREG_STAT_ERRORS) {
				if (qe_is_bolixed(qep, qe_status))
					goto next;
			}
			if (qe_status & CREG_STAT_RXIRQ)
				qe_rx(qep);
			if (netif_queue_stopped(qep->dev) &&
			    (qe_status & CREG_STAT_TXIRQ)) {
				spin_lock(&qep->lock);
				qe_tx_reclaim(qep);
				if (TX_BUFFS_AVAIL(qep) > 0) {
					/* Wake net queue and return to
					 * lazy tx reclaim.
					 */
					netif_wake_queue(qep->dev);
					sbus_writel(1, qep->qcregs + CREG_TIMASK);
				}
				spin_unlock(&qep->lock);
			}
	next:
			;
		}
		qec_status >>= 4;
		channel++;
	}

	return IRQ_HANDLED;
}

static int qe_open(struct net_device *dev)
{
	struct sunqe *qep = (struct sunqe *) dev->priv;

	qep->mconfig = (MREGS_MCONFIG_TXENAB |
			MREGS_MCONFIG_RXENAB |
			MREGS_MCONFIG_MBAENAB);
	return qe_init(qep, 0);
}

static int qe_close(struct net_device *dev)
{
	struct sunqe *qep = (struct sunqe *) dev->priv;

	qe_stop(qep);
	return 0;
}

/* Reclaim TX'd frames from the ring. This must always run under
 * the IRQ protected qep->lock.
 */
static void qe_tx_reclaim(struct sunqe *qep)
{
	struct qe_txd *txbase = &qep->qe_block->qe_txd[0];
	int elem = qep->tx_old;

	while (elem != qep->tx_new) {
		u32 flags = txbase[elem].tx_flags;

		if (flags & TXD_OWN)
			break;
		elem = NEXT_TX(elem);
	}
	qep->tx_old = elem;
}

static void qe_tx_timeout(struct net_device *dev)
{
	struct sunqe *qep = (struct sunqe *) dev->priv;
	int tx_full;

	spin_lock_irq(&qep->lock);

	/* Try to reclaim, if that frees up some tx
	 * entries, we're fine.
	 */
	qe_tx_reclaim(qep);
	tx_full = TX_BUFFS_AVAIL(qep) <= 0;

	spin_unlock_irq(&qep->lock);

	if (!tx_full)
		goto out;

	printk(KERN_ERR "%s: transmit timed out, resetting\n", dev->name);
	qe_init(qep, 1);

out:
	netif_wake_queue(dev);
}

/* Get a packet queued to go onto the wire. */
static int qe_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct sunqe *qep = (struct sunqe *) dev->priv;
	struct sunqe_buffers *qbufs = qep->buffers;
	__u32 txbuf_dvma, qbufs_dvma = qep->buffers_dvma;
	unsigned char *txbuf;
	int len, entry;

	spin_lock_irq(&qep->lock);

	qe_tx_reclaim(qep);

	len = skb->len;
	entry = qep->tx_new;

	txbuf = &qbufs->tx_buf[entry & (TX_RING_SIZE - 1)][0];
	txbuf_dvma = qbufs_dvma +
		qebuf_offset(tx_buf, (entry & (TX_RING_SIZE - 1)));
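	/* Frames are copied into fixed DVMA-mapped bounce buffers, so
	 * no per-skb DMA mapping or unmapping is needed here.
	 */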

	/* Avoid a race: take the descriptor out of OWN state while we
	 * fill in its address and length, so the chip never sees a
	 * half-built entry.
	 */
	qep->qe_block->qe_txd[entry].tx_flags = TXD_UPDATE;

	memcpy(txbuf, skb->data, len);

	qep->qe_block->qe_txd[entry].tx_addr = txbuf_dvma;
	qep->qe_block->qe_txd[entry].tx_flags =
		(TXD_OWN | TXD_SOP | TXD_EOP | (len & TXD_LENGTH));
	qep->tx_new = NEXT_TX(entry);

	/* Get it going. */
	dev->trans_start = jiffies;
	sbus_writel(CREG_CTRL_TWAKEUP, qep->qcregs + CREG_CTRL);

	qep->net_stats.tx_packets++;
	qep->net_stats.tx_bytes += len;

	if (TX_BUFFS_AVAIL(qep) <= 0) {
		/* Halt the net queue and enable tx interrupts.
		 * When the tx queue empties the tx irq handler
		 * will wake up the queue and return us back to
		 * the lazy tx reclaim scheme.
		 */
		netif_stop_queue(dev);
		sbus_writel(0, qep->qcregs + CREG_TIMASK);
	}
	spin_unlock_irq(&qep->lock);

	dev_kfree_skb(skb);

	return 0;
}

static struct net_device_stats *qe_get_stats(struct net_device *dev)
{
	struct sunqe *qep = (struct sunqe *) dev->priv;

	return &qep->net_stats;
}

static void qe_set_multicast(struct net_device *dev)
{
	struct sunqe *qep = (struct sunqe *) dev->priv;
	struct dev_mc_list *dmi = dev->mc_list;
	u8 new_mconfig = qep->mconfig;
	char *addrs;
	int i;
	u32 crc;

	/* Lock out others. */
	netif_stop_queue(dev);

	if ((dev->flags & IFF_ALLMULTI) || (dev->mc_count > 64)) {
		sbus_writeb(MREGS_IACONFIG_ACHNGE | MREGS_IACONFIG_LARESET,
			    qep->mregs + MREGS_IACONFIG);
		while ((sbus_readb(qep->mregs + MREGS_IACONFIG) & MREGS_IACONFIG_ACHNGE) != 0)
			barrier();
		for (i = 0; i < 8; i++)
			sbus_writeb(0xff, qep->mregs + MREGS_FILTER);
		sbus_writeb(0, qep->mregs + MREGS_IACONFIG);
	} else if (dev->flags & IFF_PROMISC) {
		new_mconfig |= MREGS_MCONFIG_PROMISC;
	} else {
		u16 hash_table[4];
		u8 *hbytes = (unsigned char *) &hash_table[0];

		for (i = 0; i < 4; i++)
			hash_table[i] = 0;

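		/* Build the 64-bit logical address filter: the top six
		 * bits of each multicast address's little-endian CRC-32
		 * select one of 64 filter bits.
		 */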
		for (i = 0; i < dev->mc_count; i++) {
			addrs = dmi->dmi_addr;
			dmi = dmi->next;

			if (!(*addrs & 1))
				continue;
			crc = ether_crc_le(6, addrs);
			crc >>= 26;
			hash_table[crc >> 4] |= 1 << (crc & 0xf);
		}
		/* Program the qe with the new filter value. */
		sbus_writeb(MREGS_IACONFIG_ACHNGE | MREGS_IACONFIG_LARESET,
			    qep->mregs + MREGS_IACONFIG);
		while ((sbus_readb(qep->mregs + MREGS_IACONFIG) & MREGS_IACONFIG_ACHNGE) != 0)
			barrier();
		for (i = 0; i < 8; i++) {
			u8 tmp = *hbytes++;
			sbus_writeb(tmp, qep->mregs + MREGS_FILTER);
		}
		sbus_writeb(0, qep->mregs + MREGS_IACONFIG);
	}

	/* Any change of the logical address filter, the physical address,
	 * or enabling/disabling promiscuous mode causes the MACE to disable
	 * the receiver. So we must re-enable them here or else the MACE
	 * refuses to listen to anything on the network. Sheesh, took
	 * me a day or two to find this bug.
	 */
	qep->mconfig = new_mconfig;
	sbus_writeb(qep->mconfig, qep->mregs + MREGS_MCONFIG);

	/* Let us get going again. */
	netif_wake_queue(dev);
}

/* Ethtool support... */
static void qe_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
{
	struct sunqe *qep = dev->priv;

	strcpy(info->driver, DRV_NAME);
	strcpy(info->version, DRV_VERSION);
	sprintf(info->bus_info, "SBUS:%d",
		qep->qe_sdev->slot);
}

static u32 qe_get_link(struct net_device *dev)
{
	struct sunqe *qep = dev->priv;
	void __iomem *mregs = qep->mregs;
	u8 phyconfig;

	spin_lock_irq(&qep->lock);
	phyconfig = sbus_readb(mregs + MREGS_PHYCONFIG);
	spin_unlock_irq(&qep->lock);

	return (phyconfig & MREGS_PHYCONFIG_LSTAT);
}

static struct ethtool_ops qe_ethtool_ops = {
	.get_drvinfo	= qe_get_drvinfo,
	.get_link	= qe_get_link,
};

/* This is only called once at boot time for each card probed. */
static inline void qec_init_once(struct sunqec *qecp, struct sbus_dev *qsdev)
{
	u8 bsizes = qecp->qec_bursts;

	if (sbus_can_burst64(qsdev) && (bsizes & DMA_BURST64)) {
		sbus_writel(GLOB_CTRL_B64, qecp->gregs + GLOB_CTRL);
	} else if (bsizes & DMA_BURST32) {
		sbus_writel(GLOB_CTRL_B32, qecp->gregs + GLOB_CTRL);
	} else {
		sbus_writel(GLOB_CTRL_B16, qecp->gregs + GLOB_CTRL);
	}

	/* Packetsize only used in 100baseT BigMAC configurations,
	 * set it to zero just to be on the safe side.
	 */
	sbus_writel(GLOB_PSIZE_2048, qecp->gregs + GLOB_PSIZE);

	/* Set the local memsize register, divided up to one piece per QE channel. */
	sbus_writel((qsdev->reg_addrs[1].reg_size >> 2),
		    qecp->gregs + GLOB_MSIZE);

	/* Divide up the local QEC memory amongst the 4 QE receiver and
	 * transmitter FIFOs.  Basically it is (total / 2 / num_channels).
	 */
	sbus_writel((qsdev->reg_addrs[1].reg_size >> 2) >> 1,
		    qecp->gregs + GLOB_TSIZE);
	sbus_writel((qsdev->reg_addrs[1].reg_size >> 2) >> 1,
		    qecp->gregs + GLOB_RSIZE);
}

/* Four QE's per QEC card. */
static int __init qec_ether_init(struct net_device *dev, struct sbus_dev *sdev)
{
	static unsigned version_printed;
	struct net_device *qe_devs[4];
	struct sunqe *qeps[4];
	struct sbus_dev *qesdevs[4];
	struct sbus_dev *child;
	struct sunqec *qecp = NULL;
	u8 bsizes, bsizes_more;
	int i, j, res = -ENOMEM;

	for (i = 0; i < 4; i++) {
		qe_devs[i] = alloc_etherdev(sizeof(struct sunqe));
		if (!qe_devs[i])
			goto out;
	}

	if (version_printed++ == 0)
		printk(KERN_INFO "%s", version);

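	/* All four channels inherit the system-wide IDPROM ethernet
	 * address, as is conventional on Sun hardware.
	 */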
	for (i = 0; i < 4; i++) {
		qeps[i] = (struct sunqe *) qe_devs[i]->priv;
		for (j = 0; j < 6; j++)
			qe_devs[i]->dev_addr[j] = idprom->id_ethaddr[j];
		qeps[i]->channel = i;
		spin_lock_init(&qeps[i]->lock);
	}

	qecp = kmalloc(sizeof(struct sunqec), GFP_KERNEL);
	if (qecp == NULL)
		goto out1;
	qecp->qec_sdev = sdev;

	for (i = 0; i < 4; i++) {
		qecp->qes[i] = qeps[i];
		qeps[i]->dev = qe_devs[i];
		qeps[i]->parent = qecp;
	}

	res = -ENODEV;

	for (i = 0, child = sdev->child; i < 4; i++, child = child->next) {
		/* Link in channel */
		j = prom_getintdefault(child->prom_node, "channel#", -1);
		if (j == -1)
			goto out2;
		qesdevs[j] = child;
	}

	for (i = 0; i < 4; i++)
		qeps[i]->qe_sdev = qesdevs[i];

	/* Now map in the registers, QEC globals first. */
	qecp->gregs = sbus_ioremap(&sdev->resource[0], 0,
				   GLOB_REG_SIZE, "QEC Global Registers");
	if (!qecp->gregs) {
		printk(KERN_ERR "QuadEther: Cannot map QEC global registers.\n");
		goto out2;
	}

	/* Make sure the QEC is in MACE mode. */
	if ((sbus_readl(qecp->gregs + GLOB_CTRL) & 0xf0000000) != GLOB_CTRL_MMODE) {
		printk(KERN_ERR "QuadEther: AIEEE, QEC is not in MACE mode!\n");
		goto out3;
	}

	/* Reset the QEC. */
	if (qec_global_reset(qecp->gregs))
		goto out3;

	/* Find and set the burst sizes for the QEC, since it does
	 * the actual dma for all 4 channels.
	 */
	bsizes = prom_getintdefault(sdev->prom_node, "burst-sizes", 0xff);
	bsizes &= 0xff;
	bsizes_more = prom_getintdefault(sdev->bus->prom_node, "burst-sizes", 0xff);

	if (bsizes_more != 0xff)
		bsizes &= bsizes_more;
	if (bsizes == 0xff || (bsizes & DMA_BURST16) == 0 ||
	    (bsizes & DMA_BURST32) == 0)
		bsizes = (DMA_BURST32 - 1);
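	/* The DMA_BURST* constants are one-hot flags, so DMA_BURST32 - 1
	 * advertises every burst size below 32 bytes as the fallback.
	 */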

	qecp->qec_bursts = bsizes;

	/* Perform one time QEC initialization, we never touch the QEC
	 * globals again after this.
	 */
	qec_init_once(qecp, sdev);

	for (i = 0; i < 4; i++) {
		struct sunqe *qe = qeps[i];
		/* Map in QEC per-channel control registers. */
		qe->qcregs = sbus_ioremap(&qe->qe_sdev->resource[0], 0,
					  CREG_REG_SIZE, "QEC Channel Registers");
		if (!qe->qcregs) {
			printk(KERN_ERR "QuadEther: Cannot map QE %d's channel registers.\n", i);
			goto out4;
		}

		/* Map in per-channel AMD MACE registers. */
		qe->mregs = sbus_ioremap(&qe->qe_sdev->resource[1], 0,
					 MREGS_REG_SIZE, "QE MACE Registers");
		if (!qe->mregs) {
			printk(KERN_ERR "QuadEther: Cannot map QE %d's MACE registers.\n", i);
			goto out4;
		}

		qe->qe_block = sbus_alloc_consistent(qe->qe_sdev,
						     PAGE_SIZE,
						     &qe->qblock_dvma);
		qe->buffers = sbus_alloc_consistent(qe->qe_sdev,
						    sizeof(struct sunqe_buffers),
						    &qe->buffers_dvma);
		if (qe->qe_block == NULL || qe->qblock_dvma == 0 ||
		    qe->buffers == NULL || qe->buffers_dvma == 0) {
			goto out4;
		}

		/* Stop this QE. */
		qe_stop(qe);
	}

	for (i = 0; i < 4; i++) {
		SET_MODULE_OWNER(qe_devs[i]);
		qe_devs[i]->open = qe_open;
		qe_devs[i]->stop = qe_close;
		qe_devs[i]->hard_start_xmit = qe_start_xmit;
		qe_devs[i]->get_stats = qe_get_stats;
		qe_devs[i]->set_multicast_list = qe_set_multicast;
		qe_devs[i]->tx_timeout = qe_tx_timeout;
		qe_devs[i]->watchdog_timeo = 5*HZ;
		qe_devs[i]->irq = sdev->irqs[0];
		qe_devs[i]->dma = 0;
		qe_devs[i]->ethtool_ops = &qe_ethtool_ops;
	}

	/* QEC receives interrupts from each QE, then it sends the actual
	 * IRQ to the cpu itself.  Since QEC is the single point of
	 * interrupt for all QE channels we register the IRQ handler
	 * for it now.
	 */
	if (request_irq(sdev->irqs[0], &qec_interrupt,
			SA_SHIRQ, "QuadEther", (void *) qecp)) {
		printk(KERN_ERR "QuadEther: Can't register QEC master irq handler.\n");
		res = -EAGAIN;
		goto out4;
	}

	for (i = 0; i < 4; i++) {
		if (register_netdev(qe_devs[i]) != 0)
			goto out5;
	}

	/* Report the QE channels. */
	for (i = 0; i < 4; i++) {
		printk(KERN_INFO "%s: QuadEthernet channel[%d] ", qe_devs[i]->name, i);
		for (j = 0; j < 6; j++)
			printk("%2.2x%c",
			       qe_devs[i]->dev_addr[j],
			       j == 5 ? ' ' : ':');
		printk("\n");
	}

	/* We are home free at this point, link the qe's into
	 * the master list for later driver exit.
	 */
	qecp->next_module = root_qec_dev;
	root_qec_dev = qecp;

	return 0;

out5:
	while (i--)
		unregister_netdev(qe_devs[i]);
	free_irq(sdev->irqs[0], (void *) qecp);
out4:
	for (i = 0; i < 4; i++) {
		struct sunqe *qe = (struct sunqe *) qe_devs[i]->priv;

		if (qe->qcregs)
			sbus_iounmap(qe->qcregs, CREG_REG_SIZE);
		if (qe->mregs)
			sbus_iounmap(qe->mregs, MREGS_REG_SIZE);
		if (qe->qe_block)
			sbus_free_consistent(qe->qe_sdev,
					     PAGE_SIZE,
					     qe->qe_block,
					     qe->qblock_dvma);
		if (qe->buffers)
			sbus_free_consistent(qe->qe_sdev,
					     sizeof(struct sunqe_buffers),
					     qe->buffers,
					     qe->buffers_dvma);
	}
out3:
	sbus_iounmap(qecp->gregs, GLOB_REG_SIZE);
out2:
	kfree(qecp);
out1:
	i = 4;
out:
	while (i--)
		free_netdev(qe_devs[i]);
	return res;
}

static int __init qec_match(struct sbus_dev *sdev)
{
	struct sbus_dev *sibling;
	int i;

	if (strcmp(sdev->prom_name, "qec") != 0)
		return 0;

	/* QEC can be parent of either QuadEthernet or BigMAC
	 * children.  Do not confuse this with qfe/SUNW,qfe
	 * which is a quad-happymeal card and handled by
	 * a different driver.
	 */
	sibling = sdev->child;
	for (i = 0; i < 4; i++) {
		if (sibling == NULL)
			return 0;
		if (strcmp(sibling->prom_name, "qe") != 0)
			return 0;
		sibling = sibling->next;
	}
	return 1;
}

static int __init qec_probe(void)
{
	struct net_device *dev = NULL;
	struct sbus_bus *bus;
	struct sbus_dev *sdev = NULL;
	static int called;
	int cards = 0, v;

	root_qec_dev = NULL;

	if (called)
		return -ENODEV;
	called++;

	for_each_sbus(bus) {
		for_each_sbusdev(sdev, bus) {
			if (cards)
				dev = NULL;

			if (qec_match(sdev)) {
				cards++;
				if ((v = qec_ether_init(dev, sdev)))
					return v;
			}
		}
	}
	if (!cards)
		return -ENODEV;
	return 0;
}

static void __exit qec_cleanup(void)
{
	struct sunqec *next_qec;
	int i;

	while (root_qec_dev) {
		next_qec = root_qec_dev->next_module;

		/* Release all four QE channels, then the QEC itself. */
		for (i = 0; i < 4; i++) {
			unregister_netdev(root_qec_dev->qes[i]->dev);
			sbus_iounmap(root_qec_dev->qes[i]->qcregs, CREG_REG_SIZE);
			sbus_iounmap(root_qec_dev->qes[i]->mregs, MREGS_REG_SIZE);
			sbus_free_consistent(root_qec_dev->qes[i]->qe_sdev,
					     PAGE_SIZE,
					     root_qec_dev->qes[i]->qe_block,
					     root_qec_dev->qes[i]->qblock_dvma);
			sbus_free_consistent(root_qec_dev->qes[i]->qe_sdev,
					     sizeof(struct sunqe_buffers),
					     root_qec_dev->qes[i]->buffers,
					     root_qec_dev->qes[i]->buffers_dvma);
			free_netdev(root_qec_dev->qes[i]->dev);
		}
		free_irq(root_qec_dev->qec_sdev->irqs[0], (void *) root_qec_dev);
		sbus_iounmap(root_qec_dev->gregs, GLOB_REG_SIZE);
		kfree(root_qec_dev);
		root_qec_dev = next_qec;
	}
}

module_init(qec_probe);
module_exit(qec_cleanup);