/*
 * drivers/net/ibm_newemac/mal.c
 *
 * Memory Access Layer (MAL) support
 *
 * Copyright (c) 2004, 2005 Zultys Technologies.
 * Eugene Surovegin <eugene.surovegin@zultys.com> or <ebs@ebshome.net>
 *
 * Based on original work by
 *      Benjamin Herrenschmidt <benh@kernel.crashing.org>,
 *      David Gibson <hermes@gibson.dropbear.id.au>,
 *
 *      Armin Kuster <akuster@mvista.com>
 *      Copyright 2002 MontaVista Software Inc.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or (at your
 * option) any later version.
 */
#include <linux/delay.h>

#include "core.h"
int __devinit mal_register_commac(struct mal_instance *mal,
				  struct mal_commac *commac)
{
	unsigned long flags;

	spin_lock_irqsave(&mal->lock, flags);

	MAL_DBG(mal, "reg(%08x, %08x)" NL,
		commac->tx_chan_mask, commac->rx_chan_mask);

	/* Don't let multiple commacs claim the same channel(s) */
	if ((mal->tx_chan_mask & commac->tx_chan_mask) ||
	    (mal->rx_chan_mask & commac->rx_chan_mask)) {
		spin_unlock_irqrestore(&mal->lock, flags);
		printk(KERN_WARNING "mal%d: COMMAC channels conflict!\n",
		       mal->index);
		return -EBUSY;
	}

	mal->tx_chan_mask |= commac->tx_chan_mask;
	mal->rx_chan_mask |= commac->rx_chan_mask;
	list_add(&commac->list, &mal->list);

	spin_unlock_irqrestore(&mal->lock, flags);

	return 0;
}
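
/*
 * Illustrative only (the real callers live in the EMAC core, core.c): a MAL
 * client ("commac") typically claims its channels and joins the poll list
 * roughly like this; field and variable names below are placeholders, not
 * necessarily the exact ones used by core.c:
 *
 *	dev->commac.ops          = &emac_commac_ops;
 *	dev->commac.dev          = dev;
 *	dev->commac.tx_chan_mask = MAL_CHAN_MASK(dev->mal_tx_chan);
 *	dev->commac.rx_chan_mask = MAL_CHAN_MASK(dev->mal_rx_chan);
 *	if (mal_register_commac(dev->mal, &dev->commac))
 *		return -EBUSY;
 *	mal_poll_add(dev->mal, &dev->commac);
 */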
void __devexit mal_unregister_commac(struct mal_instance *mal,
				     struct mal_commac *commac)
{
	unsigned long flags;

	spin_lock_irqsave(&mal->lock, flags);

	MAL_DBG(mal, "unreg(%08x, %08x)" NL,
		commac->tx_chan_mask, commac->rx_chan_mask);

	mal->tx_chan_mask &= ~commac->tx_chan_mask;
	mal->rx_chan_mask &= ~commac->rx_chan_mask;
	list_del_init(&commac->list);

	spin_unlock_irqrestore(&mal->lock, flags);
}
int mal_set_rcbs(struct mal_instance *mal, int channel, unsigned long size)
{
	BUG_ON(channel < 0 || channel >= mal->num_rx_chans ||
	       size > MAL_MAX_RX_SIZE);

	MAL_DBG(mal, "set_rcbs(%d, %lu)" NL, channel, size);

	if (size & 0xf) {
		printk(KERN_WARNING
		       "mal%d: incorrect RX size %lu for the channel %d\n",
		       mal->index, size, channel);
		return -EINVAL;
	}

	set_mal_dcrn(mal, MAL_RCBS(channel), size >> 4);
	return 0;
}
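
/*
 * Buffer descriptors for all channels live in one contiguous DMA block:
 * the TX rings for every channel come first, followed by the RX rings.
 * The two offset helpers below encode that layout.
 */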
int mal_tx_bd_offset(struct mal_instance *mal, int channel)
{
	BUG_ON(channel < 0 || channel >= mal->num_tx_chans);

	return channel * NUM_TX_BUFF;
}
int mal_rx_bd_offset(struct mal_instance *mal, int channel)
{
	BUG_ON(channel < 0 || channel >= mal->num_rx_chans);

	return mal->num_tx_chans * NUM_TX_BUFF + channel * NUM_RX_BUFF;
}
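
/*
 * Channel enable/disable goes through the MAL Channel Active Set
 * (TXCASR/RXCASR) and Channel Active Reset (TXCARR/RXCARR) registers:
 * setting a channel's mask bit in the *CASR register starts the channel,
 * writing the mask to the *CARR register stops it.
 */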
void mal_enable_tx_channel(struct mal_instance *mal, int channel)
{
	unsigned long flags;

	spin_lock_irqsave(&mal->lock, flags);

	MAL_DBG(mal, "enable_tx(%d)" NL, channel);

	set_mal_dcrn(mal, MAL_TXCASR,
		     get_mal_dcrn(mal, MAL_TXCASR) | MAL_CHAN_MASK(channel));

	spin_unlock_irqrestore(&mal->lock, flags);
}
void mal_disable_tx_channel(struct mal_instance *mal, int channel)
{
	set_mal_dcrn(mal, MAL_TXCARR, MAL_CHAN_MASK(channel));

	MAL_DBG(mal, "disable_tx(%d)" NL, channel);
}
void mal_enable_rx_channel(struct mal_instance *mal, int channel)
{
	unsigned long flags;

	spin_lock_irqsave(&mal->lock, flags);

	MAL_DBG(mal, "enable_rx(%d)" NL, channel);

	set_mal_dcrn(mal, MAL_RXCASR,
		     get_mal_dcrn(mal, MAL_RXCASR) | MAL_CHAN_MASK(channel));

	spin_unlock_irqrestore(&mal->lock, flags);
}
void mal_disable_rx_channel(struct mal_instance *mal, int channel)
{
	set_mal_dcrn(mal, MAL_RXCARR, MAL_CHAN_MASK(channel));

	MAL_DBG(mal, "disable_rx(%d)" NL, channel);
}
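
/*
 * Commacs that want NAPI service add themselves to mal->poll_list;
 * mal_poll() walks this list for both TX completion and RX processing.
 * New entries start with polling disabled until the owner calls
 * mal_poll_enable().
 */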
void mal_poll_add(struct mal_instance *mal, struct mal_commac *commac)
{
	unsigned long flags;

	spin_lock_irqsave(&mal->lock, flags);

	MAL_DBG(mal, "poll_add(%p)" NL, commac);

	/* starts disabled */
	set_bit(MAL_COMMAC_POLL_DISABLED, &commac->flags);

	list_add_tail(&commac->poll_list, &mal->poll_list);

	spin_unlock_irqrestore(&mal->lock, flags);
}
void mal_poll_del(struct mal_instance *mal, struct mal_commac *commac)
{
	unsigned long flags;

	spin_lock_irqsave(&mal->lock, flags);

	MAL_DBG(mal, "poll_del(%p)" NL, commac);

	list_del(&commac->poll_list);

	spin_unlock_irqrestore(&mal->lock, flags);
}
/* synchronized by mal_poll() */
static inline void mal_enable_eob_irq(struct mal_instance *mal)
{
	MAL_DBG2(mal, "enable_irq" NL);

	// XXX might want to cache MAL_CFG as the DCR read can be slow
	set_mal_dcrn(mal, MAL_CFG, get_mal_dcrn(mal, MAL_CFG) | MAL_CFG_EOPIE);
}
/* synchronized by NAPI state */
static inline void mal_disable_eob_irq(struct mal_instance *mal)
{
	// XXX might want to cache MAL_CFG as the DCR read can be slow
	set_mal_dcrn(mal, MAL_CFG, get_mal_dcrn(mal, MAL_CFG) & ~MAL_CFG_EOPIE);

	MAL_DBG2(mal, "disable_irq" NL);
}
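
/*
 * System error interrupt: MAL_ESR is read and written back to acknowledge
 * the event. Descriptor errors are ignored here because the dedicated
 * TXDE/RXDE interrupts will report them anyway.
 */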
static irqreturn_t mal_serr(int irq, void *dev_instance)
{
	struct mal_instance *mal = dev_instance;

	u32 esr = get_mal_dcrn(mal, MAL_ESR);

	/* Clear the error status register */
	set_mal_dcrn(mal, MAL_ESR, esr);

	MAL_DBG(mal, "SERR %08x" NL, esr);

	if (esr & MAL_ESR_EVB) {
		if (esr & MAL_ESR_DE) {
			/* We ignore Descriptor error,
			 * TXDE or RXDE interrupt will be generated anyway.
			 */
			return IRQ_HANDLED;
		}

		if (esr & MAL_ESR_PEIN) {
			/* PLB error, it's probably buggy hardware or
			 * incorrect physical address in BD (i.e. bug)
			 */
			if (net_ratelimit())
				printk(KERN_ERR
				       "mal%d: system error, "
				       "PLB (ESR = 0x%08x)\n",
				       mal->index, esr);
			return IRQ_HANDLED;
		}

		/* OPB error, it's probably buggy hardware or incorrect
		 * setup
		 */
		if (net_ratelimit())
			printk(KERN_ERR
			       "mal%d: system error, OPB (ESR = 0x%08x)\n",
			       mal->index, esr);
	}
	return IRQ_HANDLED;
}
static inline void mal_schedule_poll(struct mal_instance *mal)
{
	if (likely(napi_schedule_prep(&mal->napi))) {
		MAL_DBG2(mal, "schedule_poll" NL);
		mal_disable_eob_irq(mal);
		__napi_schedule(&mal->napi);
	} else
		MAL_DBG2(mal, "already in poll" NL);
}
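
/*
 * End-of-buffer interrupts: read the *EOBISR status, kick the NAPI poller,
 * then write the status back to acknowledge the interrupt.
 */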
static irqreturn_t mal_txeob(int irq, void *dev_instance)
{
	struct mal_instance *mal = dev_instance;

	u32 r = get_mal_dcrn(mal, MAL_TXEOBISR);

	MAL_DBG2(mal, "txeob %08x" NL, r);

	mal_schedule_poll(mal);
	set_mal_dcrn(mal, MAL_TXEOBISR, r);

	return IRQ_HANDLED;
}
static irqreturn_t mal_rxeob(int irq, void *dev_instance)
{
	struct mal_instance *mal = dev_instance;

	u32 r = get_mal_dcrn(mal, MAL_RXEOBISR);

	MAL_DBG2(mal, "rxeob %08x" NL, r);

	mal_schedule_poll(mal);
	set_mal_dcrn(mal, MAL_RXEOBISR, r);

	return IRQ_HANDLED;
}
static irqreturn_t mal_txde(int irq, void *dev_instance)
{
	struct mal_instance *mal = dev_instance;

	u32 deir = get_mal_dcrn(mal, MAL_TXDEIR);
	set_mal_dcrn(mal, MAL_TXDEIR, deir);

	MAL_DBG(mal, "txde %08x" NL, deir);

	if (net_ratelimit())
		printk(KERN_ERR
		       "mal%d: TX descriptor error (TXDEIR = 0x%08x)\n",
		       mal->index, deir);

	return IRQ_HANDLED;
}
static irqreturn_t mal_rxde(int irq, void *dev_instance)
{
	struct mal_instance *mal = dev_instance;
	struct list_head *l;

	u32 deir = get_mal_dcrn(mal, MAL_RXDEIR);

	MAL_DBG(mal, "rxde %08x" NL, deir);

	list_for_each(l, &mal->list) {
		struct mal_commac *mc = list_entry(l, struct mal_commac, list);
		if (deir & mc->rx_chan_mask) {
			set_bit(MAL_COMMAC_RX_STOPPED, &mc->flags);
			mc->ops->rxde(mc->dev);
		}
	}

	mal_schedule_poll(mal);
	set_mal_dcrn(mal, MAL_RXDEIR, deir);

	return IRQ_HANDLED;
}
void mal_poll_disable(struct mal_instance *mal, struct mal_commac *commac)
{
	/* Spinlock-type semantics: only one caller disables poll at a time */
	while (test_and_set_bit(MAL_COMMAC_POLL_DISABLED, &commac->flags))
		msleep(1);

	/* Synchronize with the MAL NAPI poller. */
	napi_disable(&mal->napi);
}
void mal_poll_enable(struct mal_instance *mal, struct mal_commac *commac)
{
	smp_wmb();
	clear_bit(MAL_COMMAC_POLL_DISABLED, &commac->flags);

	// XXX might want to kick a poll now...
}
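
/*
 * NAPI poll handler: TX completions are processed first, then RX up to the
 * given budget. When the budget is not exhausted the poller completes,
 * re-enables the EOB interrupt and re-checks the rings for packets that may
 * have "rotted" in the window where the interrupt was still disabled.
 */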
static int mal_poll(struct napi_struct *napi, int budget)
{
	struct mal_instance *mal = container_of(napi, struct mal_instance, napi);
	struct list_head *l;
	int received = 0;
	unsigned long flags;

	MAL_DBG2(mal, "poll(%d)" NL, budget);
 again:
	/* Process TX skbs */
	list_for_each(l, &mal->poll_list) {
		struct mal_commac *mc =
			list_entry(l, struct mal_commac, poll_list);
		mc->ops->poll_tx(mc->dev);
	}

	/* Process RX skbs.
	 *
	 * We _might_ need something more smart here to enforce polling
	 * fairness.
	 */
	list_for_each(l, &mal->poll_list) {
		struct mal_commac *mc =
			list_entry(l, struct mal_commac, poll_list);
		int n;
		if (unlikely(test_bit(MAL_COMMAC_POLL_DISABLED, &mc->flags)))
			continue;
		n = mc->ops->poll_rx(mc->dev, budget);
		if (n) {
			received += n;
			budget -= n;
			if (budget <= 0)
				goto more_work; // XXX What if this is the last one ?
		}
	}

	/* We need to disable IRQs to protect from RXDE IRQ here */
	spin_lock_irqsave(&mal->lock, flags);
	__napi_complete(napi);
	mal_enable_eob_irq(mal);
	spin_unlock_irqrestore(&mal->lock, flags);

	/* Check for "rotting" packet(s) */
	list_for_each(l, &mal->poll_list) {
		struct mal_commac *mc =
			list_entry(l, struct mal_commac, poll_list);
		if (unlikely(test_bit(MAL_COMMAC_POLL_DISABLED, &mc->flags)))
			continue;
		if (unlikely(mc->ops->peek_rx(mc->dev) ||
			     test_bit(MAL_COMMAC_RX_STOPPED, &mc->flags))) {
			MAL_DBG2(mal, "rotting packet" NL);
			if (napi_reschedule(napi))
				mal_disable_eob_irq(mal);
			else
				MAL_DBG2(mal, "already in poll list" NL);

			if (budget > 0)
				goto again;
			else
				goto more_work;
		}
		mc->ops->poll_tx(mc->dev);
	}

 more_work:
	MAL_DBG2(mal, "poll() %d <- %d" NL, budget, received);

	return received;
}
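
/*
 * MAL_CFG_SR requests a software reset of the MAL; the bit clears once the
 * reset (about one system clock) has completed, which is what the bounded
 * loop below waits for.
 */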
static void mal_reset(struct mal_instance *mal)
{
	int n = 10;

	MAL_DBG(mal, "reset" NL);

	set_mal_dcrn(mal, MAL_CFG, MAL_CFG_SR);

	/* Wait for reset to complete (1 system clock) */
	while ((get_mal_dcrn(mal, MAL_CFG) & MAL_CFG_SR) && n)
		--n;

	if (!n)
		printk(KERN_ERR "mal%d: reset timeout\n", mal->index);
}
int mal_get_regs_len(struct mal_instance *mal)
{
	return sizeof(struct emac_ethtool_regs_subhdr) +
		sizeof(struct mal_regs);
}
void *mal_dump_regs(struct mal_instance *mal, void *buf)
{
	struct emac_ethtool_regs_subhdr *hdr = buf;
	struct mal_regs *regs = (struct mal_regs *)(hdr + 1);
	int i;

	hdr->version = mal->version;
	hdr->index = mal->index;

	regs->tx_count = mal->num_tx_chans;
	regs->rx_count = mal->num_rx_chans;

	regs->cfg = get_mal_dcrn(mal, MAL_CFG);
	regs->esr = get_mal_dcrn(mal, MAL_ESR);
	regs->ier = get_mal_dcrn(mal, MAL_IER);
	regs->tx_casr = get_mal_dcrn(mal, MAL_TXCASR);
	regs->tx_carr = get_mal_dcrn(mal, MAL_TXCARR);
	regs->tx_eobisr = get_mal_dcrn(mal, MAL_TXEOBISR);
	regs->tx_deir = get_mal_dcrn(mal, MAL_TXDEIR);
	regs->rx_casr = get_mal_dcrn(mal, MAL_RXCASR);
	regs->rx_carr = get_mal_dcrn(mal, MAL_RXCARR);
	regs->rx_eobisr = get_mal_dcrn(mal, MAL_RXEOBISR);
	regs->rx_deir = get_mal_dcrn(mal, MAL_RXDEIR);

	for (i = 0; i < regs->tx_count; ++i)
		regs->tx_ctpr[i] = get_mal_dcrn(mal, MAL_TXCTPR(i));

	for (i = 0; i < regs->rx_count; ++i) {
		regs->rx_ctpr[i] = get_mal_dcrn(mal, MAL_RXCTPR(i));
		regs->rcbs[i] = get_mal_dcrn(mal, MAL_RCBS(i));
	}

	return regs + 1;
}
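
/*
 * Probe sequence: read the channel counts and DCR/IRQ resources from the
 * device tree, map the DCRs, set up the NAPI context, reset and configure
 * the MAL, allocate the shared descriptor block, point each channel's CTPR
 * at its ring, then request the five interrupts and enable them.
 */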
static int __devinit mal_probe(struct of_device *ofdev,
			       const struct of_device_id *match)
{
	struct mal_instance *mal;
	int err = 0, i, bd_size;
	int index = mal_count++;
	unsigned int dcr_base;
	const u32 *prop;
	u32 cfg;

	mal = kzalloc(sizeof(struct mal_instance), GFP_KERNEL);
	if (!mal) {
		printk(KERN_ERR
		       "mal%d: out of memory allocating MAL structure!\n",
		       index);
		return -ENOMEM;
	}
	mal->index = index;
	mal->ofdev = ofdev;
	mal->version = of_device_is_compatible(ofdev->node, "ibm,mcmal2") ? 2 : 1;

	MAL_DBG(mal, "probe" NL);

	prop = of_get_property(ofdev->node, "num-tx-chans", NULL);
	if (prop == NULL) {
		printk(KERN_ERR
		       "mal%d: can't find MAL num-tx-chans property!\n",
		       index);
		err = -ENODEV;
		goto fail;
	}
	mal->num_tx_chans = prop[0];

	prop = of_get_property(ofdev->node, "num-rx-chans", NULL);
	if (prop == NULL) {
		printk(KERN_ERR
		       "mal%d: can't find MAL num-rx-chans property!\n",
		       index);
		err = -ENODEV;
		goto fail;
	}
	mal->num_rx_chans = prop[0];

	dcr_base = dcr_resource_start(ofdev->node, 0);
	if (dcr_base == 0) {
		printk(KERN_ERR
		       "mal%d: can't find DCR resource!\n", index);
		err = -ENODEV;
		goto fail;
	}
	mal->dcr_host = dcr_map(ofdev->node, dcr_base, 0x100);
	if (!DCR_MAP_OK(mal->dcr_host)) {
		printk(KERN_ERR
		       "mal%d: failed to map DCRs !\n", index);
		err = -ENODEV;
		goto fail;
	}

	mal->txeob_irq = irq_of_parse_and_map(ofdev->node, 0);
	mal->rxeob_irq = irq_of_parse_and_map(ofdev->node, 1);
	mal->serr_irq = irq_of_parse_and_map(ofdev->node, 2);
	mal->txde_irq = irq_of_parse_and_map(ofdev->node, 3);
	mal->rxde_irq = irq_of_parse_and_map(ofdev->node, 4);
	if (mal->txeob_irq == NO_IRQ || mal->rxeob_irq == NO_IRQ ||
	    mal->serr_irq == NO_IRQ || mal->txde_irq == NO_IRQ ||
	    mal->rxde_irq == NO_IRQ) {
		printk(KERN_ERR
		       "mal%d: failed to map interrupts !\n", index);
		err = -ENODEV;
		goto fail_unmap;
	}

	INIT_LIST_HEAD(&mal->poll_list);
	mal->napi.weight = CONFIG_IBM_NEW_EMAC_POLL_WEIGHT;
	mal->napi.poll = mal_poll;
	INIT_LIST_HEAD(&mal->list);
	spin_lock_init(&mal->lock);

	/* Load power-on reset defaults */
	mal_reset(mal);

	/* Set the MAL configuration register */
	cfg = (mal->version == 2) ? MAL2_CFG_DEFAULT : MAL1_CFG_DEFAULT;
	cfg |= MAL_CFG_PLBB | MAL_CFG_OPBBL | MAL_CFG_LEA;

	/* Current Axon is not happy with priority being non-0, it can
	 * deadlock, fix it up here
	 */
	if (of_device_is_compatible(ofdev->node, "ibm,mcmal-axon"))
		cfg &= ~(MAL2_CFG_RPP_10 | MAL2_CFG_WPP_10);

	/* Apply configuration */
	set_mal_dcrn(mal, MAL_CFG, cfg);

	/* Allocate space for BD rings */
	BUG_ON(mal->num_tx_chans <= 0 || mal->num_tx_chans > 32);
	BUG_ON(mal->num_rx_chans <= 0 || mal->num_rx_chans > 32);

	bd_size = sizeof(struct mal_descriptor) *
		(NUM_TX_BUFF * mal->num_tx_chans +
		 NUM_RX_BUFF * mal->num_rx_chans);
	mal->bd_virt =
		dma_alloc_coherent(&ofdev->dev, bd_size, &mal->bd_dma,
				   GFP_KERNEL);
	if (mal->bd_virt == NULL) {
		printk(KERN_ERR
		       "mal%d: out of memory allocating RX/TX descriptors!\n",
		       index);
		err = -ENOMEM;
		goto fail_unmap;
	}
	memset(mal->bd_virt, 0, bd_size);

	for (i = 0; i < mal->num_tx_chans; ++i)
		set_mal_dcrn(mal, MAL_TXCTPR(i), mal->bd_dma +
			     sizeof(struct mal_descriptor) *
			     mal_tx_bd_offset(mal, i));

	for (i = 0; i < mal->num_rx_chans; ++i)
		set_mal_dcrn(mal, MAL_RXCTPR(i), mal->bd_dma +
			     sizeof(struct mal_descriptor) *
			     mal_rx_bd_offset(mal, i));

	err = request_irq(mal->serr_irq, mal_serr, 0, "MAL SERR", mal);
	if (err)
		goto fail2;
	err = request_irq(mal->txde_irq, mal_txde, 0, "MAL TX DE", mal);
	if (err)
		goto fail3;
	err = request_irq(mal->txeob_irq, mal_txeob, 0, "MAL TX EOB", mal);
	if (err)
		goto fail4;
	err = request_irq(mal->rxde_irq, mal_rxde, 0, "MAL RX DE", mal);
	if (err)
		goto fail5;
	err = request_irq(mal->rxeob_irq, mal_rxeob, 0, "MAL RX EOB", mal);
	if (err)
		goto fail6;

	/* Enable all MAL SERR interrupt sources */
	if (mal->version == 2)
		set_mal_dcrn(mal, MAL_IER, MAL2_IER_EVENTS);
	else
		set_mal_dcrn(mal, MAL_IER, MAL1_IER_EVENTS);

	/* Enable EOB interrupt */
	mal_enable_eob_irq(mal);

	printk(KERN_INFO
	       "MAL v%d %s, %d TX channels, %d RX channels\n",
	       mal->version, ofdev->node->full_name,
	       mal->num_tx_chans, mal->num_rx_chans);

	/* Advertise this instance to the rest of the world */
	wmb();
	dev_set_drvdata(&ofdev->dev, mal);

	mal_dbg_register(mal);

	return 0;

 fail6:
	free_irq(mal->rxde_irq, mal);
 fail5:
	free_irq(mal->txeob_irq, mal);
 fail4:
	free_irq(mal->txde_irq, mal);
 fail3:
	free_irq(mal->serr_irq, mal);
 fail2:
	dma_free_coherent(&ofdev->dev, bd_size, mal->bd_virt, mal->bd_dma);
 fail_unmap:
	dcr_unmap(mal->dcr_host, dcr_base, 0x100);
 fail:
	kfree(mal);

	return err;
}
static int __devexit mal_remove(struct of_device *ofdev)
{
	struct mal_instance *mal = dev_get_drvdata(&ofdev->dev);

	MAL_DBG(mal, "remove" NL);

	/* Synchronize with scheduled polling */
	napi_disable(&mal->napi);

	if (!list_empty(&mal->list)) {
		/* This is *very* bad */
		printk(KERN_EMERG
		       "mal%d: commac list is not empty on remove!\n",
		       mal->index);
		WARN_ON(1);
	}

	dev_set_drvdata(&ofdev->dev, NULL);

	free_irq(mal->serr_irq, mal);
	free_irq(mal->txde_irq, mal);
	free_irq(mal->txeob_irq, mal);
	free_irq(mal->rxde_irq, mal);
	free_irq(mal->rxeob_irq, mal);

	mal_reset(mal);

	mal_dbg_unregister(mal);

	dma_free_coherent(&ofdev->dev,
			  sizeof(struct mal_descriptor) *
			  (NUM_TX_BUFF * mal->num_tx_chans +
			   NUM_RX_BUFF * mal->num_rx_chans), mal->bd_virt,
			  mal->bd_dma);

	kfree(mal);

	return 0;
}
static struct of_device_id mal_platform_match[] =
{
	{
		.compatible	= "ibm,mcmal",
	},
	{
		.compatible	= "ibm,mcmal2",
	},
	/* Backward compat */
	{
		.type		= "mcmal-dma",
		.compatible	= "ibm,mcmal",
	},
	{
		.type		= "mcmal-dma",
		.compatible	= "ibm,mcmal2",
	},
	{},
};
static struct of_platform_driver mal_of_driver = {
	.name = "mcmal",
	.match_table = mal_platform_match,

	.probe = mal_probe,
	.remove = mal_remove,
};
int __init mal_init(void)
{
	return of_register_platform_driver(&mal_of_driver);
}

void mal_exit(void)
{
	of_unregister_platform_driver(&mal_of_driver);
}