Commit | Line | Data |
---|---|---|
1da177e4 | 1 | /* |
37448f7d | 2 | * drivers/net/ibm_emac/ibm_emac_mal.c |
1da177e4 | 3 | * |
37448f7d ES |
4 | * Memory Access Layer (MAL) support |
5 | * | |
6 | * Copyright (c) 2004, 2005 Zultys Technologies. | |
7 | * Eugene Surovegin <eugene.surovegin@zultys.com> or <ebs@ebshome.net> | |
1da177e4 | 8 | * |
37448f7d ES |
9 | * Based on original work by |
10 | * Benjamin Herrenschmidt <benh@kernel.crashing.org>, | |
11 | * David Gibson <hermes@gibson.dropbear.id.au>, | |
12 | * | |
13 | * Armin Kuster <akuster@mvista.com> | |
14 | * Copyright 2002 MontaVista Software Inc. | |
1da177e4 LT |
15 | * |
16 | * This program is free software; you can redistribute it and/or modify it | |
17 | * under the terms of the GNU General Public License as published by the | |
18 | * Free Software Foundation; either version 2 of the License, or (at your | |
19 | * option) any later version. | |
37448f7d | 20 | * |
1da177e4 | 21 | */ |
1da177e4 LT |
22 | #include <linux/module.h> |
23 | #include <linux/kernel.h> | |
24 | #include <linux/errno.h> | |
25 | #include <linux/netdevice.h> | |
26 | #include <linux/init.h> | |
37448f7d | 27 | #include <linux/interrupt.h> |
1da177e4 LT |
28 | #include <linux/dma-mapping.h> |
29 | ||
1da177e4 LT |
30 | #include <asm/ocp.h> |
31 | ||
37448f7d | 32 | #include "ibm_emac_core.h" |
1da177e4 | 33 | #include "ibm_emac_mal.h" |
37448f7d | 34 | #include "ibm_emac_debug.h" |
1da177e4 | 35 | |
/* Register a commac (EMAC channel client) with this MAL instance,
 * claiming its TX/RX channel masks.  Fails with -EBUSY if any requested
 * channel is already owned by another commac.  Local IRQs are disabled
 * for the whole check-and-claim so it is atomic w.r.t. the MAL
 * interrupt handlers on this CPU.
 */
int __init mal_register_commac(struct ibm_ocp_mal *mal,
			       struct mal_commac *commac)
{
	unsigned long flags;
	local_irq_save(flags);

	MAL_DBG("%d: reg(%08x, %08x)" NL, mal->def->index,
		commac->tx_chan_mask, commac->rx_chan_mask);

	/* Don't let multiple commacs claim the same channel(s) */
	if ((mal->tx_chan_mask & commac->tx_chan_mask) ||
	    (mal->rx_chan_mask & commac->rx_chan_mask)) {
		local_irq_restore(flags);
		printk(KERN_WARNING "mal%d: COMMAC channels conflict!\n",
		       mal->def->index);
		return -EBUSY;
	}

	/* Claim the channels and publish the commac on the MAL list */
	mal->tx_chan_mask |= commac->tx_chan_mask;
	mal->rx_chan_mask |= commac->rx_chan_mask;
	list_add(&commac->list, &mal->list);

	local_irq_restore(flags);
	return 0;
}
61 | ||
/* Unregister a commac: release its TX/RX channel masks and unlink it
 * from the MAL commac list.  Mirror of mal_register_commac(); same
 * IRQ-off serialization against the MAL interrupt handlers.
 */
void mal_unregister_commac(struct ibm_ocp_mal *mal, struct mal_commac *commac)
{
	unsigned long flags;
	local_irq_save(flags);

	MAL_DBG("%d: unreg(%08x, %08x)" NL, mal->def->index,
		commac->tx_chan_mask, commac->rx_chan_mask);

	mal->tx_chan_mask &= ~commac->tx_chan_mask;
	mal->rx_chan_mask &= ~commac->rx_chan_mask;
	/* list_del_init() so the commac may be safely re-registered later */
	list_del_init(&commac->list);

	local_irq_restore(flags);
}
76 | ||
/* Program the RX channel buffer size (RCBS) register for @channel.
 * @size is in bytes and must be a multiple of 16 (the hardware register
 * takes the size in 16-byte units, hence the >> 4) and no larger than
 * MAL_MAX_RX_SIZE.  Returns -EINVAL for a misaligned size.
 */
int mal_set_rcbs(struct ibm_ocp_mal *mal, int channel, unsigned long size)
{
	struct ocp_func_mal_data *maldata = mal->def->additions;
	BUG_ON(channel < 0 || channel >= maldata->num_rx_chans ||
	       size > MAL_MAX_RX_SIZE);

	MAL_DBG("%d: set_rbcs(%d, %lu)" NL, mal->def->index, channel, size);

	if (size & 0xf) {
		/* Hardware requires 16-byte granularity */
		printk(KERN_WARNING
		       "mal%d: incorrect RX size %lu for the channel %d\n",
		       mal->def->index, size, channel);
		return -EINVAL;
	}

	set_mal_dcrn(mal, MAL_RCBS(channel), size >> 4);
	return 0;
}
95 | ||
37448f7d | 96 | int mal_tx_bd_offset(struct ibm_ocp_mal *mal, int channel) |
1da177e4 | 97 | { |
37448f7d ES |
98 | struct ocp_func_mal_data *maldata = mal->def->additions; |
99 | BUG_ON(channel < 0 || channel >= maldata->num_tx_chans); | |
100 | return channel * NUM_TX_BUFF; | |
101 | } | |
1da177e4 | 102 | |
37448f7d ES |
103 | int mal_rx_bd_offset(struct ibm_ocp_mal *mal, int channel) |
104 | { | |
105 | struct ocp_func_mal_data *maldata = mal->def->additions; | |
106 | BUG_ON(channel < 0 || channel >= maldata->num_rx_chans); | |
107 | return maldata->num_tx_chans * NUM_TX_BUFF + channel * NUM_RX_BUFF; | |
108 | } | |
1da177e4 | 109 | |
/* Enable TX @channel by setting its bit in the TX channel active
 * set register (TXCASR).  BHs are disabled for the read-modify-write
 * of the register — NOTE(review): this serializes against softirq
 * context only, presumably sufficient for this driver's locking model.
 */
void mal_enable_tx_channel(struct ibm_ocp_mal *mal, int channel)
{
	local_bh_disable();
	MAL_DBG("%d: enable_tx(%d)" NL, mal->def->index, channel);
	set_mal_dcrn(mal, MAL_TXCASR,
		     get_mal_dcrn(mal, MAL_TXCASR) | MAL_CHAN_MASK(channel));
	local_bh_enable();
}
1da177e4 | 118 | |
/* Disable TX @channel.  TXCARR is a channel reset register: writing the
 * channel bit is a single atomic store, so no BH protection is needed
 * here (unlike the enable path's read-modify-write).
 */
void mal_disable_tx_channel(struct ibm_ocp_mal *mal, int channel)
{
	set_mal_dcrn(mal, MAL_TXCARR, MAL_CHAN_MASK(channel));
	MAL_DBG("%d: disable_tx(%d)" NL, mal->def->index, channel);
}
1da177e4 | 124 | |
/* Enable RX @channel via the RX channel active set register (RXCASR).
 * BHs are disabled around the read-modify-write, matching
 * mal_enable_tx_channel().
 */
void mal_enable_rx_channel(struct ibm_ocp_mal *mal, int channel)
{
	local_bh_disable();
	MAL_DBG("%d: enable_rx(%d)" NL, mal->def->index, channel);
	set_mal_dcrn(mal, MAL_RXCASR,
		     get_mal_dcrn(mal, MAL_RXCASR) | MAL_CHAN_MASK(channel));
	local_bh_enable();
}
1da177e4 | 133 | |
/* Disable RX @channel via RXCARR — a single store, so no locking
 * needed (see mal_disable_tx_channel()).
 */
void mal_disable_rx_channel(struct ibm_ocp_mal *mal, int channel)
{
	set_mal_dcrn(mal, MAL_RXCARR, MAL_CHAN_MASK(channel));
	MAL_DBG("%d: disable_rx(%d)" NL, mal->def->index, channel);
}
1da177e4 | 139 | |
/* Add @commac to the set of clients serviced by mal_poll().  BHs are
 * disabled so the list cannot be traversed by NAPI poll (softirq
 * context) while it is being modified.
 */
void mal_poll_add(struct ibm_ocp_mal *mal, struct mal_commac *commac)
{
	local_bh_disable();
	MAL_DBG("%d: poll_add(%p)" NL, mal->def->index, commac);
	list_add_tail(&commac->poll_list, &mal->poll_list);
	local_bh_enable();
}
147 | ||
/* Remove @commac from the mal_poll() client list.  Same BH-off
 * serialization as mal_poll_add().
 */
void mal_poll_del(struct ibm_ocp_mal *mal, struct mal_commac *commac)
{
	local_bh_disable();
	MAL_DBG("%d: poll_del(%p)" NL, mal->def->index, commac);
	list_del(&commac->poll_list);
	local_bh_enable();
}
155 | ||
/* Re-arm the end-of-buffer (EOP) interrupt in MAL_CFG.
 * Synchronized by mal_poll() — only called from the poll path.
 */
static inline void mal_enable_eob_irq(struct ibm_ocp_mal *mal)
{
	MAL_DBG2("%d: enable_irq" NL, mal->def->index);
	set_mal_dcrn(mal, MAL_CFG, get_mal_dcrn(mal, MAL_CFG) | MAL_CFG_EOPIE);
}
162 | ||
/* Mask the end-of-buffer (EOP) interrupt in MAL_CFG while NAPI polling
 * is in progress.  Synchronized by the NAPI scheduling state (the
 * original comment referenced __LINK_STATE_RX_SCHED; with the
 * napi_struct API this is NAPI_STATE_SCHED — same idea).
 */
static inline void mal_disable_eob_irq(struct ibm_ocp_mal *mal)
{
	set_mal_dcrn(mal, MAL_CFG, get_mal_dcrn(mal, MAL_CFG) & ~MAL_CFG_EOPIE);
	MAL_DBG2("%d: disable_irq" NL, mal->def->index);
}
169 | ||
/* System-error interrupt handler: read and acknowledge MAL_ESR, then
 * classify the error.  Descriptor errors are deliberately ignored here
 * (the dedicated TXDE/RXDE interrupts handle them); PLB and OPB errors
 * are only logged (rate-limited) since they indicate hardware or
 * driver bugs we cannot recover from here.
 */
static irqreturn_t mal_serr(int irq, void *dev_instance)
{
	struct ibm_ocp_mal *mal = dev_instance;
	u32 esr = get_mal_dcrn(mal, MAL_ESR);

	/* Clear the error status register (write-to-clear) */
	set_mal_dcrn(mal, MAL_ESR, esr);

	MAL_DBG("%d: SERR %08x" NL, mal->def->index, esr);

	if (esr & MAL_ESR_EVB) {
		if (esr & MAL_ESR_DE) {
			/* We ignore Descriptor error,
			 * TXDE or RXDE interrupt will be generated anyway.
			 */
			return IRQ_HANDLED;
		}

		if (esr & MAL_ESR_PEIN) {
			/* PLB error, it's probably buggy hardware or
			 * incorrect physical address in BD (i.e. bug)
			 */
			if (net_ratelimit())
				printk(KERN_ERR
				       "mal%d: system error, PLB (ESR = 0x%08x)\n",
				       mal->def->index, esr);
			return IRQ_HANDLED;
		}

		/* OPB error, it's probably buggy hardware or incorrect EBC setup */
		if (net_ratelimit())
			printk(KERN_ERR
			       "mal%d: system error, OPB (ESR = 0x%08x)\n",
			       mal->def->index, esr);
	}
	return IRQ_HANDLED;
}
207 | ||
/* Schedule NAPI polling for this MAL.  If we win the race to claim the
 * NAPI context (napi_schedule_prep), mask the EOB interrupt before
 * scheduling so no further EOB IRQs fire while the poll runs;
 * otherwise polling is already pending and there is nothing to do.
 */
static inline void mal_schedule_poll(struct ibm_ocp_mal *mal)
{
	if (likely(napi_schedule_prep(&mal->napi))) {
		MAL_DBG2("%d: schedule_poll" NL, mal->def->index);
		mal_disable_eob_irq(mal);
		__napi_schedule(&mal->napi);
	} else
		MAL_DBG2("%d: already in poll" NL, mal->def->index);
}
1da177e4 | 217 | |
/* TX end-of-buffer interrupt: defer the real work to NAPI, then ack
 * the interrupt status.  Scheduling the poll *before* acking means any
 * event arriving in between is still covered by the scheduled poll.
 */
static irqreturn_t mal_txeob(int irq, void *dev_instance)
{
	struct ibm_ocp_mal *mal = dev_instance;
	u32 r = get_mal_dcrn(mal, MAL_TXEOBISR);
	MAL_DBG2("%d: txeob %08x" NL, mal->def->index, r);
	mal_schedule_poll(mal);
	set_mal_dcrn(mal, MAL_TXEOBISR, r);
	return IRQ_HANDLED;
}
227 | ||
/* RX end-of-buffer interrupt: schedule NAPI polling, then ack the
 * status register (same ordering rationale as mal_txeob()).
 */
static irqreturn_t mal_rxeob(int irq, void *dev_instance)
{
	struct ibm_ocp_mal *mal = dev_instance;
	u32 r = get_mal_dcrn(mal, MAL_RXEOBISR);
	MAL_DBG2("%d: rxeob %08x" NL, mal->def->index, r);
	mal_schedule_poll(mal);
	set_mal_dcrn(mal, MAL_RXEOBISR, r);
	return IRQ_HANDLED;
}
1da177e4 | 237 | |
/* TX descriptor-error interrupt: ack the status and log it
 * (rate-limited).  A TX descriptor error indicates a driver bug
 * (e.g. a bad buffer address in a BD), so there is no recovery path
 * beyond reporting.
 */
static irqreturn_t mal_txde(int irq, void *dev_instance)
{
	struct ibm_ocp_mal *mal = dev_instance;
	u32 deir = get_mal_dcrn(mal, MAL_TXDEIR);
	set_mal_dcrn(mal, MAL_TXDEIR, deir);

	MAL_DBG("%d: txde %08x" NL, mal->def->index, deir);

	if (net_ratelimit())
		printk(KERN_ERR
		       "mal%d: TX descriptor error (TXDEIR = 0x%08x)\n",
		       mal->def->index, deir);

	return IRQ_HANDLED;
}
253 | ||
/* RX descriptor-error interrupt (typically an RX ring overrun).
 * Notify every commac whose channels are affected — marking it
 * rx_stopped so the poll loop knows its RX is stalled — then schedule
 * a poll to drain and restart, and finally ack the status register.
 */
static irqreturn_t mal_rxde(int irq, void *dev_instance)
{
	struct ibm_ocp_mal *mal = dev_instance;
	struct list_head *l;
	u32 deir = get_mal_dcrn(mal, MAL_RXDEIR);

	MAL_DBG("%d: rxde %08x" NL, mal->def->index, deir);

	list_for_each(l, &mal->list) {
		struct mal_commac *mc = list_entry(l, struct mal_commac, list);
		if (deir & mc->rx_chan_mask) {
			mc->rx_stopped = 1;
			mc->ops->rxde(mc->dev);
		}
	}

	mal_schedule_poll(mal);
	set_mal_dcrn(mal, MAL_RXDEIR, deir);

	return IRQ_HANDLED;
}
275 | ||
bfe13f54 | 276 | static int mal_poll(struct napi_struct *napi, int budget) |
1da177e4 | 277 | { |
bfe13f54 | 278 | struct ibm_ocp_mal *mal = container_of(napi, struct ibm_ocp_mal, napi); |
1da177e4 | 279 | struct list_head *l; |
bfe13f54 | 280 | int received = 0; |
37448f7d ES |
281 | |
282 | MAL_DBG2("%d: poll(%d) %d ->" NL, mal->def->index, *budget, | |
283 | rx_work_limit); | |
284 | again: | |
285 | /* Process TX skbs */ | |
286 | list_for_each(l, &mal->poll_list) { | |
287 | struct mal_commac *mc = | |
288 | list_entry(l, struct mal_commac, poll_list); | |
289 | mc->ops->poll_tx(mc->dev); | |
290 | } | |
1da177e4 | 291 | |
37448f7d ES |
292 | /* Process RX skbs. |
293 | * We _might_ need something more smart here to enforce polling fairness. | |
1da177e4 | 294 | */ |
37448f7d ES |
295 | list_for_each(l, &mal->poll_list) { |
296 | struct mal_commac *mc = | |
297 | list_entry(l, struct mal_commac, poll_list); | |
bfe13f54 | 298 | int n = mc->ops->poll_rx(mc->dev, budget); |
37448f7d ES |
299 | if (n) { |
300 | received += n; | |
bfe13f54 RD |
301 | budget -= n; |
302 | if (budget <= 0) | |
37448f7d | 303 | goto more_work; // XXX What if this is the last one ? |
37448f7d ES |
304 | } |
305 | } | |
1da177e4 | 306 | |
37448f7d ES |
307 | /* We need to disable IRQs to protect from RXDE IRQ here */ |
308 | local_irq_disable(); | |
bfe13f54 | 309 | __napi_complete(napi); |
37448f7d ES |
310 | mal_enable_eob_irq(mal); |
311 | local_irq_enable(); | |
312 | ||
37448f7d ES |
313 | /* Check for "rotting" packet(s) */ |
314 | list_for_each(l, &mal->poll_list) { | |
315 | struct mal_commac *mc = | |
316 | list_entry(l, struct mal_commac, poll_list); | |
317 | if (unlikely(mc->ops->peek_rx(mc->dev) || mc->rx_stopped)) { | |
318 | MAL_DBG2("%d: rotting packet" NL, mal->def->index); | |
bfe13f54 | 319 | if (napi_reschedule(napi)) |
37448f7d ES |
320 | mal_disable_eob_irq(mal); |
321 | else | |
322 | MAL_DBG2("%d: already in poll list" NL, | |
323 | mal->def->index); | |
324 | ||
bfe13f54 | 325 | if (budget > 0) |
37448f7d ES |
326 | goto again; |
327 | else | |
328 | goto more_work; | |
1da177e4 | 329 | } |
37448f7d | 330 | mc->ops->poll_tx(mc->dev); |
1da177e4 | 331 | } |
1da177e4 | 332 | |
37448f7d | 333 | more_work: |
bfe13f54 RD |
334 | MAL_DBG2("%d: poll() %d <- %d" NL, mal->def->index, budget, received); |
335 | return received; | |
37448f7d ES |
336 | } |
337 | ||
/* Soft-reset the MAL by setting MAL_CFG_SR and busy-waiting (up to 10
 * register reads) for the hardware to clear it.  Reset should complete
 * in one system clock, so the bound is generous; a timeout is only
 * logged since there is no further recovery possible.
 */
static void mal_reset(struct ibm_ocp_mal *mal)
{
	int n = 10;
	MAL_DBG("%d: reset" NL, mal->def->index);

	set_mal_dcrn(mal, MAL_CFG, MAL_CFG_SR);

	/* Wait for reset to complete (1 system clock) */
	while ((get_mal_dcrn(mal, MAL_CFG) & MAL_CFG_SR) && n)
		--n;

	if (unlikely(!n))
		printk(KERN_ERR "mal%d: reset timeout\n", mal->def->index);
}
352 | ||
/* Size of the buffer mal_dump_regs() fills: an ethtool sub-header
 * followed by the full MAL register snapshot.
 */
int mal_get_regs_len(struct ibm_ocp_mal *mal)
{
	return sizeof(struct emac_ethtool_regs_subhdr) +
	    sizeof(struct ibm_mal_regs);
}
358 | ||
/* Dump all MAL registers into @buf for ethtool: a subheader (version +
 * instance index), global registers, then per-channel TX/RX pointers
 * and RX buffer sizes.  Caller must provide at least
 * mal_get_regs_len() bytes; returns the address just past the data
 * written.
 */
void *mal_dump_regs(struct ibm_ocp_mal *mal, void *buf)
{
	struct emac_ethtool_regs_subhdr *hdr = buf;
	struct ibm_mal_regs *regs = (struct ibm_mal_regs *)(hdr + 1);
	struct ocp_func_mal_data *maldata = mal->def->additions;
	int i;

	hdr->version = MAL_VERSION;
	hdr->index = mal->def->index;

	regs->tx_count = maldata->num_tx_chans;
	regs->rx_count = maldata->num_rx_chans;

	/* Global configuration / status registers */
	regs->cfg = get_mal_dcrn(mal, MAL_CFG);
	regs->esr = get_mal_dcrn(mal, MAL_ESR);
	regs->ier = get_mal_dcrn(mal, MAL_IER);
	regs->tx_casr = get_mal_dcrn(mal, MAL_TXCASR);
	regs->tx_carr = get_mal_dcrn(mal, MAL_TXCARR);
	regs->tx_eobisr = get_mal_dcrn(mal, MAL_TXEOBISR);
	regs->tx_deir = get_mal_dcrn(mal, MAL_TXDEIR);
	regs->rx_casr = get_mal_dcrn(mal, MAL_RXCASR);
	regs->rx_carr = get_mal_dcrn(mal, MAL_RXCARR);
	regs->rx_eobisr = get_mal_dcrn(mal, MAL_RXEOBISR);
	regs->rx_deir = get_mal_dcrn(mal, MAL_RXDEIR);

	/* Per-channel descriptor table pointers (and RX buffer sizes) */
	for (i = 0; i < regs->tx_count; ++i)
		regs->tx_ctpr[i] = get_mal_dcrn(mal, MAL_TXCTPR(i));

	for (i = 0; i < regs->rx_count; ++i) {
		regs->rx_ctpr[i] = get_mal_dcrn(mal, MAL_RXCTPR(i));
		regs->rcbs[i] = get_mal_dcrn(mal, MAL_RCBS(i));
	}
	return regs + 1;
}
393 | ||
/* OCP probe: allocate and initialize one MAL instance.
 *
 * Sequence: validate platform data, allocate the ibm_ocp_mal, map the
 * DCR range, reset and configure the hardware, allocate one contiguous
 * DMA-coherent area holding all TX then all RX descriptor rings,
 * program the per-channel descriptor pointers, request the five MAL
 * IRQs, then enable SERR sources and publish the instance.
 * Errors unwind in reverse order via the fail* labels.
 */
static int __init mal_probe(struct ocp_device *ocpdev)
{
	struct ibm_ocp_mal *mal;
	struct ocp_func_mal_data *maldata;
	int err = 0, i, bd_size;

	MAL_DBG("%d: probe" NL, ocpdev->def->index);

	maldata = ocpdev->def->additions;
	if (maldata == NULL) {
		printk(KERN_ERR "mal%d: missing additional data!\n",
		       ocpdev->def->index);
		return -ENODEV;
	}

	mal = kzalloc(sizeof(struct ibm_ocp_mal), GFP_KERNEL);
	if (!mal) {
		printk(KERN_ERR
		       "mal%d: out of memory allocating MAL structure!\n",
		       ocpdev->def->index);
		return -ENOMEM;
	}

	/* XXX This only works for native dcr for now */
	mal->dcrhost = dcr_map(NULL, maldata->dcr_base, 0);

	mal->def = ocpdev->def;

	/* NAPI context set up by hand (no net_device owns the MAL) */
	INIT_LIST_HEAD(&mal->poll_list);
	mal->napi.weight = CONFIG_IBM_EMAC_POLL_WEIGHT;
	mal->napi.poll = mal_poll;

	INIT_LIST_HEAD(&mal->list);

	/* Load power-on reset defaults */
	mal_reset(mal);

	/* Set the MAL configuration register */
	set_mal_dcrn(mal, MAL_CFG, MAL_CFG_DEFAULT | MAL_CFG_PLBB |
		     MAL_CFG_OPBBL | MAL_CFG_LEA);

	mal_enable_eob_irq(mal);

	/* Allocate space for BD rings: all TX rings first, then all RX
	 * rings, in one coherent allocation (see mal_*_bd_offset()) */
	BUG_ON(maldata->num_tx_chans <= 0 || maldata->num_tx_chans > 32);
	BUG_ON(maldata->num_rx_chans <= 0 || maldata->num_rx_chans > 32);
	bd_size = sizeof(struct mal_descriptor) *
	    (NUM_TX_BUFF * maldata->num_tx_chans +
	     NUM_RX_BUFF * maldata->num_rx_chans);
	mal->bd_virt =
	    dma_alloc_coherent(&ocpdev->dev, bd_size, &mal->bd_dma, GFP_KERNEL);

	if (!mal->bd_virt) {
		printk(KERN_ERR
		       "mal%d: out of memory allocating RX/TX descriptors!\n",
		       mal->def->index);
		err = -ENOMEM;
		goto fail;
	}
	memset(mal->bd_virt, 0, bd_size);

	/* Point each channel at its ring inside the shared area */
	for (i = 0; i < maldata->num_tx_chans; ++i)
		set_mal_dcrn(mal, MAL_TXCTPR(i), mal->bd_dma +
			     sizeof(struct mal_descriptor) *
			     mal_tx_bd_offset(mal, i));

	for (i = 0; i < maldata->num_rx_chans; ++i)
		set_mal_dcrn(mal, MAL_RXCTPR(i), mal->bd_dma +
			     sizeof(struct mal_descriptor) *
			     mal_rx_bd_offset(mal, i));

	err = request_irq(maldata->serr_irq, mal_serr, 0, "MAL SERR", mal);
	if (err)
		goto fail2;
	err = request_irq(maldata->txde_irq, mal_txde, 0, "MAL TX DE", mal);
	if (err)
		goto fail3;
	err = request_irq(maldata->txeob_irq, mal_txeob, 0, "MAL TX EOB", mal);
	if (err)
		goto fail4;
	err = request_irq(maldata->rxde_irq, mal_rxde, 0, "MAL RX DE", mal);
	if (err)
		goto fail5;
	err = request_irq(maldata->rxeob_irq, mal_rxeob, 0, "MAL RX EOB", mal);
	if (err)
		goto fail6;

	/* Enable all MAL SERR interrupt sources */
	set_mal_dcrn(mal, MAL_IER, MAL_IER_EVENTS);

	/* Advertise this instance to the rest of the world */
	ocp_set_drvdata(ocpdev, mal);

	mal_dbg_register(mal->def->index, mal);

	printk(KERN_INFO "mal%d: initialized, %d TX channels, %d RX channels\n",
	       mal->def->index, maldata->num_tx_chans, maldata->num_rx_chans);
	return 0;

      fail6:
	free_irq(maldata->rxde_irq, mal);
      fail5:
	free_irq(maldata->txeob_irq, mal);
      fail4:
	free_irq(maldata->txde_irq, mal);
      fail3:
	free_irq(maldata->serr_irq, mal);
      fail2:
	dma_free_coherent(&ocpdev->dev, bd_size, mal->bd_virt, mal->bd_dma);
      fail:
	kfree(mal);
	return err;
}
507 | ||
/* OCP remove: tear down a MAL instance in reverse probe order —
 * quiesce NAPI, warn if commacs are still registered (callers should
 * have unregistered them first), free the IRQs, reset the hardware,
 * unregister debug state, and release the descriptor area.
 */
static void __exit mal_remove(struct ocp_device *ocpdev)
{
	struct ibm_ocp_mal *mal = ocp_get_drvdata(ocpdev);
	struct ocp_func_mal_data *maldata = mal->def->additions;

	MAL_DBG("%d: remove" NL, mal->def->index);

	/* Synchronize with scheduled polling */
	napi_disable(&mal->napi);

	if (!list_empty(&mal->list)) {
		/* This is *very* bad */
		printk(KERN_EMERG
		       "mal%d: commac list is not empty on remove!\n",
		       mal->def->index);
	}

	ocp_set_drvdata(ocpdev, NULL);

	free_irq(maldata->serr_irq, mal);
	free_irq(maldata->txde_irq, mal);
	free_irq(maldata->txeob_irq, mal);
	free_irq(maldata->rxde_irq, mal);
	free_irq(maldata->rxeob_irq, mal);

	mal_reset(mal);

	mal_dbg_register(mal->def->index, NULL);

	/* Same size computation as the probe-time allocation */
	dma_free_coherent(&ocpdev->dev,
			  sizeof(struct mal_descriptor) *
			  (NUM_TX_BUFF * maldata->num_tx_chans +
			   NUM_RX_BUFF * maldata->num_rx_chans), mal->bd_virt,
			  mal->bd_dma);

	kfree(mal);
}
545 | ||
/* OCP match table: bind to any IBM MAL function.
 * Terminated by the OCP_VENDOR_INVALID sentinel entry. */
static struct ocp_device_id mal_ids[] = {
	{ .vendor = OCP_VENDOR_IBM, .function = OCP_FUNC_MAL },
	{ .vendor = OCP_VENDOR_INVALID}
};
551 | ||
/* OCP bus driver glue for the MAL */
static struct ocp_driver mal_driver = {
	.name = "mal",
	.id_table = mal_ids,

	.probe = mal_probe,
	.remove = mal_remove,
};
559 | ||
/* Register the MAL OCP driver; called by the EMAC core at module init.
 * Returns the ocp_register_driver() result. */
int __init mal_init(void)
{
	MAL_DBG(": init" NL);
	return ocp_register_driver(&mal_driver);
}
565 | ||
/* Unregister the MAL OCP driver; called by the EMAC core at module exit. */
void __exit mal_exit(void)
{
	MAL_DBG(": exit" NL);
	ocp_unregister_driver(&mal_driver);
}