/*
 * Texas Instruments CPDMA Driver
 *
 * Copyright (C) 2010 Texas Instruments
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation version 2.
 *
 * This program is distributed "as is" WITHOUT ANY WARRANTY of any
 * kind, whether express or implied; without even the implied warranty
 * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */
#include <linux/kernel.h>
#include <linux/spinlock.h>
#include <linux/device.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/err.h>
#include <linux/dma-mapping.h>
#include <linux/io.h>
#include <linux/delay.h>

#include "davinci_cpdma.h"

/* DMA Registers */
#define CPDMA_TXIDVER		0x00
#define CPDMA_TXCONTROL		0x04
#define CPDMA_TXTEARDOWN	0x08
#define CPDMA_RXIDVER		0x10
#define CPDMA_RXCONTROL		0x14
#define CPDMA_SOFTRESET		0x1c
#define CPDMA_RXTEARDOWN	0x18
#define CPDMA_TXINTSTATRAW	0x80
#define CPDMA_TXINTSTATMASKED	0x84
#define CPDMA_TXINTMASKSET	0x88
#define CPDMA_TXINTMASKCLEAR	0x8c
#define CPDMA_MACINVECTOR	0x90
#define CPDMA_MACEOIVECTOR	0x94
#define CPDMA_RXINTSTATRAW	0xa0
#define CPDMA_RXINTSTATMASKED	0xa4
#define CPDMA_RXINTMASKSET	0xa8
#define CPDMA_RXINTMASKCLEAR	0xac
#define CPDMA_DMAINTSTATRAW	0xb0
#define CPDMA_DMAINTSTATMASKED	0xb4
#define CPDMA_DMAINTMASKSET	0xb8
#define CPDMA_DMAINTMASKCLEAR	0xbc
#define CPDMA_DMAINT_HOSTERR	BIT(1)

/* the following exist only if has_ext_regs is set */
#define CPDMA_DMACONTROL	0x20
#define CPDMA_DMASTATUS		0x24
#define CPDMA_RXBUFFOFS		0x28
#define CPDMA_EM_CONTROL	0x2c

/* Descriptor mode bits */
#define CPDMA_DESC_SOP		BIT(31)
#define CPDMA_DESC_EOP		BIT(30)
#define CPDMA_DESC_OWNER	BIT(29)
#define CPDMA_DESC_EOQ		BIT(28)
#define CPDMA_DESC_TD_COMPLETE	BIT(27)
#define CPDMA_DESC_PASS_CRC	BIT(26)
#define CPDMA_DESC_TO_PORT_EN	BIT(20)
#define CPDMA_TO_PORT_SHIFT	16
#define CPDMA_DESC_PORT_MASK	(BIT(18) | BIT(17) | BIT(16))
#define CPDMA_DESC_CRC_LEN	4

#define CPDMA_TEARDOWN_VALUE	0xfffffffc

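/*
 * One CPDMA hardware descriptor.  The first four words (hw_*) are
 * defined by the hardware and fetched by the DMA engine in exactly
 * this layout: hw_next chains descriptors, hw_buffer points at the
 * data, and hw_mode carries the CPDMA_DESC_* flags plus the packet
 * length.  The sw_* words are driver scratch space: they shadow the
 * token and the DMA mapping so the buffer can be unmapped and the
 * completion handler invoked when the descriptor comes back.
 */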
struct cpdma_desc {
	/* hardware fields */
	u32			hw_next;
	u32			hw_buffer;
	u32			hw_len;
	u32			hw_mode;
	/* software fields */
	void			*sw_token;
	u32			sw_buffer;
	u32			sw_len;
};

struct cpdma_desc_pool {
	u32			phys;
	u32			hw_addr;
	void __iomem		*iomap;		/* ioremap map */
	void			*cpumap;	/* dma_alloc map */
	int			desc_size, mem_size;
	int			num_desc, used_desc;
	unsigned long		*bitmap;
	struct device		*dev;
	spinlock_t		lock;
};

enum cpdma_state {
	CPDMA_STATE_IDLE,
	CPDMA_STATE_ACTIVE,
	CPDMA_STATE_TEARDOWN,
};

static const char *cpdma_state_str[] = { "idle", "active", "teardown" };

struct cpdma_ctlr {
	enum cpdma_state	state;
	struct cpdma_params	params;
	struct device		*dev;
	struct cpdma_desc_pool	*pool;
	spinlock_t		lock;
	struct cpdma_chan	*channels[2 * CPDMA_MAX_CHANNELS];
};

struct cpdma_chan {
	struct cpdma_desc __iomem	*head, *tail;
	void __iomem			*hdp, *cp, *rxfree;
	enum cpdma_state		state;
	struct cpdma_ctlr		*ctlr;
	int				chan_num;
	spinlock_t			lock;
	int				count;
	u32				mask;
	cpdma_handler_fn		handler;
	enum dma_data_direction		dir;
	struct cpdma_chan_stats		stats;
	/* offsets into dmaregs */
	int	int_set, int_clear, td;
};

/* The following make access to common cpdma_ctlr params more readable */
#define dmaregs		params.dmaregs
#define num_chan	params.num_chan

/* various accessors */
#define dma_reg_read(ctlr, ofs)		__raw_readl((ctlr)->dmaregs + (ofs))
#define chan_read(chan, fld)		__raw_readl((chan)->fld)
#define desc_read(desc, fld)		__raw_readl(&(desc)->fld)
#define dma_reg_write(ctlr, ofs, v)	__raw_writel(v, (ctlr)->dmaregs + (ofs))
#define chan_write(chan, fld, v)	__raw_writel(v, (chan)->fld)
#define desc_write(desc, fld, v)	__raw_writel((u32)(v), &(desc)->fld)

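/*
 * Note that the accessors above use the __raw_* MMIO helpers, which do
 * no byte swapping and insert no memory barriers.  The driver assumes
 * native-endian descriptors and registers, and relies on the ctlr/chan
 * spinlocks for ordering between the accesses that matter.
 */
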
#define cpdma_desc_to_port(chan, mode, directed)			\
	do {								\
		if (!is_rx_chan(chan) && ((directed == 1) ||		\
					  (directed == 2)))		\
			mode |= (CPDMA_DESC_TO_PORT_EN |		\
				 (directed << CPDMA_TO_PORT_SHIFT));	\
	} while (0)

/*
 * Utility constructs for a cpdma descriptor pool.  Some devices (e.g. davinci
 * emac) have dedicated on-chip memory for these descriptors.  Some other
 * devices (e.g. cpsw switches) use plain old memory.  Descriptor pools
 * abstract out these details.
 */
static struct cpdma_desc_pool *
cpdma_desc_pool_create(struct device *dev, u32 phys, u32 hw_addr,
		       int size, int align)
{
	int bitmap_size;
	struct cpdma_desc_pool *pool;

	pool = kzalloc(sizeof(*pool), GFP_KERNEL);
	if (!pool)
		return NULL;

	spin_lock_init(&pool->lock);

	pool->dev	= dev;
	pool->mem_size	= size;
	pool->desc_size	= ALIGN(sizeof(struct cpdma_desc), align);
	pool->num_desc	= size / pool->desc_size;

	bitmap_size = (pool->num_desc / BITS_PER_LONG) * sizeof(long);
	pool->bitmap = kzalloc(bitmap_size, GFP_KERNEL);
	if (!pool->bitmap)
		goto fail;

	if (phys) {
		pool->phys  = phys;
		pool->iomap = ioremap(phys, size);
		pool->hw_addr = hw_addr;
	} else {
		pool->cpumap = dma_alloc_coherent(dev, size, &pool->phys,
						  GFP_KERNEL);
		pool->iomap = pool->cpumap;
		pool->hw_addr = pool->phys;
	}

	if (pool->iomap)
		return pool;

fail:
	kfree(pool->bitmap);
	kfree(pool);
	return NULL;
}

static void cpdma_desc_pool_destroy(struct cpdma_desc_pool *pool)
{
	unsigned long flags;

	if (!pool)
		return;

	spin_lock_irqsave(&pool->lock, flags);
	WARN_ON(pool->used_desc);
	kfree(pool->bitmap);
	if (pool->cpumap) {
		dma_free_coherent(pool->dev, pool->mem_size, pool->cpumap,
				  pool->phys);
	} else {
		iounmap(pool->iomap);
	}
	spin_unlock_irqrestore(&pool->lock, flags);
	kfree(pool);
}

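/*
 * Translate between the CPU's view of a descriptor (an ioremap'ed or
 * coherent virtual address) and the address the DMA engine must be
 * given.  pool->hw_addr can differ from pool->phys when the descriptors
 * live in on-chip RAM that the DMA engine reaches through a different
 * bus address than the CPU does.
 */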
static inline dma_addr_t desc_phys(struct cpdma_desc_pool *pool,
				   struct cpdma_desc __iomem *desc)
{
	if (!desc)
		return 0;
	return pool->hw_addr + (__force dma_addr_t)desc -
			       (__force dma_addr_t)pool->iomap;
}

static inline struct cpdma_desc __iomem *
desc_from_phys(struct cpdma_desc_pool *pool, dma_addr_t dma)
{
	return dma ? pool->iomap + dma - pool->hw_addr : NULL;
}

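/*
 * Descriptor allocation: the pool is statically split in half, with rx
 * channels allocating from the bottom half of the bitmap and tx
 * channels from the top.  This keeps a busy rx path from consuming
 * every descriptor and starving tx, and vice versa.
 */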
static struct cpdma_desc __iomem *
cpdma_desc_alloc(struct cpdma_desc_pool *pool, int num_desc, bool is_rx)
{
	unsigned long flags;
	int index;
	int desc_start;
	int desc_end;
	struct cpdma_desc __iomem *desc = NULL;

	spin_lock_irqsave(&pool->lock, flags);

	if (is_rx) {
		desc_start = 0;
		desc_end = pool->num_desc/2;
	} else {
		desc_start = pool->num_desc/2;
		desc_end = pool->num_desc;
	}

	index = bitmap_find_next_zero_area(pool->bitmap,
				desc_end, desc_start, num_desc, 0);
	if (index < desc_end) {
		bitmap_set(pool->bitmap, index, num_desc);
		desc = pool->iomap + pool->desc_size * index;
		pool->used_desc++;
	}

	spin_unlock_irqrestore(&pool->lock, flags);
	return desc;
}

static void cpdma_desc_free(struct cpdma_desc_pool *pool,
			    struct cpdma_desc __iomem *desc, int num_desc)
{
	unsigned long flags, index;

	index = ((unsigned long)desc - (unsigned long)pool->iomap) /
		pool->desc_size;
	spin_lock_irqsave(&pool->lock, flags);
	bitmap_clear(pool->bitmap, index, num_desc);
	pool->used_desc--;
	spin_unlock_irqrestore(&pool->lock, flags);
}

struct cpdma_ctlr *cpdma_ctlr_create(struct cpdma_params *params)
{
	struct cpdma_ctlr *ctlr;

	ctlr = kzalloc(sizeof(*ctlr), GFP_KERNEL);
	if (!ctlr)
		return NULL;

	ctlr->state = CPDMA_STATE_IDLE;
	ctlr->params = *params;
	ctlr->dev = params->dev;
	spin_lock_init(&ctlr->lock);

	ctlr->pool = cpdma_desc_pool_create(ctlr->dev,
					    ctlr->params.desc_mem_phys,
					    ctlr->params.desc_hw_addr,
					    ctlr->params.desc_mem_size,
					    ctlr->params.desc_align);
	if (!ctlr->pool) {
		kfree(ctlr);
		return NULL;
	}

	if (WARN_ON(ctlr->num_chan > CPDMA_MAX_CHANNELS))
		ctlr->num_chan = CPDMA_MAX_CHANNELS;
	return ctlr;
}
EXPORT_SYMBOL_GPL(cpdma_ctlr_create);
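
/*
 * A minimal sketch of the intended call sequence, for orientation only
 * (tx_chan_num() is the channel-numbering helper from davinci_cpdma.h;
 * "params", "token", "data" and "len" are assumed to be supplied by the
 * caller, typically from platform data and an skb):
 *
 *	struct cpdma_ctlr *dma  = cpdma_ctlr_create(&params);
 *	struct cpdma_chan *txch = cpdma_chan_create(dma, tx_chan_num(0),
 *						    tx_handler);
 *	cpdma_ctlr_start(dma);
 *	cpdma_chan_submit(txch, token, data, len, 0);
 *	...
 *	cpdma_ctlr_stop(dma);
 *	cpdma_ctlr_destroy(dma);
 */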

int cpdma_ctlr_start(struct cpdma_ctlr *ctlr)
{
	unsigned long flags;
	int i;

	spin_lock_irqsave(&ctlr->lock, flags);
	if (ctlr->state != CPDMA_STATE_IDLE) {
		spin_unlock_irqrestore(&ctlr->lock, flags);
		return -EBUSY;
	}

	if (ctlr->params.has_soft_reset) {
		unsigned timeout = 10 * 100;

		dma_reg_write(ctlr, CPDMA_SOFTRESET, 1);
		while (timeout) {
			if (dma_reg_read(ctlr, CPDMA_SOFTRESET) == 0)
				break;
			udelay(10);
			timeout--;
		}
		WARN_ON(!timeout);
	}

	for (i = 0; i < ctlr->num_chan; i++) {
		__raw_writel(0, ctlr->params.txhdp + 4 * i);
		__raw_writel(0, ctlr->params.rxhdp + 4 * i);
		__raw_writel(0, ctlr->params.txcp + 4 * i);
		__raw_writel(0, ctlr->params.rxcp + 4 * i);
	}

	dma_reg_write(ctlr, CPDMA_RXINTMASKCLEAR, 0xffffffff);
	dma_reg_write(ctlr, CPDMA_TXINTMASKCLEAR, 0xffffffff);

	dma_reg_write(ctlr, CPDMA_TXCONTROL, 1);
	dma_reg_write(ctlr, CPDMA_RXCONTROL, 1);

	ctlr->state = CPDMA_STATE_ACTIVE;

	for (i = 0; i < ARRAY_SIZE(ctlr->channels); i++) {
		if (ctlr->channels[i])
			cpdma_chan_start(ctlr->channels[i]);
	}
	spin_unlock_irqrestore(&ctlr->lock, flags);
	return 0;
}
EXPORT_SYMBOL_GPL(cpdma_ctlr_start);

int cpdma_ctlr_stop(struct cpdma_ctlr *ctlr)
{
	unsigned long flags;
	int i;

	spin_lock_irqsave(&ctlr->lock, flags);
	if (ctlr->state != CPDMA_STATE_ACTIVE) {
		spin_unlock_irqrestore(&ctlr->lock, flags);
		return -EINVAL;
	}

	ctlr->state = CPDMA_STATE_TEARDOWN;

	for (i = 0; i < ARRAY_SIZE(ctlr->channels); i++) {
		if (ctlr->channels[i])
			cpdma_chan_stop(ctlr->channels[i]);
	}

	dma_reg_write(ctlr, CPDMA_RXINTMASKCLEAR, 0xffffffff);
	dma_reg_write(ctlr, CPDMA_TXINTMASKCLEAR, 0xffffffff);

	dma_reg_write(ctlr, CPDMA_TXCONTROL, 0);
	dma_reg_write(ctlr, CPDMA_RXCONTROL, 0);

	ctlr->state = CPDMA_STATE_IDLE;

	spin_unlock_irqrestore(&ctlr->lock, flags);
	return 0;
}
EXPORT_SYMBOL_GPL(cpdma_ctlr_stop);

int cpdma_ctlr_dump(struct cpdma_ctlr *ctlr)
{
	struct device *dev = ctlr->dev;
	unsigned long flags;
	int i;

	spin_lock_irqsave(&ctlr->lock, flags);

	dev_info(dev, "CPDMA: state: %s", cpdma_state_str[ctlr->state]);

	dev_info(dev, "CPDMA: txidver: %x",
		 dma_reg_read(ctlr, CPDMA_TXIDVER));
	dev_info(dev, "CPDMA: txcontrol: %x",
		 dma_reg_read(ctlr, CPDMA_TXCONTROL));
	dev_info(dev, "CPDMA: txteardown: %x",
		 dma_reg_read(ctlr, CPDMA_TXTEARDOWN));
	dev_info(dev, "CPDMA: rxidver: %x",
		 dma_reg_read(ctlr, CPDMA_RXIDVER));
	dev_info(dev, "CPDMA: rxcontrol: %x",
		 dma_reg_read(ctlr, CPDMA_RXCONTROL));
	dev_info(dev, "CPDMA: softreset: %x",
		 dma_reg_read(ctlr, CPDMA_SOFTRESET));
	dev_info(dev, "CPDMA: rxteardown: %x",
		 dma_reg_read(ctlr, CPDMA_RXTEARDOWN));
	dev_info(dev, "CPDMA: txintstatraw: %x",
		 dma_reg_read(ctlr, CPDMA_TXINTSTATRAW));
	dev_info(dev, "CPDMA: txintstatmasked: %x",
		 dma_reg_read(ctlr, CPDMA_TXINTSTATMASKED));
	dev_info(dev, "CPDMA: txintmaskset: %x",
		 dma_reg_read(ctlr, CPDMA_TXINTMASKSET));
	dev_info(dev, "CPDMA: txintmaskclear: %x",
		 dma_reg_read(ctlr, CPDMA_TXINTMASKCLEAR));
	dev_info(dev, "CPDMA: macinvector: %x",
		 dma_reg_read(ctlr, CPDMA_MACINVECTOR));
	dev_info(dev, "CPDMA: maceoivector: %x",
		 dma_reg_read(ctlr, CPDMA_MACEOIVECTOR));
	dev_info(dev, "CPDMA: rxintstatraw: %x",
		 dma_reg_read(ctlr, CPDMA_RXINTSTATRAW));
	dev_info(dev, "CPDMA: rxintstatmasked: %x",
		 dma_reg_read(ctlr, CPDMA_RXINTSTATMASKED));
	dev_info(dev, "CPDMA: rxintmaskset: %x",
		 dma_reg_read(ctlr, CPDMA_RXINTMASKSET));
	dev_info(dev, "CPDMA: rxintmaskclear: %x",
		 dma_reg_read(ctlr, CPDMA_RXINTMASKCLEAR));
	dev_info(dev, "CPDMA: dmaintstatraw: %x",
		 dma_reg_read(ctlr, CPDMA_DMAINTSTATRAW));
	dev_info(dev, "CPDMA: dmaintstatmasked: %x",
		 dma_reg_read(ctlr, CPDMA_DMAINTSTATMASKED));
	dev_info(dev, "CPDMA: dmaintmaskset: %x",
		 dma_reg_read(ctlr, CPDMA_DMAINTMASKSET));
	dev_info(dev, "CPDMA: dmaintmaskclear: %x",
		 dma_reg_read(ctlr, CPDMA_DMAINTMASKCLEAR));

	if (!ctlr->params.has_ext_regs) {
		dev_info(dev, "CPDMA: dmacontrol: %x",
			 dma_reg_read(ctlr, CPDMA_DMACONTROL));
		dev_info(dev, "CPDMA: dmastatus: %x",
			 dma_reg_read(ctlr, CPDMA_DMASTATUS));
		dev_info(dev, "CPDMA: rxbuffofs: %x",
			 dma_reg_read(ctlr, CPDMA_RXBUFFOFS));
	}

	for (i = 0; i < ARRAY_SIZE(ctlr->channels); i++)
		if (ctlr->channels[i])
			cpdma_chan_dump(ctlr->channels[i]);

	spin_unlock_irqrestore(&ctlr->lock, flags);
	return 0;
}
EXPORT_SYMBOL_GPL(cpdma_ctlr_dump);

int cpdma_ctlr_destroy(struct cpdma_ctlr *ctlr)
{
	unsigned long flags;
	int ret = 0, i;

	if (!ctlr)
		return -EINVAL;

	spin_lock_irqsave(&ctlr->lock, flags);
	if (ctlr->state != CPDMA_STATE_IDLE)
		cpdma_ctlr_stop(ctlr);

	for (i = 0; i < ARRAY_SIZE(ctlr->channels); i++)
		cpdma_chan_destroy(ctlr->channels[i]);

	cpdma_desc_pool_destroy(ctlr->pool);
	spin_unlock_irqrestore(&ctlr->lock, flags);
	kfree(ctlr);
	return ret;
}
EXPORT_SYMBOL_GPL(cpdma_ctlr_destroy);

int cpdma_ctlr_int_ctrl(struct cpdma_ctlr *ctlr, bool enable)
{
	unsigned long flags;
	int i, reg;

	spin_lock_irqsave(&ctlr->lock, flags);
	if (ctlr->state != CPDMA_STATE_ACTIVE) {
		spin_unlock_irqrestore(&ctlr->lock, flags);
		return -EINVAL;
	}

	reg = enable ? CPDMA_DMAINTMASKSET : CPDMA_DMAINTMASKCLEAR;
	dma_reg_write(ctlr, reg, CPDMA_DMAINT_HOSTERR);

	for (i = 0; i < ARRAY_SIZE(ctlr->channels); i++) {
		if (ctlr->channels[i])
			cpdma_chan_int_ctrl(ctlr->channels[i], enable);
	}

	spin_unlock_irqrestore(&ctlr->lock, flags);
	return 0;
}
EXPORT_SYMBOL_GPL(cpdma_ctlr_int_ctrl);

void cpdma_ctlr_eoi(struct cpdma_ctlr *ctlr, u32 value)
{
	dma_reg_write(ctlr, CPDMA_MACEOIVECTOR, value);
}
EXPORT_SYMBOL_GPL(cpdma_ctlr_eoi);
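
/*
 * The EOI write tells the hardware which interrupt line the host has
 * finished servicing, so it can re-assert that line if more work is
 * pending.  The value is one of the CPDMA_EOI_* codes declared
 * alongside this API in davinci_cpdma.h (rx threshold, rx, tx or misc
 * on cpsw-class hardware).
 */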

struct cpdma_chan *cpdma_chan_create(struct cpdma_ctlr *ctlr, int chan_num,
				     cpdma_handler_fn handler)
{
	struct cpdma_chan *chan;
	int ret, offset = (chan_num % CPDMA_MAX_CHANNELS) * 4;
	unsigned long flags;

	if (__chan_linear(chan_num) >= ctlr->num_chan)
		return NULL;

	ret = -ENOMEM;
	chan = kzalloc(sizeof(*chan), GFP_KERNEL);
	if (!chan)
		goto err_chan_alloc;

	spin_lock_irqsave(&ctlr->lock, flags);
	ret = -EBUSY;
	if (ctlr->channels[chan_num])
		goto err_chan_busy;

	chan->ctlr	= ctlr;
	chan->state	= CPDMA_STATE_IDLE;
	chan->chan_num	= chan_num;
	chan->handler	= handler;

	if (is_rx_chan(chan)) {
		chan->hdp	= ctlr->params.rxhdp + offset;
		chan->cp	= ctlr->params.rxcp + offset;
		chan->rxfree	= ctlr->params.rxfree + offset;
		chan->int_set	= CPDMA_RXINTMASKSET;
		chan->int_clear	= CPDMA_RXINTMASKCLEAR;
		chan->td	= CPDMA_RXTEARDOWN;
		chan->dir	= DMA_FROM_DEVICE;
	} else {
		chan->hdp	= ctlr->params.txhdp + offset;
		chan->cp	= ctlr->params.txcp + offset;
		chan->int_set	= CPDMA_TXINTMASKSET;
		chan->int_clear	= CPDMA_TXINTMASKCLEAR;
		chan->td	= CPDMA_TXTEARDOWN;
		chan->dir	= DMA_TO_DEVICE;
	}
	chan->mask = BIT(chan_linear(chan));

	spin_lock_init(&chan->lock);

	ctlr->channels[chan_num] = chan;
	spin_unlock_irqrestore(&ctlr->lock, flags);
	return chan;

err_chan_busy:
	spin_unlock_irqrestore(&ctlr->lock, flags);
	kfree(chan);
err_chan_alloc:
	return ERR_PTR(ret);
}
EXPORT_SYMBOL_GPL(cpdma_chan_create);

int cpdma_chan_destroy(struct cpdma_chan *chan)
{
	struct cpdma_ctlr *ctlr;
	unsigned long flags;

	if (!chan)
		return -EINVAL;
	ctlr = chan->ctlr;

	spin_lock_irqsave(&ctlr->lock, flags);
	if (chan->state != CPDMA_STATE_IDLE)
		cpdma_chan_stop(chan);
	ctlr->channels[chan->chan_num] = NULL;
	spin_unlock_irqrestore(&ctlr->lock, flags);
	kfree(chan);
	return 0;
}
EXPORT_SYMBOL_GPL(cpdma_chan_destroy);

int cpdma_chan_get_stats(struct cpdma_chan *chan,
			 struct cpdma_chan_stats *stats)
{
	unsigned long flags;

	if (!chan)
		return -EINVAL;
	spin_lock_irqsave(&chan->lock, flags);
	memcpy(stats, &chan->stats, sizeof(*stats));
	spin_unlock_irqrestore(&chan->lock, flags);
	return 0;
}

int cpdma_chan_dump(struct cpdma_chan *chan)
{
	unsigned long flags;
	struct device *dev = chan->ctlr->dev;

	spin_lock_irqsave(&chan->lock, flags);

	dev_info(dev, "channel %d (%s %d) state %s",
		 chan->chan_num, is_rx_chan(chan) ? "rx" : "tx",
		 chan_linear(chan), cpdma_state_str[chan->state]);
	dev_info(dev, "\thdp: %x\n", chan_read(chan, hdp));
	dev_info(dev, "\tcp: %x\n", chan_read(chan, cp));
	if (chan->rxfree) {
		dev_info(dev, "\trxfree: %x\n",
			 chan_read(chan, rxfree));
	}

	dev_info(dev, "\tstats head_enqueue: %d\n",
		 chan->stats.head_enqueue);
	dev_info(dev, "\tstats tail_enqueue: %d\n",
		 chan->stats.tail_enqueue);
	dev_info(dev, "\tstats pad_enqueue: %d\n",
		 chan->stats.pad_enqueue);
	dev_info(dev, "\tstats misqueued: %d\n",
		 chan->stats.misqueued);
	dev_info(dev, "\tstats desc_alloc_fail: %d\n",
		 chan->stats.desc_alloc_fail);
	dev_info(dev, "\tstats pad_alloc_fail: %d\n",
		 chan->stats.pad_alloc_fail);
	dev_info(dev, "\tstats runt_receive_buff: %d\n",
		 chan->stats.runt_receive_buff);
	dev_info(dev, "\tstats runt_transmit_buff: %d\n",
		 chan->stats.runt_transmit_buff);
	dev_info(dev, "\tstats empty_dequeue: %d\n",
		 chan->stats.empty_dequeue);
	dev_info(dev, "\tstats busy_dequeue: %d\n",
		 chan->stats.busy_dequeue);
	dev_info(dev, "\tstats good_dequeue: %d\n",
		 chan->stats.good_dequeue);
	dev_info(dev, "\tstats requeue: %d\n",
		 chan->stats.requeue);
	dev_info(dev, "\tstats teardown_dequeue: %d\n",
		 chan->stats.teardown_dequeue);

	spin_unlock_irqrestore(&chan->lock, flags);
	return 0;
}

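/*
 * Queue a descriptor on a channel.  The subtle part is the "misqueue"
 * race: the hardware sets CPDMA_DESC_EOQ when it finishes the last
 * descriptor of a chain, and it may do so just as we are appending a
 * new one.  If the previous tail shows EOQ with ownership already
 * released, the engine has stopped and will never see the new
 * descriptor on its own, so we restart it by writing the head
 * descriptor pointer (hdp) again.
 */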
static void __cpdma_chan_submit(struct cpdma_chan *chan,
				struct cpdma_desc __iomem *desc)
{
	struct cpdma_ctlr		*ctlr = chan->ctlr;
	struct cpdma_desc __iomem	*prev = chan->tail;
	struct cpdma_desc_pool		*pool = ctlr->pool;
	dma_addr_t			desc_dma;
	u32				mode;

	desc_dma = desc_phys(pool, desc);

	/* simple case - idle channel */
	if (!chan->head) {
		chan->stats.head_enqueue++;
		chan->head = desc;
		chan->tail = desc;
		if (chan->state == CPDMA_STATE_ACTIVE)
			chan_write(chan, hdp, desc_dma);
		return;
	}

	/* first chain the descriptor at the tail of the list */
	desc_write(prev, hw_next, desc_dma);
	chan->tail = desc;
	chan->stats.tail_enqueue++;

	/* next check if EOQ has been triggered already */
	mode = desc_read(prev, hw_mode);
	if (((mode & (CPDMA_DESC_EOQ | CPDMA_DESC_OWNER)) == CPDMA_DESC_EOQ) &&
	    (chan->state == CPDMA_STATE_ACTIVE)) {
		desc_write(prev, hw_mode, mode & ~CPDMA_DESC_EOQ);
		chan_write(chan, hdp, desc_dma);
		chan->stats.misqueued++;
	}
}

int cpdma_chan_submit(struct cpdma_chan *chan, void *token, void *data,
		      int len, int directed)
{
	struct cpdma_ctlr		*ctlr = chan->ctlr;
	struct cpdma_desc __iomem	*desc;
	dma_addr_t			buffer;
	unsigned long			flags;
	u32				mode;
	int				ret = 0;

	spin_lock_irqsave(&chan->lock, flags);

	if (chan->state == CPDMA_STATE_TEARDOWN) {
		ret = -EINVAL;
		goto unlock_ret;
	}

	desc = cpdma_desc_alloc(ctlr->pool, 1, is_rx_chan(chan));
	if (!desc) {
		chan->stats.desc_alloc_fail++;
		ret = -ENOMEM;
		goto unlock_ret;
	}

	if (len < ctlr->params.min_packet_size) {
		len = ctlr->params.min_packet_size;
		chan->stats.runt_transmit_buff++;
	}

	buffer = dma_map_single(ctlr->dev, data, len, chan->dir);
	ret = dma_mapping_error(ctlr->dev, buffer);
	if (ret) {
		cpdma_desc_free(ctlr->pool, desc, 1);
		ret = -EINVAL;
		goto unlock_ret;
	}

	mode = CPDMA_DESC_OWNER | CPDMA_DESC_SOP | CPDMA_DESC_EOP;
	cpdma_desc_to_port(chan, mode, directed);

	desc_write(desc, hw_next,   0);
	desc_write(desc, hw_buffer, buffer);
	desc_write(desc, hw_len,    len);
	desc_write(desc, hw_mode,   mode | len);
	desc_write(desc, sw_token,  token);
	desc_write(desc, sw_buffer, buffer);
	desc_write(desc, sw_len,    len);

	__cpdma_chan_submit(chan, desc);

	if (chan->state == CPDMA_STATE_ACTIVE && chan->rxfree)
		chan_write(chan, rxfree, 1);

	chan->count++;

unlock_ret:
	spin_unlock_irqrestore(&chan->lock, flags);
	return ret;
}
EXPORT_SYMBOL_GPL(cpdma_chan_submit);
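
/*
 * Note the token round-trip: whatever pointer the caller hands to
 * cpdma_chan_submit() (typically an skb) is stored in the descriptor's
 * sw_token field and handed back, untouched, to the channel's
 * cpdma_handler_fn when the descriptor completes or is torn down.
 */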

bool cpdma_check_free_tx_desc(struct cpdma_chan *chan)
{
	unsigned long flags;
	int index;
	bool ret;
	struct cpdma_ctlr	*ctlr = chan->ctlr;
	struct cpdma_desc_pool	*pool = ctlr->pool;

	spin_lock_irqsave(&pool->lock, flags);

	index = bitmap_find_next_zero_area(pool->bitmap,
				pool->num_desc, pool->num_desc/2, 1, 0);

	if (index < pool->num_desc)
		ret = true;
	else
		ret = false;

	spin_unlock_irqrestore(&pool->lock, flags);
	return ret;
}
EXPORT_SYMBOL_GPL(cpdma_check_free_tx_desc);

static void __cpdma_chan_free(struct cpdma_chan *chan,
			      struct cpdma_desc __iomem *desc,
			      int outlen, int status)
{
	struct cpdma_ctlr		*ctlr = chan->ctlr;
	struct cpdma_desc_pool		*pool = ctlr->pool;
	dma_addr_t			buff_dma;
	int				origlen;
	void				*token;

	token    = (void *)desc_read(desc, sw_token);
	buff_dma = desc_read(desc, sw_buffer);
	origlen  = desc_read(desc, sw_len);

	dma_unmap_single(ctlr->dev, buff_dma, origlen, chan->dir);
	cpdma_desc_free(pool, desc, 1);
	(*chan->handler)(token, outlen, status);
}

static int __cpdma_chan_process(struct cpdma_chan *chan)
{
	struct cpdma_ctlr		*ctlr = chan->ctlr;
	struct cpdma_desc __iomem	*desc;
	int				status, outlen;
	int				cb_status = 0;
	struct cpdma_desc_pool		*pool = ctlr->pool;
	dma_addr_t			desc_dma;
	unsigned long			flags;

	spin_lock_irqsave(&chan->lock, flags);

	desc = chan->head;
	if (!desc) {
		chan->stats.empty_dequeue++;
		status = -ENOENT;
		goto unlock_ret;
	}
	desc_dma = desc_phys(pool, desc);

	status	= __raw_readl(&desc->hw_mode);
	outlen	= status & 0x7ff;
	if (status & CPDMA_DESC_OWNER) {
		chan->stats.busy_dequeue++;
		status = -EBUSY;
		goto unlock_ret;
	}

	if (status & CPDMA_DESC_PASS_CRC)
		outlen -= CPDMA_DESC_CRC_LEN;

	status	= status & (CPDMA_DESC_EOQ | CPDMA_DESC_TD_COMPLETE |
			    CPDMA_DESC_PORT_MASK);

	chan->head = desc_from_phys(pool, desc_read(desc, hw_next));
	chan_write(chan, cp, desc_dma);
	chan->count--;
	chan->stats.good_dequeue++;

	if (status & CPDMA_DESC_EOQ) {
		chan->stats.requeue++;
		chan_write(chan, hdp, desc_phys(pool, chan->head));
	}

	spin_unlock_irqrestore(&chan->lock, flags);
	if (unlikely(status & CPDMA_DESC_TD_COMPLETE))
		cb_status = -ENOSYS;
	else
		cb_status = status;

	__cpdma_chan_free(chan, desc, outlen, cb_status);
	return status;

unlock_ret:
	spin_unlock_irqrestore(&chan->lock, flags);
	return status;
}

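/*
 * Process up to "quota" completed descriptors on a channel; the return
 * value is the number actually handled.  The quota maps naturally onto
 * a NAPI poll budget, which is how the davinci_emac and cpsw callers
 * typically drive this function.
 */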
int cpdma_chan_process(struct cpdma_chan *chan, int quota)
{
	int used = 0, ret = 0;

	if (chan->state != CPDMA_STATE_ACTIVE)
		return -EINVAL;

	while (used < quota) {
		ret = __cpdma_chan_process(chan);
		if (ret < 0)
			break;
		used++;
	}
	return used;
}
EXPORT_SYMBOL_GPL(cpdma_chan_process);

int cpdma_chan_start(struct cpdma_chan *chan)
{
	struct cpdma_ctlr	*ctlr = chan->ctlr;
	struct cpdma_desc_pool	*pool = ctlr->pool;
	unsigned long		flags;

	spin_lock_irqsave(&chan->lock, flags);
	if (chan->state != CPDMA_STATE_IDLE) {
		spin_unlock_irqrestore(&chan->lock, flags);
		return -EBUSY;
	}
	if (ctlr->state != CPDMA_STATE_ACTIVE) {
		spin_unlock_irqrestore(&chan->lock, flags);
		return -EINVAL;
	}
	dma_reg_write(ctlr, chan->int_set, chan->mask);
	chan->state = CPDMA_STATE_ACTIVE;
	if (chan->head) {
		chan_write(chan, hdp, desc_phys(pool, chan->head));
		if (chan->rxfree)
			chan_write(chan, rxfree, chan->count);
	}

	spin_unlock_irqrestore(&chan->lock, flags);
	return 0;
}
EXPORT_SYMBOL_GPL(cpdma_chan_start);

int cpdma_chan_stop(struct cpdma_chan *chan)
{
	struct cpdma_ctlr	*ctlr = chan->ctlr;
	struct cpdma_desc_pool	*pool = ctlr->pool;
	unsigned long		flags;
	int			ret;
	unsigned		timeout;

	spin_lock_irqsave(&chan->lock, flags);
	if (chan->state != CPDMA_STATE_ACTIVE) {
		spin_unlock_irqrestore(&chan->lock, flags);
		return -EINVAL;
	}

	chan->state = CPDMA_STATE_TEARDOWN;
	dma_reg_write(ctlr, chan->int_clear, chan->mask);

	/* trigger teardown */
	dma_reg_write(ctlr, chan->td, chan_linear(chan));

	/* wait for teardown complete */
	timeout = 100 * 100; /* 100 ms */
	while (timeout) {
		u32 cp = chan_read(chan, cp);
		if ((cp & CPDMA_TEARDOWN_VALUE) == CPDMA_TEARDOWN_VALUE)
			break;
		udelay(10);
		timeout--;
	}
	WARN_ON(!timeout);
	chan_write(chan, cp, CPDMA_TEARDOWN_VALUE);

	/* handle completed packets */
	spin_unlock_irqrestore(&chan->lock, flags);
	do {
		ret = __cpdma_chan_process(chan);
		if (ret < 0)
			break;
	} while ((ret & CPDMA_DESC_TD_COMPLETE) == 0);
	spin_lock_irqsave(&chan->lock, flags);

	/* remaining packets haven't been tx/rx'ed, clean them up */
	while (chan->head) {
		struct cpdma_desc __iomem *desc = chan->head;
		dma_addr_t next_dma;

		next_dma = desc_read(desc, hw_next);
		chan->head = desc_from_phys(pool, next_dma);
		chan->count--;
		chan->stats.teardown_dequeue++;

		/* issue callback without locks held */
		spin_unlock_irqrestore(&chan->lock, flags);
		__cpdma_chan_free(chan, desc, 0, -ENOSYS);
		spin_lock_irqsave(&chan->lock, flags);
	}

	chan->state = CPDMA_STATE_IDLE;
	spin_unlock_irqrestore(&chan->lock, flags);
	return 0;
}
EXPORT_SYMBOL_GPL(cpdma_chan_stop);

int cpdma_chan_int_ctrl(struct cpdma_chan *chan, bool enable)
{
	unsigned long flags;

	spin_lock_irqsave(&chan->lock, flags);
	if (chan->state != CPDMA_STATE_ACTIVE) {
		spin_unlock_irqrestore(&chan->lock, flags);
		return -EINVAL;
	}

	dma_reg_write(chan->ctlr, enable ? chan->int_set : chan->int_clear,
		      chan->mask);
	spin_unlock_irqrestore(&chan->lock, flags);

	return 0;
}

struct cpdma_control_info {
	u32		reg;
	u32		shift, mask;
	int		access;
#define ACCESS_RO	BIT(0)
#define ACCESS_WO	BIT(1)
#define ACCESS_RW	(ACCESS_RO | ACCESS_WO)
};

struct cpdma_control_info controls[] = {
	[CPDMA_CMD_IDLE]	  = {CPDMA_DMACONTROL,	3,  1,      ACCESS_WO},
	[CPDMA_COPY_ERROR_FRAMES] = {CPDMA_DMACONTROL,	4,  1,      ACCESS_RW},
	[CPDMA_RX_OFF_LEN_UPDATE] = {CPDMA_DMACONTROL,	2,  1,      ACCESS_RW},
	[CPDMA_RX_OWNERSHIP_FLIP] = {CPDMA_DMACONTROL,	1,  1,      ACCESS_RW},
	[CPDMA_TX_PRIO_FIXED]	  = {CPDMA_DMACONTROL,	0,  1,      ACCESS_RW},
	[CPDMA_STAT_IDLE]	  = {CPDMA_DMASTATUS,	31, 1,      ACCESS_RO},
	[CPDMA_STAT_TX_ERR_CODE]  = {CPDMA_DMASTATUS,	20, 0xf,    ACCESS_RW},
	[CPDMA_STAT_TX_ERR_CHAN]  = {CPDMA_DMASTATUS,	16, 0x7,    ACCESS_RW},
	[CPDMA_STAT_RX_ERR_CODE]  = {CPDMA_DMASTATUS,	12, 0xf,    ACCESS_RW},
	[CPDMA_STAT_RX_ERR_CHAN]  = {CPDMA_DMASTATUS,	8,  0x7,    ACCESS_RW},
	[CPDMA_RX_BUFFER_OFFSET]  = {CPDMA_RXBUFFOFS,	0,  0xffff, ACCESS_RW},
};

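/*
 * Both accessors below check has_ext_regs, the controller state, the
 * control index bounds and the per-control access rights before
 * touching the register.  For example, to apply a 2-byte rx buffer
 * offset (illustrative only; the value must fit the 16-bit mask
 * declared above):
 *
 *	ret = cpdma_control_set(ctlr, CPDMA_RX_BUFFER_OFFSET, 2);
 */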
int cpdma_control_get(struct cpdma_ctlr *ctlr, int control)
{
	unsigned long flags;
	struct cpdma_control_info *info = &controls[control];
	int ret;

	spin_lock_irqsave(&ctlr->lock, flags);

	ret = -ENOTSUPP;
	if (!ctlr->params.has_ext_regs)
		goto unlock_ret;

	ret = -EINVAL;
	if (ctlr->state != CPDMA_STATE_ACTIVE)
		goto unlock_ret;

	ret = -ENOENT;
	if (control < 0 || control >= ARRAY_SIZE(controls))
		goto unlock_ret;

	ret = -EPERM;
	if ((info->access & ACCESS_RO) != ACCESS_RO)
		goto unlock_ret;

	ret = (dma_reg_read(ctlr, info->reg) >> info->shift) & info->mask;

unlock_ret:
	spin_unlock_irqrestore(&ctlr->lock, flags);
	return ret;
}

int cpdma_control_set(struct cpdma_ctlr *ctlr, int control, int value)
{
	unsigned long flags;
	struct cpdma_control_info *info = &controls[control];
	int ret;
	u32 val;

	spin_lock_irqsave(&ctlr->lock, flags);

	ret = -ENOTSUPP;
	if (!ctlr->params.has_ext_regs)
		goto unlock_ret;

	ret = -EINVAL;
	if (ctlr->state != CPDMA_STATE_ACTIVE)
		goto unlock_ret;

	ret = -ENOENT;
	if (control < 0 || control >= ARRAY_SIZE(controls))
		goto unlock_ret;

	ret = -EPERM;
	if ((info->access & ACCESS_WO) != ACCESS_WO)
		goto unlock_ret;

	val  = dma_reg_read(ctlr, info->reg);
	val &= ~(info->mask << info->shift);
	val |= (value & info->mask) << info->shift;
	dma_reg_write(ctlr, info->reg, val);
	ret = 0;

unlock_ret:
	spin_unlock_irqrestore(&ctlr->lock, flags);
	return ret;
}
EXPORT_SYMBOL_GPL(cpdma_control_set);

MODULE_LICENSE("GPL");