/* drivers/net/ethernet/ti/davinci_cpdma.c */

/*
 * Texas Instruments CPDMA Driver
 *
 * Copyright (C) 2010 Texas Instruments
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation version 2.
 *
 * This program is distributed "as is" WITHOUT ANY WARRANTY of any
 * kind, whether express or implied; without even the implied warranty
 * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */
#include <linux/kernel.h>
#include <linux/spinlock.h>
#include <linux/device.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/err.h>
#include <linux/dma-mapping.h>
#include <linux/io.h>

#include "davinci_cpdma.h"

/* DMA Registers */
#define CPDMA_TXIDVER		0x00
#define CPDMA_TXCONTROL		0x04
#define CPDMA_TXTEARDOWN	0x08
#define CPDMA_RXIDVER		0x10
#define CPDMA_RXCONTROL		0x14
#define CPDMA_SOFTRESET		0x1c
#define CPDMA_RXTEARDOWN	0x18
#define CPDMA_TXINTSTATRAW	0x80
#define CPDMA_TXINTSTATMASKED	0x84
#define CPDMA_TXINTMASKSET	0x88
#define CPDMA_TXINTMASKCLEAR	0x8c
#define CPDMA_MACINVECTOR	0x90
#define CPDMA_MACEOIVECTOR	0x94
#define CPDMA_RXINTSTATRAW	0xa0
#define CPDMA_RXINTSTATMASKED	0xa4
#define CPDMA_RXINTMASKSET	0xa8
#define CPDMA_RXINTMASKCLEAR	0xac
#define CPDMA_DMAINTSTATRAW	0xb0
#define CPDMA_DMAINTSTATMASKED	0xb4
#define CPDMA_DMAINTMASKSET	0xb8
#define CPDMA_DMAINTMASKCLEAR	0xbc
#define CPDMA_DMAINT_HOSTERR	BIT(1)

/* the following exist only if has_ext_regs is set */
#define CPDMA_DMACONTROL	0x20
#define CPDMA_DMASTATUS		0x24
#define CPDMA_RXBUFFOFS		0x28
#define CPDMA_EM_CONTROL	0x2c

/* Descriptor mode bits */
#define CPDMA_DESC_SOP		BIT(31)
#define CPDMA_DESC_EOP		BIT(30)
#define CPDMA_DESC_OWNER	BIT(29)
#define CPDMA_DESC_EOQ		BIT(28)
#define CPDMA_DESC_TD_COMPLETE	BIT(27)
#define CPDMA_DESC_PASS_CRC	BIT(26)
#define CPDMA_DESC_TO_PORT_EN	BIT(20)
#define CPDMA_TO_PORT_SHIFT	16
#define CPDMA_DESC_PORT_MASK	(BIT(18) | BIT(17) | BIT(16))

#define CPDMA_TEARDOWN_VALUE	0xfffffffc

struct cpdma_desc {
	/* hardware fields */
	u32 hw_next;
	u32 hw_buffer;
	u32 hw_len;
	u32 hw_mode;
	/* software fields */
	void *sw_token;
	u32 sw_buffer;
	u32 sw_len;
};

struct cpdma_desc_pool {
	u32 phys;
	u32 hw_addr;
	void __iomem *iomap;	/* ioremap map */
	void *cpumap;		/* dma_alloc map */
	int desc_size, mem_size;
	int num_desc, used_desc;
	unsigned long *bitmap;
	struct device *dev;
	spinlock_t lock;
};

enum cpdma_state {
	CPDMA_STATE_IDLE,
	CPDMA_STATE_ACTIVE,
	CPDMA_STATE_TEARDOWN,
};

static const char *cpdma_state_str[] = { "idle", "active", "teardown" };

struct cpdma_ctlr {
	enum cpdma_state state;
	struct cpdma_params params;
	struct device *dev;
	struct cpdma_desc_pool *pool;
	spinlock_t lock;
	struct cpdma_chan *channels[2 * CPDMA_MAX_CHANNELS];
};

struct cpdma_chan {
	struct cpdma_desc __iomem *head, *tail;
	void __iomem *hdp, *cp, *rxfree;
	enum cpdma_state state;
	struct cpdma_ctlr *ctlr;
	int chan_num;
	spinlock_t lock;
	int count;
	u32 mask;
	cpdma_handler_fn handler;
	enum dma_data_direction dir;
	struct cpdma_chan_stats stats;
	/* offsets into dmaregs */
	int int_set, int_clear, td;
};

/* The following make access to common cpdma_ctlr params more readable */
#define dmaregs		params.dmaregs
#define num_chan	params.num_chan

/* various accessors */
#define dma_reg_read(ctlr, ofs)		__raw_readl((ctlr)->dmaregs + (ofs))
#define chan_read(chan, fld)		__raw_readl((chan)->fld)
#define desc_read(desc, fld)		__raw_readl(&(desc)->fld)
#define dma_reg_write(ctlr, ofs, v)	__raw_writel(v, (ctlr)->dmaregs + (ofs))
#define chan_write(chan, fld, v)	__raw_writel(v, (chan)->fld)
#define desc_write(desc, fld, v)	__raw_writel((u32)(v), &(desc)->fld)

#define cpdma_desc_to_port(chan, mode, directed)			\
	do {								\
		if (!is_rx_chan(chan) && ((directed == 1) ||		\
					  (directed == 2)))		\
			mode |= (CPDMA_DESC_TO_PORT_EN |		\
				 (directed << CPDMA_TO_PORT_SHIFT));	\
	} while (0)

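/*
 * Example (hypothetical TX-path caller, for illustration only): when a
 * packet must be steered to a specific slave port (dual-EMAC style
 * operation), the submit path builds the mode word and lets the macro
 * above fold the port number in:
 *
 *	u32 mode = CPDMA_DESC_OWNER | CPDMA_DESC_SOP | CPDMA_DESC_EOP;
 *	cpdma_desc_to_port(chan, mode, 1);
 *
 * For directed == 1 on a TX channel this ORs in CPDMA_DESC_TO_PORT_EN and
 * (1 << CPDMA_TO_PORT_SHIFT); directed == 0 leaves mode untouched so the
 * switch chooses the egress port itself.
 */
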
/*
 * Utility constructs for a cpdma descriptor pool. Some devices (e.g. davinci
 * emac) have dedicated on-chip memory for these descriptors. Some other
 * devices (e.g. cpsw switches) use plain old memory. Descriptor pools
 * abstract out these details
 */
static struct cpdma_desc_pool *
cpdma_desc_pool_create(struct device *dev, u32 phys, u32 hw_addr,
		       int size, int align)
{
	int bitmap_size;
	struct cpdma_desc_pool *pool;

	pool = kzalloc(sizeof(*pool), GFP_KERNEL);
	if (!pool)
		return NULL;

	spin_lock_init(&pool->lock);

	pool->dev = dev;
	pool->mem_size = size;
	pool->desc_size = ALIGN(sizeof(struct cpdma_desc), align);
	pool->num_desc = size / pool->desc_size;

	bitmap_size = (pool->num_desc / BITS_PER_LONG) * sizeof(long);
	pool->bitmap = kzalloc(bitmap_size, GFP_KERNEL);
	if (!pool->bitmap)
		goto fail;

	if (phys) {
		pool->phys = phys;
		pool->iomap = ioremap(phys, size);
		pool->hw_addr = hw_addr;
	} else {
		pool->cpumap = dma_alloc_coherent(dev, size, &pool->phys,
						  GFP_KERNEL);
		pool->iomap = pool->cpumap;
		pool->hw_addr = pool->phys;
	}

	if (pool->iomap)
		return pool;

fail:
	kfree(pool->bitmap);
	kfree(pool);
	return NULL;
}

static void cpdma_desc_pool_destroy(struct cpdma_desc_pool *pool)
{
	unsigned long flags;

	if (!pool)
		return;

	spin_lock_irqsave(&pool->lock, flags);
	WARN_ON(pool->used_desc);
	kfree(pool->bitmap);
	if (pool->cpumap) {
		dma_free_coherent(pool->dev, pool->mem_size, pool->cpumap,
				  pool->phys);
	} else {
		iounmap(pool->iomap);
	}
	spin_unlock_irqrestore(&pool->lock, flags);
	kfree(pool);
}

static inline dma_addr_t desc_phys(struct cpdma_desc_pool *pool,
				   struct cpdma_desc __iomem *desc)
{
	if (!desc)
		return 0;
	return pool->hw_addr + (__force dma_addr_t)desc -
		(__force dma_addr_t)pool->iomap;
}

static inline struct cpdma_desc __iomem *
desc_from_phys(struct cpdma_desc_pool *pool, dma_addr_t dma)
{
	return dma ? pool->iomap + dma - pool->hw_addr : NULL;
}

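/*
 * Illustration (not from the original sources): desc_phys()/desc_from_phys()
 * translate between a CPU-side pointer into the pool and the address the
 * DMA engine must be given. Assuming a pool mapped at CPU address V with
 * hw_addr H, a descriptor at V + 0x40 is handed to the hardware as H + 0x40,
 * and an hw_next value of H + 0x80 read back from a descriptor maps to
 * V + 0x80. hw_addr exists separately from phys because the DMA engine may
 * see the descriptor memory at a different address than the CPU does.
 */
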
static struct cpdma_desc __iomem *
cpdma_desc_alloc(struct cpdma_desc_pool *pool, int num_desc, bool is_rx)
{
	unsigned long flags;
	int index;
	int desc_start;
	int desc_end;
	struct cpdma_desc __iomem *desc = NULL;

	spin_lock_irqsave(&pool->lock, flags);

	if (is_rx) {
		desc_start = 0;
		desc_end = pool->num_desc/2;
	} else {
		desc_start = pool->num_desc/2;
		desc_end = pool->num_desc;
	}

	index = bitmap_find_next_zero_area(pool->bitmap,
					   desc_end, desc_start, num_desc, 0);
	if (index < desc_end) {
		bitmap_set(pool->bitmap, index, num_desc);
		desc = pool->iomap + pool->desc_size * index;
		pool->used_desc++;
	}

	spin_unlock_irqrestore(&pool->lock, flags);
	return desc;
}

static void cpdma_desc_free(struct cpdma_desc_pool *pool,
			    struct cpdma_desc __iomem *desc, int num_desc)
{
	unsigned long flags, index;

	index = ((unsigned long)desc - (unsigned long)pool->iomap) /
		pool->desc_size;
	spin_lock_irqsave(&pool->lock, flags);
	bitmap_clear(pool->bitmap, index, num_desc);
	pool->used_desc--;
	spin_unlock_irqrestore(&pool->lock, flags);
}

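/*
 * Worked example (numbers are illustrative only): for a pool that holds 512
 * descriptors, cpdma_desc_alloc() serves RX requests from bitmap indices
 * [0, 256) and TX requests from [256, 512). Splitting the pool this way
 * means a burst of transmits can never starve the receive path of
 * descriptors, and vice versa.
 */
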
struct cpdma_ctlr *cpdma_ctlr_create(struct cpdma_params *params)
{
	struct cpdma_ctlr *ctlr;

	ctlr = kzalloc(sizeof(*ctlr), GFP_KERNEL);
	if (!ctlr)
		return NULL;

	ctlr->state = CPDMA_STATE_IDLE;
	ctlr->params = *params;
	ctlr->dev = params->dev;
	spin_lock_init(&ctlr->lock);

	ctlr->pool = cpdma_desc_pool_create(ctlr->dev,
					    ctlr->params.desc_mem_phys,
					    ctlr->params.desc_hw_addr,
					    ctlr->params.desc_mem_size,
					    ctlr->params.desc_align);
	if (!ctlr->pool) {
		kfree(ctlr);
		return NULL;
	}

	if (WARN_ON(ctlr->num_chan > CPDMA_MAX_CHANNELS))
		ctlr->num_chan = CPDMA_MAX_CHANNELS;
	return ctlr;
}
EXPORT_SYMBOL_GPL(cpdma_ctlr_create);
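
/*
 * Minimal usage sketch (hypothetical probe path of a client driver such as
 * cpsw or davinci_emac; the register offsets and values below are made up
 * for illustration). The client fills a struct cpdma_params from its
 * register mapping, then creates the controller and starts it later, e.g.
 * from ndo_open:
 *
 *	struct cpdma_params dma_params = {
 *		.dev		 = &pdev->dev,
 *		.dmaregs	 = regs + DMA_REG_OFS,
 *		.txhdp		 = regs + TXHDP_OFS,
 *		.rxhdp		 = regs + RXHDP_OFS,
 *		.txcp		 = regs + TXCP_OFS,
 *		.rxcp		 = regs + RXCP_OFS,
 *		.rxfree		 = regs + RXFREE_OFS,
 *		.num_chan	 = 8,
 *		.min_packet_size = 60,
 *		.desc_mem_size	 = SZ_8K,
 *		.desc_align	 = 16,
 *		.has_soft_reset	 = true,
 *	};
 *	struct cpdma_ctlr *dma = cpdma_ctlr_create(&dma_params);
 *
 *	if (!dma)
 *		return -ENOMEM;
 *	cpdma_ctlr_start(dma);
 */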

int cpdma_ctlr_start(struct cpdma_ctlr *ctlr)
{
	unsigned long flags;
	int i;

	spin_lock_irqsave(&ctlr->lock, flags);
	if (ctlr->state != CPDMA_STATE_IDLE) {
		spin_unlock_irqrestore(&ctlr->lock, flags);
		return -EBUSY;
	}

	if (ctlr->params.has_soft_reset) {
		unsigned long timeout = jiffies + HZ/10;

		dma_reg_write(ctlr, CPDMA_SOFTRESET, 1);
		while (time_before(jiffies, timeout)) {
			if (dma_reg_read(ctlr, CPDMA_SOFTRESET) == 0)
				break;
		}
		WARN_ON(!time_before(jiffies, timeout));
	}

	for (i = 0; i < ctlr->num_chan; i++) {
		__raw_writel(0, ctlr->params.txhdp + 4 * i);
		__raw_writel(0, ctlr->params.rxhdp + 4 * i);
		__raw_writel(0, ctlr->params.txcp + 4 * i);
		__raw_writel(0, ctlr->params.rxcp + 4 * i);
	}

	dma_reg_write(ctlr, CPDMA_RXINTMASKCLEAR, 0xffffffff);
	dma_reg_write(ctlr, CPDMA_TXINTMASKCLEAR, 0xffffffff);

	dma_reg_write(ctlr, CPDMA_TXCONTROL, 1);
	dma_reg_write(ctlr, CPDMA_RXCONTROL, 1);

	ctlr->state = CPDMA_STATE_ACTIVE;

	for (i = 0; i < ARRAY_SIZE(ctlr->channels); i++) {
		if (ctlr->channels[i])
			cpdma_chan_start(ctlr->channels[i]);
	}
	spin_unlock_irqrestore(&ctlr->lock, flags);
	return 0;
}
EXPORT_SYMBOL_GPL(cpdma_ctlr_start);

int cpdma_ctlr_stop(struct cpdma_ctlr *ctlr)
{
	unsigned long flags;
	int i;

	spin_lock_irqsave(&ctlr->lock, flags);
	if (ctlr->state != CPDMA_STATE_ACTIVE) {
		spin_unlock_irqrestore(&ctlr->lock, flags);
		return -EINVAL;
	}

	ctlr->state = CPDMA_STATE_TEARDOWN;

	for (i = 0; i < ARRAY_SIZE(ctlr->channels); i++) {
		if (ctlr->channels[i])
			cpdma_chan_stop(ctlr->channels[i]);
	}

	dma_reg_write(ctlr, CPDMA_RXINTMASKCLEAR, 0xffffffff);
	dma_reg_write(ctlr, CPDMA_TXINTMASKCLEAR, 0xffffffff);

	dma_reg_write(ctlr, CPDMA_TXCONTROL, 0);
	dma_reg_write(ctlr, CPDMA_RXCONTROL, 0);

	ctlr->state = CPDMA_STATE_IDLE;

	spin_unlock_irqrestore(&ctlr->lock, flags);
	return 0;
}
EXPORT_SYMBOL_GPL(cpdma_ctlr_stop);

int cpdma_ctlr_dump(struct cpdma_ctlr *ctlr)
{
	struct device *dev = ctlr->dev;
	unsigned long flags;
	int i;

	spin_lock_irqsave(&ctlr->lock, flags);

	dev_info(dev, "CPDMA: state: %s", cpdma_state_str[ctlr->state]);

	dev_info(dev, "CPDMA: txidver: %x",
		 dma_reg_read(ctlr, CPDMA_TXIDVER));
	dev_info(dev, "CPDMA: txcontrol: %x",
		 dma_reg_read(ctlr, CPDMA_TXCONTROL));
	dev_info(dev, "CPDMA: txteardown: %x",
		 dma_reg_read(ctlr, CPDMA_TXTEARDOWN));
	dev_info(dev, "CPDMA: rxidver: %x",
		 dma_reg_read(ctlr, CPDMA_RXIDVER));
	dev_info(dev, "CPDMA: rxcontrol: %x",
		 dma_reg_read(ctlr, CPDMA_RXCONTROL));
	dev_info(dev, "CPDMA: softreset: %x",
		 dma_reg_read(ctlr, CPDMA_SOFTRESET));
	dev_info(dev, "CPDMA: rxteardown: %x",
		 dma_reg_read(ctlr, CPDMA_RXTEARDOWN));
	dev_info(dev, "CPDMA: txintstatraw: %x",
		 dma_reg_read(ctlr, CPDMA_TXINTSTATRAW));
	dev_info(dev, "CPDMA: txintstatmasked: %x",
		 dma_reg_read(ctlr, CPDMA_TXINTSTATMASKED));
	dev_info(dev, "CPDMA: txintmaskset: %x",
		 dma_reg_read(ctlr, CPDMA_TXINTMASKSET));
	dev_info(dev, "CPDMA: txintmaskclear: %x",
		 dma_reg_read(ctlr, CPDMA_TXINTMASKCLEAR));
	dev_info(dev, "CPDMA: macinvector: %x",
		 dma_reg_read(ctlr, CPDMA_MACINVECTOR));
	dev_info(dev, "CPDMA: maceoivector: %x",
		 dma_reg_read(ctlr, CPDMA_MACEOIVECTOR));
	dev_info(dev, "CPDMA: rxintstatraw: %x",
		 dma_reg_read(ctlr, CPDMA_RXINTSTATRAW));
	dev_info(dev, "CPDMA: rxintstatmasked: %x",
		 dma_reg_read(ctlr, CPDMA_RXINTSTATMASKED));
	dev_info(dev, "CPDMA: rxintmaskset: %x",
		 dma_reg_read(ctlr, CPDMA_RXINTMASKSET));
	dev_info(dev, "CPDMA: rxintmaskclear: %x",
		 dma_reg_read(ctlr, CPDMA_RXINTMASKCLEAR));
	dev_info(dev, "CPDMA: dmaintstatraw: %x",
		 dma_reg_read(ctlr, CPDMA_DMAINTSTATRAW));
	dev_info(dev, "CPDMA: dmaintstatmasked: %x",
		 dma_reg_read(ctlr, CPDMA_DMAINTSTATMASKED));
	dev_info(dev, "CPDMA: dmaintmaskset: %x",
		 dma_reg_read(ctlr, CPDMA_DMAINTMASKSET));
	dev_info(dev, "CPDMA: dmaintmaskclear: %x",
		 dma_reg_read(ctlr, CPDMA_DMAINTMASKCLEAR));

	if (!ctlr->params.has_ext_regs) {
		dev_info(dev, "CPDMA: dmacontrol: %x",
			 dma_reg_read(ctlr, CPDMA_DMACONTROL));
		dev_info(dev, "CPDMA: dmastatus: %x",
			 dma_reg_read(ctlr, CPDMA_DMASTATUS));
		dev_info(dev, "CPDMA: rxbuffofs: %x",
			 dma_reg_read(ctlr, CPDMA_RXBUFFOFS));
	}

	for (i = 0; i < ARRAY_SIZE(ctlr->channels); i++)
		if (ctlr->channels[i])
			cpdma_chan_dump(ctlr->channels[i]);

	spin_unlock_irqrestore(&ctlr->lock, flags);
	return 0;
}
EXPORT_SYMBOL_GPL(cpdma_ctlr_dump);

int cpdma_ctlr_destroy(struct cpdma_ctlr *ctlr)
{
	unsigned long flags;
	int ret = 0, i;

	if (!ctlr)
		return -EINVAL;

	spin_lock_irqsave(&ctlr->lock, flags);
	if (ctlr->state != CPDMA_STATE_IDLE)
		cpdma_ctlr_stop(ctlr);

	for (i = 0; i < ARRAY_SIZE(ctlr->channels); i++)
		cpdma_chan_destroy(ctlr->channels[i]);

	cpdma_desc_pool_destroy(ctlr->pool);
	spin_unlock_irqrestore(&ctlr->lock, flags);
	kfree(ctlr);
	return ret;
}
EXPORT_SYMBOL_GPL(cpdma_ctlr_destroy);

int cpdma_ctlr_int_ctrl(struct cpdma_ctlr *ctlr, bool enable)
{
	unsigned long flags;
	int i, reg;

	spin_lock_irqsave(&ctlr->lock, flags);
	if (ctlr->state != CPDMA_STATE_ACTIVE) {
		spin_unlock_irqrestore(&ctlr->lock, flags);
		return -EINVAL;
	}

	reg = enable ? CPDMA_DMAINTMASKSET : CPDMA_DMAINTMASKCLEAR;
	dma_reg_write(ctlr, reg, CPDMA_DMAINT_HOSTERR);

	for (i = 0; i < ARRAY_SIZE(ctlr->channels); i++) {
		if (ctlr->channels[i])
			cpdma_chan_int_ctrl(ctlr->channels[i], enable);
	}

	spin_unlock_irqrestore(&ctlr->lock, flags);
	return 0;
}

void cpdma_ctlr_eoi(struct cpdma_ctlr *ctlr)
{
	dma_reg_write(ctlr, CPDMA_MACEOIVECTOR, 0);
}

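/*
 * Usage sketch (hypothetical interrupt handler; everything except the cpdma
 * calls is made up for illustration). A typical client masks the channel
 * interrupts, acknowledges the interrupt line with an EOI write and defers
 * the actual descriptor processing to NAPI:
 *
 *	static irqreturn_t my_interrupt(int irq, void *dev_id)
 *	{
 *		struct my_priv *priv = dev_id;
 *
 *		cpdma_ctlr_int_ctrl(priv->dma, false);
 *		cpdma_ctlr_eoi(priv->dma);
 *		napi_schedule(&priv->napi);
 *		return IRQ_HANDLED;
 *	}
 */
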
struct cpdma_chan *cpdma_chan_create(struct cpdma_ctlr *ctlr, int chan_num,
				     cpdma_handler_fn handler)
{
	struct cpdma_chan *chan;
	int ret, offset = (chan_num % CPDMA_MAX_CHANNELS) * 4;
	unsigned long flags;

	if (__chan_linear(chan_num) >= ctlr->num_chan)
		return NULL;

	ret = -ENOMEM;
	chan = kzalloc(sizeof(*chan), GFP_KERNEL);
	if (!chan)
		goto err_chan_alloc;

	spin_lock_irqsave(&ctlr->lock, flags);
	ret = -EBUSY;
	if (ctlr->channels[chan_num])
		goto err_chan_busy;

	chan->ctlr = ctlr;
	chan->state = CPDMA_STATE_IDLE;
	chan->chan_num = chan_num;
	chan->handler = handler;

	if (is_rx_chan(chan)) {
		chan->hdp = ctlr->params.rxhdp + offset;
		chan->cp = ctlr->params.rxcp + offset;
		chan->rxfree = ctlr->params.rxfree + offset;
		chan->int_set = CPDMA_RXINTMASKSET;
		chan->int_clear = CPDMA_RXINTMASKCLEAR;
		chan->td = CPDMA_RXTEARDOWN;
		chan->dir = DMA_FROM_DEVICE;
	} else {
		chan->hdp = ctlr->params.txhdp + offset;
		chan->cp = ctlr->params.txcp + offset;
		chan->int_set = CPDMA_TXINTMASKSET;
		chan->int_clear = CPDMA_TXINTMASKCLEAR;
		chan->td = CPDMA_TXTEARDOWN;
		chan->dir = DMA_TO_DEVICE;
	}
	chan->mask = BIT(chan_linear(chan));

	spin_lock_init(&chan->lock);

	ctlr->channels[chan_num] = chan;
	spin_unlock_irqrestore(&ctlr->lock, flags);
	return chan;

err_chan_busy:
	spin_unlock_irqrestore(&ctlr->lock, flags);
	kfree(chan);
err_chan_alloc:
	return ERR_PTR(ret);
}
EXPORT_SYMBOL_GPL(cpdma_chan_create);
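
/*
 * Usage sketch (hypothetical client code; the handlers, priv fields and
 * channel indices are made up for illustration, while tx_chan_num() and
 * rx_chan_num() are the channel-number helpers from davinci_cpdma.h):
 *
 *	static void my_tx_handler(void *token, int len, int status)
 *	{
 *		dev_kfree_skb_any(token);
 *	}
 *
 *	priv->txch = cpdma_chan_create(dma, tx_chan_num(0), my_tx_handler);
 *	priv->rxch = cpdma_chan_create(dma, rx_chan_num(0), my_rx_handler);
 *
 * The handler is invoked from cpdma_chan_process() with the token that was
 * passed to cpdma_chan_submit(), the completed length and a status value.
 */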

int cpdma_chan_destroy(struct cpdma_chan *chan)
{
	struct cpdma_ctlr *ctlr;
	unsigned long flags;

	if (!chan)
		return -EINVAL;
	ctlr = chan->ctlr;

	spin_lock_irqsave(&ctlr->lock, flags);
	if (chan->state != CPDMA_STATE_IDLE)
		cpdma_chan_stop(chan);
	ctlr->channels[chan->chan_num] = NULL;
	spin_unlock_irqrestore(&ctlr->lock, flags);
	kfree(chan);
	return 0;
}
EXPORT_SYMBOL_GPL(cpdma_chan_destroy);

int cpdma_chan_get_stats(struct cpdma_chan *chan,
			 struct cpdma_chan_stats *stats)
{
	unsigned long flags;
	if (!chan)
		return -EINVAL;
	spin_lock_irqsave(&chan->lock, flags);
	memcpy(stats, &chan->stats, sizeof(*stats));
	spin_unlock_irqrestore(&chan->lock, flags);
	return 0;
}

int cpdma_chan_dump(struct cpdma_chan *chan)
{
	unsigned long flags;
	struct device *dev = chan->ctlr->dev;

	spin_lock_irqsave(&chan->lock, flags);

	dev_info(dev, "channel %d (%s %d) state %s",
		 chan->chan_num, is_rx_chan(chan) ? "rx" : "tx",
		 chan_linear(chan), cpdma_state_str[chan->state]);
	dev_info(dev, "\thdp: %x\n", chan_read(chan, hdp));
	dev_info(dev, "\tcp: %x\n", chan_read(chan, cp));
	if (chan->rxfree) {
		dev_info(dev, "\trxfree: %x\n",
			 chan_read(chan, rxfree));
	}

	dev_info(dev, "\tstats head_enqueue: %d\n",
		 chan->stats.head_enqueue);
	dev_info(dev, "\tstats tail_enqueue: %d\n",
		 chan->stats.tail_enqueue);
	dev_info(dev, "\tstats pad_enqueue: %d\n",
		 chan->stats.pad_enqueue);
	dev_info(dev, "\tstats misqueued: %d\n",
		 chan->stats.misqueued);
	dev_info(dev, "\tstats desc_alloc_fail: %d\n",
		 chan->stats.desc_alloc_fail);
	dev_info(dev, "\tstats pad_alloc_fail: %d\n",
		 chan->stats.pad_alloc_fail);
	dev_info(dev, "\tstats runt_receive_buff: %d\n",
		 chan->stats.runt_receive_buff);
	dev_info(dev, "\tstats runt_transmit_buff: %d\n",
		 chan->stats.runt_transmit_buff);
	dev_info(dev, "\tstats empty_dequeue: %d\n",
		 chan->stats.empty_dequeue);
	dev_info(dev, "\tstats busy_dequeue: %d\n",
		 chan->stats.busy_dequeue);
	dev_info(dev, "\tstats good_dequeue: %d\n",
		 chan->stats.good_dequeue);
	dev_info(dev, "\tstats requeue: %d\n",
		 chan->stats.requeue);
	dev_info(dev, "\tstats teardown_dequeue: %d\n",
		 chan->stats.teardown_dequeue);

	spin_unlock_irqrestore(&chan->lock, flags);
	return 0;
}

static void __cpdma_chan_submit(struct cpdma_chan *chan,
				struct cpdma_desc __iomem *desc)
{
	struct cpdma_ctlr *ctlr = chan->ctlr;
	struct cpdma_desc __iomem *prev = chan->tail;
	struct cpdma_desc_pool *pool = ctlr->pool;
	dma_addr_t desc_dma;
	u32 mode;

	desc_dma = desc_phys(pool, desc);

	/* simple case - idle channel */
	if (!chan->head) {
		chan->stats.head_enqueue++;
		chan->head = desc;
		chan->tail = desc;
		if (chan->state == CPDMA_STATE_ACTIVE)
			chan_write(chan, hdp, desc_dma);
		return;
	}

	/* first chain the descriptor at the tail of the list */
	desc_write(prev, hw_next, desc_dma);
	chan->tail = desc;
	chan->stats.tail_enqueue++;

	/* next check if EOQ has been triggered already */
	mode = desc_read(prev, hw_mode);
	if (((mode & (CPDMA_DESC_EOQ | CPDMA_DESC_OWNER)) == CPDMA_DESC_EOQ) &&
	    (chan->state == CPDMA_STATE_ACTIVE)) {
		desc_write(prev, hw_mode, mode & ~CPDMA_DESC_EOQ);
		chan_write(chan, hdp, desc_dma);
		chan->stats.misqueued++;
	}
}

int cpdma_chan_submit(struct cpdma_chan *chan, void *token, void *data,
		      int len, int directed, gfp_t gfp_mask)
{
	struct cpdma_ctlr *ctlr = chan->ctlr;
	struct cpdma_desc __iomem *desc;
	dma_addr_t buffer;
	unsigned long flags;
	u32 mode;
	int ret = 0;

	spin_lock_irqsave(&chan->lock, flags);

	if (chan->state == CPDMA_STATE_TEARDOWN) {
		ret = -EINVAL;
		goto unlock_ret;
	}

	desc = cpdma_desc_alloc(ctlr->pool, 1, is_rx_chan(chan));
	if (!desc) {
		chan->stats.desc_alloc_fail++;
		ret = -ENOMEM;
		goto unlock_ret;
	}

	if (len < ctlr->params.min_packet_size) {
		len = ctlr->params.min_packet_size;
		chan->stats.runt_transmit_buff++;
	}

	buffer = dma_map_single(ctlr->dev, data, len, chan->dir);
	mode = CPDMA_DESC_OWNER | CPDMA_DESC_SOP | CPDMA_DESC_EOP;
	cpdma_desc_to_port(chan, mode, directed);

	desc_write(desc, hw_next, 0);
	desc_write(desc, hw_buffer, buffer);
	desc_write(desc, hw_len, len);
	desc_write(desc, hw_mode, mode | len);
	desc_write(desc, sw_token, token);
	desc_write(desc, sw_buffer, buffer);
	desc_write(desc, sw_len, len);

	__cpdma_chan_submit(chan, desc);

	if (chan->state == CPDMA_STATE_ACTIVE && chan->rxfree)
		chan_write(chan, rxfree, 1);

	chan->count++;

unlock_ret:
	spin_unlock_irqrestore(&chan->lock, flags);
	return ret;
}
EXPORT_SYMBOL_GPL(cpdma_chan_submit);
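
/*
 * Usage sketch (hypothetical RX refill path; the skb handling and priv
 * fields are made up for illustration). The token is returned unchanged to
 * the channel handler on completion, so drivers typically pass the skb
 * itself:
 *
 *	struct sk_buff *skb = netdev_alloc_skb_ip_align(ndev, rx_buf_len);
 *
 *	if (skb)
 *		ret = cpdma_chan_submit(priv->rxch, skb, skb->data,
 *					skb_tailroom(skb), 0, GFP_KERNEL);
 *
 * directed is 0 for ordinary (non port-directed) traffic; gfp_mask is not
 * used by this implementation.
 */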

bool cpdma_check_free_tx_desc(struct cpdma_chan *chan)
{
	unsigned long flags;
	int index;
	bool ret;
	struct cpdma_ctlr *ctlr = chan->ctlr;
	struct cpdma_desc_pool *pool = ctlr->pool;

	spin_lock_irqsave(&pool->lock, flags);

	index = bitmap_find_next_zero_area(pool->bitmap,
					   pool->num_desc, pool->num_desc/2, 1, 0);

	if (index < pool->num_desc)
		ret = true;
	else
		ret = false;

	spin_unlock_irqrestore(&pool->lock, flags);
	return ret;
}
EXPORT_SYMBOL_GPL(cpdma_check_free_tx_desc);

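/*
 * Sketch of the intended use (hypothetical ndo_start_xmit tail, for
 * illustration): after queueing a packet the driver can stop its TX queue
 * when the TX half of the descriptor pool is exhausted, and wake it again
 * from its TX completion handler:
 *
 *	if (!cpdma_check_free_tx_desc(priv->txch))
 *		netif_stop_queue(ndev);
 */
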
ef8c2dab
CC
750static void __cpdma_chan_free(struct cpdma_chan *chan,
751 struct cpdma_desc __iomem *desc,
752 int outlen, int status)
753{
754 struct cpdma_ctlr *ctlr = chan->ctlr;
755 struct cpdma_desc_pool *pool = ctlr->pool;
756 dma_addr_t buff_dma;
757 int origlen;
758 void *token;
759
760 token = (void *)desc_read(desc, sw_token);
761 buff_dma = desc_read(desc, sw_buffer);
762 origlen = desc_read(desc, sw_len);
763
764 dma_unmap_single(ctlr->dev, buff_dma, origlen, chan->dir);
765 cpdma_desc_free(pool, desc, 1);
766 (*chan->handler)(token, outlen, status);
767}
768
769static int __cpdma_chan_process(struct cpdma_chan *chan)
770{
771 struct cpdma_ctlr *ctlr = chan->ctlr;
772 struct cpdma_desc __iomem *desc;
773 int status, outlen;
774 struct cpdma_desc_pool *pool = ctlr->pool;
775 dma_addr_t desc_dma;
776 unsigned long flags;
777
778 spin_lock_irqsave(&chan->lock, flags);
779
780 desc = chan->head;
781 if (!desc) {
782 chan->stats.empty_dequeue++;
783 status = -ENOENT;
784 goto unlock_ret;
785 }
786 desc_dma = desc_phys(pool, desc);
787
788 status = __raw_readl(&desc->hw_mode);
789 outlen = status & 0x7ff;
790 if (status & CPDMA_DESC_OWNER) {
791 chan->stats.busy_dequeue++;
792 status = -EBUSY;
793 goto unlock_ret;
794 }
f6e135c8
M
795 status = status & (CPDMA_DESC_EOQ | CPDMA_DESC_TD_COMPLETE |
796 CPDMA_DESC_PORT_MASK);
ef8c2dab
CC
797
798 chan->head = desc_from_phys(pool, desc_read(desc, hw_next));
799 chan_write(chan, cp, desc_dma);
800 chan->count--;
801 chan->stats.good_dequeue++;
802
803 if (status & CPDMA_DESC_EOQ) {
804 chan->stats.requeue++;
805 chan_write(chan, hdp, desc_phys(pool, chan->head));
806 }
807
808 spin_unlock_irqrestore(&chan->lock, flags);
809
810 __cpdma_chan_free(chan, desc, outlen, status);
811 return status;
812
813unlock_ret:
814 spin_unlock_irqrestore(&chan->lock, flags);
815 return status;
816}
817
818int cpdma_chan_process(struct cpdma_chan *chan, int quota)
819{
820 int used = 0, ret = 0;
821
822 if (chan->state != CPDMA_STATE_ACTIVE)
823 return -EINVAL;
824
825 while (used < quota) {
826 ret = __cpdma_chan_process(chan);
827 if (ret < 0)
828 break;
829 used++;
830 }
831 return used;
832}
32a6d90b 833EXPORT_SYMBOL_GPL(cpdma_chan_process);
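
/*
 * Usage sketch (hypothetical NAPI poll callback; names other than the cpdma
 * calls are made up for illustration). Completed descriptors are consumed
 * in batches bounded by the NAPI budget, and the per-packet handler passed
 * to cpdma_chan_create() runs once per completed descriptor:
 *
 *	static int my_poll(struct napi_struct *napi, int budget)
 *	{
 *		struct my_priv *priv = container_of(napi, struct my_priv, napi);
 *		int done;
 *
 *		cpdma_chan_process(priv->txch, 128);
 *		done = cpdma_chan_process(priv->rxch, budget);
 *		if (done < budget)
 *			napi_complete(napi);
 *		return done;
 *	}
 */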

int cpdma_chan_start(struct cpdma_chan *chan)
{
	struct cpdma_ctlr *ctlr = chan->ctlr;
	struct cpdma_desc_pool *pool = ctlr->pool;
	unsigned long flags;

	spin_lock_irqsave(&chan->lock, flags);
	if (chan->state != CPDMA_STATE_IDLE) {
		spin_unlock_irqrestore(&chan->lock, flags);
		return -EBUSY;
	}
	if (ctlr->state != CPDMA_STATE_ACTIVE) {
		spin_unlock_irqrestore(&chan->lock, flags);
		return -EINVAL;
	}
	dma_reg_write(ctlr, chan->int_set, chan->mask);
	chan->state = CPDMA_STATE_ACTIVE;
	if (chan->head) {
		chan_write(chan, hdp, desc_phys(pool, chan->head));
		if (chan->rxfree)
			chan_write(chan, rxfree, chan->count);
	}

	spin_unlock_irqrestore(&chan->lock, flags);
	return 0;
}
EXPORT_SYMBOL_GPL(cpdma_chan_start);

int cpdma_chan_stop(struct cpdma_chan *chan)
{
	struct cpdma_ctlr *ctlr = chan->ctlr;
	struct cpdma_desc_pool *pool = ctlr->pool;
	unsigned long flags;
	int ret;
	unsigned long timeout;

	spin_lock_irqsave(&chan->lock, flags);
	if (chan->state != CPDMA_STATE_ACTIVE) {
		spin_unlock_irqrestore(&chan->lock, flags);
		return -EINVAL;
	}

	chan->state = CPDMA_STATE_TEARDOWN;
	dma_reg_write(ctlr, chan->int_clear, chan->mask);

	/* trigger teardown */
	dma_reg_write(ctlr, chan->td, chan_linear(chan));

	/* wait for teardown complete */
	timeout = jiffies + HZ/10;	/* 100 msec */
	while (time_before(jiffies, timeout)) {
		u32 cp = chan_read(chan, cp);
		if ((cp & CPDMA_TEARDOWN_VALUE) == CPDMA_TEARDOWN_VALUE)
			break;
		cpu_relax();
	}
	WARN_ON(!time_before(jiffies, timeout));
	chan_write(chan, cp, CPDMA_TEARDOWN_VALUE);

	/* handle completed packets */
	spin_unlock_irqrestore(&chan->lock, flags);
	do {
		ret = __cpdma_chan_process(chan);
		if (ret < 0)
			break;
	} while ((ret & CPDMA_DESC_TD_COMPLETE) == 0);
	spin_lock_irqsave(&chan->lock, flags);

	/* remaining packets haven't been tx/rx'ed, clean them up */
	while (chan->head) {
		struct cpdma_desc __iomem *desc = chan->head;
		dma_addr_t next_dma;

		next_dma = desc_read(desc, hw_next);
		chan->head = desc_from_phys(pool, next_dma);
		chan->count--;
		chan->stats.teardown_dequeue++;

		/* issue callback without locks held */
		spin_unlock_irqrestore(&chan->lock, flags);
		__cpdma_chan_free(chan, desc, 0, -ENOSYS);
		spin_lock_irqsave(&chan->lock, flags);
	}

	chan->state = CPDMA_STATE_IDLE;
	spin_unlock_irqrestore(&chan->lock, flags);
	return 0;
}
EXPORT_SYMBOL_GPL(cpdma_chan_stop);

int cpdma_chan_int_ctrl(struct cpdma_chan *chan, bool enable)
{
	unsigned long flags;

	spin_lock_irqsave(&chan->lock, flags);
	if (chan->state != CPDMA_STATE_ACTIVE) {
		spin_unlock_irqrestore(&chan->lock, flags);
		return -EINVAL;
	}

	dma_reg_write(chan->ctlr, enable ? chan->int_set : chan->int_clear,
		      chan->mask);
	spin_unlock_irqrestore(&chan->lock, flags);

	return 0;
}

struct cpdma_control_info {
	u32 reg;
	u32 shift, mask;
	int access;
#define ACCESS_RO	BIT(0)
#define ACCESS_WO	BIT(1)
#define ACCESS_RW	(ACCESS_RO | ACCESS_WO)
};

struct cpdma_control_info controls[] = {
	[CPDMA_CMD_IDLE]	  = {CPDMA_DMACONTROL, 3, 1, ACCESS_WO},
	[CPDMA_COPY_ERROR_FRAMES] = {CPDMA_DMACONTROL, 4, 1, ACCESS_RW},
	[CPDMA_RX_OFF_LEN_UPDATE] = {CPDMA_DMACONTROL, 2, 1, ACCESS_RW},
	[CPDMA_RX_OWNERSHIP_FLIP] = {CPDMA_DMACONTROL, 1, 1, ACCESS_RW},
	[CPDMA_TX_PRIO_FIXED]	  = {CPDMA_DMACONTROL, 0, 1, ACCESS_RW},
	[CPDMA_STAT_IDLE]	  = {CPDMA_DMASTATUS, 31, 1, ACCESS_RO},
	[CPDMA_STAT_TX_ERR_CODE]  = {CPDMA_DMASTATUS, 20, 0xf, ACCESS_RW},
	[CPDMA_STAT_TX_ERR_CHAN]  = {CPDMA_DMASTATUS, 16, 0x7, ACCESS_RW},
	[CPDMA_STAT_RX_ERR_CODE]  = {CPDMA_DMASTATUS, 12, 0xf, ACCESS_RW},
	[CPDMA_STAT_RX_ERR_CHAN]  = {CPDMA_DMASTATUS, 8, 0x7, ACCESS_RW},
	[CPDMA_RX_BUFFER_OFFSET]  = {CPDMA_RXBUFFOFS, 0, 0xffff, ACCESS_RW},
};

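/*
 * Worked example (the value is made up for illustration):
 * cpdma_control_set(ctlr, CPDMA_RX_BUFFER_OFFSET, 2) looks up
 * controls[CPDMA_RX_BUFFER_OFFSET] above, i.e.
 * {CPDMA_RXBUFFOFS, 0, 0xffff, ACCESS_RW}, and therefore read-modify-writes
 * the low 16 bits of the RX buffer offset register to 2. Both accessors
 * below fail with -ENOTSUPP unless the controller was created with
 * params.has_ext_regs set, and with -EINVAL unless it is running.
 */
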
int cpdma_control_get(struct cpdma_ctlr *ctlr, int control)
{
	unsigned long flags;
	struct cpdma_control_info *info = &controls[control];
	int ret;

	spin_lock_irqsave(&ctlr->lock, flags);

	ret = -ENOTSUPP;
	if (!ctlr->params.has_ext_regs)
		goto unlock_ret;

	ret = -EINVAL;
	if (ctlr->state != CPDMA_STATE_ACTIVE)
		goto unlock_ret;

	ret = -ENOENT;
	if (control < 0 || control >= ARRAY_SIZE(controls))
		goto unlock_ret;

	ret = -EPERM;
	if ((info->access & ACCESS_RO) != ACCESS_RO)
		goto unlock_ret;

	ret = (dma_reg_read(ctlr, info->reg) >> info->shift) & info->mask;

unlock_ret:
	spin_unlock_irqrestore(&ctlr->lock, flags);
	return ret;
}

int cpdma_control_set(struct cpdma_ctlr *ctlr, int control, int value)
{
	unsigned long flags;
	struct cpdma_control_info *info = &controls[control];
	int ret;
	u32 val;

	spin_lock_irqsave(&ctlr->lock, flags);

	ret = -ENOTSUPP;
	if (!ctlr->params.has_ext_regs)
		goto unlock_ret;

	ret = -EINVAL;
	if (ctlr->state != CPDMA_STATE_ACTIVE)
		goto unlock_ret;

	ret = -ENOENT;
	if (control < 0 || control >= ARRAY_SIZE(controls))
		goto unlock_ret;

	ret = -EPERM;
	if ((info->access & ACCESS_WO) != ACCESS_WO)
		goto unlock_ret;

	val = dma_reg_read(ctlr, info->reg);
	val &= ~(info->mask << info->shift);
	val |= (value & info->mask) << info->shift;
	dma_reg_write(ctlr, info->reg, val);
	ret = 0;

unlock_ret:
	spin_unlock_irqrestore(&ctlr->lock, flags);
	return ret;
}