drivers/dma/mv_xor.c
1/*
2 * offload engine driver for the Marvell XOR engine
3 * Copyright (C) 2007, 2008, Marvell International Ltd.
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms and conditions of the GNU General Public License,
7 * version 2, as published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 * You should have received a copy of the GNU General Public License along with
15 * this program; if not, write to the Free Software Foundation, Inc.,
16 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
17 */
18
19#include <linux/init.h>
20#include <linux/module.h>
21#include <linux/slab.h>
22#include <linux/delay.h>
23#include <linux/dma-mapping.h>
24#include <linux/spinlock.h>
25#include <linux/interrupt.h>
26#include <linux/platform_device.h>
27#include <linux/memory.h>
28#include <plat/mv_xor.h>
29
30#include "dmaengine.h"
31#include "mv_xor.h"
32
33static void mv_xor_issue_pending(struct dma_chan *chan);
34
35#define to_mv_xor_chan(chan) \
36 container_of(chan, struct mv_xor_chan, common)
37
38#define to_mv_xor_device(dev) \
39 container_of(dev, struct mv_xor_device, common)
40
41#define to_mv_xor_slot(tx) \
42 container_of(tx, struct mv_xor_desc_slot, async_tx)
43
44static void mv_desc_init(struct mv_xor_desc_slot *desc, unsigned long flags)
45{
46 struct mv_xor_desc *hw_desc = desc->hw_desc;
47
48 hw_desc->status = (1 << 31);
49 hw_desc->phy_next_desc = 0;
50 hw_desc->desc_command = (1 << 31);
51}
52
53static u32 mv_desc_get_dest_addr(struct mv_xor_desc_slot *desc)
54{
55 struct mv_xor_desc *hw_desc = desc->hw_desc;
56 return hw_desc->phy_dest_addr;
57}
58
59static u32 mv_desc_get_src_addr(struct mv_xor_desc_slot *desc,
60 int src_idx)
61{
62 struct mv_xor_desc *hw_desc = desc->hw_desc;
63 return hw_desc->phy_src_addr[src_idx];
64}
65
66
67static void mv_desc_set_byte_count(struct mv_xor_desc_slot *desc,
68 u32 byte_count)
69{
70 struct mv_xor_desc *hw_desc = desc->hw_desc;
71 hw_desc->byte_count = byte_count;
72}
73
74static void mv_desc_set_next_desc(struct mv_xor_desc_slot *desc,
75 u32 next_desc_addr)
76{
77 struct mv_xor_desc *hw_desc = desc->hw_desc;
78 BUG_ON(hw_desc->phy_next_desc);
79 hw_desc->phy_next_desc = next_desc_addr;
80}
81
82static void mv_desc_clear_next_desc(struct mv_xor_desc_slot *desc)
83{
84 struct mv_xor_desc *hw_desc = desc->hw_desc;
85 hw_desc->phy_next_desc = 0;
86}
87
88static void mv_desc_set_block_fill_val(struct mv_xor_desc_slot *desc, u32 val)
89{
90 desc->value = val;
91}
92
93static void mv_desc_set_dest_addr(struct mv_xor_desc_slot *desc,
94 dma_addr_t addr)
95{
96 struct mv_xor_desc *hw_desc = desc->hw_desc;
97 hw_desc->phy_dest_addr = addr;
98}
99
100static int mv_chan_memset_slot_count(size_t len)
101{
102 return 1;
103}
104
105#define mv_chan_memcpy_slot_count(c) mv_chan_memset_slot_count(c)
106
107static void mv_desc_set_src_addr(struct mv_xor_desc_slot *desc,
108 int index, dma_addr_t addr)
109{
110 struct mv_xor_desc *hw_desc = desc->hw_desc;
111 hw_desc->phy_src_addr[index] = addr;
112 if (desc->type == DMA_XOR)
113 hw_desc->desc_command |= (1 << index);
114}
115
116static u32 mv_chan_get_current_desc(struct mv_xor_chan *chan)
117{
118 return __raw_readl(XOR_CURR_DESC(chan));
119}
120
121static void mv_chan_set_next_descriptor(struct mv_xor_chan *chan,
122 u32 next_desc_addr)
123{
124 __raw_writel(next_desc_addr, XOR_NEXT_DESC(chan));
125}
126
127static void mv_chan_set_dest_pointer(struct mv_xor_chan *chan, u32 desc_addr)
128{
129 __raw_writel(desc_addr, XOR_DEST_POINTER(chan));
130}
131
132static void mv_chan_set_block_size(struct mv_xor_chan *chan, u32 block_size)
133{
134 __raw_writel(block_size, XOR_BLOCK_SIZE(chan));
135}
136
137static void mv_chan_set_value(struct mv_xor_chan *chan, u32 value)
138{
139 __raw_writel(value, XOR_INIT_VALUE_LOW(chan));
140 __raw_writel(value, XOR_INIT_VALUE_HIGH(chan));
141}
142
143static void mv_chan_unmask_interrupts(struct mv_xor_chan *chan)
144{
145 u32 val = __raw_readl(XOR_INTR_MASK(chan));
146 val |= XOR_INTR_MASK_VALUE << (chan->idx * 16);
147 __raw_writel(val, XOR_INTR_MASK(chan));
148}
149
150static u32 mv_chan_get_intr_cause(struct mv_xor_chan *chan)
151{
152 u32 intr_cause = __raw_readl(XOR_INTR_CAUSE(chan));
153 intr_cause = (intr_cause >> (chan->idx * 16)) & 0xFFFF;
154 return intr_cause;
155}
156
157static int mv_is_err_intr(u32 intr_cause)
158{
159 if (intr_cause & ((1<<4)|(1<<5)|(1<<6)|(1<<7)|(1<<8)|(1<<9)))
160 return 1;
161
162 return 0;
163}
164
165static void mv_xor_device_clear_eoc_cause(struct mv_xor_chan *chan)
166{
167 u32 val = ~(1 << (chan->idx * 16));
168 dev_dbg(chan->device->common.dev, "%s, val 0x%08x\n", __func__, val);
169 __raw_writel(val, XOR_INTR_CAUSE(chan));
170}
171
172static void mv_xor_device_clear_err_status(struct mv_xor_chan *chan)
173{
174 u32 val = 0xFFFF0000 >> (chan->idx * 16);
175 __raw_writel(val, XOR_INTR_CAUSE(chan));
176}
177
178static int mv_can_chain(struct mv_xor_desc_slot *desc)
179{
180 struct mv_xor_desc_slot *chain_old_tail = list_entry(
181 desc->chain_node.prev, struct mv_xor_desc_slot, chain_node);
182
183 if (chain_old_tail->type != desc->type)
184 return 0;
185 if (desc->type == DMA_MEMSET)
186 return 0;
187
188 return 1;
189}
190
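/* Select the engine operation (XOR, memcpy or memset) for this channel by
 * rewriting the low three bits of its configuration register.
 */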
191static void mv_set_mode(struct mv_xor_chan *chan,
192 enum dma_transaction_type type)
193{
194 u32 op_mode;
195 u32 config = __raw_readl(XOR_CONFIG(chan));
196
197 switch (type) {
198 case DMA_XOR:
199 op_mode = XOR_OPERATION_MODE_XOR;
200 break;
201 case DMA_MEMCPY:
202 op_mode = XOR_OPERATION_MODE_MEMCPY;
203 break;
204 case DMA_MEMSET:
205 op_mode = XOR_OPERATION_MODE_MEMSET;
206 break;
207 default:
208 dev_printk(KERN_ERR, chan->device->common.dev,
209 "error: unsupported operation %d.\n",
210 type);
211 BUG();
212 return;
213 }
214
215 config &= ~0x7;
216 config |= op_mode;
217 __raw_writel(config, XOR_CONFIG(chan));
218 chan->current_type = type;
219}
220
221static void mv_chan_activate(struct mv_xor_chan *chan)
222{
223 u32 activation;
224
225 dev_dbg(chan->device->common.dev, " activate chan.\n");
226 activation = __raw_readl(XOR_ACTIVATION(chan));
227 activation |= 0x1;
228 __raw_writel(activation, XOR_ACTIVATION(chan));
229}
230
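/* The channel status is reported in bits 5:4 of the activation register;
 * a value of 1 means a descriptor chain is still being processed.
 */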
231static char mv_chan_is_busy(struct mv_xor_chan *chan)
232{
233 u32 state = __raw_readl(XOR_ACTIVATION(chan));
234
235 state = (state >> 4) & 0x3;
236
237 return (state == 1) ? 1 : 0;
238}
239
240static int mv_chan_xor_slot_count(size_t len, int src_cnt)
241{
242 return 1;
243}
244
245/**
246 * mv_xor_free_slots - flags descriptor slots for reuse
247 * @slot: Slot to free
248 * Caller must hold &mv_chan->lock while calling this function
249 */
250static void mv_xor_free_slots(struct mv_xor_chan *mv_chan,
251 struct mv_xor_desc_slot *slot)
252{
253 dev_dbg(mv_chan->device->common.dev, "%s %d slot %p\n",
254 __func__, __LINE__, slot);
255
256 slot->slots_per_op = 0;
257
258}
259
260/*
261 * mv_xor_start_new_chain - program the engine to operate on new chain headed by
262 * sw_desc
263 * Caller must hold &mv_chan->lock while calling this function
264 */
265static void mv_xor_start_new_chain(struct mv_xor_chan *mv_chan,
266 struct mv_xor_desc_slot *sw_desc)
267{
268 dev_dbg(mv_chan->device->common.dev, "%s %d: sw_desc %p\n",
269 __func__, __LINE__, sw_desc);
270 if (sw_desc->type != mv_chan->current_type)
271 mv_set_mode(mv_chan, sw_desc->type);
272
273 if (sw_desc->type == DMA_MEMSET) {
274 /* for memset requests we need to program the engine, no
275 * descriptors used.
276 */
277 struct mv_xor_desc *hw_desc = sw_desc->hw_desc;
278 mv_chan_set_dest_pointer(mv_chan, hw_desc->phy_dest_addr);
279 mv_chan_set_block_size(mv_chan, sw_desc->unmap_len);
280 mv_chan_set_value(mv_chan, sw_desc->value);
281 } else {
282 /* set the hardware chain */
283 mv_chan_set_next_descriptor(mv_chan, sw_desc->async_tx.phys);
284 }
285 mv_chan->pending += sw_desc->slot_cnt;
286 mv_xor_issue_pending(&mv_chan->common);
287}
288
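/* Run the completion side of a finished descriptor: invoke the client
 * callback, perform the deferred dma_unmap_page() calls for the destination
 * and source buffers (unless the submitter asked to skip them), and start
 * any dependent transactions.
 */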
289static dma_cookie_t
290mv_xor_run_tx_complete_actions(struct mv_xor_desc_slot *desc,
291 struct mv_xor_chan *mv_chan, dma_cookie_t cookie)
292{
293 BUG_ON(desc->async_tx.cookie < 0);
294
295 if (desc->async_tx.cookie > 0) {
296 cookie = desc->async_tx.cookie;
297
298 /* call the callback (must not sleep or submit new
299 * operations to this channel)
300 */
301 if (desc->async_tx.callback)
302 desc->async_tx.callback(
303 desc->async_tx.callback_param);
304
305 /* unmap dma addresses
306 * (unmap_single vs unmap_page?)
307 */
308 if (desc->group_head && desc->unmap_len) {
309 struct mv_xor_desc_slot *unmap = desc->group_head;
310 struct device *dev =
311 &mv_chan->device->pdev->dev;
312 u32 len = unmap->unmap_len;
313 enum dma_ctrl_flags flags = desc->async_tx.flags;
314 u32 src_cnt;
315 dma_addr_t addr;
316 dma_addr_t dest;
317
318 src_cnt = unmap->unmap_src_cnt;
319 dest = mv_desc_get_dest_addr(unmap);
320 if (!(flags & DMA_COMPL_SKIP_DEST_UNMAP)) {
321 enum dma_data_direction dir;
322
323 if (src_cnt > 1) /* is xor ? */
324 dir = DMA_BIDIRECTIONAL;
325 else
326 dir = DMA_FROM_DEVICE;
327 dma_unmap_page(dev, dest, len, dir);
328 }
329
330 if (!(flags & DMA_COMPL_SKIP_SRC_UNMAP)) {
331 while (src_cnt--) {
332 addr = mv_desc_get_src_addr(unmap,
333 src_cnt);
334 if (addr == dest)
335 continue;
336 dma_unmap_page(dev, addr, len,
337 DMA_TO_DEVICE);
338 }
339 }
340 desc->group_head = NULL;
341 }
342 }
343
344 /* run dependent operations */
345 dma_run_dependencies(&desc->async_tx);
346
347 return cookie;
348}
349
350static int
351mv_xor_clean_completed_slots(struct mv_xor_chan *mv_chan)
352{
353 struct mv_xor_desc_slot *iter, *_iter;
354
355 dev_dbg(mv_chan->device->common.dev, "%s %d\n", __func__, __LINE__);
356 list_for_each_entry_safe(iter, _iter, &mv_chan->completed_slots,
357 completed_node) {
358
359 if (async_tx_test_ack(&iter->async_tx)) {
360 list_del(&iter->completed_node);
361 mv_xor_free_slots(mv_chan, iter);
362 }
363 }
364 return 0;
365}
366
367static int
368mv_xor_clean_slot(struct mv_xor_desc_slot *desc,
369 struct mv_xor_chan *mv_chan)
370{
371 dev_dbg(mv_chan->device->common.dev, "%s %d: desc %p flags %d\n",
372 __func__, __LINE__, desc, desc->async_tx.flags);
373 list_del(&desc->chain_node);
374 /* the client is allowed to attach dependent operations
375 * until 'ack' is set
376 */
377 if (!async_tx_test_ack(&desc->async_tx)) {
378 /* move this slot to the completed_slots */
379 list_add_tail(&desc->completed_node, &mv_chan->completed_slots);
380 return 0;
381 }
382
383 mv_xor_free_slots(mv_chan, desc);
384 return 0;
385}
386
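/* Walk the descriptor chain from the oldest entry, completing every
 * descriptor the hardware has finished, and restart the engine on the
 * remaining chain if it has gone idle.  Callers must hold &mv_chan->lock;
 * mv_xor_slot_cleanup() below is the locked wrapper.
 */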
387static void __mv_xor_slot_cleanup(struct mv_xor_chan *mv_chan)
388{
389 struct mv_xor_desc_slot *iter, *_iter;
390 dma_cookie_t cookie = 0;
391 int busy = mv_chan_is_busy(mv_chan);
392 u32 current_desc = mv_chan_get_current_desc(mv_chan);
393 int seen_current = 0;
394
395 dev_dbg(mv_chan->device->common.dev, "%s %d\n", __func__, __LINE__);
396 dev_dbg(mv_chan->device->common.dev, "current_desc %x\n", current_desc);
397 mv_xor_clean_completed_slots(mv_chan);
398
399 /* free completed slots from the chain starting with
400 * the oldest descriptor
401 */
402
403 list_for_each_entry_safe(iter, _iter, &mv_chan->chain,
404 chain_node) {
405 prefetch(_iter);
406 prefetch(&_iter->async_tx);
407
408 /* do not advance past the current descriptor loaded into the
409 * hardware channel, subsequent descriptors are either in
410 * process or have not been submitted
411 */
412 if (seen_current)
413 break;
414
415 /* stop the search if we reach the current descriptor and the
416 * channel is busy
417 */
418 if (iter->async_tx.phys == current_desc) {
419 seen_current = 1;
420 if (busy)
421 break;
422 }
423
424 cookie = mv_xor_run_tx_complete_actions(iter, mv_chan, cookie);
425
426 if (mv_xor_clean_slot(iter, mv_chan))
427 break;
428 }
429
430 if ((busy == 0) && !list_empty(&mv_chan->chain)) {
431 struct mv_xor_desc_slot *chain_head;
432 chain_head = list_entry(mv_chan->chain.next,
433 struct mv_xor_desc_slot,
434 chain_node);
435
436 mv_xor_start_new_chain(mv_chan, chain_head);
437 }
438
439 if (cookie > 0)
440 mv_chan->common.completed_cookie = cookie;
441}
442
443static void
444mv_xor_slot_cleanup(struct mv_xor_chan *mv_chan)
445{
446 spin_lock_bh(&mv_chan->lock);
447 __mv_xor_slot_cleanup(mv_chan);
448 spin_unlock_bh(&mv_chan->lock);
449}
450
451static void mv_xor_tasklet(unsigned long data)
452{
453 struct mv_xor_chan *chan = (struct mv_xor_chan *) data;
454 mv_xor_slot_cleanup(chan);
455}
456
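/* Reserve num_slots contiguous software descriptors from the channel's slot
 * list and chain them together; the returned tail carries the group head
 * pointer.  Returns NULL (and schedules the cleanup tasklet) when no
 * suitable run of free slots is found.
 */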
457static struct mv_xor_desc_slot *
458mv_xor_alloc_slots(struct mv_xor_chan *mv_chan, int num_slots,
459 int slots_per_op)
460{
461 struct mv_xor_desc_slot *iter, *_iter, *alloc_start = NULL;
462 LIST_HEAD(chain);
463 int slots_found, retry = 0;
464
465 * start search from the last allocated descriptor
466 * if a contiguous allocation can not be found start searching
467 * from the beginning of the list
468 */
469retry:
470 slots_found = 0;
471 if (retry == 0)
472 iter = mv_chan->last_used;
473 else
474 iter = list_entry(&mv_chan->all_slots,
475 struct mv_xor_desc_slot,
476 slot_node);
477
478 list_for_each_entry_safe_continue(
479 iter, _iter, &mv_chan->all_slots, slot_node) {
480 prefetch(_iter);
481 prefetch(&_iter->async_tx);
482 if (iter->slots_per_op) {
483 /* give up after finding the first busy slot
484 * on the second pass through the list
485 */
486 if (retry)
487 break;
488
489 slots_found = 0;
490 continue;
491 }
492
493 /* start the allocation if the slot is correctly aligned */
494 if (!slots_found++)
495 alloc_start = iter;
496
497 if (slots_found == num_slots) {
498 struct mv_xor_desc_slot *alloc_tail = NULL;
499 struct mv_xor_desc_slot *last_used = NULL;
500 iter = alloc_start;
501 while (num_slots) {
502 int i;
503
504 /* pre-ack all but the last descriptor */
505 async_tx_ack(&iter->async_tx);
506
507 list_add_tail(&iter->chain_node, &chain);
508 alloc_tail = iter;
509 iter->async_tx.cookie = 0;
510 iter->slot_cnt = num_slots;
511 iter->xor_check_result = NULL;
512 for (i = 0; i < slots_per_op; i++) {
513 iter->slots_per_op = slots_per_op - i;
514 last_used = iter;
515 iter = list_entry(iter->slot_node.next,
516 struct mv_xor_desc_slot,
517 slot_node);
518 }
519 num_slots -= slots_per_op;
520 }
521 alloc_tail->group_head = alloc_start;
522 alloc_tail->async_tx.cookie = -EBUSY;
523 list_splice(&chain, &alloc_tail->tx_list);
524 mv_chan->last_used = last_used;
525 mv_desc_clear_next_desc(alloc_start);
526 mv_desc_clear_next_desc(alloc_tail);
527 return alloc_tail;
528 }
529 }
530 if (!retry++)
531 goto retry;
532
533 /* try to free some slots if the allocation fails */
534 tasklet_schedule(&mv_chan->irq_tasklet);
535
536 return NULL;
537}
538
539/************************ DMA engine API functions ****************************/
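/* .tx_submit hook: assign a cookie, splice the descriptor group onto the
 * channel's software chain and, when possible, link it into the hardware
 * chain; otherwise (or when the engine has already consumed the old tail)
 * a new hardware chain is started.
 */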
540static dma_cookie_t
541mv_xor_tx_submit(struct dma_async_tx_descriptor *tx)
542{
543 struct mv_xor_desc_slot *sw_desc = to_mv_xor_slot(tx);
544 struct mv_xor_chan *mv_chan = to_mv_xor_chan(tx->chan);
545 struct mv_xor_desc_slot *grp_start, *old_chain_tail;
546 dma_cookie_t cookie;
547 int new_hw_chain = 1;
548
549 dev_dbg(mv_chan->device->common.dev,
550 "%s sw_desc %p: async_tx %p\n",
551 __func__, sw_desc, &sw_desc->async_tx);
552
553 grp_start = sw_desc->group_head;
554
555 spin_lock_bh(&mv_chan->lock);
556 cookie = dma_cookie_assign(tx);
557
558 if (list_empty(&mv_chan->chain))
559 list_splice_init(&sw_desc->tx_list, &mv_chan->chain);
560 else {
561 new_hw_chain = 0;
562
563 old_chain_tail = list_entry(mv_chan->chain.prev,
564 struct mv_xor_desc_slot,
565 chain_node);
566 list_splice_init(&grp_start->tx_list,
567 &old_chain_tail->chain_node);
568
569 if (!mv_can_chain(grp_start))
570 goto submit_done;
571
572 dev_dbg(mv_chan->device->common.dev, "Append to last desc %x\n",
573 old_chain_tail->async_tx.phys);
574
575 /* fix up the hardware chain */
576 mv_desc_set_next_desc(old_chain_tail, grp_start->async_tx.phys);
577
578 /* if the channel is not busy */
579 if (!mv_chan_is_busy(mv_chan)) {
580 u32 current_desc = mv_chan_get_current_desc(mv_chan);
581 /*
582 * and the current desc is the end of the chain before
583 * the append, then we need to start the channel
584 */
585 if (current_desc == old_chain_tail->async_tx.phys)
586 new_hw_chain = 1;
587 }
588 }
589
590 if (new_hw_chain)
591 mv_xor_start_new_chain(mv_chan, grp_start);
592
593submit_done:
594 spin_unlock_bh(&mv_chan->lock);
595
596 return cookie;
597}
598
599/* returns the number of allocated descriptors */
600static int mv_xor_alloc_chan_resources(struct dma_chan *chan)
601{
602 char *hw_desc;
603 int idx;
604 struct mv_xor_chan *mv_chan = to_mv_xor_chan(chan);
605 struct mv_xor_desc_slot *slot = NULL;
606 struct mv_xor_platform_data *plat_data =
607 mv_chan->device->pdev->dev.platform_data;
608 int num_descs_in_pool = plat_data->pool_size/MV_XOR_SLOT_SIZE;
609
610 /* Allocate descriptor slots */
611 idx = mv_chan->slots_allocated;
612 while (idx < num_descs_in_pool) {
613 slot = kzalloc(sizeof(*slot), GFP_KERNEL);
614 if (!slot) {
615 printk(KERN_INFO "MV XOR Channel only initialized"
616 " %d descriptor slots", idx);
617 break;
618 }
619 hw_desc = (char *) mv_chan->device->dma_desc_pool_virt;
620 slot->hw_desc = (void *) &hw_desc[idx * MV_XOR_SLOT_SIZE];
621
622 dma_async_tx_descriptor_init(&slot->async_tx, chan);
623 slot->async_tx.tx_submit = mv_xor_tx_submit;
624 INIT_LIST_HEAD(&slot->chain_node);
625 INIT_LIST_HEAD(&slot->slot_node);
626 INIT_LIST_HEAD(&slot->tx_list);
627 hw_desc = (char *) mv_chan->device->dma_desc_pool;
628 slot->async_tx.phys =
629 (dma_addr_t) &hw_desc[idx * MV_XOR_SLOT_SIZE];
630 slot->idx = idx++;
631
632 spin_lock_bh(&mv_chan->lock);
633 mv_chan->slots_allocated = idx;
634 list_add_tail(&slot->slot_node, &mv_chan->all_slots);
635 spin_unlock_bh(&mv_chan->lock);
636 }
637
638 if (mv_chan->slots_allocated && !mv_chan->last_used)
639 mv_chan->last_used = list_entry(mv_chan->all_slots.next,
640 struct mv_xor_desc_slot,
641 slot_node);
642
643 dev_dbg(mv_chan->device->common.dev,
644 "allocated %d descriptor slots last_used: %p\n",
645 mv_chan->slots_allocated, mv_chan->last_used);
646
647 return mv_chan->slots_allocated ? : -ENOMEM;
648}
649
650static struct dma_async_tx_descriptor *
651mv_xor_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
652 size_t len, unsigned long flags)
653{
654 struct mv_xor_chan *mv_chan = to_mv_xor_chan(chan);
655 struct mv_xor_desc_slot *sw_desc, *grp_start;
656 int slot_cnt;
657
658 dev_dbg(mv_chan->device->common.dev,
659 "%s dest: %x src %x len: %u flags: %ld\n",
660 __func__, dest, src, len, flags);
661 if (unlikely(len < MV_XOR_MIN_BYTE_COUNT))
662 return NULL;
663
664 BUG_ON(len > MV_XOR_MAX_BYTE_COUNT);
665
666 spin_lock_bh(&mv_chan->lock);
667 slot_cnt = mv_chan_memcpy_slot_count(len);
668 sw_desc = mv_xor_alloc_slots(mv_chan, slot_cnt, 1);
669 if (sw_desc) {
670 sw_desc->type = DMA_MEMCPY;
671 sw_desc->async_tx.flags = flags;
672 grp_start = sw_desc->group_head;
673 mv_desc_init(grp_start, flags);
674 mv_desc_set_byte_count(grp_start, len);
675 mv_desc_set_dest_addr(sw_desc->group_head, dest);
676 mv_desc_set_src_addr(grp_start, 0, src);
677 sw_desc->unmap_src_cnt = 1;
678 sw_desc->unmap_len = len;
679 }
680 spin_unlock_bh(&mv_chan->lock);
681
682 dev_dbg(mv_chan->device->common.dev,
683 "%s sw_desc %p async_tx %p\n",
684 __func__, sw_desc, sw_desc ? &sw_desc->async_tx : NULL);
685
686 return sw_desc ? &sw_desc->async_tx : NULL;
687}
688
689static struct dma_async_tx_descriptor *
690mv_xor_prep_dma_memset(struct dma_chan *chan, dma_addr_t dest, int value,
691 size_t len, unsigned long flags)
692{
693 struct mv_xor_chan *mv_chan = to_mv_xor_chan(chan);
694 struct mv_xor_desc_slot *sw_desc, *grp_start;
695 int slot_cnt;
696
697 dev_dbg(mv_chan->device->common.dev,
698 "%s dest: %x len: %u flags: %ld\n",
699 __func__, dest, len, flags);
700 if (unlikely(len < MV_XOR_MIN_BYTE_COUNT))
701 return NULL;
702
703 BUG_ON(len > MV_XOR_MAX_BYTE_COUNT);
704
705 spin_lock_bh(&mv_chan->lock);
706 slot_cnt = mv_chan_memset_slot_count(len);
707 sw_desc = mv_xor_alloc_slots(mv_chan, slot_cnt, 1);
708 if (sw_desc) {
709 sw_desc->type = DMA_MEMSET;
710 sw_desc->async_tx.flags = flags;
711 grp_start = sw_desc->group_head;
712 mv_desc_init(grp_start, flags);
713 mv_desc_set_byte_count(grp_start, len);
714 mv_desc_set_dest_addr(sw_desc->group_head, dest);
715 mv_desc_set_block_fill_val(grp_start, value);
716 sw_desc->unmap_src_cnt = 1;
717 sw_desc->unmap_len = len;
718 }
719 spin_unlock_bh(&mv_chan->lock);
720 dev_dbg(mv_chan->device->common.dev,
721 "%s sw_desc %p async_tx %p \n",
722 __func__, sw_desc, &sw_desc->async_tx);
723 return sw_desc ? &sw_desc->async_tx : NULL;
724}
725
726static struct dma_async_tx_descriptor *
727mv_xor_prep_dma_xor(struct dma_chan *chan, dma_addr_t dest, dma_addr_t *src,
728 unsigned int src_cnt, size_t len, unsigned long flags)
729{
730 struct mv_xor_chan *mv_chan = to_mv_xor_chan(chan);
731 struct mv_xor_desc_slot *sw_desc, *grp_start;
732 int slot_cnt;
733
734 if (unlikely(len < MV_XOR_MIN_BYTE_COUNT))
735 return NULL;
736
737 BUG_ON(len > MV_XOR_MAX_BYTE_COUNT);
738
739 dev_dbg(mv_chan->device->common.dev,
740 "%s src_cnt: %d len: dest %x %u flags: %ld\n",
741 __func__, src_cnt, len, dest, flags);
742
743 spin_lock_bh(&mv_chan->lock);
744 slot_cnt = mv_chan_xor_slot_count(len, src_cnt);
745 sw_desc = mv_xor_alloc_slots(mv_chan, slot_cnt, 1);
746 if (sw_desc) {
747 sw_desc->type = DMA_XOR;
748 sw_desc->async_tx.flags = flags;
749 grp_start = sw_desc->group_head;
750 mv_desc_init(grp_start, flags);
751 /* the byte count field is the same as in memcpy desc*/
752 mv_desc_set_byte_count(grp_start, len);
753 mv_desc_set_dest_addr(sw_desc->group_head, dest);
754 sw_desc->unmap_src_cnt = src_cnt;
755 sw_desc->unmap_len = len;
756 while (src_cnt--)
757 mv_desc_set_src_addr(grp_start, src_cnt, src[src_cnt]);
758 }
759 spin_unlock_bh(&mv_chan->lock);
760 dev_dbg(mv_chan->device->common.dev,
761 "%s sw_desc %p async_tx %p \n",
762 __func__, sw_desc, &sw_desc->async_tx);
763 return sw_desc ? &sw_desc->async_tx : NULL;
764}
765
766static void mv_xor_free_chan_resources(struct dma_chan *chan)
767{
768 struct mv_xor_chan *mv_chan = to_mv_xor_chan(chan);
769 struct mv_xor_desc_slot *iter, *_iter;
770 int in_use_descs = 0;
771
772 mv_xor_slot_cleanup(mv_chan);
773
774 spin_lock_bh(&mv_chan->lock);
775 list_for_each_entry_safe(iter, _iter, &mv_chan->chain,
776 chain_node) {
777 in_use_descs++;
778 list_del(&iter->chain_node);
779 }
780 list_for_each_entry_safe(iter, _iter, &mv_chan->completed_slots,
781 completed_node) {
782 in_use_descs++;
783 list_del(&iter->completed_node);
784 }
785 list_for_each_entry_safe_reverse(
786 iter, _iter, &mv_chan->all_slots, slot_node) {
787 list_del(&iter->slot_node);
788 kfree(iter);
789 mv_chan->slots_allocated--;
790 }
791 mv_chan->last_used = NULL;
792
793 dev_dbg(mv_chan->device->common.dev, "%s slots_allocated %d\n",
794 __func__, mv_chan->slots_allocated);
795 spin_unlock_bh(&mv_chan->lock);
796
797 if (in_use_descs)
798 dev_err(mv_chan->device->common.dev,
799 "freeing %d in use descriptors!\n", in_use_descs);
800}
801
802/**
803 * mv_xor_status - poll the status of an XOR transaction
804 * @chan: XOR channel handle
805 * @cookie: XOR transaction identifier
806 * @txstate: XOR transaction state holder (or NULL)
807 */
808static enum dma_status mv_xor_status(struct dma_chan *chan,
809 dma_cookie_t cookie,
810 struct dma_tx_state *txstate)
811{
812 struct mv_xor_chan *mv_chan = to_mv_xor_chan(chan);
813 enum dma_status ret;
814
815 ret = dma_cookie_status(chan, cookie, txstate);
816 if (ret == DMA_SUCCESS) {
817 mv_xor_clean_completed_slots(mv_chan);
818 return ret;
819 }
820 mv_xor_slot_cleanup(mv_chan);
821
822 return dma_cookie_status(chan, cookie, txstate);
823}
824
825static void mv_dump_xor_regs(struct mv_xor_chan *chan)
826{
827 u32 val;
828
829 val = __raw_readl(XOR_CONFIG(chan));
830 dev_printk(KERN_ERR, chan->device->common.dev,
831 "config 0x%08x.\n", val);
832
833 val = __raw_readl(XOR_ACTIVATION(chan));
834 dev_printk(KERN_ERR, chan->device->common.dev,
835 "activation 0x%08x.\n", val);
836
837 val = __raw_readl(XOR_INTR_CAUSE(chan));
838 dev_printk(KERN_ERR, chan->device->common.dev,
839 "intr cause 0x%08x.\n", val);
840
841 val = __raw_readl(XOR_INTR_MASK(chan));
842 dev_printk(KERN_ERR, chan->device->common.dev,
843 "intr mask 0x%08x.\n", val);
844
845 val = __raw_readl(XOR_ERROR_CAUSE(chan));
846 dev_printk(KERN_ERR, chan->device->common.dev,
847 "error cause 0x%08x.\n", val);
848
849 val = __raw_readl(XOR_ERROR_ADDR(chan));
850 dev_printk(KERN_ERR, chan->device->common.dev,
851 "error addr 0x%08x.\n", val);
852}
853
854static void mv_xor_err_interrupt_handler(struct mv_xor_chan *chan,
855 u32 intr_cause)
856{
857 if (intr_cause & (1 << 4)) {
858 dev_dbg(chan->device->common.dev,
859 "ignore this error\n");
860 return;
861 }
862
863 dev_printk(KERN_ERR, chan->device->common.dev,
864 "error on chan %d. intr cause 0x%08x.\n",
865 chan->idx, intr_cause);
866
867 mv_dump_xor_regs(chan);
868 BUG();
869}
870
871static irqreturn_t mv_xor_interrupt_handler(int irq, void *data)
872{
873 struct mv_xor_chan *chan = data;
874 u32 intr_cause = mv_chan_get_intr_cause(chan);
875
876 dev_dbg(chan->device->common.dev, "intr cause %x\n", intr_cause);
877
878 if (mv_is_err_intr(intr_cause))
879 mv_xor_err_interrupt_handler(chan, intr_cause);
880
881 tasklet_schedule(&chan->irq_tasklet);
882
883 mv_xor_device_clear_eoc_cause(chan);
884
885 return IRQ_HANDLED;
886}
887
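/* .device_issue_pending hook: the channel is only (re)activated once the
 * number of pending descriptors reaches MV_XOR_THRESHOLD.
 */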
888static void mv_xor_issue_pending(struct dma_chan *chan)
889{
890 struct mv_xor_chan *mv_chan = to_mv_xor_chan(chan);
891
892 if (mv_chan->pending >= MV_XOR_THRESHOLD) {
893 mv_chan->pending = 0;
894 mv_chan_activate(mv_chan);
895 }
896}
897
898/*
899 * Perform a transaction to verify the HW works.
900 */
901#define MV_XOR_TEST_SIZE 2000
902
903static int __devinit mv_xor_memcpy_self_test(struct mv_xor_device *device)
904{
905 int i;
906 void *src, *dest;
907 dma_addr_t src_dma, dest_dma;
908 struct dma_chan *dma_chan;
909 dma_cookie_t cookie;
910 struct dma_async_tx_descriptor *tx;
911 int err = 0;
912 struct mv_xor_chan *mv_chan;
913
914 src = kmalloc(sizeof(u8) * MV_XOR_TEST_SIZE, GFP_KERNEL);
915 if (!src)
916 return -ENOMEM;
917
918 dest = kzalloc(sizeof(u8) * MV_XOR_TEST_SIZE, GFP_KERNEL);
919 if (!dest) {
920 kfree(src);
921 return -ENOMEM;
922 }
923
924 /* Fill in src buffer */
925 for (i = 0; i < MV_XOR_TEST_SIZE; i++)
926 ((u8 *) src)[i] = (u8)i;
927
928 /* Start copy, using first DMA channel */
929 dma_chan = container_of(device->common.channels.next,
930 struct dma_chan,
931 device_node);
932 if (mv_xor_alloc_chan_resources(dma_chan) < 1) {
933 err = -ENODEV;
934 goto out;
935 }
936
937 dest_dma = dma_map_single(dma_chan->device->dev, dest,
938 MV_XOR_TEST_SIZE, DMA_FROM_DEVICE);
939
940 src_dma = dma_map_single(dma_chan->device->dev, src,
941 MV_XOR_TEST_SIZE, DMA_TO_DEVICE);
942
943 tx = mv_xor_prep_dma_memcpy(dma_chan, dest_dma, src_dma,
944 MV_XOR_TEST_SIZE, 0);
945 cookie = mv_xor_tx_submit(tx);
946 mv_xor_issue_pending(dma_chan);
947 async_tx_ack(tx);
948 msleep(1);
949
950 if (mv_xor_status(dma_chan, cookie, NULL) !=
951 DMA_SUCCESS) {
952 dev_printk(KERN_ERR, dma_chan->device->dev,
953 "Self-test copy timed out, disabling\n");
954 err = -ENODEV;
955 goto free_resources;
956 }
957
958 mv_chan = to_mv_xor_chan(dma_chan);
959 dma_sync_single_for_cpu(&mv_chan->device->pdev->dev, dest_dma,
960 MV_XOR_TEST_SIZE, DMA_FROM_DEVICE);
961 if (memcmp(src, dest, MV_XOR_TEST_SIZE)) {
962 dev_printk(KERN_ERR, dma_chan->device->dev,
963 "Self-test copy failed compare, disabling\n");
964 err = -ENODEV;
965 goto free_resources;
966 }
967
968free_resources:
969 mv_xor_free_chan_resources(dma_chan);
970out:
971 kfree(src);
972 kfree(dest);
973 return err;
974}
975
976#define MV_XOR_NUM_SRC_TEST 4 /* must be <= 15 */
977static int __devinit
978mv_xor_xor_self_test(struct mv_xor_device *device)
979{
980 int i, src_idx;
981 struct page *dest;
982 struct page *xor_srcs[MV_XOR_NUM_SRC_TEST];
983 dma_addr_t dma_srcs[MV_XOR_NUM_SRC_TEST];
984 dma_addr_t dest_dma;
985 struct dma_async_tx_descriptor *tx;
986 struct dma_chan *dma_chan;
987 dma_cookie_t cookie;
988 u8 cmp_byte = 0;
989 u32 cmp_word;
990 int err = 0;
991 struct mv_xor_chan *mv_chan;
992
993 for (src_idx = 0; src_idx < MV_XOR_NUM_SRC_TEST; src_idx++) {
994 xor_srcs[src_idx] = alloc_page(GFP_KERNEL);
995 if (!xor_srcs[src_idx]) {
996 while (src_idx--)
997 __free_page(xor_srcs[src_idx]);
998 return -ENOMEM;
999 }
1000 }
1001
1002 dest = alloc_page(GFP_KERNEL);
1003 if (!dest) {
1004 while (src_idx--)
1005 __free_page(xor_srcs[src_idx]);
1006 return -ENOMEM;
1007 }
1008
1009 /* Fill in src buffers */
1010 for (src_idx = 0; src_idx < MV_XOR_NUM_SRC_TEST; src_idx++) {
1011 u8 *ptr = page_address(xor_srcs[src_idx]);
1012 for (i = 0; i < PAGE_SIZE; i++)
1013 ptr[i] = (1 << src_idx);
1014 }
1015
1016 for (src_idx = 0; src_idx < MV_XOR_NUM_SRC_TEST; src_idx++)
1017 cmp_byte ^= (u8) (1 << src_idx);
1018
1019 cmp_word = (cmp_byte << 24) | (cmp_byte << 16) |
1020 (cmp_byte << 8) | cmp_byte;
1021
1022 memset(page_address(dest), 0, PAGE_SIZE);
1023
1024 dma_chan = container_of(device->common.channels.next,
1025 struct dma_chan,
1026 device_node);
1027 if (mv_xor_alloc_chan_resources(dma_chan) < 1) {
1028 err = -ENODEV;
1029 goto out;
1030 }
1031
1032 /* test xor */
1033 dest_dma = dma_map_page(dma_chan->device->dev, dest, 0, PAGE_SIZE,
1034 DMA_FROM_DEVICE);
1035
1036 for (i = 0; i < MV_XOR_NUM_SRC_TEST; i++)
1037 dma_srcs[i] = dma_map_page(dma_chan->device->dev, xor_srcs[i],
1038 0, PAGE_SIZE, DMA_TO_DEVICE);
1039
1040 tx = mv_xor_prep_dma_xor(dma_chan, dest_dma, dma_srcs,
1041 MV_XOR_NUM_SRC_TEST, PAGE_SIZE, 0);
1042
1043 cookie = mv_xor_tx_submit(tx);
1044 mv_xor_issue_pending(dma_chan);
1045 async_tx_ack(tx);
1046 msleep(8);
1047
1048 if (mv_xor_status(dma_chan, cookie, NULL) !=
1049 DMA_SUCCESS) {
1050 dev_printk(KERN_ERR, dma_chan->device->dev,
1051 "Self-test xor timed out, disabling\n");
1052 err = -ENODEV;
1053 goto free_resources;
1054 }
1055
1056 mv_chan = to_mv_xor_chan(dma_chan);
1057 dma_sync_single_for_cpu(&mv_chan->device->pdev->dev, dest_dma,
1058 PAGE_SIZE, DMA_FROM_DEVICE);
1059 for (i = 0; i < (PAGE_SIZE / sizeof(u32)); i++) {
1060 u32 *ptr = page_address(dest);
1061 if (ptr[i] != cmp_word) {
1062 dev_printk(KERN_ERR, dma_chan->device->dev,
1063 "Self-test xor failed compare, disabling."
1064 " index %d, data %x, expected %x\n", i,
1065 ptr[i], cmp_word);
1066 err = -ENODEV;
1067 goto free_resources;
1068 }
1069 }
1070
1071free_resources:
1072 mv_xor_free_chan_resources(dma_chan);
1073out:
1074 src_idx = MV_XOR_NUM_SRC_TEST;
1075 while (src_idx--)
1076 __free_page(xor_srcs[src_idx]);
1077 __free_page(dest);
1078 return err;
1079}
1080
1081static int __devexit mv_xor_remove(struct platform_device *dev)
1082{
1083 struct mv_xor_device *device = platform_get_drvdata(dev);
1084 struct dma_chan *chan, *_chan;
1085 struct mv_xor_chan *mv_chan;
1086 struct mv_xor_platform_data *plat_data = dev->dev.platform_data;
1087
1088 dma_async_device_unregister(&device->common);
1089
1090 dma_free_coherent(&dev->dev, plat_data->pool_size,
1091 device->dma_desc_pool_virt, device->dma_desc_pool);
1092
1093 list_for_each_entry_safe(chan, _chan, &device->common.channels,
1094 device_node) {
1095 mv_chan = to_mv_xor_chan(chan);
1096 list_del(&chan->device_node);
1097 }
1098
1099 return 0;
1100}
1101
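/* Per-channel probe: allocate the descriptor pool, fill in the dmaengine
 * callbacks according to the capabilities passed in the platform data, hook
 * up the channel interrupt and run the memcpy/xor self-tests before
 * registering the dma_device.
 *
 * Board code is expected to register one MV_XOR_SHARED_NAME device per XOR
 * unit plus one MV_XOR_NAME device per channel whose platform data points
 * back at the shared device.  A minimal sketch, using only the fields this
 * driver reads (the board-side variable names are hypothetical):
 *
 *	static struct mv_xor_platform_data xor0_chan0_data = {
 *		.shared    = &xor0_shared_device,
 *		.hw_id     = 0,
 *		.pool_size = PAGE_SIZE,
 *	};
 *	dma_cap_set(DMA_MEMCPY, xor0_chan0_data.cap_mask);
 *	dma_cap_set(DMA_XOR, xor0_chan0_data.cap_mask);
 */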
1102static int __devinit mv_xor_probe(struct platform_device *pdev)
1103{
1104 int ret = 0;
1105 int irq;
1106 struct mv_xor_device *adev;
1107 struct mv_xor_chan *mv_chan;
1108 struct dma_device *dma_dev;
1109 struct mv_xor_platform_data *plat_data = pdev->dev.platform_data;
1110
1111
1112 adev = devm_kzalloc(&pdev->dev, sizeof(*adev), GFP_KERNEL);
1113 if (!adev)
1114 return -ENOMEM;
1115
1116 dma_dev = &adev->common;
1117
1118 /* allocate coherent memory for hardware descriptors
1119 * note: writecombine gives slightly better performance, but
1120 * requires that we explicitly flush the writes
1121 */
1122 adev->dma_desc_pool_virt = dma_alloc_writecombine(&pdev->dev,
1123 plat_data->pool_size,
1124 &adev->dma_desc_pool,
1125 GFP_KERNEL);
1126 if (!adev->dma_desc_pool_virt)
1127 return -ENOMEM;
1128
1129 adev->id = plat_data->hw_id;
1130
1131 /* discover transaction capabilities from the platform data */
1132 dma_dev->cap_mask = plat_data->cap_mask;
1133 adev->pdev = pdev;
1134 platform_set_drvdata(pdev, adev);
1135
1136 adev->shared = platform_get_drvdata(plat_data->shared);
1137
1138 INIT_LIST_HEAD(&dma_dev->channels);
1139
1140 /* set base routines */
1141 dma_dev->device_alloc_chan_resources = mv_xor_alloc_chan_resources;
1142 dma_dev->device_free_chan_resources = mv_xor_free_chan_resources;
1143 dma_dev->device_tx_status = mv_xor_status;
1144 dma_dev->device_issue_pending = mv_xor_issue_pending;
1145 dma_dev->dev = &pdev->dev;
1146
1147 /* set prep routines based on capability */
1148 if (dma_has_cap(DMA_MEMCPY, dma_dev->cap_mask))
1149 dma_dev->device_prep_dma_memcpy = mv_xor_prep_dma_memcpy;
1150 if (dma_has_cap(DMA_MEMSET, dma_dev->cap_mask))
1151 dma_dev->device_prep_dma_memset = mv_xor_prep_dma_memset;
1152 if (dma_has_cap(DMA_XOR, dma_dev->cap_mask)) {
1153 dma_dev->max_xor = 8;
1154 dma_dev->device_prep_dma_xor = mv_xor_prep_dma_xor;
1155 }
1156
1157 mv_chan = devm_kzalloc(&pdev->dev, sizeof(*mv_chan), GFP_KERNEL);
1158 if (!mv_chan) {
1159 ret = -ENOMEM;
1160 goto err_free_dma;
1161 }
1162 mv_chan->device = adev;
1163 mv_chan->idx = plat_data->hw_id;
1164 mv_chan->mmr_base = adev->shared->xor_base;
1165
1166 if (!mv_chan->mmr_base) {
1167 ret = -ENOMEM;
1168 goto err_free_dma;
1169 }
1170 tasklet_init(&mv_chan->irq_tasklet, mv_xor_tasklet, (unsigned long)
1171 mv_chan);
1172
1173 /* clear errors before enabling interrupts */
1174 mv_xor_device_clear_err_status(mv_chan);
1175
1176 irq = platform_get_irq(pdev, 0);
1177 if (irq < 0) {
1178 ret = irq;
1179 goto err_free_dma;
1180 }
1181 ret = devm_request_irq(&pdev->dev, irq,
1182 mv_xor_interrupt_handler,
1183 0, dev_name(&pdev->dev), mv_chan);
1184 if (ret)
1185 goto err_free_dma;
1186
1187 mv_chan_unmask_interrupts(mv_chan);
1188
1189 mv_set_mode(mv_chan, DMA_MEMCPY);
1190
1191 spin_lock_init(&mv_chan->lock);
1192 INIT_LIST_HEAD(&mv_chan->chain);
1193 INIT_LIST_HEAD(&mv_chan->completed_slots);
1194 INIT_LIST_HEAD(&mv_chan->all_slots);
1195 mv_chan->common.device = dma_dev;
1196
1197 list_add_tail(&mv_chan->common.device_node, &dma_dev->channels);
1198
1199 if (dma_has_cap(DMA_MEMCPY, dma_dev->cap_mask)) {
1200 ret = mv_xor_memcpy_self_test(adev);
1201 dev_dbg(&pdev->dev, "memcpy self test returned %d\n", ret);
1202 if (ret)
1203 goto err_free_dma;
1204 }
1205
1206 if (dma_has_cap(DMA_XOR, dma_dev->cap_mask)) {
1207 ret = mv_xor_xor_self_test(adev);
1208 dev_dbg(&pdev->dev, "xor self test returned %d\n", ret);
1209 if (ret)
1210 goto err_free_dma;
1211 }
1212
1213 dev_printk(KERN_INFO, &pdev->dev, "Marvell XOR: "
1214 "( %s%s%s%s)\n",
1215 dma_has_cap(DMA_XOR, dma_dev->cap_mask) ? "xor " : "",
1216 dma_has_cap(DMA_MEMSET, dma_dev->cap_mask) ? "fill " : "",
1217 dma_has_cap(DMA_MEMCPY, dma_dev->cap_mask) ? "cpy " : "",
1218 dma_has_cap(DMA_INTERRUPT, dma_dev->cap_mask) ? "intr " : "");
1219
1220 dma_async_device_register(dma_dev);
1221 goto out;
1222
1223 err_free_dma:
1224 dma_free_coherent(&adev->pdev->dev, plat_data->pool_size,
1225 adev->dma_desc_pool_virt, adev->dma_desc_pool);
1226 out:
1227 return ret;
1228}
1229
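/* Program the XOR unit's address decoding windows so that every DRAM
 * chip-select reported by the mbus layer is reachable by the engine;
 * all eight windows (and the four high remap registers) are cleared first.
 */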
1230static void
1231mv_xor_conf_mbus_windows(struct mv_xor_shared_private *msp,
1232 const struct mbus_dram_target_info *dram)
1233{
1234 void __iomem *base = msp->xor_base;
1235 u32 win_enable = 0;
1236 int i;
1237
1238 for (i = 0; i < 8; i++) {
1239 writel(0, base + WINDOW_BASE(i));
1240 writel(0, base + WINDOW_SIZE(i));
1241 if (i < 4)
1242 writel(0, base + WINDOW_REMAP_HIGH(i));
1243 }
1244
1245 for (i = 0; i < dram->num_cs; i++) {
1246 const struct mbus_dram_window *cs = dram->cs + i;
1247
1248 writel((cs->base & 0xffff0000) |
1249 (cs->mbus_attr << 8) |
1250 dram->mbus_dram_target_id, base + WINDOW_BASE(i));
1251 writel((cs->size - 1) & 0xffff0000, base + WINDOW_SIZE(i));
1252
1253 win_enable |= (1 << i);
1254 win_enable |= 3 << (16 + (2 * i));
1255 }
1256
1257 writel(win_enable, base + WINDOW_BAR_ENABLE(0));
1258 writel(win_enable, base + WINDOW_BAR_ENABLE(1));
1259}
1260
1261static struct platform_driver mv_xor_driver = {
1262 .probe = mv_xor_probe,
1263 .remove = __devexit_p(mv_xor_remove),
1264 .driver = {
1265 .owner = THIS_MODULE,
1266 .name = MV_XOR_NAME,
1267 },
1268};
1269
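/* Shared (per-unit) probe: map the low and high register banks and, if the
 * mbus layer provides a DRAM layout, program the address decoding windows.
 */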
1270static int mv_xor_shared_probe(struct platform_device *pdev)
1271{
1272 const struct mbus_dram_target_info *dram;
1273 struct mv_xor_shared_private *msp;
1274 struct resource *res;
1275
1276 dev_printk(KERN_NOTICE, &pdev->dev, "Marvell shared XOR driver\n");
1277
1278 msp = devm_kzalloc(&pdev->dev, sizeof(*msp), GFP_KERNEL);
1279 if (!msp)
1280 return -ENOMEM;
1281
1282 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1283 if (!res)
1284 return -ENODEV;
1285
1286 msp->xor_base = devm_ioremap(&pdev->dev, res->start,
1287 resource_size(res));
1288 if (!msp->xor_base)
1289 return -EBUSY;
1290
1291 res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
1292 if (!res)
1293 return -ENODEV;
1294
1295 msp->xor_high_base = devm_ioremap(&pdev->dev, res->start,
1296 resource_size(res));
1297 if (!msp->xor_high_base)
1298 return -EBUSY;
1299
1300 platform_set_drvdata(pdev, msp);
1301
1302 /*
1303 * (Re-)program MBUS remapping windows if we are asked to.
1304 */
1305 dram = mv_mbus_dram_info();
1306 if (dram)
1307 mv_xor_conf_mbus_windows(msp, dram);
1308
1309 return 0;
1310}
1311
1312static int mv_xor_shared_remove(struct platform_device *pdev)
1313{
1314 return 0;
1315}
1316
1317static struct platform_driver mv_xor_shared_driver = {
1318 .probe = mv_xor_shared_probe,
1319 .remove = mv_xor_shared_remove,
1320 .driver = {
1321 .owner = THIS_MODULE,
1322 .name = MV_XOR_SHARED_NAME,
1323 },
1324};
1325
1326
1327static int __init mv_xor_init(void)
1328{
1329 int rc;
1330
1331 rc = platform_driver_register(&mv_xor_shared_driver);
1332 if (!rc) {
1333 rc = platform_driver_register(&mv_xor_driver);
1334 if (rc)
1335 platform_driver_unregister(&mv_xor_shared_driver);
1336 }
1337 return rc;
1338}
1339module_init(mv_xor_init);
1340
1341/* it's currently unsafe to unload this module */
1342#if 0
1343static void __exit mv_xor_exit(void)
1344{
1345 platform_driver_unregister(&mv_xor_driver);
1346 platform_driver_unregister(&mv_xor_shared_driver);
1347 return;
1348}
1349
1350module_exit(mv_xor_exit);
1351#endif
1352
1353MODULE_AUTHOR("Saeed Bishara <saeed@marvell.com>");
1354MODULE_DESCRIPTION("DMA engine driver for Marvell's XOR engine");
1355MODULE_LICENSE("GPL");