/*
 * offload engine driver for the Marvell XOR engine
 * Copyright (C) 2007, 2008, Marvell International Ltd.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
 */
#include <linux/init.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>
#include <linux/platform_device.h>
#include <linux/memory.h>
#include <linux/clk.h>
#include <linux/of.h>
#include <linux/of_irq.h>
#include <linux/irqdomain.h>
#include <linux/platform_data/dma-mv_xor.h>

#include "dmaengine.h"
#include "mv_xor.h"
static void mv_xor_issue_pending(struct dma_chan *chan);

#define to_mv_xor_chan(chan)		\
	container_of(chan, struct mv_xor_chan, dmachan)

#define to_mv_xor_slot(tx)		\
	container_of(tx, struct mv_xor_desc_slot, async_tx)

#define mv_chan_to_devp(chan)		\
	((chan)->dmadev.dev)
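/*
 * The mv_desc_* helpers below poke the in-memory hardware descriptor
 * (struct mv_xor_desc, declared in mv_xor.h) that the engine walks via
 * its phy_next_desc links. Bit meanings are my reading of the Marvell
 * datasheet: bit 31 of the status word marks the descriptor as owned
 * by the DMA engine, and bit 31 of desc_command enables the
 * end-of-descriptor interrupt.
 */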
static void mv_desc_init(struct mv_xor_desc_slot *desc, unsigned long flags)
{
	struct mv_xor_desc *hw_desc = desc->hw_desc;

	hw_desc->status = (1 << 31);
	hw_desc->phy_next_desc = 0;
	hw_desc->desc_command = (1 << 31);
}
static void mv_desc_set_byte_count(struct mv_xor_desc_slot *desc,
				   u32 byte_count)
{
	struct mv_xor_desc *hw_desc = desc->hw_desc;

	hw_desc->byte_count = byte_count;
}
static void mv_desc_set_next_desc(struct mv_xor_desc_slot *desc,
				  u32 next_desc_addr)
{
	struct mv_xor_desc *hw_desc = desc->hw_desc;

	BUG_ON(hw_desc->phy_next_desc);
	hw_desc->phy_next_desc = next_desc_addr;
}
static void mv_desc_clear_next_desc(struct mv_xor_desc_slot *desc)
{
	struct mv_xor_desc *hw_desc = desc->hw_desc;

	hw_desc->phy_next_desc = 0;
}
static void mv_desc_set_dest_addr(struct mv_xor_desc_slot *desc,
				  dma_addr_t addr)
{
	struct mv_xor_desc *hw_desc = desc->hw_desc;

	hw_desc->phy_dest_addr = addr;
}
static int mv_chan_memset_slot_count(size_t len)
{
	return 1;
}

#define mv_chan_memcpy_slot_count(c) mv_chan_memset_slot_count(c)
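/*
 * mv_phy_src_idx() comes from mv_xor.h. My understanding is that it
 * only remaps the source index on big-endian builds, where the
 * XOR_DESCRIPTOR_SWAP mode (see mv_set_mode() below) makes the engine
 * swap each 64-bit word of the descriptor, so adjacent 32-bit source
 * address slots trade places.
 */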
static void mv_desc_set_src_addr(struct mv_xor_desc_slot *desc,
				 int index, dma_addr_t addr)
{
	struct mv_xor_desc *hw_desc = desc->hw_desc;

	hw_desc->phy_src_addr[mv_phy_src_idx(index)] = addr;
	if (desc->type == DMA_XOR)
		hw_desc->desc_command |= (1 << index);
}
static u32 mv_chan_get_current_desc(struct mv_xor_chan *chan)
{
	return readl_relaxed(XOR_CURR_DESC(chan));
}
static void mv_chan_set_next_descriptor(struct mv_xor_chan *chan,
					u32 next_desc_addr)
{
	writel_relaxed(next_desc_addr, XOR_NEXT_DESC(chan));
}
static void mv_chan_unmask_interrupts(struct mv_xor_chan *chan)
{
	u32 val = readl_relaxed(XOR_INTR_MASK(chan));

	val |= XOR_INTR_MASK_VALUE << (chan->idx * 16);
	writel_relaxed(val, XOR_INTR_MASK(chan));
}
static u32 mv_chan_get_intr_cause(struct mv_xor_chan *chan)
{
	u32 intr_cause = readl_relaxed(XOR_INTR_CAUSE(chan));

	intr_cause = (intr_cause >> (chan->idx * 16)) & 0xFFFF;

	return intr_cause;
}
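/*
 * The interrupt cause and mask registers are shared by the channels of
 * one engine; each channel owns a 16-bit field at bit offset
 * (chan->idx * 16), hence the shifts above and below.
 */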
static int mv_is_err_intr(u32 intr_cause)
{
	if (intr_cause & ((1<<4)|(1<<5)|(1<<6)|(1<<7)|(1<<8)|(1<<9)))
		return 1;

	return 0;
}
static void mv_xor_device_clear_eoc_cause(struct mv_xor_chan *chan)
{
	u32 val = ~(1 << (chan->idx * 16));

	dev_dbg(mv_chan_to_devp(chan), "%s, val 0x%08x\n", __func__, val);
	writel_relaxed(val, XOR_INTR_CAUSE(chan));
}
static void mv_xor_device_clear_err_status(struct mv_xor_chan *chan)
{
	u32 val = 0xFFFF0000 >> (chan->idx * 16);

	writel_relaxed(val, XOR_INTR_CAUSE(chan));
}
static int mv_can_chain(struct mv_xor_desc_slot *desc)
{
	struct mv_xor_desc_slot *chain_old_tail = list_entry(
		desc->chain_node.prev, struct mv_xor_desc_slot, chain_node);

	if (chain_old_tail->type != desc->type)
		return 0;

	return 1;
}
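/*
 * Descriptors of different transaction types need different engine
 * configurations, so a hardware chain may only hold one type at a
 * time: mv_can_chain() refuses to append across a type change, and
 * mv_set_mode() below reprograms XOR_CONFIG when the type switches.
 */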
static void mv_set_mode(struct mv_xor_chan *chan,
			enum dma_transaction_type type)
{
	u32 op_mode;
	u32 config = readl_relaxed(XOR_CONFIG(chan));

	switch (type) {
	case DMA_XOR:
		op_mode = XOR_OPERATION_MODE_XOR;
		break;
	case DMA_MEMCPY:
		op_mode = XOR_OPERATION_MODE_MEMCPY;
		break;
	default:
		dev_err(mv_chan_to_devp(chan),
			"error: unsupported operation %d\n",
			type);
		BUG();
		return;
	}

	config &= ~0x7;
	config |= op_mode;

#if defined(__BIG_ENDIAN)
	config |= XOR_DESCRIPTOR_SWAP;
#else
	config &= ~XOR_DESCRIPTOR_SWAP;
#endif

	writel_relaxed(config, XOR_CONFIG(chan));
	chan->current_type = type;
}
static void mv_chan_activate(struct mv_xor_chan *chan)
{
	u32 activation;

	dev_dbg(mv_chan_to_devp(chan), " activate chan.\n");
	activation = readl_relaxed(XOR_ACTIVATION(chan));
	activation |= 0x1;
	writel_relaxed(activation, XOR_ACTIVATION(chan));
}
static char mv_chan_is_busy(struct mv_xor_chan *chan)
{
	u32 state = readl_relaxed(XOR_ACTIVATION(chan));

	state = (state >> 4) & 0x3;

	return (state == 1) ? 1 : 0;
}
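/*
 * The channel state lives in bits [5:4] of XOR_ACTIVATION; going by
 * the datasheet (encoding assumed) 0 means idle, 1 active and 2
 * paused, so only state 1 counts as busy here.
 */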
static int mv_chan_xor_slot_count(size_t len, int src_cnt)
{
	return 1;
}
/**
 * mv_xor_free_slots - flags descriptor slots for reuse
 * @slot: Slot to free
 * Caller must hold &mv_chan->lock while calling this function
 */
static void mv_xor_free_slots(struct mv_xor_chan *mv_chan,
			      struct mv_xor_desc_slot *slot)
{
	dev_dbg(mv_chan_to_devp(mv_chan), "%s %d slot %p\n",
		__func__, __LINE__, slot);

	slot->slots_per_op = 0;
}
/*
 * mv_xor_start_new_chain - program the engine to operate on new chain headed by
 * sw_desc
 * Caller must hold &mv_chan->lock while calling this function
 */
static void mv_xor_start_new_chain(struct mv_xor_chan *mv_chan,
				   struct mv_xor_desc_slot *sw_desc)
{
	dev_dbg(mv_chan_to_devp(mv_chan), "%s %d: sw_desc %p\n",
		__func__, __LINE__, sw_desc);

	if (sw_desc->type != mv_chan->current_type)
		mv_set_mode(mv_chan, sw_desc->type);

	/* set the hardware chain */
	mv_chan_set_next_descriptor(mv_chan, sw_desc->async_tx.phys);

	mv_chan->pending += sw_desc->slot_cnt;
	mv_xor_issue_pending(&mv_chan->dmachan);
}
static dma_cookie_t
mv_xor_run_tx_complete_actions(struct mv_xor_desc_slot *desc,
			       struct mv_xor_chan *mv_chan, dma_cookie_t cookie)
{
	BUG_ON(desc->async_tx.cookie < 0);

	if (desc->async_tx.cookie > 0) {
		cookie = desc->async_tx.cookie;

		/* call the callback (must not sleep or submit new
		 * operations to this channel)
		 */
		if (desc->async_tx.callback)
			desc->async_tx.callback(
				desc->async_tx.callback_param);

		dma_descriptor_unmap(&desc->async_tx);
		if (desc->group_head)
			desc->group_head = NULL;
	}

	/* run dependent operations */
	dma_run_dependencies(&desc->async_tx);

	return cookie;
}
static int
mv_xor_clean_completed_slots(struct mv_xor_chan *mv_chan)
{
	struct mv_xor_desc_slot *iter, *_iter;

	dev_dbg(mv_chan_to_devp(mv_chan), "%s %d\n", __func__, __LINE__);
	list_for_each_entry_safe(iter, _iter, &mv_chan->completed_slots,
				 completed_node) {

		if (async_tx_test_ack(&iter->async_tx)) {
			list_del(&iter->completed_node);
			mv_xor_free_slots(mv_chan, iter);
		}
	}
	return 0;
}
static int
mv_xor_clean_slot(struct mv_xor_desc_slot *desc,
		  struct mv_xor_chan *mv_chan)
{
	dev_dbg(mv_chan_to_devp(mv_chan), "%s %d: desc %p flags %d\n",
		__func__, __LINE__, desc, desc->async_tx.flags);

	list_del(&desc->chain_node);
	/* the client is allowed to attach dependent operations
	 * until 'ack' is set
	 */
	if (!async_tx_test_ack(&desc->async_tx)) {
		/* move this slot to the completed_slots */
		list_add_tail(&desc->completed_node, &mv_chan->completed_slots);
		return 0;
	}

	mv_xor_free_slots(mv_chan, desc);
	return 0;
}
static void __mv_xor_slot_cleanup(struct mv_xor_chan *mv_chan)
{
	struct mv_xor_desc_slot *iter, *_iter;
	dma_cookie_t cookie = 0;
	int busy = mv_chan_is_busy(mv_chan);
	u32 current_desc = mv_chan_get_current_desc(mv_chan);
	int seen_current = 0;

	dev_dbg(mv_chan_to_devp(mv_chan), "%s %d\n", __func__, __LINE__);
	dev_dbg(mv_chan_to_devp(mv_chan), "current_desc %x\n", current_desc);
	mv_xor_clean_completed_slots(mv_chan);

	/* free completed slots from the chain starting with
	 * the oldest descriptor
	 */

	list_for_each_entry_safe(iter, _iter, &mv_chan->chain,
				 chain_node) {
		prefetch(_iter);
		prefetch(&_iter->async_tx);

		/* do not advance past the current descriptor loaded into the
		 * hardware channel, subsequent descriptors are either in
		 * process or have not been submitted
		 */
		if (seen_current)
			break;

		/* stop the search if we reach the current descriptor and the
		 * channel is busy
		 */
		if (iter->async_tx.phys == current_desc) {
			seen_current = 1;
			if (busy)
				break;
		}

		cookie = mv_xor_run_tx_complete_actions(iter, mv_chan, cookie);

		if (mv_xor_clean_slot(iter, mv_chan))
			break;
	}

	if ((busy == 0) && !list_empty(&mv_chan->chain)) {
		struct mv_xor_desc_slot *chain_head;
		chain_head = list_entry(mv_chan->chain.next,
					struct mv_xor_desc_slot,
					chain_node);

		mv_xor_start_new_chain(mv_chan, chain_head);
	}

	if (cookie > 0)
		mv_chan->dmachan.completed_cookie = cookie;
}
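/*
 * Locking convention: __mv_xor_slot_cleanup() expects the caller to
 * hold mv_chan->lock; the wrappers below (mv_xor_slot_cleanup() and
 * the tasklet) take the lock themselves before calling it.
 */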
static void mv_xor_slot_cleanup(struct mv_xor_chan *mv_chan)
{
	spin_lock_bh(&mv_chan->lock);
	__mv_xor_slot_cleanup(mv_chan);
	spin_unlock_bh(&mv_chan->lock);
}
static void mv_xor_tasklet(unsigned long data)
{
	struct mv_xor_chan *chan = (struct mv_xor_chan *) data;

	spin_lock_bh(&chan->lock);
	__mv_xor_slot_cleanup(chan);
	spin_unlock_bh(&chan->lock);
}
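/*
 * Slot allocator: descriptor slots are carved out of one coherent pool
 * and kept on mv_chan->all_slots. The search below looks for num_slots
 * contiguous free slots starting at last_used, retries once from the
 * head of the list, and as a last resort kicks the tasklet so that
 * completed slots get reclaimed.
 */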
static struct mv_xor_desc_slot *
mv_xor_alloc_slots(struct mv_xor_chan *mv_chan, int num_slots,
		   int slots_per_op)
{
	struct mv_xor_desc_slot *iter, *_iter, *alloc_start = NULL;
	LIST_HEAD(chain);
	int slots_found, retry = 0;

	/* start search from the last allocated descriptor
	 * if a contiguous allocation can not be found start searching
	 * from the beginning of the list
	 */
retry:
	slots_found = 0;
	if (retry == 0)
		iter = mv_chan->last_used;
	else
		iter = list_entry(&mv_chan->all_slots,
				  struct mv_xor_desc_slot,
				  slot_node);

	list_for_each_entry_safe_continue(
		iter, _iter, &mv_chan->all_slots, slot_node) {
		prefetch(_iter);
		prefetch(&_iter->async_tx);
		if (iter->slots_per_op) {
			/* give up after finding the first busy slot
			 * on the second pass through the list
			 */
			if (retry)
				break;

			slots_found = 0;
			continue;
		}

		/* start the allocation if the slot is correctly aligned */
		if (!slots_found++)
			alloc_start = iter;

		if (slots_found == num_slots) {
			struct mv_xor_desc_slot *alloc_tail = NULL;
			struct mv_xor_desc_slot *last_used = NULL;

			iter = alloc_start;
			while (num_slots) {
				int i;

				/* pre-ack all but the last descriptor */
				async_tx_ack(&iter->async_tx);

				list_add_tail(&iter->chain_node, &chain);
				alloc_tail = iter;
				iter->async_tx.cookie = 0;
				iter->slot_cnt = num_slots;
				iter->xor_check_result = NULL;
				for (i = 0; i < slots_per_op; i++) {
					iter->slots_per_op = slots_per_op - i;
					last_used = iter;
					iter = list_entry(iter->slot_node.next,
							  struct mv_xor_desc_slot,
							  slot_node);
				}
				num_slots -= slots_per_op;
			}
			alloc_tail->group_head = alloc_start;
			alloc_tail->async_tx.cookie = -EBUSY;
			list_splice(&chain, &alloc_tail->tx_list);
			mv_chan->last_used = last_used;
			mv_desc_clear_next_desc(alloc_start);
			mv_desc_clear_next_desc(alloc_tail);
			return alloc_tail;
		}
	}
	if (!retry++)
		goto retry;

	/* try to free some slots if the allocation fails */
	tasklet_schedule(&mv_chan->irq_tasklet);

	return NULL;
}

/************************ DMA engine API functions ****************************/
static dma_cookie_t
mv_xor_tx_submit(struct dma_async_tx_descriptor *tx)
{
	struct mv_xor_desc_slot *sw_desc = to_mv_xor_slot(tx);
	struct mv_xor_chan *mv_chan = to_mv_xor_chan(tx->chan);
	struct mv_xor_desc_slot *grp_start, *old_chain_tail;
	dma_cookie_t cookie;
	int new_hw_chain = 1;

	dev_dbg(mv_chan_to_devp(mv_chan),
		"%s sw_desc %p: async_tx %p\n",
		__func__, sw_desc, &sw_desc->async_tx);

	grp_start = sw_desc->group_head;

	spin_lock_bh(&mv_chan->lock);
	cookie = dma_cookie_assign(tx);

	if (list_empty(&mv_chan->chain))
		list_splice_init(&sw_desc->tx_list, &mv_chan->chain);
	else {
		new_hw_chain = 0;

		old_chain_tail = list_entry(mv_chan->chain.prev,
					    struct mv_xor_desc_slot,
					    chain_node);
		list_splice_init(&grp_start->tx_list,
				 &old_chain_tail->chain_node);

		if (!mv_can_chain(grp_start))
			goto submit_done;

		dev_dbg(mv_chan_to_devp(mv_chan), "Append to last desc %pa\n",
			&old_chain_tail->async_tx.phys);

		/* fix up the hardware chain */
		mv_desc_set_next_desc(old_chain_tail, grp_start->async_tx.phys);

		/* if the channel is not busy */
		if (!mv_chan_is_busy(mv_chan)) {
			u32 current_desc = mv_chan_get_current_desc(mv_chan);
			/*
			 * and the current desc is the end of the chain before
			 * the append, then we need to start the channel
			 */
			if (current_desc == old_chain_tail->async_tx.phys)
				new_hw_chain = 1;
		}
	}

	if (new_hw_chain)
		mv_xor_start_new_chain(mv_chan, grp_start);

submit_done:
	spin_unlock_bh(&mv_chan->lock);

	return cookie;
}
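/*
 * Note on the append path above: when the channel is idle but the old
 * chain is non-empty, the hardware may already have fetched the old
 * tail (whose phy_next_desc was still zero at that point), so the
 * channel must be restarted explicitly whenever its current descriptor
 * equals the old tail.
 */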
/* returns the number of allocated descriptors */
static int mv_xor_alloc_chan_resources(struct dma_chan *chan)
{
	void *virt_desc;
	dma_addr_t dma_desc;
	int idx;
	struct mv_xor_chan *mv_chan = to_mv_xor_chan(chan);
	struct mv_xor_desc_slot *slot = NULL;
	int num_descs_in_pool = MV_XOR_POOL_SIZE/MV_XOR_SLOT_SIZE;

	/* Allocate descriptor slots */
	idx = mv_chan->slots_allocated;
	while (idx < num_descs_in_pool) {
		slot = kzalloc(sizeof(*slot), GFP_KERNEL);
		if (!slot) {
			printk(KERN_INFO "MV XOR Channel only initialized"
				" %d descriptor slots", idx);
			break;
		}
		virt_desc = mv_chan->dma_desc_pool_virt;
		slot->hw_desc = virt_desc + idx * MV_XOR_SLOT_SIZE;

		dma_async_tx_descriptor_init(&slot->async_tx, chan);
		slot->async_tx.tx_submit = mv_xor_tx_submit;
		INIT_LIST_HEAD(&slot->chain_node);
		INIT_LIST_HEAD(&slot->slot_node);
		INIT_LIST_HEAD(&slot->tx_list);
		dma_desc = mv_chan->dma_desc_pool;
		slot->async_tx.phys = dma_desc + idx * MV_XOR_SLOT_SIZE;
		slot->idx = idx++;

		spin_lock_bh(&mv_chan->lock);
		mv_chan->slots_allocated = idx;
		list_add_tail(&slot->slot_node, &mv_chan->all_slots);
		spin_unlock_bh(&mv_chan->lock);
	}

	if (mv_chan->slots_allocated && !mv_chan->last_used)
		mv_chan->last_used = list_entry(mv_chan->all_slots.next,
						struct mv_xor_desc_slot,
						slot_node);

	dev_dbg(mv_chan_to_devp(mv_chan),
		"allocated %d descriptor slots last_used: %p\n",
		mv_chan->slots_allocated, mv_chan->last_used);

	return mv_chan->slots_allocated ? : -ENOMEM;
}
static struct dma_async_tx_descriptor *
mv_xor_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
		size_t len, unsigned long flags)
{
	struct mv_xor_chan *mv_chan = to_mv_xor_chan(chan);
	struct mv_xor_desc_slot *sw_desc, *grp_start;
	int slot_cnt;

	dev_dbg(mv_chan_to_devp(mv_chan),
		"%s dest: %pad src %pad len: %u flags: %ld\n",
		__func__, &dest, &src, len, flags);
	if (unlikely(len < MV_XOR_MIN_BYTE_COUNT))
		return NULL;

	BUG_ON(len > MV_XOR_MAX_BYTE_COUNT);

	spin_lock_bh(&mv_chan->lock);
	slot_cnt = mv_chan_memcpy_slot_count(len);
	sw_desc = mv_xor_alloc_slots(mv_chan, slot_cnt, 1);
	if (sw_desc) {
		sw_desc->type = DMA_MEMCPY;
		sw_desc->async_tx.flags = flags;
		grp_start = sw_desc->group_head;
		mv_desc_init(grp_start, flags);
		mv_desc_set_byte_count(grp_start, len);
		mv_desc_set_dest_addr(sw_desc->group_head, dest);
		mv_desc_set_src_addr(grp_start, 0, src);
		sw_desc->unmap_src_cnt = 1;
		sw_desc->unmap_len = len;
	}
	spin_unlock_bh(&mv_chan->lock);

	dev_dbg(mv_chan_to_devp(mv_chan),
		"%s sw_desc %p async_tx %p\n",
		__func__, sw_desc, sw_desc ? &sw_desc->async_tx : NULL);

	return sw_desc ? &sw_desc->async_tx : NULL;
}
static struct dma_async_tx_descriptor *
mv_xor_prep_dma_xor(struct dma_chan *chan, dma_addr_t dest, dma_addr_t *src,
		    unsigned int src_cnt, size_t len, unsigned long flags)
{
	struct mv_xor_chan *mv_chan = to_mv_xor_chan(chan);
	struct mv_xor_desc_slot *sw_desc, *grp_start;
	int slot_cnt;

	if (unlikely(len < MV_XOR_MIN_BYTE_COUNT))
		return NULL;

	BUG_ON(len > MV_XOR_MAX_BYTE_COUNT);

	dev_dbg(mv_chan_to_devp(mv_chan),
		"%s src_cnt: %d len: %u dest %pad flags: %ld\n",
		__func__, src_cnt, len, &dest, flags);

	spin_lock_bh(&mv_chan->lock);
	slot_cnt = mv_chan_xor_slot_count(len, src_cnt);
	sw_desc = mv_xor_alloc_slots(mv_chan, slot_cnt, 1);
	if (sw_desc) {
		sw_desc->type = DMA_XOR;
		sw_desc->async_tx.flags = flags;
		grp_start = sw_desc->group_head;
		mv_desc_init(grp_start, flags);
		/* the byte count field is the same as in memcpy desc*/
		mv_desc_set_byte_count(grp_start, len);
		mv_desc_set_dest_addr(sw_desc->group_head, dest);
		sw_desc->unmap_src_cnt = src_cnt;
		sw_desc->unmap_len = len;
		while (src_cnt--)
			mv_desc_set_src_addr(grp_start, src_cnt, src[src_cnt]);
	}
	spin_unlock_bh(&mv_chan->lock);
	dev_dbg(mv_chan_to_devp(mv_chan),
		"%s sw_desc %p async_tx %p\n",
		__func__, sw_desc, &sw_desc->async_tx);
	return sw_desc ? &sw_desc->async_tx : NULL;
}
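/*
 * For reference, a dmaengine client drives the two prep routines above
 * roughly like this (a minimal sketch with error handling elided; this
 * is generic dmaengine usage, not an API specific to this driver, and
 * my_done_callback is a hypothetical completion handler):
 *
 *	struct dma_async_tx_descriptor *tx;
 *	dma_cookie_t cookie;
 *
 *	tx = chan->device->device_prep_dma_xor(chan, dest, srcs, src_cnt,
 *					       len, DMA_PREP_INTERRUPT);
 *	tx->callback = my_done_callback;
 *	cookie = tx->tx_submit(tx);	// lands in mv_xor_tx_submit()
 *	dma_async_issue_pending(chan);	// lands in mv_xor_issue_pending()
 */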
static void mv_xor_free_chan_resources(struct dma_chan *chan)
{
	struct mv_xor_chan *mv_chan = to_mv_xor_chan(chan);
	struct mv_xor_desc_slot *iter, *_iter;
	int in_use_descs = 0;

	spin_lock_bh(&mv_chan->lock);

	__mv_xor_slot_cleanup(mv_chan);

	list_for_each_entry_safe(iter, _iter, &mv_chan->chain,
				 chain_node) {
		in_use_descs++;
		list_del(&iter->chain_node);
	}
	list_for_each_entry_safe(iter, _iter, &mv_chan->completed_slots,
				 completed_node) {
		in_use_descs++;
		list_del(&iter->completed_node);
	}
	list_for_each_entry_safe_reverse(
		iter, _iter, &mv_chan->all_slots, slot_node) {
		list_del(&iter->slot_node);
		kfree(iter);
		mv_chan->slots_allocated--;
	}
	mv_chan->last_used = NULL;

	dev_dbg(mv_chan_to_devp(mv_chan), "%s slots_allocated %d\n",
		__func__, mv_chan->slots_allocated);
	spin_unlock_bh(&mv_chan->lock);

	if (in_use_descs)
		dev_err(mv_chan_to_devp(mv_chan),
			"freeing %d in use descriptors!\n", in_use_descs);
}
/**
 * mv_xor_status - poll the status of an XOR transaction
 * @chan: XOR channel handle
 * @cookie: XOR transaction identifier
 * @txstate: XOR transactions state holder (or NULL)
 */
static enum dma_status mv_xor_status(struct dma_chan *chan,
				     dma_cookie_t cookie,
				     struct dma_tx_state *txstate)
{
	struct mv_xor_chan *mv_chan = to_mv_xor_chan(chan);
	enum dma_status ret;

	ret = dma_cookie_status(chan, cookie, txstate);
	if (ret == DMA_COMPLETE) {
		mv_xor_clean_completed_slots(mv_chan);
		return ret;
	}
	spin_lock_bh(&mv_chan->lock);
	__mv_xor_slot_cleanup(mv_chan);
	spin_unlock_bh(&mv_chan->lock);

	return dma_cookie_status(chan, cookie, txstate);
}
static void mv_dump_xor_regs(struct mv_xor_chan *chan)
{
	u32 val;

	val = readl_relaxed(XOR_CONFIG(chan));
	dev_err(mv_chan_to_devp(chan), "config 0x%08x\n", val);

	val = readl_relaxed(XOR_ACTIVATION(chan));
	dev_err(mv_chan_to_devp(chan), "activation 0x%08x\n", val);

	val = readl_relaxed(XOR_INTR_CAUSE(chan));
	dev_err(mv_chan_to_devp(chan), "intr cause 0x%08x\n", val);

	val = readl_relaxed(XOR_INTR_MASK(chan));
	dev_err(mv_chan_to_devp(chan), "intr mask 0x%08x\n", val);

	val = readl_relaxed(XOR_ERROR_CAUSE(chan));
	dev_err(mv_chan_to_devp(chan), "error cause 0x%08x\n", val);

	val = readl_relaxed(XOR_ERROR_ADDR(chan));
	dev_err(mv_chan_to_devp(chan), "error addr 0x%08x\n", val);
}
static void mv_xor_err_interrupt_handler(struct mv_xor_chan *chan,
					 u32 intr_cause)
{
	if (intr_cause & (1 << 4)) {
		dev_dbg(mv_chan_to_devp(chan),
			"ignore this error\n");
		return;
	}

	dev_err(mv_chan_to_devp(chan),
		"error on chan %d. intr cause 0x%08x\n",
		chan->idx, intr_cause);

	mv_dump_xor_regs(chan);
	BUG();
}
static irqreturn_t mv_xor_interrupt_handler(int irq, void *data)
{
	struct mv_xor_chan *chan = data;
	u32 intr_cause = mv_chan_get_intr_cause(chan);

	dev_dbg(mv_chan_to_devp(chan), "intr cause %x\n", intr_cause);

	if (mv_is_err_intr(intr_cause))
		mv_xor_err_interrupt_handler(chan, intr_cause);

	tasklet_schedule(&chan->irq_tasklet);

	mv_xor_device_clear_eoc_cause(chan);

	return IRQ_HANDLED;
}
static void mv_xor_issue_pending(struct dma_chan *chan)
{
	struct mv_xor_chan *mv_chan = to_mv_xor_chan(chan);

	if (mv_chan->pending >= MV_XOR_THRESHOLD) {
		mv_chan->pending = 0;
		mv_chan_activate(mv_chan);
	}
}
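/*
 * The activation register is only touched once mv_chan->pending
 * reaches MV_XOR_THRESHOLD (defined in mv_xor.h), which lets several
 * submissions be batched into one engine kick.
 */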
/*
 * Perform a transaction to verify the HW works.
 */
static int mv_xor_memcpy_self_test(struct mv_xor_chan *mv_chan)
{
	int i, ret;
	void *src, *dest;
	dma_addr_t src_dma, dest_dma;
	struct dma_chan *dma_chan;
	dma_cookie_t cookie;
	struct dma_async_tx_descriptor *tx;
	struct dmaengine_unmap_data *unmap;
	int err = 0;

	src = kmalloc(sizeof(u8) * PAGE_SIZE, GFP_KERNEL);
	if (!src)
		return -ENOMEM;

	dest = kzalloc(sizeof(u8) * PAGE_SIZE, GFP_KERNEL);
	if (!dest) {
		kfree(src);
		return -ENOMEM;
	}

	/* Fill in src buffer */
	for (i = 0; i < PAGE_SIZE; i++)
		((u8 *) src)[i] = (u8)i;

	dma_chan = &mv_chan->dmachan;
	if (mv_xor_alloc_chan_resources(dma_chan) < 1) {
		err = -ENODEV;
		goto out;
	}

	unmap = dmaengine_get_unmap_data(dma_chan->device->dev, 2, GFP_KERNEL);
	if (!unmap) {
		err = -ENOMEM;
		goto free_resources;
	}

	src_dma = dma_map_page(dma_chan->device->dev, virt_to_page(src), 0,
			       PAGE_SIZE, DMA_TO_DEVICE);
	unmap->addr[0] = src_dma;

	ret = dma_mapping_error(dma_chan->device->dev, src_dma);
	if (ret) {
		err = -ENOMEM;
		goto free_resources;
	}
	unmap->to_cnt = 1;

	dest_dma = dma_map_page(dma_chan->device->dev, virt_to_page(dest), 0,
				PAGE_SIZE, DMA_FROM_DEVICE);
	unmap->addr[1] = dest_dma;

	ret = dma_mapping_error(dma_chan->device->dev, dest_dma);
	if (ret) {
		err = -ENOMEM;
		goto free_resources;
	}
	unmap->from_cnt = 1;
	unmap->len = PAGE_SIZE;

	tx = mv_xor_prep_dma_memcpy(dma_chan, dest_dma, src_dma,
				    PAGE_SIZE, 0);
	if (!tx) {
		dev_err(dma_chan->device->dev,
			"Self-test cannot prepare operation, disabling\n");
		err = -ENODEV;
		goto free_resources;
	}

	cookie = mv_xor_tx_submit(tx);
	if (dma_submit_error(cookie)) {
		dev_err(dma_chan->device->dev,
			"Self-test submit error, disabling\n");
		err = -ENODEV;
		goto free_resources;
	}

	mv_xor_issue_pending(dma_chan);
	async_tx_ack(tx);
	msleep(1);

	if (mv_xor_status(dma_chan, cookie, NULL) !=
	    DMA_COMPLETE) {
		dev_err(dma_chan->device->dev,
			"Self-test copy timed out, disabling\n");
		err = -ENODEV;
		goto free_resources;
	}

	dma_sync_single_for_cpu(dma_chan->device->dev, dest_dma,
				PAGE_SIZE, DMA_FROM_DEVICE);
	if (memcmp(src, dest, PAGE_SIZE)) {
		dev_err(dma_chan->device->dev,
			"Self-test copy failed compare, disabling\n");
		err = -ENODEV;
		goto free_resources;
	}

free_resources:
	dmaengine_unmap_put(unmap);
	mv_xor_free_chan_resources(dma_chan);
out:
	kfree(src);
	kfree(dest);
	return err;
}
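/*
 * XOR self-test layout: source page i is filled with the byte
 * (1 << i), so XOR-ing all src_count pages yields the constant byte
 * cmp_byte, and the destination can be checked one 32-bit word
 * (cmp_word) at a time.
 */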
#define MV_XOR_NUM_SRC_TEST 4 /* must be <= 15 */
static int
mv_xor_xor_self_test(struct mv_xor_chan *mv_chan)
{
	int i, src_idx, ret;
	struct page *dest;
	struct page *xor_srcs[MV_XOR_NUM_SRC_TEST];
	dma_addr_t dma_srcs[MV_XOR_NUM_SRC_TEST];
	dma_addr_t dest_dma;
	struct dma_async_tx_descriptor *tx;
	struct dmaengine_unmap_data *unmap;
	struct dma_chan *dma_chan;
	dma_cookie_t cookie;
	u8 cmp_byte = 0;
	u32 cmp_word;
	int err = 0;
	int src_count = MV_XOR_NUM_SRC_TEST;

	for (src_idx = 0; src_idx < src_count; src_idx++) {
		xor_srcs[src_idx] = alloc_page(GFP_KERNEL);
		if (!xor_srcs[src_idx]) {
			while (src_idx--)
				__free_page(xor_srcs[src_idx]);
			return -ENOMEM;
		}
	}

	dest = alloc_page(GFP_KERNEL);
	if (!dest) {
		while (src_idx--)
			__free_page(xor_srcs[src_idx]);
		return -ENOMEM;
	}

	/* Fill in src buffers */
	for (src_idx = 0; src_idx < src_count; src_idx++) {
		u8 *ptr = page_address(xor_srcs[src_idx]);
		for (i = 0; i < PAGE_SIZE; i++)
			ptr[i] = (1 << src_idx);
	}

	for (src_idx = 0; src_idx < src_count; src_idx++)
		cmp_byte ^= (u8) (1 << src_idx);

	cmp_word = (cmp_byte << 24) | (cmp_byte << 16) |
		(cmp_byte << 8) | cmp_byte;

	memset(page_address(dest), 0, PAGE_SIZE);

	dma_chan = &mv_chan->dmachan;
	if (mv_xor_alloc_chan_resources(dma_chan) < 1) {
		err = -ENODEV;
		goto out;
	}

	unmap = dmaengine_get_unmap_data(dma_chan->device->dev, src_count + 1,
					 GFP_KERNEL);
	if (!unmap) {
		err = -ENOMEM;
		goto free_resources;
	}

	/* test xor */
	for (i = 0; i < src_count; i++) {
		unmap->addr[i] = dma_map_page(dma_chan->device->dev, xor_srcs[i],
					      0, PAGE_SIZE, DMA_TO_DEVICE);
		dma_srcs[i] = unmap->addr[i];
		ret = dma_mapping_error(dma_chan->device->dev, unmap->addr[i]);
		if (ret) {
			err = -ENOMEM;
			goto free_resources;
		}
		unmap->to_cnt++;
	}

	unmap->addr[src_count] = dma_map_page(dma_chan->device->dev, dest, 0, PAGE_SIZE,
					      DMA_FROM_DEVICE);
	dest_dma = unmap->addr[src_count];
	ret = dma_mapping_error(dma_chan->device->dev, unmap->addr[src_count]);
	if (ret) {
		err = -ENOMEM;
		goto free_resources;
	}
	unmap->from_cnt = 1;
	unmap->len = PAGE_SIZE;

	tx = mv_xor_prep_dma_xor(dma_chan, dest_dma, dma_srcs,
				 src_count, PAGE_SIZE, 0);
	if (!tx) {
		dev_err(dma_chan->device->dev,
			"Self-test cannot prepare operation, disabling\n");
		err = -ENODEV;
		goto free_resources;
	}

	cookie = mv_xor_tx_submit(tx);
	if (dma_submit_error(cookie)) {
		dev_err(dma_chan->device->dev,
			"Self-test submit error, disabling\n");
		err = -ENODEV;
		goto free_resources;
	}

	mv_xor_issue_pending(dma_chan);
	async_tx_ack(tx);
	msleep(8);

	if (mv_xor_status(dma_chan, cookie, NULL) !=
	    DMA_COMPLETE) {
		dev_err(dma_chan->device->dev,
			"Self-test xor timed out, disabling\n");
		err = -ENODEV;
		goto free_resources;
	}

	dma_sync_single_for_cpu(dma_chan->device->dev, dest_dma,
				PAGE_SIZE, DMA_FROM_DEVICE);
	for (i = 0; i < (PAGE_SIZE / sizeof(u32)); i++) {
		u32 *ptr = page_address(dest);
		if (ptr[i] != cmp_word) {
			dev_err(dma_chan->device->dev,
				"Self-test xor failed compare, disabling. index %d, data %x, expected %x\n",
				i, ptr[i], cmp_word);
			err = -ENODEV;
			goto free_resources;
		}
	}

free_resources:
	dmaengine_unmap_put(unmap);
	mv_xor_free_chan_resources(dma_chan);
out:
	src_idx = src_count;
	while (src_idx--)
		__free_page(xor_srcs[src_idx]);
	__free_page(dest);
	return err;
}
/* This driver does not implement any of the optional DMA operations. */
static int
mv_xor_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
	       unsigned long arg)
{
	return -ENOSYS;
}
static int mv_xor_channel_remove(struct mv_xor_chan *mv_chan)
{
	struct dma_chan *chan, *_chan;
	struct device *dev = mv_chan->dmadev.dev;

	dma_async_device_unregister(&mv_chan->dmadev);

	dma_free_coherent(dev, MV_XOR_POOL_SIZE,
			  mv_chan->dma_desc_pool_virt, mv_chan->dma_desc_pool);

	list_for_each_entry_safe(chan, _chan, &mv_chan->dmadev.channels,
				 device_node) {
		list_del(&chan->device_node);
	}

	free_irq(mv_chan->irq, mv_chan);

	return 0;
}
static struct mv_xor_chan *
mv_xor_channel_add(struct mv_xor_device *xordev,
		   struct platform_device *pdev,
		   int idx, dma_cap_mask_t cap_mask, int irq)
{
	int ret = 0;
	struct mv_xor_chan *mv_chan;
	struct dma_device *dma_dev;

	mv_chan = devm_kzalloc(&pdev->dev, sizeof(*mv_chan), GFP_KERNEL);
	if (!mv_chan)
		return ERR_PTR(-ENOMEM);

	mv_chan->idx = idx;
	mv_chan->irq = irq;

	dma_dev = &mv_chan->dmadev;

	/* allocate coherent memory for hardware descriptors
	 * note: writecombine gives slightly better performance, but
	 * requires that we explicitly flush the writes
	 */
	mv_chan->dma_desc_pool_virt =
	  dma_alloc_writecombine(&pdev->dev, MV_XOR_POOL_SIZE,
				 &mv_chan->dma_desc_pool, GFP_KERNEL);
	if (!mv_chan->dma_desc_pool_virt)
		return ERR_PTR(-ENOMEM);

	/* discover transaction capabilities from the platform data */
	dma_dev->cap_mask = cap_mask;

	INIT_LIST_HEAD(&dma_dev->channels);

	/* set base routines */
	dma_dev->device_alloc_chan_resources = mv_xor_alloc_chan_resources;
	dma_dev->device_free_chan_resources = mv_xor_free_chan_resources;
	dma_dev->device_tx_status = mv_xor_status;
	dma_dev->device_issue_pending = mv_xor_issue_pending;
	dma_dev->device_control = mv_xor_control;
	dma_dev->dev = &pdev->dev;

	/* set prep routines based on capability */
	if (dma_has_cap(DMA_MEMCPY, dma_dev->cap_mask))
		dma_dev->device_prep_dma_memcpy = mv_xor_prep_dma_memcpy;
	if (dma_has_cap(DMA_XOR, dma_dev->cap_mask)) {
		dma_dev->max_xor = 8;
		dma_dev->device_prep_dma_xor = mv_xor_prep_dma_xor;
	}

	mv_chan->mmr_base = xordev->xor_base;
	mv_chan->mmr_high_base = xordev->xor_high_base;
	tasklet_init(&mv_chan->irq_tasklet, mv_xor_tasklet, (unsigned long)
		     mv_chan);

	/* clear errors before enabling interrupts */
	mv_xor_device_clear_err_status(mv_chan);

	ret = request_irq(mv_chan->irq, mv_xor_interrupt_handler,
			  0, dev_name(&pdev->dev), mv_chan);
	if (ret)
		goto err_free_dma;

	mv_chan_unmask_interrupts(mv_chan);

	mv_set_mode(mv_chan, DMA_MEMCPY);

	spin_lock_init(&mv_chan->lock);
	INIT_LIST_HEAD(&mv_chan->chain);
	INIT_LIST_HEAD(&mv_chan->completed_slots);
	INIT_LIST_HEAD(&mv_chan->all_slots);
	mv_chan->dmachan.device = dma_dev;
	dma_cookie_init(&mv_chan->dmachan);

	list_add_tail(&mv_chan->dmachan.device_node, &dma_dev->channels);

	if (dma_has_cap(DMA_MEMCPY, dma_dev->cap_mask)) {
		ret = mv_xor_memcpy_self_test(mv_chan);
		dev_dbg(&pdev->dev, "memcpy self test returned %d\n", ret);
		if (ret)
			goto err_free_irq;
	}

	if (dma_has_cap(DMA_XOR, dma_dev->cap_mask)) {
		ret = mv_xor_xor_self_test(mv_chan);
		dev_dbg(&pdev->dev, "xor self test returned %d\n", ret);
		if (ret)
			goto err_free_irq;
	}

	dev_info(&pdev->dev, "Marvell XOR: ( %s%s%s)\n",
		 dma_has_cap(DMA_XOR, dma_dev->cap_mask) ? "xor " : "",
		 dma_has_cap(DMA_MEMCPY, dma_dev->cap_mask) ? "cpy " : "",
		 dma_has_cap(DMA_INTERRUPT, dma_dev->cap_mask) ? "intr " : "");

	dma_async_device_register(dma_dev);
	return mv_chan;

err_free_irq:
	free_irq(mv_chan->irq, mv_chan);
err_free_dma:
	dma_free_coherent(&pdev->dev, MV_XOR_POOL_SIZE,
			  mv_chan->dma_desc_pool_virt, mv_chan->dma_desc_pool);
	return ERR_PTR(ret);
}
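/*
 * The XOR engine performs its own MBUS address decoding, so the DRAM
 * chip-select windows are (re)programmed below to mirror the CPU's
 * view of memory. The 3 written per window into win_enable appears to
 * set that window's access-control field to full read/write access;
 * that reading of the register layout is an assumption.
 */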
static void
mv_xor_conf_mbus_windows(struct mv_xor_device *xordev,
			 const struct mbus_dram_target_info *dram)
{
	void __iomem *base = xordev->xor_high_base;
	u32 win_enable = 0;
	int i;

	for (i = 0; i < 8; i++) {
		writel(0, base + WINDOW_BASE(i));
		writel(0, base + WINDOW_SIZE(i));
		if (i < 4)
			writel(0, base + WINDOW_REMAP_HIGH(i));
	}

	for (i = 0; i < dram->num_cs; i++) {
		const struct mbus_dram_window *cs = dram->cs + i;

		writel((cs->base & 0xffff0000) |
		       (cs->mbus_attr << 8) |
		       dram->mbus_dram_target_id, base + WINDOW_BASE(i));
		writel((cs->size - 1) & 0xffff0000, base + WINDOW_SIZE(i));

		win_enable |= (1 << i);
		win_enable |= 3 << (16 + (2 * i));
	}

	writel(win_enable, base + WINDOW_BAR_ENABLE(0));
	writel(win_enable, base + WINDOW_BAR_ENABLE(1));
	writel(0, base + WINDOW_OVERRIDE_CTRL(0));
	writel(0, base + WINDOW_OVERRIDE_CTRL(1));
}
static int mv_xor_probe(struct platform_device *pdev)
{
	const struct mbus_dram_target_info *dram;
	struct mv_xor_device *xordev;
	struct mv_xor_platform_data *pdata = dev_get_platdata(&pdev->dev);
	struct resource *res;
	int i, ret;

	dev_notice(&pdev->dev, "Marvell shared XOR driver\n");

	xordev = devm_kzalloc(&pdev->dev, sizeof(*xordev), GFP_KERNEL);
	if (!xordev)
		return -ENOMEM;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!res)
		return -ENODEV;

	xordev->xor_base = devm_ioremap(&pdev->dev, res->start,
					resource_size(res));
	if (!xordev->xor_base)
		return -EBUSY;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
	if (!res)
		return -ENODEV;

	xordev->xor_high_base = devm_ioremap(&pdev->dev, res->start,
					     resource_size(res));
	if (!xordev->xor_high_base)
		return -EBUSY;

	platform_set_drvdata(pdev, xordev);

	/*
	 * (Re-)program MBUS remapping windows if we are asked to.
	 */
	dram = mv_mbus_dram_info();
	if (dram)
		mv_xor_conf_mbus_windows(xordev, dram);

	/* Not all platforms can gate the clock, so it is not
	 * an error if the clock does not exist.
	 */
	xordev->clk = clk_get(&pdev->dev, NULL);
	if (!IS_ERR(xordev->clk))
		clk_prepare_enable(xordev->clk);

	if (pdev->dev.of_node) {
		struct device_node *np;
		int i = 0;

		for_each_child_of_node(pdev->dev.of_node, np) {
			struct mv_xor_chan *chan;
			dma_cap_mask_t cap_mask;
			int irq;

			dma_cap_zero(cap_mask);
			if (of_property_read_bool(np, "dmacap,memcpy"))
				dma_cap_set(DMA_MEMCPY, cap_mask);
			if (of_property_read_bool(np, "dmacap,xor"))
				dma_cap_set(DMA_XOR, cap_mask);
			if (of_property_read_bool(np, "dmacap,interrupt"))
				dma_cap_set(DMA_INTERRUPT, cap_mask);

			irq = irq_of_parse_and_map(np, 0);
			if (!irq) {
				ret = -ENODEV;
				goto err_channel_add;
			}

			chan = mv_xor_channel_add(xordev, pdev, i,
						  cap_mask, irq);
			if (IS_ERR(chan)) {
				ret = PTR_ERR(chan);
				irq_dispose_mapping(irq);
				goto err_channel_add;
			}

			xordev->channels[i] = chan;
			i++;
		}
	} else if (pdata && pdata->channels) {
		for (i = 0; i < MV_XOR_MAX_CHANNELS; i++) {
			struct mv_xor_channel_data *cd;
			struct mv_xor_chan *chan;
			int irq;

			cd = &pdata->channels[i];
			if (!cd) {
				ret = -ENODEV;
				goto err_channel_add;
			}

			irq = platform_get_irq(pdev, i);
			if (irq < 0) {
				ret = irq;
				goto err_channel_add;
			}

			chan = mv_xor_channel_add(xordev, pdev, i,
						  cd->cap_mask, irq);
			if (IS_ERR(chan)) {
				ret = PTR_ERR(chan);
				goto err_channel_add;
			}

			xordev->channels[i] = chan;
		}
	}

	return 0;

err_channel_add:
	for (i = 0; i < MV_XOR_MAX_CHANNELS; i++)
		if (xordev->channels[i]) {
			mv_xor_channel_remove(xordev->channels[i]);
			if (pdev->dev.of_node)
				irq_dispose_mapping(xordev->channels[i]->irq);
		}

	if (!IS_ERR(xordev->clk)) {
		clk_disable_unprepare(xordev->clk);
		clk_put(xordev->clk);
	}

	return ret;
}
static int mv_xor_remove(struct platform_device *pdev)
{
	struct mv_xor_device *xordev = platform_get_drvdata(pdev);
	int i;

	for (i = 0; i < MV_XOR_MAX_CHANNELS; i++) {
		if (xordev->channels[i])
			mv_xor_channel_remove(xordev->channels[i]);
	}

	if (!IS_ERR(xordev->clk)) {
		clk_disable_unprepare(xordev->clk);
		clk_put(xordev->clk);
	}

	return 0;
}
#ifdef CONFIG_OF
static struct of_device_id mv_xor_dt_ids[] = {
	{ .compatible = "marvell,orion-xor", },
	{},
};
MODULE_DEVICE_TABLE(of, mv_xor_dt_ids);
#endif
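/*
 * A matching device-tree node looks roughly like this (a sketch based
 * on the marvell,orion-xor binding; addresses and the interrupt number
 * are placeholders):
 *
 *	xor@60800 {
 *		compatible = "marvell,orion-xor";
 *		reg = <0x60800 0x100
 *		       0x60a00 0x100>;
 *		status = "okay";
 *
 *		xor00 {
 *			interrupts = <51>;
 *			dmacap,memcpy;
 *			dmacap,xor;
 *		};
 *	};
 */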
static struct platform_driver mv_xor_driver = {
	.probe		= mv_xor_probe,
	.remove		= mv_xor_remove,
	.driver		= {
		.owner	        = THIS_MODULE,
		.name	        = MV_XOR_NAME,
		.of_match_table = of_match_ptr(mv_xor_dt_ids),
	},
};
static int __init mv_xor_init(void)
{
	return platform_driver_register(&mv_xor_driver);
}
module_init(mv_xor_init);
/* it's currently unsafe to unload this module */
#if 0
static void __exit mv_xor_exit(void)
{
	platform_driver_unregister(&mv_xor_driver);
	return;
}

module_exit(mv_xor_exit);
#endif
MODULE_AUTHOR("Saeed Bishara <saeed@marvell.com>");
MODULE_DESCRIPTION("DMA engine driver for Marvell's XOR engine");
MODULE_LICENSE("GPL");