/*
 * offload engine driver for the Marvell XOR engine
 * Copyright (C) 2007, 2008, Marvell International Ltd.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 */

#include <linux/init.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>
#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/memory.h>
#include <linux/clk.h>
#include <linux/of.h>
#include <linux/of_irq.h>
#include <linux/irqdomain.h>
#include <linux/cpumask.h>
#include <linux/platform_data/dma-mv_xor.h>

#include "dmaengine.h"
#include "mv_xor.h"

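/*
 * Orion-class controllers take the operation type from the channel
 * configuration register (XOR_MODE_IN_REG); Armada 38x-class
 * controllers can also encode it in each hardware descriptor
 * (XOR_MODE_IN_DESC), as selected per compatible string in the
 * mv_xor_dt_ids table below.
 */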
enum mv_xor_mode {
	XOR_MODE_IN_REG,
	XOR_MODE_IN_DESC,
};

static void mv_xor_issue_pending(struct dma_chan *chan);

#define to_mv_xor_chan(chan)		\
	container_of(chan, struct mv_xor_chan, dmachan)

#define to_mv_xor_slot(tx)		\
	container_of(tx, struct mv_xor_desc_slot, async_tx)

#define mv_chan_to_devp(chan)		\
	((chan)->dmadev.dev)

static void mv_desc_init(struct mv_xor_desc_slot *desc,
			 dma_addr_t addr, u32 byte_count,
			 enum dma_ctrl_flags flags)
{
	struct mv_xor_desc *hw_desc = desc->hw_desc;

	hw_desc->status = XOR_DESC_DMA_OWNED;
	hw_desc->phy_next_desc = 0;
	/* Enable end-of-descriptor interrupts only for DMA_PREP_INTERRUPT */
	hw_desc->desc_command = (flags & DMA_PREP_INTERRUPT) ?
				XOR_DESC_EOD_INT_EN : 0;
	hw_desc->phy_dest_addr = addr;
	hw_desc->byte_count = byte_count;
}

static void mv_desc_set_mode(struct mv_xor_desc_slot *desc)
{
	struct mv_xor_desc *hw_desc = desc->hw_desc;

	switch (desc->type) {
	case DMA_XOR:
	case DMA_INTERRUPT:
		hw_desc->desc_command |= XOR_DESC_OPERATION_XOR;
		break;
	case DMA_MEMCPY:
		hw_desc->desc_command |= XOR_DESC_OPERATION_MEMCPY;
		break;
	default:
		BUG();
		return;
	}
}

static void mv_desc_set_next_desc(struct mv_xor_desc_slot *desc,
				  u32 next_desc_addr)
{
	struct mv_xor_desc *hw_desc = desc->hw_desc;

	BUG_ON(hw_desc->phy_next_desc);
	hw_desc->phy_next_desc = next_desc_addr;
}

static void mv_desc_set_src_addr(struct mv_xor_desc_slot *desc,
				 int index, dma_addr_t addr)
{
	struct mv_xor_desc *hw_desc = desc->hw_desc;

	hw_desc->phy_src_addr[mv_phy_src_idx(index)] = addr;
	if (desc->type == DMA_XOR)
		hw_desc->desc_command |= (1 << index);
}

static u32 mv_chan_get_current_desc(struct mv_xor_chan *chan)
{
	return readl_relaxed(XOR_CURR_DESC(chan));
}

static void mv_chan_set_next_descriptor(struct mv_xor_chan *chan,
					u32 next_desc_addr)
{
	writel_relaxed(next_desc_addr, XOR_NEXT_DESC(chan));
}

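/*
 * The channels of one XOR engine share a single interrupt cause/mask
 * register pair; each channel owns a 16-bit slice of those registers,
 * selected by the chan->idx * 16 shifts below.
 */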
static void mv_chan_unmask_interrupts(struct mv_xor_chan *chan)
{
	u32 val = readl_relaxed(XOR_INTR_MASK(chan));

	val |= XOR_INTR_MASK_VALUE << (chan->idx * 16);
	writel_relaxed(val, XOR_INTR_MASK(chan));
}

static u32 mv_chan_get_intr_cause(struct mv_xor_chan *chan)
{
	u32 intr_cause = readl_relaxed(XOR_INTR_CAUSE(chan));

	intr_cause = (intr_cause >> (chan->idx * 16)) & 0xFFFF;
	return intr_cause;
}

static void mv_chan_clear_eoc_cause(struct mv_xor_chan *chan)
{
	u32 val;

	val = XOR_INT_END_OF_DESC | XOR_INT_END_OF_CHAIN | XOR_INT_STOPPED;
	val = ~(val << (chan->idx * 16));
	dev_dbg(mv_chan_to_devp(chan), "%s, val 0x%08x\n", __func__, val);
	writel_relaxed(val, XOR_INTR_CAUSE(chan));
}

static void mv_chan_clear_err_status(struct mv_xor_chan *chan)
{
	u32 val = 0xFFFF0000 >> (chan->idx * 16);

	writel_relaxed(val, XOR_INTR_CAUSE(chan));
}

static void mv_chan_set_mode(struct mv_xor_chan *chan,
			     u32 op_mode)
{
	u32 config = readl_relaxed(XOR_CONFIG(chan));

	config &= ~0x7;
	config |= op_mode;

#if defined(__BIG_ENDIAN)
	config |= XOR_DESCRIPTOR_SWAP;
#else
	config &= ~XOR_DESCRIPTOR_SWAP;
#endif

	writel_relaxed(config, XOR_CONFIG(chan));
}

static void mv_chan_activate(struct mv_xor_chan *chan)
{
	dev_dbg(mv_chan_to_devp(chan), " activate chan.\n");

	/* writel ensures all descriptors are flushed before activation */
	writel(BIT(0), XOR_ACTIVATION(chan));
}

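/*
 * Bits 5:4 of the activation register hold the channel state; a value
 * of 1 is treated as busy, everything else as idle.
 */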
static char mv_chan_is_busy(struct mv_xor_chan *chan)
{
	u32 state = readl_relaxed(XOR_ACTIVATION(chan));

	state = (state >> 4) & 0x3;

	return (state == 1) ? 1 : 0;
}

/*
 * mv_chan_start_new_chain - program the engine to operate on new
 * chain headed by sw_desc
 * Caller must hold &mv_chan->lock while calling this function
 */
static void mv_chan_start_new_chain(struct mv_xor_chan *mv_chan,
				    struct mv_xor_desc_slot *sw_desc)
{
	dev_dbg(mv_chan_to_devp(mv_chan), "%s %d: sw_desc %p\n",
		__func__, __LINE__, sw_desc);

	/* set the hardware chain */
	mv_chan_set_next_descriptor(mv_chan, sw_desc->async_tx.phys);

	mv_chan->pending++;
	mv_xor_issue_pending(&mv_chan->dmachan);
}

static dma_cookie_t
mv_desc_run_tx_complete_actions(struct mv_xor_desc_slot *desc,
				struct mv_xor_chan *mv_chan,
				dma_cookie_t cookie)
{
	BUG_ON(desc->async_tx.cookie < 0);

	if (desc->async_tx.cookie > 0) {
		cookie = desc->async_tx.cookie;

		/* call the callback (must not sleep or submit new
		 * operations to this channel)
		 */
		if (desc->async_tx.callback)
			desc->async_tx.callback(
				desc->async_tx.callback_param);

		dma_descriptor_unmap(&desc->async_tx);
	}

	/* run dependent operations */
	dma_run_dependencies(&desc->async_tx);

	return cookie;
}

static int
mv_chan_clean_completed_slots(struct mv_xor_chan *mv_chan)
{
	struct mv_xor_desc_slot *iter, *_iter;

	dev_dbg(mv_chan_to_devp(mv_chan), "%s %d\n", __func__, __LINE__);
	list_for_each_entry_safe(iter, _iter, &mv_chan->completed_slots,
				 node) {

		if (async_tx_test_ack(&iter->async_tx))
			list_move_tail(&iter->node, &mv_chan->free_slots);
	}
	return 0;
}

static int
mv_desc_clean_slot(struct mv_xor_desc_slot *desc,
		   struct mv_xor_chan *mv_chan)
{
	dev_dbg(mv_chan_to_devp(mv_chan), "%s %d: desc %p flags %d\n",
		__func__, __LINE__, desc, desc->async_tx.flags);

	/* the client is allowed to attach dependent operations
	 * until 'ack' is set
	 */
	if (!async_tx_test_ack(&desc->async_tx))
		/* move this slot to the completed_slots */
		list_move_tail(&desc->node, &mv_chan->completed_slots);
	else
		list_move_tail(&desc->node, &mv_chan->free_slots);

	return 0;
}

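/*
 * Descriptor slot lifecycle: slots start on free_slots, move to
 * allocated_slots in mv_chan_alloc_slot(), to chain when the
 * transaction is submitted, and from there either back to free_slots
 * or, when the client has not yet acked them, to completed_slots,
 * from where mv_chan_clean_completed_slots() reclaims them once acked.
 */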
/* This function must be called with the mv_xor_chan spinlock held */
static void mv_chan_slot_cleanup(struct mv_xor_chan *mv_chan)
{
	struct mv_xor_desc_slot *iter, *_iter;
	dma_cookie_t cookie = 0;
	int busy = mv_chan_is_busy(mv_chan);
	u32 current_desc = mv_chan_get_current_desc(mv_chan);
	int current_cleaned = 0;
	struct mv_xor_desc *hw_desc;

	dev_dbg(mv_chan_to_devp(mv_chan), "%s %d\n", __func__, __LINE__);
	dev_dbg(mv_chan_to_devp(mv_chan), "current_desc %x\n", current_desc);
	mv_chan_clean_completed_slots(mv_chan);

	/* free completed slots from the chain starting with
	 * the oldest descriptor
	 */

	list_for_each_entry_safe(iter, _iter, &mv_chan->chain,
				 node) {

		/* clean finished descriptors */
		hw_desc = iter->hw_desc;
		if (hw_desc->status & XOR_DESC_SUCCESS) {
			cookie = mv_desc_run_tx_complete_actions(iter, mv_chan,
								 cookie);

			/* done processing desc, clean slot */
			mv_desc_clean_slot(iter, mv_chan);

			/* break if we cleaned the current descriptor */
			if (iter->async_tx.phys == current_desc) {
				current_cleaned = 1;
				break;
			}
		} else {
			if (iter->async_tx.phys == current_desc) {
				current_cleaned = 0;
				break;
			}
		}
	}

	if ((busy == 0) && !list_empty(&mv_chan->chain)) {
		if (current_cleaned) {
			/*
			 * current descriptor cleaned and removed, run
			 * from list head
			 */
			iter = list_entry(mv_chan->chain.next,
					  struct mv_xor_desc_slot,
					  node);
			mv_chan_start_new_chain(mv_chan, iter);
		} else {
			if (!list_is_last(&iter->node, &mv_chan->chain)) {
				/*
				 * descriptors are still waiting after
				 * current, trigger them
				 */
				iter = list_entry(iter->node.next,
						  struct mv_xor_desc_slot,
						  node);
				mv_chan_start_new_chain(mv_chan, iter);
			} else {
				/*
				 * some descriptors are still waiting
				 * to be cleaned
				 */
				tasklet_schedule(&mv_chan->irq_tasklet);
			}
		}
	}

	if (cookie > 0)
		mv_chan->dmachan.completed_cookie = cookie;
}

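/* Bottom half: runs the slot cleanup with the channel lock held */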
static void mv_xor_tasklet(unsigned long data)
{
	struct mv_xor_chan *chan = (struct mv_xor_chan *) data;

	spin_lock_bh(&chan->lock);
	mv_chan_slot_cleanup(chan);
	spin_unlock_bh(&chan->lock);
}

static struct mv_xor_desc_slot *
mv_chan_alloc_slot(struct mv_xor_chan *mv_chan)
{
	struct mv_xor_desc_slot *iter;

	spin_lock_bh(&mv_chan->lock);

	if (!list_empty(&mv_chan->free_slots)) {
		iter = list_first_entry(&mv_chan->free_slots,
					struct mv_xor_desc_slot,
					node);

		list_move_tail(&iter->node, &mv_chan->allocated_slots);

		spin_unlock_bh(&mv_chan->lock);

		/* pre-ack descriptor */
		async_tx_ack(&iter->async_tx);
		iter->async_tx.cookie = -EBUSY;

		return iter;
	}

	spin_unlock_bh(&mv_chan->lock);

	/* try to free some slots if the allocation fails */
	tasklet_schedule(&mv_chan->irq_tasklet);

	return NULL;
}

/************************ DMA engine API functions ****************************/
static dma_cookie_t
mv_xor_tx_submit(struct dma_async_tx_descriptor *tx)
{
	struct mv_xor_desc_slot *sw_desc = to_mv_xor_slot(tx);
	struct mv_xor_chan *mv_chan = to_mv_xor_chan(tx->chan);
	struct mv_xor_desc_slot *old_chain_tail;
	dma_cookie_t cookie;
	int new_hw_chain = 1;

	dev_dbg(mv_chan_to_devp(mv_chan),
		"%s sw_desc %p: async_tx %p\n",
		__func__, sw_desc, &sw_desc->async_tx);

	spin_lock_bh(&mv_chan->lock);
	cookie = dma_cookie_assign(tx);

	if (list_empty(&mv_chan->chain))
		list_move_tail(&sw_desc->node, &mv_chan->chain);
	else {
		new_hw_chain = 0;

		old_chain_tail = list_entry(mv_chan->chain.prev,
					    struct mv_xor_desc_slot,
					    node);
		list_move_tail(&sw_desc->node, &mv_chan->chain);

		dev_dbg(mv_chan_to_devp(mv_chan), "Append to last desc %pa\n",
			&old_chain_tail->async_tx.phys);

		/* fix up the hardware chain */
		mv_desc_set_next_desc(old_chain_tail, sw_desc->async_tx.phys);

		/* if the channel is not busy */
		if (!mv_chan_is_busy(mv_chan)) {
			u32 current_desc = mv_chan_get_current_desc(mv_chan);
			/*
			 * and the current desc is the end of the chain before
			 * the append, then we need to start the channel
			 */
			if (current_desc == old_chain_tail->async_tx.phys)
				new_hw_chain = 1;
		}
	}

	if (new_hw_chain)
		mv_chan_start_new_chain(mv_chan, sw_desc);

	spin_unlock_bh(&mv_chan->lock);

	return cookie;
}

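/*
 * All descriptor slots share a single DMA-coherent pool; slot i's
 * hardware descriptor lives at offset i * MV_XOR_SLOT_SIZE in both
 * the CPU (dma_desc_pool_virt) and bus (dma_desc_pool) views of it.
 */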
/* returns the number of allocated descriptors */
static int mv_xor_alloc_chan_resources(struct dma_chan *chan)
{
	void *virt_desc;
	dma_addr_t dma_desc;
	int idx;
	struct mv_xor_chan *mv_chan = to_mv_xor_chan(chan);
	struct mv_xor_desc_slot *slot = NULL;
	int num_descs_in_pool = MV_XOR_POOL_SIZE/MV_XOR_SLOT_SIZE;

	/* Allocate descriptor slots */
	idx = mv_chan->slots_allocated;
	while (idx < num_descs_in_pool) {
		slot = kzalloc(sizeof(*slot), GFP_KERNEL);
		if (!slot) {
			dev_info(mv_chan_to_devp(mv_chan),
				 "channel only initialized %d descriptor slots",
				 idx);
			break;
		}
		virt_desc = mv_chan->dma_desc_pool_virt;
		slot->hw_desc = virt_desc + idx * MV_XOR_SLOT_SIZE;

		dma_async_tx_descriptor_init(&slot->async_tx, chan);
		slot->async_tx.tx_submit = mv_xor_tx_submit;
		INIT_LIST_HEAD(&slot->node);
		dma_desc = mv_chan->dma_desc_pool;
		slot->async_tx.phys = dma_desc + idx * MV_XOR_SLOT_SIZE;
		slot->idx = idx++;

		spin_lock_bh(&mv_chan->lock);
		mv_chan->slots_allocated = idx;
		list_add_tail(&slot->node, &mv_chan->free_slots);
		spin_unlock_bh(&mv_chan->lock);
	}

	dev_dbg(mv_chan_to_devp(mv_chan),
		"allocated %d descriptor slots\n",
		mv_chan->slots_allocated);

	return mv_chan->slots_allocated ? : -ENOMEM;
}

static struct dma_async_tx_descriptor *
mv_xor_prep_dma_xor(struct dma_chan *chan, dma_addr_t dest, dma_addr_t *src,
		    unsigned int src_cnt, size_t len, unsigned long flags)
{
	struct mv_xor_chan *mv_chan = to_mv_xor_chan(chan);
	struct mv_xor_desc_slot *sw_desc;

	if (unlikely(len < MV_XOR_MIN_BYTE_COUNT))
		return NULL;

	BUG_ON(len > MV_XOR_MAX_BYTE_COUNT);

	dev_dbg(mv_chan_to_devp(mv_chan),
		"%s src_cnt: %d len: %u dest %pad flags: %ld\n",
		__func__, src_cnt, len, &dest, flags);

	sw_desc = mv_chan_alloc_slot(mv_chan);
	if (sw_desc) {
		sw_desc->type = DMA_XOR;
		sw_desc->async_tx.flags = flags;
		mv_desc_init(sw_desc, dest, len, flags);
		if (mv_chan->op_in_desc == XOR_MODE_IN_DESC)
			mv_desc_set_mode(sw_desc);
		while (src_cnt--)
			mv_desc_set_src_addr(sw_desc, src_cnt, src[src_cnt]);
	}

	dev_dbg(mv_chan_to_devp(mv_chan),
		"%s sw_desc %p async_tx %p\n",
		__func__, sw_desc, &sw_desc->async_tx);
	return sw_desc ? &sw_desc->async_tx : NULL;
}

static struct dma_async_tx_descriptor *
mv_xor_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
		       size_t len, unsigned long flags)
{
	/*
	 * A MEMCPY operation is identical to an XOR operation with only
	 * a single source address.
	 */
	return mv_xor_prep_dma_xor(chan, dest, &src, 1, len, flags);
}

static struct dma_async_tx_descriptor *
mv_xor_prep_dma_interrupt(struct dma_chan *chan, unsigned long flags)
{
	struct mv_xor_chan *mv_chan = to_mv_xor_chan(chan);
	dma_addr_t src, dest;
	size_t len;

	src = mv_chan->dummy_src_addr;
	dest = mv_chan->dummy_dst_addr;
	len = MV_XOR_MIN_BYTE_COUNT;

	/*
	 * We implement the DMA_INTERRUPT operation as a minimum sized
	 * XOR operation with a single dummy source address.
	 */
	return mv_xor_prep_dma_xor(chan, dest, &src, 1, len, flags);
}

static void mv_xor_free_chan_resources(struct dma_chan *chan)
{
	struct mv_xor_chan *mv_chan = to_mv_xor_chan(chan);
	struct mv_xor_desc_slot *iter, *_iter;
	int in_use_descs = 0;

	spin_lock_bh(&mv_chan->lock);

	mv_chan_slot_cleanup(mv_chan);

	list_for_each_entry_safe(iter, _iter, &mv_chan->chain,
				 node) {
		in_use_descs++;
		list_move_tail(&iter->node, &mv_chan->free_slots);
	}
	list_for_each_entry_safe(iter, _iter, &mv_chan->completed_slots,
				 node) {
		in_use_descs++;
		list_move_tail(&iter->node, &mv_chan->free_slots);
	}
	list_for_each_entry_safe(iter, _iter, &mv_chan->allocated_slots,
				 node) {
		in_use_descs++;
		list_move_tail(&iter->node, &mv_chan->free_slots);
	}
	list_for_each_entry_safe_reverse(
		iter, _iter, &mv_chan->free_slots, node) {
		list_del(&iter->node);
		kfree(iter);
		mv_chan->slots_allocated--;
	}

	dev_dbg(mv_chan_to_devp(mv_chan), "%s slots_allocated %d\n",
		__func__, mv_chan->slots_allocated);
	spin_unlock_bh(&mv_chan->lock);

	if (in_use_descs)
		dev_err(mv_chan_to_devp(mv_chan),
			"freeing %d in use descriptors!\n", in_use_descs);
}

/**
 * mv_xor_status - poll the status of an XOR transaction
 * @chan: XOR channel handle
 * @cookie: XOR transaction identifier
 * @txstate: XOR transactions state holder (or NULL)
 */
static enum dma_status mv_xor_status(struct dma_chan *chan,
				     dma_cookie_t cookie,
				     struct dma_tx_state *txstate)
{
	struct mv_xor_chan *mv_chan = to_mv_xor_chan(chan);
	enum dma_status ret;

	ret = dma_cookie_status(chan, cookie, txstate);
	if (ret == DMA_COMPLETE)
		return ret;

	spin_lock_bh(&mv_chan->lock);
	mv_chan_slot_cleanup(mv_chan);
	spin_unlock_bh(&mv_chan->lock);

	return dma_cookie_status(chan, cookie, txstate);
}

static void mv_chan_dump_regs(struct mv_xor_chan *chan)
{
	u32 val;

	val = readl_relaxed(XOR_CONFIG(chan));
	dev_err(mv_chan_to_devp(chan), "config 0x%08x\n", val);

	val = readl_relaxed(XOR_ACTIVATION(chan));
	dev_err(mv_chan_to_devp(chan), "activation 0x%08x\n", val);

	val = readl_relaxed(XOR_INTR_CAUSE(chan));
	dev_err(mv_chan_to_devp(chan), "intr cause 0x%08x\n", val);

	val = readl_relaxed(XOR_INTR_MASK(chan));
	dev_err(mv_chan_to_devp(chan), "intr mask 0x%08x\n", val);

	val = readl_relaxed(XOR_ERROR_CAUSE(chan));
	dev_err(mv_chan_to_devp(chan), "error cause 0x%08x\n", val);

	val = readl_relaxed(XOR_ERROR_ADDR(chan));
	dev_err(mv_chan_to_devp(chan), "error addr 0x%08x\n", val);
}

static void mv_chan_err_interrupt_handler(struct mv_xor_chan *chan,
					  u32 intr_cause)
{
	if (intr_cause & XOR_INT_ERR_DECODE) {
		dev_dbg(mv_chan_to_devp(chan), "ignoring address decode error\n");
		return;
	}

	dev_err(mv_chan_to_devp(chan), "error on chan %d. intr cause 0x%08x\n",
		chan->idx, intr_cause);

	mv_chan_dump_regs(chan);
	WARN_ON(1);
}

static irqreturn_t mv_xor_interrupt_handler(int irq, void *data)
{
	struct mv_xor_chan *chan = data;
	u32 intr_cause = mv_chan_get_intr_cause(chan);

	dev_dbg(mv_chan_to_devp(chan), "intr cause %x\n", intr_cause);

	if (intr_cause & XOR_INTR_ERRORS)
		mv_chan_err_interrupt_handler(chan, intr_cause);

	tasklet_schedule(&chan->irq_tasklet);

	mv_chan_clear_eoc_cause(chan);

	return IRQ_HANDLED;
}

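/*
 * Activation is batched: mv_chan_start_new_chain() bumps ->pending for
 * every chain update, and the channel is only (re)activated here once
 * at least MV_XOR_THRESHOLD descriptors are pending.
 */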
static void mv_xor_issue_pending(struct dma_chan *chan)
{
	struct mv_xor_chan *mv_chan = to_mv_xor_chan(chan);

	if (mv_chan->pending >= MV_XOR_THRESHOLD) {
		mv_chan->pending = 0;
		mv_chan_activate(mv_chan);
	}
}

/*
 * Perform a transaction to verify the HW works.
 */

static int mv_chan_memcpy_self_test(struct mv_xor_chan *mv_chan)
{
	int i, ret;
	void *src, *dest;
	dma_addr_t src_dma, dest_dma;
	struct dma_chan *dma_chan;
	dma_cookie_t cookie;
	struct dma_async_tx_descriptor *tx;
	struct dmaengine_unmap_data *unmap;
	int err = 0;

	src = kmalloc(sizeof(u8) * PAGE_SIZE, GFP_KERNEL);
	if (!src)
		return -ENOMEM;

	dest = kzalloc(sizeof(u8) * PAGE_SIZE, GFP_KERNEL);
	if (!dest) {
		kfree(src);
		return -ENOMEM;
	}

	/* Fill in src buffer */
	for (i = 0; i < PAGE_SIZE; i++)
		((u8 *) src)[i] = (u8)i;

	dma_chan = &mv_chan->dmachan;
	if (mv_xor_alloc_chan_resources(dma_chan) < 1) {
		err = -ENODEV;
		goto out;
	}

	unmap = dmaengine_get_unmap_data(dma_chan->device->dev, 2, GFP_KERNEL);
	if (!unmap) {
		err = -ENOMEM;
		goto free_resources;
	}

	src_dma = dma_map_page(dma_chan->device->dev, virt_to_page(src), 0,
			       PAGE_SIZE, DMA_TO_DEVICE);
	unmap->addr[0] = src_dma;

	ret = dma_mapping_error(dma_chan->device->dev, src_dma);
	if (ret) {
		err = -ENOMEM;
		goto free_resources;
	}
	unmap->to_cnt = 1;

	dest_dma = dma_map_page(dma_chan->device->dev, virt_to_page(dest), 0,
				PAGE_SIZE, DMA_FROM_DEVICE);
	unmap->addr[1] = dest_dma;

	ret = dma_mapping_error(dma_chan->device->dev, dest_dma);
	if (ret) {
		err = -ENOMEM;
		goto free_resources;
	}
	unmap->from_cnt = 1;
	unmap->len = PAGE_SIZE;

	tx = mv_xor_prep_dma_memcpy(dma_chan, dest_dma, src_dma,
				    PAGE_SIZE, 0);
	if (!tx) {
		dev_err(dma_chan->device->dev,
			"Self-test cannot prepare operation, disabling\n");
		err = -ENODEV;
		goto free_resources;
	}

	cookie = mv_xor_tx_submit(tx);
	if (dma_submit_error(cookie)) {
		dev_err(dma_chan->device->dev,
			"Self-test submit error, disabling\n");
		err = -ENODEV;
		goto free_resources;
	}

	mv_xor_issue_pending(dma_chan);
	async_tx_ack(tx);
	msleep(1);

	if (mv_xor_status(dma_chan, cookie, NULL) !=
	    DMA_COMPLETE) {
		dev_err(dma_chan->device->dev,
			"Self-test copy timed out, disabling\n");
		err = -ENODEV;
		goto free_resources;
	}

	dma_sync_single_for_cpu(dma_chan->device->dev, dest_dma,
				PAGE_SIZE, DMA_FROM_DEVICE);
	if (memcmp(src, dest, PAGE_SIZE)) {
		dev_err(dma_chan->device->dev,
			"Self-test copy failed compare, disabling\n");
		err = -ENODEV;
		goto free_resources;
	}

free_resources:
	dmaengine_unmap_put(unmap);
	mv_xor_free_chan_resources(dma_chan);
out:
	kfree(src);
	kfree(dest);
	return err;
}

#define MV_XOR_NUM_SRC_TEST 4 /* must be <= 15 */
static int
mv_chan_xor_self_test(struct mv_xor_chan *mv_chan)
{
	int i, src_idx, ret;
	struct page *dest;
	struct page *xor_srcs[MV_XOR_NUM_SRC_TEST];
	dma_addr_t dma_srcs[MV_XOR_NUM_SRC_TEST];
	dma_addr_t dest_dma;
	struct dma_async_tx_descriptor *tx;
	struct dmaengine_unmap_data *unmap;
	struct dma_chan *dma_chan;
	dma_cookie_t cookie;
	u8 cmp_byte = 0;
	u32 cmp_word;
	int err = 0;
	int src_count = MV_XOR_NUM_SRC_TEST;

	for (src_idx = 0; src_idx < src_count; src_idx++) {
		xor_srcs[src_idx] = alloc_page(GFP_KERNEL);
		if (!xor_srcs[src_idx]) {
			while (src_idx--)
				__free_page(xor_srcs[src_idx]);
			return -ENOMEM;
		}
	}

	dest = alloc_page(GFP_KERNEL);
	if (!dest) {
		while (src_idx--)
			__free_page(xor_srcs[src_idx]);
		return -ENOMEM;
	}

	/* Fill in src buffers */
	for (src_idx = 0; src_idx < src_count; src_idx++) {
		u8 *ptr = page_address(xor_srcs[src_idx]);
		for (i = 0; i < PAGE_SIZE; i++)
			ptr[i] = (1 << src_idx);
	}

	for (src_idx = 0; src_idx < src_count; src_idx++)
		cmp_byte ^= (u8) (1 << src_idx);

	cmp_word = (cmp_byte << 24) | (cmp_byte << 16) |
		(cmp_byte << 8) | cmp_byte;

	memset(page_address(dest), 0, PAGE_SIZE);

	dma_chan = &mv_chan->dmachan;
	if (mv_xor_alloc_chan_resources(dma_chan) < 1) {
		err = -ENODEV;
		goto out;
	}

	unmap = dmaengine_get_unmap_data(dma_chan->device->dev, src_count + 1,
					 GFP_KERNEL);
	if (!unmap) {
		err = -ENOMEM;
		goto free_resources;
	}

	/* test xor */
	for (i = 0; i < src_count; i++) {
		unmap->addr[i] = dma_map_page(dma_chan->device->dev, xor_srcs[i],
					      0, PAGE_SIZE, DMA_TO_DEVICE);
		dma_srcs[i] = unmap->addr[i];
		ret = dma_mapping_error(dma_chan->device->dev, unmap->addr[i]);
		if (ret) {
			err = -ENOMEM;
			goto free_resources;
		}
		unmap->to_cnt++;
	}

	unmap->addr[src_count] = dma_map_page(dma_chan->device->dev, dest, 0, PAGE_SIZE,
					      DMA_FROM_DEVICE);
	dest_dma = unmap->addr[src_count];
	ret = dma_mapping_error(dma_chan->device->dev, unmap->addr[src_count]);
	if (ret) {
		err = -ENOMEM;
		goto free_resources;
	}
	unmap->from_cnt = 1;
	unmap->len = PAGE_SIZE;

	tx = mv_xor_prep_dma_xor(dma_chan, dest_dma, dma_srcs,
				 src_count, PAGE_SIZE, 0);
	if (!tx) {
		dev_err(dma_chan->device->dev,
			"Self-test cannot prepare operation, disabling\n");
		err = -ENODEV;
		goto free_resources;
	}

	cookie = mv_xor_tx_submit(tx);
	if (dma_submit_error(cookie)) {
		dev_err(dma_chan->device->dev,
			"Self-test submit error, disabling\n");
		err = -ENODEV;
		goto free_resources;
	}

	mv_xor_issue_pending(dma_chan);
	async_tx_ack(tx);
	msleep(8);

	if (mv_xor_status(dma_chan, cookie, NULL) !=
	    DMA_COMPLETE) {
		dev_err(dma_chan->device->dev,
			"Self-test xor timed out, disabling\n");
		err = -ENODEV;
		goto free_resources;
	}

	dma_sync_single_for_cpu(dma_chan->device->dev, dest_dma,
				PAGE_SIZE, DMA_FROM_DEVICE);
	for (i = 0; i < (PAGE_SIZE / sizeof(u32)); i++) {
		u32 *ptr = page_address(dest);
		if (ptr[i] != cmp_word) {
			dev_err(dma_chan->device->dev,
				"Self-test xor failed compare, disabling. index %d, data %x, expected %x\n",
				i, ptr[i], cmp_word);
			err = -ENODEV;
			goto free_resources;
		}
	}

free_resources:
	dmaengine_unmap_put(unmap);
	mv_xor_free_chan_resources(dma_chan);
out:
	src_idx = src_count;
	while (src_idx--)
		__free_page(xor_srcs[src_idx]);
	__free_page(dest);
	return err;
}

static int mv_xor_channel_remove(struct mv_xor_chan *mv_chan)
{
	struct dma_chan *chan, *_chan;
	struct device *dev = mv_chan->dmadev.dev;

	dma_async_device_unregister(&mv_chan->dmadev);

	dma_free_coherent(dev, MV_XOR_POOL_SIZE,
			  mv_chan->dma_desc_pool_virt, mv_chan->dma_desc_pool);
	dma_unmap_single(dev, mv_chan->dummy_src_addr,
			 MV_XOR_MIN_BYTE_COUNT, DMA_FROM_DEVICE);
	dma_unmap_single(dev, mv_chan->dummy_dst_addr,
			 MV_XOR_MIN_BYTE_COUNT, DMA_TO_DEVICE);

	list_for_each_entry_safe(chan, _chan, &mv_chan->dmadev.channels,
				 device_node) {
		list_del(&chan->device_node);
	}

	free_irq(mv_chan->irq, mv_chan);

	return 0;
}

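/*
 * Bring up one channel: map the dummy buffers used by DMA_INTERRUPT,
 * allocate the descriptor pool, hook the interrupt, program the
 * operation mode, run the memcpy/XOR self-tests and only then
 * register the channel with the dmaengine core.
 */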
static struct mv_xor_chan *
mv_xor_channel_add(struct mv_xor_device *xordev,
		   struct platform_device *pdev,
		   int idx, dma_cap_mask_t cap_mask, int irq, int op_in_desc)
{
	int ret = 0;
	struct mv_xor_chan *mv_chan;
	struct dma_device *dma_dev;

	mv_chan = devm_kzalloc(&pdev->dev, sizeof(*mv_chan), GFP_KERNEL);
	if (!mv_chan)
		return ERR_PTR(-ENOMEM);

	mv_chan->idx = idx;
	mv_chan->irq = irq;
	mv_chan->op_in_desc = op_in_desc;

	dma_dev = &mv_chan->dmadev;

	/*
	 * These source and destination dummy buffers are used to implement
	 * a DMA_INTERRUPT operation as a minimum-sized XOR operation.
	 * Hence, we only need to map the buffers at initialization-time.
	 */
	mv_chan->dummy_src_addr = dma_map_single(dma_dev->dev,
		mv_chan->dummy_src, MV_XOR_MIN_BYTE_COUNT, DMA_FROM_DEVICE);
	mv_chan->dummy_dst_addr = dma_map_single(dma_dev->dev,
		mv_chan->dummy_dst, MV_XOR_MIN_BYTE_COUNT, DMA_TO_DEVICE);

	/* allocate coherent memory for hardware descriptors
	 * note: writecombine gives slightly better performance, but
	 * requires that we explicitly flush the writes
	 */
	mv_chan->dma_desc_pool_virt =
	  dma_alloc_writecombine(&pdev->dev, MV_XOR_POOL_SIZE,
				 &mv_chan->dma_desc_pool, GFP_KERNEL);
	if (!mv_chan->dma_desc_pool_virt)
		return ERR_PTR(-ENOMEM);

	/* discover transaction capabilities from the platform data */
	dma_dev->cap_mask = cap_mask;

	INIT_LIST_HEAD(&dma_dev->channels);

	/* set base routines */
	dma_dev->device_alloc_chan_resources = mv_xor_alloc_chan_resources;
	dma_dev->device_free_chan_resources = mv_xor_free_chan_resources;
	dma_dev->device_tx_status = mv_xor_status;
	dma_dev->device_issue_pending = mv_xor_issue_pending;
	dma_dev->dev = &pdev->dev;

	/* set prep routines based on capability */
	if (dma_has_cap(DMA_INTERRUPT, dma_dev->cap_mask))
		dma_dev->device_prep_dma_interrupt = mv_xor_prep_dma_interrupt;
	if (dma_has_cap(DMA_MEMCPY, dma_dev->cap_mask))
		dma_dev->device_prep_dma_memcpy = mv_xor_prep_dma_memcpy;
	if (dma_has_cap(DMA_XOR, dma_dev->cap_mask)) {
		dma_dev->max_xor = 8;
		dma_dev->device_prep_dma_xor = mv_xor_prep_dma_xor;
	}

	mv_chan->mmr_base = xordev->xor_base;
	mv_chan->mmr_high_base = xordev->xor_high_base;
	tasklet_init(&mv_chan->irq_tasklet, mv_xor_tasklet, (unsigned long)
		     mv_chan);

	/* clear errors before enabling interrupts */
	mv_chan_clear_err_status(mv_chan);

	ret = request_irq(mv_chan->irq, mv_xor_interrupt_handler,
			  0, dev_name(&pdev->dev), mv_chan);
	if (ret)
		goto err_free_dma;

	mv_chan_unmask_interrupts(mv_chan);

	if (mv_chan->op_in_desc == XOR_MODE_IN_DESC)
		mv_chan_set_mode(mv_chan, XOR_OPERATION_MODE_IN_DESC);
	else
		mv_chan_set_mode(mv_chan, XOR_OPERATION_MODE_XOR);

	spin_lock_init(&mv_chan->lock);
	INIT_LIST_HEAD(&mv_chan->chain);
	INIT_LIST_HEAD(&mv_chan->completed_slots);
	INIT_LIST_HEAD(&mv_chan->free_slots);
	INIT_LIST_HEAD(&mv_chan->allocated_slots);
	mv_chan->dmachan.device = dma_dev;
	dma_cookie_init(&mv_chan->dmachan);

	list_add_tail(&mv_chan->dmachan.device_node, &dma_dev->channels);

	if (dma_has_cap(DMA_MEMCPY, dma_dev->cap_mask)) {
		ret = mv_chan_memcpy_self_test(mv_chan);
		dev_dbg(&pdev->dev, "memcpy self test returned %d\n", ret);
		if (ret)
			goto err_free_irq;
	}

	if (dma_has_cap(DMA_XOR, dma_dev->cap_mask)) {
		ret = mv_chan_xor_self_test(mv_chan);
		dev_dbg(&pdev->dev, "xor self test returned %d\n", ret);
		if (ret)
			goto err_free_irq;
	}

	dev_info(&pdev->dev, "Marvell XOR (%s): ( %s%s%s)\n",
		 mv_chan->op_in_desc ? "Descriptor Mode" : "Registers Mode",
		 dma_has_cap(DMA_XOR, dma_dev->cap_mask) ? "xor " : "",
		 dma_has_cap(DMA_MEMCPY, dma_dev->cap_mask) ? "cpy " : "",
		 dma_has_cap(DMA_INTERRUPT, dma_dev->cap_mask) ? "intr " : "");

	dma_async_device_register(dma_dev);
	return mv_chan;

err_free_irq:
	free_irq(mv_chan->irq, mv_chan);
err_free_dma:
	dma_free_coherent(&pdev->dev, MV_XOR_POOL_SIZE,
			  mv_chan->dma_desc_pool_virt, mv_chan->dma_desc_pool);
	return ERR_PTR(ret);
}

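/*
 * Program the address decoding windows so the engine can reach each
 * DRAM chip select. The low bits of win_enable enable one window per
 * chip select; the two per-window bits at 16 + 2*i appear to be an
 * access-control field (0x3 presumably granting full read/write
 * access, by analogy with other Marvell mbus users).
 */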
static void
mv_xor_conf_mbus_windows(struct mv_xor_device *xordev,
			 const struct mbus_dram_target_info *dram)
{
	void __iomem *base = xordev->xor_high_base;
	u32 win_enable = 0;
	int i;

	for (i = 0; i < 8; i++) {
		writel(0, base + WINDOW_BASE(i));
		writel(0, base + WINDOW_SIZE(i));
		if (i < 4)
			writel(0, base + WINDOW_REMAP_HIGH(i));
	}

	for (i = 0; i < dram->num_cs; i++) {
		const struct mbus_dram_window *cs = dram->cs + i;

		writel((cs->base & 0xffff0000) |
		       (cs->mbus_attr << 8) |
		       dram->mbus_dram_target_id, base + WINDOW_BASE(i));
		writel((cs->size - 1) & 0xffff0000, base + WINDOW_SIZE(i));

		win_enable |= (1 << i);
		win_enable |= 3 << (16 + (2 * i));
	}

	writel(win_enable, base + WINDOW_BAR_ENABLE(0));
	writel(win_enable, base + WINDOW_BAR_ENABLE(1));
	writel(0, base + WINDOW_OVERRIDE_CTRL(0));
	writel(0, base + WINDOW_OVERRIDE_CTRL(1));
}

/*
 * Since this XOR driver is basically used only for RAID5, we don't
 * need to care about synchronizing ->suspend with DMA activity,
 * because the DMA engine will naturally be quiet due to the block
 * devices being suspended.
 */
static int mv_xor_suspend(struct platform_device *pdev, pm_message_t state)
{
	struct mv_xor_device *xordev = platform_get_drvdata(pdev);
	int i;

	for (i = 0; i < MV_XOR_MAX_CHANNELS; i++) {
		struct mv_xor_chan *mv_chan = xordev->channels[i];

		if (!mv_chan)
			continue;

		mv_chan->saved_config_reg =
			readl_relaxed(XOR_CONFIG(mv_chan));
		mv_chan->saved_int_mask_reg =
			readl_relaxed(XOR_INTR_MASK(mv_chan));
	}

	return 0;
}

static int mv_xor_resume(struct platform_device *dev)
{
	struct mv_xor_device *xordev = platform_get_drvdata(dev);
	const struct mbus_dram_target_info *dram;
	int i;

	for (i = 0; i < MV_XOR_MAX_CHANNELS; i++) {
		struct mv_xor_chan *mv_chan = xordev->channels[i];

		if (!mv_chan)
			continue;

		writel_relaxed(mv_chan->saved_config_reg,
			       XOR_CONFIG(mv_chan));
		writel_relaxed(mv_chan->saved_int_mask_reg,
			       XOR_INTR_MASK(mv_chan));
	}

	dram = mv_mbus_dram_info();
	if (dram)
		mv_xor_conf_mbus_windows(xordev, dram);

	return 0;
}

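/*
 * The .data field carries the enum mv_xor_mode value matching each
 * compatible string; probe retrieves it via of_match_device().
 */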
static const struct of_device_id mv_xor_dt_ids[] = {
	{ .compatible = "marvell,orion-xor", .data = (void *)XOR_MODE_IN_REG },
	{ .compatible = "marvell,armada-380-xor", .data = (void *)XOR_MODE_IN_DESC },
	{},
};

static unsigned int mv_xor_engine_count;

static int mv_xor_probe(struct platform_device *pdev)
{
	const struct mbus_dram_target_info *dram;
	struct mv_xor_device *xordev;
	struct mv_xor_platform_data *pdata = dev_get_platdata(&pdev->dev);
	struct resource *res;
	unsigned int max_engines, max_channels;
	int i, ret;
	int op_in_desc;

	dev_notice(&pdev->dev, "Marvell shared XOR driver\n");

	xordev = devm_kzalloc(&pdev->dev, sizeof(*xordev), GFP_KERNEL);
	if (!xordev)
		return -ENOMEM;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!res)
		return -ENODEV;

	xordev->xor_base = devm_ioremap(&pdev->dev, res->start,
					resource_size(res));
	if (!xordev->xor_base)
		return -EBUSY;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
	if (!res)
		return -ENODEV;

	xordev->xor_high_base = devm_ioremap(&pdev->dev, res->start,
					     resource_size(res));
	if (!xordev->xor_high_base)
		return -EBUSY;

	platform_set_drvdata(pdev, xordev);

	/*
	 * (Re-)program MBUS remapping windows if we are asked to.
	 */
	dram = mv_mbus_dram_info();
	if (dram)
		mv_xor_conf_mbus_windows(xordev, dram);

	/* Not all platforms can gate the clock, so it is not
	 * an error if the clock does not exist.
	 */
	xordev->clk = clk_get(&pdev->dev, NULL);
	if (!IS_ERR(xordev->clk))
		clk_prepare_enable(xordev->clk);

	/*
	 * We don't want to have more than one channel per CPU in
	 * order for async_tx to perform well. So we limit the number
	 * of engines and channels so that we take into account this
	 * constraint. Note that we also want to use channels from
	 * separate engines when possible.
	 */
	max_engines = num_present_cpus();
	max_channels = min_t(unsigned int,
			     MV_XOR_MAX_CHANNELS,
			     DIV_ROUND_UP(num_present_cpus(), 2));

	if (mv_xor_engine_count >= max_engines)
		return 0;

	if (pdev->dev.of_node) {
		struct device_node *np;
		int i = 0;
		const struct of_device_id *of_id =
			of_match_device(mv_xor_dt_ids,
					&pdev->dev);

		for_each_child_of_node(pdev->dev.of_node, np) {
			struct mv_xor_chan *chan;
			dma_cap_mask_t cap_mask;
			int irq;
			op_in_desc = (int)of_id->data;

			if (i >= max_channels)
				continue;

			dma_cap_zero(cap_mask);
			dma_cap_set(DMA_MEMCPY, cap_mask);
			dma_cap_set(DMA_XOR, cap_mask);
			dma_cap_set(DMA_INTERRUPT, cap_mask);

			irq = irq_of_parse_and_map(np, 0);
			if (!irq) {
				ret = -ENODEV;
				goto err_channel_add;
			}

			chan = mv_xor_channel_add(xordev, pdev, i,
						  cap_mask, irq, op_in_desc);
			if (IS_ERR(chan)) {
				ret = PTR_ERR(chan);
				irq_dispose_mapping(irq);
				goto err_channel_add;
			}

			xordev->channels[i] = chan;
			i++;
		}
	} else if (pdata && pdata->channels) {
		for (i = 0; i < max_channels; i++) {
			struct mv_xor_channel_data *cd;
			struct mv_xor_chan *chan;
			int irq;

			cd = &pdata->channels[i];
			if (!cd) {
				ret = -ENODEV;
				goto err_channel_add;
			}

			irq = platform_get_irq(pdev, i);
			if (irq < 0) {
				ret = irq;
				goto err_channel_add;
			}

			chan = mv_xor_channel_add(xordev, pdev, i,
						  cd->cap_mask, irq,
						  XOR_MODE_IN_REG);
			if (IS_ERR(chan)) {
				ret = PTR_ERR(chan);
				goto err_channel_add;
			}

			xordev->channels[i] = chan;
		}
	}

	return 0;

err_channel_add:
	for (i = 0; i < MV_XOR_MAX_CHANNELS; i++)
		if (xordev->channels[i]) {
			mv_xor_channel_remove(xordev->channels[i]);
			if (pdev->dev.of_node)
				irq_dispose_mapping(xordev->channels[i]->irq);
		}

	if (!IS_ERR(xordev->clk)) {
		clk_disable_unprepare(xordev->clk);
		clk_put(xordev->clk);
	}

	return ret;
}

static struct platform_driver mv_xor_driver = {
	.probe		= mv_xor_probe,
	.suspend	= mv_xor_suspend,
	.resume		= mv_xor_resume,
	.driver		= {
		.name		= MV_XOR_NAME,
		.of_match_table = of_match_ptr(mv_xor_dt_ids),
	},
};


static int __init mv_xor_init(void)
{
	return platform_driver_register(&mv_xor_driver);
}
device_initcall(mv_xor_init);

/*
MODULE_AUTHOR("Saeed Bishara <saeed@marvell.com>");
MODULE_DESCRIPTION("DMA engine driver for Marvell's XOR engine");
MODULE_LICENSE("GPL");
*/