/*
 * DMA driver for Xilinx Video DMA Engine
 *
 * Copyright (C) 2010-2014 Xilinx, Inc. All rights reserved.
 *
 * Based on the Freescale DMA driver.
 *
 * Description:
 * The AXI Video Direct Memory Access (AXI VDMA) core is a soft Xilinx IP
 * core that provides high-bandwidth direct memory access between memory
 * and AXI4-Stream type video target peripherals. The core provides efficient
 * two dimensional DMA operations with independent asynchronous read (MM2S)
 * and write (S2MM) channel operation. It can be configured to have either
 * one channel or two channels. If configured as two channels, one is to
 * transmit to the video device (MM2S) and another is to receive from the
 * video device (S2MM). Initialization, status, interrupt and management
 * registers are accessed through an AXI4-Lite slave interface.
 *
 * This program is free software: you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation, either version 2 of the License, or
 * (at your option) any later version.
 */
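
/*
 * Client usage sketch (illustrative only, not compiled as part of this
 * driver): a consumer would typically claim a channel through the dmaengine
 * API and submit one frame per interleaved template. "dev", the channel
 * name "axivdma0" and the DMA address "buf_phys" are placeholders assumed
 * for this sketch, not names defined in this file.
 *
 *	struct dma_chan *chan;
 *	struct dma_interleaved_template *xt;
 *	struct dma_async_tx_descriptor *tx;
 *
 *	chan = dma_request_slave_channel(dev, "axivdma0");
 *	xt = kzalloc(sizeof(*xt) + sizeof(xt->sgl[0]), GFP_KERNEL);
 *	xt->dir = DMA_MEM_TO_DEV;
 *	xt->src_start = buf_phys;
 *	xt->numf = 1080;		   lines per frame (vsize)
 *	xt->frame_size = 1;		   this driver requires exactly 1
 *	xt->sgl[0].size = 1920 * 4;	   bytes per line (hsize)
 *	xt->sgl[0].icg = 0;		   inter-line gap, adds to stride
 *
 *	tx = dmaengine_prep_interleaved_dma(chan, xt, DMA_CTRL_ACK);
 *	dmaengine_submit(tx);
 *	dma_async_issue_pending(chan);
 */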

#include <linux/amba/xilinx_dma.h>
#include <linux/bitops.h>
#include <linux/dmapool.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/of_address.h>
#include <linux/of_dma.h>
#include <linux/of_platform.h>
#include <linux/of_irq.h>
#include <linux/slab.h>

#include "../dmaengine.h"

/* Register/Descriptor Offsets */
#define XILINX_VDMA_MM2S_CTRL_OFFSET		0x0000
#define XILINX_VDMA_S2MM_CTRL_OFFSET		0x0030
#define XILINX_VDMA_MM2S_DESC_OFFSET		0x0050
#define XILINX_VDMA_S2MM_DESC_OFFSET		0x00a0

/* Control Registers */
#define XILINX_VDMA_REG_DMACR			0x0000
#define XILINX_VDMA_DMACR_DELAY_MAX		0xff
#define XILINX_VDMA_DMACR_DELAY_SHIFT		24
#define XILINX_VDMA_DMACR_FRAME_COUNT_MAX	0xff
#define XILINX_VDMA_DMACR_FRAME_COUNT_SHIFT	16
#define XILINX_VDMA_DMACR_ERR_IRQ		BIT(14)
#define XILINX_VDMA_DMACR_DLY_CNT_IRQ		BIT(13)
#define XILINX_VDMA_DMACR_FRM_CNT_IRQ		BIT(12)
#define XILINX_VDMA_DMACR_MASTER_SHIFT		8
#define XILINX_VDMA_DMACR_FSYNCSRC_SHIFT	5
#define XILINX_VDMA_DMACR_FRAMECNT_EN		BIT(4)
#define XILINX_VDMA_DMACR_GENLOCK_EN		BIT(3)
#define XILINX_VDMA_DMACR_RESET			BIT(2)
#define XILINX_VDMA_DMACR_CIRC_EN		BIT(1)
#define XILINX_VDMA_DMACR_RUNSTOP		BIT(0)
#define XILINX_VDMA_DMACR_FSYNCSRC_MASK		GENMASK(6, 5)

#define XILINX_VDMA_REG_DMASR			0x0004
#define XILINX_VDMA_DMASR_EOL_LATE_ERR		BIT(15)
#define XILINX_VDMA_DMASR_ERR_IRQ		BIT(14)
#define XILINX_VDMA_DMASR_DLY_CNT_IRQ		BIT(13)
#define XILINX_VDMA_DMASR_FRM_CNT_IRQ		BIT(12)
#define XILINX_VDMA_DMASR_SOF_LATE_ERR		BIT(11)
#define XILINX_VDMA_DMASR_SG_DEC_ERR		BIT(10)
#define XILINX_VDMA_DMASR_SG_SLV_ERR		BIT(9)
#define XILINX_VDMA_DMASR_EOF_EARLY_ERR		BIT(8)
#define XILINX_VDMA_DMASR_SOF_EARLY_ERR		BIT(7)
#define XILINX_VDMA_DMASR_DMA_DEC_ERR		BIT(6)
#define XILINX_VDMA_DMASR_DMA_SLAVE_ERR		BIT(5)
#define XILINX_VDMA_DMASR_DMA_INT_ERR		BIT(4)
#define XILINX_VDMA_DMASR_IDLE			BIT(1)
#define XILINX_VDMA_DMASR_HALTED		BIT(0)
#define XILINX_VDMA_DMASR_DELAY_MASK		GENMASK(31, 24)
#define XILINX_VDMA_DMASR_FRAME_COUNT_MASK	GENMASK(23, 16)

#define XILINX_VDMA_REG_CURDESC			0x0008
#define XILINX_VDMA_REG_TAILDESC		0x0010
#define XILINX_VDMA_REG_REG_INDEX		0x0014
#define XILINX_VDMA_REG_FRMSTORE		0x0018
#define XILINX_VDMA_REG_THRESHOLD		0x001c
#define XILINX_VDMA_REG_FRMPTR_STS		0x0024
#define XILINX_VDMA_REG_PARK_PTR		0x0028
#define XILINX_VDMA_PARK_PTR_WR_REF_SHIFT	8
#define XILINX_VDMA_PARK_PTR_RD_REF_SHIFT	0
#define XILINX_VDMA_REG_VDMA_VERSION		0x002c

/* Register Direct Mode Registers */
#define XILINX_VDMA_REG_VSIZE			0x0000
#define XILINX_VDMA_REG_HSIZE			0x0004

#define XILINX_VDMA_REG_FRMDLY_STRIDE		0x0008
#define XILINX_VDMA_FRMDLY_STRIDE_FRMDLY_SHIFT	24
#define XILINX_VDMA_FRMDLY_STRIDE_STRIDE_SHIFT	0

#define XILINX_VDMA_REG_START_ADDRESS(n)	(0x000c + 4 * (n))

/* HW specific definitions */
#define XILINX_VDMA_MAX_CHANS_PER_DEVICE	0x2

#define XILINX_VDMA_DMAXR_ALL_IRQ_MASK	\
		(XILINX_VDMA_DMASR_FRM_CNT_IRQ | \
		 XILINX_VDMA_DMASR_DLY_CNT_IRQ | \
		 XILINX_VDMA_DMASR_ERR_IRQ)

#define XILINX_VDMA_DMASR_ALL_ERR_MASK	\
		(XILINX_VDMA_DMASR_EOL_LATE_ERR | \
		 XILINX_VDMA_DMASR_SOF_LATE_ERR | \
		 XILINX_VDMA_DMASR_SG_DEC_ERR | \
		 XILINX_VDMA_DMASR_SG_SLV_ERR | \
		 XILINX_VDMA_DMASR_EOF_EARLY_ERR | \
		 XILINX_VDMA_DMASR_SOF_EARLY_ERR | \
		 XILINX_VDMA_DMASR_DMA_DEC_ERR | \
		 XILINX_VDMA_DMASR_DMA_SLAVE_ERR | \
		 XILINX_VDMA_DMASR_DMA_INT_ERR)

/*
 * Recoverable errors are DMA Internal error, SOF Early, EOF Early
 * and SOF Late. They are only recoverable when C_FLUSH_ON_FSYNC
 * is enabled in the h/w system.
 */
#define XILINX_VDMA_DMASR_ERR_RECOVER_MASK	\
		(XILINX_VDMA_DMASR_SOF_LATE_ERR | \
		 XILINX_VDMA_DMASR_EOF_EARLY_ERR | \
		 XILINX_VDMA_DMASR_SOF_EARLY_ERR | \
		 XILINX_VDMA_DMASR_DMA_INT_ERR)

/* Axi VDMA Flush on Fsync bits */
#define XILINX_VDMA_FLUSH_S2MM		3
#define XILINX_VDMA_FLUSH_MM2S		2
#define XILINX_VDMA_FLUSH_BOTH		1

/* Delay loop counter to prevent hardware failure */
#define XILINX_VDMA_LOOP_COUNT		1000000

/**
 * struct xilinx_vdma_desc_hw - Hardware Descriptor
 * @next_desc: Next Descriptor Pointer @0x00
 * @pad1: Reserved @0x04
 * @buf_addr: Buffer address @0x08
 * @pad2: Reserved @0x0C
 * @vsize: Vertical Size @0x10
 * @hsize: Horizontal Size @0x14
 * @stride: Number of bytes between the first
 *	    pixels of each horizontal line @0x18
 */
struct xilinx_vdma_desc_hw {
	u32 next_desc;
	u32 pad1;
	u32 buf_addr;
	u32 pad2;
	u32 vsize;
	u32 hsize;
	u32 stride;
} __aligned(64);

/**
 * struct xilinx_vdma_tx_segment - Descriptor segment
 * @hw: Hardware descriptor
 * @node: Node in the descriptor segments list
 * @phys: Physical address of segment
 */
struct xilinx_vdma_tx_segment {
	struct xilinx_vdma_desc_hw hw;
	struct list_head node;
	dma_addr_t phys;
} __aligned(64);

/**
 * struct xilinx_vdma_tx_descriptor - Per Transaction structure
 * @async_tx: Async transaction descriptor
 * @segments: TX segments list
 * @node: Node in the channel descriptors list
 */
struct xilinx_vdma_tx_descriptor {
	struct dma_async_tx_descriptor async_tx;
	struct list_head segments;
	struct list_head node;
};

/**
 * struct xilinx_vdma_chan - Driver specific VDMA channel structure
 * @xdev: Driver specific device structure
 * @ctrl_offset: Control registers offset
 * @desc_offset: TX descriptor registers offset
 * @lock: Descriptor operation lock
 * @pending_list: Descriptors waiting
 * @active_desc: Active descriptor
 * @allocated_desc: Allocated descriptor
 * @done_list: Complete descriptors
 * @common: DMA common channel
 * @desc_pool: Descriptors pool
 * @dev: The dma device
 * @irq: Channel IRQ
 * @id: Channel ID
 * @direction: Transfer direction
 * @num_frms: Number of frames
 * @has_sg: Support scatter transfers
 * @genlock: Support genlock mode
 * @err: Channel has errors
 * @tasklet: Cleanup work after irq
 * @config: Device configuration info
 * @flush_on_fsync: Flush on Frame sync
 */
struct xilinx_vdma_chan {
	struct xilinx_vdma_device *xdev;
	u32 ctrl_offset;
	u32 desc_offset;
	spinlock_t lock;
	struct list_head pending_list;
	struct xilinx_vdma_tx_descriptor *active_desc;
	struct xilinx_vdma_tx_descriptor *allocated_desc;
	struct list_head done_list;
	struct dma_chan common;
	struct dma_pool *desc_pool;
	struct device *dev;
	int irq;
	int id;
	enum dma_transfer_direction direction;
	int num_frms;
	bool has_sg;
	bool genlock;
	bool err;
	struct tasklet_struct tasklet;
	struct xilinx_vdma_config config;
	bool flush_on_fsync;
};

/**
 * struct xilinx_vdma_device - VDMA device structure
 * @regs: I/O mapped base address
 * @dev: Device Structure
 * @common: DMA device structure
 * @chan: Driver specific VDMA channel
 * @has_sg: Specifies whether Scatter-Gather is present or not
 * @flush_on_fsync: Flush on frame sync
 */
struct xilinx_vdma_device {
	void __iomem *regs;
	struct device *dev;
	struct dma_device common;
	struct xilinx_vdma_chan *chan[XILINX_VDMA_MAX_CHANS_PER_DEVICE];
	bool has_sg;
	u32 flush_on_fsync;
};

/* Macros */
#define to_xilinx_chan(chan) \
	container_of(chan, struct xilinx_vdma_chan, common)
#define to_vdma_tx_descriptor(tx) \
	container_of(tx, struct xilinx_vdma_tx_descriptor, async_tx)

/* IO accessors */
static inline u32 vdma_read(struct xilinx_vdma_chan *chan, u32 reg)
{
	return ioread32(chan->xdev->regs + reg);
}

static inline void vdma_write(struct xilinx_vdma_chan *chan, u32 reg, u32 value)
{
	iowrite32(value, chan->xdev->regs + reg);
}

static inline void vdma_desc_write(struct xilinx_vdma_chan *chan, u32 reg,
				   u32 value)
{
	vdma_write(chan, chan->desc_offset + reg, value);
}

static inline u32 vdma_ctrl_read(struct xilinx_vdma_chan *chan, u32 reg)
{
	return vdma_read(chan, chan->ctrl_offset + reg);
}

static inline void vdma_ctrl_write(struct xilinx_vdma_chan *chan, u32 reg,
				   u32 value)
{
	vdma_write(chan, chan->ctrl_offset + reg, value);
}

static inline void vdma_ctrl_clr(struct xilinx_vdma_chan *chan, u32 reg,
				 u32 clr)
{
	vdma_ctrl_write(chan, reg, vdma_ctrl_read(chan, reg) & ~clr);
}

static inline void vdma_ctrl_set(struct xilinx_vdma_chan *chan, u32 reg,
				 u32 set)
{
	vdma_ctrl_write(chan, reg, vdma_ctrl_read(chan, reg) | set);
}

/* -----------------------------------------------------------------------------
 * Descriptors and segments alloc and free
 */

/**
 * xilinx_vdma_alloc_tx_segment - Allocate transaction segment
 * @chan: Driver specific VDMA channel
 *
 * Return: The allocated segment on success and NULL on failure.
 */
static struct xilinx_vdma_tx_segment *
xilinx_vdma_alloc_tx_segment(struct xilinx_vdma_chan *chan)
{
	struct xilinx_vdma_tx_segment *segment;
	dma_addr_t phys;

	segment = dma_pool_alloc(chan->desc_pool, GFP_ATOMIC, &phys);
	if (!segment)
		return NULL;

	memset(segment, 0, sizeof(*segment));
	segment->phys = phys;

	return segment;
}

/**
 * xilinx_vdma_free_tx_segment - Free transaction segment
 * @chan: Driver specific VDMA channel
 * @segment: VDMA transaction segment
 */
static void xilinx_vdma_free_tx_segment(struct xilinx_vdma_chan *chan,
					struct xilinx_vdma_tx_segment *segment)
{
	dma_pool_free(chan->desc_pool, segment, segment->phys);
}

/**
 * xilinx_vdma_alloc_tx_descriptor - Allocate transaction descriptor
 * @chan: Driver specific VDMA channel
 *
 * Return: The allocated descriptor on success and NULL on failure.
 */
static struct xilinx_vdma_tx_descriptor *
xilinx_vdma_alloc_tx_descriptor(struct xilinx_vdma_chan *chan)
{
	struct xilinx_vdma_tx_descriptor *desc;
	unsigned long flags;

	if (chan->allocated_desc)
		return chan->allocated_desc;

	desc = kzalloc(sizeof(*desc), GFP_KERNEL);
	if (!desc)
		return NULL;

	spin_lock_irqsave(&chan->lock, flags);
	chan->allocated_desc = desc;
	spin_unlock_irqrestore(&chan->lock, flags);

	INIT_LIST_HEAD(&desc->segments);

	return desc;
}

/**
 * xilinx_vdma_free_tx_descriptor - Free transaction descriptor
 * @chan: Driver specific VDMA channel
 * @desc: VDMA transaction descriptor
 */
static void
xilinx_vdma_free_tx_descriptor(struct xilinx_vdma_chan *chan,
			       struct xilinx_vdma_tx_descriptor *desc)
{
	struct xilinx_vdma_tx_segment *segment, *next;

	if (!desc)
		return;

	list_for_each_entry_safe(segment, next, &desc->segments, node) {
		list_del(&segment->node);
		xilinx_vdma_free_tx_segment(chan, segment);
	}

	kfree(desc);
}

/* Required functions */

/**
 * xilinx_vdma_free_desc_list - Free descriptors list
 * @chan: Driver specific VDMA channel
 * @list: List to parse and delete the descriptor
 */
static void xilinx_vdma_free_desc_list(struct xilinx_vdma_chan *chan,
				       struct list_head *list)
{
	struct xilinx_vdma_tx_descriptor *desc, *next;

	list_for_each_entry_safe(desc, next, list, node) {
		list_del(&desc->node);
		xilinx_vdma_free_tx_descriptor(chan, desc);
	}
}

/**
 * xilinx_vdma_free_descriptors - Free channel descriptors
 * @chan: Driver specific VDMA channel
 */
static void xilinx_vdma_free_descriptors(struct xilinx_vdma_chan *chan)
{
	unsigned long flags;

	spin_lock_irqsave(&chan->lock, flags);

	xilinx_vdma_free_desc_list(chan, &chan->pending_list);
	xilinx_vdma_free_desc_list(chan, &chan->done_list);

	xilinx_vdma_free_tx_descriptor(chan, chan->active_desc);
	chan->active_desc = NULL;

	spin_unlock_irqrestore(&chan->lock, flags);
}

/**
 * xilinx_vdma_free_chan_resources - Free channel resources
 * @dchan: DMA channel
 */
static void xilinx_vdma_free_chan_resources(struct dma_chan *dchan)
{
	struct xilinx_vdma_chan *chan = to_xilinx_chan(dchan);

	dev_dbg(chan->dev, "Free all channel resources.\n");

	xilinx_vdma_free_descriptors(chan);
	dma_pool_destroy(chan->desc_pool);
	chan->desc_pool = NULL;
}

/**
 * xilinx_vdma_chan_desc_cleanup - Clean channel descriptors
 * @chan: Driver specific VDMA channel
 */
static void xilinx_vdma_chan_desc_cleanup(struct xilinx_vdma_chan *chan)
{
	struct xilinx_vdma_tx_descriptor *desc, *next;
	unsigned long flags;

	spin_lock_irqsave(&chan->lock, flags);

	list_for_each_entry_safe(desc, next, &chan->done_list, node) {
		dma_async_tx_callback callback;
		void *callback_param;

		/* Remove from the list of running transactions */
		list_del(&desc->node);

		/* Run the link descriptor callback function */
		callback = desc->async_tx.callback;
		callback_param = desc->async_tx.callback_param;
		if (callback) {
			spin_unlock_irqrestore(&chan->lock, flags);
			callback(callback_param);
			spin_lock_irqsave(&chan->lock, flags);
		}

		/* Run any dependencies, then free the descriptor */
		dma_run_dependencies(&desc->async_tx);
		xilinx_vdma_free_tx_descriptor(chan, desc);
	}

	spin_unlock_irqrestore(&chan->lock, flags);
}

/**
 * xilinx_vdma_do_tasklet - Completion tasklet that cleans up descriptors
 * @data: Pointer to the Xilinx VDMA channel structure
 */
static void xilinx_vdma_do_tasklet(unsigned long data)
{
	struct xilinx_vdma_chan *chan = (struct xilinx_vdma_chan *)data;

	xilinx_vdma_chan_desc_cleanup(chan);
}

/**
 * xilinx_vdma_alloc_chan_resources - Allocate channel resources
 * @dchan: DMA channel
 *
 * Return: '0' on success and failure value on error
 */
static int xilinx_vdma_alloc_chan_resources(struct dma_chan *dchan)
{
	struct xilinx_vdma_chan *chan = to_xilinx_chan(dchan);

	/* Has this channel already been allocated? */
	if (chan->desc_pool)
		return 0;

	/*
	 * We need the descriptor to be aligned to 64 bytes
	 * to meet the Xilinx VDMA specification requirement.
	 */
	chan->desc_pool = dma_pool_create("xilinx_vdma_desc_pool",
				chan->dev,
				sizeof(struct xilinx_vdma_tx_segment),
				__alignof__(struct xilinx_vdma_tx_segment), 0);
	if (!chan->desc_pool) {
		dev_err(chan->dev,
			"unable to allocate channel %d descriptor pool\n",
			chan->id);
		return -ENOMEM;
	}

	dma_cookie_init(dchan);
	return 0;
}

/**
 * xilinx_vdma_tx_status - Get VDMA transaction status
 * @dchan: DMA channel
 * @cookie: Transaction identifier
 * @txstate: Transaction state
 *
 * Return: DMA transaction status
 */
static enum dma_status xilinx_vdma_tx_status(struct dma_chan *dchan,
					     dma_cookie_t cookie,
					     struct dma_tx_state *txstate)
{
	return dma_cookie_status(dchan, cookie, txstate);
}

/**
 * xilinx_vdma_is_running - Check if VDMA channel is running
 * @chan: Driver specific VDMA channel
 *
 * Return: 'true' if running, 'false' if not.
 */
static bool xilinx_vdma_is_running(struct xilinx_vdma_chan *chan)
{
	return !(vdma_ctrl_read(chan, XILINX_VDMA_REG_DMASR) &
		 XILINX_VDMA_DMASR_HALTED) &&
		(vdma_ctrl_read(chan, XILINX_VDMA_REG_DMACR) &
		 XILINX_VDMA_DMACR_RUNSTOP);
}

/**
 * xilinx_vdma_is_idle - Check if VDMA channel is idle
 * @chan: Driver specific VDMA channel
 *
 * Return: 'true' if idle, 'false' if not.
 */
static bool xilinx_vdma_is_idle(struct xilinx_vdma_chan *chan)
{
	return vdma_ctrl_read(chan, XILINX_VDMA_REG_DMASR) &
		XILINX_VDMA_DMASR_IDLE;
}

/**
 * xilinx_vdma_halt - Halt VDMA channel
 * @chan: Driver specific VDMA channel
 */
static void xilinx_vdma_halt(struct xilinx_vdma_chan *chan)
{
	int loop = XILINX_VDMA_LOOP_COUNT;

	vdma_ctrl_clr(chan, XILINX_VDMA_REG_DMACR, XILINX_VDMA_DMACR_RUNSTOP);

	/*
	 * Wait for the hardware to halt. Note the pre-decrement: with a
	 * post-decrement the counter would end at -1 on timeout and the
	 * error below would never be reported.
	 */
	do {
		if (vdma_ctrl_read(chan, XILINX_VDMA_REG_DMASR) &
		    XILINX_VDMA_DMASR_HALTED)
			break;
	} while (--loop);

	if (!loop) {
		dev_err(chan->dev, "Cannot stop channel %p: %x\n",
			chan, vdma_ctrl_read(chan, XILINX_VDMA_REG_DMASR));
		chan->err = true;
	}
}

/**
 * xilinx_vdma_start - Start VDMA channel
 * @chan: Driver specific VDMA channel
 */
static void xilinx_vdma_start(struct xilinx_vdma_chan *chan)
{
	int loop = XILINX_VDMA_LOOP_COUNT;

	vdma_ctrl_set(chan, XILINX_VDMA_REG_DMACR, XILINX_VDMA_DMACR_RUNSTOP);

	/* Wait for the hardware to start (pre-decrement, see halt above) */
	do {
		if (!(vdma_ctrl_read(chan, XILINX_VDMA_REG_DMASR) &
		      XILINX_VDMA_DMASR_HALTED))
			break;
	} while (--loop);

	if (!loop) {
		dev_err(chan->dev, "Cannot start channel %p: %x\n",
			chan, vdma_ctrl_read(chan, XILINX_VDMA_REG_DMASR));

		chan->err = true;
	}
}

/**
 * xilinx_vdma_start_transfer - Starts VDMA transfer
 * @chan: Driver specific channel struct pointer
 */
static void xilinx_vdma_start_transfer(struct xilinx_vdma_chan *chan)
{
	struct xilinx_vdma_config *config = &chan->config;
	struct xilinx_vdma_tx_descriptor *desc;
	unsigned long flags;
	u32 reg;
	struct xilinx_vdma_tx_segment *head, *tail = NULL;

	if (chan->err)
		return;

	spin_lock_irqsave(&chan->lock, flags);

	/* There's already an active descriptor, bail out. */
	if (chan->active_desc)
		goto out_unlock;

	if (list_empty(&chan->pending_list))
		goto out_unlock;

	desc = list_first_entry(&chan->pending_list,
				struct xilinx_vdma_tx_descriptor, node);

	/* If it is SG mode and hardware is busy, cannot submit */
	if (chan->has_sg && xilinx_vdma_is_running(chan) &&
	    !xilinx_vdma_is_idle(chan)) {
		dev_dbg(chan->dev, "DMA controller still busy\n");
		goto out_unlock;
	}

	/*
	 * If hardware is idle, then all descriptors on the running lists are
	 * done, start new transfers
	 */
	if (chan->has_sg) {
		head = list_first_entry(&desc->segments,
					struct xilinx_vdma_tx_segment, node);
		tail = list_entry(desc->segments.prev,
				  struct xilinx_vdma_tx_segment, node);

		vdma_ctrl_write(chan, XILINX_VDMA_REG_CURDESC, head->phys);
	}

	/* Configure the hardware using info in the config structure */
	reg = vdma_ctrl_read(chan, XILINX_VDMA_REG_DMACR);

	if (config->frm_cnt_en)
		reg |= XILINX_VDMA_DMACR_FRAMECNT_EN;
	else
		reg &= ~XILINX_VDMA_DMACR_FRAMECNT_EN;

	/*
	 * With SG, start with circular mode, so that BDs can be fetched.
	 * In direct register mode, if not parking, enable circular mode
	 */
	if (chan->has_sg || !config->park)
		reg |= XILINX_VDMA_DMACR_CIRC_EN;

	if (config->park)
		reg &= ~XILINX_VDMA_DMACR_CIRC_EN;

	vdma_ctrl_write(chan, XILINX_VDMA_REG_DMACR, reg);

	if (config->park && (config->park_frm >= 0) &&
	    (config->park_frm < chan->num_frms)) {
		if (chan->direction == DMA_MEM_TO_DEV)
			vdma_write(chan, XILINX_VDMA_REG_PARK_PTR,
				   config->park_frm <<
				   XILINX_VDMA_PARK_PTR_RD_REF_SHIFT);
		else
			vdma_write(chan, XILINX_VDMA_REG_PARK_PTR,
				   config->park_frm <<
				   XILINX_VDMA_PARK_PTR_WR_REF_SHIFT);
	}

	/* Start the hardware */
	xilinx_vdma_start(chan);

	if (chan->err)
		goto out_unlock;

	/* Start the transfer */
	if (chan->has_sg) {
		vdma_ctrl_write(chan, XILINX_VDMA_REG_TAILDESC, tail->phys);
	} else {
		struct xilinx_vdma_tx_segment *segment, *last = NULL;
		int i = 0;

		list_for_each_entry(segment, &desc->segments, node) {
			vdma_desc_write(chan,
					XILINX_VDMA_REG_START_ADDRESS(i++),
					segment->hw.buf_addr);
			last = segment;
		}

		if (!last)
			goto out_unlock;

		/*
		 * HW expects these parameters to be the same for one
		 * transaction. VSIZE is written last, as that write
		 * commits the settings and starts the transfer.
		 */
		vdma_desc_write(chan, XILINX_VDMA_REG_HSIZE, last->hw.hsize);
		vdma_desc_write(chan, XILINX_VDMA_REG_FRMDLY_STRIDE,
				last->hw.stride);
		vdma_desc_write(chan, XILINX_VDMA_REG_VSIZE, last->hw.vsize);
	}

	list_del(&desc->node);
	chan->active_desc = desc;

out_unlock:
	spin_unlock_irqrestore(&chan->lock, flags);
}

/**
 * xilinx_vdma_issue_pending - Issue pending transactions
 * @dchan: DMA channel
 */
static void xilinx_vdma_issue_pending(struct dma_chan *dchan)
{
	struct xilinx_vdma_chan *chan = to_xilinx_chan(dchan);

	xilinx_vdma_start_transfer(chan);
}

/**
 * xilinx_vdma_complete_descriptor - Mark the active descriptor as complete
 * @chan: xilinx DMA channel
 *
 * CONTEXT: hardirq
 */
static void xilinx_vdma_complete_descriptor(struct xilinx_vdma_chan *chan)
{
	struct xilinx_vdma_tx_descriptor *desc;
	unsigned long flags;

	spin_lock_irqsave(&chan->lock, flags);

	desc = chan->active_desc;
	if (!desc) {
		dev_dbg(chan->dev, "no running descriptors\n");
		goto out_unlock;
	}

	dma_cookie_complete(&desc->async_tx);
	list_add_tail(&desc->node, &chan->done_list);

	chan->active_desc = NULL;

out_unlock:
	spin_unlock_irqrestore(&chan->lock, flags);
}

/**
 * xilinx_vdma_reset - Reset VDMA channel
 * @chan: Driver specific VDMA channel
 *
 * Return: '0' on success and failure value on error
 */
static int xilinx_vdma_reset(struct xilinx_vdma_chan *chan)
{
	int loop = XILINX_VDMA_LOOP_COUNT;
	u32 tmp;

	vdma_ctrl_set(chan, XILINX_VDMA_REG_DMACR, XILINX_VDMA_DMACR_RESET);

	/*
	 * Wait for the hardware to finish reset. Time out on the RESET bit
	 * still being set, not on the loop counter, which ends at -1 with
	 * a post-decrement and would mask the timeout.
	 */
	do {
		tmp = vdma_ctrl_read(chan, XILINX_VDMA_REG_DMACR) &
		      XILINX_VDMA_DMACR_RESET;
	} while (--loop && tmp);

	if (tmp) {
		dev_err(chan->dev, "reset timeout, cr %x, sr %x\n",
			vdma_ctrl_read(chan, XILINX_VDMA_REG_DMACR),
			vdma_ctrl_read(chan, XILINX_VDMA_REG_DMASR));
		return -ETIMEDOUT;
	}

	chan->err = false;

	return 0;
}

/**
 * xilinx_vdma_chan_reset - Reset VDMA channel and enable interrupts
 * @chan: Driver specific VDMA channel
 *
 * Return: '0' on success and failure value on error
 */
static int xilinx_vdma_chan_reset(struct xilinx_vdma_chan *chan)
{
	int err;

	/* Reset VDMA */
	err = xilinx_vdma_reset(chan);
	if (err)
		return err;

	/* Enable interrupts */
	vdma_ctrl_set(chan, XILINX_VDMA_REG_DMACR,
		      XILINX_VDMA_DMAXR_ALL_IRQ_MASK);

	return 0;
}

/**
 * xilinx_vdma_irq_handler - VDMA Interrupt handler
 * @irq: IRQ number
 * @data: Pointer to the Xilinx VDMA channel structure
 *
 * Return: IRQ_HANDLED/IRQ_NONE
 */
static irqreturn_t xilinx_vdma_irq_handler(int irq, void *data)
{
	struct xilinx_vdma_chan *chan = data;
	u32 status;

	/* Read the status and ack the interrupts. */
	status = vdma_ctrl_read(chan, XILINX_VDMA_REG_DMASR);
	if (!(status & XILINX_VDMA_DMAXR_ALL_IRQ_MASK))
		return IRQ_NONE;

	vdma_ctrl_write(chan, XILINX_VDMA_REG_DMASR,
			status & XILINX_VDMA_DMAXR_ALL_IRQ_MASK);

	if (status & XILINX_VDMA_DMASR_ERR_IRQ) {
		/*
		 * An error occurred. If C_FLUSH_ON_FSYNC is enabled and the
		 * error is recoverable, ignore it. Otherwise flag the error.
		 *
		 * Only recoverable errors can be cleared in the DMASR
		 * register, so take care not to write 1 to any other
		 * error bits.
		 */
		u32 errors = status & XILINX_VDMA_DMASR_ALL_ERR_MASK;

		vdma_ctrl_write(chan, XILINX_VDMA_REG_DMASR,
				errors & XILINX_VDMA_DMASR_ERR_RECOVER_MASK);

		if (!chan->flush_on_fsync ||
		    (errors & ~XILINX_VDMA_DMASR_ERR_RECOVER_MASK)) {
			dev_err(chan->dev,
				"Channel %p has errors %x, cdr %x tdr %x\n",
				chan, errors,
				vdma_ctrl_read(chan, XILINX_VDMA_REG_CURDESC),
				vdma_ctrl_read(chan, XILINX_VDMA_REG_TAILDESC));
			chan->err = true;
		}
	}

	if (status & XILINX_VDMA_DMASR_DLY_CNT_IRQ) {
		/*
		 * Device takes too long to do the transfer when user requires
		 * responsiveness.
		 */
		dev_dbg(chan->dev, "Inter-packet latency too long\n");
	}

	if (status & XILINX_VDMA_DMASR_FRM_CNT_IRQ) {
		xilinx_vdma_complete_descriptor(chan);
		xilinx_vdma_start_transfer(chan);
	}

	tasklet_schedule(&chan->tasklet);
	return IRQ_HANDLED;
}

/**
 * xilinx_vdma_tx_submit - Submit DMA transaction
 * @tx: Async transaction descriptor
 *
 * Return: cookie value on success and failure value on error
 */
static dma_cookie_t xilinx_vdma_tx_submit(struct dma_async_tx_descriptor *tx)
{
	struct xilinx_vdma_tx_descriptor *desc = to_vdma_tx_descriptor(tx);
	struct xilinx_vdma_chan *chan = to_xilinx_chan(tx->chan);
	dma_cookie_t cookie;
	unsigned long flags;
	int err;

	if (chan->err) {
		/*
		 * The channel is in an error state: try to reset it. If the
		 * reset fails too, the channel is no longer functional and
		 * the system needs a hard reset.
		 */
		err = xilinx_vdma_chan_reset(chan);
		if (err < 0)
			return err;
	}

	spin_lock_irqsave(&chan->lock, flags);

	cookie = dma_cookie_assign(tx);

	/* Append the transaction to the pending transactions queue. */
	list_add_tail(&desc->node, &chan->pending_list);

	/* Release the allocated desc; it is now owned by the pending list */
	chan->allocated_desc = NULL;

	spin_unlock_irqrestore(&chan->lock, flags);

	return cookie;
}

/**
 * xilinx_vdma_dma_prep_interleaved - prepare a descriptor for a
 *	DMA_SLAVE transaction
 * @dchan: DMA channel
 * @xt: Interleaved template pointer
 * @flags: transfer ack flags
 *
 * Return: Async transaction descriptor on success and NULL on failure
 */
static struct dma_async_tx_descriptor *
xilinx_vdma_dma_prep_interleaved(struct dma_chan *dchan,
				 struct dma_interleaved_template *xt,
				 unsigned long flags)
{
	struct xilinx_vdma_chan *chan = to_xilinx_chan(dchan);
	struct xilinx_vdma_tx_descriptor *desc;
	struct xilinx_vdma_tx_segment *segment, *prev = NULL;
	struct xilinx_vdma_desc_hw *hw;

	if (!is_slave_direction(xt->dir))
		return NULL;

	if (!xt->numf || !xt->sgl[0].size)
		return NULL;

	if (xt->frame_size != 1)
		return NULL;

	/* Allocate a transaction descriptor. */
	desc = xilinx_vdma_alloc_tx_descriptor(chan);
	if (!desc)
		return NULL;

	dma_async_tx_descriptor_init(&desc->async_tx, &chan->common);
	desc->async_tx.tx_submit = xilinx_vdma_tx_submit;
	async_tx_ack(&desc->async_tx);

	/* Allocate the link descriptor from DMA pool */
	segment = xilinx_vdma_alloc_tx_segment(chan);
	if (!segment)
		goto error;

	/* Fill in the hardware descriptor */
	hw = &segment->hw;
	hw->vsize = xt->numf;
	hw->hsize = xt->sgl[0].size;
	hw->stride = (xt->sgl[0].icg + xt->sgl[0].size) <<
		     XILINX_VDMA_FRMDLY_STRIDE_STRIDE_SHIFT;
	hw->stride |= chan->config.frm_dly <<
		      XILINX_VDMA_FRMDLY_STRIDE_FRMDLY_SHIFT;

	if (xt->dir != DMA_MEM_TO_DEV)
		hw->buf_addr = xt->dst_start;
	else
		hw->buf_addr = xt->src_start;

	/* Link the previous segment's next descriptor to the current one */
	if (!list_empty(&desc->segments)) {
		prev = list_last_entry(&desc->segments,
				       struct xilinx_vdma_tx_segment, node);
		prev->hw.next_desc = segment->phys;
	}

	/* Insert the segment into the descriptor segments list. */
	list_add_tail(&segment->node, &desc->segments);

	prev = segment;

	/* Link the last hardware descriptor with the first. */
	segment = list_first_entry(&desc->segments,
				   struct xilinx_vdma_tx_segment, node);
	prev->hw.next_desc = segment->phys;

	return &desc->async_tx;

error:
	xilinx_vdma_free_tx_descriptor(chan, desc);
	return NULL;
}
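
/*
 * Worked stride example for the descriptor fields above (assumed numbers,
 * for illustration only): a 1920x1080 frame of 32-bit pixels with no
 * inter-line gap gives hsize = xt->sgl[0].size = 1920 * 4 = 7680 bytes,
 * vsize = xt->numf = 1080 lines, and stride = icg + size = 0 + 7680 =
 * 7680 bytes from the start of one line to the start of the next.
 */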

/**
 * xilinx_vdma_terminate_all - Halt the channel and free descriptors
 * @dchan: DMA channel pointer
 *
 * Return: Always '0'
 */
static int xilinx_vdma_terminate_all(struct dma_chan *dchan)
{
	struct xilinx_vdma_chan *chan = to_xilinx_chan(dchan);

	/* Halt the DMA engine */
	xilinx_vdma_halt(chan);

	/* Remove and free all of the descriptors in the lists */
	xilinx_vdma_free_descriptors(chan);

	return 0;
}

/**
 * xilinx_vdma_channel_set_config - Configure VDMA channel
 * Run-time configuration for Axi VDMA, supports:
 * . halt the channel
 * . configure interrupt coalescing and inter-packet delay threshold
 * . start/stop parking
 * . enable genlock
 *
 * @dchan: DMA channel
 * @cfg: VDMA device configuration pointer
 *
 * Return: '0' on success and failure value on error
 */
int xilinx_vdma_channel_set_config(struct dma_chan *dchan,
				   struct xilinx_vdma_config *cfg)
{
	struct xilinx_vdma_chan *chan = to_xilinx_chan(dchan);
	u32 dmacr;

	if (cfg->reset)
		return xilinx_vdma_chan_reset(chan);

	dmacr = vdma_ctrl_read(chan, XILINX_VDMA_REG_DMACR);

	chan->config.frm_dly = cfg->frm_dly;
	chan->config.park = cfg->park;

	/* genlock settings */
	chan->config.gen_lock = cfg->gen_lock;
	chan->config.master = cfg->master;

	if (cfg->gen_lock && chan->genlock) {
		dmacr |= XILINX_VDMA_DMACR_GENLOCK_EN;
		dmacr |= cfg->master << XILINX_VDMA_DMACR_MASTER_SHIFT;
	}

	chan->config.frm_cnt_en = cfg->frm_cnt_en;
	if (cfg->park)
		chan->config.park_frm = cfg->park_frm;
	else
		chan->config.park_frm = -1;

	chan->config.coalesc = cfg->coalesc;
	chan->config.delay = cfg->delay;

	if (cfg->coalesc <= XILINX_VDMA_DMACR_FRAME_COUNT_MAX) {
		dmacr |= cfg->coalesc << XILINX_VDMA_DMACR_FRAME_COUNT_SHIFT;
		chan->config.coalesc = cfg->coalesc;
	}

	if (cfg->delay <= XILINX_VDMA_DMACR_DELAY_MAX) {
		dmacr |= cfg->delay << XILINX_VDMA_DMACR_DELAY_SHIFT;
		chan->config.delay = cfg->delay;
	}

	/* FSync Source selection */
	dmacr &= ~XILINX_VDMA_DMACR_FSYNCSRC_MASK;
	dmacr |= cfg->ext_fsync << XILINX_VDMA_DMACR_FSYNCSRC_SHIFT;

	vdma_ctrl_write(chan, XILINX_VDMA_REG_DMACR, dmacr);

	return 0;
}
EXPORT_SYMBOL(xilinx_vdma_channel_set_config);

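/*
 * Configuration sketch (illustrative): a client that holds a channel from
 * this driver could, for example, park on frame 0 and take an interrupt on
 * every completed frame. All values below are example choices, not
 * requirements; "chan" is assumed to be a channel obtained elsewhere.
 *
 *	struct xilinx_vdma_config cfg = { };
 *
 *	cfg.park = 1;
 *	cfg.park_frm = 0;
 *	cfg.frm_cnt_en = 1;
 *	cfg.coalesc = 1;
 *	xilinx_vdma_channel_set_config(chan, &cfg);
 */
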
/* -----------------------------------------------------------------------------
 * Probe and remove
 */

/**
 * xilinx_vdma_chan_remove - Per Channel remove function
 * @chan: Driver specific VDMA channel
 */
static void xilinx_vdma_chan_remove(struct xilinx_vdma_chan *chan)
{
	/* Disable all interrupts */
	vdma_ctrl_clr(chan, XILINX_VDMA_REG_DMACR,
		      XILINX_VDMA_DMAXR_ALL_IRQ_MASK);

	if (chan->irq > 0)
		free_irq(chan->irq, chan);

	tasklet_kill(&chan->tasklet);

	list_del(&chan->common.device_node);
}

/**
 * xilinx_vdma_chan_probe - Per Channel Probing
 * It gets the channel features from the device tree entry and
 * initializes the special channel handling routines
 *
 * @xdev: Driver specific device structure
 * @node: Device node
 *
 * Return: '0' on success and failure value on error
 */
static int xilinx_vdma_chan_probe(struct xilinx_vdma_device *xdev,
				  struct device_node *node)
{
	struct xilinx_vdma_chan *chan;
	bool has_dre = false;
	u32 value, width;
	int err;

	/* Allocate and initialize the channel structure */
	chan = devm_kzalloc(xdev->dev, sizeof(*chan), GFP_KERNEL);
	if (!chan)
		return -ENOMEM;

	chan->dev = xdev->dev;
	chan->xdev = xdev;
	chan->has_sg = xdev->has_sg;

	spin_lock_init(&chan->lock);
	INIT_LIST_HEAD(&chan->pending_list);
	INIT_LIST_HEAD(&chan->done_list);

	/* Retrieve the channel properties from the device tree */
	has_dre = of_property_read_bool(node, "xlnx,include-dre");

	chan->genlock = of_property_read_bool(node, "xlnx,genlock-mode");

	err = of_property_read_u32(node, "xlnx,datawidth", &value);
	if (err) {
		dev_err(xdev->dev, "missing xlnx,datawidth property\n");
		return err;
	}
	width = value >> 3; /* Convert bits to bytes */

	/* If data width is greater than 8 bytes, DRE is not in hw */
	if (width > 8)
		has_dre = false;

	if (!has_dre)
		xdev->common.copy_align = fls(width - 1);

	if (of_device_is_compatible(node, "xlnx,axi-vdma-mm2s-channel")) {
		chan->direction = DMA_MEM_TO_DEV;
		chan->id = 0;

		chan->ctrl_offset = XILINX_VDMA_MM2S_CTRL_OFFSET;
		chan->desc_offset = XILINX_VDMA_MM2S_DESC_OFFSET;

		if (xdev->flush_on_fsync == XILINX_VDMA_FLUSH_BOTH ||
		    xdev->flush_on_fsync == XILINX_VDMA_FLUSH_MM2S)
			chan->flush_on_fsync = true;
	} else if (of_device_is_compatible(node,
					   "xlnx,axi-vdma-s2mm-channel")) {
		chan->direction = DMA_DEV_TO_MEM;
		chan->id = 1;

		chan->ctrl_offset = XILINX_VDMA_S2MM_CTRL_OFFSET;
		chan->desc_offset = XILINX_VDMA_S2MM_DESC_OFFSET;

		if (xdev->flush_on_fsync == XILINX_VDMA_FLUSH_BOTH ||
		    xdev->flush_on_fsync == XILINX_VDMA_FLUSH_S2MM)
			chan->flush_on_fsync = true;
	} else {
		dev_err(xdev->dev, "Invalid channel compatible node\n");
		return -EINVAL;
	}

	/* Request the interrupt */
	chan->irq = irq_of_parse_and_map(node, 0);
	err = request_irq(chan->irq, xilinx_vdma_irq_handler, IRQF_SHARED,
			  "xilinx-vdma-controller", chan);
	if (err) {
		dev_err(xdev->dev, "unable to request IRQ %d\n", chan->irq);
		return err;
	}

	/* Initialize the tasklet */
	tasklet_init(&chan->tasklet, xilinx_vdma_do_tasklet,
		     (unsigned long)chan);

	/*
	 * Initialize the DMA channel and add it to the DMA engine channels
	 * list.
	 */
	chan->common.device = &xdev->common;

	list_add_tail(&chan->common.device_node, &xdev->common.channels);
	xdev->chan[chan->id] = chan;

	/* Reset the channel */
	err = xilinx_vdma_chan_reset(chan);
	if (err < 0) {
		dev_err(xdev->dev, "Reset channel failed\n");
		return err;
	}

	return 0;
}
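
/*
 * Example device tree fragment parsed by the probe functions in this file
 * (illustrative only; the addresses, interrupt numbers and data widths are
 * assumed values that depend on the hardware design):
 *
 *	axi_vdma_0: axivdma@40030000 {
 *		compatible = "xlnx,axi-vdma-1.00.a";
 *		#dma-cells = <1>;
 *		reg = <0x40030000 0x10000>;
 *		xlnx,num-fstores = <0x8>;
 *		xlnx,flush-fsync = <0x1>;
 *		dma-channel@40030000 {
 *			compatible = "xlnx,axi-vdma-mm2s-channel";
 *			interrupts = <0 54 4>;
 *			xlnx,datawidth = <0x40>;
 *		};
 *		dma-channel@40030030 {
 *			compatible = "xlnx,axi-vdma-s2mm-channel";
 *			interrupts = <0 53 4>;
 *			xlnx,datawidth = <0x40>;
 *		};
 *	};
 */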

/**
 * of_dma_xilinx_xlate - Translation function
 * @dma_spec: Pointer to DMA specifier as found in the device tree
 * @ofdma: Pointer to DMA controller data
 *
 * Return: DMA channel pointer on success and NULL on error
 */
static struct dma_chan *of_dma_xilinx_xlate(struct of_phandle_args *dma_spec,
					    struct of_dma *ofdma)
{
	struct xilinx_vdma_device *xdev = ofdma->of_dma_data;
	int chan_id = dma_spec->args[0];

	if (chan_id >= XILINX_VDMA_MAX_CHANS_PER_DEVICE)
		return NULL;

	return dma_get_slave_channel(&xdev->chan[chan_id]->common);
}

/**
 * xilinx_vdma_probe - Driver probe function
 * @pdev: Pointer to the platform_device structure
 *
 * Return: '0' on success and failure value on error
 */
static int xilinx_vdma_probe(struct platform_device *pdev)
{
	struct device_node *node = pdev->dev.of_node;
	struct xilinx_vdma_device *xdev;
	struct device_node *child;
	struct resource *io;
	u32 num_frames;
	int i, err;

	/* Allocate and initialize the DMA engine structure */
	xdev = devm_kzalloc(&pdev->dev, sizeof(*xdev), GFP_KERNEL);
	if (!xdev)
		return -ENOMEM;

	xdev->dev = &pdev->dev;

	/* Request and map I/O memory */
	io = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	xdev->regs = devm_ioremap_resource(&pdev->dev, io);
	if (IS_ERR(xdev->regs))
		return PTR_ERR(xdev->regs);

	/* Retrieve the DMA engine properties from the device tree */
	xdev->has_sg = of_property_read_bool(node, "xlnx,include-sg");

	err = of_property_read_u32(node, "xlnx,num-fstores", &num_frames);
	if (err < 0) {
		dev_err(xdev->dev, "missing xlnx,num-fstores property\n");
		return err;
	}

	err = of_property_read_u32(node, "xlnx,flush-fsync",
				   &xdev->flush_on_fsync);
	if (err < 0)
		dev_warn(xdev->dev, "missing xlnx,flush-fsync property\n");

	/* Initialize the DMA engine */
	xdev->common.dev = &pdev->dev;

	INIT_LIST_HEAD(&xdev->common.channels);
	dma_cap_set(DMA_SLAVE, xdev->common.cap_mask);
	dma_cap_set(DMA_PRIVATE, xdev->common.cap_mask);

	xdev->common.device_alloc_chan_resources =
				xilinx_vdma_alloc_chan_resources;
	xdev->common.device_free_chan_resources =
				xilinx_vdma_free_chan_resources;
	xdev->common.device_prep_interleaved_dma =
				xilinx_vdma_dma_prep_interleaved;
	xdev->common.device_terminate_all = xilinx_vdma_terminate_all;
	xdev->common.device_tx_status = xilinx_vdma_tx_status;
	xdev->common.device_issue_pending = xilinx_vdma_issue_pending;

	platform_set_drvdata(pdev, xdev);

	/* Initialize the channels */
	for_each_child_of_node(node, child) {
		err = xilinx_vdma_chan_probe(xdev, child);
		if (err < 0)
			goto error;
	}

	for (i = 0; i < XILINX_VDMA_MAX_CHANS_PER_DEVICE; i++)
		if (xdev->chan[i])
			xdev->chan[i]->num_frms = num_frames;

	/* Register the DMA engine with the core */
	err = dma_async_device_register(&xdev->common);
	if (err) {
		dev_err(&pdev->dev, "Unable to register DMA engine\n");
		goto error;
	}

	err = of_dma_controller_register(node, of_dma_xilinx_xlate,
					 xdev);
	if (err < 0) {
		dev_err(&pdev->dev, "Unable to register DMA to DT\n");
		dma_async_device_unregister(&xdev->common);
		goto error;
	}

	dev_info(&pdev->dev, "Xilinx AXI VDMA Engine Driver Probed!!\n");

	return 0;

error:
	for (i = 0; i < XILINX_VDMA_MAX_CHANS_PER_DEVICE; i++)
		if (xdev->chan[i])
			xilinx_vdma_chan_remove(xdev->chan[i]);

	return err;
}

/**
 * xilinx_vdma_remove - Driver remove function
 * @pdev: Pointer to the platform_device structure
 *
 * Return: Always '0'
 */
static int xilinx_vdma_remove(struct platform_device *pdev)
{
	struct xilinx_vdma_device *xdev = platform_get_drvdata(pdev);
	int i;

	of_dma_controller_free(pdev->dev.of_node);

	dma_async_device_unregister(&xdev->common);

	for (i = 0; i < XILINX_VDMA_MAX_CHANS_PER_DEVICE; i++)
		if (xdev->chan[i])
			xilinx_vdma_chan_remove(xdev->chan[i]);

	return 0;
}

static const struct of_device_id xilinx_vdma_of_ids[] = {
	{ .compatible = "xlnx,axi-vdma-1.00.a",},
	{}
};
MODULE_DEVICE_TABLE(of, xilinx_vdma_of_ids);

static struct platform_driver xilinx_vdma_driver = {
	.driver = {
		.name = "xilinx-vdma",
		.of_match_table = xilinx_vdma_of_ids,
	},
	.probe = xilinx_vdma_probe,
	.remove = xilinx_vdma_remove,
};

module_platform_driver(xilinx_vdma_driver);

MODULE_AUTHOR("Xilinx, Inc.");
MODULE_DESCRIPTION("Xilinx VDMA driver");
MODULE_LICENSE("GPL v2");