Blackfin arch: add volatile markings to DMA MMRs
arch/blackfin/include/asm/dma.h
/*
 * dma.h - Blackfin DMA defines/structures/etc...
 *
 * Copyright 2004-2008 Analog Devices Inc.
 * Licensed under the GPL-2 or later.
 */

#ifndef _BLACKFIN_DMA_H_
#define _BLACKFIN_DMA_H_

#include <linux/interrupt.h>
#include <mach/dma.h>
#include <asm/blackfin.h>
#include <asm/page.h>

#define MAX_DMA_ADDRESS PAGE_OFFSET

/*****************************************************************************
 * Generic DMA Declarations
 *
 ****************************************************************************/
enum dma_chan_status {
	DMA_CHANNEL_FREE,
	DMA_CHANNEL_REQUESTED,
	DMA_CHANNEL_ENABLED,
};

/*-------------------------
 * config reg bits value
 *-------------------------*/
#define DATA_SIZE_8		0
#define DATA_SIZE_16		1
#define DATA_SIZE_32		2

#define DMA_FLOW_STOP		0
#define DMA_FLOW_AUTO		1
#define DMA_FLOW_ARRAY		4
#define DMA_FLOW_SMALL		6
#define DMA_FLOW_LARGE		7

#define DIMENSION_LINEAR	0
#define DIMENSION_2D		1

#define DIR_READ		0
#define DIR_WRITE		1

#define INTR_DISABLE		0
#define INTR_ON_BUF		2
#define INTR_ON_ROW		3

#define DMA_NOSYNC_KEEP_DMA_BUF	0
#define DMA_SYNC_RESTART	1

struct dmasg {
	void *next_desc_addr;
	unsigned long start_addr;
	unsigned short cfg;
	unsigned short x_count;
	short x_modify;
	unsigned short y_count;
	short y_modify;
} __attribute__((packed));

struct dma_register {
	void *next_desc_ptr;		/* DMA Next Descriptor Pointer register */
	unsigned long start_addr;	/* DMA Start address register */

	unsigned short cfg;		/* DMA Configuration register */
	unsigned short dummy1;		/* pad to the next 32-bit boundary */

	unsigned long reserved;

	unsigned short x_count;		/* DMA x_count register */
	unsigned short dummy2;

	short x_modify;			/* DMA x_modify register */
	unsigned short dummy3;

	unsigned short y_count;		/* DMA y_count register */
	unsigned short dummy4;

	short y_modify;			/* DMA y_modify register */
	unsigned short dummy5;

	void *curr_desc_ptr;		/* DMA Current Descriptor Pointer register */
	unsigned long curr_addr_ptr;	/* DMA Current Address Pointer register */
	unsigned short irq_status;	/* DMA irq status register */
	unsigned short dummy6;

	unsigned short peripheral_map;	/* DMA peripheral map register */
	unsigned short dummy7;

	unsigned short curr_x_count;	/* DMA Current x-count register */
	unsigned short dummy8;

	unsigned long reserved2;

	unsigned short curr_y_count;	/* DMA Current y-count register */
	unsigned short dummy9;

	unsigned long reserved3;

};

struct mutex;
struct dma_channel {
	struct mutex dmalock;
	const char *device_id;
	enum dma_chan_status chan_status;
	volatile struct dma_register *regs;
	struct dmasg *sg;		/* large mode descriptor */
	unsigned int irq;
	void *data;
#ifdef CONFIG_PM
	unsigned short saved_peripheral_map;
#endif
};

#ifdef CONFIG_PM
int blackfin_dma_suspend(void);
void blackfin_dma_resume(void);
#endif

/*******************************************************************************
 * DMA APIs
 *******************************************************************************/
extern struct dma_channel dma_ch[MAX_DMA_CHANNELS];
extern struct dma_register *dma_io_base_addr[MAX_DMA_CHANNELS];
extern int channel2irq(unsigned int channel);

static inline void set_dma_start_addr(unsigned int channel, unsigned long addr)
{
	dma_ch[channel].regs->start_addr = addr;
}
static inline void set_dma_next_desc_addr(unsigned int channel, void *addr)
{
	dma_ch[channel].regs->next_desc_ptr = addr;
}
static inline void set_dma_curr_desc_addr(unsigned int channel, void *addr)
{
	dma_ch[channel].regs->curr_desc_ptr = addr;
}
static inline void set_dma_x_count(unsigned int channel, unsigned short x_count)
{
	dma_ch[channel].regs->x_count = x_count;
}
static inline void set_dma_y_count(unsigned int channel, unsigned short y_count)
{
	dma_ch[channel].regs->y_count = y_count;
}
static inline void set_dma_x_modify(unsigned int channel, short x_modify)
{
	dma_ch[channel].regs->x_modify = x_modify;
}
static inline void set_dma_y_modify(unsigned int channel, short y_modify)
{
	dma_ch[channel].regs->y_modify = y_modify;
}
static inline void set_dma_config(unsigned int channel, unsigned short config)
{
	dma_ch[channel].regs->cfg = config;
}
static inline void set_dma_curr_addr(unsigned int channel, unsigned long addr)
{
	dma_ch[channel].regs->curr_addr_ptr = addr;
}

static inline unsigned short
set_bfin_dma_config(char direction, char flow_mode,
		    char intr_mode, char dma_mode, char width, char syncmode)
{
	return (direction << 1) | (width << 2) | (dma_mode << 4) |
		(intr_mode << 6) | (flow_mode << 12) | (syncmode << 5);
}
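
/*
 * Usage sketch (illustrative only, not taken from an in-tree driver;
 * "channel" stands for whatever channel number the caller obtained from
 * request_dma()): building a config word with the helper above and
 * programming it with set_dma_config().  The shifts place direction at
 * bit 1, width at bits 2-3, dma_mode at bit 4, syncmode at bit 5,
 * intr_mode at bits 6-7 and flow_mode at bits 12-14 of the channel's
 * config register; DMAEN (bit 0) is turned on later by enable_dma().
 *
 *	unsigned short cfg;
 *
 *	cfg = set_bfin_dma_config(DIR_WRITE, DMA_FLOW_STOP, INTR_ON_BUF,
 *				  DIMENSION_LINEAR, DATA_SIZE_16,
 *				  DMA_SYNC_RESTART);
 *	set_dma_config(channel, cfg);
 */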

static inline unsigned short get_dma_curr_irqstat(unsigned int channel)
{
	return dma_ch[channel].regs->irq_status;
}
static inline unsigned short get_dma_curr_xcount(unsigned int channel)
{
	return dma_ch[channel].regs->curr_x_count;
}
static inline unsigned short get_dma_curr_ycount(unsigned int channel)
{
	return dma_ch[channel].regs->curr_y_count;
}
static inline void *get_dma_next_desc_ptr(unsigned int channel)
{
	return dma_ch[channel].regs->next_desc_ptr;
}
static inline void *get_dma_curr_desc_ptr(unsigned int channel)
{
	return dma_ch[channel].regs->curr_desc_ptr;
}
static inline unsigned long get_dma_curr_addr(unsigned int channel)
{
	return dma_ch[channel].regs->curr_addr_ptr;
}

static inline void set_dma_sg(unsigned int channel, struct dmasg *sg, int ndsize)
{
	dma_ch[channel].regs->cfg =
		(dma_ch[channel].regs->cfg & ~(0xf << 8)) |
		((ndsize & 0xf) << 8);
	dma_ch[channel].regs->next_desc_ptr = sg;
}
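
/*
 * Usage sketch (illustrative only; "channel", buf0/buf1, len0/len1 and
 * ndsize are placeholders): chaining two descriptors for descriptor-based
 * flow.  next_desc_addr links one struct dmasg to the next; set_dma_sg()
 * writes the descriptor-element count into bits 8-11 of the config
 * register and points the channel at the first descriptor.  Each
 * descriptor's cfg field still needs a complete config value with the
 * desired flow/interrupt bits (see set_bfin_dma_config() above), which is
 * omitted here because it is a per-driver decision.
 *
 *	static struct dmasg desc[2];
 *
 *	desc[0].start_addr = (unsigned long)buf0;
 *	desc[0].x_count = len0 / 2;
 *	desc[0].x_modify = 2;
 *	desc[0].next_desc_addr = &desc[1];
 *
 *	desc[1].start_addr = (unsigned long)buf1;
 *	desc[1].x_count = len1 / 2;
 *	desc[1].x_modify = 2;
 *
 *	set_dma_sg(channel, desc, ndsize);
 */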

static inline int dma_channel_active(unsigned int channel)
{
	if (dma_ch[channel].chan_status == DMA_CHANNEL_FREE)
		return 0;
	else
		return 1;
}

static inline void disable_dma(unsigned int channel)
{
	dma_ch[channel].regs->cfg &= ~DMAEN;
	SSYNC();
	dma_ch[channel].chan_status = DMA_CHANNEL_REQUESTED;
}
static inline void enable_dma(unsigned int channel)
{
	dma_ch[channel].regs->curr_x_count = 0;
	dma_ch[channel].regs->curr_y_count = 0;
	dma_ch[channel].regs->cfg |= DMAEN;
	dma_ch[channel].chan_status = DMA_CHANNEL_ENABLED;
}
void free_dma(unsigned int channel);
int request_dma(unsigned int channel, const char *device_id);
int set_dma_callback(unsigned int channel, irq_handler_t callback, void *data);

static inline void dma_disable_irq(unsigned int channel)
{
	disable_irq(dma_ch[channel].irq);
}
static inline void dma_enable_irq(unsigned int channel)
{
	enable_irq(dma_ch[channel].irq);
}
static inline void clear_dma_irqstat(unsigned int channel)
{
	dma_ch[channel].regs->irq_status = DMA_DONE | DMA_ERR;
}
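
/*
 * End-to-end sketch of a register-mode transfer using the helpers above
 * (illustrative only; "channel", "dev", "buf", "len" and my_dma_handler()
 * are placeholders, not existing kernel symbols).  The trailing
 * disable_dma()/free_dma() pair is the teardown once the transfer has
 * completed and the handler has run.
 *
 *	static irqreturn_t my_dma_handler(int irq, void *dev_id)
 *	{
 *		clear_dma_irqstat(channel);
 *		return IRQ_HANDLED;
 *	}
 *
 *	if (request_dma(channel, "my device") < 0)
 *		return -EBUSY;
 *	set_dma_callback(channel, my_dma_handler, dev);
 *	set_dma_start_addr(channel, (unsigned long)buf);
 *	set_dma_x_count(channel, len / 2);
 *	set_dma_x_modify(channel, 2);
 *	set_dma_config(channel, set_bfin_dma_config(DIR_WRITE, DMA_FLOW_STOP,
 *				INTR_ON_BUF, DIMENSION_LINEAR, DATA_SIZE_16,
 *				DMA_SYNC_RESTART));
 *	enable_dma(channel);
 *
 *	disable_dma(channel);
 *	free_dma(channel);
 */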

void *dma_memcpy(void *dest, const void *src, size_t count);
void *safe_dma_memcpy(void *dest, const void *src, size_t count);
void blackfin_dma_early_init(void);

#endif