/*
 * Intel MIC Platform Software Stack (MPSS)
 *
 * Copyright(c) 2014 Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * The full GNU General Public License is included in this distribution in
 * the file called "COPYING".
 *
 * Intel MIC X100 DMA Driver.
 *
 * Adapted from IOAT dma driver.
 */
#include <linux/module.h>
#include <linux/io.h>
#include <linux/seq_file.h>
#include <linux/vmalloc.h>

#include "mic_x100_dma.h"

#define MIC_DMA_MAX_XFER_SIZE_CARD  (1 * 1024 * 1024 -\
                                     MIC_DMA_ALIGN_BYTES)
#define MIC_DMA_MAX_XFER_SIZE_HOST  (1 * 1024 * 1024 >> 1)
#define MIC_DMA_DESC_TYPE_SHIFT     60
#define MIC_DMA_MEMCPY_LEN_SHIFT    46
#define MIC_DMA_STAT_INTR_SHIFT     59

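/*
 * Descriptor layout, as programmed below: for memcpy descriptors qw0
 * holds the source address plus the length (in MIC_DMA_ALIGN_BYTES
 * units) at MIC_DMA_MEMCPY_LEN_SHIFT, while for status descriptors it
 * holds the 64 bit value to write. qw1 holds the destination address,
 * the descriptor type at MIC_DMA_DESC_TYPE_SHIFT and, for status
 * descriptors, an interrupt-request bit at MIC_DMA_STAT_INTR_SHIFT.
 */
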
/* high-water mark for pushing dma descriptors */
static int mic_dma_pending_level = 4;

/* Status descriptor is used to write a 64 bit value to a memory location */
enum mic_dma_desc_format_type {
        MIC_DMA_MEMCPY = 1,
        MIC_DMA_STATUS,
};

static inline u32 mic_dma_hw_ring_inc(u32 val)
{
        return (val + 1) % MIC_DMA_DESC_RX_SIZE;
}

static inline u32 mic_dma_hw_ring_dec(u32 val)
{
        return val ? val - 1 : MIC_DMA_DESC_RX_SIZE - 1;
}

static inline void mic_dma_hw_ring_inc_head(struct mic_dma_chan *ch)
{
        ch->head = mic_dma_hw_ring_inc(ch->head);
}

/* Prepare a memcpy desc */
static inline void mic_dma_memcpy_desc(struct mic_dma_desc *desc,
        dma_addr_t src_phys, dma_addr_t dst_phys, u64 size)
{
        u64 qw0, qw1;

        qw0 = src_phys;
        qw0 |= (size >> MIC_DMA_ALIGN_SHIFT) << MIC_DMA_MEMCPY_LEN_SHIFT;
        qw1 = MIC_DMA_MEMCPY;
        qw1 <<= MIC_DMA_DESC_TYPE_SHIFT;
        qw1 |= dst_phys;
        desc->qw0 = qw0;
        desc->qw1 = qw1;
}

/* Prepare a status desc. with @data to be written at @dst_phys */
static inline void mic_dma_prep_status_desc(struct mic_dma_desc *desc, u64 data,
        dma_addr_t dst_phys, bool generate_intr)
{
        u64 qw0, qw1;

        qw0 = data;
        qw1 = (u64)MIC_DMA_STATUS << MIC_DMA_DESC_TYPE_SHIFT | dst_phys;
        if (generate_intr)
                qw1 |= (1ULL << MIC_DMA_STAT_INTR_SHIFT);
        desc->qw0 = qw0;
        desc->qw1 = qw1;
}

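/*
 * Reap completed descriptors: read the h/w completion count, run the
 * dmaengine callback for every cookie between the last known tail and
 * that count, then publish the new last_tail.
 */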
static void mic_dma_cleanup(struct mic_dma_chan *ch)
{
        struct dma_async_tx_descriptor *tx;
        u32 tail;
        u32 last_tail;

        spin_lock(&ch->cleanup_lock);
        tail = mic_dma_read_cmp_cnt(ch);
        /*
         * This is the barrier pair for smp_wmb() in fn.
         * mic_dma_tx_submit_unlock. It's required so that we read the
         * updated cookie value from tx->cookie.
         */
        smp_rmb();
        for (last_tail = ch->last_tail; tail != last_tail;) {
                tx = &ch->tx_array[last_tail];
                if (tx->cookie) {
                        dma_cookie_complete(tx);
                        if (tx->callback) {
                                tx->callback(tx->callback_param);
                                tx->callback = NULL;
                        }
                }
                last_tail = mic_dma_hw_ring_inc(last_tail);
        }
        /* finish all completion callbacks before incrementing tail */
        smp_mb();
        ch->last_tail = last_tail;
        spin_unlock(&ch->cleanup_lock);
}

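/*
 * Num. of free slots between head and tail; one slot is always kept
 * unused (hence the final count - 1) so that a completely full ring
 * is never confused with an empty one (head == tail means empty).
 */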
static u32 mic_dma_ring_count(u32 head, u32 tail)
{
        u32 count;

        if (head >= tail)
                count = tail + (MIC_DMA_DESC_RX_SIZE - head);
        else
                count = tail - head;
        return count - 1;
}

/* Returns the num. of free descriptors on success, -ENOMEM on failure */
static int mic_dma_avail_desc_ring_space(struct mic_dma_chan *ch, int required)
{
        struct device *dev = mic_dma_ch_to_device(ch);
        u32 count;

        count = mic_dma_ring_count(ch->head, ch->last_tail);
        if (count < required) {
                mic_dma_cleanup(ch);
                count = mic_dma_ring_count(ch->head, ch->last_tail);
        }

        if (count < required) {
                dev_dbg(dev, "%s %d not enough desc space: required=%u, avail=%u\n",
                        __func__, __LINE__, required, count);
                return -ENOMEM;
        }
        return count;
}

/* Program memcpy descriptors into the descriptor ring and update s/w head ptr */
static int mic_dma_prog_memcpy_desc(struct mic_dma_chan *ch, dma_addr_t src,
                                    dma_addr_t dst, size_t len)
{
        size_t current_transfer_len;
        size_t max_xfer_size = to_mic_dma_dev(ch)->max_xfer_size;
        /*
         * The +3 reserves room for the optional fence descriptor and the
         * two interrupt status descriptors programmed in mic_dma_do_dma()
         */
        int num_desc = len / max_xfer_size + 3;
        int ret;

        if (len % max_xfer_size)
                num_desc++;

        ret = mic_dma_avail_desc_ring_space(ch, num_desc);
        if (ret < 0)
                return ret;
        /*
         * do-while: even a zero-length transfer programs one descriptor,
         * which mic_dma_drain_chan() and the interrupt prep path rely on
         */
        do {
                current_transfer_len = min(len, max_xfer_size);
                mic_dma_memcpy_desc(&ch->desc_ring[ch->head],
                                    src, dst, current_transfer_len);
                mic_dma_hw_ring_inc_head(ch);
                len -= current_transfer_len;
                dst = dst + current_transfer_len;
                src = src + current_transfer_len;
        } while (len > 0);
        return 0;
}

/* h/w quirk: the h/w needs two status descriptors for every interrupting status desc */
static void mic_dma_prog_intr(struct mic_dma_chan *ch)
{
        mic_dma_prep_status_desc(&ch->desc_ring[ch->head], 0,
                                 ch->status_dest_micpa, false);
        mic_dma_hw_ring_inc_head(ch);
        mic_dma_prep_status_desc(&ch->desc_ring[ch->head], 0,
                                 ch->status_dest_micpa, true);
        mic_dma_hw_ring_inc_head(ch);
}

/* Wrapper function to program memcpy descriptors/status descriptors */
static int mic_dma_do_dma(struct mic_dma_chan *ch, int flags, dma_addr_t src,
                          dma_addr_t dst, size_t len)
{
        int ret = mic_dma_prog_memcpy_desc(ch, src, dst, len);

        if (ret < 0)
                return ret;
        /* Above mic_dma_prog_memcpy_desc() made sure we have enough space */
        if (flags & DMA_PREP_FENCE) {
                mic_dma_prep_status_desc(&ch->desc_ring[ch->head], 0,
                                         ch->status_dest_micpa, false);
                mic_dma_hw_ring_inc_head(ch);
        }

        if (flags & DMA_PREP_INTERRUPT)
                mic_dma_prog_intr(ch);

        return 0;
}

static inline void mic_dma_issue_pending(struct dma_chan *ch)
{
        struct mic_dma_chan *mic_ch = to_mic_dma_chan(ch);

        spin_lock(&mic_ch->issue_lock);
        /*
         * Write to head triggers h/w to act on the descriptors.
         * On MIC, writing the same head value twice causes
         * a h/w error. On second write, h/w assumes we filled
         * the entire ring & overwrote some of the descriptors.
         */
        if (mic_ch->issued == mic_ch->submitted)
                goto out;
        mic_ch->issued = mic_ch->submitted;
        /*
         * make descriptor updates visible before advancing head,
         * this is purposefully not smp_wmb() since we are also
         * publishing the descriptor updates to a dma device
         */
        wmb();
        mic_dma_write_reg(mic_ch, MIC_DMA_REG_DHPR, mic_ch->issued);
out:
        spin_unlock(&mic_ch->issue_lock);
}

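/*
 * Ring the doorbell only once more than mic_dma_pending_level
 * descriptors have accumulated, batching the MMIO head update
 * across several small submissions.
 */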
static inline void mic_dma_update_pending(struct mic_dma_chan *ch)
{
        if (mic_dma_ring_count(ch->issued, ch->submitted)
                        > mic_dma_pending_level)
                mic_dma_issue_pending(&ch->api_ch);
}

static dma_cookie_t mic_dma_tx_submit_unlock(struct dma_async_tx_descriptor *tx)
{
        struct mic_dma_chan *mic_ch = to_mic_dma_chan(tx->chan);
        dma_cookie_t cookie;

        dma_cookie_assign(tx);
        cookie = tx->cookie;
        /*
         * We need an smp write barrier here because another CPU might see
         * an update to submitted and update h/w head even before we
         * assigned a cookie to this tx.
         */
        smp_wmb();
        mic_ch->submitted = mic_ch->head;
        spin_unlock(&mic_ch->prep_lock);
        mic_dma_update_pending(mic_ch);
        return cookie;
}

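/*
 * The ring head has already been advanced past the descriptors just
 * programmed, so the tx shadowing this transfer lives at head - 1 in
 * tx_array.
 */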
static inline struct dma_async_tx_descriptor *
allocate_tx(struct mic_dma_chan *ch)
{
        u32 idx = mic_dma_hw_ring_dec(ch->head);
        struct dma_async_tx_descriptor *tx = &ch->tx_array[idx];

        dma_async_tx_descriptor_init(tx, &ch->api_ch);
        tx->tx_submit = mic_dma_tx_submit_unlock;
        return tx;
}

/*
 * Prepare a memcpy descriptor to be added to the ring.
 * Note that a temporary descriptor would add the extra overhead of
 * copying it into the ring, so we program the descriptor ring directly.
 */
static struct dma_async_tx_descriptor *
mic_dma_prep_memcpy_lock(struct dma_chan *ch, dma_addr_t dma_dest,
                         dma_addr_t dma_src, size_t len, unsigned long flags)
{
        struct mic_dma_chan *mic_ch = to_mic_dma_chan(ch);
        struct device *dev = mic_dma_ch_to_device(mic_ch);
        int result;

        if (!len && !flags)
                return NULL;

        spin_lock(&mic_ch->prep_lock);
        result = mic_dma_do_dma(mic_ch, flags, dma_src, dma_dest, len);
        if (result >= 0)
                return allocate_tx(mic_ch);
        dev_err(dev, "Error enqueueing dma, error=%d\n", result);
        spin_unlock(&mic_ch->prep_lock);
        return NULL;
}

static struct dma_async_tx_descriptor *
mic_dma_prep_interrupt_lock(struct dma_chan *ch, unsigned long flags)
{
        struct mic_dma_chan *mic_ch = to_mic_dma_chan(ch);
        int ret;

        spin_lock(&mic_ch->prep_lock);
        ret = mic_dma_do_dma(mic_ch, flags, 0, 0, 0);
        if (!ret)
                return allocate_tx(mic_ch);
        spin_unlock(&mic_ch->prep_lock);
        return NULL;
}

/* Return the status of the transaction */
static enum dma_status
mic_dma_tx_status(struct dma_chan *ch, dma_cookie_t cookie,
                  struct dma_tx_state *txstate)
{
        struct mic_dma_chan *mic_ch = to_mic_dma_chan(ch);

        if (DMA_COMPLETE != dma_cookie_status(ch, cookie, txstate))
                mic_dma_cleanup(mic_ch);

        return dma_cookie_status(ch, cookie, txstate);
}

static irqreturn_t mic_dma_thread_fn(int irq, void *data)
{
        mic_dma_cleanup((struct mic_dma_chan *)data);
        return IRQ_HANDLED;
}

static irqreturn_t mic_dma_intr_handler(int irq, void *data)
{
        struct mic_dma_chan *ch = (struct mic_dma_chan *)data;

        mic_dma_ack_interrupt(ch);
        return IRQ_WAKE_THREAD;
}

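/*
 * The descriptor ring is kzalloc'ed so it can be dma_map_single'ed for
 * the h/w, while the CPU-only shadow tx_array is vzalloc'ed since it
 * never needs to be physically contiguous.
 */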
static int mic_dma_alloc_desc_ring(struct mic_dma_chan *ch)
{
        u64 desc_ring_size = MIC_DMA_DESC_RX_SIZE * sizeof(*ch->desc_ring);
        struct device *dev = &to_mbus_device(ch)->dev;

        desc_ring_size = ALIGN(desc_ring_size, MIC_DMA_ALIGN_BYTES);
        ch->desc_ring = kzalloc(desc_ring_size, GFP_KERNEL);

        if (!ch->desc_ring)
                return -ENOMEM;

        ch->desc_ring_micpa = dma_map_single(dev, ch->desc_ring,
                                             desc_ring_size, DMA_BIDIRECTIONAL);
        if (dma_mapping_error(dev, ch->desc_ring_micpa))
                goto map_error;

        ch->tx_array = vzalloc(MIC_DMA_DESC_RX_SIZE * sizeof(*ch->tx_array));
        if (!ch->tx_array)
                goto tx_error;
        return 0;
tx_error:
        dma_unmap_single(dev, ch->desc_ring_micpa, desc_ring_size,
                         DMA_BIDIRECTIONAL);
map_error:
        kfree(ch->desc_ring);
        return -ENOMEM;
}

static void mic_dma_free_desc_ring(struct mic_dma_chan *ch)
{
        u64 desc_ring_size = MIC_DMA_DESC_RX_SIZE * sizeof(*ch->desc_ring);

        vfree(ch->tx_array);
        desc_ring_size = ALIGN(desc_ring_size, MIC_DMA_ALIGN_BYTES);
        dma_unmap_single(&to_mbus_device(ch)->dev, ch->desc_ring_micpa,
                         desc_ring_size, DMA_BIDIRECTIONAL);
        kfree(ch->desc_ring);
        ch->desc_ring = NULL;
}

static void mic_dma_free_status_dest(struct mic_dma_chan *ch)
{
        dma_unmap_single(&to_mbus_device(ch)->dev, ch->status_dest_micpa,
                         L1_CACHE_BYTES, DMA_BIDIRECTIONAL);
        kfree(ch->status_dest);
}

static int mic_dma_alloc_status_dest(struct mic_dma_chan *ch)
{
        struct device *dev = &to_mbus_device(ch)->dev;

        ch->status_dest = kzalloc(L1_CACHE_BYTES, GFP_KERNEL);
        if (!ch->status_dest)
                return -ENOMEM;
        ch->status_dest_micpa = dma_map_single(dev, ch->status_dest,
                                               L1_CACHE_BYTES, DMA_BIDIRECTIONAL);
        if (dma_mapping_error(dev, ch->status_dest_micpa)) {
                kfree(ch->status_dest);
                ch->status_dest = NULL;
                return -ENOMEM;
        }
        return 0;
}

static int mic_dma_check_chan(struct mic_dma_chan *ch)
{
        if (mic_dma_read_reg(ch, MIC_DMA_REG_DCHERR) ||
            mic_dma_read_reg(ch, MIC_DMA_REG_DSTAT) & MIC_DMA_CHAN_QUIESCE) {
                mic_dma_disable_chan(ch);
                mic_dma_chan_mask_intr(ch);
                dev_err(mic_dma_ch_to_device(ch),
                        "%s %d error setting up mic dma chan %d\n",
                        __func__, __LINE__, ch->ch_num);
                return -EBUSY;
        }
        return 0;
}

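/*
 * Bring the channel to a known state: quiesce it, point it at the new
 * descriptor ring, and sync the s/w head and last_tail with the h/w
 * tail pointer (DTPR) so that the ring starts out empty.
 */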
static int mic_dma_chan_setup(struct mic_dma_chan *ch)
{
        if (MIC_DMA_CHAN_MIC == ch->owner)
                mic_dma_chan_set_owner(ch);
        mic_dma_disable_chan(ch);
        mic_dma_chan_mask_intr(ch);
        mic_dma_write_reg(ch, MIC_DMA_REG_DCHERRMSK, 0);
        mic_dma_chan_set_desc_ring(ch);
        ch->last_tail = mic_dma_read_reg(ch, MIC_DMA_REG_DTPR);
        ch->head = ch->last_tail;
        ch->issued = 0;
        mic_dma_chan_unmask_intr(ch);
        mic_dma_enable_chan(ch);
        return mic_dma_check_chan(ch);
}

static void mic_dma_chan_destroy(struct mic_dma_chan *ch)
{
        mic_dma_disable_chan(ch);
        mic_dma_chan_mask_intr(ch);
}

static void mic_dma_unregister_dma_device(struct mic_dma_device *mic_dma_dev)
{
        dma_async_device_unregister(&mic_dma_dev->dma_dev);
}

static int mic_dma_setup_irq(struct mic_dma_chan *ch)
{
        ch->cookie =
                to_mbus_hw_ops(ch)->request_threaded_irq(to_mbus_device(ch),
                        mic_dma_intr_handler, mic_dma_thread_fn,
                        "mic dma_channel", ch, ch->ch_num);
        if (IS_ERR(ch->cookie))
                return PTR_ERR(ch->cookie);
        return 0;
}

static inline void mic_dma_free_irq(struct mic_dma_chan *ch)
{
        to_mbus_hw_ops(ch)->free_irq(to_mbus_device(ch), ch->cookie, ch);
}

static int mic_dma_chan_init(struct mic_dma_chan *ch)
{
        int ret = mic_dma_alloc_desc_ring(ch);

        if (ret)
                goto ring_error;
        ret = mic_dma_alloc_status_dest(ch);
        if (ret)
                goto status_error;
        ret = mic_dma_chan_setup(ch);
        if (ret)
                goto chan_error;
        return ret;
chan_error:
        mic_dma_free_status_dest(ch);
status_error:
        mic_dma_free_desc_ring(ch);
ring_error:
        return ret;
}

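/*
 * Drain the channel by submitting a zero-length, fenced memcpy and
 * synchronously waiting for its completion.
 */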
static int mic_dma_drain_chan(struct mic_dma_chan *ch)
{
        struct dma_async_tx_descriptor *tx;
        int err = 0;
        dma_cookie_t cookie;

        tx = mic_dma_prep_memcpy_lock(&ch->api_ch, 0, 0, 0, DMA_PREP_FENCE);
        if (!tx) {
                err = -ENOMEM;
                goto error;
        }

        cookie = tx->tx_submit(tx);
        if (dma_submit_error(cookie))
                err = -ENOMEM;
        else
                err = dma_sync_wait(&ch->api_ch, cookie);
        if (err) {
                dev_err(mic_dma_ch_to_device(ch), "%s %d TO chan 0x%x\n",
                        __func__, __LINE__, ch->ch_num);
                err = -EIO;
        }
error:
        mic_dma_cleanup(ch);
        return err;
}

static inline void mic_dma_chan_uninit(struct mic_dma_chan *ch)
{
        mic_dma_chan_destroy(ch);
        mic_dma_cleanup(ch);
        mic_dma_free_status_dest(ch);
        mic_dma_free_desc_ring(ch);
}

static int mic_dma_init(struct mic_dma_device *mic_dma_dev,
                        enum mic_dma_chan_owner owner)
{
        int i, first_chan = mic_dma_dev->start_ch;
        struct mic_dma_chan *ch;
        int ret;

        for (i = first_chan; i < first_chan + MIC_DMA_NUM_CHAN; i++) {
                ch = &mic_dma_dev->mic_ch[i];
                ch->ch_num = i;
                ch->owner = owner;
                spin_lock_init(&ch->cleanup_lock);
                spin_lock_init(&ch->prep_lock);
                spin_lock_init(&ch->issue_lock);
                ret = mic_dma_setup_irq(ch);
                if (ret)
                        goto error;
        }
        return 0;
error:
        for (i = i - 1; i >= first_chan; i--)
                mic_dma_free_irq(&mic_dma_dev->mic_ch[i]);
        return ret;
}

static void mic_dma_uninit(struct mic_dma_device *mic_dma_dev)
{
        int i, first_chan = mic_dma_dev->start_ch;
        struct mic_dma_chan *ch;

        for (i = first_chan; i < first_chan + MIC_DMA_NUM_CHAN; i++) {
                ch = &mic_dma_dev->mic_ch[i];
                mic_dma_free_irq(ch);
        }
}

static int mic_dma_alloc_chan_resources(struct dma_chan *ch)
{
        int ret = mic_dma_chan_init(to_mic_dma_chan(ch));

        if (ret)
                return ret;
        return MIC_DMA_DESC_RX_SIZE;
}

static void mic_dma_free_chan_resources(struct dma_chan *ch)
{
        struct mic_dma_chan *mic_ch = to_mic_dma_chan(ch);

        mic_dma_drain_chan(mic_ch);
        mic_dma_chan_uninit(mic_ch);
}

/* Set the fn. handlers and register the dma device with the dma api */
static int mic_dma_register_dma_device(struct mic_dma_device *mic_dma_dev,
                                       enum mic_dma_chan_owner owner)
{
        int i, first_chan = mic_dma_dev->start_ch;

        dma_cap_zero(mic_dma_dev->dma_dev.cap_mask);
        /*
         * This dma engine is not capable of host memory to host memory
         * transfers
         */
        dma_cap_set(DMA_MEMCPY, mic_dma_dev->dma_dev.cap_mask);

        if (MIC_DMA_CHAN_HOST == owner)
                dma_cap_set(DMA_PRIVATE, mic_dma_dev->dma_dev.cap_mask);
        mic_dma_dev->dma_dev.device_alloc_chan_resources =
                mic_dma_alloc_chan_resources;
        mic_dma_dev->dma_dev.device_free_chan_resources =
                mic_dma_free_chan_resources;
        mic_dma_dev->dma_dev.device_tx_status = mic_dma_tx_status;
        mic_dma_dev->dma_dev.device_prep_dma_memcpy = mic_dma_prep_memcpy_lock;
        mic_dma_dev->dma_dev.device_prep_dma_interrupt =
                mic_dma_prep_interrupt_lock;
        mic_dma_dev->dma_dev.device_issue_pending = mic_dma_issue_pending;
        mic_dma_dev->dma_dev.copy_align = MIC_DMA_ALIGN_SHIFT;
        INIT_LIST_HEAD(&mic_dma_dev->dma_dev.channels);
        for (i = first_chan; i < first_chan + MIC_DMA_NUM_CHAN; i++) {
                mic_dma_dev->mic_ch[i].api_ch.device = &mic_dma_dev->dma_dev;
                dma_cookie_init(&mic_dma_dev->mic_ch[i].api_ch);
                list_add_tail(&mic_dma_dev->mic_ch[i].api_ch.device_node,
                              &mic_dma_dev->dma_dev.channels);
        }
        return dma_async_device_register(&mic_dma_dev->dma_dev);
}

/*
 * Initializes dma channels and registers the dma device with the
 * dma engine api.
 */
static struct mic_dma_device *mic_dma_dev_reg(struct mbus_device *mbdev,
                                              enum mic_dma_chan_owner owner)
{
        struct mic_dma_device *mic_dma_dev;
        int ret;
        struct device *dev = &mbdev->dev;

        mic_dma_dev = kzalloc(sizeof(*mic_dma_dev), GFP_KERNEL);
        if (!mic_dma_dev) {
                ret = -ENOMEM;
                goto alloc_error;
        }
        mic_dma_dev->mbdev = mbdev;
        mic_dma_dev->dma_dev.dev = dev;
        mic_dma_dev->mmio = mbdev->mmio_va;
        if (MIC_DMA_CHAN_HOST == owner) {
                mic_dma_dev->start_ch = 0;
                mic_dma_dev->max_xfer_size = MIC_DMA_MAX_XFER_SIZE_HOST;
        } else {
                mic_dma_dev->start_ch = 4;
                mic_dma_dev->max_xfer_size = MIC_DMA_MAX_XFER_SIZE_CARD;
        }
        ret = mic_dma_init(mic_dma_dev, owner);
        if (ret)
                goto init_error;
        ret = mic_dma_register_dma_device(mic_dma_dev, owner);
        if (ret)
                goto reg_error;
        return mic_dma_dev;
reg_error:
        mic_dma_uninit(mic_dma_dev);
init_error:
        kfree(mic_dma_dev);
        mic_dma_dev = NULL;
alloc_error:
        dev_err(dev, "Error at %s %d ret=%d\n", __func__, __LINE__, ret);
        return mic_dma_dev;
}

static void mic_dma_dev_unreg(struct mic_dma_device *mic_dma_dev)
{
        mic_dma_unregister_dma_device(mic_dma_dev);
        mic_dma_uninit(mic_dma_dev);
        kfree(mic_dma_dev);
}

/* DEBUGFS CODE */
static int mic_dma_reg_seq_show(struct seq_file *s, void *pos)
{
        struct mic_dma_device *mic_dma_dev = s->private;
        int i, chan_num, first_chan = mic_dma_dev->start_ch;
        struct mic_dma_chan *ch;

        seq_printf(s, "SBOX_DCR: %#x\n",
                   mic_dma_mmio_read(&mic_dma_dev->mic_ch[first_chan],
                                     MIC_DMA_SBOX_BASE + MIC_DMA_SBOX_DCR));
        seq_puts(s, "DMA Channel Registers\n");
        seq_printf(s, "%-10s| %-10s %-10s %-10s %-10s %-10s",
                   "Channel", "DCAR", "DTPR", "DHPR", "DRAR_HI", "DRAR_LO");
        seq_printf(s, " %-11s %-14s %-10s\n", "DCHERR", "DCHERRMSK", "DSTAT");
        for (i = first_chan; i < first_chan + MIC_DMA_NUM_CHAN; i++) {
                ch = &mic_dma_dev->mic_ch[i];
                chan_num = ch->ch_num;
                seq_printf(s, "%-10i| %-#10x %-#10x %-#10x %-#10x",
                           chan_num,
                           mic_dma_read_reg(ch, MIC_DMA_REG_DCAR),
                           mic_dma_read_reg(ch, MIC_DMA_REG_DTPR),
                           mic_dma_read_reg(ch, MIC_DMA_REG_DHPR),
                           mic_dma_read_reg(ch, MIC_DMA_REG_DRAR_HI));
                seq_printf(s, " %-#10x %-#10x %-#14x %-#10x\n",
                           mic_dma_read_reg(ch, MIC_DMA_REG_DRAR_LO),
                           mic_dma_read_reg(ch, MIC_DMA_REG_DCHERR),
                           mic_dma_read_reg(ch, MIC_DMA_REG_DCHERRMSK),
                           mic_dma_read_reg(ch, MIC_DMA_REG_DSTAT));
        }
        return 0;
}

static int mic_dma_reg_debug_open(struct inode *inode, struct file *file)
{
        return single_open(file, mic_dma_reg_seq_show, inode->i_private);
}

static int mic_dma_reg_debug_release(struct inode *inode, struct file *file)
{
        return single_release(inode, file);
}

static const struct file_operations mic_dma_reg_ops = {
        .owner   = THIS_MODULE,
        .open    = mic_dma_reg_debug_open,
        .read    = seq_read,
        .llseek  = seq_lseek,
        .release = mic_dma_reg_debug_release
};

/* Debugfs parent dir */
static struct dentry *mic_dma_dbg;

static int mic_dma_driver_probe(struct mbus_device *mbdev)
{
        struct mic_dma_device *mic_dma_dev;
        enum mic_dma_chan_owner owner;

        if (MBUS_DEV_DMA_MIC == mbdev->id.device)
                owner = MIC_DMA_CHAN_MIC;
        else
                owner = MIC_DMA_CHAN_HOST;

        mic_dma_dev = mic_dma_dev_reg(mbdev, owner);
        if (!mic_dma_dev)
                return -ENODEV;
        dev_set_drvdata(&mbdev->dev, mic_dma_dev);

        if (mic_dma_dbg) {
                mic_dma_dev->dbg_dir = debugfs_create_dir(dev_name(&mbdev->dev),
                                                          mic_dma_dbg);
                if (mic_dma_dev->dbg_dir)
                        debugfs_create_file("mic_dma_reg", 0444,
                                            mic_dma_dev->dbg_dir, mic_dma_dev,
                                            &mic_dma_reg_ops);
        }
        return 0;
}

static void mic_dma_driver_remove(struct mbus_device *mbdev)
{
        struct mic_dma_device *mic_dma_dev;

        mic_dma_dev = dev_get_drvdata(&mbdev->dev);
        debugfs_remove_recursive(mic_dma_dev->dbg_dir);
        mic_dma_dev_unreg(mic_dma_dev);
}

static struct mbus_device_id id_table[] = {
        {MBUS_DEV_DMA_MIC, MBUS_DEV_ANY_ID},
        {MBUS_DEV_DMA_HOST, MBUS_DEV_ANY_ID},
        {0},
};

static struct mbus_driver mic_dma_driver = {
        .driver.name  = KBUILD_MODNAME,
        .driver.owner = THIS_MODULE,
        .id_table     = id_table,
        .probe        = mic_dma_driver_probe,
        .remove       = mic_dma_driver_remove,
};

static int __init mic_x100_dma_init(void)
{
        int rc = mbus_register_driver(&mic_dma_driver);

        if (rc)
                return rc;
        mic_dma_dbg = debugfs_create_dir(KBUILD_MODNAME, NULL);
        return 0;
}

static void __exit mic_x100_dma_exit(void)
{
        debugfs_remove_recursive(mic_dma_dbg);
        mbus_unregister_driver(&mic_dma_driver);
}

module_init(mic_x100_dma_init);
module_exit(mic_x100_dma_exit);

MODULE_DEVICE_TABLE(mbus, id_table);
MODULE_AUTHOR("Intel Corporation");
MODULE_DESCRIPTION("Intel(R) MIC X100 DMA Driver");
MODULE_LICENSE("GPL v2");