2 * Intel MIC Platform Software Stack (MPSS)
4 * Copyright(c) 2014 Intel Corporation.
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License, version 2, as
8 * published by the Free Software Foundation.
10 * This program is distributed in the hope that it will be useful, but
11 * WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
13 * General Public License for more details.
15 * The full GNU General Public License is included in this distribution in
16 * the file called "COPYING".
18 * Intel MIC X100 DMA Driver.
20 * Adapted from IOAT dma driver.
22 #include <linux/module.h>
24 #include <linux/seq_file.h>
25 #include <linux/vmalloc.h>
27 #include "mic_x100_dma.h"
/*
 * Maximum per-descriptor transfer sizes. The card-side limit is reduced by
 * the alignment granule; the host-side limit is half a megabyte.
 */
#define MIC_DMA_MAX_XFER_SIZE_CARD  (1 * 1024 * 1024 -\
				     MIC_DMA_ALIGN_BYTES)
#define MIC_DMA_MAX_XFER_SIZE_HOST  (1 * 1024 * 1024 >> 1)
#define MIC_DMA_DESC_TYPE_SHIFT	60
#define MIC_DMA_MEMCPY_LEN_SHIFT 46
#define MIC_DMA_STAT_INTR_SHIFT 59

/* high-water mark for pushing dma descriptors */
static int mic_dma_pending_level = 4;

/* Status descriptor is used to write a 64 bit value to a memory location */
/* NOTE(review): enum body reconstructed from the upstream driver — confirm */
enum mic_dma_desc_format_type {
	MIC_DMA_MEMCPY = 1,
	MIC_DMA_STATUS,
};
45 static inline u32
mic_dma_hw_ring_inc(u32 val
)
47 return (val
+ 1) % MIC_DMA_DESC_RX_SIZE
;
50 static inline u32
mic_dma_hw_ring_dec(u32 val
)
52 return val
? val
- 1 : MIC_DMA_DESC_RX_SIZE
- 1;
55 static inline void mic_dma_hw_ring_inc_head(struct mic_dma_chan
*ch
)
57 ch
->head
= mic_dma_hw_ring_inc(ch
->head
);
60 /* Prepare a memcpy desc */
61 static inline void mic_dma_memcpy_desc(struct mic_dma_desc
*desc
,
62 dma_addr_t src_phys
, dma_addr_t dst_phys
, u64 size
)
67 qw0
|= (size
>> MIC_DMA_ALIGN_SHIFT
) << MIC_DMA_MEMCPY_LEN_SHIFT
;
69 qw1
<<= MIC_DMA_DESC_TYPE_SHIFT
;
75 /* Prepare a status desc. with @data to be written at @dst_phys */
76 static inline void mic_dma_prep_status_desc(struct mic_dma_desc
*desc
, u64 data
,
77 dma_addr_t dst_phys
, bool generate_intr
)
82 qw1
= (u64
) MIC_DMA_STATUS
<< MIC_DMA_DESC_TYPE_SHIFT
| dst_phys
;
84 qw1
|= (1ULL << MIC_DMA_STAT_INTR_SHIFT
);
89 static void mic_dma_cleanup(struct mic_dma_chan
*ch
)
91 struct dma_async_tx_descriptor
*tx
;
95 spin_lock(&ch
->cleanup_lock
);
96 tail
= mic_dma_read_cmp_cnt(ch
);
98 * This is the barrier pair for smp_wmb() in fn.
99 * mic_dma_tx_submit_unlock. It's required so that we read the
100 * updated cookie value from tx->cookie.
103 for (last_tail
= ch
->last_tail
; tail
!= last_tail
;) {
104 tx
= &ch
->tx_array
[last_tail
];
106 dma_cookie_complete(tx
);
108 tx
->callback(tx
->callback_param
);
112 last_tail
= mic_dma_hw_ring_inc(last_tail
);
114 /* finish all completion callbacks before incrementing tail */
116 ch
->last_tail
= last_tail
;
117 spin_unlock(&ch
->cleanup_lock
);
120 static u32
mic_dma_ring_count(u32 head
, u32 tail
)
125 count
= (tail
- 0) + (MIC_DMA_DESC_RX_SIZE
- head
);
131 /* Returns the num. of free descriptors on success, -ENOMEM on failure */
132 static int mic_dma_avail_desc_ring_space(struct mic_dma_chan
*ch
, int required
)
134 struct device
*dev
= mic_dma_ch_to_device(ch
);
137 count
= mic_dma_ring_count(ch
->head
, ch
->last_tail
);
138 if (count
< required
) {
140 count
= mic_dma_ring_count(ch
->head
, ch
->last_tail
);
143 if (count
< required
) {
144 dev_dbg(dev
, "Not enough desc space");
145 dev_dbg(dev
, "%s %d required=%u, avail=%u\n",
146 __func__
, __LINE__
, required
, count
);
153 /* Program memcpy descriptors into the descriptor ring and update s/w head ptr*/
154 static int mic_dma_prog_memcpy_desc(struct mic_dma_chan
*ch
, dma_addr_t src
,
155 dma_addr_t dst
, size_t len
)
157 size_t current_transfer_len
;
158 size_t max_xfer_size
= to_mic_dma_dev(ch
)->max_xfer_size
;
159 /* 3 is added to make sure we have enough space for status desc */
160 int num_desc
= len
/ max_xfer_size
+ 3;
163 if (len
% max_xfer_size
)
166 ret
= mic_dma_avail_desc_ring_space(ch
, num_desc
);
170 current_transfer_len
= min(len
, max_xfer_size
);
171 mic_dma_memcpy_desc(&ch
->desc_ring
[ch
->head
],
172 src
, dst
, current_transfer_len
);
173 mic_dma_hw_ring_inc_head(ch
);
174 len
-= current_transfer_len
;
175 dst
= dst
+ current_transfer_len
;
176 src
= src
+ current_transfer_len
;
181 /* It's a h/w quirk and h/w needs 2 status descriptors for every status desc */
182 static void mic_dma_prog_intr(struct mic_dma_chan
*ch
)
184 mic_dma_prep_status_desc(&ch
->desc_ring
[ch
->head
], 0,
185 ch
->status_dest_micpa
, false);
186 mic_dma_hw_ring_inc_head(ch
);
187 mic_dma_prep_status_desc(&ch
->desc_ring
[ch
->head
], 0,
188 ch
->status_dest_micpa
, true);
189 mic_dma_hw_ring_inc_head(ch
);
192 /* Wrapper function to program memcpy descriptors/status descriptors */
193 static int mic_dma_do_dma(struct mic_dma_chan
*ch
, int flags
, dma_addr_t src
,
194 dma_addr_t dst
, size_t len
)
196 if (len
&& -ENOMEM
== mic_dma_prog_memcpy_desc(ch
, src
, dst
, len
)) {
199 /* 3 is the maximum number of status descriptors */
200 int ret
= mic_dma_avail_desc_ring_space(ch
, 3);
206 /* Above mic_dma_prog_memcpy_desc() makes sure we have enough space */
207 if (flags
& DMA_PREP_FENCE
) {
208 mic_dma_prep_status_desc(&ch
->desc_ring
[ch
->head
], 0,
209 ch
->status_dest_micpa
, false);
210 mic_dma_hw_ring_inc_head(ch
);
213 if (flags
& DMA_PREP_INTERRUPT
)
214 mic_dma_prog_intr(ch
);
219 static inline void mic_dma_issue_pending(struct dma_chan
*ch
)
221 struct mic_dma_chan
*mic_ch
= to_mic_dma_chan(ch
);
223 spin_lock(&mic_ch
->issue_lock
);
225 * Write to head triggers h/w to act on the descriptors.
226 * On MIC, writing the same head value twice causes
227 * a h/w error. On second write, h/w assumes we filled
228 * the entire ring & overwrote some of the descriptors.
230 if (mic_ch
->issued
== mic_ch
->submitted
)
232 mic_ch
->issued
= mic_ch
->submitted
;
234 * make descriptor updates visible before advancing head,
235 * this is purposefully not smp_wmb() since we are also
236 * publishing the descriptor updates to a dma device
239 mic_dma_write_reg(mic_ch
, MIC_DMA_REG_DHPR
, mic_ch
->issued
);
241 spin_unlock(&mic_ch
->issue_lock
);
244 static inline void mic_dma_update_pending(struct mic_dma_chan
*ch
)
246 if (mic_dma_ring_count(ch
->issued
, ch
->submitted
)
247 > mic_dma_pending_level
)
248 mic_dma_issue_pending(&ch
->api_ch
);
251 static dma_cookie_t
mic_dma_tx_submit_unlock(struct dma_async_tx_descriptor
*tx
)
253 struct mic_dma_chan
*mic_ch
= to_mic_dma_chan(tx
->chan
);
256 dma_cookie_assign(tx
);
259 * We need an smp write barrier here because another CPU might see
260 * an update to submitted and update h/w head even before we
261 * assigned a cookie to this tx.
264 mic_ch
->submitted
= mic_ch
->head
;
265 spin_unlock(&mic_ch
->prep_lock
);
266 mic_dma_update_pending(mic_ch
);
270 static inline struct dma_async_tx_descriptor
*
271 allocate_tx(struct mic_dma_chan
*ch
)
273 u32 idx
= mic_dma_hw_ring_dec(ch
->head
);
274 struct dma_async_tx_descriptor
*tx
= &ch
->tx_array
[idx
];
276 dma_async_tx_descriptor_init(tx
, &ch
->api_ch
);
277 tx
->tx_submit
= mic_dma_tx_submit_unlock
;
281 /* Program a status descriptor with dst as address and value to be written */
282 static struct dma_async_tx_descriptor
*
283 mic_dma_prep_status_lock(struct dma_chan
*ch
, dma_addr_t dst
, u64 src_val
,
286 struct mic_dma_chan
*mic_ch
= to_mic_dma_chan(ch
);
289 spin_lock(&mic_ch
->prep_lock
);
290 result
= mic_dma_avail_desc_ring_space(mic_ch
, 4);
293 mic_dma_prep_status_desc(&mic_ch
->desc_ring
[mic_ch
->head
], src_val
, dst
,
295 mic_dma_hw_ring_inc_head(mic_ch
);
296 result
= mic_dma_do_dma(mic_ch
, flags
, 0, 0, 0);
300 return allocate_tx(mic_ch
);
302 dev_err(mic_dma_ch_to_device(mic_ch
),
303 "Error enqueueing dma status descriptor, error=%d\n", result
);
304 spin_unlock(&mic_ch
->prep_lock
);
309 * Prepare a memcpy descriptor to be added to the ring.
310 * Note that the temporary descriptor adds an extra overhead of copying the
311 * descriptor to ring. So, we copy directly to the descriptor ring
313 static struct dma_async_tx_descriptor
*
314 mic_dma_prep_memcpy_lock(struct dma_chan
*ch
, dma_addr_t dma_dest
,
315 dma_addr_t dma_src
, size_t len
, unsigned long flags
)
317 struct mic_dma_chan
*mic_ch
= to_mic_dma_chan(ch
);
318 struct device
*dev
= mic_dma_ch_to_device(mic_ch
);
320 struct dma_async_tx_descriptor
*tx
= NULL
;
325 spin_lock(&mic_ch
->prep_lock
);
326 result
= mic_dma_do_dma(mic_ch
, flags
, dma_src
, dma_dest
, len
);
328 tx
= allocate_tx(mic_ch
);
331 dev_err(dev
, "Error enqueueing dma, error=%d\n", result
);
333 spin_unlock(&mic_ch
->prep_lock
);
337 static struct dma_async_tx_descriptor
*
338 mic_dma_prep_interrupt_lock(struct dma_chan
*ch
, unsigned long flags
)
340 struct mic_dma_chan
*mic_ch
= to_mic_dma_chan(ch
);
342 struct dma_async_tx_descriptor
*tx
= NULL
;
344 spin_lock(&mic_ch
->prep_lock
);
345 ret
= mic_dma_do_dma(mic_ch
, flags
, 0, 0, 0);
347 tx
= allocate_tx(mic_ch
);
348 spin_unlock(&mic_ch
->prep_lock
);
352 /* Return the status of the transaction */
353 static enum dma_status
354 mic_dma_tx_status(struct dma_chan
*ch
, dma_cookie_t cookie
,
355 struct dma_tx_state
*txstate
)
357 struct mic_dma_chan
*mic_ch
= to_mic_dma_chan(ch
);
359 if (DMA_COMPLETE
!= dma_cookie_status(ch
, cookie
, txstate
))
360 mic_dma_cleanup(mic_ch
);
362 return dma_cookie_status(ch
, cookie
, txstate
);
365 static irqreturn_t
mic_dma_thread_fn(int irq
, void *data
)
367 mic_dma_cleanup((struct mic_dma_chan
*)data
);
371 static irqreturn_t
mic_dma_intr_handler(int irq
, void *data
)
373 struct mic_dma_chan
*ch
= ((struct mic_dma_chan
*)data
);
375 mic_dma_ack_interrupt(ch
);
376 return IRQ_WAKE_THREAD
;
379 static int mic_dma_alloc_desc_ring(struct mic_dma_chan
*ch
)
381 u64 desc_ring_size
= MIC_DMA_DESC_RX_SIZE
* sizeof(*ch
->desc_ring
);
382 struct device
*dev
= &to_mbus_device(ch
)->dev
;
384 desc_ring_size
= ALIGN(desc_ring_size
, MIC_DMA_ALIGN_BYTES
);
385 ch
->desc_ring
= kzalloc(desc_ring_size
, GFP_KERNEL
);
390 ch
->desc_ring_micpa
= dma_map_single(dev
, ch
->desc_ring
,
391 desc_ring_size
, DMA_BIDIRECTIONAL
);
392 if (dma_mapping_error(dev
, ch
->desc_ring_micpa
))
395 ch
->tx_array
= vzalloc(MIC_DMA_DESC_RX_SIZE
* sizeof(*ch
->tx_array
));
400 dma_unmap_single(dev
, ch
->desc_ring_micpa
, desc_ring_size
,
403 kfree(ch
->desc_ring
);
407 static void mic_dma_free_desc_ring(struct mic_dma_chan
*ch
)
409 u64 desc_ring_size
= MIC_DMA_DESC_RX_SIZE
* sizeof(*ch
->desc_ring
);
412 desc_ring_size
= ALIGN(desc_ring_size
, MIC_DMA_ALIGN_BYTES
);
413 dma_unmap_single(&to_mbus_device(ch
)->dev
, ch
->desc_ring_micpa
,
414 desc_ring_size
, DMA_BIDIRECTIONAL
);
415 kfree(ch
->desc_ring
);
416 ch
->desc_ring
= NULL
;
419 static void mic_dma_free_status_dest(struct mic_dma_chan
*ch
)
421 dma_unmap_single(&to_mbus_device(ch
)->dev
, ch
->status_dest_micpa
,
422 L1_CACHE_BYTES
, DMA_BIDIRECTIONAL
);
423 kfree(ch
->status_dest
);
426 static int mic_dma_alloc_status_dest(struct mic_dma_chan
*ch
)
428 struct device
*dev
= &to_mbus_device(ch
)->dev
;
430 ch
->status_dest
= kzalloc(L1_CACHE_BYTES
, GFP_KERNEL
);
431 if (!ch
->status_dest
)
433 ch
->status_dest_micpa
= dma_map_single(dev
, ch
->status_dest
,
434 L1_CACHE_BYTES
, DMA_BIDIRECTIONAL
);
435 if (dma_mapping_error(dev
, ch
->status_dest_micpa
)) {
436 kfree(ch
->status_dest
);
437 ch
->status_dest
= NULL
;
443 static int mic_dma_check_chan(struct mic_dma_chan
*ch
)
445 if (mic_dma_read_reg(ch
, MIC_DMA_REG_DCHERR
) ||
446 mic_dma_read_reg(ch
, MIC_DMA_REG_DSTAT
) & MIC_DMA_CHAN_QUIESCE
) {
447 mic_dma_disable_chan(ch
);
448 mic_dma_chan_mask_intr(ch
);
449 dev_err(mic_dma_ch_to_device(ch
),
450 "%s %d error setting up mic dma chan %d\n",
451 __func__
, __LINE__
, ch
->ch_num
);
457 static int mic_dma_chan_setup(struct mic_dma_chan
*ch
)
459 if (MIC_DMA_CHAN_MIC
== ch
->owner
)
460 mic_dma_chan_set_owner(ch
);
461 mic_dma_disable_chan(ch
);
462 mic_dma_chan_mask_intr(ch
);
463 mic_dma_write_reg(ch
, MIC_DMA_REG_DCHERRMSK
, 0);
464 mic_dma_chan_set_desc_ring(ch
);
465 ch
->last_tail
= mic_dma_read_reg(ch
, MIC_DMA_REG_DTPR
);
466 ch
->head
= ch
->last_tail
;
468 mic_dma_chan_unmask_intr(ch
);
469 mic_dma_enable_chan(ch
);
470 return mic_dma_check_chan(ch
);
/* Quiesce a channel: stop the h/w and mask its interrupt. */
static void mic_dma_chan_destroy(struct mic_dma_chan *ch)
{
	mic_dma_disable_chan(ch);
	mic_dma_chan_mask_intr(ch);
}
479 static void mic_dma_unregister_dma_device(struct mic_dma_device
*mic_dma_dev
)
481 dma_async_device_unregister(&mic_dma_dev
->dma_dev
);
484 static int mic_dma_setup_irq(struct mic_dma_chan
*ch
)
487 to_mbus_hw_ops(ch
)->request_threaded_irq(to_mbus_device(ch
),
488 mic_dma_intr_handler
, mic_dma_thread_fn
,
489 "mic dma_channel", ch
, ch
->ch_num
);
490 if (IS_ERR(ch
->cookie
))
491 return IS_ERR(ch
->cookie
);
495 static inline void mic_dma_free_irq(struct mic_dma_chan
*ch
)
497 to_mbus_hw_ops(ch
)->free_irq(to_mbus_device(ch
), ch
->cookie
, ch
);
/* Allocate channel resources and bring the h/w up; unwinds on failure. */
static int mic_dma_chan_init(struct mic_dma_chan *ch)
{
	int ret = mic_dma_alloc_desc_ring(ch);

	if (ret)
		goto ring_error;
	ret = mic_dma_alloc_status_dest(ch);
	if (ret)
		goto status_error;
	ret = mic_dma_chan_setup(ch);
	if (ret)
		goto chan_error;
	return ret;
chan_error:
	mic_dma_free_status_dest(ch);
status_error:
	mic_dma_free_desc_ring(ch);
ring_error:
	return ret;
}
521 static int mic_dma_drain_chan(struct mic_dma_chan
*ch
)
523 struct dma_async_tx_descriptor
*tx
;
527 tx
= mic_dma_prep_memcpy_lock(&ch
->api_ch
, 0, 0, 0, DMA_PREP_FENCE
);
533 cookie
= tx
->tx_submit(tx
);
534 if (dma_submit_error(cookie
))
537 err
= dma_sync_wait(&ch
->api_ch
, cookie
);
539 dev_err(mic_dma_ch_to_device(ch
), "%s %d TO chan 0x%x\n",
540 __func__
, __LINE__
, ch
->ch_num
);
/* Tear down a channel: stop h/w, reap completions, free buffers. */
static inline void mic_dma_chan_uninit(struct mic_dma_chan *ch)
{
	mic_dma_chan_destroy(ch);
	mic_dma_cleanup(ch);
	mic_dma_free_status_dest(ch);
	mic_dma_free_desc_ring(ch);
}
556 static int mic_dma_init(struct mic_dma_device
*mic_dma_dev
,
557 enum mic_dma_chan_owner owner
)
559 int i
, first_chan
= mic_dma_dev
->start_ch
;
560 struct mic_dma_chan
*ch
;
563 for (i
= first_chan
; i
< first_chan
+ MIC_DMA_NUM_CHAN
; i
++) {
565 ch
= &mic_dma_dev
->mic_ch
[i
];
566 data
= (unsigned long)ch
;
569 spin_lock_init(&ch
->cleanup_lock
);
570 spin_lock_init(&ch
->prep_lock
);
571 spin_lock_init(&ch
->issue_lock
);
572 ret
= mic_dma_setup_irq(ch
);
578 for (i
= i
- 1; i
>= first_chan
; i
--)
579 mic_dma_free_irq(ch
);
583 static void mic_dma_uninit(struct mic_dma_device
*mic_dma_dev
)
585 int i
, first_chan
= mic_dma_dev
->start_ch
;
586 struct mic_dma_chan
*ch
;
588 for (i
= first_chan
; i
< first_chan
+ MIC_DMA_NUM_CHAN
; i
++) {
589 ch
= &mic_dma_dev
->mic_ch
[i
];
590 mic_dma_free_irq(ch
);
594 static int mic_dma_alloc_chan_resources(struct dma_chan
*ch
)
596 int ret
= mic_dma_chan_init(to_mic_dma_chan(ch
));
599 return MIC_DMA_DESC_RX_SIZE
;
/* dmaengine free_chan_resources hook: drain in-flight work, then tear down. */
static void mic_dma_free_chan_resources(struct dma_chan *ch)
{
	struct mic_dma_chan *mic_ch = to_mic_dma_chan(ch);

	mic_dma_drain_chan(mic_ch);
	mic_dma_chan_uninit(mic_ch);
}
609 /* Set the fn. handlers and register the dma device with dma api */
610 static int mic_dma_register_dma_device(struct mic_dma_device
*mic_dma_dev
,
611 enum mic_dma_chan_owner owner
)
613 int i
, first_chan
= mic_dma_dev
->start_ch
;
615 dma_cap_zero(mic_dma_dev
->dma_dev
.cap_mask
);
617 * This dma engine is not capable of host memory to host memory
620 dma_cap_set(DMA_MEMCPY
, mic_dma_dev
->dma_dev
.cap_mask
);
622 if (MIC_DMA_CHAN_HOST
== owner
)
623 dma_cap_set(DMA_PRIVATE
, mic_dma_dev
->dma_dev
.cap_mask
);
624 mic_dma_dev
->dma_dev
.device_alloc_chan_resources
=
625 mic_dma_alloc_chan_resources
;
626 mic_dma_dev
->dma_dev
.device_free_chan_resources
=
627 mic_dma_free_chan_resources
;
628 mic_dma_dev
->dma_dev
.device_tx_status
= mic_dma_tx_status
;
629 mic_dma_dev
->dma_dev
.device_prep_dma_memcpy
= mic_dma_prep_memcpy_lock
;
630 mic_dma_dev
->dma_dev
.device_prep_dma_imm_data
=
631 mic_dma_prep_status_lock
;
632 mic_dma_dev
->dma_dev
.device_prep_dma_interrupt
=
633 mic_dma_prep_interrupt_lock
;
634 mic_dma_dev
->dma_dev
.device_issue_pending
= mic_dma_issue_pending
;
635 mic_dma_dev
->dma_dev
.copy_align
= MIC_DMA_ALIGN_SHIFT
;
636 INIT_LIST_HEAD(&mic_dma_dev
->dma_dev
.channels
);
637 for (i
= first_chan
; i
< first_chan
+ MIC_DMA_NUM_CHAN
; i
++) {
638 mic_dma_dev
->mic_ch
[i
].api_ch
.device
= &mic_dma_dev
->dma_dev
;
639 dma_cookie_init(&mic_dma_dev
->mic_ch
[i
].api_ch
);
640 list_add_tail(&mic_dma_dev
->mic_ch
[i
].api_ch
.device_node
,
641 &mic_dma_dev
->dma_dev
.channels
);
643 return dma_async_device_register(&mic_dma_dev
->dma_dev
);
647 * Initializes dma channels and registers the dma device with the
650 static struct mic_dma_device
*mic_dma_dev_reg(struct mbus_device
*mbdev
,
651 enum mic_dma_chan_owner owner
)
653 struct mic_dma_device
*mic_dma_dev
;
655 struct device
*dev
= &mbdev
->dev
;
657 mic_dma_dev
= kzalloc(sizeof(*mic_dma_dev
), GFP_KERNEL
);
662 mic_dma_dev
->mbdev
= mbdev
;
663 mic_dma_dev
->dma_dev
.dev
= dev
;
664 mic_dma_dev
->mmio
= mbdev
->mmio_va
;
665 if (MIC_DMA_CHAN_HOST
== owner
) {
666 mic_dma_dev
->start_ch
= 0;
667 mic_dma_dev
->max_xfer_size
= MIC_DMA_MAX_XFER_SIZE_HOST
;
669 mic_dma_dev
->start_ch
= 4;
670 mic_dma_dev
->max_xfer_size
= MIC_DMA_MAX_XFER_SIZE_CARD
;
672 ret
= mic_dma_init(mic_dma_dev
, owner
);
675 ret
= mic_dma_register_dma_device(mic_dma_dev
, owner
);
680 mic_dma_uninit(mic_dma_dev
);
685 dev_err(dev
, "Error at %s %d ret=%d\n", __func__
, __LINE__
, ret
);
/* Undo mic_dma_dev_reg: unregister, free IRQs and release the device. */
static void mic_dma_dev_unreg(struct mic_dma_device *mic_dma_dev)
{
	mic_dma_unregister_dma_device(mic_dma_dev);
	mic_dma_uninit(mic_dma_dev);
	kfree(mic_dma_dev);
}
697 static int mic_dma_reg_seq_show(struct seq_file
*s
, void *pos
)
699 struct mic_dma_device
*mic_dma_dev
= s
->private;
700 int i
, chan_num
, first_chan
= mic_dma_dev
->start_ch
;
701 struct mic_dma_chan
*ch
;
703 seq_printf(s
, "SBOX_DCR: %#x\n",
704 mic_dma_mmio_read(&mic_dma_dev
->mic_ch
[first_chan
],
705 MIC_DMA_SBOX_BASE
+ MIC_DMA_SBOX_DCR
));
706 seq_puts(s
, "DMA Channel Registers\n");
707 seq_printf(s
, "%-10s| %-10s %-10s %-10s %-10s %-10s",
708 "Channel", "DCAR", "DTPR", "DHPR", "DRAR_HI", "DRAR_LO");
709 seq_printf(s
, " %-11s %-14s %-10s\n", "DCHERR", "DCHERRMSK", "DSTAT");
710 for (i
= first_chan
; i
< first_chan
+ MIC_DMA_NUM_CHAN
; i
++) {
711 ch
= &mic_dma_dev
->mic_ch
[i
];
712 chan_num
= ch
->ch_num
;
713 seq_printf(s
, "%-10i| %-#10x %-#10x %-#10x %-#10x",
715 mic_dma_read_reg(ch
, MIC_DMA_REG_DCAR
),
716 mic_dma_read_reg(ch
, MIC_DMA_REG_DTPR
),
717 mic_dma_read_reg(ch
, MIC_DMA_REG_DHPR
),
718 mic_dma_read_reg(ch
, MIC_DMA_REG_DRAR_HI
));
719 seq_printf(s
, " %-#10x %-#10x %-#14x %-#10x\n",
720 mic_dma_read_reg(ch
, MIC_DMA_REG_DRAR_LO
),
721 mic_dma_read_reg(ch
, MIC_DMA_REG_DCHERR
),
722 mic_dma_read_reg(ch
, MIC_DMA_REG_DCHERRMSK
),
723 mic_dma_read_reg(ch
, MIC_DMA_REG_DSTAT
));
728 static int mic_dma_reg_debug_open(struct inode
*inode
, struct file
*file
)
730 return single_open(file
, mic_dma_reg_seq_show
, inode
->i_private
);
/* debugfs release hook matching single_open. */
static int mic_dma_reg_debug_release(struct inode *inode, struct file *file)
{
	return single_release(inode, file);
}
738 static const struct file_operations mic_dma_reg_ops
= {
739 .owner
= THIS_MODULE
,
740 .open
= mic_dma_reg_debug_open
,
743 .release
= mic_dma_reg_debug_release
/* Debugfs parent dir */
static struct dentry *mic_dma_dbg;
749 static int mic_dma_driver_probe(struct mbus_device
*mbdev
)
751 struct mic_dma_device
*mic_dma_dev
;
752 enum mic_dma_chan_owner owner
;
754 if (MBUS_DEV_DMA_MIC
== mbdev
->id
.device
)
755 owner
= MIC_DMA_CHAN_MIC
;
757 owner
= MIC_DMA_CHAN_HOST
;
759 mic_dma_dev
= mic_dma_dev_reg(mbdev
, owner
);
760 dev_set_drvdata(&mbdev
->dev
, mic_dma_dev
);
763 mic_dma_dev
->dbg_dir
= debugfs_create_dir(dev_name(&mbdev
->dev
),
765 if (mic_dma_dev
->dbg_dir
)
766 debugfs_create_file("mic_dma_reg", 0444,
767 mic_dma_dev
->dbg_dir
, mic_dma_dev
,
773 static void mic_dma_driver_remove(struct mbus_device
*mbdev
)
775 struct mic_dma_device
*mic_dma_dev
;
777 mic_dma_dev
= dev_get_drvdata(&mbdev
->dev
);
778 debugfs_remove_recursive(mic_dma_dev
->dbg_dir
);
779 mic_dma_dev_unreg(mic_dma_dev
);
782 static struct mbus_device_id id_table
[] = {
783 {MBUS_DEV_DMA_MIC
, MBUS_DEV_ANY_ID
},
784 {MBUS_DEV_DMA_HOST
, MBUS_DEV_ANY_ID
},
788 static struct mbus_driver mic_dma_driver
= {
789 .driver
.name
= KBUILD_MODNAME
,
790 .driver
.owner
= THIS_MODULE
,
791 .id_table
= id_table
,
792 .probe
= mic_dma_driver_probe
,
793 .remove
= mic_dma_driver_remove
,
796 static int __init
mic_x100_dma_init(void)
798 int rc
= mbus_register_driver(&mic_dma_driver
);
801 mic_dma_dbg
= debugfs_create_dir(KBUILD_MODNAME
, NULL
);
805 static void __exit
mic_x100_dma_exit(void)
807 debugfs_remove_recursive(mic_dma_dbg
);
808 mbus_unregister_driver(&mic_dma_driver
);
811 module_init(mic_x100_dma_init
);
812 module_exit(mic_x100_dma_exit
);
814 MODULE_DEVICE_TABLE(mbus
, id_table
);
815 MODULE_AUTHOR("Intel Corporation");
816 MODULE_DESCRIPTION("Intel(R) MIC X100 DMA Driver");
817 MODULE_LICENSE("GPL v2");