/*
 * Qualcomm Technologies HIDMA DMA engine interface
 *
 * Copyright (c) 2015-2016, The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * Copyright (C) Freescale Semicondutor, Inc. 2007, 2008.
 * Copyright (C) Semihalf 2009
 * Copyright (C) Ilya Yanok, Emcraft Systems 2010
 * Copyright (C) Alexander Popov, Promcontroller 2014
 *
 * Written by Piotr Ziecik <kosmo@semihalf.com>. Hardware description
 * (defines, structures and comments) was taken from MPC5121 DMA driver
 * written by Hongjun Chen <hong-jun.chen@freescale.com>.
 *
 * Approved as OSADL project by a majority of OSADL members and funded
 * by OSADL membership fees in 2009; for details see www.osadl.org.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or (at your option)
 * any later version.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * The full GNU General Public License is included in this distribution in the
 * file called COPYING.
 */
43 /* Linux Foundation elects GPLv2 license only. */
45 #include <linux/dmaengine.h>
46 #include <linux/dma-mapping.h>
47 #include <linux/list.h>
48 #include <linux/module.h>
49 #include <linux/platform_device.h>
50 #include <linux/slab.h>
51 #include <linux/spinlock.h>
52 #include <linux/of_dma.h>
53 #include <linux/property.h>
54 #include <linux/delay.h>
55 #include <linux/acpi.h>
56 #include <linux/irq.h>
57 #include <linux/atomic.h>
58 #include <linux/pm_runtime.h>
60 #include "../dmaengine.h"
/*
 * Default idle time is 2 seconds. This parameter can
 * be overridden by changing the following
 * /sys/bus/platform/devices/QCOM8061:<xy>/power/autosuspend_delay_ms
 */
69 #define HIDMA_AUTOSUSPEND_TIMEOUT 2000
70 #define HIDMA_ERR_INFO_SW 0xFF
71 #define HIDMA_ERR_CODE_UNEXPECTED_TERMINATE 0x0
72 #define HIDMA_NR_DEFAULT_DESC 10
74 static inline struct hidma_dev
*to_hidma_dev(struct dma_device
*dmadev
)
76 return container_of(dmadev
, struct hidma_dev
, ddev
);
80 struct hidma_dev
*to_hidma_dev_from_lldev(struct hidma_lldev
**_lldevp
)
82 return container_of(_lldevp
, struct hidma_dev
, lldev
);
85 static inline struct hidma_chan
*to_hidma_chan(struct dma_chan
*dmach
)
87 return container_of(dmach
, struct hidma_chan
, chan
);
91 struct hidma_desc
*to_hidma_desc(struct dma_async_tx_descriptor
*t
)
93 return container_of(t
, struct hidma_desc
, desc
);
96 static void hidma_free(struct hidma_dev
*dmadev
)
98 INIT_LIST_HEAD(&dmadev
->ddev
.channels
);
101 static unsigned int nr_desc_prm
;
102 module_param(nr_desc_prm
, uint
, 0644);
103 MODULE_PARM_DESC(nr_desc_prm
, "number of descriptors (default: 0)");
106 /* process completed descriptors */
107 static void hidma_process_completed(struct hidma_chan
*mchan
)
109 struct dma_device
*ddev
= mchan
->chan
.device
;
110 struct hidma_dev
*mdma
= to_hidma_dev(ddev
);
111 struct dma_async_tx_descriptor
*desc
;
112 dma_cookie_t last_cookie
;
113 struct hidma_desc
*mdesc
;
114 unsigned long irqflags
;
115 struct list_head list
;
117 INIT_LIST_HEAD(&list
);
119 /* Get all completed descriptors */
120 spin_lock_irqsave(&mchan
->lock
, irqflags
);
121 list_splice_tail_init(&mchan
->completed
, &list
);
122 spin_unlock_irqrestore(&mchan
->lock
, irqflags
);
124 /* Execute callbacks and run dependencies */
125 list_for_each_entry(mdesc
, &list
, node
) {
126 enum dma_status llstat
;
130 spin_lock_irqsave(&mchan
->lock
, irqflags
);
131 dma_cookie_complete(desc
);
132 spin_unlock_irqrestore(&mchan
->lock
, irqflags
);
134 llstat
= hidma_ll_status(mdma
->lldev
, mdesc
->tre_ch
);
135 if (desc
->callback
&& (llstat
== DMA_COMPLETE
))
136 desc
->callback(desc
->callback_param
);
138 last_cookie
= desc
->cookie
;
139 dma_run_dependencies(desc
);
142 /* Free descriptors */
143 spin_lock_irqsave(&mchan
->lock
, irqflags
);
144 list_splice_tail_init(&list
, &mchan
->free
);
145 spin_unlock_irqrestore(&mchan
->lock
, irqflags
);
150 * Called once for each submitted descriptor.
151 * PM is locked once for each descriptor that is currently
154 static void hidma_callback(void *data
)
156 struct hidma_desc
*mdesc
= data
;
157 struct hidma_chan
*mchan
= to_hidma_chan(mdesc
->desc
.chan
);
158 struct dma_device
*ddev
= mchan
->chan
.device
;
159 struct hidma_dev
*dmadev
= to_hidma_dev(ddev
);
160 unsigned long irqflags
;
163 spin_lock_irqsave(&mchan
->lock
, irqflags
);
164 if (mdesc
->node
.next
) {
165 /* Delete from the active list, add to completed list */
166 list_move_tail(&mdesc
->node
, &mchan
->completed
);
169 /* calculate the next running descriptor */
170 mchan
->running
= list_first_entry(&mchan
->active
,
171 struct hidma_desc
, node
);
173 spin_unlock_irqrestore(&mchan
->lock
, irqflags
);
175 hidma_process_completed(mchan
);
178 pm_runtime_mark_last_busy(dmadev
->ddev
.dev
);
179 pm_runtime_put_autosuspend(dmadev
->ddev
.dev
);
183 static int hidma_chan_init(struct hidma_dev
*dmadev
, u32 dma_sig
)
185 struct hidma_chan
*mchan
;
186 struct dma_device
*ddev
;
188 mchan
= devm_kzalloc(dmadev
->ddev
.dev
, sizeof(*mchan
), GFP_KERNEL
);
192 ddev
= &dmadev
->ddev
;
193 mchan
->dma_sig
= dma_sig
;
194 mchan
->dmadev
= dmadev
;
195 mchan
->chan
.device
= ddev
;
196 dma_cookie_init(&mchan
->chan
);
198 INIT_LIST_HEAD(&mchan
->free
);
199 INIT_LIST_HEAD(&mchan
->prepared
);
200 INIT_LIST_HEAD(&mchan
->active
);
201 INIT_LIST_HEAD(&mchan
->completed
);
203 spin_lock_init(&mchan
->lock
);
204 list_add_tail(&mchan
->chan
.device_node
, &ddev
->channels
);
205 dmadev
->ddev
.chancnt
++;
209 static void hidma_issue_task(unsigned long arg
)
211 struct hidma_dev
*dmadev
= (struct hidma_dev
*)arg
;
213 pm_runtime_get_sync(dmadev
->ddev
.dev
);
214 hidma_ll_start(dmadev
->lldev
);
217 static void hidma_issue_pending(struct dma_chan
*dmach
)
219 struct hidma_chan
*mchan
= to_hidma_chan(dmach
);
220 struct hidma_dev
*dmadev
= mchan
->dmadev
;
224 spin_lock_irqsave(&mchan
->lock
, flags
);
225 if (!mchan
->running
) {
226 struct hidma_desc
*desc
= list_first_entry(&mchan
->active
,
229 mchan
->running
= desc
;
231 spin_unlock_irqrestore(&mchan
->lock
, flags
);
233 /* PM will be released in hidma_callback function. */
234 status
= pm_runtime_get(dmadev
->ddev
.dev
);
236 tasklet_schedule(&dmadev
->task
);
238 hidma_ll_start(dmadev
->lldev
);
241 static enum dma_status
hidma_tx_status(struct dma_chan
*dmach
,
243 struct dma_tx_state
*txstate
)
245 struct hidma_chan
*mchan
= to_hidma_chan(dmach
);
248 ret
= dma_cookie_status(dmach
, cookie
, txstate
);
249 if (ret
== DMA_COMPLETE
)
252 if (mchan
->paused
&& (ret
== DMA_IN_PROGRESS
)) {
254 dma_cookie_t runcookie
;
256 spin_lock_irqsave(&mchan
->lock
, flags
);
258 runcookie
= mchan
->running
->desc
.cookie
;
262 if (runcookie
== cookie
)
265 spin_unlock_irqrestore(&mchan
->lock
, flags
);
272 * Submit descriptor to hardware.
273 * Lock the PM for each descriptor we are sending.
275 static dma_cookie_t
hidma_tx_submit(struct dma_async_tx_descriptor
*txd
)
277 struct hidma_chan
*mchan
= to_hidma_chan(txd
->chan
);
278 struct hidma_dev
*dmadev
= mchan
->dmadev
;
279 struct hidma_desc
*mdesc
;
280 unsigned long irqflags
;
283 pm_runtime_get_sync(dmadev
->ddev
.dev
);
284 if (!hidma_ll_isenabled(dmadev
->lldev
)) {
285 pm_runtime_mark_last_busy(dmadev
->ddev
.dev
);
286 pm_runtime_put_autosuspend(dmadev
->ddev
.dev
);
290 mdesc
= container_of(txd
, struct hidma_desc
, desc
);
291 spin_lock_irqsave(&mchan
->lock
, irqflags
);
293 /* Move descriptor to active */
294 list_move_tail(&mdesc
->node
, &mchan
->active
);
297 cookie
= dma_cookie_assign(txd
);
299 hidma_ll_queue_request(dmadev
->lldev
, mdesc
->tre_ch
);
300 spin_unlock_irqrestore(&mchan
->lock
, irqflags
);
305 static int hidma_alloc_chan_resources(struct dma_chan
*dmach
)
307 struct hidma_chan
*mchan
= to_hidma_chan(dmach
);
308 struct hidma_dev
*dmadev
= mchan
->dmadev
;
309 struct hidma_desc
*mdesc
, *tmp
;
310 unsigned long irqflags
;
315 if (mchan
->allocated
)
318 /* Alloc descriptors for this channel */
319 for (i
= 0; i
< dmadev
->nr_descriptors
; i
++) {
320 mdesc
= kzalloc(sizeof(struct hidma_desc
), GFP_NOWAIT
);
325 dma_async_tx_descriptor_init(&mdesc
->desc
, dmach
);
326 mdesc
->desc
.tx_submit
= hidma_tx_submit
;
328 rc
= hidma_ll_request(dmadev
->lldev
, mchan
->dma_sig
,
329 "DMA engine", hidma_callback
, mdesc
,
332 dev_err(dmach
->device
->dev
,
333 "channel alloc failed at %u\n", i
);
337 list_add_tail(&mdesc
->node
, &descs
);
341 /* return the allocated descriptors */
342 list_for_each_entry_safe(mdesc
, tmp
, &descs
, node
) {
343 hidma_ll_free(dmadev
->lldev
, mdesc
->tre_ch
);
349 spin_lock_irqsave(&mchan
->lock
, irqflags
);
350 list_splice_tail_init(&descs
, &mchan
->free
);
351 mchan
->allocated
= true;
352 spin_unlock_irqrestore(&mchan
->lock
, irqflags
);
356 static struct dma_async_tx_descriptor
*
357 hidma_prep_dma_memcpy(struct dma_chan
*dmach
, dma_addr_t dest
, dma_addr_t src
,
358 size_t len
, unsigned long flags
)
360 struct hidma_chan
*mchan
= to_hidma_chan(dmach
);
361 struct hidma_desc
*mdesc
= NULL
;
362 struct hidma_dev
*mdma
= mchan
->dmadev
;
363 unsigned long irqflags
;
365 /* Get free descriptor */
366 spin_lock_irqsave(&mchan
->lock
, irqflags
);
367 if (!list_empty(&mchan
->free
)) {
368 mdesc
= list_first_entry(&mchan
->free
, struct hidma_desc
, node
);
369 list_del(&mdesc
->node
);
371 spin_unlock_irqrestore(&mchan
->lock
, irqflags
);
376 hidma_ll_set_transfer_params(mdma
->lldev
, mdesc
->tre_ch
,
377 src
, dest
, len
, flags
);
379 /* Place descriptor in prepared list */
380 spin_lock_irqsave(&mchan
->lock
, irqflags
);
381 list_add_tail(&mdesc
->node
, &mchan
->prepared
);
382 spin_unlock_irqrestore(&mchan
->lock
, irqflags
);
387 static int hidma_terminate_channel(struct dma_chan
*chan
)
389 struct hidma_chan
*mchan
= to_hidma_chan(chan
);
390 struct hidma_dev
*dmadev
= to_hidma_dev(mchan
->chan
.device
);
391 struct hidma_desc
*tmp
, *mdesc
;
392 unsigned long irqflags
;
396 pm_runtime_get_sync(dmadev
->ddev
.dev
);
397 /* give completed requests a chance to finish */
398 hidma_process_completed(mchan
);
400 spin_lock_irqsave(&mchan
->lock
, irqflags
);
401 list_splice_init(&mchan
->active
, &list
);
402 list_splice_init(&mchan
->prepared
, &list
);
403 list_splice_init(&mchan
->completed
, &list
);
404 spin_unlock_irqrestore(&mchan
->lock
, irqflags
);
406 /* this suspends the existing transfer */
407 rc
= hidma_ll_disable(dmadev
->lldev
);
409 dev_err(dmadev
->ddev
.dev
, "channel did not pause\n");
413 /* return all user requests */
414 list_for_each_entry_safe(mdesc
, tmp
, &list
, node
) {
415 struct dma_async_tx_descriptor
*txd
= &mdesc
->desc
;
416 dma_async_tx_callback callback
= mdesc
->desc
.callback
;
417 void *param
= mdesc
->desc
.callback_param
;
419 dma_descriptor_unmap(txd
);
424 dma_run_dependencies(txd
);
426 /* move myself to free_list */
427 list_move(&mdesc
->node
, &mchan
->free
);
430 rc
= hidma_ll_enable(dmadev
->lldev
);
432 pm_runtime_mark_last_busy(dmadev
->ddev
.dev
);
433 pm_runtime_put_autosuspend(dmadev
->ddev
.dev
);
437 static int hidma_terminate_all(struct dma_chan
*chan
)
439 struct hidma_chan
*mchan
= to_hidma_chan(chan
);
440 struct hidma_dev
*dmadev
= to_hidma_dev(mchan
->chan
.device
);
443 rc
= hidma_terminate_channel(chan
);
447 /* reinitialize the hardware */
448 pm_runtime_get_sync(dmadev
->ddev
.dev
);
449 rc
= hidma_ll_setup(dmadev
->lldev
);
450 pm_runtime_mark_last_busy(dmadev
->ddev
.dev
);
451 pm_runtime_put_autosuspend(dmadev
->ddev
.dev
);
455 static void hidma_free_chan_resources(struct dma_chan
*dmach
)
457 struct hidma_chan
*mchan
= to_hidma_chan(dmach
);
458 struct hidma_dev
*mdma
= mchan
->dmadev
;
459 struct hidma_desc
*mdesc
, *tmp
;
460 unsigned long irqflags
;
463 /* terminate running transactions and free descriptors */
464 hidma_terminate_channel(dmach
);
466 spin_lock_irqsave(&mchan
->lock
, irqflags
);
469 list_splice_tail_init(&mchan
->free
, &descs
);
471 /* Free descriptors */
472 list_for_each_entry_safe(mdesc
, tmp
, &descs
, node
) {
473 hidma_ll_free(mdma
->lldev
, mdesc
->tre_ch
);
474 list_del(&mdesc
->node
);
478 mchan
->allocated
= 0;
479 spin_unlock_irqrestore(&mchan
->lock
, irqflags
);
482 static int hidma_pause(struct dma_chan
*chan
)
484 struct hidma_chan
*mchan
;
485 struct hidma_dev
*dmadev
;
487 mchan
= to_hidma_chan(chan
);
488 dmadev
= to_hidma_dev(mchan
->chan
.device
);
489 if (!mchan
->paused
) {
490 pm_runtime_get_sync(dmadev
->ddev
.dev
);
491 if (hidma_ll_disable(dmadev
->lldev
))
492 dev_warn(dmadev
->ddev
.dev
, "channel did not stop\n");
493 mchan
->paused
= true;
494 pm_runtime_mark_last_busy(dmadev
->ddev
.dev
);
495 pm_runtime_put_autosuspend(dmadev
->ddev
.dev
);
500 static int hidma_resume(struct dma_chan
*chan
)
502 struct hidma_chan
*mchan
;
503 struct hidma_dev
*dmadev
;
506 mchan
= to_hidma_chan(chan
);
507 dmadev
= to_hidma_dev(mchan
->chan
.device
);
509 pm_runtime_get_sync(dmadev
->ddev
.dev
);
510 rc
= hidma_ll_enable(dmadev
->lldev
);
512 mchan
->paused
= false;
514 dev_err(dmadev
->ddev
.dev
,
515 "failed to resume the channel");
516 pm_runtime_mark_last_busy(dmadev
->ddev
.dev
);
517 pm_runtime_put_autosuspend(dmadev
->ddev
.dev
);
522 static irqreturn_t
hidma_chirq_handler(int chirq
, void *arg
)
524 struct hidma_lldev
*lldev
= arg
;
527 * All interrupts are request driven.
528 * HW doesn't send an interrupt by itself.
530 return hidma_ll_inthandler(chirq
, lldev
);
533 static ssize_t
hidma_show_values(struct device
*dev
,
534 struct device_attribute
*attr
, char *buf
)
536 struct platform_device
*pdev
= to_platform_device(dev
);
537 struct hidma_dev
*mdev
= platform_get_drvdata(pdev
);
541 if (strcmp(attr
->attr
.name
, "chid") == 0)
542 sprintf(buf
, "%d\n", mdev
->chidx
);
547 static int hidma_create_sysfs_entry(struct hidma_dev
*dev
, char *name
,
550 struct device_attribute
*attrs
;
553 attrs
= devm_kmalloc(dev
->ddev
.dev
, sizeof(struct device_attribute
),
558 name_copy
= devm_kstrdup(dev
->ddev
.dev
, name
, GFP_KERNEL
);
562 attrs
->attr
.name
= name_copy
;
563 attrs
->attr
.mode
= mode
;
564 attrs
->show
= hidma_show_values
;
565 sysfs_attr_init(&attrs
->attr
);
567 return device_create_file(dev
->ddev
.dev
, attrs
);
570 static int hidma_probe(struct platform_device
*pdev
)
572 struct hidma_dev
*dmadev
;
573 struct resource
*trca_resource
;
574 struct resource
*evca_resource
;
580 pm_runtime_set_autosuspend_delay(&pdev
->dev
, HIDMA_AUTOSUSPEND_TIMEOUT
);
581 pm_runtime_use_autosuspend(&pdev
->dev
);
582 pm_runtime_set_active(&pdev
->dev
);
583 pm_runtime_enable(&pdev
->dev
);
585 trca_resource
= platform_get_resource(pdev
, IORESOURCE_MEM
, 0);
586 trca
= devm_ioremap_resource(&pdev
->dev
, trca_resource
);
592 evca_resource
= platform_get_resource(pdev
, IORESOURCE_MEM
, 1);
593 evca
= devm_ioremap_resource(&pdev
->dev
, evca_resource
);
600 * This driver only handles the channel IRQs.
601 * Common IRQ is handled by the management driver.
603 chirq
= platform_get_irq(pdev
, 0);
609 dmadev
= devm_kzalloc(&pdev
->dev
, sizeof(*dmadev
), GFP_KERNEL
);
615 INIT_LIST_HEAD(&dmadev
->ddev
.channels
);
616 spin_lock_init(&dmadev
->lock
);
617 dmadev
->ddev
.dev
= &pdev
->dev
;
618 pm_runtime_get_sync(dmadev
->ddev
.dev
);
620 dma_cap_set(DMA_MEMCPY
, dmadev
->ddev
.cap_mask
);
621 if (WARN_ON(!pdev
->dev
.dma_mask
)) {
626 dmadev
->dev_evca
= evca
;
627 dmadev
->evca_resource
= evca_resource
;
628 dmadev
->dev_trca
= trca
;
629 dmadev
->trca_resource
= trca_resource
;
630 dmadev
->ddev
.device_prep_dma_memcpy
= hidma_prep_dma_memcpy
;
631 dmadev
->ddev
.device_alloc_chan_resources
= hidma_alloc_chan_resources
;
632 dmadev
->ddev
.device_free_chan_resources
= hidma_free_chan_resources
;
633 dmadev
->ddev
.device_tx_status
= hidma_tx_status
;
634 dmadev
->ddev
.device_issue_pending
= hidma_issue_pending
;
635 dmadev
->ddev
.device_pause
= hidma_pause
;
636 dmadev
->ddev
.device_resume
= hidma_resume
;
637 dmadev
->ddev
.device_terminate_all
= hidma_terminate_all
;
638 dmadev
->ddev
.copy_align
= 8;
640 device_property_read_u32(&pdev
->dev
, "desc-count",
641 &dmadev
->nr_descriptors
);
643 if (!dmadev
->nr_descriptors
&& nr_desc_prm
)
644 dmadev
->nr_descriptors
= nr_desc_prm
;
646 if (!dmadev
->nr_descriptors
)
647 dmadev
->nr_descriptors
= HIDMA_NR_DEFAULT_DESC
;
649 dmadev
->chidx
= readl(dmadev
->dev_trca
+ 0x28);
651 /* Set DMA mask to 64 bits. */
652 rc
= dma_set_mask_and_coherent(&pdev
->dev
, DMA_BIT_MASK(64));
654 dev_warn(&pdev
->dev
, "unable to set coherent mask to 64");
655 rc
= dma_set_mask_and_coherent(&pdev
->dev
, DMA_BIT_MASK(32));
660 dmadev
->lldev
= hidma_ll_init(dmadev
->ddev
.dev
,
661 dmadev
->nr_descriptors
, dmadev
->dev_trca
,
662 dmadev
->dev_evca
, dmadev
->chidx
);
663 if (!dmadev
->lldev
) {
668 rc
= devm_request_irq(&pdev
->dev
, chirq
, hidma_chirq_handler
, 0,
669 "qcom-hidma", dmadev
->lldev
);
673 INIT_LIST_HEAD(&dmadev
->ddev
.channels
);
674 rc
= hidma_chan_init(dmadev
, 0);
678 rc
= dma_async_device_register(&dmadev
->ddev
);
683 tasklet_init(&dmadev
->task
, hidma_issue_task
, (unsigned long)dmadev
);
684 hidma_debug_init(dmadev
);
685 hidma_create_sysfs_entry(dmadev
, "chid", S_IRUGO
);
686 dev_info(&pdev
->dev
, "HI-DMA engine driver registration complete\n");
687 platform_set_drvdata(pdev
, dmadev
);
688 pm_runtime_mark_last_busy(dmadev
->ddev
.dev
);
689 pm_runtime_put_autosuspend(dmadev
->ddev
.dev
);
693 hidma_debug_uninit(dmadev
);
694 hidma_ll_uninit(dmadev
->lldev
);
699 pm_runtime_put_sync(&pdev
->dev
);
700 pm_runtime_disable(&pdev
->dev
);
704 static int hidma_remove(struct platform_device
*pdev
)
706 struct hidma_dev
*dmadev
= platform_get_drvdata(pdev
);
708 pm_runtime_get_sync(dmadev
->ddev
.dev
);
709 dma_async_device_unregister(&dmadev
->ddev
);
710 devm_free_irq(dmadev
->ddev
.dev
, dmadev
->irq
, dmadev
->lldev
);
711 tasklet_kill(&dmadev
->task
);
712 hidma_debug_uninit(dmadev
);
713 hidma_ll_uninit(dmadev
->lldev
);
716 dev_info(&pdev
->dev
, "HI-DMA engine removed\n");
717 pm_runtime_put_sync_suspend(&pdev
->dev
);
718 pm_runtime_disable(&pdev
->dev
);
723 #if IS_ENABLED(CONFIG_ACPI)
724 static const struct acpi_device_id hidma_acpi_ids
[] = {
730 static const struct of_device_id hidma_match
[] = {
731 {.compatible
= "qcom,hidma-1.0",},
734 MODULE_DEVICE_TABLE(of
, hidma_match
);
736 static struct platform_driver hidma_driver
= {
737 .probe
= hidma_probe
,
738 .remove
= hidma_remove
,
741 .of_match_table
= hidma_match
,
742 .acpi_match_table
= ACPI_PTR(hidma_acpi_ids
),
746 module_platform_driver(hidma_driver
);
747 MODULE_LICENSE("GPL v2");