Commit | Line | Data |
---|---|---|
e2f91578 OJ |
1 | /* |
2 | * Copyright (C) 2010 NVIDIA Corporation. | |
3 | * Copyright (C) 2010 Google, Inc. | |
4 | * | |
5 | * This software is licensed under the terms of the GNU General Public | |
6 | * License version 2, as published by the Free Software Foundation, and | |
7 | * may be copied, distributed, and modified under those terms. | |
8 | * | |
9 | * This program is distributed in the hope that it will be useful, | |
10 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | |
11 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | |
12 | * GNU General Public License for more details. | |
13 | * | |
14 | */ | |
15 | ||
a0524acc | 16 | #include <linux/completion.h> |
5b39fc0b | 17 | #include <linux/dmaengine.h> |
e2f91578 | 18 | #include <linux/dma-mapping.h> |
a0524acc TR |
19 | #include <linux/io.h> |
20 | #include <linux/kernel.h> | |
e2f91578 | 21 | #include <linux/mutex.h> |
a0524acc TR |
22 | #include <linux/of.h> |
23 | #include <linux/sched.h> | |
24 | #include <linux/spinlock.h> | |
e2f91578 | 25 | |
e2f91578 | 26 | #include "apbio.h" |
2be39c07 | 27 | #include "iomap.h" |
e2f91578 | 28 | |
#if defined(CONFIG_TEGRA20_APB_DMA)
/* Serializes lazy DMA-channel setup and all DMA-based APB accesses. */
static DEFINE_MUTEX(tegra_apb_dma_lock);
/* Single-word bounce buffer: CPU pointer and its bus address for the DMA engine. */
static u32 *tegra_apb_bb;
static dma_addr_t tegra_apb_bb_phys;
/* Completed by apb_dma_complete() when a queued transfer finishes. */
static DECLARE_COMPLETION(tegra_apb_wait);

/* Direct-MMIO fallbacks, defined later in this file. */
static int tegra_apb_readl_direct(unsigned long offset, u32 *value);
static int tegra_apb_writel_direct(u32 value, unsigned long offset);

/* Channel and slave config shared by all transfers; guarded by tegra_apb_dma_lock. */
static struct dma_chan *tegra_apb_dma_chan;
static struct dma_slave_config dma_sconfig;
deeb8d19 | 41 | static bool tegra_apb_dma_init(void) |
5b39fc0b LD |
42 | { |
43 | dma_cap_mask_t mask; | |
44 | ||
45 | mutex_lock(&tegra_apb_dma_lock); | |
46 | ||
47 | /* Check to see if we raced to setup */ | |
48 | if (tegra_apb_dma_chan) | |
49 | goto skip_init; | |
50 | ||
51 | dma_cap_zero(mask); | |
52 | dma_cap_set(DMA_SLAVE, mask); | |
53 | tegra_apb_dma_chan = dma_request_channel(mask, NULL, NULL); | |
54 | if (!tegra_apb_dma_chan) { | |
55 | /* | |
56 | * This is common until the device is probed, so don't | |
57 | * shout about it. | |
58 | */ | |
59 | pr_debug("%s: can not allocate dma channel\n", __func__); | |
60 | goto err_dma_alloc; | |
61 | } | |
62 | ||
63 | tegra_apb_bb = dma_alloc_coherent(NULL, sizeof(u32), | |
64 | &tegra_apb_bb_phys, GFP_KERNEL); | |
65 | if (!tegra_apb_bb) { | |
66 | pr_err("%s: can not allocate bounce buffer\n", __func__); | |
67 | goto err_buff_alloc; | |
68 | } | |
69 | ||
70 | dma_sconfig.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES; | |
71 | dma_sconfig.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES; | |
5b39fc0b LD |
72 | dma_sconfig.src_maxburst = 1; |
73 | dma_sconfig.dst_maxburst = 1; | |
74 | ||
75 | skip_init: | |
76 | mutex_unlock(&tegra_apb_dma_lock); | |
77 | return true; | |
78 | ||
79 | err_buff_alloc: | |
80 | dma_release_channel(tegra_apb_dma_chan); | |
81 | tegra_apb_dma_chan = NULL; | |
82 | ||
83 | err_dma_alloc: | |
84 | mutex_unlock(&tegra_apb_dma_lock); | |
85 | return false; | |
86 | } | |
87 | ||
88 | static void apb_dma_complete(void *args) | |
89 | { | |
90 | complete(&tegra_apb_wait); | |
91 | } | |
92 | ||
93 | static int do_dma_transfer(unsigned long apb_add, | |
94 | enum dma_transfer_direction dir) | |
95 | { | |
96 | struct dma_async_tx_descriptor *dma_desc; | |
97 | int ret; | |
98 | ||
99 | if (dir == DMA_DEV_TO_MEM) | |
100 | dma_sconfig.src_addr = apb_add; | |
101 | else | |
102 | dma_sconfig.dst_addr = apb_add; | |
103 | ||
104 | ret = dmaengine_slave_config(tegra_apb_dma_chan, &dma_sconfig); | |
105 | if (ret) | |
106 | return ret; | |
107 | ||
108 | dma_desc = dmaengine_prep_slave_single(tegra_apb_dma_chan, | |
109 | tegra_apb_bb_phys, sizeof(u32), dir, | |
110 | DMA_PREP_INTERRUPT | DMA_CTRL_ACK); | |
111 | if (!dma_desc) | |
112 | return -EINVAL; | |
113 | ||
114 | dma_desc->callback = apb_dma_complete; | |
115 | dma_desc->callback_param = NULL; | |
116 | ||
16735d02 | 117 | reinit_completion(&tegra_apb_wait); |
5b39fc0b LD |
118 | |
119 | dmaengine_submit(dma_desc); | |
120 | dma_async_issue_pending(tegra_apb_dma_chan); | |
121 | ret = wait_for_completion_timeout(&tegra_apb_wait, | |
122 | msecs_to_jiffies(50)); | |
123 | ||
124 | if (WARN(ret == 0, "apb read dma timed out")) { | |
125 | dmaengine_terminate_all(tegra_apb_dma_chan); | |
126 | return -EFAULT; | |
127 | } | |
128 | return 0; | |
129 | } | |
130 | ||
3f394f80 | 131 | int tegra_apb_readl_using_dma(unsigned long offset, u32 *value) |
5b39fc0b LD |
132 | { |
133 | int ret; | |
134 | ||
135 | if (!tegra_apb_dma_chan && !tegra_apb_dma_init()) | |
3f394f80 | 136 | return tegra_apb_readl_direct(offset, value); |
5b39fc0b LD |
137 | |
138 | mutex_lock(&tegra_apb_dma_lock); | |
139 | ret = do_dma_transfer(offset, DMA_DEV_TO_MEM); | |
3f394f80 | 140 | if (ret < 0) |
5b39fc0b | 141 | pr_err("error in reading offset 0x%08lx using dma\n", offset); |
3f394f80 PDS |
142 | else |
143 | *value = *tegra_apb_bb; | |
144 | ||
5b39fc0b | 145 | mutex_unlock(&tegra_apb_dma_lock); |
3f394f80 PDS |
146 | |
147 | return ret; | |
5b39fc0b LD |
148 | } |
149 | ||
3f394f80 | 150 | int tegra_apb_writel_using_dma(u32 value, unsigned long offset) |
5b39fc0b LD |
151 | { |
152 | int ret; | |
153 | ||
3f394f80 PDS |
154 | if (!tegra_apb_dma_chan && !tegra_apb_dma_init()) |
155 | return tegra_apb_writel_direct(value, offset); | |
5b39fc0b LD |
156 | |
157 | mutex_lock(&tegra_apb_dma_lock); | |
158 | *((u32 *)tegra_apb_bb) = value; | |
159 | ret = do_dma_transfer(offset, DMA_MEM_TO_DEV); | |
3f394f80 | 160 | mutex_unlock(&tegra_apb_dma_lock); |
5b39fc0b LD |
161 | if (ret < 0) |
162 | pr_err("error in writing offset 0x%08lx using dma\n", offset); | |
3f394f80 PDS |
163 | |
164 | return ret; | |
5b39fc0b | 165 | } |
b861c275 LD |
#else
/* No Tegra20 APB DMA support: the "using_dma" accessors degrade to direct MMIO. */
#define tegra_apb_readl_using_dma tegra_apb_readl_direct
#define tegra_apb_writel_using_dma tegra_apb_writel_direct
#endif
170 | ||
/* Accessor signatures: a read fills *value, a write takes the value first. */
typedef int (*apbio_read_fptr)(unsigned long offset, u32 *value);
typedef int (*apbio_write_fptr)(u32 value, unsigned long offset);

/* Backend selected once, at boot, by tegra_apb_io_init(). */
static apbio_read_fptr apbio_read;
static apbio_write_fptr apbio_write;
176 | ||
3f394f80 | 177 | static int tegra_apb_readl_direct(unsigned long offset, u32 *value) |
b861c275 | 178 | { |
3f394f80 PDS |
179 | *value = readl(IO_ADDRESS(offset)); |
180 | ||
181 | return 0; | |
b861c275 LD |
182 | } |
183 | ||
3f394f80 | 184 | static int tegra_apb_writel_direct(u32 value, unsigned long offset) |
b861c275 | 185 | { |
f8e798a9 | 186 | writel(value, IO_ADDRESS(offset)); |
3f394f80 PDS |
187 | |
188 | return 0; | |
b861c275 LD |
189 | } |
190 | ||
191 | void tegra_apb_io_init(void) | |
192 | { | |
193 | /* Need to use dma only when it is Tegra20 based platform */ | |
194 | if (of_machine_is_compatible("nvidia,tegra20") || | |
195 | !of_have_populated_dt()) { | |
196 | apbio_read = tegra_apb_readl_using_dma; | |
197 | apbio_write = tegra_apb_writel_using_dma; | |
198 | } else { | |
199 | apbio_read = tegra_apb_readl_direct; | |
200 | apbio_write = tegra_apb_writel_direct; | |
201 | } | |
202 | } | |
203 | ||
204 | u32 tegra_apb_readl(unsigned long offset) | |
205 | { | |
3f394f80 PDS |
206 | u32 val; |
207 | ||
208 | if (apbio_read(offset, &val) < 0) | |
209 | return 0; | |
210 | else | |
211 | return val; | |
b861c275 LD |
212 | } |
213 | ||
214 | void tegra_apb_writel(u32 value, unsigned long offset) | |
215 | { | |
216 | apbio_write(value, offset); | |
217 | } |