/*
 * Copyright (C) 2010 NVIDIA Corporation.
 * Copyright (C) 2010 Google, Inc.
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 */

#include <linux/kernel.h>
#include <linux/io.h>
#include <linux/of.h>
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/spinlock.h>
#include <linux/completion.h>
#include <linux/sched.h>
#include <linux/mutex.h>

#include <mach/dma.h>
#include <mach/iomap.h>

#include "apbio.h"

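/*
 * Tegra20 needs its APB registers accessed via the APB DMA controller
 * (see tegra_apb_io_init() below).  The code under CONFIG_TEGRA20_APB_DMA
 * implements that path, falling back to direct MMIO until the DMA engine
 * is available.
 */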
#if defined(CONFIG_TEGRA20_APB_DMA)
static DEFINE_MUTEX(tegra_apb_dma_lock);
static u32 *tegra_apb_bb;
static dma_addr_t tegra_apb_bb_phys;
static DECLARE_COMPLETION(tegra_apb_wait);

static u32 tegra_apb_readl_direct(unsigned long offset);
static void tegra_apb_writel_direct(u32 value, unsigned long offset);

static struct dma_chan *tegra_apb_dma_chan;
static struct dma_slave_config dma_sconfig;

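/*
 * Lazily set up the shared APB DMA channel and the single-word bounce
 * buffer.  Called on the first DMA-assisted access; returns false if the
 * DMA engine has not been probed yet, in which case the caller falls
 * back to a direct MMIO access.
 */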
bool tegra_apb_dma_init(void)
{
	dma_cap_mask_t mask;

	mutex_lock(&tegra_apb_dma_lock);

	/* Check to see if we raced to setup */
	if (tegra_apb_dma_chan)
		goto skip_init;

	dma_cap_zero(mask);
	dma_cap_set(DMA_SLAVE, mask);
	tegra_apb_dma_chan = dma_request_channel(mask, NULL, NULL);
	if (!tegra_apb_dma_chan) {
		/*
		 * This is common until the device is probed, so don't
		 * shout about it.
		 */
		pr_debug("%s: can not allocate dma channel\n", __func__);
		goto err_dma_alloc;
	}

	tegra_apb_bb = dma_alloc_coherent(NULL, sizeof(u32),
					  &tegra_apb_bb_phys, GFP_KERNEL);
	if (!tegra_apb_bb) {
		pr_err("%s: can not allocate bounce buffer\n", __func__);
		goto err_buff_alloc;
	}

	dma_sconfig.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
	dma_sconfig.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
	dma_sconfig.slave_id = TEGRA_DMA_REQ_SEL_CNTR;
	dma_sconfig.src_maxburst = 1;
	dma_sconfig.dst_maxburst = 1;

skip_init:
	mutex_unlock(&tegra_apb_dma_lock);
	return true;

err_buff_alloc:
	dma_release_channel(tegra_apb_dma_chan);
	tegra_apb_dma_chan = NULL;

err_dma_alloc:
	mutex_unlock(&tegra_apb_dma_lock);
	return false;
}

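/* DMA completion callback; wakes up the waiter in do_dma_transfer() */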
static void apb_dma_complete(void *args)
{
	complete(&tegra_apb_wait);
}

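/*
 * Perform a single 32-bit DMA transfer between the bounce buffer and the
 * APB register at @apb_add, in the direction given by @dir, and wait up
 * to 50ms for it to complete.
 */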
static int do_dma_transfer(unsigned long apb_add,
			   enum dma_transfer_direction dir)
{
	struct dma_async_tx_descriptor *dma_desc;
	int ret;

	if (dir == DMA_DEV_TO_MEM)
		dma_sconfig.src_addr = apb_add;
	else
		dma_sconfig.dst_addr = apb_add;

	ret = dmaengine_slave_config(tegra_apb_dma_chan, &dma_sconfig);
	if (ret)
		return ret;

	dma_desc = dmaengine_prep_slave_single(tegra_apb_dma_chan,
			tegra_apb_bb_phys, sizeof(u32), dir,
			DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	if (!dma_desc)
		return -EINVAL;

	dma_desc->callback = apb_dma_complete;
	dma_desc->callback_param = NULL;

	INIT_COMPLETION(tegra_apb_wait);

	dmaengine_submit(dma_desc);
	dma_async_issue_pending(tegra_apb_dma_chan);
	ret = wait_for_completion_timeout(&tegra_apb_wait,
			msecs_to_jiffies(50));

	if (WARN(ret == 0, "apb dma transfer timed out")) {
		dmaengine_terminate_all(tegra_apb_dma_chan);
		return -EFAULT;
	}
	return 0;
}

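/*
 * Read an APB register by DMAing it into the bounce buffer.  Falls back
 * to a direct read if the DMA channel is not yet available, and returns
 * zero if the transfer itself fails.
 */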
static u32 tegra_apb_readl_using_dma(unsigned long offset)
{
	int ret;

	if (!tegra_apb_dma_chan && !tegra_apb_dma_init())
		return tegra_apb_readl_direct(offset);

	mutex_lock(&tegra_apb_dma_lock);
	ret = do_dma_transfer(offset, DMA_DEV_TO_MEM);
	if (ret < 0) {
		pr_err("error in reading offset 0x%08lx using dma\n", offset);
		*tegra_apb_bb = 0;
	}
	mutex_unlock(&tegra_apb_dma_lock);
	return *tegra_apb_bb;
}

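/*
 * Write an APB register by staging the value in the bounce buffer and
 * DMAing it out.  Falls back to a direct write if the DMA channel is not
 * yet available.
 */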
static void tegra_apb_writel_using_dma(u32 value, unsigned long offset)
{
	int ret;

	if (!tegra_apb_dma_chan && !tegra_apb_dma_init()) {
		tegra_apb_writel_direct(value, offset);
		return;
	}

	mutex_lock(&tegra_apb_dma_lock);
	*tegra_apb_bb = value;
	ret = do_dma_transfer(offset, DMA_MEM_TO_DEV);
	if (ret < 0)
		pr_err("error in writing offset 0x%08lx using dma\n", offset);
	mutex_unlock(&tegra_apb_dma_lock);
}
#else
#define tegra_apb_readl_using_dma tegra_apb_readl_direct
#define tegra_apb_writel_using_dma tegra_apb_writel_direct
#endif

typedef u32 (*apbio_read_fptr)(unsigned long offset);
typedef void (*apbio_write_fptr)(u32 value, unsigned long offset);

static apbio_read_fptr apbio_read;
static apbio_write_fptr apbio_write;

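/*
 * Direct MMIO accessors; used as the fallback path above and selected by
 * tegra_apb_io_init() on platforms that do not need the DMA workaround.
 */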
static u32 tegra_apb_readl_direct(unsigned long offset)
{
	return readl(IO_ADDRESS(offset));
}

static void tegra_apb_writel_direct(u32 value, unsigned long offset)
{
	writel(value, IO_ADDRESS(offset));
}

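/*
 * Select the APB accessors at boot.  Tegra20, and legacy non-DT boots
 * (which may be running on Tegra20), must go through the DMA-based
 * accessors; everything else can use plain readl/writel.
 */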
void tegra_apb_io_init(void)
{
	/* DMA access is only needed on Tegra20-based platforms */
	if (of_machine_is_compatible("nvidia,tegra20") ||
	    !of_have_populated_dt()) {
		apbio_read = tegra_apb_readl_using_dma;
		apbio_write = tegra_apb_writel_using_dma;
	} else {
		apbio_read = tegra_apb_readl_direct;
		apbio_write = tegra_apb_writel_direct;
	}
}

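/*
 * Public APB accessors, dispatching through the pointers set up by
 * tegra_apb_io_init().  Illustrative use from other Tegra code (the 0x48
 * offset into the fuse block is a made-up example):
 *
 *	u32 reg = tegra_apb_readl(TEGRA_FUSE_BASE + 0x48);
 *	tegra_apb_writel(reg | BIT(0), TEGRA_FUSE_BASE + 0x48);
 */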
u32 tegra_apb_readl(unsigned long offset)
{
	return apbio_read(offset);
}

void tegra_apb_writel(u32 value, unsigned long offset)
{
	apbio_write(value, offset);
}