/*
 * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

/*
 * DMA Coherent API Notes
 *
 * I/O is inherently non-coherent on ARC. So a coherent DMA buffer is
 * implemented by accessing it using a kernel virtual address, with
 * the Cache bit off in the TLB entry.
 *
 * The default DMA address == Phy address, which is 0x8000_0000 based.
 */
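
/*
 * Usage sketch (hypothetical driver code, not part of this file): the
 * generic DMA wrappers resolve to arc_dma_alloc()/arc_dma_free() below,
 * so a driver holding a "struct device *dev" would do something like:
 *
 *	dma_addr_t handle;
 *	void *buf;
 *
 *	buf = dma_alloc_coherent(dev, PAGE_SIZE, &handle, GFP_KERNEL);
 *	if (!buf)
 *		return -ENOMEM;
 *
 *	// hand "handle" to the device; the CPU uses "buf", which is
 *	// uncached here (or regular cached memory when IOC exists)
 *
 *	dma_free_coherent(dev, PAGE_SIZE, buf, handle);
 */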

#include <linux/dma-mapping.h>
#include <asm/cache.h>
#include <asm/cacheflush.h>

static void *arc_dma_alloc(struct device *dev, size_t size,
		dma_addr_t *dma_handle, gfp_t gfp, struct dma_attrs *attrs)
{
	unsigned long order = get_order(size);
	struct page *page;
	phys_addr_t paddr;
	void *kvaddr;

	page = alloc_pages(gfp, order);
	if (!page)
		return NULL;

	/* This is linear addr (0x8000_0000 based) */
	paddr = page_to_phys(page);

	/* For now bus address is exactly the same as paddr */
	*dma_handle = paddr;

	/*
	 * IOC relies on all data (even coherent DMA data) being in cache
	 * Thus allocate normal cached memory
	 *
	 * The gains with IOC are two pronged:
	 *   -For streaming data, it elides the need for cache maintenance,
	 *    saving cycles in flush code and bus bandwidth, as otherwise
	 *    all the lines of a buffer would need to be flushed out to
	 *    memory
	 *   -For coherent data, Read/Write to buffers terminate early in
	 *    cache (vs. always going to memory - thus are faster)
	 */
	if ((is_isa_arcv2() && ioc_exists) ||
	    dma_get_attr(DMA_ATTR_NON_CONSISTENT, attrs))
		/* the linear mapping doubles as the kernel vaddr */
		return (void *)(unsigned long)paddr;

	/* This is kernel Virtual address (0x7000_0000 based) */
	kvaddr = ioremap_nocache((unsigned long)paddr, size);
	if (kvaddr == NULL) {
		__free_pages(page, order);
		return NULL;
	}

	/*
	 * Evict any existing L1 and/or L2 lines for the backing page
	 * in case it was used earlier as a normal "cached" page.
	 * Yeah this bit us - STAR 9000898266
	 *
	 * Although core does call flush_cache_vmap(), it gets kvaddr hence
	 * can't be used to efficiently flush L1 and/or L2, which need paddr.
	 * Currently flush_cache_vmap() nukes the L1 cache completely, which
	 * will be optimized in a separate commit.
	 */
	dma_cache_wback_inv((unsigned long)paddr, size);

	return kvaddr;
}

static void arc_dma_free(struct device *dev, size_t size, void *vaddr,
		dma_addr_t dma_handle, struct dma_attrs *attrs)
{
	struct page *page = virt_to_page(dma_handle);

	if (!dma_get_attr(DMA_ATTR_NON_CONSISTENT, attrs) &&
	    !(is_isa_arcv2() && ioc_exists))
		iounmap((void __force __iomem *)vaddr);

	__free_pages(page, get_order(size));
}

/*
 * streaming DMA Mapping API...
 * CPU accesses the page via its normal paddr, so it needs to be
 * explicitly made consistent before each use
 */
static void _dma_cache_sync(unsigned long paddr, size_t size,
		enum dma_data_direction dir)
{
	switch (dir) {
	case DMA_FROM_DEVICE:
		dma_cache_inv(paddr, size);
		break;
	case DMA_TO_DEVICE:
		dma_cache_wback(paddr, size);
		break;
	case DMA_BIDIRECTIONAL:
		dma_cache_wback_inv(paddr, size);
		break;
	default:
		pr_err("Invalid DMA dir [%d] for OP @ %lx\n", dir, paddr);
	}
}
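
/*
 * Usage sketch (hypothetical driver code): the streaming wrappers all
 * funnel into _dma_cache_sync() above. E.g. for a buffer the device
 * writes into:
 *
 *	dma_addr_t handle;
 *
 *	handle = dma_map_single(dev, buf, len, DMA_FROM_DEVICE);
 *	if (dma_mapping_error(dev, handle))
 *		return -EIO;
 *	// device DMAs into the buffer ...
 *	dma_sync_single_for_cpu(dev, handle, len, DMA_FROM_DEVICE);
 *	// CPU may now read buf; sync ...for_device() before reusing it
 *	dma_unmap_single(dev, handle, len, DMA_FROM_DEVICE);
 */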

static dma_addr_t arc_dma_map_page(struct device *dev, struct page *page,
		unsigned long offset, size_t size, enum dma_data_direction dir,
		struct dma_attrs *attrs)
{
	unsigned long paddr = page_to_phys(page) + offset;
	_dma_cache_sync(paddr, size, dir);
	return (dma_addr_t)paddr;
}

static int arc_dma_map_sg(struct device *dev, struct scatterlist *sg,
		int nents, enum dma_data_direction dir, struct dma_attrs *attrs)
{
	struct scatterlist *s;
	int i;

	for_each_sg(sg, s, nents, i)
		s->dma_address = dma_map_page(dev, sg_page(s), s->offset,
					      s->length, dir);

	return nents;
}
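
/*
 * Usage sketch (hypothetical driver code): mapping a scatterlist sends
 * each entry through arc_dma_map_page() above:
 *
 *	struct scatterlist sgl[2];
 *	int count;
 *
 *	sg_init_table(sgl, 2);
 *	sg_set_buf(&sgl[0], buf0, len0);
 *	sg_set_buf(&sgl[1], buf1, len1);
 *
 *	count = dma_map_sg(dev, sgl, 2, DMA_TO_DEVICE);
 *	// program sg_dma_address()/sg_dma_len() of each mapped entry
 *	// into the device, then kick off the transfer
 *	dma_unmap_sg(dev, sgl, 2, DMA_TO_DEVICE);
 */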

static void arc_dma_sync_single_for_cpu(struct device *dev,
		dma_addr_t dma_handle, size_t size, enum dma_data_direction dir)
{
	_dma_cache_sync(dma_handle, size, DMA_FROM_DEVICE);
}

static void arc_dma_sync_single_for_device(struct device *dev,
		dma_addr_t dma_handle, size_t size, enum dma_data_direction dir)
{
	_dma_cache_sync(dma_handle, size, DMA_TO_DEVICE);
}

static void arc_dma_sync_sg_for_cpu(struct device *dev,
		struct scatterlist *sglist, int nelems,
		enum dma_data_direction dir)
{
	int i;
	struct scatterlist *sg;

	for_each_sg(sglist, sg, nelems, i)
		_dma_cache_sync((unsigned long)sg_virt(sg), sg->length, dir);
}

static void arc_dma_sync_sg_for_device(struct device *dev,
		struct scatterlist *sglist, int nelems,
		enum dma_data_direction dir)
{
	int i;
	struct scatterlist *sg;

	for_each_sg(sglist, sg, nelems, i)
		_dma_cache_sync((unsigned long)sg_virt(sg), sg->length, dir);
}

static int arc_dma_supported(struct device *dev, u64 dma_mask)
{
	/* Support 32 bit DMA mask exclusively */
	return dma_mask == DMA_BIT_MASK(32);
}
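
/*
 * Usage sketch (hypothetical driver code): a driver validates its mask
 * via the generic helper, which consults arc_dma_supported() above:
 *
 *	if (dma_set_mask(dev, DMA_BIT_MASK(32)))
 *		return -EIO;
 */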

struct dma_map_ops arc_dma_ops = {
	.alloc			= arc_dma_alloc,
	.free			= arc_dma_free,
	.map_page		= arc_dma_map_page,
	.map_sg			= arc_dma_map_sg,
	.sync_single_for_device	= arc_dma_sync_single_for_device,
	.sync_single_for_cpu	= arc_dma_sync_single_for_cpu,
	.sync_sg_for_cpu	= arc_dma_sync_sg_for_cpu,
	.sync_sg_for_device	= arc_dma_sync_sg_for_device,
	.dma_supported		= arc_dma_supported,
};
EXPORT_SYMBOL(arc_dma_ops);
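
/*
 * Hook-up sketch: the generic dma-mapping wrappers dispatch through the
 * arch's get_dma_ops(), so these ops take effect via something like the
 * following in arch/arc/include/asm/dma-mapping.h (a minimal sketch,
 * assuming the dma_map_ops-based API of this kernel generation):
 *
 *	extern struct dma_map_ops arc_dma_ops;
 *
 *	static inline struct dma_map_ops *get_dma_ops(struct device *dev)
 *	{
 *		return &arc_dma_ops;
 *	}
 */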