/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2000  Ani Joshi <ajoshi@unixbox.com>
 * Copyright (C) 2000, 2001  Ralf Baechle <ralf@gnu.org>
 * Copyright (C) 2005 Ilya A. Volynets-Evenbakh <ilya@total-knowledge.com>
 * swiped from i386, and cloned for MIPS by Geert, polished by Ralf.
 * IP32 changes by Ilya.
 * Copyright (C) 2010 Cavium Networks, Inc.
 */
#include <linux/dma-mapping.h>
#include <linux/scatterlist.h>
#include <linux/bootmem.h>
#include <linux/export.h>
#include <linux/swiotlb.h>
#include <linux/types.h>
#include <linux/init.h>
#include <linux/mm.h>

#include <asm/bootinfo.h>

#include <asm/octeon/octeon.h>

#ifdef CONFIG_PCI
#include <asm/octeon/pci-octeon.h>
#include <asm/octeon/cvmx-npi-defs.h>
#include <asm/octeon/cvmx-pci-defs.h>

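/*
 * BAR1 "hole" helpers, shared by the translations below: physical
 * addresses that fall inside the PCIe BAR1 forwarding window are
 * shifted into the root-complex view of that window; everything else
 * passes through unchanged.
 */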
static dma_addr_t octeon_hole_phys_to_dma(phys_addr_t paddr)
{
	if (paddr >= CVMX_PCIE_BAR1_PHYS_BASE && paddr < (CVMX_PCIE_BAR1_PHYS_BASE + CVMX_PCIE_BAR1_PHYS_SIZE))
		return paddr - CVMX_PCIE_BAR1_PHYS_BASE + CVMX_PCIE_BAR1_RC_BASE;
	else
		return paddr;
}

static phys_addr_t octeon_hole_dma_to_phys(dma_addr_t daddr)
{
	if (daddr >= CVMX_PCIE_BAR1_RC_BASE)
		return daddr + CVMX_PCIE_BAR1_PHYS_BASE - CVMX_PCIE_BAR1_RC_BASE;
	else
		return daddr;
}

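/*
 * Gen1 PCIe: fold the high physical alias at 0x410000000-0x420000000
 * (the RAM displaced by the I/O hole at 0x10000000) down to its bus
 * address before applying the BAR1 hole translation; the dma_to_phys
 * direction restores it.
 */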
static dma_addr_t octeon_gen1_phys_to_dma(struct device *dev, phys_addr_t paddr)
{
	if (paddr >= 0x410000000ull && paddr < 0x420000000ull)
		paddr -= 0x400000000ull;
	return octeon_hole_phys_to_dma(paddr);
}

static phys_addr_t octeon_gen1_dma_to_phys(struct device *dev, dma_addr_t daddr)
{
	daddr = octeon_hole_dma_to_phys(daddr);

	if (daddr >= 0x10000000ull && daddr < 0x20000000ull)
		daddr += 0x400000000ull;

	return daddr;
}

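/*
 * Gen2 (OCTEON II) PCIe needs only the BAR1 hole translation; there is
 * no low-memory alias to fold.
 */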
static dma_addr_t octeon_gen2_phys_to_dma(struct device *dev, phys_addr_t paddr)
{
	return octeon_hole_phys_to_dma(paddr);
}

static phys_addr_t octeon_gen2_dma_to_phys(struct device *dev, dma_addr_t daddr)
{
	return octeon_hole_dma_to_phys(daddr);
}

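/*
 * OCTEON_DMA_BAR_TYPE_BIG: bus addresses below 0xf0000000 are reached
 * through BAR1 unchanged; anything in the BAR1 hole or above is offset
 * into the 64-bit BAR2 window.
 */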
static dma_addr_t octeon_big_phys_to_dma(struct device *dev, phys_addr_t paddr)
{
	if (paddr >= 0x410000000ull && paddr < 0x420000000ull)
		paddr -= 0x400000000ull;

	/* Anything in the BAR1 hole or above goes via BAR2 */
	if (paddr >= 0xf0000000ull)
		paddr = OCTEON_BAR2_PCI_ADDRESS + paddr;

	return paddr;
}

static phys_addr_t octeon_big_dma_to_phys(struct device *dev, dma_addr_t daddr)
{
	if (daddr >= OCTEON_BAR2_PCI_ADDRESS)
		daddr -= OCTEON_BAR2_PCI_ADDRESS;

	if (daddr >= 0x10000000ull && daddr < 0x20000000ull)
		daddr += 0x400000000ull;
	return daddr;
}

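/*
 * OCTEON_DMA_BAR_TYPE_SMALL: only a 128MB (0x8000000) BAR1 window at
 * octeon_bar1_pci_phys is available; memory outside that window must
 * take the BAR2 path.
 */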
static dma_addr_t octeon_small_phys_to_dma(struct device *dev,
					   phys_addr_t paddr)
{
	if (paddr >= 0x410000000ull && paddr < 0x420000000ull)
		paddr -= 0x400000000ull;

	/* Anything not in the BAR1 range goes via BAR2 */
	if (paddr >= octeon_bar1_pci_phys && paddr < octeon_bar1_pci_phys + 0x8000000ull)
		paddr = paddr - octeon_bar1_pci_phys;
	else
		paddr = OCTEON_BAR2_PCI_ADDRESS + paddr;

	return paddr;
}

static phys_addr_t octeon_small_dma_to_phys(struct device *dev,
					    dma_addr_t daddr)
{
	if (daddr >= OCTEON_BAR2_PCI_ADDRESS)
		daddr -= OCTEON_BAR2_PCI_ADDRESS;
	else
		daddr += octeon_bar1_pci_phys;

	if (daddr >= 0x10000000ull && daddr < 0x20000000ull)
		daddr += 0x400000000ull;
	return daddr;
}

#endif /* CONFIG_PCI */

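/*
 * swiotlb wrappers: each mapping or device-direction sync is followed
 * by an mb() so that CPU stores to the buffer are visible before the
 * device is told to start its DMA.
 */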
static dma_addr_t octeon_dma_map_page(struct device *dev, struct page *page,
	unsigned long offset, size_t size, enum dma_data_direction direction,
	struct dma_attrs *attrs)
{
	dma_addr_t daddr = swiotlb_map_page(dev, page, offset, size,
					    direction, attrs);
	mb();

	return daddr;
}

static int octeon_dma_map_sg(struct device *dev, struct scatterlist *sg,
	int nents, enum dma_data_direction direction, struct dma_attrs *attrs)
{
	int r = swiotlb_map_sg_attrs(dev, sg, nents, direction, attrs);
	mb();
	return r;
}

static void octeon_dma_sync_single_for_device(struct device *dev,
	dma_addr_t dma_handle, size_t size, enum dma_data_direction direction)
{
	swiotlb_sync_single_for_device(dev, dma_handle, size, direction);
	mb();
}

static void octeon_dma_sync_sg_for_device(struct device *dev,
	struct scatterlist *sg, int nelems, enum dma_data_direction direction)
{
	swiotlb_sync_sg_for_device(dev, sg, nelems, direction);
	mb();
}

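/*
 * Coherent allocations pick a GFP zone from the device's coherent DMA
 * mask: ZONE_DMA for masks of 24 bits or less (or when no device is
 * given), ZONE_DMA32 for 32-bit masks, the normal zone otherwise.  The
 * bare ';' terminates the dangling else when both zones are configured
 * out.
 */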
static void *octeon_dma_alloc_coherent(struct device *dev, size_t size,
	dma_addr_t *dma_handle, gfp_t gfp, struct dma_attrs *attrs)
{
	void *ret;

	/* ignore region specifiers */
	gfp &= ~(__GFP_DMA | __GFP_DMA32 | __GFP_HIGHMEM);

#ifdef CONFIG_ZONE_DMA
	if (dev == NULL)
		gfp |= __GFP_DMA;
	else if (dev->coherent_dma_mask <= DMA_BIT_MASK(24))
		gfp |= __GFP_DMA;
	else
#endif
#ifdef CONFIG_ZONE_DMA32
	     if (dev->coherent_dma_mask <= DMA_BIT_MASK(32))
		gfp |= __GFP_DMA32;
	else
#endif
		;

	/* Don't invoke OOM killer */
	gfp |= __GFP_NORETRY;

	ret = swiotlb_alloc_coherent(dev, size, dma_handle, gfp);

	mb();

	return ret;
}

static void octeon_dma_free_coherent(struct device *dev, size_t size,
	void *vaddr, dma_addr_t dma_handle, struct dma_attrs *attrs)
{
	swiotlb_free_coherent(dev, size, vaddr, dma_handle);
}

static dma_addr_t octeon_unity_phys_to_dma(struct device *dev, phys_addr_t paddr)
{
	return paddr;
}

static phys_addr_t octeon_unity_dma_to_phys(struct device *dev, dma_addr_t daddr)
{
	return daddr;
}

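/*
 * The generic dma_map_ops are embedded in a wrapper that also carries
 * the model-specific address translations; phys_to_dma()/dma_to_phys()
 * recover the wrapper from the ops pointer with container_of() and
 * dispatch through it.
 */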
struct octeon_dma_map_ops {
	struct dma_map_ops dma_map_ops;
	dma_addr_t (*phys_to_dma)(struct device *dev, phys_addr_t paddr);
	phys_addr_t (*dma_to_phys)(struct device *dev, dma_addr_t daddr);
};

dma_addr_t phys_to_dma(struct device *dev, phys_addr_t paddr)
{
	struct octeon_dma_map_ops *ops = container_of(get_dma_ops(dev),
						      struct octeon_dma_map_ops,
						      dma_map_ops);

	return ops->phys_to_dma(dev, paddr);
}
EXPORT_SYMBOL(phys_to_dma);

phys_addr_t dma_to_phys(struct device *dev, dma_addr_t daddr)
{
	struct octeon_dma_map_ops *ops = container_of(get_dma_ops(dev),
						      struct octeon_dma_map_ops,
						      dma_map_ops);

	return ops->dma_to_phys(dev, daddr);
}
EXPORT_SYMBOL(dma_to_phys);

static struct octeon_dma_map_ops octeon_linear_dma_map_ops = {
	.dma_map_ops = {
		.alloc = octeon_dma_alloc_coherent,
		.free = octeon_dma_free_coherent,
		.map_page = octeon_dma_map_page,
		.unmap_page = swiotlb_unmap_page,
		.map_sg = octeon_dma_map_sg,
		.unmap_sg = swiotlb_unmap_sg_attrs,
		.sync_single_for_cpu = swiotlb_sync_single_for_cpu,
		.sync_single_for_device = octeon_dma_sync_single_for_device,
		.sync_sg_for_cpu = swiotlb_sync_sg_for_cpu,
		.sync_sg_for_device = octeon_dma_sync_sg_for_device,
		.mapping_error = swiotlb_dma_mapping_error,
		.dma_supported = swiotlb_dma_supported
	},
	.phys_to_dma = octeon_unity_phys_to_dma,
	.dma_to_phys = octeon_unity_dma_to_phys
};

char *octeon_swiotlb;

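/*
 * Size and allocate the swiotlb bounce buffer from the boot memory map,
 * then install the linear (identity-translation) dma_map_ops as the
 * system default.
 */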
void __init plat_swiotlb_setup(void)
{
	int i;
	phys_addr_t max_addr;
	phys_addr_t addr_size;
	size_t swiotlbsize;
	unsigned long swiotlb_nslabs;

	max_addr = 0;
	addr_size = 0;

	for (i = 0 ; i < boot_mem_map.nr_map; i++) {
		struct boot_mem_map_entry *e = &boot_mem_map.map[i];
		if (e->type != BOOT_MEM_RAM && e->type != BOOT_MEM_INIT_RAM)
			continue;

		/* These addresses map low for PCI. */
		if (e->addr > 0x410000000ull && !OCTEON_IS_OCTEON2())
			continue;

		addr_size += e->size;

		if (max_addr < e->addr + e->size)
			max_addr = e->addr + e->size;

	}

	swiotlbsize = PAGE_SIZE;

#ifdef CONFIG_PCI
	/*
	 * For OCTEON_DMA_BAR_TYPE_SMALL, size the iotlb at 1/4 memory
	 * size to a maximum of 64MB
	 */
	if (OCTEON_IS_MODEL(OCTEON_CN31XX)
	    || OCTEON_IS_MODEL(OCTEON_CN38XX_PASS2)) {
		swiotlbsize = addr_size / 4;
		if (swiotlbsize > 64 * (1<<20))
			swiotlbsize = 64 * (1<<20);
	} else if (max_addr > 0xf0000000ul) {
		/*
		 * Otherwise only allocate a big iotlb if there is
		 * memory past the BAR1 hole.
		 */
		swiotlbsize = 64 * (1<<20);
	}
#endif
#ifdef CONFIG_USB_OHCI_HCD_PLATFORM
	/* OCTEON II ohci is only 32-bit. */
	if (OCTEON_IS_OCTEON2() && max_addr >= 0x100000000ul)
		swiotlbsize = 64 * (1<<20);
#endif
	swiotlb_nslabs = swiotlbsize >> IO_TLB_SHIFT;
	swiotlb_nslabs = ALIGN(swiotlb_nslabs, IO_TLB_SEGSIZE);
	swiotlbsize = swiotlb_nslabs << IO_TLB_SHIFT;

	octeon_swiotlb = alloc_bootmem_low_pages(swiotlbsize);

	if (swiotlb_init_with_tbl(octeon_swiotlb, swiotlb_nslabs, 1) == -ENOMEM)
		panic("Cannot allocate SWIOTLB buffer");

	mips_dma_map_ops = &octeon_linear_dma_map_ops.dma_map_ops;
}

#ifdef CONFIG_PCI
static struct octeon_dma_map_ops _octeon_pci_dma_map_ops = {
	.dma_map_ops = {
		.alloc = octeon_dma_alloc_coherent,
		.free = octeon_dma_free_coherent,
		.map_page = octeon_dma_map_page,
		.unmap_page = swiotlb_unmap_page,
		.map_sg = octeon_dma_map_sg,
		.unmap_sg = swiotlb_unmap_sg_attrs,
		.sync_single_for_cpu = swiotlb_sync_single_for_cpu,
		.sync_single_for_device = octeon_dma_sync_single_for_device,
		.sync_sg_for_cpu = swiotlb_sync_sg_for_cpu,
		.sync_sg_for_device = octeon_dma_sync_sg_for_device,
		.mapping_error = swiotlb_dma_mapping_error,
		.dma_supported = swiotlb_dma_supported
	},
};

struct dma_map_ops *octeon_pci_dma_map_ops;

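/*
 * Fill in the address-translation hooks that match the DMA BAR layout
 * detected by the PCI/PCIe setup code and publish the resulting
 * dma_map_ops for PCI devices.
 */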
void __init octeon_pci_dma_init(void)
{
	switch (octeon_dma_bar_type) {
	case OCTEON_DMA_BAR_TYPE_PCIE2:
		_octeon_pci_dma_map_ops.phys_to_dma = octeon_gen2_phys_to_dma;
		_octeon_pci_dma_map_ops.dma_to_phys = octeon_gen2_dma_to_phys;
		break;
	case OCTEON_DMA_BAR_TYPE_PCIE:
		_octeon_pci_dma_map_ops.phys_to_dma = octeon_gen1_phys_to_dma;
		_octeon_pci_dma_map_ops.dma_to_phys = octeon_gen1_dma_to_phys;
		break;
	case OCTEON_DMA_BAR_TYPE_BIG:
		_octeon_pci_dma_map_ops.phys_to_dma = octeon_big_phys_to_dma;
		_octeon_pci_dma_map_ops.dma_to_phys = octeon_big_dma_to_phys;
		break;
	case OCTEON_DMA_BAR_TYPE_SMALL:
		_octeon_pci_dma_map_ops.phys_to_dma = octeon_small_phys_to_dma;
		_octeon_pci_dma_map_ops.dma_to_phys = octeon_small_dma_to_phys;
		break;
	default:
		BUG();
	}
	octeon_pci_dma_map_ops = &_octeon_pci_dma_map_ops.dma_map_ops;
}
#endif /* CONFIG_PCI */