arch/arc/mm/ioremap.c
/*
 * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/vmalloc.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/io.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/cache.h>

void __iomem *ioremap(phys_addr_t paddr, unsigned long size)
{
	phys_addr_t end;

	/* Don't allow wraparound or zero size */
	end = paddr + size - 1;
	if (!size || (end < paddr))
		return NULL;

	/*
	 * If the region is h/w uncached, the MMU mapping can be elided as an
	 * optimization. The cast to u32 is fine as this region can only be
	 * inside 4GB.
	 */
	if (paddr >= ARC_UNCACHED_ADDR_SPACE)
		return (void __iomem *)(u32)paddr;

	return ioremap_prot(paddr, size, PAGE_KERNEL_NO_CACHE);
}
EXPORT_SYMBOL(ioremap);
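
/*
 * Usage sketch (illustrative only, not part of this file): a peripheral
 * driver would typically map its register window once and unmap it when
 * done. The physical address, size and register offset below are made-up
 * values.
 *
 *	void __iomem *regs = ioremap(0xf0001000, 0x1000);
 *
 *	if (!regs)
 *		return -ENOMEM;
 *	writel(1, regs + 0x10);
 *	iounmap(regs);
 */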

/*
 * ioremap with access flags
 * Cache semantics wise it is the same as ioremap - "forced" uncached.
 * However, unlike vanilla ioremap, which bypasses the ARC MMU for addresses
 * in the ARC hardware uncached region, this one still goes through the MMU
 * as the caller might need finer access control (R/W/X).
 */
void __iomem *ioremap_prot(phys_addr_t paddr, unsigned long size,
			   unsigned long flags)
{
	unsigned long vaddr;
	struct vm_struct *area;
	phys_addr_t off, end;
	pgprot_t prot = __pgprot(flags);

	/* Don't allow wraparound or zero size */
	end = paddr + size - 1;
	if (!size || (end < paddr))
		return NULL;

	/* An early platform driver might end up here */
	if (!slab_is_available())
		return NULL;

	/* force uncached */
	prot = pgprot_noncached(prot);

	/* Mappings have to be page-aligned */
	off = paddr & ~PAGE_MASK;
	paddr &= PAGE_MASK;
	size = PAGE_ALIGN(end + 1) - paddr;

	/*
	 * Ok, go for it..
	 */
	area = get_vm_area(size, VM_IOREMAP);
	if (!area)
		return NULL;
	area->phys_addr = paddr;
	vaddr = (unsigned long)area->addr;
	if (ioremap_page_range(vaddr, vaddr + size, paddr, prot)) {
		vunmap((void __force *)vaddr);
		return NULL;
	}
	return (void __iomem *)(off + (char __iomem *)vaddr);
}
EXPORT_SYMBOL(ioremap_prot);
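
/*
 * Usage sketch (illustrative, with a made-up physical address): a caller
 * that needs the mapping to go through the MMU, e.g. to refine R/W/X
 * permissions, can call ioremap_prot() directly; cacheability is still
 * forced off by the pgprot_noncached() above.
 *
 *	void __iomem *regs;
 *
 *	regs = ioremap_prot(0xf0002000, 0x1000, PAGE_KERNEL_NO_CACHE);
 *	if (!regs)
 *		return -ENOMEM;
 */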
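/*
 * Note: ioremap() hands out addresses in the hardware uncached region
 * without creating an MMU mapping, so for those there is nothing to
 * unmap and iounmap() simply returns.
 */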
void iounmap(const void __iomem *addr)
{
	if (addr >= (void __force __iomem *)ARC_UNCACHED_ADDR_SPACE)
		return;

	vfree((void *)(PAGE_MASK & (unsigned long __force)addr));
}
EXPORT_SYMBOL(iounmap);