/*
 * Coherent per-device memory handling.
 * Borrowed from i386
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/io.h>
#include <linux/err.h>
#include <linux/bitmap.h>
#include <linux/dma-mapping.h>

struct dma_coherent_mem {
	void		*virt_base;	/* kernel mapping of the pool */
	u32		device_base;	/* device-side base address of the pool */
	int		size;		/* pool size, in pages */
	int		flags;		/* DMA_MEMORY_* declaration flags */
	unsigned long	*bitmap;	/* one bit per page; set = in use */
};
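
/*
 * A worked sizing example (illustrative, not part of the original file),
 * assuming 4 KiB pages and a 64-bit build: a 64 KiB pool gives
 * size = 16 pages, and its allocation bitmap needs
 * BITS_TO_LONGS(16) * sizeof(long) = 1 * 8 = 8 bytes.
 */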

int dma_declare_coherent_memory(struct device *dev, dma_addr_t bus_addr,
				dma_addr_t device_addr, size_t size, int flags)
{
	void __iomem *mem_base = NULL;
	int pages = size >> PAGE_SHIFT;
	/* One bit per page, rounded up to whole longs. */
	int bitmap_size = BITS_TO_LONGS(pages) * sizeof(long);

	if ((flags & (DMA_MEMORY_MAP | DMA_MEMORY_IO)) == 0)
		goto out;
	if (!size)
		goto out;
	if (dev->dma_mem)
		goto out;

	/* FIXME: this routine just ignores DMA_MEMORY_INCLUDES_CHILDREN */

	mem_base = ioremap(bus_addr, size);
	if (!mem_base)
		goto out;

	dev->dma_mem = kzalloc(sizeof(struct dma_coherent_mem), GFP_KERNEL);
	if (!dev->dma_mem)
		goto out;
	dev->dma_mem->bitmap = kzalloc(bitmap_size, GFP_KERNEL);
	if (!dev->dma_mem->bitmap)
		goto free1_out;

	dev->dma_mem->virt_base = mem_base;
	dev->dma_mem->device_base = device_addr;
	dev->dma_mem->size = pages;
	dev->dma_mem->flags = flags;

	if (flags & DMA_MEMORY_MAP)
		return DMA_MEMORY_MAP;

	return DMA_MEMORY_IO;

 free1_out:
	kfree(dev->dma_mem);
	dev->dma_mem = NULL;	/* don't leave a dangling pointer behind */
 out:
	if (mem_base)
		iounmap(mem_base);
	return 0;
}
EXPORT_SYMBOL(dma_declare_coherent_memory);
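
/*
 * Usage sketch (illustrative, not part of the original file): how a
 * driver might declare a chunk of device-local SRAM as its coherent
 * pool.  example_setup_pool() and the 0x20000000 bus address are
 * assumptions for the sketch, not taken from a real driver.
 */
#if 0
static int example_setup_pool(struct device *dev)
{
	/* Identity-mapped pool: the CPU-visible and device-visible
	 * addresses coincide at the assumed SRAM base. */
	if (!dma_declare_coherent_memory(dev, 0x20000000, 0x20000000,
					 0x10000 /* 64 KiB */,
					 DMA_MEMORY_MAP | DMA_MEMORY_EXCLUSIVE))
		return -ENXIO;

	/* From here on, dma_alloc_coherent(dev, ...) draws from the pool. */
	return 0;
}
#endif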

void dma_release_declared_memory(struct device *dev)
{
	struct dma_coherent_mem *mem = dev->dma_mem;

	if (!mem)
		return;
	dev->dma_mem = NULL;
	iounmap(mem->virt_base);
	kfree(mem->bitmap);
	kfree(mem);
}
EXPORT_SYMBOL(dma_release_declared_memory);

void *dma_mark_declared_memory_occupied(struct device *dev,
					dma_addr_t device_addr, size_t size)
{
	struct dma_coherent_mem *mem = dev->dma_mem;
	int pos, err;

	size += device_addr & ~PAGE_MASK;

	if (!mem)
		return ERR_PTR(-EINVAL);

	pos = (device_addr - mem->device_base) >> PAGE_SHIFT;
	err = bitmap_allocate_region(mem->bitmap, pos, get_order(size));
	if (err != 0)
		return ERR_PTR(err);
	return mem->virt_base + (pos << PAGE_SHIFT);
}
EXPORT_SYMBOL(dma_mark_declared_memory_occupied);
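
/*
 * Usage sketch (illustrative, not part of the original file): reserving
 * a firmware-owned region, e.g. a framebuffer, inside an already
 * declared pool so that dma_alloc_coherent() never hands it out.  The
 * example_reserve_fb() name, offset and size are assumptions.
 */
#if 0
static int example_reserve_fb(struct device *dev)
{
	void *fb;

	/* Assume a 16 KiB framebuffer at device address pool base + 4 KiB. */
	fb = dma_mark_declared_memory_occupied(dev, 0x20001000, 0x4000);
	if (IS_ERR(fb))
		return PTR_ERR(fb);

	/* fb is the kernel virtual address of the reserved region. */
	return 0;
}
#endif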

/**
 * dma_alloc_from_coherent() - try to allocate memory from the per-device coherent area
 *
 * @dev:	device from which we allocate memory
 * @size:	size of requested memory area
 * @dma_handle:	This will be filled with the correct dma handle
 * @ret:	This pointer will be filled with the virtual address of the
 *		allocated area.
 *
 * This function should only be called from per-arch dma_alloc_coherent()
 * to support allocation from per-device coherent memory pools.
 *
 * Returns 0 if dma_alloc_coherent should continue with allocating from
 * generic memory areas, or !0 if dma_alloc_coherent should return @ret.
 */
int dma_alloc_from_coherent(struct device *dev, ssize_t size,
			    dma_addr_t *dma_handle, void **ret)
{
	struct dma_coherent_mem *mem = dev ? dev->dma_mem : NULL;
	int order = get_order(size);

	if (mem) {
		int page = bitmap_find_free_region(mem->bitmap, mem->size,
						   order);
		if (page >= 0) {
			*dma_handle = mem->device_base + (page << PAGE_SHIFT);
			*ret = mem->virt_base + (page << PAGE_SHIFT);
			memset(*ret, 0, size);
		} else if (mem->flags & DMA_MEMORY_EXCLUSIVE)
			*ret = NULL;
	}
	return (mem != NULL);
}
EXPORT_SYMBOL(dma_alloc_from_coherent);
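
/*
 * Usage sketch (illustrative, not part of the original file): the call
 * pattern a per-arch dma_alloc_coherent() follows.  arch_dma_alloc() is
 * an assumed name standing in for the arch hook; the generic-allocator
 * fallback is elided.
 */
#if 0
void *arch_dma_alloc(struct device *dev, size_t size,
		     dma_addr_t *dma_handle, gfp_t gfp)
{
	void *ret = NULL;

	/* A non-zero return means the per-device pool answered: hand back
	 * *ret as-is (it may be NULL for an exhausted exclusive pool). */
	if (dma_alloc_from_coherent(dev, size, dma_handle, &ret))
		return ret;

	/* ... otherwise fall back to allocating from generic memory ... */
	return NULL;
}
#endif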

/**
 * dma_release_from_coherent() - try to free the memory allocated from per-device coherent memory pool
 * @dev:	device from which the memory was allocated
 * @order:	the order of pages allocated
 * @vaddr:	virtual address of allocated pages
 *
 * This checks whether the memory was allocated from the per-device
 * coherent memory pool and if so, releases that memory.
 *
 * Returns 1 if we correctly released the memory, or 0 if
 * dma_free_coherent() should proceed with releasing memory from
 * generic pools.
 */
int dma_release_from_coherent(struct device *dev, int order, void *vaddr)
{
	struct dma_coherent_mem *mem = dev ? dev->dma_mem : NULL;

	if (mem && vaddr >= mem->virt_base && vaddr <
		   (mem->virt_base + (mem->size << PAGE_SHIFT))) {
		int page = (vaddr - mem->virt_base) >> PAGE_SHIFT;

		bitmap_release_region(mem->bitmap, page, order);
		return 1;
	}
	return 0;
}
EXPORT_SYMBOL(dma_release_from_coherent);
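
/*
 * Usage sketch (illustrative, not part of the original file): the
 * matching release path a per-arch dma_free_coherent() follows.
 * arch_dma_free() is an assumed name; the generic fallback is elided.
 */
#if 0
void arch_dma_free(struct device *dev, size_t size, void *vaddr,
		   dma_addr_t dma_handle)
{
	/* If vaddr came from the per-device pool, return it to the pool's
	 * bitmap and stop; otherwise the generic pools own it. */
	if (dma_release_from_coherent(dev, get_order(size), vaddr))
		return;

	/* ... free through the generic allocator here ... */
}
#endif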