/*
 * Dynamic DMA mapping support for AMD Hammer.
 *
 * Use the integrated AGP GART in the Hammer northbridge as an IOMMU for PCI.
 * This allows the use of PCI devices that only support 32-bit addresses on
 * systems with more than 4GB.
 *
 * See Documentation/PCI/PCI-DMA-mapping.txt for the interface specification.
 *
 * Copyright 2002 Andi Kleen, SuSE Labs.
 * Subject to the GNU General Public License v2 only.
 */

#include <linux/types.h>
#include <linux/ctype.h>
#include <linux/agp_backend.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/string.h>
#include <linux/spinlock.h>
#include <linux/pci.h>
#include <linux/module.h>
#include <linux/topology.h>
#include <linux/interrupt.h>
#include <linux/bitmap.h>
#include <linux/kdebug.h>
#include <linux/scatterlist.h>
#include <linux/iommu-helper.h>
#include <linux/sysdev.h>
#include <linux/io.h>
#include <linux/gfp.h>
#include <asm/atomic.h>
#include <asm/mtrr.h>
#include <asm/pgtable.h>
#include <asm/proto.h>
#include <asm/iommu.h>
#include <asm/gart.h>
#include <asm/cacheflush.h>
#include <asm/swiotlb.h>
#include <asm/dma.h>
#include <asm/amd_nb.h>
#include <asm/x86_init.h>
#include <asm/iommu_table.h>

static unsigned long iommu_bus_base;	/* GART remapping area (physical) */
static unsigned long iommu_size;	/* size of remapping area, in bytes */
static unsigned long iommu_pages;	/* .. and in pages */

static u32 *iommu_gatt_base;		/* Remapping table */

static dma_addr_t bad_dma_addr;

/*
 * If this is disabled the IOMMU will use an optimized flushing strategy
 * of only flushing when a mapping is reused. With it true the GART is
 * flushed for every mapping. Problem is that doing the lazy flush seems
 * to trigger bugs with some popular PCI cards, in particular 3ware (but
 * it has also been seen with Qlogic at least).
 */
static int iommu_fullflush = 1;

/* Allocation bitmap for the remapping area: */
static DEFINE_SPINLOCK(iommu_bitmap_lock);
/* Guarded by iommu_bitmap_lock: */
static unsigned long *iommu_gart_bitmap;

static u32 gart_unmapped_entry;

#define GPTE_VALID	1
#define GPTE_COHERENT	2
#define GPTE_ENCODE(x) \
	(((x) & 0xfffff000) | (((x) >> 32) << 4) | GPTE_VALID | GPTE_COHERENT)
#define GPTE_DECODE(x) (((x) & 0xfffff000) | (((u64)(x) & 0xff0) << 28))

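/*
 * Worked example of the PTE packing above: a GART PTE folds a 40-bit
 * physical address into 32 bits by keeping address bits 12-31 in
 * place and storing bits 32-39 in PTE bits 4-11, with bits 0/1 used
 * as the VALID and COHERENT flags. For phys addr 0x123456000:
 *
 *   GPTE_ENCODE(0x123456000) = 0x23456000 | (0x1 << 4) | 0x3 = 0x23456013
 *   GPTE_DECODE(0x23456013)  = 0x23456000 | (0x010 << 28)   = 0x123456000
 */
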
#define EMERGENCY_PAGES 32 /* = 128KB */

#ifdef CONFIG_AGP
#define AGPEXTERN extern
#else
#define AGPEXTERN
#endif

/* backdoor interface to AGP driver */
AGPEXTERN int agp_memory_reserved;
AGPEXTERN __u32 *agp_gatt_table;

static unsigned long next_bit;	/* protected by iommu_bitmap_lock */
static bool need_flush;		/* global flush state; set on each GART wrap */

static unsigned long alloc_iommu(struct device *dev, int size,
				 unsigned long align_mask)
{
	unsigned long offset, flags;
	unsigned long boundary_size;
	unsigned long base_index;

	base_index = ALIGN(iommu_bus_base & dma_get_seg_boundary(dev),
			   PAGE_SIZE) >> PAGE_SHIFT;
	boundary_size = ALIGN((u64)dma_get_seg_boundary(dev) + 1,
			      PAGE_SIZE) >> PAGE_SHIFT;

	spin_lock_irqsave(&iommu_bitmap_lock, flags);
	offset = iommu_area_alloc(iommu_gart_bitmap, iommu_pages, next_bit,
				  size, base_index, boundary_size, align_mask);
	if (offset == -1) {
		need_flush = true;
		offset = iommu_area_alloc(iommu_gart_bitmap, iommu_pages, 0,
					  size, base_index, boundary_size,
					  align_mask);
	}
	if (offset != -1) {
		next_bit = offset + size;
		if (next_bit >= iommu_pages) {
			next_bit = 0;
			need_flush = true;
		}
	}
	if (iommu_fullflush)
		need_flush = true;
	spin_unlock_irqrestore(&iommu_bitmap_lock, flags);

	return offset;
}
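
/*
 * The search above is next-fit: it resumes at next_bit, where the
 * previous allocation ended, and falls back to the start of the
 * bitmap only when the tail is exhausted. Reusing pages that way may
 * leave stale translations in the GART TLB, so need_flush is raised
 * and callers are expected to flush_gart() before handing out the
 * mapping.
 */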

static void free_iommu(unsigned long offset, int size)
{
	unsigned long flags;

	spin_lock_irqsave(&iommu_bitmap_lock, flags);
	bitmap_clear(iommu_gart_bitmap, offset, size);
	if (offset >= next_bit)
		next_bit = offset + size;
	spin_unlock_irqrestore(&iommu_bitmap_lock, flags);
}

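/*
 * free_iommu() pushes next_bit past the freed range rather than
 * rewinding to it: with lazy flushing, stale translations for the
 * just-freed pages may still be cached, so the range must not be
 * reused until the allocator wraps and forces a flush.
 */
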
/*
 * Use global flush state to avoid races with multiple flushers.
 */
static void flush_gart(void)
{
	unsigned long flags;

	spin_lock_irqsave(&iommu_bitmap_lock, flags);
	if (need_flush) {
		amd_flush_garts();
		need_flush = false;
	}
	spin_unlock_irqrestore(&iommu_bitmap_lock, flags);
}

#ifdef CONFIG_IOMMU_LEAK
/* Debugging aid for drivers that don't free their IOMMU tables */
static int leak_trace;
static int iommu_leak_pages = 20;

static void dump_leak(void)
{
	static int dump;

	if (dump)
		return;
	dump = 1;

	show_stack(NULL, NULL);
	debug_dma_dump_mappings(NULL);
}
#endif

static void iommu_full(struct device *dev, size_t size, int dir)
{
	/*
	 * Ran out of IOMMU space for this operation. This is very bad.
	 * Unfortunately the drivers cannot handle this operation properly.
	 * Return some non-mapped prereserved space in the aperture and
	 * let the Northbridge deal with it. This will result in garbage
	 * in the IO operation. When the size exceeds the prereserved space,
	 * memory corruption will occur or random memory will be DMAed
	 * out. Hopefully no network devices use single mappings that big.
	 */

	dev_err(dev, "PCI-DMA: Out of IOMMU space for %lu bytes\n", size);

	if (size > PAGE_SIZE*EMERGENCY_PAGES) {
		if (dir == PCI_DMA_FROMDEVICE || dir == PCI_DMA_BIDIRECTIONAL)
			panic("PCI-DMA: Memory would be corrupted\n");
		if (dir == PCI_DMA_TODEVICE || dir == PCI_DMA_BIDIRECTIONAL)
			panic(KERN_ERR
				"PCI-DMA: Random memory would be DMAed\n");
	}
#ifdef CONFIG_IOMMU_LEAK
	dump_leak();
#endif
}

static inline int
need_iommu(struct device *dev, unsigned long addr, size_t size)
{
	return force_iommu || !dma_capable(dev, addr, size);
}

static inline int
nonforced_iommu(struct device *dev, unsigned long addr, size_t size)
{
	return !dma_capable(dev, addr, size);
}

/* Map a single contiguous physical area into the IOMMU.
 * Caller needs to check if the iommu is needed and flush.
 */
static dma_addr_t dma_map_area(struct device *dev, dma_addr_t phys_mem,
				size_t size, int dir, unsigned long align_mask)
{
	unsigned long npages = iommu_num_pages(phys_mem, size, PAGE_SIZE);
	unsigned long iommu_page = alloc_iommu(dev, npages, align_mask);
	int i;

	if (iommu_page == -1) {
		if (!nonforced_iommu(dev, phys_mem, size))
			return phys_mem;
		if (panic_on_overflow)
			panic("dma_map_area overflow %lu bytes\n", size);
		iommu_full(dev, size, dir);
		return bad_dma_addr;
	}

	for (i = 0; i < npages; i++) {
		iommu_gatt_base[iommu_page + i] = GPTE_ENCODE(phys_mem);
		phys_mem += PAGE_SIZE;
	}
	return iommu_bus_base + iommu_page*PAGE_SIZE + (phys_mem & ~PAGE_MASK);
}

/* Map a single area into the IOMMU */
static dma_addr_t gart_map_page(struct device *dev, struct page *page,
				unsigned long offset, size_t size,
				enum dma_data_direction dir,
				struct dma_attrs *attrs)
{
	unsigned long bus;
	phys_addr_t paddr = page_to_phys(page) + offset;

	if (!dev)
		dev = &x86_dma_fallback_dev;

	if (!need_iommu(dev, paddr, size))
		return paddr;

	bus = dma_map_area(dev, paddr, size, dir, 0);
	flush_gart();

	return bus;
}

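/*
 * Note that flush_gart() runs before the bus address is handed back:
 * with lazy flushing this is the point where a stale GART TLB entry
 * left by a previous user of the same aperture pages is invalidated.
 */
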
/*
 * Free a DMA mapping.
 */
static void gart_unmap_page(struct device *dev, dma_addr_t dma_addr,
			    size_t size, enum dma_data_direction dir,
			    struct dma_attrs *attrs)
{
	unsigned long iommu_page;
	int npages;
	int i;

	if (dma_addr < iommu_bus_base + EMERGENCY_PAGES*PAGE_SIZE ||
	    dma_addr >= iommu_bus_base + iommu_size)
		return;

	iommu_page = (dma_addr - iommu_bus_base)>>PAGE_SHIFT;
	npages = iommu_num_pages(dma_addr, size, PAGE_SIZE);
	for (i = 0; i < npages; i++) {
		iommu_gatt_base[iommu_page + i] = gart_unmapped_entry;
	}
	free_iommu(iommu_page, npages);
}

/*
 * Wrapper for pci_unmap_single working with scatterlists.
 */
static void gart_unmap_sg(struct device *dev, struct scatterlist *sg, int nents,
			  enum dma_data_direction dir, struct dma_attrs *attrs)
{
	struct scatterlist *s;
	int i;

	for_each_sg(sg, s, nents, i) {
		if (!s->dma_length || !s->length)
			break;
		gart_unmap_page(dev, s->dma_address, s->dma_length, dir, NULL);
	}
}

/* Fallback for dma_map_sg in case of overflow */
static int dma_map_sg_nonforce(struct device *dev, struct scatterlist *sg,
			       int nents, int dir)
{
	struct scatterlist *s;
	int i;

#ifdef CONFIG_IOMMU_DEBUG
	pr_debug("dma_map_sg overflow\n");
#endif

	for_each_sg(sg, s, nents, i) {
		unsigned long addr = sg_phys(s);

		if (nonforced_iommu(dev, addr, s->length)) {
			addr = dma_map_area(dev, addr, s->length, dir, 0);
			if (addr == bad_dma_addr) {
				if (i > 0)
					gart_unmap_sg(dev, sg, i, dir, NULL);
				nents = 0;
				sg[0].dma_length = 0;
				break;
			}
		}
		s->dma_address = addr;
		s->dma_length = s->length;
	}
	flush_gart();

	return nents;
}

/* Map multiple scatterlist entries contiguously into the first. */
static int __dma_map_cont(struct device *dev, struct scatterlist *start,
			  int nelems, struct scatterlist *sout,
			  unsigned long pages)
{
	unsigned long iommu_start = alloc_iommu(dev, pages, 0);
	unsigned long iommu_page = iommu_start;
	struct scatterlist *s;
	int i;

	if (iommu_start == -1)
		return -1;

	for_each_sg(start, s, nelems, i) {
		unsigned long pages, addr;
		unsigned long phys_addr = s->dma_address;

		BUG_ON(s != start && s->offset);
		if (s == start) {
			sout->dma_address = iommu_bus_base;
			sout->dma_address += iommu_page*PAGE_SIZE + s->offset;
			sout->dma_length = s->length;
		} else {
			sout->dma_length += s->length;
		}

		addr = phys_addr;
		pages = iommu_num_pages(s->offset, s->length, PAGE_SIZE);
		while (pages--) {
			iommu_gatt_base[iommu_page] = GPTE_ENCODE(addr);
			addr += PAGE_SIZE;
			iommu_page++;
		}
	}
	BUG_ON(iommu_page - iommu_start != pages);

	return 0;
}

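/*
 * The final BUG_ON() above checks against the pages *parameter*; the
 * loop-local variable of the same name only shadows it inside the
 * for_each_sg() body.
 */
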
static inline int
dma_map_cont(struct device *dev, struct scatterlist *start, int nelems,
	     struct scatterlist *sout, unsigned long pages, int need)
{
	if (!need) {
		BUG_ON(nelems != 1);
		sout->dma_address = start->dma_address;
		sout->dma_length = start->length;
		return 0;
	}
	return __dma_map_cont(dev, start, nelems, sout, pages);
}

/*
 * DMA map all entries in a scatterlist.
 * Merge chunks that have page aligned sizes into a contiguous mapping.
 */
static int gart_map_sg(struct device *dev, struct scatterlist *sg, int nents,
		       enum dma_data_direction dir, struct dma_attrs *attrs)
{
	struct scatterlist *s, *ps, *start_sg, *sgmap;
	int need = 0, nextneed, i, out, start;
	unsigned long pages = 0;
	unsigned int seg_size;
	unsigned int max_seg_size;

	if (nents == 0)
		return 0;

	if (!dev)
		dev = &x86_dma_fallback_dev;

	out		= 0;
	start		= 0;
	start_sg	= sg;
	sgmap		= sg;
	seg_size	= 0;
	max_seg_size	= dma_get_max_seg_size(dev);
	ps		= NULL; /* shut up gcc */

	for_each_sg(sg, s, nents, i) {
		dma_addr_t addr = sg_phys(s);

		s->dma_address = addr;
		BUG_ON(s->length == 0);

		nextneed = need_iommu(dev, addr, s->length);

		/* Handle the previous entries that are not processed yet */
		if (i > start) {
			/*
			 * Can only merge when the last chunk ends on a
			 * page boundary and the new one doesn't have an
			 * offset.
			 */
			if (!iommu_merge || !nextneed || !need || s->offset ||
			    (s->length + seg_size > max_seg_size) ||
			    (ps->offset + ps->length) % PAGE_SIZE) {
				if (dma_map_cont(dev, start_sg, i - start,
						 sgmap, pages, need) < 0)
					goto error;
				out++;

				seg_size	= 0;
				sgmap		= sg_next(sgmap);
				pages		= 0;
				start		= i;
				start_sg	= s;
			}
		}

		seg_size += s->length;
		need = nextneed;
		pages += iommu_num_pages(s->offset, s->length, PAGE_SIZE);
		ps = s;
	}
	if (dma_map_cont(dev, start_sg, i - start, sgmap, pages, need) < 0)
		goto error;
	out++;
	flush_gart();
	if (out < nents) {
		sgmap = sg_next(sgmap);
		sgmap->dma_length = 0;
	}
	return out;

error:
	flush_gart();
	gart_unmap_sg(dev, sg, out, dir, NULL);

	/* When it was forced or merged try again in a dumb way */
	if (force_iommu || iommu_merge) {
		out = dma_map_sg_nonforce(dev, sg, nents, dir);
		if (out > 0)
			return out;
	}
	if (panic_on_overflow)
		panic("dma_map_sg: overflow on %lu pages\n", pages);

	iommu_full(dev, pages << PAGE_SHIFT, dir);
	for_each_sg(sg, s, nents, i)
		s->dma_address = bad_dma_addr;
	return 0;
}

/* allocate and map a coherent mapping */
static void *
gart_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_addr,
		    gfp_t flag)
{
	dma_addr_t paddr;
	unsigned long align_mask;
	struct page *page;

	if (force_iommu && !(flag & GFP_DMA)) {
		flag &= ~(__GFP_DMA | __GFP_HIGHMEM | __GFP_DMA32);
		page = alloc_pages(flag | __GFP_ZERO, get_order(size));
		if (!page)
			return NULL;

		align_mask = (1UL << get_order(size)) - 1;
		paddr = dma_map_area(dev, page_to_phys(page), size,
				     DMA_BIDIRECTIONAL, align_mask);

		flush_gart();
		if (paddr != bad_dma_addr) {
			*dma_addr = paddr;
			return page_address(page);
		}
		__free_pages(page, get_order(size));
	} else
		return dma_generic_alloc_coherent(dev, size, dma_addr, flag);

	return NULL;
}

/* free a coherent mapping */
static void
gart_free_coherent(struct device *dev, size_t size, void *vaddr,
		   dma_addr_t dma_addr)
{
	gart_unmap_page(dev, dma_addr, size, DMA_BIDIRECTIONAL, NULL);
	free_pages((unsigned long)vaddr, get_order(size));
}

static int gart_mapping_error(struct device *dev, dma_addr_t dma_addr)
{
	return (dma_addr == bad_dma_addr);
}

static int no_agp;

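/*
 * Pick the size of the remapping area: unless a size was given on the
 * command line, use the whole aperture, or half of it when the
 * aperture has to be shared with the AGP driver.
 */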
static __init unsigned long check_iommu_size(unsigned long aper, u64 aper_size)
{
	unsigned long a;

	if (!iommu_size) {
		iommu_size = aper_size;
		if (!no_agp)
			iommu_size /= 2;
	}

	a = aper + iommu_size;
	iommu_size -= round_up(a, PMD_PAGE_SIZE) - a;

	if (iommu_size < 64*1024*1024) {
		pr_warning(
			"PCI-DMA: Warning: Small IOMMU %luMB."
			" Consider increasing the AGP aperture in BIOS\n",
			iommu_size >> 20);
	}

	return iommu_size;
}

static __init unsigned read_aperture(struct pci_dev *dev, u32 *size)
{
	unsigned aper_size = 0, aper_base_32, aper_order;
	u64 aper_base;

	pci_read_config_dword(dev, AMD64_GARTAPERTUREBASE, &aper_base_32);
	pci_read_config_dword(dev, AMD64_GARTAPERTURECTL, &aper_order);
	aper_order = (aper_order >> 1) & 7;

	aper_base = aper_base_32 & 0x7fff;
	aper_base <<= 25;

	aper_size = (32 * 1024 * 1024) << aper_order;
	if (aper_base + aper_size > 0x100000000UL || !aper_size)
		aper_base = 0;

	*size = aper_size;
	return aper_base;
}

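/*
 * The decode above: GARTAPERTURECTL bits 1-3 hold the aperture order,
 * giving aper_size = 32MB << order, and GARTAPERTUREBASE holds the
 * physical base in 32MB units; a register value of 2, for example,
 * decodes to a base of 2 << 25 = 64MB.
 */
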
static void enable_gart_translations(void)
{
	int i;

	if (!amd_nb_has_feature(AMD_NB_GART))
		return;

	for (i = 0; i < amd_nb_num(); i++) {
		struct pci_dev *dev = node_to_amd_nb(i)->misc;

		enable_gart_translation(dev, __pa(agp_gatt_table));
	}

	/* Flush the GART-TLB to remove stale entries */
	amd_flush_garts();
}

/*
 * If fix_up_north_bridges is set, the north bridges have to be fixed up on
 * resume in the same way as they are handled in gart_iommu_hole_init().
 */
static bool fix_up_north_bridges;
static u32 aperture_order;
static u32 aperture_alloc;

void set_up_gart_resume(u32 aper_order, u32 aper_alloc)
{
	fix_up_north_bridges = true;
	aperture_order = aper_order;
	aperture_alloc = aper_alloc;
}

static void gart_fixup_northbridges(struct sys_device *dev)
{
	int i;

	if (!fix_up_north_bridges)
		return;

	if (!amd_nb_has_feature(AMD_NB_GART))
		return;

	pr_info("PCI-DMA: Restoring GART aperture settings\n");

	for (i = 0; i < amd_nb_num(); i++) {
		struct pci_dev *dev = node_to_amd_nb(i)->misc;

		/*
		 * Don't enable translations just yet. That is the next
		 * step. Restore the pre-suspend aperture settings.
		 */
		gart_set_size_and_enable(dev, aperture_order);
		pci_write_config_dword(dev, AMD64_GARTAPERTUREBASE, aperture_alloc >> 25);
	}
}

static int gart_resume(struct sys_device *dev)
{
	pr_info("PCI-DMA: Resuming GART IOMMU\n");

	gart_fixup_northbridges(dev);

	enable_gart_translations();

	return 0;
}

static int gart_suspend(struct sys_device *dev, pm_message_t state)
{
	return 0;
}

static struct sysdev_class gart_sysdev_class = {
	.name		= "gart",
	.suspend	= gart_suspend,
	.resume		= gart_resume,
};

static struct sys_device device_gart = {
	.cls		= &gart_sysdev_class,
};

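/*
 * Sizing note for the code below: the GATT needs one 32-bit entry per
 * 4K aperture page, so e.g. a 64MB aperture takes a 64KB table. The
 * table is switched to uncacheable, presumably because the
 * northbridge's table walks are not guaranteed to snoop the CPU
 * caches.
 */
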
/*
 * Private Northbridge GATT initialization in case we cannot use the
 * AGP driver for some reason.
 */
static __init int init_amd_gatt(struct agp_kern_info *info)
{
	unsigned aper_size, gatt_size, new_aper_size;
	unsigned aper_base, new_aper_base;
	struct pci_dev *dev;
	void *gatt;
	int i, error;

	pr_info("PCI-DMA: Disabling AGP.\n");

	aper_size = aper_base = info->aper_size = 0;
	dev = NULL;
	for (i = 0; i < amd_nb_num(); i++) {
		dev = node_to_amd_nb(i)->misc;
		new_aper_base = read_aperture(dev, &new_aper_size);
		if (!new_aper_base)
			goto nommu;

		if (!aper_base) {
			aper_size = new_aper_size;
			aper_base = new_aper_base;
		}
		if (aper_size != new_aper_size || aper_base != new_aper_base)
			goto nommu;
	}
	if (!aper_base)
		goto nommu;

	info->aper_base = aper_base;
	info->aper_size = aper_size >> 20;

	gatt_size = (aper_size >> PAGE_SHIFT) * sizeof(u32);
	gatt = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
					get_order(gatt_size));
	if (!gatt)
		panic("Cannot allocate GATT table");
	if (set_memory_uc((unsigned long)gatt, gatt_size >> PAGE_SHIFT))
		panic("Could not set GART PTEs to uncacheable pages");

	agp_gatt_table = gatt;

	error = sysdev_class_register(&gart_sysdev_class);
	if (!error)
		error = sysdev_register(&device_gart);
	if (error)
		panic("Could not register gart_sysdev -- "
		      "would corrupt data on next suspend");

	flush_gart();

	pr_info("PCI-DMA: aperture base @ %x size %u KB\n",
		aper_base, aper_size >> 10);

	return 0;

 nommu:
	/* Should not happen anymore */
	pr_warning("PCI-DMA: More than 4GB of RAM and no IOMMU\n"
		   "falling back to iommu=soft.\n");
	return -1;
}

static struct dma_map_ops gart_dma_ops = {
	.map_sg				= gart_map_sg,
	.unmap_sg			= gart_unmap_sg,
	.map_page			= gart_map_page,
	.unmap_page			= gart_unmap_page,
	.alloc_coherent			= gart_alloc_coherent,
	.free_coherent			= gart_free_coherent,
	.mapping_error			= gart_mapping_error,
};

static void gart_iommu_shutdown(void)
{
	struct pci_dev *dev;
	int i;

	/* don't shut it down if AGP is installed */
	if (!no_agp)
		return;

	if (!amd_nb_has_feature(AMD_NB_GART))
		return;

	for (i = 0; i < amd_nb_num(); i++) {
		u32 ctl;

		dev = node_to_amd_nb(i)->misc;
		pci_read_config_dword(dev, AMD64_GARTAPERTURECTL, &ctl);

		ctl &= ~GARTEN;

		pci_write_config_dword(dev, AMD64_GARTAPERTURECTL, ctl);
	}
}

int __init gart_iommu_init(void)
{
	struct agp_kern_info info;
	unsigned long iommu_start;
	unsigned long aper_base, aper_size;
	unsigned long start_pfn, end_pfn;
	unsigned long scratch;
	long i;

	if (!amd_nb_has_feature(AMD_NB_GART))
		return 0;

#ifndef CONFIG_AGP_AMD64
	no_agp = 1;
#else
	/* Makefile puts PCI initialization via subsys_initcall first. */
	/* Add other AMD AGP bridge drivers here */
	no_agp = no_agp ||
		 (agp_amd64_init() < 0) ||
		 (agp_copy_info(agp_bridge, &info) < 0);
#endif

	if (no_iommu ||
	    (!force_iommu && max_pfn <= MAX_DMA32_PFN) ||
	    !gart_iommu_aperture ||
	    (no_agp && init_amd_gatt(&info) < 0)) {
		if (max_pfn > MAX_DMA32_PFN) {
			pr_warning("More than 4GB of memory but GART IOMMU not available.\n");
			pr_warning("falling back to iommu=soft.\n");
		}
		return 0;
	}

	/* need to map that range */
	aper_size	= info.aper_size << 20;
	aper_base	= info.aper_base;
	end_pfn		= (aper_base>>PAGE_SHIFT) + (aper_size>>PAGE_SHIFT);

	if (end_pfn > max_low_pfn_mapped) {
		start_pfn = (aper_base>>PAGE_SHIFT);
		init_memory_mapping(start_pfn<<PAGE_SHIFT, end_pfn<<PAGE_SHIFT);
	}

	pr_info("PCI-DMA: using GART IOMMU.\n");
	iommu_size = check_iommu_size(info.aper_base, aper_size);
	iommu_pages = iommu_size >> PAGE_SHIFT;

	iommu_gart_bitmap = (void *) __get_free_pages(GFP_KERNEL | __GFP_ZERO,
						      get_order(iommu_pages/8));
	if (!iommu_gart_bitmap)
		panic("Cannot allocate iommu bitmap\n");

#ifdef CONFIG_IOMMU_LEAK
	if (leak_trace) {
		int ret;

		ret = dma_debug_resize_entries(iommu_pages);
		if (ret)
			pr_debug("PCI-DMA: Cannot trace all the entries\n");
	}
#endif

	/*
	 * Out of IOMMU space handling.
	 * Reserve some invalid pages at the beginning of the GART.
	 */
	bitmap_set(iommu_gart_bitmap, 0, EMERGENCY_PAGES);

	pr_info("PCI-DMA: Reserving %luMB of IOMMU area in the AGP aperture\n",
		iommu_size >> 20);

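	/*
	 * The remapping window is carved out of the top of the aperture;
	 * the bottom part stays with AGP. bad_dma_addr points at the
	 * reserved emergency pages at the window's base, so the error
	 * cookie can never collide with a live mapping and is caught by
	 * gart_mapping_error().
	 */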
	agp_memory_reserved	= iommu_size;
	iommu_start		= aper_size - iommu_size;
	iommu_bus_base		= info.aper_base + iommu_start;
	bad_dma_addr		= iommu_bus_base;
	iommu_gatt_base		= agp_gatt_table + (iommu_start>>PAGE_SHIFT);

	/*
	 * Unmap the IOMMU part of the GART. The alias of the page is
	 * always mapped with cache enabled and there is no full cache
	 * coherency across the GART remapping. The unmapping avoids
	 * automatic prefetches from the CPU allocating cache lines in
	 * there. All CPU accesses are done via the direct mapping to
	 * the backing memory. The GART address is only used by PCI
	 * devices.
	 */
	set_memory_np((unsigned long)__va(iommu_bus_base),
		      iommu_size >> PAGE_SHIFT);
	/*
	 * Tricky. The GART table remaps the physical memory range,
	 * so the CPU won't notice potential aliases and if the memory
	 * is remapped to UC later on, we might surprise the PCI devices
	 * with a stray writeout of a cacheline. So play it sure and
	 * do an explicit, full-scale wbinvd() _after_ having marked all
	 * the pages as Not-Present:
	 */
	wbinvd();

	/*
	 * Now all caches are flushed and we can safely enable
	 * GART hardware. Doing it early leaves the possibility
	 * of stale cache entries that can lead to GART PTE
	 * errors.
	 */
	enable_gart_translations();

	/*
	 * Try to work around a bug (thanks to BenH):
	 * Set unmapped entries to a scratch page instead of 0.
	 * Any prefetches that hit unmapped entries won't get a bus
	 * abort then. (P2P bridge may be prefetching on DMA reads).
	 */
	scratch = get_zeroed_page(GFP_KERNEL);
	if (!scratch)
		panic("Cannot allocate iommu scratch page");
	gart_unmapped_entry = GPTE_ENCODE(__pa(scratch));
	for (i = EMERGENCY_PAGES; i < iommu_pages; i++)
		iommu_gatt_base[i] = gart_unmapped_entry;

	flush_gart();
	dma_ops = &gart_dma_ops;
	x86_platform.iommu_shutdown = gart_iommu_shutdown;
	swiotlb = 0;

	return 0;
}

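/*
 * Parses the GART-specific part of the iommu= boot option, e.g.
 * "iommu=fullflush" or "iommu=memaper=2" (assuming the usual iommu=
 * option syntax; see Documentation/x86/x86_64/boot-options.txt).
 */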
void __init gart_parse_options(char *p)
{
	int arg;

#ifdef CONFIG_IOMMU_LEAK
	if (!strncmp(p, "leak", 4)) {
		leak_trace = 1;
		p += 4;
		if (*p == '=')
			++p;
		if (isdigit(*p) && get_option(&p, &arg))
			iommu_leak_pages = arg;
	}
#endif
	if (isdigit(*p) && get_option(&p, &arg))
		iommu_size = arg;
	if (!strncmp(p, "fullflush", 9))
		iommu_fullflush = 1;
	if (!strncmp(p, "nofullflush", 11))
		iommu_fullflush = 0;
	if (!strncmp(p, "noagp", 5))
		no_agp = 1;
	if (!strncmp(p, "noaperture", 10))
		fix_aperture = 0;
	/* duplicated from pci-dma.c */
	if (!strncmp(p, "force", 5))
		gart_iommu_aperture_allowed = 1;
	if (!strncmp(p, "allowed", 7))
		gart_iommu_aperture_allowed = 1;
	if (!strncmp(p, "memaper", 7)) {
		fallback_aper_force = 1;
		p += 7;
		if (*p == '=') {
			++p;
			if (get_option(&p, &arg))
				fallback_aper_order = arg;
		}
	}
}
IOMMU_INIT_POST(gart_iommu_hole_init);