/*
 * Dynamic DMA mapping support for AMD Hammer.
 *
 * Use the integrated AGP GART in the Hammer northbridge as an IOMMU for PCI.
 * This allows the use of PCI devices that only support 32bit addresses on
 * systems with more than 4GB.
 *
 * See Documentation/DMA-mapping.txt for the interface specification.
 *
 * Copyright 2002 Andi Kleen, SuSE Labs.
 * Subject to the GNU General Public License v2 only.
 */

#include <linux/types.h>
#include <linux/ctype.h>
#include <linux/agp_backend.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/string.h>
#include <linux/spinlock.h>
#include <linux/pci.h>
#include <linux/module.h>
#include <linux/topology.h>
#include <linux/interrupt.h>
#include <linux/bitops.h>
#include <linux/kdebug.h>
#include <linux/scatterlist.h>
#include <linux/iommu-helper.h>
#include <linux/sysdev.h>
#include <asm/atomic.h>
#include <asm/io.h>
#include <asm/mtrr.h>
#include <asm/pgtable.h>
#include <asm/proto.h>
#include <asm/iommu.h>
#include <asm/gart.h>
#include <asm/cacheflush.h>
#include <asm/swiotlb.h>
#include <asm/dma.h>
#include <asm/k8.h>

static unsigned long iommu_bus_base;	/* GART remapping area (physical) */
static unsigned long iommu_size;	/* size of remapping area in bytes */
static unsigned long iommu_pages;	/* .. and in pages */

static u32 *iommu_gatt_base;		/* Remapping table */

/*
 * If this is disabled the IOMMU will use an optimized flushing strategy
 * of only flushing when a mapping is reused. With it true the GART is
 * flushed for every mapping. Problem is that doing the lazy flush seems
 * to trigger bugs with some popular PCI cards, in particular 3ware (but
 * it has also been seen with Qlogic at least).
 */
int iommu_fullflush = 1;
56 | ||
05fccb0e | 57 | /* Allocation bitmap for the remapping area: */ |
1da177e4 | 58 | static DEFINE_SPINLOCK(iommu_bitmap_lock); |
05fccb0e IM |
59 | /* Guarded by iommu_bitmap_lock: */ |
60 | static unsigned long *iommu_gart_bitmap; | |
1da177e4 | 61 | |
05fccb0e | 62 | static u32 gart_unmapped_entry; |
1da177e4 LT |
63 | |
64 | #define GPTE_VALID 1 | |
65 | #define GPTE_COHERENT 2 | |
66 | #define GPTE_ENCODE(x) \ | |
67 | (((x) & 0xfffff000) | (((x) >> 32) << 4) | GPTE_VALID | GPTE_COHERENT) | |
68 | #define GPTE_DECODE(x) (((x) & 0xfffff000) | (((u64)(x) & 0xff0) << 28)) | |
69 | ||
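/*
 * Worked example (added commentary, not in the original source): the GART
 * PTE packs a 40-bit physical address into 32 bits by keeping address
 * bits 12-31 in place and storing bits 32-39 at PTE bits 4-11. For
 * x = 0x123456000:
 *
 *	GPTE_ENCODE(0x123456000ULL)
 *		== (0x23456000 | (0x1 << 4) | 3) == 0x23456013
 *	GPTE_DECODE(0x23456013)
 *		== 0x23456000 | (0x010 << 28) == 0x123456000
 *
 * so encode/decode round-trip for page-aligned addresses up to 1TB.
 */
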
#define to_pages(addr, size) \
	(round_up(((addr) & ~PAGE_MASK) + (size), PAGE_SIZE) >> PAGE_SHIFT)

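/*
 * Example (illustrative, assuming 4K pages): a 0x20-byte buffer starting
 * at page offset 0xff0 crosses a page boundary, so
 * to_pages(0xff0, 0x20) == round_up(0x1010, 0x1000) >> 12 == 2.
 */
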
#define EMERGENCY_PAGES 32 /* = 128KB */

#ifdef CONFIG_AGP
#define AGPEXTERN extern
#else
#define AGPEXTERN
#endif

/* backdoor interface to AGP driver */
AGPEXTERN int agp_memory_reserved;
AGPEXTERN __u32 *agp_gatt_table;

static unsigned long next_bit;	/* protected by iommu_bitmap_lock */
static int need_flush;		/* global flush state. set for each gart wrap */

static unsigned long alloc_iommu(struct device *dev, int size)
{
	unsigned long offset, flags;
	unsigned long boundary_size;
	unsigned long base_index;

	base_index = ALIGN(iommu_bus_base & dma_get_seg_boundary(dev),
			   PAGE_SIZE) >> PAGE_SHIFT;
	boundary_size = ALIGN(dma_get_seg_boundary(dev) + 1,
			      PAGE_SIZE) >> PAGE_SHIFT;

	spin_lock_irqsave(&iommu_bitmap_lock, flags);
	offset = iommu_area_alloc(iommu_gart_bitmap, iommu_pages, next_bit,
				  size, base_index, boundary_size, 0);
	if (offset == -1) {
		need_flush = 1;
		offset = iommu_area_alloc(iommu_gart_bitmap, iommu_pages, 0,
					  size, base_index, boundary_size, 0);
	}
	if (offset != -1) {
		next_bit = offset + size;
		if (next_bit >= iommu_pages) {
			next_bit = 0;
			need_flush = 1;
		}
	}
	if (iommu_fullflush)
		need_flush = 1;
	spin_unlock_irqrestore(&iommu_bitmap_lock, flags);

	return offset;
}

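/*
 * Note (added commentary): alloc_iommu() is a next-fit allocator. The
 * next_bit rotor avoids immediately reusing freshly freed GART entries,
 * which is what makes the lazy flush in flush_gart() safe: the GART only
 * has to be flushed before an entry is *reused*, i.e. once the search
 * wraps back to offset 0, and need_flush records exactly that event.
 */
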
static void free_iommu(unsigned long offset, int size)
{
	unsigned long flags;

	spin_lock_irqsave(&iommu_bitmap_lock, flags);
	iommu_area_free(iommu_gart_bitmap, offset, size);
	spin_unlock_irqrestore(&iommu_bitmap_lock, flags);
}

/*
 * Use global flush state to avoid races with multiple flushers.
 */
static void flush_gart(void)
{
	unsigned long flags;

	spin_lock_irqsave(&iommu_bitmap_lock, flags);
	if (need_flush) {
		k8_flush_garts();
		need_flush = 0;
	}
	spin_unlock_irqrestore(&iommu_bitmap_lock, flags);
}

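/*
 * Note (added commentary): need_flush is tested and cleared under
 * iommu_bitmap_lock, so two CPUs mapping concurrently cannot both skip
 * the flush, and a flush cannot race with an allocation that is about to
 * reuse a wrapped-around entry.
 */
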
#ifdef CONFIG_IOMMU_LEAK

#define SET_LEAK(x)							\
	do {								\
		if (iommu_leak_tab)					\
			iommu_leak_tab[x] = __builtin_return_address(0);\
	} while (0)

#define CLEAR_LEAK(x)							\
	do {								\
		if (iommu_leak_tab)					\
			iommu_leak_tab[x] = NULL;			\
	} while (0)

/* Debugging aid for drivers that don't free their IOMMU tables */
static void **iommu_leak_tab;
static int leak_trace;
static int iommu_leak_pages = 20;

static void dump_leak(void)
{
	int i;
	static int dump;

	if (dump || !iommu_leak_tab)
		return;
	dump = 1;
	show_stack(NULL, NULL);

	/* Very crude. Dump some entries from the end of the table too. */
	printk(KERN_DEBUG "Dumping %d pages from end of IOMMU:\n",
	       iommu_leak_pages);
	for (i = 0; i < iommu_leak_pages; i += 2) {
		printk(KERN_DEBUG "%lu: ", iommu_pages-i);
		printk_address((unsigned long) iommu_leak_tab[iommu_pages-i], 0);
		printk(KERN_CONT "%c", (i+1)%2 == 0 ? '\n' : ' ');
	}
	printk(KERN_DEBUG "\n");
}
#else
# define SET_LEAK(x)
# define CLEAR_LEAK(x)
#endif

static void iommu_full(struct device *dev, size_t size, int dir)
{
	/*
	 * Ran out of IOMMU space for this operation. This is very bad.
	 * Unfortunately the drivers cannot handle this operation properly.
	 * Return some non-mapped prereserved space in the aperture and
	 * let the Northbridge deal with it. This will result in garbage
	 * in the IO operation. When the size exceeds the prereserved space
	 * memory corruption will occur or random memory will be DMAed
	 * out. Hopefully no network devices use single mappings that big.
	 */

	printk(KERN_ERR
		"PCI-DMA: Out of IOMMU space for %lu bytes at device %s\n",
		size, dev->bus_id);

	if (size > PAGE_SIZE*EMERGENCY_PAGES) {
		if (dir == PCI_DMA_FROMDEVICE || dir == PCI_DMA_BIDIRECTIONAL)
			panic("PCI-DMA: Memory would be corrupted\n");
		if (dir == PCI_DMA_TODEVICE || dir == PCI_DMA_BIDIRECTIONAL)
			panic("PCI-DMA: Random memory would be DMAed\n");
	}
#ifdef CONFIG_IOMMU_LEAK
	dump_leak();
#endif
}

static inline int
need_iommu(struct device *dev, unsigned long addr, size_t size)
{
	u64 mask = *dev->dma_mask;
	int high = addr + size > mask;
	int mmu = high;

	if (force_iommu)
		mmu = 1;

	return mmu;
}

static inline int
nonforced_iommu(struct device *dev, unsigned long addr, size_t size)
{
	u64 mask = *dev->dma_mask;
	int high = addr + size > mask;
	int mmu = high;

	return mmu;
}

/* Map a single contiguous physical area into the IOMMU.
 * Caller needs to check if the iommu is needed and flush.
 */
static dma_addr_t dma_map_area(struct device *dev, dma_addr_t phys_mem,
				size_t size, int dir)
{
	unsigned long npages = to_pages(phys_mem, size);
	unsigned long iommu_page = alloc_iommu(dev, npages);
	int i;

	if (iommu_page == -1) {
		if (!nonforced_iommu(dev, phys_mem, size))
			return phys_mem;
		if (panic_on_overflow)
			panic("dma_map_area overflow %lu bytes\n", size);
		iommu_full(dev, size, dir);
		return bad_dma_address;
	}

	for (i = 0; i < npages; i++) {
		iommu_gatt_base[iommu_page + i] = GPTE_ENCODE(phys_mem);
		SET_LEAK(iommu_page + i);
		phys_mem += PAGE_SIZE;
	}
	return iommu_bus_base + iommu_page*PAGE_SIZE + (phys_mem & ~PAGE_MASK);
}

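/*
 * Worked example (added commentary, assuming 4K pages): mapping
 * phys_mem = 0x100000100 with size = 0x2100 gives npages = 3; three
 * consecutive GART entries are pointed at the three physical pages and
 * the returned bus address is iommu_bus_base + iommu_page*PAGE_SIZE +
 * 0x100. Note the loop above advances phys_mem in whole pages, so
 * (phys_mem & ~PAGE_MASK) is still the original intra-page offset.
 */
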
static dma_addr_t
gart_map_simple(struct device *dev, phys_addr_t paddr, size_t size, int dir)
{
	dma_addr_t map = dma_map_area(dev, paddr, size, dir);

	flush_gart();

	return map;
}

/* Map a single area into the IOMMU */
static dma_addr_t
gart_map_single(struct device *dev, phys_addr_t paddr, size_t size, int dir)
{
	unsigned long bus;

	if (!dev)
		dev = &fallback_dev;

	if (!need_iommu(dev, paddr, size))
		return paddr;

	bus = gart_map_simple(dev, paddr, size, dir);

	return bus;
}

/*
 * Free a DMA mapping.
 */
static void gart_unmap_single(struct device *dev, dma_addr_t dma_addr,
			      size_t size, int direction)
{
	unsigned long iommu_page;
	int npages;
	int i;

	if (dma_addr < iommu_bus_base + EMERGENCY_PAGES*PAGE_SIZE ||
	    dma_addr >= iommu_bus_base + iommu_size)
		return;

	iommu_page = (dma_addr - iommu_bus_base)>>PAGE_SHIFT;
	npages = to_pages(dma_addr, size);
	for (i = 0; i < npages; i++) {
		iommu_gatt_base[iommu_page + i] = gart_unmapped_entry;
		CLEAR_LEAK(iommu_page + i);
	}
	free_iommu(iommu_page, npages);
}

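/*
 * Note (added commentary): the range check above quietly ignores
 * addresses outside the remapping window. That covers both identity
 * mappings that never went through the GART and the EMERGENCY_PAGES
 * region handed out by iommu_full(), neither of which owns GART entries
 * that could be freed here.
 */
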
/*
 * Wrapper for pci_unmap_single working with scatterlists.
 */
static void
gart_unmap_sg(struct device *dev, struct scatterlist *sg, int nents, int dir)
{
	struct scatterlist *s;
	int i;

	for_each_sg(sg, s, nents, i) {
		if (!s->dma_length || !s->length)
			break;
		gart_unmap_single(dev, s->dma_address, s->dma_length, dir);
	}
}

/* Fallback for dma_map_sg in case of overflow */
static int dma_map_sg_nonforce(struct device *dev, struct scatterlist *sg,
			       int nents, int dir)
{
	struct scatterlist *s;
	int i;

#ifdef CONFIG_IOMMU_DEBUG
	printk(KERN_DEBUG "dma_map_sg overflow\n");
#endif

	for_each_sg(sg, s, nents, i) {
		unsigned long addr = sg_phys(s);

		if (nonforced_iommu(dev, addr, s->length)) {
			addr = dma_map_area(dev, addr, s->length, dir);
			if (addr == bad_dma_address) {
				if (i > 0)
					gart_unmap_sg(dev, sg, i, dir);
				nents = 0;
				sg[0].dma_length = 0;
				break;
			}
		}
		s->dma_address = addr;
		s->dma_length = s->length;
	}
	flush_gart();

	return nents;
}

/* Map multiple scatterlist entries contiguously into the first. */
static int __dma_map_cont(struct device *dev, struct scatterlist *start,
			  int nelems, struct scatterlist *sout,
			  unsigned long pages)
{
	unsigned long iommu_start = alloc_iommu(dev, pages);
	unsigned long iommu_page = iommu_start;
	struct scatterlist *s;
	int i;

	if (iommu_start == -1)
		return -1;

	for_each_sg(start, s, nelems, i) {
		unsigned long pages, addr;
		unsigned long phys_addr = s->dma_address;

		BUG_ON(s != start && s->offset);
		if (s == start) {
			sout->dma_address = iommu_bus_base;
			sout->dma_address += iommu_page*PAGE_SIZE + s->offset;
			sout->dma_length = s->length;
		} else {
			sout->dma_length += s->length;
		}

		addr = phys_addr;
		pages = to_pages(s->offset, s->length);
		while (pages--) {
			iommu_gatt_base[iommu_page] = GPTE_ENCODE(addr);
			SET_LEAK(iommu_page);
			addr += PAGE_SIZE;
			iommu_page++;
		}
	}
	BUG_ON(iommu_page - iommu_start != pages);

	return 0;
}

static inline int
dma_map_cont(struct device *dev, struct scatterlist *start, int nelems,
	     struct scatterlist *sout, unsigned long pages, int need)
{
	if (!need) {
		BUG_ON(nelems != 1);
		sout->dma_address = start->dma_address;
		sout->dma_length = start->length;
		return 0;
	}
	return __dma_map_cont(dev, start, nelems, sout, pages);
}

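/*
 * Sketch of the merge path (added commentary): given two scatterlist
 * entries of 4K each that the caller has decided to merge, dma_map_cont()
 * allocates two GART pages, programs them with the two physical pages,
 * and reports a single 8K DMA segment in sout. The !need shortcut avoids
 * touching the GART entirely when a lone entry is already reachable
 * through the device's DMA mask.
 */
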
/*
 * DMA map all entries in a scatterlist.
 * Merge chunks that have page aligned sizes into a contiguous mapping.
 */
static int
gart_map_sg(struct device *dev, struct scatterlist *sg, int nents, int dir)
{
	struct scatterlist *s, *ps, *start_sg, *sgmap;
	int need = 0, nextneed, i, out, start;
	unsigned long pages = 0;
	unsigned int seg_size;
	unsigned int max_seg_size;

	if (nents == 0)
		return 0;

	if (!dev)
		dev = &fallback_dev;

	out = 0;
	start = 0;
	start_sg = sgmap = sg;
	seg_size = 0;
	max_seg_size = dma_get_max_seg_size(dev);
	ps = NULL; /* shut up gcc */
	for_each_sg(sg, s, nents, i) {
		dma_addr_t addr = sg_phys(s);

		s->dma_address = addr;
		BUG_ON(s->length == 0);

		nextneed = need_iommu(dev, addr, s->length);

		/* Handle the previous not yet processed entries */
		if (i > start) {
			/*
			 * Can only merge when the last chunk ends on a
			 * page boundary and the new one doesn't have an
			 * offset.
			 */
			if (!iommu_merge || !nextneed || !need || s->offset ||
			    (s->length + seg_size > max_seg_size) ||
			    (ps->offset + ps->length) % PAGE_SIZE) {
				if (dma_map_cont(dev, start_sg, i - start,
						 sgmap, pages, need) < 0)
					goto error;
				out++;
				seg_size = 0;
				sgmap = sg_next(sgmap);
				pages = 0;
				start = i;
				start_sg = s;
			}
		}

		seg_size += s->length;
		need = nextneed;
		pages += to_pages(s->offset, s->length);
		ps = s;
	}
	if (dma_map_cont(dev, start_sg, i - start, sgmap, pages, need) < 0)
		goto error;
	out++;
	flush_gart();
	if (out < nents) {
		sgmap = sg_next(sgmap);
		sgmap->dma_length = 0;
	}
	return out;

error:
	flush_gart();
	gart_unmap_sg(dev, sg, out, dir);

	/* When it was forced or merged try again in a dumb way */
	if (force_iommu || iommu_merge) {
		out = dma_map_sg_nonforce(dev, sg, nents, dir);
		if (out > 0)
			return out;
	}
	if (panic_on_overflow)
		panic("dma_map_sg: overflow on %lu pages\n", pages);

	iommu_full(dev, pages << PAGE_SHIFT, dir);
	for_each_sg(sg, s, nents, i)
		s->dma_address = bad_dma_address;
	return 0;
}

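/*
 * Merge example (added commentary): three entries of 4K/4K/2K with zero
 * intra-page offsets satisfy the conditions above, so they collapse into
 * one 10K DMA segment and the device sees a single dma_address/dma_length
 * pair. An entry starting at a non-zero page offset, or one that would
 * push the segment past dma_get_max_seg_size(), forces the current run to
 * be written out via dma_map_cont() and a new run to begin.
 */
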
static int no_agp;

static __init unsigned long check_iommu_size(unsigned long aper, u64 aper_size)
{
	unsigned long a;

	if (!iommu_size) {
		iommu_size = aper_size;
		if (!no_agp)
			iommu_size /= 2;
	}

	a = aper + iommu_size;
	iommu_size -= round_up(a, PMD_PAGE_SIZE) - a;

	if (iommu_size < 64*1024*1024) {
		printk(KERN_WARNING
			"PCI-DMA: Warning: Small IOMMU %luMB."
			" Consider increasing the AGP aperture in BIOS\n",
			iommu_size >> 20);
	}

	return iommu_size;
}

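/*
 * Note (added commentary): unless a size was given on the command line,
 * half of the aperture is left to the AGP driver and half is used as
 * IOMMU space; the end of the IOMMU window is then pulled back onto a
 * PMD_PAGE_SIZE (2MB) boundary, which keeps the window that
 * gart_iommu_init() later marks Not-Present aligned with the kernel's
 * 2MB direct-mapping pages.
 */
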
static __init unsigned read_aperture(struct pci_dev *dev, u32 *size)
{
	unsigned aper_size = 0, aper_base_32, aper_order;
	u64 aper_base;

	pci_read_config_dword(dev, AMD64_GARTAPERTUREBASE, &aper_base_32);
	pci_read_config_dword(dev, AMD64_GARTAPERTURECTL, &aper_order);
	aper_order = (aper_order >> 1) & 7;

	aper_base = aper_base_32 & 0x7fff;
	aper_base <<= 25;

	aper_size = (32 * 1024 * 1024) << aper_order;
	if (aper_base + aper_size > 0x100000000UL || !aper_size)
		aper_base = 0;

	*size = aper_size;
	return aper_base;
}

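/*
 * Decoding example (added commentary): the hardware stores the aperture
 * base in 32MB units and the size as a power-of-two order, so
 * aper_order == 1 means a (32MB << 1) = 64MB aperture, and a base field
 * of 0x40 decodes to 0x40 << 25 = 2GB. Apertures that would extend above
 * 4GB are treated as invalid here.
 */
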
static void enable_gart_translations(void)
{
	int i;

	for (i = 0; i < num_k8_northbridges; i++) {
		struct pci_dev *dev = k8_northbridges[i];

		enable_gart_translation(dev, __pa(agp_gatt_table));
	}
}

/*
 * If fix_up_north_bridges is set, the north bridges have to be fixed up on
 * resume in the same way as they are handled in gart_iommu_hole_init().
 */
static bool fix_up_north_bridges;
static u32 aperture_order;
static u32 aperture_alloc;

void set_up_gart_resume(u32 aper_order, u32 aper_alloc)
{
	fix_up_north_bridges = true;
	aperture_order = aper_order;
	aperture_alloc = aper_alloc;
}

static int gart_resume(struct sys_device *dev)
{
	printk(KERN_INFO "PCI-DMA: Resuming GART IOMMU\n");

	if (fix_up_north_bridges) {
		int i;

		printk(KERN_INFO "PCI-DMA: Restoring GART aperture settings\n");

		for (i = 0; i < num_k8_northbridges; i++) {
			struct pci_dev *dev = k8_northbridges[i];

			/*
			 * Don't enable translations just yet. That is the next
			 * step. Restore the pre-suspend aperture settings.
			 */
			pci_write_config_dword(dev, AMD64_GARTAPERTURECTL,
					       aperture_order << 1);
			pci_write_config_dword(dev, AMD64_GARTAPERTUREBASE,
					       aperture_alloc >> 25);
		}
	}

	enable_gart_translations();

	return 0;
}

static int gart_suspend(struct sys_device *dev, pm_message_t state)
{
	return 0;
}

static struct sysdev_class gart_sysdev_class = {
	.name		= "gart",
	.suspend	= gart_suspend,
	.resume		= gart_resume,
};

static struct sys_device device_gart = {
	.id	= 0,
	.cls	= &gart_sysdev_class,
};

/*
 * Private Northbridge GATT initialization in case we cannot use the
 * AGP driver for some reason.
 */
static __init int init_k8_gatt(struct agp_kern_info *info)
{
	unsigned aper_size, gatt_size, new_aper_size;
	unsigned aper_base, new_aper_base;
	struct pci_dev *dev;
	void *gatt;
	int i, error;
	unsigned long start_pfn, end_pfn;

	printk(KERN_INFO "PCI-DMA: Disabling AGP.\n");
	aper_size = aper_base = info->aper_size = 0;
	dev = NULL;
	for (i = 0; i < num_k8_northbridges; i++) {
		dev = k8_northbridges[i];
		new_aper_base = read_aperture(dev, &new_aper_size);
		if (!new_aper_base)
			goto nommu;

		if (!aper_base) {
			aper_size = new_aper_size;
			aper_base = new_aper_base;
		}
		if (aper_size != new_aper_size || aper_base != new_aper_base)
			goto nommu;
	}
	if (!aper_base)
		goto nommu;
	info->aper_base = aper_base;
	info->aper_size = aper_size >> 20;

	gatt_size = (aper_size >> PAGE_SHIFT) * sizeof(u32);
	gatt = (void *)__get_free_pages(GFP_KERNEL, get_order(gatt_size));
	if (!gatt)
		panic("Cannot allocate GATT table");
	if (set_memory_uc((unsigned long)gatt, gatt_size >> PAGE_SHIFT))
		panic("Could not set GART PTEs to uncacheable pages");

	memset(gatt, 0, gatt_size);
	agp_gatt_table = gatt;

	enable_gart_translations();

	error = sysdev_class_register(&gart_sysdev_class);
	if (!error)
		error = sysdev_register(&device_gart);
	if (error)
		panic("Could not register gart_sysdev -- "
		      "would corrupt data on next suspend");

	flush_gart();

	printk(KERN_INFO "PCI-DMA: aperture base @ %x size %u KB\n",
	       aper_base, aper_size>>10);

	/* need to map that range */
	end_pfn = (aper_base>>PAGE_SHIFT) + (aper_size>>PAGE_SHIFT);
	if (end_pfn > max_low_pfn_mapped) {
		start_pfn = (aper_base>>PAGE_SHIFT);
		init_memory_mapping(start_pfn<<PAGE_SHIFT, end_pfn<<PAGE_SHIFT);
	}
	return 0;

 nommu:
	/* Should not happen anymore */
	printk(KERN_WARNING "PCI-DMA: More than 4GB of RAM and no IOMMU\n"
	       KERN_WARNING "falling back to iommu=soft.\n");
	return -1;
}

extern int agp_amd64_init(void);

static const struct dma_mapping_ops gart_dma_ops = {
	.mapping_error			= NULL,
	.map_single			= gart_map_single,
	.map_simple			= gart_map_simple,
	.unmap_single			= gart_unmap_single,
	.sync_single_for_cpu		= NULL,
	.sync_single_for_device		= NULL,
	.sync_single_range_for_cpu	= NULL,
	.sync_single_range_for_device	= NULL,
	.sync_sg_for_cpu		= NULL,
	.sync_sg_for_device		= NULL,
	.map_sg				= gart_map_sg,
	.unmap_sg			= gart_unmap_sg,
};

void gart_iommu_shutdown(void)
{
	struct pci_dev *dev;
	int i;

	if (no_agp && (dma_ops != &gart_dma_ops))
		return;

	for (i = 0; i < num_k8_northbridges; i++) {
		u32 ctl;

		dev = k8_northbridges[i];
		pci_read_config_dword(dev, AMD64_GARTAPERTURECTL, &ctl);

		ctl &= ~GARTEN;

		pci_write_config_dword(dev, AMD64_GARTAPERTURECTL, ctl);
	}
}

void __init gart_iommu_init(void)
{
	struct agp_kern_info info;
	unsigned long iommu_start;
	unsigned long aper_size;
	unsigned long scratch;
	long i;

	if (cache_k8_northbridges() < 0 || num_k8_northbridges == 0) {
		printk(KERN_INFO "PCI-GART: No AMD northbridge found.\n");
		return;
	}

#ifndef CONFIG_AGP_AMD64
	no_agp = 1;
#else
	/* Makefile puts PCI initialization via subsys_initcall first. */
	/* Add other K8 AGP bridge drivers here */
	no_agp = no_agp ||
		(agp_amd64_init() < 0) ||
		(agp_copy_info(agp_bridge, &info) < 0);
#endif

	if (swiotlb)
		return;

	/* Did we detect a different HW IOMMU? */
	if (iommu_detected && !gart_iommu_aperture)
		return;

	if (no_iommu ||
	    (!force_iommu && max_pfn <= MAX_DMA32_PFN) ||
	    !gart_iommu_aperture ||
	    (no_agp && init_k8_gatt(&info) < 0)) {
		if (max_pfn > MAX_DMA32_PFN) {
			printk(KERN_WARNING "More than 4GB of memory "
			       "but GART IOMMU not available.\n"
			       KERN_WARNING "falling back to iommu=soft.\n");
		}
		return;
	}

	printk(KERN_INFO "PCI-DMA: using GART IOMMU.\n");
	aper_size = info.aper_size * 1024 * 1024;
	iommu_size = check_iommu_size(info.aper_base, aper_size);
	iommu_pages = iommu_size >> PAGE_SHIFT;

	iommu_gart_bitmap = (void *) __get_free_pages(GFP_KERNEL,
						      get_order(iommu_pages/8));
	if (!iommu_gart_bitmap)
		panic("Cannot allocate iommu bitmap\n");
	memset(iommu_gart_bitmap, 0, iommu_pages/8);

#ifdef CONFIG_IOMMU_LEAK
	if (leak_trace) {
		iommu_leak_tab = (void *)__get_free_pages(GFP_KERNEL,
				  get_order(iommu_pages*sizeof(void *)));
		if (iommu_leak_tab)
			memset(iommu_leak_tab, 0, iommu_pages * 8);
		else
			printk(KERN_DEBUG
			       "PCI-DMA: Cannot allocate leak trace area\n");
	}
#endif

	/*
	 * Out of IOMMU space handling.
	 * Reserve some invalid pages at the beginning of the GART.
	 */
	set_bit_string(iommu_gart_bitmap, 0, EMERGENCY_PAGES);

	agp_memory_reserved = iommu_size;
	printk(KERN_INFO
	       "PCI-DMA: Reserving %luMB of IOMMU area in the AGP aperture\n",
	       iommu_size >> 20);

	iommu_start = aper_size - iommu_size;
	iommu_bus_base = info.aper_base + iommu_start;
	bad_dma_address = iommu_bus_base;
	iommu_gatt_base = agp_gatt_table + (iommu_start>>PAGE_SHIFT);

	/*
	 * Unmap the IOMMU part of the GART. The alias of the page is
	 * always mapped with cache enabled and there is no full cache
	 * coherency across the GART remapping. The unmapping avoids
	 * automatic prefetches from the CPU allocating cache lines in
	 * there. All CPU accesses are done via the direct mapping to
	 * the backing memory. The GART address is only used by PCI
	 * devices.
	 */
	set_memory_np((unsigned long)__va(iommu_bus_base),
		      iommu_size >> PAGE_SHIFT);
	/*
	 * Tricky. The GART table remaps the physical memory range,
	 * so the CPU won't notice potential aliases and if the memory
	 * is remapped to UC later on, we might surprise the PCI devices
	 * with a stray writeout of a cacheline. So play it safe and
	 * do an explicit, full-scale wbinvd() _after_ having marked all
	 * the pages as Not-Present:
	 */
	wbinvd();

	/*
	 * Try to work around a bug (thanks to BenH):
	 * Set unmapped entries to a scratch page instead of 0.
	 * Any prefetches that hit unmapped entries won't get a bus abort
	 * then. (P2P bridge may be prefetching on DMA reads).
	 */
	scratch = get_zeroed_page(GFP_KERNEL);
	if (!scratch)
		panic("Cannot allocate iommu scratch page");
	gart_unmapped_entry = GPTE_ENCODE(__pa(scratch));
	for (i = EMERGENCY_PAGES; i < iommu_pages; i++)
		iommu_gatt_base[i] = gart_unmapped_entry;

	flush_gart();
	dma_ops = &gart_dma_ops;
}

void __init gart_parse_options(char *p)
{
	int arg;

#ifdef CONFIG_IOMMU_LEAK
	if (!strncmp(p, "leak", 4)) {
		leak_trace = 1;
		p += 4;
		if (*p == '=')
			++p;
		if (isdigit(*p) && get_option(&p, &arg))
			iommu_leak_pages = arg;
	}
#endif
	if (isdigit(*p) && get_option(&p, &arg))
		iommu_size = arg;
	if (!strncmp(p, "fullflush", 8))
		iommu_fullflush = 1;
	if (!strncmp(p, "nofullflush", 11))
		iommu_fullflush = 0;
	if (!strncmp(p, "noagp", 5))
		no_agp = 1;
	if (!strncmp(p, "noaperture", 10))
		fix_aperture = 0;
	/* duplicated from pci-dma.c */
	if (!strncmp(p, "force", 5))
		gart_iommu_aperture_allowed = 1;
	if (!strncmp(p, "allowed", 7))
		gart_iommu_aperture_allowed = 1;
	if (!strncmp(p, "memaper", 7)) {
		fallback_aper_force = 1;
		p += 7;
		if (*p == '=') {
			++p;
			if (get_option(&p, &arg))
				fallback_aper_order = arg;
		}
	}
}
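
/*
 * Usage sketch (added commentary, option spellings taken from the
 * strncmp checks above): these tokens arrive via the iommu= kernel
 * parameter, e.g. "iommu=fullflush" to flush the GART on every mapping
 * or "iommu=memaper=2" to force a fallback aperture of that order. The
 * iommu= handler in pci-dma.c walks the comma-separated option list and
 * hands each token to this function.
 */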