/*
 * Dynamic DMA mapping support for AMD Hammer.
 *
 * Use the integrated AGP GART in the Hammer northbridge as an IOMMU for PCI.
 * This allows the use of PCI devices that only support 32bit addresses on
 * systems with more than 4GB of memory.
 *
 * See Documentation/DMA-mapping.txt for the interface specification.
 *
 * Copyright 2002 Andi Kleen, SuSE Labs.
 * Subject to the GNU General Public License v2 only.
 */

#include <linux/types.h>
#include <linux/ctype.h>
#include <linux/agp_backend.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/string.h>
#include <linux/spinlock.h>
#include <linux/pci.h>
#include <linux/module.h>
#include <linux/topology.h>
#include <linux/interrupt.h>
#include <linux/bitops.h>
#include <linux/kdebug.h>
#include <linux/scatterlist.h>
#include <asm/atomic.h>
#include <asm/io.h>
#include <asm/mtrr.h>
#include <asm/pgtable.h>
#include <asm/proto.h>
#include <asm/gart.h>
#include <asm/cacheflush.h>
#include <asm/swiotlb.h>
#include <asm/dma.h>
#include <asm/k8.h>

static unsigned long iommu_bus_base;	/* GART remapping area (physical) */
static unsigned long iommu_size;	/* size of remapping area in bytes */
static unsigned long iommu_pages;	/* .. and in pages */

static u32 *iommu_gatt_base;		/* Remapping table */

/*
 * If this is disabled the IOMMU will use an optimized flushing strategy
 * of only flushing when a mapping is reused. With it true the GART is
 * flushed for every mapping. The problem is that the lazy flush seems
 * to trigger bugs with some popular PCI cards, in particular 3ware (but
 * it has also been seen with Qlogic at least).
 */
int iommu_fullflush = 1;

/* Allocation bitmap for the remapping area: */
static DEFINE_SPINLOCK(iommu_bitmap_lock);
/* Guarded by iommu_bitmap_lock: */
static unsigned long *iommu_gart_bitmap;

static u32 gart_unmapped_entry;

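/*
 * GATT entry layout: each 32-bit entry maps one 4K page of a 40-bit
 * physical address space. Address bits 12-31 are stored in place and
 * bits 32-39 are folded into entry bits 4-11, alongside the valid and
 * coherent flag bits. A worked example: GPTE_ENCODE(0x123456000ULL)
 * yields 0x23456013, and GPTE_DECODE(0x23456013) recovers 0x123456000.
 */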
#define GPTE_VALID    1
#define GPTE_COHERENT 2
#define GPTE_ENCODE(x) \
	(((x) & 0xfffff000) | (((x) >> 32) << 4) | GPTE_VALID | GPTE_COHERENT)
#define GPTE_DECODE(x) (((x) & 0xfffff000) | (((u64)(x) & 0xff0) << 28))

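/*
 * Number of pages spanned by a mapping of @size bytes starting at
 * @addr, counting partial pages at both ends: e.g. with 4K pages,
 * to_pages(0x1234, 0x2000) == round_up(0x234 + 0x2000, 0x1000) >> 12
 * == 3.
 */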
#define to_pages(addr, size) \
	(round_up(((addr) & ~PAGE_MASK) + (size), PAGE_SIZE) >> PAGE_SHIFT)

#define EMERGENCY_PAGES 32 /* = 128KB */

#ifdef CONFIG_AGP
#define AGPEXTERN extern
#else
#define AGPEXTERN
#endif

/* backdoor interface to AGP driver */
AGPEXTERN int agp_memory_reserved;
AGPEXTERN __u32 *agp_gatt_table;

static unsigned long next_bit;	/* protected by iommu_bitmap_lock */
static int need_flush;		/* global flush state. set for each gart wrap */

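/*
 * Allocate a run of @size contiguous IOMMU pages with a next-fit
 * search of the allocation bitmap, starting at next_bit. When the
 * search wraps around to the start of the aperture, old mappings may
 * be reused, so a GART TLB flush is scheduled via need_flush.
 */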
static unsigned long alloc_iommu(int size)
{
	unsigned long offset, flags;

	spin_lock_irqsave(&iommu_bitmap_lock, flags);
	offset = find_next_zero_string(iommu_gart_bitmap, next_bit,
				       iommu_pages, size);
	if (offset == -1) {
		need_flush = 1;
		offset = find_next_zero_string(iommu_gart_bitmap, 0,
					       iommu_pages, size);
	}
	if (offset != -1) {
		set_bit_string(iommu_gart_bitmap, offset, size);
		next_bit = offset+size;
		if (next_bit >= iommu_pages) {
			next_bit = 0;
			need_flush = 1;
		}
	}
	if (iommu_fullflush)
		need_flush = 1;
	spin_unlock_irqrestore(&iommu_bitmap_lock, flags);

	return offset;
}

static void free_iommu(unsigned long offset, int size)
{
	unsigned long flags;

	spin_lock_irqsave(&iommu_bitmap_lock, flags);
	__clear_bit_string(iommu_gart_bitmap, offset, size);
	spin_unlock_irqrestore(&iommu_bitmap_lock, flags);
}

/*
 * Use global flush state to avoid races with multiple flushers.
 */
static void flush_gart(void)
{
	unsigned long flags;

	spin_lock_irqsave(&iommu_bitmap_lock, flags);
	if (need_flush) {
		k8_flush_garts();
		need_flush = 0;
	}
	spin_unlock_irqrestore(&iommu_bitmap_lock, flags);
}

#ifdef CONFIG_IOMMU_LEAK

#define SET_LEAK(x)							\
	do {								\
		if (iommu_leak_tab)					\
			iommu_leak_tab[x] = __builtin_return_address(0);\
	} while (0)

#define CLEAR_LEAK(x)							\
	do {								\
		if (iommu_leak_tab)					\
			iommu_leak_tab[x] = NULL;			\
	} while (0)

/* Debugging aid for drivers that don't free their IOMMU tables */
static void **iommu_leak_tab;
static int leak_trace;
static int iommu_leak_pages = 20;

static void dump_leak(void)
{
	int i;
	static int dump;

	if (dump || !iommu_leak_tab)
		return;
	dump = 1;
	show_stack(NULL, NULL);

	/* Very crude. dump some from the end of the table too */
	printk(KERN_DEBUG "Dumping %d pages from end of IOMMU:\n",
	       iommu_leak_pages);
	for (i = 0; i < iommu_leak_pages; i += 2) {
		printk(KERN_DEBUG "%lu: ", iommu_pages-i-1);
		printk_address((unsigned long) iommu_leak_tab[iommu_pages-i-1],
			       0);
		printk(KERN_CONT "%c", (i+1)%2 == 0 ? '\n' : ' ');
	}
	printk(KERN_DEBUG "\n");
}
#else
# define SET_LEAK(x)
# define CLEAR_LEAK(x)
#endif

static void iommu_full(struct device *dev, size_t size, int dir)
{
	/*
	 * Ran out of IOMMU space for this operation. This is very bad.
	 * Unfortunately the drivers cannot handle this operation properly.
	 * Return some non-mapped, prereserved space in the aperture and
	 * let the Northbridge deal with it. This will result in garbage
	 * in the IO operation. When the size exceeds the prereserved space
	 * memory corruption will occur or random memory will be DMAed
	 * out. Hopefully no network devices use single mappings that big.
	 */

	printk(KERN_ERR
		"PCI-DMA: Out of IOMMU space for %lu bytes at device %s\n",
		size, dev->bus_id);

	if (size > PAGE_SIZE*EMERGENCY_PAGES) {
		if (dir == PCI_DMA_FROMDEVICE || dir == PCI_DMA_BIDIRECTIONAL)
			panic("PCI-DMA: Memory would be corrupted\n");
		if (dir == PCI_DMA_TODEVICE || dir == PCI_DMA_BIDIRECTIONAL)
			panic("PCI-DMA: Random memory would be DMAed\n");
	}
#ifdef CONFIG_IOMMU_LEAK
	dump_leak();
#endif
}

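/*
 * A mapping needs the IOMMU when the physical range does not fit the
 * device's DMA mask; need_iommu() additionally honours force_iommu,
 * while nonforced_iommu() checks the mask alone.
 */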
static inline int
need_iommu(struct device *dev, unsigned long addr, size_t size)
{
	u64 mask = *dev->dma_mask;
	int high = addr + size > mask;
	int mmu = high;

	if (force_iommu)
		mmu = 1;

	return mmu;
}

static inline int
nonforced_iommu(struct device *dev, unsigned long addr, size_t size)
{
	u64 mask = *dev->dma_mask;
	int high = addr + size > mask;
	int mmu = high;

	return mmu;
}

/*
 * Map a single contiguous physical area into the IOMMU.
 * Caller needs to check if the iommu is needed and flush.
 */
static dma_addr_t dma_map_area(struct device *dev, dma_addr_t phys_mem,
			       size_t size, int dir)
{
	unsigned long npages = to_pages(phys_mem, size);
	unsigned long iommu_page = alloc_iommu(npages);
	int i;

	if (iommu_page == -1) {
		if (!nonforced_iommu(dev, phys_mem, size))
			return phys_mem;
		if (panic_on_overflow)
			panic("dma_map_area overflow %lu bytes\n", size);
		iommu_full(dev, size, dir);
		return bad_dma_address;
	}

	for (i = 0; i < npages; i++) {
		iommu_gatt_base[iommu_page + i] = GPTE_ENCODE(phys_mem);
		SET_LEAK(iommu_page + i);
		phys_mem += PAGE_SIZE;
	}
	return iommu_bus_base + iommu_page*PAGE_SIZE + (phys_mem & ~PAGE_MASK);
}

static dma_addr_t
gart_map_simple(struct device *dev, char *buf, size_t size, int dir)
{
	dma_addr_t map = dma_map_area(dev, virt_to_bus(buf), size, dir);

	flush_gart();

	return map;
}

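/*
 * A usage sketch (not part of this file, pdev being a hypothetical
 * struct pci_dev *): once gart_dma_ops is installed, a driver's
 * generic DMA API call such as
 *
 *	dma_addr_t bus = dma_map_single(&pdev->dev, buf, size,
 *					DMA_TO_DEVICE);
 *	...
 *	dma_unmap_single(&pdev->dev, bus, size, DMA_TO_DEVICE);
 *
 * is dispatched through dma_ops to gart_map_single() and
 * gart_unmap_single() below.
 */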
/* Map a single area into the IOMMU */
static dma_addr_t
gart_map_single(struct device *dev, void *addr, size_t size, int dir)
{
	unsigned long phys_mem, bus;

	if (!dev)
		dev = &fallback_dev;

	phys_mem = virt_to_phys(addr);
	if (!need_iommu(dev, phys_mem, size))
		return phys_mem;

	bus = gart_map_simple(dev, addr, size, dir);

	return bus;
}

/*
 * Free a DMA mapping.
 */
static void gart_unmap_single(struct device *dev, dma_addr_t dma_addr,
			      size_t size, int direction)
{
	unsigned long iommu_page;
	int npages;
	int i;

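	/*
	 * Addresses outside the remapping area were returned untranslated
	 * by gart_map_single(), and the window at its start is the
	 * reserved emergency/bad_dma_address region; neither was ever
	 * remapped, so there is nothing to free for them.
	 */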
	if (dma_addr < iommu_bus_base + EMERGENCY_PAGES*PAGE_SIZE ||
	    dma_addr >= iommu_bus_base + iommu_size)
		return;

	iommu_page = (dma_addr - iommu_bus_base)>>PAGE_SHIFT;
	npages = to_pages(dma_addr, size);
	for (i = 0; i < npages; i++) {
		iommu_gatt_base[iommu_page + i] = gart_unmapped_entry;
		CLEAR_LEAK(iommu_page + i);
	}
	free_iommu(iommu_page, npages);
}

/*
 * Wrapper for pci_unmap_single working with scatterlists.
 */
static void
gart_unmap_sg(struct device *dev, struct scatterlist *sg, int nents, int dir)
{
	struct scatterlist *s;
	int i;

	for_each_sg(sg, s, nents, i) {
		if (!s->dma_length || !s->length)
			break;
		gart_unmap_single(dev, s->dma_address, s->dma_length, dir);
	}
}

/* Fallback for dma_map_sg in case of overflow */
static int dma_map_sg_nonforce(struct device *dev, struct scatterlist *sg,
			       int nents, int dir)
{
	struct scatterlist *s;
	int i;

#ifdef CONFIG_IOMMU_DEBUG
	printk(KERN_DEBUG "dma_map_sg overflow\n");
#endif

	for_each_sg(sg, s, nents, i) {
		unsigned long addr = sg_phys(s);

		if (nonforced_iommu(dev, addr, s->length)) {
			addr = dma_map_area(dev, addr, s->length, dir);
			if (addr == bad_dma_address) {
				if (i > 0)
					gart_unmap_sg(dev, sg, i, dir);
				nents = 0;
				sg[0].dma_length = 0;
				break;
			}
		}
		s->dma_address = addr;
		s->dma_length = s->length;
	}
	flush_gart();

	return nents;
}

/* Map multiple scatterlist entries contiguously into the first. */
static int __dma_map_cont(struct scatterlist *start, int nelems,
			  struct scatterlist *sout, unsigned long pages)
{
	unsigned long iommu_start = alloc_iommu(pages);
	unsigned long iommu_page = iommu_start;
	struct scatterlist *s;
	int i;

	if (iommu_start == -1)
		return -1;

	for_each_sg(start, s, nelems, i) {
		unsigned long pages, addr;
		unsigned long phys_addr = s->dma_address;

		BUG_ON(s != start && s->offset);
		if (s == start) {
			sout->dma_address = iommu_bus_base;
			sout->dma_address += iommu_page*PAGE_SIZE + s->offset;
			sout->dma_length = s->length;
		} else {
			sout->dma_length += s->length;
		}

		addr = phys_addr;
		pages = to_pages(s->offset, s->length);
		while (pages--) {
			iommu_gatt_base[iommu_page] = GPTE_ENCODE(addr);
			SET_LEAK(iommu_page);
			addr += PAGE_SIZE;
			iommu_page++;
		}
	}
	BUG_ON(iommu_page - iommu_start != pages);

	return 0;
}

static inline int
dma_map_cont(struct scatterlist *start, int nelems, struct scatterlist *sout,
	     unsigned long pages, int need)
{
	if (!need) {
		BUG_ON(nelems != 1);
		sout->dma_address = start->dma_address;
		sout->dma_length = start->length;
		return 0;
	}
	return __dma_map_cont(start, nelems, sout, pages);
}

/*
 * DMA map all entries in a scatterlist.
 * Merge chunks that have page-aligned sizes into a contiguous mapping.
 */
static int
gart_map_sg(struct device *dev, struct scatterlist *sg, int nents, int dir)
{
	struct scatterlist *s, *ps, *start_sg, *sgmap;
	int need = 0, nextneed, i, out, start;
	unsigned long pages = 0;

	if (nents == 0)
		return 0;

	if (!dev)
		dev = &fallback_dev;

	out = 0;
	start = 0;
	start_sg = sgmap = sg;
	ps = NULL; /* shut up gcc */
	for_each_sg(sg, s, nents, i) {
		dma_addr_t addr = sg_phys(s);

		s->dma_address = addr;
		BUG_ON(s->length == 0);

		nextneed = need_iommu(dev, addr, s->length);

		/* Handle the previous not yet processed entries */
		if (i > start) {
			/*
			 * Can only merge when the last chunk ends on a
			 * page boundary and the new one doesn't have an
			 * offset.
			 */
			if (!iommu_merge || !nextneed || !need || s->offset ||
			    (ps->offset + ps->length) % PAGE_SIZE) {
				if (dma_map_cont(start_sg, i - start, sgmap,
						 pages, need) < 0)
					goto error;
				out++;
				sgmap = sg_next(sgmap);
				pages = 0;
				start = i;
				start_sg = s;
			}
		}

		need = nextneed;
		pages += to_pages(s->offset, s->length);
		ps = s;
	}
	if (dma_map_cont(start_sg, i - start, sgmap, pages, need) < 0)
		goto error;
	out++;
	flush_gart();
	if (out < nents) {
		sgmap = sg_next(sgmap);
		sgmap->dma_length = 0;
	}
	return out;

error:
	flush_gart();
	gart_unmap_sg(dev, sg, out, dir);

	/* When merging or forcing was used, try again in a dumb way */
	if (force_iommu || iommu_merge) {
		out = dma_map_sg_nonforce(dev, sg, nents, dir);
		if (out > 0)
			return out;
	}
	if (panic_on_overflow)
		panic("dma_map_sg: overflow on %lu pages\n", pages);

	iommu_full(dev, pages << PAGE_SHIFT, dir);
	for_each_sg(sg, s, nents, i)
		s->dma_address = bad_dma_address;
	return 0;
}

static int no_agp;

static __init unsigned long check_iommu_size(unsigned long aper, u64 aper_size)
{
	unsigned long a;

	if (!iommu_size) {
		iommu_size = aper_size;
		if (!no_agp)
			iommu_size /= 2;
	}

	a = aper + iommu_size;
	iommu_size -= round_up(a, LARGE_PAGE_SIZE) - a;

	if (iommu_size < 64*1024*1024) {
		printk(KERN_WARNING
		       "PCI-DMA: Warning: Small IOMMU %luMB."
		       " Consider increasing the AGP aperture in BIOS\n",
		       iommu_size >> 20);
	}

	return iommu_size;
}

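/*
 * Read the aperture from the K8 northbridge's AGP registers: config
 * offset 0x90 carries the aperture order in bits 1-3 (size is
 * 32MB << order, so order 1 means 64MB), and offset 0x94 the aperture
 * base in 32MB units (bits 0-14, shifted left by 25 below).
 */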
static __init unsigned read_aperture(struct pci_dev *dev, u32 *size)
{
	unsigned aper_size = 0, aper_base_32, aper_order;
	u64 aper_base;

	pci_read_config_dword(dev, 0x94, &aper_base_32);
	pci_read_config_dword(dev, 0x90, &aper_order);
	aper_order = (aper_order >> 1) & 7;

	aper_base = aper_base_32 & 0x7fff;
	aper_base <<= 25;

	aper_size = (32 * 1024 * 1024) << aper_order;
	if (aper_base + aper_size > 0x100000000UL || !aper_size)
		aper_base = 0;

	*size = aper_size;
	return aper_base;
}

/*
 * Private Northbridge GATT initialization in case we cannot use the
 * AGP driver for some reason.
 */
static __init int init_k8_gatt(struct agp_kern_info *info)
{
	unsigned aper_size, gatt_size, new_aper_size;
	unsigned aper_base, new_aper_base;
	struct pci_dev *dev;
	void *gatt;
	int i;

	printk(KERN_INFO "PCI-DMA: Disabling AGP.\n");
	aper_size = aper_base = info->aper_size = 0;
	dev = NULL;
	for (i = 0; i < num_k8_northbridges; i++) {
		dev = k8_northbridges[i];
		new_aper_base = read_aperture(dev, &new_aper_size);
		if (!new_aper_base)
			goto nommu;

		if (!aper_base) {
			aper_size = new_aper_size;
			aper_base = new_aper_base;
		}
		if (aper_size != new_aper_size || aper_base != new_aper_base)
			goto nommu;
	}
	if (!aper_base)
		goto nommu;
	info->aper_base = aper_base;
	info->aper_size = aper_size >> 20;

	gatt_size = (aper_size >> PAGE_SHIFT) * sizeof(u32);
	gatt = (void *)__get_free_pages(GFP_KERNEL, get_order(gatt_size));
	if (!gatt)
		panic("Cannot allocate GATT table");
	if (change_page_attr_addr((unsigned long)gatt, gatt_size >> PAGE_SHIFT,
				  PAGE_KERNEL_NOCACHE))
		panic("Could not set GART PTEs to uncacheable pages");
	global_flush_tlb();

	memset(gatt, 0, gatt_size);
	agp_gatt_table = gatt;

	for (i = 0; i < num_k8_northbridges; i++) {
		u32 gatt_reg;
		u32 ctl;

		dev = k8_northbridges[i];
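		/*
		 * Program the GATT base register (config offset 0x98):
		 * physical address bits 12-39 of the table go into
		 * register bits 4-31. Then enable translation in the
		 * aperture control register (0x90): bit 0 is GARTEN,
		 * and bits 4 and 5 (DISGARTCPU/DISGARTIO) are cleared
		 * so both CPU and IO accesses are translated.
		 */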
		gatt_reg = __pa(gatt) >> 12;
		gatt_reg <<= 4;
		pci_write_config_dword(dev, 0x98, gatt_reg);
		pci_read_config_dword(dev, 0x90, &ctl);

		ctl |= 1;
		ctl &= ~((1<<4) | (1<<5));

		pci_write_config_dword(dev, 0x90, ctl);
	}
	flush_gart();

	printk(KERN_INFO "PCI-DMA: aperture base @ %x size %u KB\n",
	       aper_base, aper_size>>10);
	return 0;

 nommu:
	/* Should not happen anymore */
	printk(KERN_ERR "PCI-DMA: More than 4GB of RAM and no IOMMU\n"
	       KERN_ERR "PCI-DMA: 32bit PCI IO may malfunction.\n");
	return -1;
}

extern int agp_amd64_init(void);

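/*
 * The sync hooks are left NULL: GART entries are installed with
 * GPTE_COHERENT, so no extra CPU/device synchronization step appears
 * to be required for this IOMMU.
 */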
static const struct dma_mapping_ops gart_dma_ops = {
	.mapping_error			= NULL,
	.map_single			= gart_map_single,
	.map_simple			= gart_map_simple,
	.unmap_single			= gart_unmap_single,
	.sync_single_for_cpu		= NULL,
	.sync_single_for_device		= NULL,
	.sync_single_range_for_cpu	= NULL,
	.sync_single_range_for_device	= NULL,
	.sync_sg_for_cpu		= NULL,
	.sync_sg_for_device		= NULL,
	.map_sg				= gart_map_sg,
	.unmap_sg			= gart_unmap_sg,
};

void gart_iommu_shutdown(void)
{
	struct pci_dev *dev;
	int i;

	if (no_agp && (dma_ops != &gart_dma_ops))
		return;

	for (i = 0; i < num_k8_northbridges; i++) {
		u32 ctl;

		dev = k8_northbridges[i];
		pci_read_config_dword(dev, 0x90, &ctl);

		ctl &= ~1;

		pci_write_config_dword(dev, 0x90, ctl);
	}
}

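/*
 * Initialize the GART IOMMU: locate the AMD northbridges, set up (or
 * take over) the GATT, carve the IOMMU region out of the aperture,
 * allocate the allocation bitmap, reserve the emergency pages and
 * install gart_dma_ops. Bails out early when swiotlb or another
 * hardware IOMMU already handles DMA mapping.
 */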
void __init gart_iommu_init(void)
{
	struct agp_kern_info info;
	unsigned long iommu_start;
	unsigned long aper_size;
	unsigned long scratch;
	long i;

	if (cache_k8_northbridges() < 0 || num_k8_northbridges == 0) {
		printk(KERN_INFO "PCI-GART: No AMD northbridge found.\n");
		return;
	}

#ifndef CONFIG_AGP_AMD64
	no_agp = 1;
#else
	/* Makefile puts PCI initialization via subsys_initcall first. */
	/* Add other K8 AGP bridge drivers here */
	no_agp = no_agp ||
		 (agp_amd64_init() < 0) ||
		 (agp_copy_info(agp_bridge, &info) < 0);
#endif

	if (swiotlb)
		return;

	/* Did we detect a different HW IOMMU? */
	if (iommu_detected && !gart_iommu_aperture)
		return;

	if (no_iommu ||
	    (!force_iommu && end_pfn <= MAX_DMA32_PFN) ||
	    !gart_iommu_aperture ||
	    (no_agp && init_k8_gatt(&info) < 0)) {
		if (end_pfn > MAX_DMA32_PFN) {
			printk(KERN_ERR "WARNING more than 4GB of memory "
			       "but GART IOMMU not available.\n"
			       KERN_ERR "WARNING 32bit PCI may malfunction.\n");
		}
		return;
	}

	printk(KERN_INFO "PCI-DMA: using GART IOMMU.\n");
	aper_size = info.aper_size * 1024 * 1024;
	iommu_size = check_iommu_size(info.aper_base, aper_size);
	iommu_pages = iommu_size >> PAGE_SHIFT;

	iommu_gart_bitmap = (void *) __get_free_pages(GFP_KERNEL,
						      get_order(iommu_pages/8));
	if (!iommu_gart_bitmap)
		panic("Cannot allocate iommu bitmap\n");
	memset(iommu_gart_bitmap, 0, iommu_pages/8);

#ifdef CONFIG_IOMMU_LEAK
	if (leak_trace) {
		iommu_leak_tab = (void *)__get_free_pages(GFP_KERNEL,
				  get_order(iommu_pages*sizeof(void *)));
		if (iommu_leak_tab)
			memset(iommu_leak_tab, 0, iommu_pages * 8);
		else
			printk(KERN_DEBUG
			       "PCI-DMA: Cannot allocate leak trace area\n");
	}
#endif

	/*
	 * Out of IOMMU space handling.
	 * Reserve some invalid pages at the beginning of the GART.
	 */
	set_bit_string(iommu_gart_bitmap, 0, EMERGENCY_PAGES);

	agp_memory_reserved = iommu_size;
	printk(KERN_INFO
	       "PCI-DMA: Reserving %luMB of IOMMU area in the AGP aperture\n",
	       iommu_size >> 20);

	iommu_start = aper_size - iommu_size;
	iommu_bus_base = info.aper_base + iommu_start;
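	/*
	 * bad_dma_address points into the reserved emergency window at
	 * the start of the remapping area: failed mappings thus hit
	 * prereserved, never-mapped GART pages instead of random memory.
	 */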
	bad_dma_address = iommu_bus_base;
	iommu_gatt_base = agp_gatt_table + (iommu_start>>PAGE_SHIFT);

	/*
	 * Unmap the IOMMU part of the GART. The alias of the page is
	 * always mapped with cache enabled and there is no full cache
	 * coherency across the GART remapping. The unmapping avoids
	 * automatic prefetches from the CPU allocating cache lines in
	 * there. All CPU accesses are done via the direct mapping to
	 * the backing memory. The GART address is only used by PCI
	 * devices.
	 */
	clear_kernel_mapping((unsigned long)__va(iommu_bus_base), iommu_size);

	/*
	 * Try to workaround a bug (thanks to BenH):
	 * Set unmapped entries to a scratch page instead of 0.
	 * Any prefetches that hit unmapped entries won't get a bus abort
	 * then.
	 */
	scratch = get_zeroed_page(GFP_KERNEL);
	if (!scratch)
		panic("Cannot allocate iommu scratch page");
	gart_unmapped_entry = GPTE_ENCODE(__pa(scratch));
	for (i = EMERGENCY_PAGES; i < iommu_pages; i++)
		iommu_gatt_base[i] = gart_unmapped_entry;

	flush_gart();
	dma_ops = &gart_dma_ops;
}

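/*
 * Parse GART-specific options: "fullflush", "nofullflush", "noagp",
 * "noaperture", "force", "allowed", "memaper[=order]", "leak[=pages]"
 * (with CONFIG_IOMMU_LEAK), or a plain number setting the IOMMU size
 * in bytes. Typically reached from the iommu= boot option parsing
 * (see pci-dma.c, as the comment below notes).
 */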
void __init gart_parse_options(char *p)
{
	int arg;

#ifdef CONFIG_IOMMU_LEAK
	if (!strncmp(p, "leak", 4)) {
		leak_trace = 1;
		p += 4;
		if (*p == '=')
			++p;
		if (isdigit(*p) && get_option(&p, &arg))
			iommu_leak_pages = arg;
	}
#endif
	if (isdigit(*p) && get_option(&p, &arg))
		iommu_size = arg;
	if (!strncmp(p, "fullflush", 9))
		iommu_fullflush = 1;
	if (!strncmp(p, "nofullflush", 11))
		iommu_fullflush = 0;
	if (!strncmp(p, "noagp", 5))
		no_agp = 1;
	if (!strncmp(p, "noaperture", 10))
		fix_aperture = 0;
	/* duplicated from pci-dma.c */
	if (!strncmp(p, "force", 5))
		gart_iommu_aperture_allowed = 1;
	if (!strncmp(p, "allowed", 7))
		gart_iommu_aperture_allowed = 1;
	if (!strncmp(p, "memaper", 7)) {
		fallback_aper_force = 1;
		p += 7;
		if (*p == '=') {
			++p;
			if (get_option(&p, &arg))
				fallback_aper_order = arg;
		}
	}
}