x86: cpa, fix lookup_address
arch/x86/kernel/pci-gart_64.c
/*
 * Dynamic DMA mapping support for AMD Hammer.
 *
 * Use the integrated AGP GART in the Hammer northbridge as an IOMMU for PCI.
 * This allows PCI devices that only support 32bit addresses to be used on
 * systems with more than 4GB.
 *
 * See Documentation/DMA-mapping.txt for the interface specification.
 *
 * Copyright 2002 Andi Kleen, SuSE Labs.
 * Subject to the GNU General Public License v2 only.
 */

#include <linux/types.h>
#include <linux/ctype.h>
#include <linux/agp_backend.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/string.h>
#include <linux/spinlock.h>
#include <linux/pci.h>
#include <linux/module.h>
#include <linux/topology.h>
#include <linux/interrupt.h>
#include <linux/bitops.h>
#include <linux/kdebug.h>
#include <linux/scatterlist.h>
#include <asm/atomic.h>
#include <asm/io.h>
#include <asm/mtrr.h>
#include <asm/pgtable.h>
#include <asm/proto.h>
#include <asm/gart.h>
#include <asm/cacheflush.h>
#include <asm/swiotlb.h>
#include <asm/dma.h>
#include <asm/k8.h>

static unsigned long iommu_bus_base;	/* GART remapping area (physical) */
static unsigned long iommu_size;	/* size of remapping area in bytes */
static unsigned long iommu_pages;	/* .. and in pages */

static u32 *iommu_gatt_base;		/* Remapping table */

/*
 * If this is disabled the IOMMU will use an optimized flushing strategy
 * of only flushing when a mapping is reused. With it true the GART is
 * flushed for every mapping. Problem is that doing the lazy flush seems
 * to trigger bugs with some popular PCI cards, in particular 3ware (but
 * it has also been seen with Qlogic at least).
 */
int iommu_fullflush = 1;

/* Allocation bitmap for the remapping area: */
static DEFINE_SPINLOCK(iommu_bitmap_lock);
/* Guarded by iommu_bitmap_lock: */
static unsigned long *iommu_gart_bitmap;

static u32 gart_unmapped_entry;

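/*
 * GART PTE format, as produced by GPTE_ENCODE below: bits 31..12 hold
 * the low part of the physical page address, bits 11..4 hold physical
 * address bits 39..32, and bits 1..0 are the coherent/valid flags.
 */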
#define GPTE_VALID    1
#define GPTE_COHERENT 2
#define GPTE_ENCODE(x) \
	(((x) & 0xfffff000) | (((x) >> 32) << 4) | GPTE_VALID | GPTE_COHERENT)
#define GPTE_DECODE(x) (((x) & 0xfffff000) | (((u64)(x) & 0xff0) << 28))

#define to_pages(addr, size) \
	(round_up(((addr) & ~PAGE_MASK) + (size), PAGE_SIZE) >> PAGE_SHIFT)

#define EMERGENCY_PAGES 32 /* = 128KB */

#ifdef CONFIG_AGP
#define AGPEXTERN extern
#else
#define AGPEXTERN
#endif

/* backdoor interface to AGP driver */
AGPEXTERN int agp_memory_reserved;
AGPEXTERN __u32 *agp_gatt_table;

static unsigned long next_bit;	/* protected by iommu_bitmap_lock */
static int need_flush;		/* global flush state. set for each gart wrap */

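/*
 * Allocate a range of IOMMU pages with a next-fit search through the
 * allocation bitmap; on reaching the end of the aperture, wrap around
 * to the start and schedule a GART flush so stale entries are not
 * reused before being invalidated.
 */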
static unsigned long alloc_iommu(int size)
{
	unsigned long offset, flags;

	spin_lock_irqsave(&iommu_bitmap_lock, flags);
	offset = find_next_zero_string(iommu_gart_bitmap, next_bit,
					iommu_pages, size);
	if (offset == -1) {
		need_flush = 1;
		offset = find_next_zero_string(iommu_gart_bitmap, 0,
						iommu_pages, size);
	}
	if (offset != -1) {
		set_bit_string(iommu_gart_bitmap, offset, size);
		next_bit = offset+size;
		if (next_bit >= iommu_pages) {
			next_bit = 0;
			need_flush = 1;
		}
	}
	if (iommu_fullflush)
		need_flush = 1;
	spin_unlock_irqrestore(&iommu_bitmap_lock, flags);

	return offset;
}

static void free_iommu(unsigned long offset, int size)
{
	unsigned long flags;

	spin_lock_irqsave(&iommu_bitmap_lock, flags);
	__clear_bit_string(iommu_gart_bitmap, offset, size);
	spin_unlock_irqrestore(&iommu_bitmap_lock, flags);
}

/*
 * Use global flush state to avoid races with multiple flushers.
 */
static void flush_gart(void)
{
	unsigned long flags;

	spin_lock_irqsave(&iommu_bitmap_lock, flags);
	if (need_flush) {
		k8_flush_garts();
		need_flush = 0;
	}
	spin_unlock_irqrestore(&iommu_bitmap_lock, flags);
}

#ifdef CONFIG_IOMMU_LEAK

#define SET_LEAK(x)							\
	do {								\
		if (iommu_leak_tab)					\
			iommu_leak_tab[x] = __builtin_return_address(0);\
	} while (0)

#define CLEAR_LEAK(x)							\
	do {								\
		if (iommu_leak_tab)					\
			iommu_leak_tab[x] = NULL;			\
	} while (0)

/* Debugging aid for drivers that don't free their IOMMU tables */
static void **iommu_leak_tab;
static int leak_trace;
static int iommu_leak_pages = 20;

static void dump_leak(void)
{
	int i;
	static int dump;

	if (dump || !iommu_leak_tab)
		return;
	dump = 1;
	show_stack(NULL, NULL);

	/* Very crude. dump some from the end of the table too */
	printk(KERN_DEBUG "Dumping %d pages from end of IOMMU:\n",
	       iommu_leak_pages);
	for (i = 0; i < iommu_leak_pages; i += 2) {
		printk(KERN_DEBUG "%lu: ", iommu_pages-i);
		printk_address((unsigned long) iommu_leak_tab[iommu_pages-i], 0);
		printk(KERN_CONT "%c", (i+1)%2 == 0 ? '\n' : ' ');
	}
	printk(KERN_DEBUG "\n");
}
#else
# define SET_LEAK(x)
# define CLEAR_LEAK(x)
#endif

static void iommu_full(struct device *dev, size_t size, int dir)
{
	/*
	 * Ran out of IOMMU space for this operation. This is very bad.
	 * Unfortunately the drivers cannot handle this operation properly.
	 * Return some non mapped prereserved space in the aperture and
	 * let the Northbridge deal with it. This will result in garbage
	 * in the IO operation. When the size exceeds the prereserved space
	 * memory corruption will occur or random memory will be DMAed
	 * out. Hopefully no network devices use single mappings that big.
	 */

	printk(KERN_ERR
		"PCI-DMA: Out of IOMMU space for %lu bytes at device %s\n",
		size, dev->bus_id);

	if (size > PAGE_SIZE*EMERGENCY_PAGES) {
		if (dir == PCI_DMA_FROMDEVICE || dir == PCI_DMA_BIDIRECTIONAL)
			panic("PCI-DMA: Memory would be corrupted\n");
		if (dir == PCI_DMA_TODEVICE || dir == PCI_DMA_BIDIRECTIONAL)
			panic("PCI-DMA: Random memory would be DMAed\n");
	}
#ifdef CONFIG_IOMMU_LEAK
	dump_leak();
#endif
}

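/*
 * Remapping is needed when the physical range ends beyond the device's
 * DMA mask, or when use of the IOMMU is forced for every device.
 */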
static inline int
need_iommu(struct device *dev, unsigned long addr, size_t size)
{
	u64 mask = *dev->dma_mask;
	int high = addr + size > mask;
	int mmu = high;

	if (force_iommu)
		mmu = 1;

	return mmu;
}

static inline int
nonforced_iommu(struct device *dev, unsigned long addr, size_t size)
{
	u64 mask = *dev->dma_mask;
	int high = addr + size > mask;
	int mmu = high;

	return mmu;
}

/* Map a single continuous physical area into the IOMMU.
 * Caller needs to check if the iommu is needed and flush.
 */
static dma_addr_t dma_map_area(struct device *dev, dma_addr_t phys_mem,
				size_t size, int dir)
{
	unsigned long npages = to_pages(phys_mem, size);
	unsigned long iommu_page = alloc_iommu(npages);
	int i;

	if (iommu_page == -1) {
		if (!nonforced_iommu(dev, phys_mem, size))
			return phys_mem;
		if (panic_on_overflow)
			panic("dma_map_area overflow %lu bytes\n", size);
		iommu_full(dev, size, dir);
		return bad_dma_address;
	}

	for (i = 0; i < npages; i++) {
		iommu_gatt_base[iommu_page + i] = GPTE_ENCODE(phys_mem);
		SET_LEAK(iommu_page + i);
		phys_mem += PAGE_SIZE;
	}
	return iommu_bus_base + iommu_page*PAGE_SIZE + (phys_mem & ~PAGE_MASK);
}

static dma_addr_t
gart_map_simple(struct device *dev, char *buf, size_t size, int dir)
{
	dma_addr_t map = dma_map_area(dev, virt_to_bus(buf), size, dir);

	flush_gart();

	return map;
}

/* Map a single area into the IOMMU */
static dma_addr_t
gart_map_single(struct device *dev, void *addr, size_t size, int dir)
{
	unsigned long phys_mem, bus;

	if (!dev)
		dev = &fallback_dev;

	phys_mem = virt_to_phys(addr);
	if (!need_iommu(dev, phys_mem, size))
		return phys_mem;

	bus = gart_map_simple(dev, addr, size, dir);

	return bus;
}

/*
 * Free a DMA mapping.
 */
static void gart_unmap_single(struct device *dev, dma_addr_t dma_addr,
			      size_t size, int direction)
{
	unsigned long iommu_page;
	int npages;
	int i;

	if (dma_addr < iommu_bus_base + EMERGENCY_PAGES*PAGE_SIZE ||
	    dma_addr >= iommu_bus_base + iommu_size)
		return;

	iommu_page = (dma_addr - iommu_bus_base)>>PAGE_SHIFT;
	npages = to_pages(dma_addr, size);
	for (i = 0; i < npages; i++) {
		iommu_gatt_base[iommu_page + i] = gart_unmapped_entry;
		CLEAR_LEAK(iommu_page + i);
	}
	free_iommu(iommu_page, npages);
}

/*
 * Wrapper for pci_unmap_single working with scatterlists.
 */
static void
gart_unmap_sg(struct device *dev, struct scatterlist *sg, int nents, int dir)
{
	struct scatterlist *s;
	int i;

	for_each_sg(sg, s, nents, i) {
		if (!s->dma_length || !s->length)
			break;
		gart_unmap_single(dev, s->dma_address, s->dma_length, dir);
	}
}

/* Fallback for dma_map_sg in case of overflow */
static int dma_map_sg_nonforce(struct device *dev, struct scatterlist *sg,
			       int nents, int dir)
{
	struct scatterlist *s;
	int i;

#ifdef CONFIG_IOMMU_DEBUG
	printk(KERN_DEBUG "dma_map_sg overflow\n");
#endif

	for_each_sg(sg, s, nents, i) {
		unsigned long addr = sg_phys(s);

		if (nonforced_iommu(dev, addr, s->length)) {
			addr = dma_map_area(dev, addr, s->length, dir);
			if (addr == bad_dma_address) {
				if (i > 0)
					gart_unmap_sg(dev, sg, i, dir);
				nents = 0;
				sg[0].dma_length = 0;
				break;
			}
		}
		s->dma_address = addr;
		s->dma_length = s->length;
	}
	flush_gart();

	return nents;
}

357/* Map multiple scatterlist entries continuous into the first. */
9ee1bea4 358static int __dma_map_cont(struct scatterlist *start, int nelems,
05fccb0e 359 struct scatterlist *sout, unsigned long pages)
1da177e4
LT
360{
361 unsigned long iommu_start = alloc_iommu(pages);
05fccb0e 362 unsigned long iommu_page = iommu_start;
9ee1bea4 363 struct scatterlist *s;
1da177e4
LT
364 int i;
365
366 if (iommu_start == -1)
367 return -1;
9ee1bea4
JA
368
369 for_each_sg(start, s, nelems, i) {
1da177e4
LT
370 unsigned long pages, addr;
371 unsigned long phys_addr = s->dma_address;
05fccb0e 372
9ee1bea4
JA
373 BUG_ON(s != start && s->offset);
374 if (s == start) {
1da177e4
LT
375 sout->dma_address = iommu_bus_base;
376 sout->dma_address += iommu_page*PAGE_SIZE + s->offset;
377 sout->dma_length = s->length;
05fccb0e
IM
378 } else {
379 sout->dma_length += s->length;
1da177e4
LT
380 }
381
382 addr = phys_addr;
05fccb0e
IM
383 pages = to_pages(s->offset, s->length);
384 while (pages--) {
385 iommu_gatt_base[iommu_page] = GPTE_ENCODE(addr);
1da177e4
LT
386 SET_LEAK(iommu_page);
387 addr += PAGE_SIZE;
388 iommu_page++;
0d541064 389 }
05fccb0e
IM
390 }
391 BUG_ON(iommu_page - iommu_start != pages);
392
1da177e4
LT
393 return 0;
394}
395
static inline int
dma_map_cont(struct scatterlist *start, int nelems, struct scatterlist *sout,
	     unsigned long pages, int need)
{
	if (!need) {
		BUG_ON(nelems != 1);
		sout->dma_address = start->dma_address;
		sout->dma_length = start->length;
		return 0;
	}
	return __dma_map_cont(start, nelems, sout, pages);
}

/*
 * DMA map all entries in a scatterlist.
 * Merge chunks that have page aligned sizes into a continuous mapping.
 */
static int
gart_map_sg(struct device *dev, struct scatterlist *sg, int nents, int dir)
{
	struct scatterlist *s, *ps, *start_sg, *sgmap;
	int need = 0, nextneed, i, out, start;
	unsigned long pages = 0;

	if (nents == 0)
		return 0;

	if (!dev)
		dev = &fallback_dev;

	out = 0;
	start = 0;
	start_sg = sgmap = sg;
	ps = NULL; /* shut up gcc */
	for_each_sg(sg, s, nents, i) {
		dma_addr_t addr = sg_phys(s);

		s->dma_address = addr;
		BUG_ON(s->length == 0);

		nextneed = need_iommu(dev, addr, s->length);

		/* Handle the previous not yet processed entries */
		if (i > start) {
			/*
			 * Can only merge when the last chunk ends on a
			 * page boundary and the new one doesn't have an
			 * offset.
			 */
			if (!iommu_merge || !nextneed || !need || s->offset ||
			    (ps->offset + ps->length) % PAGE_SIZE) {
				if (dma_map_cont(start_sg, i - start, sgmap,
						 pages, need) < 0)
					goto error;
				out++;
				sgmap = sg_next(sgmap);
				pages = 0;
				start = i;
				start_sg = s;
			}
		}

		need = nextneed;
		pages += to_pages(s->offset, s->length);
		ps = s;
	}
	if (dma_map_cont(start_sg, i - start, sgmap, pages, need) < 0)
		goto error;
	out++;
	flush_gart();
	if (out < nents) {
		sgmap = sg_next(sgmap);
		sgmap->dma_length = 0;
	}
	return out;

error:
	flush_gart();
	gart_unmap_sg(dev, sg, out, dir);

	/* When it was forced or merged try again in a dumb way */
	if (force_iommu || iommu_merge) {
		out = dma_map_sg_nonforce(dev, sg, nents, dir);
		if (out > 0)
			return out;
	}
	if (panic_on_overflow)
		panic("dma_map_sg: overflow on %lu pages\n", pages);

	iommu_full(dev, pages << PAGE_SHIFT, dir);
	for_each_sg(sg, s, nents, i)
		s->dma_address = bad_dma_address;
	return 0;
}

static int no_agp;

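/*
 * Decide how much of the aperture the IOMMU may claim: the whole
 * aperture when AGP is out of the picture, otherwise half of it is
 * left to the AGP driver.
 */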
static __init unsigned long check_iommu_size(unsigned long aper, u64 aper_size)
{
	unsigned long a;

	if (!iommu_size) {
		iommu_size = aper_size;
		if (!no_agp)
			iommu_size /= 2;
	}

	a = aper + iommu_size;
	iommu_size -= round_up(a, LARGE_PAGE_SIZE) - a;

	if (iommu_size < 64*1024*1024) {
		printk(KERN_WARNING
			"PCI-DMA: Warning: Small IOMMU %luMB."
			" Consider increasing the AGP aperture in BIOS\n",
			iommu_size >> 20);
	}

	return iommu_size;
}

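/*
 * Read the GART aperture base and size out of northbridge config
 * space: the order field in register 0x90 gives the size as
 * 32MB << order, and register 0x94 holds physical base address
 * bits 39..25.
 */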
static __init unsigned read_aperture(struct pci_dev *dev, u32 *size)
{
	unsigned aper_size = 0, aper_base_32, aper_order;
	u64 aper_base;

	pci_read_config_dword(dev, 0x94, &aper_base_32);
	pci_read_config_dword(dev, 0x90, &aper_order);
	aper_order = (aper_order >> 1) & 7;

	aper_base = aper_base_32 & 0x7fff;
	aper_base <<= 25;

	aper_size = (32 * 1024 * 1024) << aper_order;
	if (aper_base + aper_size > 0x100000000UL || !aper_size)
		aper_base = 0;

	*size = aper_size;
	return aper_base;
}

/*
 * Private Northbridge GATT initialization in case we cannot use the
 * AGP driver for some reason.
 */
static __init int init_k8_gatt(struct agp_kern_info *info)
{
	unsigned aper_size, gatt_size, new_aper_size;
	unsigned aper_base, new_aper_base;
	struct pci_dev *dev;
	void *gatt;
	int i;

	printk(KERN_INFO "PCI-DMA: Disabling AGP.\n");
	aper_size = aper_base = info->aper_size = 0;
	dev = NULL;
	for (i = 0; i < num_k8_northbridges; i++) {
		dev = k8_northbridges[i];
		new_aper_base = read_aperture(dev, &new_aper_size);
		if (!new_aper_base)
			goto nommu;

		if (!aper_base) {
			aper_size = new_aper_size;
			aper_base = new_aper_base;
		}
		if (aper_size != new_aper_size || aper_base != new_aper_base)
			goto nommu;
	}
	if (!aper_base)
		goto nommu;
	info->aper_base = aper_base;
	info->aper_size = aper_size >> 20;

	gatt_size = (aper_size >> PAGE_SHIFT) * sizeof(u32);
	gatt = (void *)__get_free_pages(GFP_KERNEL, get_order(gatt_size));
	if (!gatt)
		panic("Cannot allocate GATT table");
	if (set_memory_uc((unsigned long)gatt, gatt_size >> PAGE_SHIFT))
		panic("Could not set GART PTEs to uncacheable pages");

	memset(gatt, 0, gatt_size);
	agp_gatt_table = gatt;

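	/*
	 * Point each northbridge at the new table: register 0x98 takes
	 * the GATT physical address (bits 39..12, shifted into bits
	 * 31..4), and register 0x90 enables translation (bit 0) while
	 * clearing the two GART-disable bits (4 and 5).
	 */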
	for (i = 0; i < num_k8_northbridges; i++) {
		u32 gatt_reg;
		u32 ctl;

		dev = k8_northbridges[i];
		gatt_reg = __pa(gatt) >> 12;
		gatt_reg <<= 4;
		pci_write_config_dword(dev, 0x98, gatt_reg);
		pci_read_config_dword(dev, 0x90, &ctl);

		ctl |= 1;
		ctl &= ~((1<<4) | (1<<5));

		pci_write_config_dword(dev, 0x90, ctl);
	}
	flush_gart();

	printk(KERN_INFO "PCI-DMA: aperture base @ %x size %u KB\n",
	       aper_base, aper_size>>10);

	return 0;

 nommu:
	/* Should not happen anymore */
	printk(KERN_ERR "PCI-DMA: More than 4GB of RAM and no IOMMU\n"
	       KERN_ERR "PCI-DMA: 32bit PCI IO may malfunction.\n");
	return -1;
}

extern int agp_amd64_init(void);

static const struct dma_mapping_ops gart_dma_ops = {
	.mapping_error			= NULL,
	.map_single			= gart_map_single,
	.map_simple			= gart_map_simple,
	.unmap_single			= gart_unmap_single,
	.sync_single_for_cpu		= NULL,
	.sync_single_for_device		= NULL,
	.sync_single_range_for_cpu	= NULL,
	.sync_single_range_for_device	= NULL,
	.sync_sg_for_cpu		= NULL,
	.sync_sg_for_device		= NULL,
	.map_sg				= gart_map_sg,
	.unmap_sg			= gart_unmap_sg,
};

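/* Disable GART address translation on every northbridge at shutdown. */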
void gart_iommu_shutdown(void)
{
	struct pci_dev *dev;
	int i;

	if (no_agp && (dma_ops != &gart_dma_ops))
		return;

	for (i = 0; i < num_k8_northbridges; i++) {
		u32 ctl;

		dev = k8_northbridges[i];
		pci_read_config_dword(dev, 0x90, &ctl);

		ctl &= ~1;

		pci_write_config_dword(dev, 0x90, ctl);
	}
}

void __init gart_iommu_init(void)
{
	struct agp_kern_info info;
	unsigned long iommu_start;
	unsigned long aper_size;
	unsigned long scratch;
	long i;

	if (cache_k8_northbridges() < 0 || num_k8_northbridges == 0) {
		printk(KERN_INFO "PCI-GART: No AMD northbridge found.\n");
		return;
	}

#ifndef CONFIG_AGP_AMD64
	no_agp = 1;
#else
	/* Makefile puts PCI initialization via subsys_initcall first. */
	/* Add other K8 AGP bridge drivers here */
	no_agp = no_agp ||
		(agp_amd64_init() < 0) ||
		(agp_copy_info(agp_bridge, &info) < 0);
#endif

	if (swiotlb)
		return;

	/* Did we detect a different HW IOMMU? */
	if (iommu_detected && !gart_iommu_aperture)
		return;

	if (no_iommu ||
	    (!force_iommu && end_pfn <= MAX_DMA32_PFN) ||
	    !gart_iommu_aperture ||
	    (no_agp && init_k8_gatt(&info) < 0)) {
		if (end_pfn > MAX_DMA32_PFN) {
			printk(KERN_ERR "WARNING more than 4GB of memory "
				"but GART IOMMU not available.\n"
			       KERN_ERR "WARNING 32bit PCI may malfunction.\n");
		}
		return;
	}

	printk(KERN_INFO "PCI-DMA: using GART IOMMU.\n");
	aper_size = info.aper_size * 1024 * 1024;
	iommu_size = check_iommu_size(info.aper_base, aper_size);
	iommu_pages = iommu_size >> PAGE_SHIFT;

	iommu_gart_bitmap = (void *) __get_free_pages(GFP_KERNEL,
						      get_order(iommu_pages/8));
	if (!iommu_gart_bitmap)
		panic("Cannot allocate iommu bitmap\n");
	memset(iommu_gart_bitmap, 0, iommu_pages/8);

#ifdef CONFIG_IOMMU_LEAK
	if (leak_trace) {
		iommu_leak_tab = (void *)__get_free_pages(GFP_KERNEL,
				get_order(iommu_pages*sizeof(void *)));
		if (iommu_leak_tab)
			memset(iommu_leak_tab, 0, iommu_pages * sizeof(void *));
		else
			printk(KERN_DEBUG
			       "PCI-DMA: Cannot allocate leak trace area\n");
	}
#endif

	/*
	 * Out of IOMMU space handling.
	 * Reserve some invalid pages at the beginning of the GART.
	 */
	set_bit_string(iommu_gart_bitmap, 0, EMERGENCY_PAGES);

	agp_memory_reserved = iommu_size;
	printk(KERN_INFO
	       "PCI-DMA: Reserving %luMB of IOMMU area in the AGP aperture\n",
	       iommu_size >> 20);

	iommu_start = aper_size - iommu_size;
	iommu_bus_base = info.aper_base + iommu_start;
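	/*
	 * bad_dma_address points at the start of the remapping area,
	 * whose first EMERGENCY_PAGES were reserved in the allocation
	 * bitmap above, so the error value can never collide with a
	 * real mapping.
	 */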
	bad_dma_address = iommu_bus_base;
	iommu_gatt_base = agp_gatt_table + (iommu_start>>PAGE_SHIFT);

	/*
	 * Unmap the IOMMU part of the GART. The alias of the page is
	 * always mapped with cache enabled and there is no full cache
	 * coherency across the GART remapping. The unmapping avoids
	 * automatic prefetches from the CPU allocating cache lines in
	 * there. All CPU accesses are done via the direct mapping to
	 * the backing memory. The GART address is only used by PCI
	 * devices.
	 */
	clear_kernel_mapping((unsigned long)__va(iommu_bus_base), iommu_size);

	/*
	 * Try to workaround a bug (thanks to BenH):
	 * set unmapped entries to a scratch page instead of 0.
	 * Any prefetches that hit unmapped entries won't get a bus abort
	 * then.
	 */
	scratch = get_zeroed_page(GFP_KERNEL);
	if (!scratch)
		panic("Cannot allocate iommu scratch page");
	gart_unmapped_entry = GPTE_ENCODE(__pa(scratch));
	for (i = EMERGENCY_PAGES; i < iommu_pages; i++)
		iommu_gatt_base[i] = gart_unmapped_entry;

	flush_gart();
	dma_ops = &gart_dma_ops;
}

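/*
 * Options parsed from the iommu= boot parameter (a best-effort summary
 * of the checks below): leak[=<pages>] (with CONFIG_IOMMU_LEAK),
 * <size>, fullflush, nofullflush, noagp, noaperture, force, allowed,
 * memaper[=<order>].
 */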
void __init gart_parse_options(char *p)
{
	int arg;

#ifdef CONFIG_IOMMU_LEAK
	if (!strncmp(p, "leak", 4)) {
		leak_trace = 1;
		p += 4;
		if (*p == '=') ++p;
		if (isdigit(*p) && get_option(&p, &arg))
			iommu_leak_pages = arg;
	}
#endif
	if (isdigit(*p) && get_option(&p, &arg))
		iommu_size = arg;
	if (!strncmp(p, "fullflush", 9))
		iommu_fullflush = 1;
	if (!strncmp(p, "nofullflush", 11))
		iommu_fullflush = 0;
	if (!strncmp(p, "noagp", 5))
		no_agp = 1;
	if (!strncmp(p, "noaperture", 10))
		fix_aperture = 0;
	/* duplicated from pci-dma.c */
	if (!strncmp(p, "force", 5))
		gart_iommu_aperture_allowed = 1;
	if (!strncmp(p, "allowed", 7))
		gart_iommu_aperture_allowed = 1;
	if (!strncmp(p, "memaper", 7)) {
		fallback_aper_force = 1;
		p += 7;
		if (*p == '=') {
			++p;
			if (get_option(&p, &arg))
				fallback_aper_order = arg;
		}
	}
}