Merge branch 'x86/gart' into x86/iommu
arch/x86/kernel/pci-gart_64.c
/*
 * Dynamic DMA mapping support for AMD Hammer.
 *
 * Use the integrated AGP GART in the Hammer northbridge as an IOMMU for PCI.
 * This allows PCI devices that only support 32-bit addresses to be used on
 * systems with more than 4GB of memory.
 *
 * See Documentation/DMA-mapping.txt for the interface specification.
 *
 * Copyright 2002 Andi Kleen, SuSE Labs.
 * Subject to the GNU General Public License v2 only.
 */

#include <linux/types.h>
#include <linux/ctype.h>
#include <linux/agp_backend.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/string.h>
#include <linux/spinlock.h>
#include <linux/pci.h>
#include <linux/module.h>
#include <linux/topology.h>
#include <linux/interrupt.h>
#include <linux/bitops.h>
#include <linux/kdebug.h>
#include <linux/scatterlist.h>
#include <linux/iommu-helper.h>
#include <linux/sysdev.h>
#include <asm/atomic.h>
#include <asm/io.h>
#include <asm/mtrr.h>
#include <asm/pgtable.h>
#include <asm/proto.h>
#include <asm/iommu.h>
#include <asm/gart.h>
#include <asm/cacheflush.h>
#include <asm/swiotlb.h>
#include <asm/dma.h>
#include <asm/k8.h>

static unsigned long iommu_bus_base;	/* GART remapping area (physical) */
static unsigned long iommu_size;	/* size of remapping area bytes */
static unsigned long iommu_pages;	/* .. and in pages */

static u32 *iommu_gatt_base;		/* Remapping table */

/*
 * If this is disabled the IOMMU will use an optimized flushing strategy
 * of only flushing when a mapping is reused. With it true the GART is
 * flushed for every mapping. Problem is that doing the lazy flush seems
 * to trigger bugs with some popular PCI cards, in particular 3ware (but
 * it has also been seen with Qlogic at least).
 */
int iommu_fullflush = 1;

/* Allocation bitmap for the remapping area: */
static DEFINE_SPINLOCK(iommu_bitmap_lock);
/* Guarded by iommu_bitmap_lock: */
static unsigned long *iommu_gart_bitmap;

static u32 gart_unmapped_entry;

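/*
 * Each GART PTE is a 32-bit word that packs a 40-bit physical address:
 * bits 12-31 of the address stay in place, bits 32-39 are stored in PTE
 * bits 4-11, and bits 0/1 mark the entry valid and cache-coherent.
 * GPTE_DECODE() undoes this packing.
 */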
#define GPTE_VALID    1
#define GPTE_COHERENT 2
#define GPTE_ENCODE(x) \
	(((x) & 0xfffff000) | (((x) >> 32) << 4) | GPTE_VALID | GPTE_COHERENT)
#define GPTE_DECODE(x) (((x) & 0xfffff000) | (((u64)(x) & 0xff0) << 28))

#define EMERGENCY_PAGES 32 /* = 128KB */

#ifdef CONFIG_AGP
#define AGPEXTERN extern
#else
#define AGPEXTERN
#endif

/* backdoor interface to AGP driver */
AGPEXTERN int agp_memory_reserved;
AGPEXTERN __u32 *agp_gatt_table;

static unsigned long next_bit;	/* protected by iommu_bitmap_lock */
static int need_flush;		/* global flush state. set for each gart wrap */

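/*
 * Allocate 'size' pages in the GART bitmap.  The search starts at next_bit
 * (just past the previous allocation) and wraps to the start of the bitmap
 * once; a wrap means old mappings may be reused, so need_flush is set.
 * base_index/boundary_size keep the returned range within the device's DMA
 * segment boundary (see dma_get_seg_boundary()).
 */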
static unsigned long alloc_iommu(struct device *dev, int size,
				 unsigned long align_mask)
{
	unsigned long offset, flags;
	unsigned long boundary_size;
	unsigned long base_index;

	base_index = ALIGN(iommu_bus_base & dma_get_seg_boundary(dev),
			   PAGE_SIZE) >> PAGE_SHIFT;
	boundary_size = ALIGN((unsigned long long)dma_get_seg_boundary(dev) + 1,
			      PAGE_SIZE) >> PAGE_SHIFT;

	spin_lock_irqsave(&iommu_bitmap_lock, flags);
	offset = iommu_area_alloc(iommu_gart_bitmap, iommu_pages, next_bit,
				  size, base_index, boundary_size, align_mask);
	if (offset == -1) {
		need_flush = 1;
		offset = iommu_area_alloc(iommu_gart_bitmap, iommu_pages, 0,
					  size, base_index, boundary_size,
					  align_mask);
	}
	if (offset != -1) {
		next_bit = offset+size;
		if (next_bit >= iommu_pages) {
			next_bit = 0;
			need_flush = 1;
		}
	}
	if (iommu_fullflush)
		need_flush = 1;
	spin_unlock_irqrestore(&iommu_bitmap_lock, flags);

	return offset;
}

static void free_iommu(unsigned long offset, int size)
{
	unsigned long flags;

	spin_lock_irqsave(&iommu_bitmap_lock, flags);
	iommu_area_free(iommu_gart_bitmap, offset, size);
	spin_unlock_irqrestore(&iommu_bitmap_lock, flags);
}

/*
 * Use global flush state to avoid races with multiple flushers.
 */
static void flush_gart(void)
{
	unsigned long flags;

	spin_lock_irqsave(&iommu_bitmap_lock, flags);
	if (need_flush) {
		k8_flush_garts();
		need_flush = 0;
	}
	spin_unlock_irqrestore(&iommu_bitmap_lock, flags);
}

#ifdef CONFIG_IOMMU_LEAK

#define SET_LEAK(x)							\
	do {								\
		if (iommu_leak_tab)					\
			iommu_leak_tab[x] = __builtin_return_address(0);\
	} while (0)

#define CLEAR_LEAK(x)							\
	do {								\
		if (iommu_leak_tab)					\
			iommu_leak_tab[x] = NULL;			\
	} while (0)

/* Debugging aid for drivers that don't free their IOMMU tables */
static void **iommu_leak_tab;
static int leak_trace;
static int iommu_leak_pages = 20;

static void dump_leak(void)
{
	int i;
	static int dump;

	if (dump || !iommu_leak_tab)
		return;
	dump = 1;
	show_stack(NULL, NULL);

	/* Very crude: dump some entries from the end of the table too */
	printk(KERN_DEBUG "Dumping %d pages from end of IOMMU:\n",
	       iommu_leak_pages);
	for (i = 0; i < iommu_leak_pages; i += 2) {
		printk(KERN_DEBUG "%lu: ", iommu_pages-i);
		printk_address((unsigned long) iommu_leak_tab[iommu_pages-i], 0);
		printk(KERN_CONT "%c", (i+1)%2 == 0 ? '\n' : ' ');
	}
	printk(KERN_DEBUG "\n");
}
#else
# define SET_LEAK(x)
# define CLEAR_LEAK(x)
#endif

static void iommu_full(struct device *dev, size_t size, int dir)
{
	/*
	 * Ran out of IOMMU space for this operation. This is very bad.
	 * Unfortunately the drivers cannot handle this operation properly.
	 * Return some non-mapped, prereserved space in the aperture and
	 * let the Northbridge deal with it. This will result in garbage
	 * in the IO operation. When the size exceeds the prereserved space,
	 * memory corruption will occur or random memory will be DMAed
	 * out. Hopefully no network devices use single mappings that big.
	 */

	dev_err(dev, "PCI-DMA: Out of IOMMU space for %lu bytes\n", size);

	if (size > PAGE_SIZE*EMERGENCY_PAGES) {
		if (dir == PCI_DMA_FROMDEVICE || dir == PCI_DMA_BIDIRECTIONAL)
			panic("PCI-DMA: Memory would be corrupted\n");
		if (dir == PCI_DMA_TODEVICE || dir == PCI_DMA_BIDIRECTIONAL)
			panic(KERN_ERR
				"PCI-DMA: Random memory would be DMAed\n");
	}
#ifdef CONFIG_IOMMU_LEAK
	dump_leak();
#endif
}

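/*
 * need_iommu() decides whether a mapping must go through the GART: either
 * the physical range does not fit below the device's DMA mask, or
 * force_iommu is set.  nonforced_iommu() is the same check without the
 * force_iommu override, used on the overflow fallback path.
 */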
static inline int
need_iommu(struct device *dev, unsigned long addr, size_t size)
{
	u64 mask = *dev->dma_mask;
	int high = addr + size > mask;
	int mmu = high;

	if (force_iommu)
		mmu = 1;

	return mmu;
}

static inline int
nonforced_iommu(struct device *dev, unsigned long addr, size_t size)
{
	u64 mask = *dev->dma_mask;
	int high = addr + size > mask;
	int mmu = high;

	return mmu;
}

/* Map a single contiguous physical area into the IOMMU.
 * Caller needs to check if the iommu is needed and flush.
 */
static dma_addr_t dma_map_area(struct device *dev, dma_addr_t phys_mem,
				size_t size, int dir, unsigned long align_mask)
{
	unsigned long npages = iommu_num_pages(phys_mem, size);
	unsigned long iommu_page = alloc_iommu(dev, npages, align_mask);
	int i;

	if (iommu_page == -1) {
		if (!nonforced_iommu(dev, phys_mem, size))
			return phys_mem;
		if (panic_on_overflow)
			panic("dma_map_area overflow %lu bytes\n", size);
		iommu_full(dev, size, dir);
		return bad_dma_address;
	}

	for (i = 0; i < npages; i++) {
		iommu_gatt_base[iommu_page + i] = GPTE_ENCODE(phys_mem);
		SET_LEAK(iommu_page + i);
		phys_mem += PAGE_SIZE;
	}
	return iommu_bus_base + iommu_page*PAGE_SIZE + (phys_mem & ~PAGE_MASK);
}

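/*
 * gart_map_simple() maps an area with an extra alignment constraint:
 * align_mask is derived from the allocation order of the buffer, so the
 * range handed out by alloc_iommu() starts on a GART page index aligned to
 * the buffer's power-of-two size.
 */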
static dma_addr_t
gart_map_simple(struct device *dev, phys_addr_t paddr, size_t size, int dir)
{
	dma_addr_t map;
	unsigned long align_mask;

	align_mask = (1UL << get_order(size)) - 1;
	map = dma_map_area(dev, paddr, size, dir, align_mask);

	flush_gart();

	return map;
}

/* Map a single area into the IOMMU */
static dma_addr_t
gart_map_single(struct device *dev, phys_addr_t paddr, size_t size, int dir)
{
	unsigned long bus;

	if (!dev)
		dev = &x86_dma_fallback_dev;

	if (!need_iommu(dev, paddr, size))
		return paddr;

	bus = dma_map_area(dev, paddr, size, dir, 0);
	flush_gart();

	return bus;
}

/*
 * Free a DMA mapping.
 */
static void gart_unmap_single(struct device *dev, dma_addr_t dma_addr,
			      size_t size, int direction)
{
	unsigned long iommu_page;
	int npages;
	int i;

	if (dma_addr < iommu_bus_base + EMERGENCY_PAGES*PAGE_SIZE ||
	    dma_addr >= iommu_bus_base + iommu_size)
		return;

	iommu_page = (dma_addr - iommu_bus_base)>>PAGE_SHIFT;
	npages = iommu_num_pages(dma_addr, size);
	for (i = 0; i < npages; i++) {
		iommu_gatt_base[iommu_page + i] = gart_unmapped_entry;
		CLEAR_LEAK(iommu_page + i);
	}
	free_iommu(iommu_page, npages);
}

/*
 * Wrapper for pci_unmap_single working with scatterlists.
 */
static void
gart_unmap_sg(struct device *dev, struct scatterlist *sg, int nents, int dir)
{
	struct scatterlist *s;
	int i;

	for_each_sg(sg, s, nents, i) {
		if (!s->dma_length || !s->length)
			break;
		gart_unmap_single(dev, s->dma_address, s->dma_length, dir);
	}
}

/* Fallback for dma_map_sg in case of overflow */
static int dma_map_sg_nonforce(struct device *dev, struct scatterlist *sg,
			       int nents, int dir)
{
	struct scatterlist *s;
	int i;

#ifdef CONFIG_IOMMU_DEBUG
	printk(KERN_DEBUG "dma_map_sg overflow\n");
#endif

	for_each_sg(sg, s, nents, i) {
		unsigned long addr = sg_phys(s);

		if (nonforced_iommu(dev, addr, s->length)) {
			addr = dma_map_area(dev, addr, s->length, dir, 0);
			if (addr == bad_dma_address) {
				if (i > 0)
					gart_unmap_sg(dev, sg, i, dir);
				nents = 0;
				sg[0].dma_length = 0;
				break;
			}
		}
		s->dma_address = addr;
		s->dma_length = s->length;
	}
	flush_gart();

	return nents;
}

/* Map multiple scatterlist entries contiguously into the first. */
static int __dma_map_cont(struct device *dev, struct scatterlist *start,
			  int nelems, struct scatterlist *sout,
			  unsigned long pages)
{
	unsigned long iommu_start = alloc_iommu(dev, pages, 0);
	unsigned long iommu_page = iommu_start;
	struct scatterlist *s;
	int i;

	if (iommu_start == -1)
		return -1;

	for_each_sg(start, s, nelems, i) {
		unsigned long pages, addr;
		unsigned long phys_addr = s->dma_address;

		BUG_ON(s != start && s->offset);
		if (s == start) {
			sout->dma_address = iommu_bus_base;
			sout->dma_address += iommu_page*PAGE_SIZE + s->offset;
			sout->dma_length = s->length;
		} else {
			sout->dma_length += s->length;
		}

		addr = phys_addr;
		pages = iommu_num_pages(s->offset, s->length);
		while (pages--) {
			iommu_gatt_base[iommu_page] = GPTE_ENCODE(addr);
			SET_LEAK(iommu_page);
			addr += PAGE_SIZE;
			iommu_page++;
		}
	}
	BUG_ON(iommu_page - iommu_start != pages);

	return 0;
}

static inline int
dma_map_cont(struct device *dev, struct scatterlist *start, int nelems,
	     struct scatterlist *sout, unsigned long pages, int need)
{
	if (!need) {
		BUG_ON(nelems != 1);
		sout->dma_address = start->dma_address;
		sout->dma_length = start->length;
		return 0;
	}
	return __dma_map_cont(dev, start, nelems, sout, pages);
}

/*
 * DMA map all entries in a scatterlist.
 * Merge chunks that have page aligned sizes into a contiguous mapping.
 */
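/*
 * The return value is the number of resulting scatterlist entries.  When
 * merging shrinks the list (out < nents), the entry after the last used one
 * gets dma_length = 0, which gart_unmap_sg() treats as a terminator.
 */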
static int
gart_map_sg(struct device *dev, struct scatterlist *sg, int nents, int dir)
{
	struct scatterlist *s, *ps, *start_sg, *sgmap;
	int need = 0, nextneed, i, out, start;
	unsigned long pages = 0;
	unsigned int seg_size;
	unsigned int max_seg_size;

	if (nents == 0)
		return 0;

	if (!dev)
		dev = &x86_dma_fallback_dev;

	out = 0;
	start = 0;
	start_sg = sgmap = sg;
	seg_size = 0;
	max_seg_size = dma_get_max_seg_size(dev);
	ps = NULL; /* shut up gcc */
	for_each_sg(sg, s, nents, i) {
		dma_addr_t addr = sg_phys(s);

		s->dma_address = addr;
		BUG_ON(s->length == 0);

		nextneed = need_iommu(dev, addr, s->length);

		/* Handle the previous not yet processed entries */
		if (i > start) {
			/*
			 * Can only merge when the last chunk ends on a
			 * page boundary and the new one doesn't have an
			 * offset.
			 */
			if (!iommu_merge || !nextneed || !need || s->offset ||
			    (s->length + seg_size > max_seg_size) ||
			    (ps->offset + ps->length) % PAGE_SIZE) {
				if (dma_map_cont(dev, start_sg, i - start,
						 sgmap, pages, need) < 0)
					goto error;
				out++;
				seg_size = 0;
				sgmap = sg_next(sgmap);
				pages = 0;
				start = i;
				start_sg = s;
			}
		}

		seg_size += s->length;
		need = nextneed;
		pages += iommu_num_pages(s->offset, s->length);
		ps = s;
	}
	if (dma_map_cont(dev, start_sg, i - start, sgmap, pages, need) < 0)
		goto error;
	out++;
	flush_gart();
	if (out < nents) {
		sgmap = sg_next(sgmap);
		sgmap->dma_length = 0;
	}
	return out;

error:
	flush_gart();
	gart_unmap_sg(dev, sg, out, dir);

	/* When it was forced or merged try again in a dumb way */
	if (force_iommu || iommu_merge) {
		out = dma_map_sg_nonforce(dev, sg, nents, dir);
		if (out > 0)
			return out;
	}
	if (panic_on_overflow)
		panic("dma_map_sg: overflow on %lu pages\n", pages);

	iommu_full(dev, pages << PAGE_SHIFT, dir);
	for_each_sg(sg, s, nents, i)
		s->dma_address = bad_dma_address;
	return 0;
}

/* allocate and map a coherent mapping */
static void *
gart_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_addr,
		    gfp_t flag)
{
	void *vaddr;

	vaddr = (void *)__get_free_pages(flag | __GFP_ZERO, get_order(size));
	if (!vaddr)
		return NULL;

	*dma_addr = gart_map_single(dev, __pa(vaddr), size, DMA_BIDIRECTIONAL);
	if (*dma_addr != bad_dma_address)
		return vaddr;

	free_pages((unsigned long)vaddr, get_order(size));

	return NULL;
}

/* free a coherent mapping */
static void
gart_free_coherent(struct device *dev, size_t size, void *vaddr,
		   dma_addr_t dma_addr)
{
	gart_unmap_single(dev, dma_addr, size, DMA_BIDIRECTIONAL);
	free_pages((unsigned long)vaddr, get_order(size));
}

static int no_agp;

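/*
 * Decide how much of the aperture the IOMMU may use: the whole aperture by
 * default, or half of it when the AGP driver also needs it.  The size is
 * then shrunk by the distance from aper + iommu_size to the next
 * PMD_PAGE_SIZE boundary, and a warning is printed if less than 64MB
 * remains.
 */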
static __init unsigned long check_iommu_size(unsigned long aper, u64 aper_size)
{
	unsigned long a;

	if (!iommu_size) {
		iommu_size = aper_size;
		if (!no_agp)
			iommu_size /= 2;
	}

	a = aper + iommu_size;
	iommu_size -= round_up(a, PMD_PAGE_SIZE) - a;

	if (iommu_size < 64*1024*1024) {
		printk(KERN_WARNING
			"PCI-DMA: Warning: Small IOMMU %luMB."
			" Consider increasing the AGP aperture in BIOS\n",
			iommu_size >> 20);
	}

	return iommu_size;
}

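/*
 * Read the aperture from the northbridge's GART registers.  As decoded here,
 * AMD64_GARTAPERTUREBASE holds the physical base in 32MB units (hence the
 * shift by 25) and AMD64_GARTAPERTURECTL encodes the size as 32MB << order.
 * A zero base is returned if the register contents look implausible.
 */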
static __init unsigned read_aperture(struct pci_dev *dev, u32 *size)
{
	unsigned aper_size = 0, aper_base_32, aper_order;
	u64 aper_base;

	pci_read_config_dword(dev, AMD64_GARTAPERTUREBASE, &aper_base_32);
	pci_read_config_dword(dev, AMD64_GARTAPERTURECTL, &aper_order);
	aper_order = (aper_order >> 1) & 7;

	aper_base = aper_base_32 & 0x7fff;
	aper_base <<= 25;

	aper_size = (32 * 1024 * 1024) << aper_order;
	if (aper_base + aper_size > 0x100000000UL || !aper_size)
		aper_base = 0;

	*size = aper_size;
	return aper_base;
}

static void enable_gart_translations(void)
{
	int i;

	for (i = 0; i < num_k8_northbridges; i++) {
		struct pci_dev *dev = k8_northbridges[i];

		enable_gart_translation(dev, __pa(agp_gatt_table));
	}
}

/*
 * If fix_up_north_bridges is set, the north bridges have to be fixed up on
 * resume in the same way as they are handled in gart_iommu_hole_init().
 */
static bool fix_up_north_bridges;
static u32 aperture_order;
static u32 aperture_alloc;

void set_up_gart_resume(u32 aper_order, u32 aper_alloc)
{
	fix_up_north_bridges = true;
	aperture_order = aper_order;
	aperture_alloc = aper_alloc;
}

static int gart_resume(struct sys_device *dev)
{
	printk(KERN_INFO "PCI-DMA: Resuming GART IOMMU\n");

	if (fix_up_north_bridges) {
		int i;

		printk(KERN_INFO "PCI-DMA: Restoring GART aperture settings\n");

		for (i = 0; i < num_k8_northbridges; i++) {
			struct pci_dev *dev = k8_northbridges[i];

			/*
			 * Don't enable translations just yet. That is the next
			 * step. Restore the pre-suspend aperture settings.
			 */
			pci_write_config_dword(dev, AMD64_GARTAPERTURECTL,
						aperture_order << 1);
			pci_write_config_dword(dev, AMD64_GARTAPERTUREBASE,
						aperture_alloc >> 25);
		}
	}

	enable_gart_translations();

	return 0;
}

static int gart_suspend(struct sys_device *dev, pm_message_t state)
{
	return 0;
}

static struct sysdev_class gart_sysdev_class = {
	.name = "gart",
	.suspend = gart_suspend,
	.resume = gart_resume,
};

static struct sys_device device_gart = {
	.id = 0,
	.cls = &gart_sysdev_class,
};

/*
 * Private Northbridge GATT initialization in case we cannot use the
 * AGP driver for some reason.
 */
static __init int init_k8_gatt(struct agp_kern_info *info)
{
	unsigned aper_size, gatt_size, new_aper_size;
	unsigned aper_base, new_aper_base;
	struct pci_dev *dev;
	void *gatt;
	int i, error;
	unsigned long start_pfn, end_pfn;

	printk(KERN_INFO "PCI-DMA: Disabling AGP.\n");
	aper_size = aper_base = info->aper_size = 0;
	dev = NULL;
	for (i = 0; i < num_k8_northbridges; i++) {
		dev = k8_northbridges[i];
		new_aper_base = read_aperture(dev, &new_aper_size);
		if (!new_aper_base)
			goto nommu;

		if (!aper_base) {
			aper_size = new_aper_size;
			aper_base = new_aper_base;
		}
		if (aper_size != new_aper_size || aper_base != new_aper_base)
			goto nommu;
	}
	if (!aper_base)
		goto nommu;
	info->aper_base = aper_base;
	info->aper_size = aper_size >> 20;

	gatt_size = (aper_size >> PAGE_SHIFT) * sizeof(u32);
	gatt = (void *)__get_free_pages(GFP_KERNEL, get_order(gatt_size));
	if (!gatt)
		panic("Cannot allocate GATT table");
	if (set_memory_uc((unsigned long)gatt, gatt_size >> PAGE_SHIFT))
		panic("Could not set GART PTEs to uncacheable pages");

	memset(gatt, 0, gatt_size);
	agp_gatt_table = gatt;

	enable_gart_translations();

	error = sysdev_class_register(&gart_sysdev_class);
	if (!error)
		error = sysdev_register(&device_gart);
	if (error)
		panic("Could not register gart_sysdev -- would corrupt data on next suspend");

	flush_gart();

	printk(KERN_INFO "PCI-DMA: aperture base @ %x size %u KB\n",
	       aper_base, aper_size>>10);

	/* need to map that range */
	end_pfn = (aper_base>>PAGE_SHIFT) + (aper_size>>PAGE_SHIFT);
	if (end_pfn > max_low_pfn_mapped) {
		start_pfn = (aper_base>>PAGE_SHIFT);
		init_memory_mapping(start_pfn<<PAGE_SHIFT, end_pfn<<PAGE_SHIFT);
	}
	return 0;

 nommu:
	/* Should not happen anymore */
	printk(KERN_WARNING "PCI-DMA: More than 4GB of RAM and no IOMMU\n"
	       KERN_WARNING "falling back to iommu=soft.\n");
	return -1;
}

extern int agp_amd64_init(void);

static struct dma_mapping_ops gart_dma_ops = {
	.map_single = gart_map_single,
	.unmap_single = gart_unmap_single,
	.sync_single_for_cpu = NULL,
	.sync_single_for_device = NULL,
	.sync_single_range_for_cpu = NULL,
	.sync_single_range_for_device = NULL,
	.sync_sg_for_cpu = NULL,
	.sync_sg_for_device = NULL,
	.map_sg = gart_map_sg,
	.unmap_sg = gart_unmap_sg,
	.alloc_coherent = gart_alloc_coherent,
	.free_coherent = gart_free_coherent,
};

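/*
 * Disable GART translation on every northbridge by clearing the GARTEN bit
 * in the aperture control register.  Skipped when there is no AGP driver and
 * the GART was never installed as the DMA backend.
 */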
void gart_iommu_shutdown(void)
{
	struct pci_dev *dev;
	int i;

	if (no_agp && (dma_ops != &gart_dma_ops))
		return;

	for (i = 0; i < num_k8_northbridges; i++) {
		u32 ctl;

		dev = k8_northbridges[i];
		pci_read_config_dword(dev, AMD64_GARTAPERTURECTL, &ctl);

		ctl &= ~GARTEN;

		pci_write_config_dword(dev, AMD64_GARTAPERTURECTL, ctl);
	}
}

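/*
 * Initialize the GART IOMMU: bail out if no K8 northbridge is present, if
 * another IOMMU or swiotlb is already handling DMA, or if the GART cannot or
 * need not be used (see the checks below).  Otherwise carve the IOMMU region
 * out of the aperture, allocate the allocation bitmap, reserve the emergency
 * pages, unmap the kernel alias of the aperture, point all unused GART
 * entries at a scratch page and install gart_dma_ops.
 */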
void __init gart_iommu_init(void)
{
	struct agp_kern_info info;
	unsigned long iommu_start;
	unsigned long aper_size;
	unsigned long scratch;
	long i;

	if (cache_k8_northbridges() < 0 || num_k8_northbridges == 0) {
		printk(KERN_INFO "PCI-GART: No AMD northbridge found.\n");
		return;
	}

#ifndef CONFIG_AGP_AMD64
	no_agp = 1;
#else
	/* Makefile puts PCI initialization via subsys_initcall first. */
	/* Add other K8 AGP bridge drivers here */
	no_agp = no_agp ||
		(agp_amd64_init() < 0) ||
		(agp_copy_info(agp_bridge, &info) < 0);
#endif

	if (swiotlb)
		return;

	/* Did we detect a different HW IOMMU? */
	if (iommu_detected && !gart_iommu_aperture)
		return;

	if (no_iommu ||
	    (!force_iommu && max_pfn <= MAX_DMA32_PFN) ||
	    !gart_iommu_aperture ||
	    (no_agp && init_k8_gatt(&info) < 0)) {
		if (max_pfn > MAX_DMA32_PFN) {
			printk(KERN_WARNING "More than 4GB of memory "
			       "but GART IOMMU not available.\n"
			       KERN_WARNING "falling back to iommu=soft.\n");
		}
		return;
	}

	printk(KERN_INFO "PCI-DMA: using GART IOMMU.\n");
	aper_size = info.aper_size * 1024 * 1024;
	iommu_size = check_iommu_size(info.aper_base, aper_size);
	iommu_pages = iommu_size >> PAGE_SHIFT;

	iommu_gart_bitmap = (void *) __get_free_pages(GFP_KERNEL,
						      get_order(iommu_pages/8));
	if (!iommu_gart_bitmap)
		panic("Cannot allocate iommu bitmap\n");
	memset(iommu_gart_bitmap, 0, iommu_pages/8);

#ifdef CONFIG_IOMMU_LEAK
	if (leak_trace) {
		iommu_leak_tab = (void *)__get_free_pages(GFP_KERNEL,
				  get_order(iommu_pages*sizeof(void *)));
		if (iommu_leak_tab)
			memset(iommu_leak_tab, 0, iommu_pages * 8);
		else
			printk(KERN_DEBUG
			       "PCI-DMA: Cannot allocate leak trace area\n");
	}
#endif

	/*
	 * Out of IOMMU space handling.
	 * Reserve some invalid pages at the beginning of the GART.
	 */
	set_bit_string(iommu_gart_bitmap, 0, EMERGENCY_PAGES);

	agp_memory_reserved = iommu_size;
	printk(KERN_INFO
	       "PCI-DMA: Reserving %luMB of IOMMU area in the AGP aperture\n",
	       iommu_size >> 20);

	iommu_start = aper_size - iommu_size;
	iommu_bus_base = info.aper_base + iommu_start;
	bad_dma_address = iommu_bus_base;
	iommu_gatt_base = agp_gatt_table + (iommu_start>>PAGE_SHIFT);

	/*
	 * Unmap the IOMMU part of the GART. The alias of the page is
	 * always mapped with cache enabled and there is no full cache
	 * coherency across the GART remapping. The unmapping avoids
	 * automatic prefetches from the CPU allocating cache lines in
	 * there. All CPU accesses are done via the direct mapping to
	 * the backing memory. The GART address is only used by PCI
	 * devices.
	 */
	set_memory_np((unsigned long)__va(iommu_bus_base),
		      iommu_size >> PAGE_SHIFT);
	/*
	 * Tricky. The GART table remaps the physical memory range,
	 * so the CPU won't notice potential aliases and if the memory
	 * is remapped to UC later on, we might surprise the PCI devices
	 * with a stray writeout of a cacheline. So play it sure and
	 * do an explicit, full-scale wbinvd() _after_ having marked all
	 * the pages as Not-Present:
	 */
	wbinvd();

	/*
	 * Try to work around a bug (thanks to BenH):
	 * Set unmapped entries to a scratch page instead of 0.
	 * Any prefetches that hit unmapped entries won't get a bus abort
	 * then. (P2P bridge may be prefetching on DMA reads).
	 */
	scratch = get_zeroed_page(GFP_KERNEL);
	if (!scratch)
		panic("Cannot allocate iommu scratch page");
	gart_unmapped_entry = GPTE_ENCODE(__pa(scratch));
	for (i = EMERGENCY_PAGES; i < iommu_pages; i++)
		iommu_gatt_base[i] = gart_unmapped_entry;

	flush_gart();
	dma_ops = &gart_dma_ops;
}

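/*
 * Parse the GART-specific options from the kernel command line:
 * "leak[=pages]" (with CONFIG_IOMMU_LEAK), a plain number for the IOMMU
 * size, "fullflush"/"nofullflush", "noagp", "noaperture", "force",
 * "allowed" and "memaper[=order]".
 */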
void __init gart_parse_options(char *p)
{
	int arg;

#ifdef CONFIG_IOMMU_LEAK
	if (!strncmp(p, "leak", 4)) {
		leak_trace = 1;
		p += 4;
		if (*p == '=') ++p;
		if (isdigit(*p) && get_option(&p, &arg))
			iommu_leak_pages = arg;
	}
#endif
	if (isdigit(*p) && get_option(&p, &arg))
		iommu_size = arg;
	if (!strncmp(p, "fullflush", 8))
		iommu_fullflush = 1;
	if (!strncmp(p, "nofullflush", 11))
		iommu_fullflush = 0;
	if (!strncmp(p, "noagp", 5))
		no_agp = 1;
	if (!strncmp(p, "noaperture", 10))
		fix_aperture = 0;
	/* duplicated from pci-dma.c */
	if (!strncmp(p, "force", 5))
		gart_iommu_aperture_allowed = 1;
	if (!strncmp(p, "allowed", 7))
		gart_iommu_aperture_allowed = 1;
	if (!strncmp(p, "memaper", 7)) {
		fallback_aper_force = 1;
		p += 7;
		if (*p == '=') {
			++p;
			if (get_option(&p, &arg))
				fallback_aper_order = arg;
		}
	}
}