x86, swiotlb: add map_page and unmap_page
arch/x86/kernel/pci-gart_64.c

/*
 * Dynamic DMA mapping support for AMD Hammer.
 *
 * Use the integrated AGP GART in the Hammer northbridge as an IOMMU for PCI.
 * This allows the use of PCI devices that only support 32-bit addresses on
 * systems with more than 4GB of memory.
 *
 * See Documentation/DMA-mapping.txt for the interface specification.
 *
 * Copyright 2002 Andi Kleen, SuSE Labs.
 * Subject to the GNU General Public License v2 only.
 */

#include <linux/types.h>
#include <linux/ctype.h>
#include <linux/agp_backend.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/string.h>
#include <linux/spinlock.h>
#include <linux/pci.h>
#include <linux/module.h>
#include <linux/topology.h>
#include <linux/interrupt.h>
#include <linux/bitops.h>
#include <linux/kdebug.h>
#include <linux/scatterlist.h>
#include <linux/iommu-helper.h>
#include <linux/sysdev.h>
#include <linux/io.h>
#include <asm/atomic.h>
#include <asm/mtrr.h>
#include <asm/pgtable.h>
#include <asm/proto.h>
#include <asm/iommu.h>
#include <asm/gart.h>
#include <asm/cacheflush.h>
#include <asm/swiotlb.h>
#include <asm/dma.h>
#include <asm/k8.h>

static unsigned long iommu_bus_base;	/* GART remapping area (physical) */
static unsigned long iommu_size;	/* size of remapping area bytes */
static unsigned long iommu_pages;	/* .. and in pages */

static u32 *iommu_gatt_base;		/* Remapping table */

/*
 * If this is disabled the IOMMU will use an optimized flushing strategy
 * of only flushing when a mapping is reused. With it true the GART is
 * flushed for every mapping. The problem is that doing the lazy flush seems
 * to trigger bugs with some popular PCI cards, in particular 3ware (but
 * it has also been seen with Qlogic at least).
 */
static int iommu_fullflush = 1;

/* Allocation bitmap for the remapping area: */
static DEFINE_SPINLOCK(iommu_bitmap_lock);
/* Guarded by iommu_bitmap_lock: */
static unsigned long *iommu_gart_bitmap;

static u32 gart_unmapped_entry;
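
/*
 * GART PTE layout, as encoded by the macros below: bits 12-31 hold
 * physical address bits 12-31, bits 4-11 hold physical address bits
 * 32-39, and the low bits carry the valid/coherent flags. Worked
 * example: GPTE_ENCODE(0x123456000) == 0x23456013, and GPTE_DECODE()
 * of that value recovers 0x123456000 (the flag bits are masked off).
 */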
#define GPTE_VALID	1
#define GPTE_COHERENT	2
#define GPTE_ENCODE(x) \
	(((x) & 0xfffff000) | (((x) >> 32) << 4) | GPTE_VALID | GPTE_COHERENT)
#define GPTE_DECODE(x) (((x) & 0xfffff000) | (((u64)(x) & 0xff0) << 28))

#define EMERGENCY_PAGES 32 /* = 128KB */

#ifdef CONFIG_AGP
#define AGPEXTERN extern
#else
#define AGPEXTERN
#endif

/* backdoor interface to AGP driver */
AGPEXTERN int agp_memory_reserved;
AGPEXTERN __u32 *agp_gatt_table;

static unsigned long next_bit;	/* protected by iommu_bitmap_lock */
static bool need_flush;		/* global flush state. set for each gart wrap */
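
/*
 * Allocate a range of IOMMU pages from the bitmap. Allocation scans
 * forward from next_bit (round-robin); on wrap-around the GART TLBs
 * must be flushed before entries can be reused, which is why
 * need_flush is set both when the first pass fails and when next_bit
 * wraps back to zero.
 */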
static unsigned long alloc_iommu(struct device *dev, int size,
				 unsigned long align_mask)
{
	unsigned long offset, flags;
	unsigned long boundary_size;
	unsigned long base_index;

	base_index = ALIGN(iommu_bus_base & dma_get_seg_boundary(dev),
			   PAGE_SIZE) >> PAGE_SHIFT;
	boundary_size = ALIGN((unsigned long long)dma_get_seg_boundary(dev) + 1,
			      PAGE_SIZE) >> PAGE_SHIFT;

	spin_lock_irqsave(&iommu_bitmap_lock, flags);
	offset = iommu_area_alloc(iommu_gart_bitmap, iommu_pages, next_bit,
				  size, base_index, boundary_size, align_mask);
	if (offset == -1) {
		need_flush = true;
		offset = iommu_area_alloc(iommu_gart_bitmap, iommu_pages, 0,
					  size, base_index, boundary_size,
					  align_mask);
	}
	if (offset != -1) {
		next_bit = offset + size;
		if (next_bit >= iommu_pages) {
			next_bit = 0;
			need_flush = true;
		}
	}
	if (iommu_fullflush)
		need_flush = true;
	spin_unlock_irqrestore(&iommu_bitmap_lock, flags);

	return offset;
}

static void free_iommu(unsigned long offset, int size)
{
	unsigned long flags;

	spin_lock_irqsave(&iommu_bitmap_lock, flags);
	iommu_area_free(iommu_gart_bitmap, offset, size);
	if (offset >= next_bit)
		next_bit = offset + size;
	spin_unlock_irqrestore(&iommu_bitmap_lock, flags);
}

/*
 * Use global flush state to avoid races with multiple flushers.
 */
static void flush_gart(void)
{
	unsigned long flags;

	spin_lock_irqsave(&iommu_bitmap_lock, flags);
	if (need_flush) {
		k8_flush_garts();
		need_flush = false;
	}
	spin_unlock_irqrestore(&iommu_bitmap_lock, flags);
}

#ifdef CONFIG_IOMMU_LEAK

#define SET_LEAK(x)							\
	do {								\
		if (iommu_leak_tab)					\
			iommu_leak_tab[x] = __builtin_return_address(0);\
	} while (0)

#define CLEAR_LEAK(x)							\
	do {								\
		if (iommu_leak_tab)					\
			iommu_leak_tab[x] = NULL;			\
	} while (0)

/* Debugging aid for drivers that don't free their IOMMU tables */
static void **iommu_leak_tab;
static int leak_trace;
static int iommu_leak_pages = 20;

static void dump_leak(void)
{
	int i;
	static int dump;

	if (dump || !iommu_leak_tab)
		return;
	dump = 1;
	show_stack(NULL, NULL);

	/* Very crude: dump some entries from the end of the table too */
	printk(KERN_DEBUG "Dumping %d pages from end of IOMMU:\n",
	       iommu_leak_pages);
	for (i = 0; i < iommu_leak_pages; i += 2) {
		printk(KERN_DEBUG "%lu: ", iommu_pages-i);
		printk_address((unsigned long) iommu_leak_tab[iommu_pages-i],
			       0);
		printk(KERN_CONT "%c", (i+1)%2 == 0 ? '\n' : ' ');
	}
	printk(KERN_DEBUG "\n");
}
#else
# define SET_LEAK(x)
# define CLEAR_LEAK(x)
#endif

static void iommu_full(struct device *dev, size_t size, int dir)
{
	/*
	 * Ran out of IOMMU space for this operation. This is very bad.
	 * Unfortunately the drivers cannot handle this operation properly.
	 * Return some non-mapped prereserved space in the aperture and
	 * let the Northbridge deal with it. This will result in garbage
	 * in the IO operation. When the size exceeds the prereserved space,
	 * memory corruption will occur or random memory will be DMAed
	 * out. Hopefully no network devices use single mappings that big.
	 */

	dev_err(dev, "PCI-DMA: Out of IOMMU space for %lu bytes\n", size);

	if (size > PAGE_SIZE*EMERGENCY_PAGES) {
		if (dir == PCI_DMA_FROMDEVICE || dir == PCI_DMA_BIDIRECTIONAL)
			panic("PCI-DMA: Memory would be corrupted\n");
		if (dir == PCI_DMA_TODEVICE || dir == PCI_DMA_BIDIRECTIONAL)
			panic("PCI-DMA: Random memory would be DMAed\n");
	}
#ifdef CONFIG_IOMMU_LEAK
	dump_leak();
#endif
}

static inline int
need_iommu(struct device *dev, unsigned long addr, size_t size)
{
	return force_iommu ||
		!is_buffer_dma_capable(*dev->dma_mask, addr, size);
}

static inline int
nonforced_iommu(struct device *dev, unsigned long addr, size_t size)
{
	return !is_buffer_dma_capable(*dev->dma_mask, addr, size);
}

/* Map a single contiguous physical area into the IOMMU.
 * Caller needs to check if the iommu is needed and flush.
 */
static dma_addr_t dma_map_area(struct device *dev, dma_addr_t phys_mem,
			       size_t size, int dir, unsigned long align_mask)
{
	unsigned long npages = iommu_num_pages(phys_mem, size, PAGE_SIZE);
	unsigned long iommu_page = alloc_iommu(dev, npages, align_mask);
	int i;

	if (iommu_page == -1) {
		if (!nonforced_iommu(dev, phys_mem, size))
			return phys_mem;
		if (panic_on_overflow)
			panic("dma_map_area overflow %lu bytes\n", size);
		iommu_full(dev, size, dir);
		return bad_dma_address;
	}

	for (i = 0; i < npages; i++) {
		iommu_gatt_base[iommu_page + i] = GPTE_ENCODE(phys_mem);
		SET_LEAK(iommu_page + i);
		phys_mem += PAGE_SIZE;
	}
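	/*
	 * phys_mem was advanced in whole pages above, so its low bits
	 * still hold the original offset within the first page; fold
	 * that offset into the returned bus address.
	 */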
	return iommu_bus_base + iommu_page*PAGE_SIZE + (phys_mem & ~PAGE_MASK);
}

/* Map a single area into the IOMMU */
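/*
 * (Drivers do not call this directly: it is plugged into the
 * dma_mapping_ops table at the bottom of this file, so e.g. a driver's
 * dma_map_single() call ends up here once gart_iommu_init() has run.)
 */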
static dma_addr_t
gart_map_single(struct device *dev, phys_addr_t paddr, size_t size, int dir)
{
	unsigned long bus;

	if (!dev)
		dev = &x86_dma_fallback_dev;

	if (!need_iommu(dev, paddr, size))
		return paddr;

	bus = dma_map_area(dev, paddr, size, dir, 0);
	flush_gart();

	return bus;
}

/*
 * Free a DMA mapping.
 */
static void gart_unmap_single(struct device *dev, dma_addr_t dma_addr,
			      size_t size, int direction)
{
	unsigned long iommu_page;
	int npages;
	int i;

	if (dma_addr < iommu_bus_base + EMERGENCY_PAGES*PAGE_SIZE ||
	    dma_addr >= iommu_bus_base + iommu_size)
		return;

	iommu_page = (dma_addr - iommu_bus_base)>>PAGE_SHIFT;
	npages = iommu_num_pages(dma_addr, size, PAGE_SIZE);
	for (i = 0; i < npages; i++) {
		iommu_gatt_base[iommu_page + i] = gart_unmapped_entry;
		CLEAR_LEAK(iommu_page + i);
	}
	free_iommu(iommu_page, npages);
}

/*
 * Wrapper for pci_unmap_single working with scatterlists.
 */
static void
gart_unmap_sg(struct device *dev, struct scatterlist *sg, int nents, int dir)
{
	struct scatterlist *s;
	int i;

	for_each_sg(sg, s, nents, i) {
		if (!s->dma_length || !s->length)
			break;
		gart_unmap_single(dev, s->dma_address, s->dma_length, dir);
	}
}

/* Fallback for dma_map_sg in case of overflow */
static int dma_map_sg_nonforce(struct device *dev, struct scatterlist *sg,
			       int nents, int dir)
{
	struct scatterlist *s;
	int i;

#ifdef CONFIG_IOMMU_DEBUG
	printk(KERN_DEBUG "dma_map_sg overflow\n");
#endif

	for_each_sg(sg, s, nents, i) {
		unsigned long addr = sg_phys(s);

		if (nonforced_iommu(dev, addr, s->length)) {
			addr = dma_map_area(dev, addr, s->length, dir, 0);
			if (addr == bad_dma_address) {
				if (i > 0)
					gart_unmap_sg(dev, sg, i, dir);
				nents = 0;
				sg[0].dma_length = 0;
				break;
			}
		}
		s->dma_address = addr;
		s->dma_length = s->length;
	}
	flush_gart();

	return nents;
}

/* Map multiple scatterlist entries contiguously into the first. */
static int __dma_map_cont(struct device *dev, struct scatterlist *start,
			  int nelems, struct scatterlist *sout,
			  unsigned long pages)
{
	unsigned long iommu_start = alloc_iommu(dev, pages, 0);
	unsigned long iommu_page = iommu_start;
	struct scatterlist *s;
	int i;

	if (iommu_start == -1)
		return -1;

	for_each_sg(start, s, nelems, i) {
		unsigned long pages, addr;
		unsigned long phys_addr = s->dma_address;

		BUG_ON(s != start && s->offset);
		if (s == start) {
			sout->dma_address = iommu_bus_base;
			sout->dma_address += iommu_page*PAGE_SIZE + s->offset;
			sout->dma_length = s->length;
		} else {
			sout->dma_length += s->length;
		}

		addr = phys_addr;
		pages = iommu_num_pages(s->offset, s->length, PAGE_SIZE);
		while (pages--) {
			iommu_gatt_base[iommu_page] = GPTE_ENCODE(addr);
			SET_LEAK(iommu_page);
			addr += PAGE_SIZE;
			iommu_page++;
		}
	}
	BUG_ON(iommu_page - iommu_start != pages);

	return 0;
}

static inline int
dma_map_cont(struct device *dev, struct scatterlist *start, int nelems,
	     struct scatterlist *sout, unsigned long pages, int need)
{
	if (!need) {
		BUG_ON(nelems != 1);
		sout->dma_address = start->dma_address;
		sout->dma_length = start->length;
		return 0;
	}
	return __dma_map_cont(dev, start, nelems, sout, pages);
}

/*
 * DMA map all entries in a scatterlist.
 * Merge chunks that have page-aligned sizes into a contiguous mapping.
 */
static int
gart_map_sg(struct device *dev, struct scatterlist *sg, int nents, int dir)
{
	struct scatterlist *s, *ps, *start_sg, *sgmap;
	int need = 0, nextneed, i, out, start;
	unsigned long pages = 0;
	unsigned int seg_size;
	unsigned int max_seg_size;

	if (nents == 0)
		return 0;

	if (!dev)
		dev = &x86_dma_fallback_dev;

	out = 0;
	start = 0;
	start_sg = sgmap = sg;
	seg_size = 0;
	max_seg_size = dma_get_max_seg_size(dev);
	ps = NULL; /* shut up gcc */
	for_each_sg(sg, s, nents, i) {
		dma_addr_t addr = sg_phys(s);

		s->dma_address = addr;
		BUG_ON(s->length == 0);

		nextneed = need_iommu(dev, addr, s->length);

		/* Handle the previous not yet processed entries */
		if (i > start) {
			/*
			 * Can only merge when the last chunk ends on a
			 * page boundary and the new one doesn't have an
			 * offset.
			 */
			if (!iommu_merge || !nextneed || !need || s->offset ||
			    (s->length + seg_size > max_seg_size) ||
			    (ps->offset + ps->length) % PAGE_SIZE) {
				if (dma_map_cont(dev, start_sg, i - start,
						 sgmap, pages, need) < 0)
					goto error;
				out++;
				seg_size = 0;
				sgmap = sg_next(sgmap);
				pages = 0;
				start = i;
				start_sg = s;
			}
		}

		seg_size += s->length;
		need = nextneed;
		pages += iommu_num_pages(s->offset, s->length, PAGE_SIZE);
		ps = s;
	}
	if (dma_map_cont(dev, start_sg, i - start, sgmap, pages, need) < 0)
		goto error;
	out++;
	flush_gart();
	if (out < nents) {
		sgmap = sg_next(sgmap);
		sgmap->dma_length = 0;
	}
	return out;

error:
	flush_gart();
	gart_unmap_sg(dev, sg, out, dir);

	/* When it was forced or merged try again in a dumb way */
	if (force_iommu || iommu_merge) {
		out = dma_map_sg_nonforce(dev, sg, nents, dir);
		if (out > 0)
			return out;
	}
	if (panic_on_overflow)
		panic("dma_map_sg: overflow on %lu pages\n", pages);

	iommu_full(dev, pages << PAGE_SHIFT, dir);
	for_each_sg(sg, s, nents, i)
		s->dma_address = bad_dma_address;
	return 0;
}
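
/*
 * Note on the alignment trick below: align_mask is sized to the
 * allocation order, so the GART mapping of a coherent buffer is
 * naturally aligned to its own size, as dma_alloc_coherent() callers
 * expect.
 */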
/* allocate and map a coherent mapping */
static void *
gart_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_addr,
		    gfp_t flag)
{
	dma_addr_t paddr;
	unsigned long align_mask;
	struct page *page;

	if (force_iommu && !(flag & GFP_DMA)) {
		flag &= ~(__GFP_DMA | __GFP_HIGHMEM | __GFP_DMA32);
		page = alloc_pages(flag | __GFP_ZERO, get_order(size));
		if (!page)
			return NULL;

		align_mask = (1UL << get_order(size)) - 1;
		paddr = dma_map_area(dev, page_to_phys(page), size,
				     DMA_BIDIRECTIONAL, align_mask);

		flush_gart();
		if (paddr != bad_dma_address) {
			*dma_addr = paddr;
			return page_address(page);
		}
		__free_pages(page, get_order(size));
	} else
		return dma_generic_alloc_coherent(dev, size, dma_addr, flag);

	return NULL;
}

/* free a coherent mapping */
static void
gart_free_coherent(struct device *dev, size_t size, void *vaddr,
		   dma_addr_t dma_addr)
{
	gart_unmap_single(dev, dma_addr, size, DMA_BIDIRECTIONAL);
	free_pages((unsigned long)vaddr, get_order(size));
}

static int no_agp;
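
/*
 * Decide how much of the aperture to claim for the IOMMU: by default
 * all of it, or half when the AGP driver may also use it. The size is
 * then shrunk so that aperture base + IOMMU size ends on a 2MB (PMD)
 * boundary.
 */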
static __init unsigned long check_iommu_size(unsigned long aper, u64 aper_size)
{
	unsigned long a;

	if (!iommu_size) {
		iommu_size = aper_size;
		if (!no_agp)
			iommu_size /= 2;
	}

	a = aper + iommu_size;
	iommu_size -= round_up(a, PMD_PAGE_SIZE) - a;

	if (iommu_size < 64*1024*1024) {
		printk(KERN_WARNING
		       "PCI-DMA: Warning: Small IOMMU %luMB."
		       " Consider increasing the AGP aperture in BIOS\n",
		       iommu_size >> 20);
	}

	return iommu_size;
}
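
/*
 * The northbridge aperture registers encode the base in 32MB units
 * (hence the shift by 25) and the size as 32MB << order; an aperture
 * that is empty or does not fit below 4GB is reported as invalid
 * (base 0).
 */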
static __init unsigned read_aperture(struct pci_dev *dev, u32 *size)
{
	unsigned aper_size = 0, aper_base_32, aper_order;
	u64 aper_base;

	pci_read_config_dword(dev, AMD64_GARTAPERTUREBASE, &aper_base_32);
	pci_read_config_dword(dev, AMD64_GARTAPERTURECTL, &aper_order);
	aper_order = (aper_order >> 1) & 7;

	aper_base = aper_base_32 & 0x7fff;
	aper_base <<= 25;

	aper_size = (32 * 1024 * 1024) << aper_order;
	if (aper_base + aper_size > 0x100000000UL || !aper_size)
		aper_base = 0;

	*size = aper_size;
	return aper_base;
}
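
/*
 * Point each K8 northbridge at the shared GATT and set its GART enable
 * bit; used both from init_k8_gatt() and on resume from suspend.
 */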
static void enable_gart_translations(void)
{
	int i;

	for (i = 0; i < num_k8_northbridges; i++) {
		struct pci_dev *dev = k8_northbridges[i];

		enable_gart_translation(dev, __pa(agp_gatt_table));
	}
}

/*
 * If fix_up_north_bridges is set, the north bridges have to be fixed up on
 * resume in the same way as they are handled in gart_iommu_hole_init().
 */
static bool fix_up_north_bridges;
static u32 aperture_order;
static u32 aperture_alloc;

void set_up_gart_resume(u32 aper_order, u32 aper_alloc)
{
	fix_up_north_bridges = true;
	aperture_order = aper_order;
	aperture_alloc = aper_alloc;
}

static int gart_resume(struct sys_device *dev)
{
	printk(KERN_INFO "PCI-DMA: Resuming GART IOMMU\n");

	if (fix_up_north_bridges) {
		int i;

		printk(KERN_INFO "PCI-DMA: Restoring GART aperture settings\n");

		for (i = 0; i < num_k8_northbridges; i++) {
			struct pci_dev *dev = k8_northbridges[i];

			/*
			 * Don't enable translations just yet. That is the next
			 * step. Restore the pre-suspend aperture settings.
			 */
			pci_write_config_dword(dev, AMD64_GARTAPERTURECTL,
					       aperture_order << 1);
			pci_write_config_dword(dev, AMD64_GARTAPERTUREBASE,
					       aperture_alloc >> 25);
		}
	}

	enable_gart_translations();

	return 0;
}

static int gart_suspend(struct sys_device *dev, pm_message_t state)
{
	return 0;
}

static struct sysdev_class gart_sysdev_class = {
	.name = "gart",
	.suspend = gart_suspend,
	.resume = gart_resume,
};

static struct sys_device device_gart = {
	.id = 0,
	.cls = &gart_sysdev_class,
};

/*
 * Private Northbridge GATT initialization in case we cannot use the
 * AGP driver for some reason.
 */
static __init int init_k8_gatt(struct agp_kern_info *info)
{
	unsigned aper_size, gatt_size, new_aper_size;
	unsigned aper_base, new_aper_base;
	struct pci_dev *dev;
	void *gatt;
	int i, error;

	printk(KERN_INFO "PCI-DMA: Disabling AGP.\n");
	aper_size = aper_base = info->aper_size = 0;
	dev = NULL;
	for (i = 0; i < num_k8_northbridges; i++) {
		dev = k8_northbridges[i];
		new_aper_base = read_aperture(dev, &new_aper_size);
		if (!new_aper_base)
			goto nommu;

		if (!aper_base) {
			aper_size = new_aper_size;
			aper_base = new_aper_base;
		}
		if (aper_size != new_aper_size || aper_base != new_aper_base)
			goto nommu;
	}
	if (!aper_base)
		goto nommu;
	info->aper_base = aper_base;
	info->aper_size = aper_size >> 20;

	gatt_size = (aper_size >> PAGE_SHIFT) * sizeof(u32);
	gatt = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
					get_order(gatt_size));
	if (!gatt)
		panic("Cannot allocate GATT table");
	if (set_memory_uc((unsigned long)gatt, gatt_size >> PAGE_SHIFT))
		panic("Could not set GART PTEs to uncacheable pages");

	agp_gatt_table = gatt;

	enable_gart_translations();

	error = sysdev_class_register(&gart_sysdev_class);
	if (!error)
		error = sysdev_register(&device_gart);
	if (error)
		panic("Could not register gart_sysdev -- "
		      "would corrupt data on next suspend");

	flush_gart();

	printk(KERN_INFO "PCI-DMA: aperture base @ %x size %u KB\n",
	       aper_base, aper_size>>10);

	return 0;

 nommu:
	/* Should not happen anymore */
	printk(KERN_WARNING "PCI-DMA: More than 4GB of RAM and no IOMMU\n");
	printk(KERN_WARNING "falling back to iommu=soft.\n");
	return -1;
}
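
/*
 * The table below plugs the functions above into the dma_mapping_ops
 * interface; gart_iommu_init() installs it as the global dma_ops, after
 * which the generic DMA mapping helpers dispatch here.
 */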
static struct dma_mapping_ops gart_dma_ops = {
	.map_single		= gart_map_single,
	.unmap_single		= gart_unmap_single,
	.map_sg			= gart_map_sg,
	.unmap_sg		= gart_unmap_sg,
	.alloc_coherent		= gart_alloc_coherent,
	.free_coherent		= gart_free_coherent,
};
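
/*
 * Disable GART translation on every northbridge by clearing the GARTEN
 * bit in the aperture control register.
 */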
void gart_iommu_shutdown(void)
{
	struct pci_dev *dev;
	int i;

	if (no_agp && (dma_ops != &gart_dma_ops))
		return;

	for (i = 0; i < num_k8_northbridges; i++) {
		u32 ctl;

		dev = k8_northbridges[i];
		pci_read_config_dword(dev, AMD64_GARTAPERTURECTL, &ctl);

		ctl &= ~GARTEN;

		pci_write_config_dword(dev, AMD64_GARTAPERTURECTL, ctl);
	}
}

void __init gart_iommu_init(void)
{
	struct agp_kern_info info;
	unsigned long iommu_start;
	unsigned long aper_base, aper_size;
	unsigned long start_pfn, end_pfn;
	unsigned long scratch;
	long i;

	if (cache_k8_northbridges() < 0 || num_k8_northbridges == 0)
		return;

#ifndef CONFIG_AGP_AMD64
	no_agp = 1;
#else
	/* Makefile puts PCI initialization via subsys_initcall first. */
	/* Add other K8 AGP bridge drivers here */
	no_agp = no_agp ||
		(agp_amd64_init() < 0) ||
		(agp_copy_info(agp_bridge, &info) < 0);
#endif

	if (swiotlb)
		return;

	/* Did we detect a different HW IOMMU? */
	if (iommu_detected && !gart_iommu_aperture)
		return;

	if (no_iommu ||
	    (!force_iommu && max_pfn <= MAX_DMA32_PFN) ||
	    !gart_iommu_aperture ||
	    (no_agp && init_k8_gatt(&info) < 0)) {
		if (max_pfn > MAX_DMA32_PFN) {
			printk(KERN_WARNING "More than 4GB of memory "
			       "but GART IOMMU not available.\n");
			printk(KERN_WARNING "falling back to iommu=soft.\n");
		}
		return;
	}

	/* The aperture range must be mapped before we can touch the GATT: */
	aper_size = info.aper_size << 20;
	aper_base = info.aper_base;
	end_pfn = (aper_base>>PAGE_SHIFT) + (aper_size>>PAGE_SHIFT);
	if (end_pfn > max_low_pfn_mapped) {
		start_pfn = (aper_base>>PAGE_SHIFT);
		init_memory_mapping(start_pfn<<PAGE_SHIFT, end_pfn<<PAGE_SHIFT);
	}

	printk(KERN_INFO "PCI-DMA: using GART IOMMU.\n");
	iommu_size = check_iommu_size(info.aper_base, aper_size);
	iommu_pages = iommu_size >> PAGE_SHIFT;

	iommu_gart_bitmap = (void *) __get_free_pages(GFP_KERNEL | __GFP_ZERO,
						      get_order(iommu_pages/8));
	if (!iommu_gart_bitmap)
		panic("Cannot allocate iommu bitmap\n");

#ifdef CONFIG_IOMMU_LEAK
	if (leak_trace) {
		iommu_leak_tab = (void *)__get_free_pages(GFP_KERNEL|__GFP_ZERO,
				  get_order(iommu_pages*sizeof(void *)));
		if (!iommu_leak_tab)
			printk(KERN_DEBUG
			       "PCI-DMA: Cannot allocate leak trace area\n");
	}
#endif

	/*
	 * Out of IOMMU space handling.
	 * Reserve some invalid pages at the beginning of the GART.
	 */
	iommu_area_reserve(iommu_gart_bitmap, 0, EMERGENCY_PAGES);

	agp_memory_reserved = iommu_size;
	printk(KERN_INFO
	       "PCI-DMA: Reserving %luMB of IOMMU area in the AGP aperture\n",
	       iommu_size >> 20);

	iommu_start = aper_size - iommu_size;
	iommu_bus_base = info.aper_base + iommu_start;
	bad_dma_address = iommu_bus_base;
	iommu_gatt_base = agp_gatt_table + (iommu_start>>PAGE_SHIFT);

	/*
	 * Unmap the IOMMU part of the GART. The alias of the page is
	 * always mapped with cache enabled and there is no full cache
	 * coherency across the GART remapping. The unmapping avoids
	 * automatic prefetches from the CPU allocating cache lines in
	 * there. All CPU accesses are done via the direct mapping to
	 * the backing memory. The GART address is only used by PCI
	 * devices.
	 */
	set_memory_np((unsigned long)__va(iommu_bus_base),
		      iommu_size >> PAGE_SHIFT);
	/*
	 * Tricky. The GART table remaps the physical memory range,
	 * so the CPU won't notice potential aliases and if the memory
	 * is remapped to UC later on, we might surprise the PCI devices
	 * with a stray writeout of a cacheline. So play it safe and
	 * do an explicit, full-scale wbinvd() _after_ having marked all
	 * the pages as Not-Present:
	 */
	wbinvd();

	/*
	 * Try to work around a bug (thanks to BenH):
	 * Set unmapped entries to a scratch page instead of 0.
	 * Any prefetches that hit unmapped entries won't cause a bus
	 * abort then. (P2P bridge may be prefetching on DMA reads.)
	 */
	scratch = get_zeroed_page(GFP_KERNEL);
	if (!scratch)
		panic("Cannot allocate iommu scratch page");
	gart_unmapped_entry = GPTE_ENCODE(__pa(scratch));
	for (i = EMERGENCY_PAGES; i < iommu_pages; i++)
		iommu_gatt_base[i] = gart_unmapped_entry;

	flush_gart();
	dma_ops = &gart_dma_ops;
}

void __init gart_parse_options(char *p)
{
	int arg;

#ifdef CONFIG_IOMMU_LEAK
	if (!strncmp(p, "leak", 4)) {
		leak_trace = 1;
		p += 4;
		if (*p == '=')
			++p;
		if (isdigit(*p) && get_option(&p, &arg))
			iommu_leak_pages = arg;
	}
#endif
	if (isdigit(*p) && get_option(&p, &arg))
		iommu_size = arg;
	if (!strncmp(p, "fullflush", 8))
		iommu_fullflush = 1;
	if (!strncmp(p, "nofullflush", 11))
		iommu_fullflush = 0;
	if (!strncmp(p, "noagp", 5))
		no_agp = 1;
	if (!strncmp(p, "noaperture", 10))
		fix_aperture = 0;
	/* duplicated from pci-dma.c */
	if (!strncmp(p, "force", 5))
		gart_iommu_aperture_allowed = 1;
	if (!strncmp(p, "allowed", 7))
		gart_iommu_aperture_allowed = 1;
	if (!strncmp(p, "memaper", 7)) {
		fallback_aper_force = 1;
		p += 7;
		if (*p == '=') {
			++p;
			if (get_option(&p, &arg))
				fallback_aper_order = arg;
		}
	}
}