intel-gtt: drop agp scratch page support stuff
[deliverable/linux.git] drivers/char/agp/intel-gtt.c
1 /*
2 * Intel GTT (Graphics Translation Table) routines
3 *
4 * Caveat: This driver implements the linux agp interface, but this is far from
5 * an agp driver! GTT support ended up here for purely historical reasons: the
6 * old userspace intel graphics drivers needed an interface to map memory into
7 * the GTT. And the drm provides a default interface for graphics devices sitting
8 * on an agp port. So it made sense to fake the GTT support as an agp port to
9 * avoid having to create a new api.
10 *
11 * With gem this no longer makes much sense; it just needlessly complicates
12 * the code. But as long as the old graphics stack is still supported, it's
13 * stuck here.
14 *
15 * /fairy-tale-mode off
16 */
17
18 #include <linux/module.h>
19 #include <linux/pci.h>
20 #include <linux/init.h>
21 #include <linux/kernel.h>
22 #include <linux/pagemap.h>
23 #include <linux/agp_backend.h>
24 #include <asm/smp.h>
25 #include "agp.h"
26 #include "intel-agp.h"
27 #include <linux/intel-gtt.h>
28 #include <drm/intel-gtt.h>
29
30 /*
31 * If we have Intel graphics, we're not going to have anything other than
32 * an Intel IOMMU. So make the correct use of the PCI DMA API contingent
33 * on the Intel IOMMU support (CONFIG_DMAR).
34 * Only newer chipsets need to bother with this, of course.
35 */
36 #ifdef CONFIG_DMAR
37 #define USE_PCI_DMA_API 1
38 #else
39 #define USE_PCI_DMA_API 0
40 #endif
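/*
 * Net effect: with USE_PCI_DMA_API set, pages go through pci_map_sg()/
 * pci_map_page() so the IOMMU can remap them before the bus addresses are
 * written into the GTT; otherwise the PTEs are programmed with the raw
 * page_to_phys() addresses.
 */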
41
42 /* Max amount of stolen space; anything above this is returned to Linux */
43 int intel_max_stolen = 32 * 1024 * 1024;
44 EXPORT_SYMBOL(intel_max_stolen);
45
46 static const struct aper_size_info_fixed intel_i810_sizes[] =
47 {
48 {64, 16384, 4},
49 /* The 32M mode still requires a 64k gatt */
50 {32, 8192, 4}
51 };
52
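/*
 * Driver-private memory types, layered above the generic AGP_USER_TYPES:
 * DCACHE is the i810's dedicated on-chip video memory, PHYS is a
 * physically contiguous allocation whose address is handed to hardware
 * (e.g. cursors), and CACHED marks snooped (cacheable) system memory.
 */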
53 #define AGP_DCACHE_MEMORY 1
54 #define AGP_PHYS_MEMORY 2
55 #define INTEL_AGP_CACHED_MEMORY 3
56
57 static struct gatt_mask intel_i810_masks[] =
58 {
59 {.mask = I810_PTE_VALID, .type = 0},
60 {.mask = (I810_PTE_VALID | I810_PTE_LOCAL), .type = AGP_DCACHE_MEMORY},
61 {.mask = I810_PTE_VALID, .type = 0},
62 {.mask = I810_PTE_VALID | I830_PTE_SYSTEM_CACHED,
63 .type = INTEL_AGP_CACHED_MEMORY}
64 };
65
66 #define INTEL_AGP_UNCACHED_MEMORY 0
67 #define INTEL_AGP_CACHED_MEMORY_LLC 1
68 #define INTEL_AGP_CACHED_MEMORY_LLC_GFDT 2
69 #define INTEL_AGP_CACHED_MEMORY_LLC_MLC 3
70 #define INTEL_AGP_CACHED_MEMORY_LLC_MLC_GFDT 4
71
72 static struct gatt_mask intel_gen6_masks[] =
73 {
74 {.mask = I810_PTE_VALID | GEN6_PTE_UNCACHED,
75 .type = INTEL_AGP_UNCACHED_MEMORY },
76 {.mask = I810_PTE_VALID | GEN6_PTE_LLC,
77 .type = INTEL_AGP_CACHED_MEMORY_LLC },
78 {.mask = I810_PTE_VALID | GEN6_PTE_LLC | GEN6_PTE_GFDT,
79 .type = INTEL_AGP_CACHED_MEMORY_LLC_GFDT },
80 {.mask = I810_PTE_VALID | GEN6_PTE_LLC_MLC,
81 .type = INTEL_AGP_CACHED_MEMORY_LLC_MLC },
82 {.mask = I810_PTE_VALID | GEN6_PTE_LLC_MLC | GEN6_PTE_GFDT,
83 .type = INTEL_AGP_CACHED_MEMORY_LLC_MLC_GFDT },
84 };
85
86 struct intel_gtt_driver {
87 unsigned int gen : 8;
88 unsigned int is_g33 : 1;
89 unsigned int is_pineview : 1;
90 unsigned int is_ironlake : 1;
91 /* Chipset specific GTT setup */
92 int (*setup)(void);
93 void (*write_entry)(dma_addr_t addr, unsigned int entry, unsigned int flags);
94 /* Flags is a more or less chipset specific opaque value.
95 * For chipsets that need to support old ums (non-gem) code, this
96 * needs to be identical to the various supported agp memory types! */
97 };
98
99 static struct _intel_private {
100 struct intel_gtt base;
101 const struct intel_gtt_driver *driver;
102 struct pci_dev *pcidev; /* the integrated graphics device */
103 struct pci_dev *bridge_dev;
104 u8 __iomem *registers;
105 phys_addr_t gtt_bus_addr;
106 phys_addr_t gma_bus_addr;
107 phys_addr_t pte_bus_addr;
108 u32 __iomem *gtt; /* I915G */
109 int num_dcache_entries;
110 union {
111 void __iomem *i9xx_flush_page;
112 void *i8xx_flush_page;
113 };
114 struct page *i8xx_page;
115 struct resource ifp_resource;
116 int resource_valid;
117 struct page *scratch_page;
118 dma_addr_t scratch_page_dma;
119 } intel_private;
120
121 #define INTEL_GTT_GEN intel_private.driver->gen
122 #define IS_G33 intel_private.driver->is_g33
123 #define IS_PINEVIEW intel_private.driver->is_pineview
124 #define IS_IRONLAKE intel_private.driver->is_ironlake
125
126 #if USE_PCI_DMA_API
127 static void intel_agp_free_sglist(struct agp_memory *mem)
128 {
129 struct sg_table st;
130
131 st.sgl = mem->sg_list;
132 st.orig_nents = st.nents = mem->page_count;
133
134 sg_free_table(&st);
135
136 mem->sg_list = NULL;
137 mem->num_sg = 0;
138 }
139
140 static int intel_agp_map_memory(struct agp_memory *mem)
141 {
142 struct sg_table st;
143 struct scatterlist *sg;
144 int i;
145
146 DBG("try mapping %lu pages\n", (unsigned long)mem->page_count);
147
148 if (sg_alloc_table(&st, mem->page_count, GFP_KERNEL))
149 goto err;
150
151 mem->sg_list = sg = st.sgl;
152
153 for (i = 0 ; i < mem->page_count; i++, sg = sg_next(sg))
154 sg_set_page(sg, mem->pages[i], PAGE_SIZE, 0);
155
156 mem->num_sg = pci_map_sg(intel_private.pcidev, mem->sg_list,
157 mem->page_count, PCI_DMA_BIDIRECTIONAL);
158 if (unlikely(!mem->num_sg))
159 goto err;
160
161 return 0;
162
163 err:
164 sg_free_table(&st);
165 return -ENOMEM;
166 }
167
168 static void intel_agp_unmap_memory(struct agp_memory *mem)
169 {
170 DBG("try unmapping %lu pages\n", (unsigned long)mem->page_count);
171
172 pci_unmap_sg(intel_private.pcidev, mem->sg_list,
173 mem->page_count, PCI_DMA_BIDIRECTIONAL);
174 intel_agp_free_sglist(mem);
175 }
176
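/*
 * Write GTT entries from a dma-mapped sg list. If the IOMMU merged
 * entries (num_sg < page_count), each segment is split back into
 * page-sized PTEs, since the GTT always maps one page per entry.
 */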
177 static void intel_agp_insert_sg_entries(struct agp_memory *mem,
178 off_t pg_start, int mask_type)
179 {
180 struct scatterlist *sg;
181 int i, j;
182
183 j = pg_start;
184
185 WARN_ON(!mem->num_sg);
186
187 if (mem->num_sg == mem->page_count) {
188 for_each_sg(mem->sg_list, sg, mem->page_count, i) {
189 writel(agp_bridge->driver->mask_memory(agp_bridge,
190 sg_dma_address(sg), mask_type),
191 intel_private.gtt+j);
192 j++;
193 }
194 } else {
195 /* sg may merge pages, but we have to separate
196 * per-page addr for GTT */
197 unsigned int len, m;
198
199 for_each_sg(mem->sg_list, sg, mem->num_sg, i) {
200 len = sg_dma_len(sg) / PAGE_SIZE;
201 for (m = 0; m < len; m++) {
202 writel(agp_bridge->driver->mask_memory(agp_bridge,
203 sg_dma_address(sg) + m * PAGE_SIZE,
204 mask_type),
205 intel_private.gtt+j);
206 j++;
207 }
208 }
209 }
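	/* Posting read: make sure the PTE writes have reached the GTT. */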
210 readl(intel_private.gtt+j-1);
211 }
212
213 #else
214
215 static void intel_agp_insert_sg_entries(struct agp_memory *mem,
216 off_t pg_start, int mask_type)
217 {
218 int i, j;
219
220 for (i = 0, j = pg_start; i < mem->page_count; i++, j++) {
221 writel(agp_bridge->driver->mask_memory(agp_bridge,
222 page_to_phys(mem->pages[i]), mask_type),
223 intel_private.gtt+j);
224 }
225
226 readl(intel_private.gtt+j-1);
227 }
228
229 #endif
230
231 static int intel_i810_fetch_size(void)
232 {
233 u32 smram_miscc;
234 struct aper_size_info_fixed *values;
235
236 pci_read_config_dword(intel_private.bridge_dev,
237 I810_SMRAM_MISCC, &smram_miscc);
238 values = A_SIZE_FIX(agp_bridge->driver->aperture_sizes);
239
240 if ((smram_miscc & I810_GMS) == I810_GMS_DISABLE) {
241 dev_warn(&intel_private.bridge_dev->dev, "i810 is disabled\n");
242 return 0;
243 }
244 if ((smram_miscc & I810_GFX_MEM_WIN_SIZE) == I810_GFX_MEM_WIN_32M) {
245 agp_bridge->current_size = (void *) (values + 1);
246 agp_bridge->aperture_size_idx = 1;
247 return values[1].size;
248 } else {
249 agp_bridge->current_size = (void *) (values);
250 agp_bridge->aperture_size_idx = 0;
251 return values[0].size;
252 }
255 }
256
257 static int intel_i810_configure(void)
258 {
259 struct aper_size_info_fixed *current_size;
260 u32 temp;
261 int i;
262
263 current_size = A_SIZE_FIX(agp_bridge->current_size);
264
265 if (!intel_private.registers) {
266 pci_read_config_dword(intel_private.pcidev, I810_MMADDR, &temp);
267 temp &= 0xfff80000;
268
269 intel_private.registers = ioremap(temp, 128 * 4096);
270 if (!intel_private.registers) {
271 dev_err(&intel_private.pcidev->dev,
272 "can't remap memory\n");
273 return -ENOMEM;
274 }
275 }
276
277 if ((readl(intel_private.registers+I810_DRAM_CTL)
278 & I810_DRAM_ROW_0) == I810_DRAM_ROW_0_SDRAM) {
279 /* This will need to be dynamically assigned */
280 dev_info(&intel_private.pcidev->dev,
281 "detected 4MB dedicated video ram\n");
282 intel_private.num_dcache_entries = 1024;
283 }
284 pci_read_config_dword(intel_private.pcidev, I810_GMADDR, &temp);
285 agp_bridge->gart_bus_addr = (temp & PCI_BASE_ADDRESS_MEM_MASK);
286 writel(agp_bridge->gatt_bus_addr | I810_PGETBL_ENABLED, intel_private.registers+I810_PGETBL_CTL);
287 readl(intel_private.registers+I810_PGETBL_CTL); /* PCI Posting. */
288
289 if (agp_bridge->driver->needs_scratch_page) {
290 for (i = 0; i < current_size->num_entries; i++) {
291 writel(agp_bridge->scratch_page, intel_private.registers+I810_PTE_BASE+(i*4));
292 }
293 readl(intel_private.registers+I810_PTE_BASE+((i-1)*4)); /* PCI posting. */
294 }
295 global_cache_flush();
296 return 0;
297 }
298
299 static void intel_i810_cleanup(void)
300 {
301 writel(0, intel_private.registers+I810_PGETBL_CTL);
302 readl(intel_private.registers); /* PCI Posting. */
303 iounmap(intel_private.registers);
304 }
305
306 static void intel_fake_agp_enable(struct agp_bridge_data *bridge, u32 mode)
307 {
308 return;
309 }
310
311 /* Exists to support ARGB cursors */
312 static struct page *i8xx_alloc_pages(void)
313 {
314 struct page *page;
315
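	/* order-2 allocation: 4 contiguous pages (a 64x64 ARGB cursor fits in 16K) */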
316 page = alloc_pages(GFP_KERNEL | GFP_DMA32, 2);
317 if (page == NULL)
318 return NULL;
319
320 if (set_pages_uc(page, 4) < 0) {
321 set_pages_wb(page, 4);
322 __free_pages(page, 2);
323 return NULL;
324 }
325 get_page(page);
326 atomic_inc(&agp_bridge->current_memory_agp);
327 return page;
328 }
329
330 static void i8xx_destroy_pages(struct page *page)
331 {
332 if (page == NULL)
333 return;
334
335 set_pages_wb(page, 4);
336 put_page(page);
337 __free_pages(page, 2);
338 atomic_dec(&agp_bridge->current_memory_agp);
339 }
340
341 static int intel_i830_type_to_mask_type(struct agp_bridge_data *bridge,
342 int type)
343 {
344 if (type < AGP_USER_TYPES)
345 return type;
346 else if (type == AGP_USER_CACHED_MEMORY)
347 return INTEL_AGP_CACHED_MEMORY;
348 else
349 return 0;
350 }
351
352 static int intel_gen6_type_to_mask_type(struct agp_bridge_data *bridge,
353 int type)
354 {
355 unsigned int type_mask = type & ~AGP_USER_CACHED_MEMORY_GFDT;
356 unsigned int gfdt = type & AGP_USER_CACHED_MEMORY_GFDT;
357
358 if (type_mask == AGP_USER_UNCACHED_MEMORY)
359 return INTEL_AGP_UNCACHED_MEMORY;
360 else if (type_mask == AGP_USER_CACHED_MEMORY_LLC_MLC)
361 return gfdt ? INTEL_AGP_CACHED_MEMORY_LLC_MLC_GFDT :
362 INTEL_AGP_CACHED_MEMORY_LLC_MLC;
363 else /* set 'normal'/'cached' to LLC by default */
364 return gfdt ? INTEL_AGP_CACHED_MEMORY_LLC_GFDT :
365 INTEL_AGP_CACHED_MEMORY_LLC;
366 }
367
368
369 static int intel_i810_insert_entries(struct agp_memory *mem, off_t pg_start,
370 int type)
371 {
372 int i, j, num_entries;
373 void *temp;
374 int ret = -EINVAL;
375 int mask_type;
376
377 if (mem->page_count == 0)
378 goto out;
379
380 temp = agp_bridge->current_size;
381 num_entries = A_SIZE_FIX(temp)->num_entries;
382
383 if ((pg_start + mem->page_count) > num_entries)
384 goto out_err;
385
386
387 for (j = pg_start; j < (pg_start + mem->page_count); j++) {
388 if (!PGE_EMPTY(agp_bridge, readl(agp_bridge->gatt_table+j))) {
389 ret = -EBUSY;
390 goto out_err;
391 }
392 }
393
394 if (type != mem->type)
395 goto out_err;
396
397 mask_type = agp_bridge->driver->agp_type_to_mask_type(agp_bridge, type);
398
399 switch (mask_type) {
400 case AGP_DCACHE_MEMORY:
401 if (!mem->is_flushed)
402 global_cache_flush();
403 for (i = pg_start; i < (pg_start + mem->page_count); i++) {
404 writel((i*4096)|I810_PTE_LOCAL|I810_PTE_VALID,
405 intel_private.registers+I810_PTE_BASE+(i*4));
406 }
407 readl(intel_private.registers+I810_PTE_BASE+((i-1)*4));
408 break;
409 case AGP_PHYS_MEMORY:
410 case AGP_NORMAL_MEMORY:
411 if (!mem->is_flushed)
412 global_cache_flush();
413 for (i = 0, j = pg_start; i < mem->page_count; i++, j++) {
414 writel(agp_bridge->driver->mask_memory(agp_bridge,
415 page_to_phys(mem->pages[i]), mask_type),
416 intel_private.registers+I810_PTE_BASE+(j*4));
417 }
418 readl(intel_private.registers+I810_PTE_BASE+((j-1)*4));
419 break;
420 default:
421 goto out_err;
422 }
423
424 out:
425 ret = 0;
426 out_err:
427 mem->is_flushed = true;
428 return ret;
429 }
430
431 static int intel_i810_remove_entries(struct agp_memory *mem, off_t pg_start,
432 int type)
433 {
434 int i;
435
436 if (mem->page_count == 0)
437 return 0;
438
439 for (i = pg_start; i < (mem->page_count + pg_start); i++) {
440 writel(agp_bridge->scratch_page, intel_private.registers+I810_PTE_BASE+(i*4));
441 }
442 readl(intel_private.registers+I810_PTE_BASE+((i-1)*4));
443
444 return 0;
445 }
446
447 /*
448 * The i810/i830 require a physical address to program their mouse
449 * pointer into hardware.
450 * However the X server still writes to it through the agp aperture.
451 */
452 static struct agp_memory *alloc_agpphysmem_i8xx(size_t pg_count, int type)
453 {
454 struct agp_memory *new;
455 struct page *page;
456
457 switch (pg_count) {
458 case 1: page = agp_bridge->driver->agp_alloc_page(agp_bridge);
459 break;
460 case 4:
461 /* kludge to get 4 physical pages for ARGB cursor */
462 page = i8xx_alloc_pages();
463 break;
464 default:
465 return NULL;
466 }
467
468 if (page == NULL)
469 return NULL;
470
471 new = agp_create_memory(pg_count);
472 if (new == NULL)
473 return NULL;
474
475 new->pages[0] = page;
476 if (pg_count == 4) {
477 /* kludge to get 4 physical pages for ARGB cursor */
478 new->pages[1] = new->pages[0] + 1;
479 new->pages[2] = new->pages[1] + 1;
480 new->pages[3] = new->pages[2] + 1;
481 }
482 new->page_count = pg_count;
483 new->num_scratch_pages = pg_count;
484 new->type = AGP_PHYS_MEMORY;
485 new->physical = page_to_phys(new->pages[0]);
486 return new;
487 }
488
489 static struct agp_memory *intel_i810_alloc_by_type(size_t pg_count, int type)
490 {
491 struct agp_memory *new;
492
493 if (type == AGP_DCACHE_MEMORY) {
494 if (pg_count != intel_private.num_dcache_entries)
495 return NULL;
496
497 new = agp_create_memory(1);
498 if (new == NULL)
499 return NULL;
500
501 new->type = AGP_DCACHE_MEMORY;
502 new->page_count = pg_count;
503 new->num_scratch_pages = 0;
504 agp_free_page_array(new);
505 return new;
506 }
507 if (type == AGP_PHYS_MEMORY)
508 return alloc_agpphysmem_i8xx(pg_count, type);
509 return NULL;
510 }
511
512 static void intel_i810_free_by_type(struct agp_memory *curr)
513 {
514 agp_free_key(curr->key);
515 if (curr->type == AGP_PHYS_MEMORY) {
516 if (curr->page_count == 4)
517 i8xx_destroy_pages(curr->pages[0]);
518 else {
519 agp_bridge->driver->agp_destroy_page(curr->pages[0],
520 AGP_PAGE_DESTROY_UNMAP);
521 agp_bridge->driver->agp_destroy_page(curr->pages[0],
522 AGP_PAGE_DESTROY_FREE);
523 }
524 agp_free_page_array(curr);
525 }
526 kfree(curr);
527 }
528
529 static unsigned long intel_i810_mask_memory(struct agp_bridge_data *bridge,
530 dma_addr_t addr, int type)
531 {
532 /* Type checking must be done elsewhere */
533 return addr | bridge->driver->masks[type].mask;
534 }
535
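/*
 * The scratch page backs every GTT entry that isn't bound to real
 * memory, so stray GPU accesses hit a harmless page instead of
 * whatever happens to be in RAM.
 */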
536 static int intel_gtt_setup_scratch_page(void)
537 {
538 struct page *page;
539 dma_addr_t dma_addr;
540
541 page = alloc_page(GFP_KERNEL | GFP_DMA32 | __GFP_ZERO);
542 if (page == NULL)
543 return -ENOMEM;
544 get_page(page);
545 set_pages_uc(page, 1);
546
547 if (USE_PCI_DMA_API && INTEL_GTT_GEN > 2) {
548 dma_addr = pci_map_page(intel_private.pcidev, page, 0,
549 PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
550 if (pci_dma_mapping_error(intel_private.pcidev, dma_addr))
551 return -EINVAL;
552
553 intel_private.scratch_page_dma = dma_addr;
554 } else
555 intel_private.scratch_page_dma = page_to_phys(page);
556
557 intel_private.scratch_page = page;
558
559 return 0;
560 }
561
562 static const struct aper_size_info_fixed intel_fake_agp_sizes[] = {
563 {128, 32768, 5},
564 /* The 64M mode still requires a 128k gatt */
565 {64, 16384, 5},
566 {256, 65536, 6},
567 {512, 131072, 7},
568 };
569
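/*
 * "Stolen" memory is main memory the BIOS sets aside for the GPU at
 * boot. The GTT entries covering it are already in use, so they are
 * counted here and never handed out for remapping.
 */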
570 static unsigned int intel_gtt_stolen_entries(void)
571 {
572 u16 gmch_ctrl;
573 u8 rdct;
574 int local = 0;
575 static const int ddt[4] = { 0, 16, 32, 64 };
576 unsigned int overhead_entries, stolen_entries;
577 unsigned int stolen_size = 0;
578
579 pci_read_config_word(intel_private.bridge_dev,
580 I830_GMCH_CTRL, &gmch_ctrl);
581
582 if (INTEL_GTT_GEN > 4 || IS_PINEVIEW)
583 overhead_entries = 0;
584 else
585 overhead_entries = intel_private.base.gtt_mappable_entries
586 / 1024;
587
588 overhead_entries += 1; /* BIOS popup */
589
590 if (intel_private.bridge_dev->device == PCI_DEVICE_ID_INTEL_82830_HB ||
591 intel_private.bridge_dev->device == PCI_DEVICE_ID_INTEL_82845G_HB) {
592 switch (gmch_ctrl & I830_GMCH_GMS_MASK) {
593 case I830_GMCH_GMS_STOLEN_512:
594 stolen_size = KB(512);
595 break;
596 case I830_GMCH_GMS_STOLEN_1024:
597 stolen_size = MB(1);
598 break;
599 case I830_GMCH_GMS_STOLEN_8192:
600 stolen_size = MB(8);
601 break;
602 case I830_GMCH_GMS_LOCAL:
603 rdct = readb(intel_private.registers+I830_RDRAM_CHANNEL_TYPE);
604 stolen_size = (I830_RDRAM_ND(rdct) + 1) *
605 MB(ddt[I830_RDRAM_DDT(rdct)]);
606 local = 1;
607 break;
608 default:
609 stolen_size = 0;
610 break;
611 }
612 } else if (INTEL_GTT_GEN == 6) {
613 /*
614 * SandyBridge has new memory control reg at 0x50.w
615 */
616 u16 snb_gmch_ctl;
617 pci_read_config_word(intel_private.pcidev, SNB_GMCH_CTRL, &snb_gmch_ctl);
618 switch (snb_gmch_ctl & SNB_GMCH_GMS_STOLEN_MASK) {
619 case SNB_GMCH_GMS_STOLEN_32M:
620 stolen_size = MB(32);
621 break;
622 case SNB_GMCH_GMS_STOLEN_64M:
623 stolen_size = MB(64);
624 break;
625 case SNB_GMCH_GMS_STOLEN_96M:
626 stolen_size = MB(96);
627 break;
628 case SNB_GMCH_GMS_STOLEN_128M:
629 stolen_size = MB(128);
630 break;
631 case SNB_GMCH_GMS_STOLEN_160M:
632 stolen_size = MB(160);
633 break;
634 case SNB_GMCH_GMS_STOLEN_192M:
635 stolen_size = MB(192);
636 break;
637 case SNB_GMCH_GMS_STOLEN_224M:
638 stolen_size = MB(224);
639 break;
640 case SNB_GMCH_GMS_STOLEN_256M:
641 stolen_size = MB(256);
642 break;
643 case SNB_GMCH_GMS_STOLEN_288M:
644 stolen_size = MB(288);
645 break;
646 case SNB_GMCH_GMS_STOLEN_320M:
647 stolen_size = MB(320);
648 break;
649 case SNB_GMCH_GMS_STOLEN_352M:
650 stolen_size = MB(352);
651 break;
652 case SNB_GMCH_GMS_STOLEN_384M:
653 stolen_size = MB(384);
654 break;
655 case SNB_GMCH_GMS_STOLEN_416M:
656 stolen_size = MB(416);
657 break;
658 case SNB_GMCH_GMS_STOLEN_448M:
659 stolen_size = MB(448);
660 break;
661 case SNB_GMCH_GMS_STOLEN_480M:
662 stolen_size = MB(480);
663 break;
664 case SNB_GMCH_GMS_STOLEN_512M:
665 stolen_size = MB(512);
666 break;
667 }
668 } else {
669 switch (gmch_ctrl & I855_GMCH_GMS_MASK) {
670 case I855_GMCH_GMS_STOLEN_1M:
671 stolen_size = MB(1);
672 break;
673 case I855_GMCH_GMS_STOLEN_4M:
674 stolen_size = MB(4);
675 break;
676 case I855_GMCH_GMS_STOLEN_8M:
677 stolen_size = MB(8);
678 break;
679 case I855_GMCH_GMS_STOLEN_16M:
680 stolen_size = MB(16);
681 break;
682 case I855_GMCH_GMS_STOLEN_32M:
683 stolen_size = MB(32);
684 break;
685 case I915_GMCH_GMS_STOLEN_48M:
686 stolen_size = MB(48);
687 break;
688 case I915_GMCH_GMS_STOLEN_64M:
689 stolen_size = MB(64);
690 break;
691 case G33_GMCH_GMS_STOLEN_128M:
692 stolen_size = MB(128);
693 break;
694 case G33_GMCH_GMS_STOLEN_256M:
695 stolen_size = MB(256);
696 break;
697 case INTEL_GMCH_GMS_STOLEN_96M:
698 stolen_size = MB(96);
699 break;
700 case INTEL_GMCH_GMS_STOLEN_160M:
701 stolen_size = MB(160);
702 break;
703 case INTEL_GMCH_GMS_STOLEN_224M:
704 stolen_size = MB(224);
705 break;
706 case INTEL_GMCH_GMS_STOLEN_352M:
707 stolen_size = MB(352);
708 break;
709 default:
710 stolen_size = 0;
711 break;
712 }
713 }
714
715 if (!local && stolen_size > intel_max_stolen) {
716 dev_info(&intel_private.bridge_dev->dev,
717 "detected %dK stolen memory, trimming to %dK\n",
718 stolen_size / KB(1), intel_max_stolen / KB(1));
719 stolen_size = intel_max_stolen;
720 } else if (stolen_size > 0) {
721 dev_info(&intel_private.bridge_dev->dev, "detected %dK %s memory\n",
722 stolen_size / KB(1), local ? "local" : "stolen");
723 } else {
724 dev_info(&intel_private.bridge_dev->dev,
725 "no pre-allocated video memory detected\n");
726 stolen_size = 0;
727 }
728
729 stolen_entries = stolen_size/KB(4) - overhead_entries;
730
731 return stolen_entries;
732 }
733
734 static unsigned int intel_gtt_total_entries(void)
735 {
736 int size;
737
738 if (IS_G33 || INTEL_GTT_GEN == 4 || INTEL_GTT_GEN == 5) {
739 u32 pgetbl_ctl;
740 pgetbl_ctl = readl(intel_private.registers+I810_PGETBL_CTL);
741
742 switch (pgetbl_ctl & I965_PGETBL_SIZE_MASK) {
743 case I965_PGETBL_SIZE_128KB:
744 size = KB(128);
745 break;
746 case I965_PGETBL_SIZE_256KB:
747 size = KB(256);
748 break;
749 case I965_PGETBL_SIZE_512KB:
750 size = KB(512);
751 break;
752 case I965_PGETBL_SIZE_1MB:
753 size = KB(1024);
754 break;
755 case I965_PGETBL_SIZE_2MB:
756 size = KB(2048);
757 break;
758 case I965_PGETBL_SIZE_1_5MB:
759 size = KB(1024 + 512);
760 break;
761 default:
762 dev_info(&intel_private.pcidev->dev,
763 "unknown page table size, assuming 512KB\n");
764 size = KB(512);
765 }
766
767 return size/4;
768 } else if (INTEL_GTT_GEN == 6) {
769 u16 snb_gmch_ctl;
770
771 pci_read_config_word(intel_private.pcidev, SNB_GMCH_CTRL, &snb_gmch_ctl);
772 switch (snb_gmch_ctl & SNB_GTT_SIZE_MASK) {
773 default:
774 case SNB_GTT_SIZE_0M:
775 printk(KERN_ERR "Bad GTT size mask: 0x%04x.\n", snb_gmch_ctl);
776 size = MB(0);
777 break;
778 case SNB_GTT_SIZE_1M:
779 size = MB(1);
780 break;
781 case SNB_GTT_SIZE_2M:
782 size = MB(2);
783 break;
784 }
785 return size/4;
786 } else {
787 /* On previous hardware, the GTT size was just what was
788 * required to map the aperture.
789 */
790 return intel_private.base.gtt_mappable_entries;
791 }
792 }
793
794 static unsigned int intel_gtt_mappable_entries(void)
795 {
796 unsigned int aperture_size;
797
798 if (INTEL_GTT_GEN == 2) {
799 u16 gmch_ctrl;
800
801 pci_read_config_word(intel_private.bridge_dev,
802 I830_GMCH_CTRL, &gmch_ctrl);
803
804 if ((gmch_ctrl & I830_GMCH_MEM_MASK) == I830_GMCH_MEM_64M)
805 aperture_size = MB(64);
806 else
807 aperture_size = MB(128);
808 } else {
809 /* 9xx supports large sizes, just look at the length */
810 aperture_size = pci_resource_len(intel_private.pcidev, 2);
811 }
812
813 return aperture_size >> PAGE_SHIFT;
814 }
815
816 static void intel_gtt_teardown_scratch_page(void)
817 {
818 set_pages_wb(intel_private.scratch_page, 1);
819 pci_unmap_page(intel_private.pcidev, intel_private.scratch_page_dma,
820 PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
821 put_page(intel_private.scratch_page);
822 __free_page(intel_private.scratch_page);
823 }
824
825 static void intel_gtt_cleanup(void)
826 {
827 if (intel_private.i9xx_flush_page)
828 iounmap(intel_private.i9xx_flush_page);
829 if (intel_private.resource_valid)
830 release_resource(&intel_private.ifp_resource);
831 intel_private.ifp_resource.start = 0;
832 intel_private.resource_valid = 0;
833 iounmap(intel_private.gtt);
834 iounmap(intel_private.registers);
835
836 intel_gtt_teardown_scratch_page();
837 }
838
839 static int intel_gtt_init(void)
840 {
841 u32 gtt_map_size;
842 int ret;
843
844 ret = intel_private.driver->setup();
845 if (ret != 0)
846 return ret;
847
848 intel_private.base.gtt_mappable_entries = intel_gtt_mappable_entries();
849 intel_private.base.gtt_total_entries = intel_gtt_total_entries();
850
851 gtt_map_size = intel_private.base.gtt_total_entries * 4;
852
853 intel_private.gtt = ioremap(intel_private.gtt_bus_addr,
854 gtt_map_size);
855 if (!intel_private.gtt) {
856 iounmap(intel_private.registers);
857 return -ENOMEM;
858 }
859
860 global_cache_flush(); /* FIXME: ? */
861
862 /* we have to call this as early as possible after the MMIO base address is known */
863 intel_private.base.gtt_stolen_entries = intel_gtt_stolen_entries();
864 if (intel_private.base.gtt_stolen_entries == 0) {
865 iounmap(intel_private.registers);
866 iounmap(intel_private.gtt);
867 return -ENOMEM;
868 }
869
870 ret = intel_gtt_setup_scratch_page();
871 if (ret != 0) {
872 intel_gtt_cleanup();
873 return ret;
874 }
875
876 return 0;
877 }
878
879 static int intel_fake_agp_fetch_size(void)
880 {
881 int num_sizes = ARRAY_SIZE(intel_fake_agp_sizes);
882 unsigned int aper_size;
883 int i;
884
885 aper_size = (intel_private.base.gtt_mappable_entries << PAGE_SHIFT)
886 / MB(1);
887
888 for (i = 0; i < num_sizes; i++) {
889 if (aper_size == intel_fake_agp_sizes[i].size) {
890 agp_bridge->current_size =
891 (void *) (intel_fake_agp_sizes + i);
892 return aper_size;
893 }
894 }
895
896 return 0;
897 }
898
899 static void intel_i830_fini_flush(void)
900 {
901 kunmap(intel_private.i8xx_page);
902 intel_private.i8xx_flush_page = NULL;
903 unmap_page_from_agp(intel_private.i8xx_page);
904
905 __free_page(intel_private.i8xx_page);
906 intel_private.i8xx_page = NULL;
907 }
908
909 static void intel_i830_setup_flush(void)
910 {
911 /* return if we've already set the flush mechanism up */
912 if (intel_private.i8xx_page)
913 return;
914
915 intel_private.i8xx_page = alloc_page(GFP_KERNEL | __GFP_ZERO | GFP_DMA32);
916 if (!intel_private.i8xx_page)
917 return;
918
919 intel_private.i8xx_flush_page = kmap(intel_private.i8xx_page);
920 if (!intel_private.i8xx_flush_page)
921 intel_i830_fini_flush();
922 }
923
924 /* The chipset_flush interface needs to get data that has already been
925 * flushed out of the CPU all the way out to main memory, because the GPU
926 * doesn't snoop those buffers.
927 *
928 * The 8xx series doesn't have the same lovely interface for flushing the
929 * chipset write buffers that the later chips do. According to the 865
930 * specs, it's 64 octwords, or 1KB. So, to get those previous things in
931 * that buffer out, we just fill 1KB and clflush it out, on the assumption
932 * that it'll push whatever was in there out. It appears to work.
933 */
934 static void intel_i830_chipset_flush(struct agp_bridge_data *bridge)
935 {
936 unsigned int *pg = intel_private.i8xx_flush_page;
937
938 memset(pg, 0, 1024);
939
940 if (cpu_has_clflush)
941 clflush_cache_range(pg, 1024);
942 else if (wbinvd_on_all_cpus() != 0)
943 printk(KERN_ERR "Timed out waiting for cache flush.\n");
944 }
945
946 static void i830_write_entry(dma_addr_t addr, unsigned int entry,
947 unsigned int flags)
948 {
949 u32 pte_flags = I810_PTE_VALID;
950
951 switch (flags) {
952 case AGP_DCACHE_MEMORY:
953 pte_flags |= I810_PTE_LOCAL;
954 break;
955 case AGP_USER_CACHED_MEMORY:
956 pte_flags |= I830_PTE_SYSTEM_CACHED;
957 break;
958 }
959
960 writel(addr | pte_flags, intel_private.gtt + entry);
961 }
962
963 static void intel_enable_gtt(void)
964 {
965 u32 gma_addr;
966 u16 gmch_ctrl;
967
968 if (INTEL_GTT_GEN == 2)
969 pci_read_config_dword(intel_private.pcidev, I810_GMADDR,
970 &gma_addr);
971 else
972 pci_read_config_dword(intel_private.pcidev, I915_GMADDR,
973 &gma_addr);
974
975 intel_private.gma_bus_addr = (gma_addr & PCI_BASE_ADDRESS_MEM_MASK);
976
977 pci_read_config_word(intel_private.bridge_dev, I830_GMCH_CTRL, &gmch_ctrl);
978 gmch_ctrl |= I830_GMCH_ENABLED;
979 pci_write_config_word(intel_private.bridge_dev, I830_GMCH_CTRL, gmch_ctrl);
980
981 writel(intel_private.pte_bus_addr|I810_PGETBL_ENABLED,
982 intel_private.registers+I810_PGETBL_CTL);
983 readl(intel_private.registers+I810_PGETBL_CTL); /* PCI Posting. */
984 }
985
986 static int i830_setup(void)
987 {
988 u32 reg_addr;
989
990 pci_read_config_dword(intel_private.pcidev, I810_MMADDR, &reg_addr);
991 reg_addr &= 0xfff80000;
992
993 intel_private.registers = ioremap(reg_addr, KB(64));
994 if (!intel_private.registers)
995 return -ENOMEM;
996
997 intel_private.gtt_bus_addr = reg_addr + I810_PTE_BASE;
998 intel_private.pte_bus_addr =
999 readl(intel_private.registers+I810_PGETBL_CTL) & 0xfffff000;
1000
1001 intel_i830_setup_flush();
1002
1003 return 0;
1004 }
1005
1006 static int intel_fake_agp_create_gatt_table(struct agp_bridge_data *bridge)
1007 {
1008 agp_bridge->gatt_table_real = NULL;
1009 agp_bridge->gatt_table = NULL;
1010 agp_bridge->gatt_bus_addr = 0;
1011
1012 return 0;
1013 }
1014
1015 static int intel_fake_agp_free_gatt_table(struct agp_bridge_data *bridge)
1016 {
1017 return 0;
1018 }
1019
1020 static int intel_fake_agp_configure(void)
1021 {
1022 int i;
1023
1024 intel_enable_gtt();
1025
1026 agp_bridge->gart_bus_addr = intel_private.gma_bus_addr;
1027
1028 for (i = intel_private.base.gtt_stolen_entries;
1029 i < intel_private.base.gtt_total_entries; i++) {
1030 intel_private.driver->write_entry(intel_private.scratch_page_dma,
1031 i, 0);
1032 }
1033 readl(intel_private.gtt+i-1); /* PCI Posting. */
1034
1035 global_cache_flush();
1036
1037 return 0;
1038 }
1039
1040 static int intel_i830_insert_entries(struct agp_memory *mem, off_t pg_start,
1041 int type)
1042 {
1043 int i, j, num_entries;
1044 void *temp;
1045 int ret = -EINVAL;
1046 int mask_type;
1047
1048 if (mem->page_count == 0)
1049 goto out;
1050
1051 temp = agp_bridge->current_size;
1052 num_entries = A_SIZE_FIX(temp)->num_entries;
1053
1054 if (pg_start < intel_private.base.gtt_stolen_entries) {
1055 dev_printk(KERN_DEBUG, &intel_private.pcidev->dev,
1056 "pg_start == 0x%.8lx, gtt_stolen_entries == 0x%.8x\n",
1057 pg_start, intel_private.base.gtt_stolen_entries);
1058
1059 dev_info(&intel_private.pcidev->dev,
1060 "trying to insert into local/stolen memory\n");
1061 goto out_err;
1062 }
1063
1064 if ((pg_start + mem->page_count) > num_entries)
1065 goto out_err;
1066
1067 /* The i830 can't check the GTT for entries since it's read only;
1068 * depend on the caller to make the correct offset decisions.
1069 */
1070
1071 if (type != mem->type)
1072 goto out_err;
1073
1074 mask_type = agp_bridge->driver->agp_type_to_mask_type(agp_bridge, type);
1075
1076 if (mask_type != 0 && mask_type != AGP_PHYS_MEMORY &&
1077 mask_type != INTEL_AGP_CACHED_MEMORY)
1078 goto out_err;
1079
1080 if (!mem->is_flushed)
1081 global_cache_flush();
1082
1083 for (i = 0, j = pg_start; i < mem->page_count; i++, j++) {
1084 writel(agp_bridge->driver->mask_memory(agp_bridge,
1085 page_to_phys(mem->pages[i]), mask_type),
1086 intel_private.gtt+j);
1087 }
1088 readl(intel_private.gtt+j-1);
1089
1090 out:
1091 ret = 0;
1092 out_err:
1093 mem->is_flushed = true;
1094 return ret;
1095 }
1096
1097 static int intel_i830_remove_entries(struct agp_memory *mem, off_t pg_start,
1098 int type)
1099 {
1100 int i;
1101
1102 if (mem->page_count == 0)
1103 return 0;
1104
1105 if (pg_start < intel_private.base.gtt_stolen_entries) {
1106 dev_info(&intel_private.pcidev->dev,
1107 "trying to disable local/stolen memory\n");
1108 return -EINVAL;
1109 }
1110
1111 for (i = pg_start; i < (mem->page_count + pg_start); i++) {
1112 writel(agp_bridge->scratch_page, intel_private.gtt+i);
1113 }
1114 readl(intel_private.gtt+i-1);
1115
1116 return 0;
1117 }
1118
1119 static struct agp_memory *intel_fake_agp_alloc_by_type(size_t pg_count,
1120 int type)
1121 {
1122 if (type == AGP_PHYS_MEMORY)
1123 return alloc_agpphysmem_i8xx(pg_count, type);
1124 /* always return NULL for other allocation types for now */
1125 return NULL;
1126 }
1127
1128 static int intel_alloc_chipset_flush_resource(void)
1129 {
1130 int ret;
1131 ret = pci_bus_alloc_resource(intel_private.bridge_dev->bus, &intel_private.ifp_resource, PAGE_SIZE,
1132 PAGE_SIZE, PCIBIOS_MIN_MEM, 0,
1133 pcibios_align_resource, intel_private.bridge_dev);
1134
1135 return ret;
1136 }
1137
1138 static void intel_i915_setup_chipset_flush(void)
1139 {
1140 int ret;
1141 u32 temp;
1142
1143 pci_read_config_dword(intel_private.bridge_dev, I915_IFPADDR, &temp);
1144 if (!(temp & 0x1)) {
1145 intel_alloc_chipset_flush_resource();
1146 intel_private.resource_valid = 1;
1147 pci_write_config_dword(intel_private.bridge_dev, I915_IFPADDR, (intel_private.ifp_resource.start & 0xffffffff) | 0x1);
1148 } else {
1149 temp &= ~1;
1150
1151 intel_private.resource_valid = 1;
1152 intel_private.ifp_resource.start = temp;
1153 intel_private.ifp_resource.end = temp + PAGE_SIZE;
1154 ret = request_resource(&iomem_resource, &intel_private.ifp_resource);
1155 /* some BIOSes reserve this area in a pnp resource, some don't */
1156 if (ret)
1157 intel_private.resource_valid = 0;
1158 }
1159 }
1160
1161 static void intel_i965_g33_setup_chipset_flush(void)
1162 {
1163 u32 temp_hi, temp_lo;
1164 int ret;
1165
1166 pci_read_config_dword(intel_private.bridge_dev, I965_IFPADDR + 4, &temp_hi);
1167 pci_read_config_dword(intel_private.bridge_dev, I965_IFPADDR, &temp_lo);
1168
1169 if (!(temp_lo & 0x1)) {
1170
1171 intel_alloc_chipset_flush_resource();
1172
1173 intel_private.resource_valid = 1;
1174 pci_write_config_dword(intel_private.bridge_dev, I965_IFPADDR + 4,
1175 upper_32_bits(intel_private.ifp_resource.start));
1176 pci_write_config_dword(intel_private.bridge_dev, I965_IFPADDR, (intel_private.ifp_resource.start & 0xffffffff) | 0x1);
1177 } else {
1178 u64 l64;
1179
1180 temp_lo &= ~0x1;
1181 l64 = ((u64)temp_hi << 32) | temp_lo;
1182
1183 intel_private.resource_valid = 1;
1184 intel_private.ifp_resource.start = l64;
1185 intel_private.ifp_resource.end = l64 + PAGE_SIZE;
1186 ret = request_resource(&iomem_resource, &intel_private.ifp_resource);
1187 /* some BIOSes reserve this area in a pnp resource, some don't */
1188 if (ret)
1189 intel_private.resource_valid = 0;
1190 }
1191 }
1192
1193 static void intel_i9xx_setup_flush(void)
1194 {
1195 /* return if already configured */
1196 if (intel_private.ifp_resource.start)
1197 return;
1198
1199 if (INTEL_GTT_GEN == 6)
1200 return;
1201
1202 /* setup a resource for this object */
1203 intel_private.ifp_resource.name = "Intel Flush Page";
1204 intel_private.ifp_resource.flags = IORESOURCE_MEM;
1205
1206 /* Setup chipset flush for 915 */
1207 if (IS_G33 || INTEL_GTT_GEN >= 4) {
1208 intel_i965_g33_setup_chipset_flush();
1209 } else {
1210 intel_i915_setup_chipset_flush();
1211 }
1212
1213 if (intel_private.ifp_resource.start)
1214 intel_private.i9xx_flush_page = ioremap_nocache(intel_private.ifp_resource.start, PAGE_SIZE);
1215 if (!intel_private.i9xx_flush_page)
1216 dev_err(&intel_private.pcidev->dev,
1217 "can't ioremap flush page - no chipset flushing\n");
1218 }
1219
1220 static void intel_i915_chipset_flush(struct agp_bridge_data *bridge)
1221 {
1222 if (intel_private.i9xx_flush_page)
1223 writel(1, intel_private.i9xx_flush_page);
1224 }
1225
1226 static int intel_i915_insert_entries(struct agp_memory *mem, off_t pg_start,
1227 int type)
1228 {
1229 int num_entries;
1230 void *temp;
1231 int ret = -EINVAL;
1232 int mask_type;
1233
1234 if (mem->page_count == 0)
1235 goto out;
1236
1237 temp = agp_bridge->current_size;
1238 num_entries = A_SIZE_FIX(temp)->num_entries;
1239
1240 if (pg_start < intel_private.base.gtt_stolen_entries) {
1241 dev_printk(KERN_DEBUG, &intel_private.pcidev->dev,
1242 "pg_start == 0x%.8lx, gtt_stolen_entries == 0x%.8x\n",
1243 pg_start, intel_private.base.gtt_stolen_entries);
1244
1245 dev_info(&intel_private.pcidev->dev,
1246 "trying to insert into local/stolen memory\n");
1247 goto out_err;
1248 }
1249
1250 if ((pg_start + mem->page_count) > num_entries)
1251 goto out_err;
1252
1253 /* The i915 can't check the GTT for entries since it's read only;
1254 * depend on the caller to make the correct offset decisions.
1255 */
1256
1257 if (type != mem->type)
1258 goto out_err;
1259
1260 mask_type = agp_bridge->driver->agp_type_to_mask_type(agp_bridge, type);
1261
1262 if (INTEL_GTT_GEN != 6 && mask_type != 0 &&
1263 mask_type != AGP_PHYS_MEMORY &&
1264 mask_type != INTEL_AGP_CACHED_MEMORY)
1265 goto out_err;
1266
1267 if (!mem->is_flushed)
1268 global_cache_flush();
1269
1270 intel_agp_insert_sg_entries(mem, pg_start, mask_type);
1271
1272 out:
1273 ret = 0;
1274 out_err:
1275 mem->is_flushed = true;
1276 return ret;
1277 }
1278
1279 static int intel_i915_remove_entries(struct agp_memory *mem, off_t pg_start,
1280 int type)
1281 {
1282 int i;
1283
1284 if (mem->page_count == 0)
1285 return 0;
1286
1287 if (pg_start < intel_private.base.gtt_stolen_entries) {
1288 dev_info(&intel_private.pcidev->dev,
1289 "trying to disable local/stolen memory\n");
1290 return -EINVAL;
1291 }
1292
1293 for (i = pg_start; i < (mem->page_count + pg_start); i++)
1294 writel(agp_bridge->scratch_page, intel_private.gtt+i);
1295
1296 readl(intel_private.gtt+i-1);
1297
1298 return 0;
1299 }
1300
1301 static void i965_write_entry(dma_addr_t addr, unsigned int entry,
1302 unsigned int flags)
1303 {
1304 /* Shift high bits down */
1305 addr |= (addr >> 28) & 0xf0;
1306 writel(addr | I810_PTE_VALID, intel_private.gtt + entry);
1307 }
1308
1309 static void gen6_write_entry(dma_addr_t addr, unsigned int entry,
1310 unsigned int flags)
1311 {
1312 unsigned int type_mask = flags & ~AGP_USER_CACHED_MEMORY_GFDT;
1313 unsigned int gfdt = flags & AGP_USER_CACHED_MEMORY_GFDT;
1314 u32 pte_flags;
1315
1316 if (type_mask == AGP_USER_UNCACHED_MEMORY)
1317 pte_flags = GEN6_PTE_UNCACHED;
1318 else if (type_mask == AGP_USER_CACHED_MEMORY_LLC_MLC) {
1319 pte_flags = GEN6_PTE_LLC;
1320 if (gfdt)
1321 pte_flags |= GEN6_PTE_GFDT;
1322 } else { /* set 'normal'/'cached' to LLC by default */
1323 pte_flags = GEN6_PTE_LLC_MLC;
1324 if (gfdt)
1325 pte_flags |= GEN6_PTE_GFDT;
1326 }
1327
1328 /* gen6 has bit11-4 for physical addr bit39-32 */
1329 addr |= (addr >> 28) & 0xff0;
1330 writel(addr | pte_flags, intel_private.gtt + entry);
1331 }
1332
1333 static int i9xx_setup(void)
1334 {
1335 u32 reg_addr;
1336
1337 pci_read_config_dword(intel_private.pcidev, I915_MMADDR, &reg_addr);
1338
1339 reg_addr &= 0xfff80000;
1340
1341 intel_private.registers = ioremap(reg_addr, 128 * 4096);
1342 if (!intel_private.registers)
1343 return -ENOMEM;
1344
1345 if (INTEL_GTT_GEN == 3) {
1346 u32 gtt_addr;
1347
1348 pci_read_config_dword(intel_private.pcidev,
1349 I915_PTEADDR, &gtt_addr);
1350 intel_private.gtt_bus_addr = gtt_addr;
1351 } else {
1352 u32 gtt_offset;
1353
1354 switch (INTEL_GTT_GEN) {
1355 case 5:
1356 case 6:
1357 gtt_offset = MB(2);
1358 break;
1359 case 4:
1360 default:
1361 gtt_offset = KB(512);
1362 break;
1363 }
1364 intel_private.gtt_bus_addr = reg_addr + gtt_offset;
1365 }
1366
1367 intel_private.pte_bus_addr =
1368 readl(intel_private.registers+I810_PGETBL_CTL) & 0xfffff000;
1369
1370 intel_i9xx_setup_flush();
1371
1372 return 0;
1373 }
1374
1375 /*
1376 * The i965 supports 36-bit physical addresses, but to keep
1377 * the format of the GTT the same, the bits that don't fit
1378 * in a 32-bit word are shifted down to bits 4..7.
1379 *
1380 * GCC is smart enough to notice that "(addr >> 28) & 0xf0"
1381 * is always zero on 32-bit architectures, so no need to make
1382 * this conditional.
1383 */
1384 static unsigned long intel_i965_mask_memory(struct agp_bridge_data *bridge,
1385 dma_addr_t addr, int type)
1386 {
1387 /* Shift high bits down */
1388 addr |= (addr >> 28) & 0xf0;
1389
1390 /* Type checking must be done elsewhere */
1391 return addr | bridge->driver->masks[type].mask;
1392 }
1393
1394 static unsigned long intel_gen6_mask_memory(struct agp_bridge_data *bridge,
1395 dma_addr_t addr, int type)
1396 {
1397 /* gen6 has bit11-4 for physical addr bit39-32 */
1398 addr |= (addr >> 28) & 0xff0;
1399
1400 /* Type checking must be done elsewhere */
1401 return addr | bridge->driver->masks[type].mask;
1402 }
1403
1404 static const struct agp_bridge_driver intel_810_driver = {
1405 .owner = THIS_MODULE,
1406 .aperture_sizes = intel_i810_sizes,
1407 .size_type = FIXED_APER_SIZE,
1408 .num_aperture_sizes = 2,
1409 .needs_scratch_page = true,
1410 .configure = intel_i810_configure,
1411 .fetch_size = intel_i810_fetch_size,
1412 .cleanup = intel_i810_cleanup,
1413 .mask_memory = intel_i810_mask_memory,
1414 .masks = intel_i810_masks,
1415 .agp_enable = intel_fake_agp_enable,
1416 .cache_flush = global_cache_flush,
1417 .create_gatt_table = agp_generic_create_gatt_table,
1418 .free_gatt_table = agp_generic_free_gatt_table,
1419 .insert_memory = intel_i810_insert_entries,
1420 .remove_memory = intel_i810_remove_entries,
1421 .alloc_by_type = intel_i810_alloc_by_type,
1422 .free_by_type = intel_i810_free_by_type,
1423 .agp_alloc_page = agp_generic_alloc_page,
1424 .agp_alloc_pages = agp_generic_alloc_pages,
1425 .agp_destroy_page = agp_generic_destroy_page,
1426 .agp_destroy_pages = agp_generic_destroy_pages,
1427 .agp_type_to_mask_type = agp_generic_type_to_mask_type,
1428 };
1429
1430 static const struct agp_bridge_driver intel_830_driver = {
1431 .owner = THIS_MODULE,
1432 .size_type = FIXED_APER_SIZE,
1433 .aperture_sizes = intel_fake_agp_sizes,
1434 .num_aperture_sizes = ARRAY_SIZE(intel_fake_agp_sizes),
1435 .configure = intel_fake_agp_configure,
1436 .fetch_size = intel_fake_agp_fetch_size,
1437 .cleanup = intel_gtt_cleanup,
1438 .mask_memory = intel_i810_mask_memory,
1439 .masks = intel_i810_masks,
1440 .agp_enable = intel_fake_agp_enable,
1441 .cache_flush = global_cache_flush,
1442 .create_gatt_table = intel_fake_agp_create_gatt_table,
1443 .free_gatt_table = intel_fake_agp_free_gatt_table,
1444 .insert_memory = intel_i830_insert_entries,
1445 .remove_memory = intel_i830_remove_entries,
1446 .alloc_by_type = intel_fake_agp_alloc_by_type,
1447 .free_by_type = intel_i810_free_by_type,
1448 .agp_alloc_page = agp_generic_alloc_page,
1449 .agp_alloc_pages = agp_generic_alloc_pages,
1450 .agp_destroy_page = agp_generic_destroy_page,
1451 .agp_destroy_pages = agp_generic_destroy_pages,
1452 .agp_type_to_mask_type = intel_i830_type_to_mask_type,
1453 .chipset_flush = intel_i830_chipset_flush,
1454 };
1455
1456 static const struct agp_bridge_driver intel_915_driver = {
1457 .owner = THIS_MODULE,
1458 .size_type = FIXED_APER_SIZE,
1459 .aperture_sizes = intel_fake_agp_sizes,
1460 .num_aperture_sizes = ARRAY_SIZE(intel_fake_agp_sizes),
1461 .configure = intel_fake_agp_configure,
1462 .fetch_size = intel_fake_agp_fetch_size,
1463 .cleanup = intel_gtt_cleanup,
1464 .mask_memory = intel_i810_mask_memory,
1465 .masks = intel_i810_masks,
1466 .agp_enable = intel_fake_agp_enable,
1467 .cache_flush = global_cache_flush,
1468 .create_gatt_table = intel_fake_agp_create_gatt_table,
1469 .free_gatt_table = intel_fake_agp_free_gatt_table,
1470 .insert_memory = intel_i915_insert_entries,
1471 .remove_memory = intel_i915_remove_entries,
1472 .alloc_by_type = intel_fake_agp_alloc_by_type,
1473 .free_by_type = intel_i810_free_by_type,
1474 .agp_alloc_page = agp_generic_alloc_page,
1475 .agp_alloc_pages = agp_generic_alloc_pages,
1476 .agp_destroy_page = agp_generic_destroy_page,
1477 .agp_destroy_pages = agp_generic_destroy_pages,
1478 .agp_type_to_mask_type = intel_i830_type_to_mask_type,
1479 .chipset_flush = intel_i915_chipset_flush,
1480 #if USE_PCI_DMA_API
1481 .agp_map_memory = intel_agp_map_memory,
1482 .agp_unmap_memory = intel_agp_unmap_memory,
1483 #endif
1484 };
1485
1486 static const struct agp_bridge_driver intel_i965_driver = {
1487 .owner = THIS_MODULE,
1488 .size_type = FIXED_APER_SIZE,
1489 .aperture_sizes = intel_fake_agp_sizes,
1490 .num_aperture_sizes = ARRAY_SIZE(intel_fake_agp_sizes),
1491 .configure = intel_fake_agp_configure,
1492 .fetch_size = intel_fake_agp_fetch_size,
1493 .cleanup = intel_gtt_cleanup,
1494 .mask_memory = intel_i965_mask_memory,
1495 .masks = intel_i810_masks,
1496 .agp_enable = intel_fake_agp_enable,
1497 .cache_flush = global_cache_flush,
1498 .create_gatt_table = intel_fake_agp_create_gatt_table,
1499 .free_gatt_table = intel_fake_agp_free_gatt_table,
1500 .insert_memory = intel_i915_insert_entries,
1501 .remove_memory = intel_i915_remove_entries,
1502 .alloc_by_type = intel_fake_agp_alloc_by_type,
1503 .free_by_type = intel_i810_free_by_type,
1504 .agp_alloc_page = agp_generic_alloc_page,
1505 .agp_alloc_pages = agp_generic_alloc_pages,
1506 .agp_destroy_page = agp_generic_destroy_page,
1507 .agp_destroy_pages = agp_generic_destroy_pages,
1508 .agp_type_to_mask_type = intel_i830_type_to_mask_type,
1509 .chipset_flush = intel_i915_chipset_flush,
1510 #if USE_PCI_DMA_API
1511 .agp_map_memory = intel_agp_map_memory,
1512 .agp_unmap_memory = intel_agp_unmap_memory,
1513 #endif
1514 };
1515
1516 static const struct agp_bridge_driver intel_gen6_driver = {
1517 .owner = THIS_MODULE,
1518 .size_type = FIXED_APER_SIZE,
1519 .aperture_sizes = intel_fake_agp_sizes,
1520 .num_aperture_sizes = ARRAY_SIZE(intel_fake_agp_sizes),
1521 .configure = intel_fake_agp_configure,
1522 .fetch_size = intel_fake_agp_fetch_size,
1523 .cleanup = intel_gtt_cleanup,
1524 .mask_memory = intel_gen6_mask_memory,
1525 .masks = intel_gen6_masks,
1526 .agp_enable = intel_fake_agp_enable,
1527 .cache_flush = global_cache_flush,
1528 .create_gatt_table = intel_fake_agp_create_gatt_table,
1529 .free_gatt_table = intel_fake_agp_free_gatt_table,
1530 .insert_memory = intel_i915_insert_entries,
1531 .remove_memory = intel_i915_remove_entries,
1532 .alloc_by_type = intel_fake_agp_alloc_by_type,
1533 .free_by_type = intel_i810_free_by_type,
1534 .agp_alloc_page = agp_generic_alloc_page,
1535 .agp_alloc_pages = agp_generic_alloc_pages,
1536 .agp_destroy_page = agp_generic_destroy_page,
1537 .agp_destroy_pages = agp_generic_destroy_pages,
1538 .agp_type_to_mask_type = intel_gen6_type_to_mask_type,
1539 .chipset_flush = intel_i915_chipset_flush,
1540 #if USE_PCI_DMA_API
1541 .agp_map_memory = intel_agp_map_memory,
1542 .agp_unmap_memory = intel_agp_unmap_memory,
1543 #endif
1544 };
1545
1546 static const struct agp_bridge_driver intel_g33_driver = {
1547 .owner = THIS_MODULE,
1548 .size_type = FIXED_APER_SIZE,
1549 .aperture_sizes = intel_fake_agp_sizes,
1550 .num_aperture_sizes = ARRAY_SIZE(intel_fake_agp_sizes),
1551 .configure = intel_fake_agp_configure,
1552 .fetch_size = intel_fake_agp_fetch_size,
1553 .cleanup = intel_gtt_cleanup,
1554 .mask_memory = intel_i965_mask_memory,
1555 .masks = intel_i810_masks,
1556 .agp_enable = intel_fake_agp_enable,
1557 .cache_flush = global_cache_flush,
1558 .create_gatt_table = intel_fake_agp_create_gatt_table,
1559 .free_gatt_table = intel_fake_agp_free_gatt_table,
1560 .insert_memory = intel_i915_insert_entries,
1561 .remove_memory = intel_i915_remove_entries,
1562 .alloc_by_type = intel_fake_agp_alloc_by_type,
1563 .free_by_type = intel_i810_free_by_type,
1564 .agp_alloc_page = agp_generic_alloc_page,
1565 .agp_alloc_pages = agp_generic_alloc_pages,
1566 .agp_destroy_page = agp_generic_destroy_page,
1567 .agp_destroy_pages = agp_generic_destroy_pages,
1568 .agp_type_to_mask_type = intel_i830_type_to_mask_type,
1569 .chipset_flush = intel_i915_chipset_flush,
1570 #if USE_PCI_DMA_API
1571 .agp_map_memory = intel_agp_map_memory,
1572 .agp_unmap_memory = intel_agp_unmap_memory,
1573 #endif
1574 };
1575
1576 static const struct intel_gtt_driver i8xx_gtt_driver = {
1577 .gen = 2,
1578 .setup = i830_setup,
1579 .write_entry = i830_write_entry,
1580 };
1581 static const struct intel_gtt_driver i915_gtt_driver = {
1582 .gen = 3,
1583 .setup = i9xx_setup,
1584 /* i945 is the last gpu to need phys mem (for overlay and cursors). */
1585 .write_entry = i830_write_entry,
1586 };
1587 static const struct intel_gtt_driver g33_gtt_driver = {
1588 .gen = 3,
1589 .is_g33 = 1,
1590 .setup = i9xx_setup,
1591 .write_entry = i965_write_entry,
1592 };
1593 static const struct intel_gtt_driver pineview_gtt_driver = {
1594 .gen = 3,
1595 .is_pineview = 1, .is_g33 = 1,
1596 .setup = i9xx_setup,
1597 .write_entry = i965_write_entry,
1598 };
1599 static const struct intel_gtt_driver i965_gtt_driver = {
1600 .gen = 4,
1601 .setup = i9xx_setup,
1602 .write_entry = i965_write_entry,
1603 };
1604 static const struct intel_gtt_driver g4x_gtt_driver = {
1605 .gen = 5,
1606 .setup = i9xx_setup,
1607 .write_entry = i965_write_entry,
1608 };
1609 static const struct intel_gtt_driver ironlake_gtt_driver = {
1610 .gen = 5,
1611 .is_ironlake = 1,
1612 .setup = i9xx_setup,
1613 .write_entry = i965_write_entry,
1614 };
1615 static const struct intel_gtt_driver sandybridge_gtt_driver = {
1616 .gen = 6,
1617 .setup = i9xx_setup,
1618 .write_entry = gen6_write_entry,
1619 };
1620
1621 /* Table to describe Intel GMCH and AGP/PCIE GART drivers. At least one of
1622 * gmch_driver and gtt_driver must be non-NULL, and find_gmch will determine
1623 * which one should be used if a gmch_chip_id is present.
1624 */
1625 static const struct intel_gtt_driver_description {
1626 unsigned int gmch_chip_id;
1627 char *name;
1628 const struct agp_bridge_driver *gmch_driver;
1629 const struct intel_gtt_driver *gtt_driver;
1630 } intel_gtt_chipsets[] = {
1631 { PCI_DEVICE_ID_INTEL_82810_IG1, "i810", &intel_810_driver , NULL},
1632 { PCI_DEVICE_ID_INTEL_82810_IG3, "i810", &intel_810_driver , NULL},
1633 { PCI_DEVICE_ID_INTEL_82810E_IG, "i810", &intel_810_driver , NULL},
1634 { PCI_DEVICE_ID_INTEL_82815_CGC, "i815", &intel_810_driver , NULL},
1635 { PCI_DEVICE_ID_INTEL_82830_CGC, "830M",
1636 &intel_830_driver , &i8xx_gtt_driver},
1637 { PCI_DEVICE_ID_INTEL_82845G_IG, "830M",
1638 &intel_830_driver , &i8xx_gtt_driver},
1639 { PCI_DEVICE_ID_INTEL_82854_IG, "854",
1640 &intel_830_driver , &i8xx_gtt_driver},
1641 { PCI_DEVICE_ID_INTEL_82855GM_IG, "855GM",
1642 &intel_830_driver , &i8xx_gtt_driver},
1643 { PCI_DEVICE_ID_INTEL_82865_IG, "865",
1644 &intel_830_driver , &i8xx_gtt_driver},
1645 { PCI_DEVICE_ID_INTEL_E7221_IG, "E7221 (i915)",
1646 &intel_915_driver , &i915_gtt_driver },
1647 { PCI_DEVICE_ID_INTEL_82915G_IG, "915G",
1648 &intel_915_driver , &i915_gtt_driver },
1649 { PCI_DEVICE_ID_INTEL_82915GM_IG, "915GM",
1650 &intel_915_driver , &i915_gtt_driver },
1651 { PCI_DEVICE_ID_INTEL_82945G_IG, "945G",
1652 &intel_915_driver , &i915_gtt_driver },
1653 { PCI_DEVICE_ID_INTEL_82945GM_IG, "945GM",
1654 &intel_915_driver , &i915_gtt_driver },
1655 { PCI_DEVICE_ID_INTEL_82945GME_IG, "945GME",
1656 &intel_915_driver , &i915_gtt_driver },
1657 { PCI_DEVICE_ID_INTEL_82946GZ_IG, "946GZ",
1658 &intel_i965_driver , &i965_gtt_driver },
1659 { PCI_DEVICE_ID_INTEL_82G35_IG, "G35",
1660 &intel_i965_driver , &i965_gtt_driver },
1661 { PCI_DEVICE_ID_INTEL_82965Q_IG, "965Q",
1662 &intel_i965_driver , &i965_gtt_driver },
1663 { PCI_DEVICE_ID_INTEL_82965G_IG, "965G",
1664 &intel_i965_driver , &i965_gtt_driver },
1665 { PCI_DEVICE_ID_INTEL_82965GM_IG, "965GM",
1666 &intel_i965_driver , &i965_gtt_driver },
1667 { PCI_DEVICE_ID_INTEL_82965GME_IG, "965GME/GLE",
1668 &intel_i965_driver , &i965_gtt_driver },
1669 { PCI_DEVICE_ID_INTEL_G33_IG, "G33",
1670 &intel_g33_driver , &g33_gtt_driver },
1671 { PCI_DEVICE_ID_INTEL_Q35_IG, "Q35",
1672 &intel_g33_driver , &g33_gtt_driver },
1673 { PCI_DEVICE_ID_INTEL_Q33_IG, "Q33",
1674 &intel_g33_driver , &g33_gtt_driver },
1675 { PCI_DEVICE_ID_INTEL_PINEVIEW_M_IG, "GMA3150",
1676 &intel_g33_driver , &pineview_gtt_driver },
1677 { PCI_DEVICE_ID_INTEL_PINEVIEW_IG, "GMA3150",
1678 &intel_g33_driver , &pineview_gtt_driver },
1679 { PCI_DEVICE_ID_INTEL_GM45_IG, "GM45",
1680 &intel_i965_driver , &g4x_gtt_driver },
1681 { PCI_DEVICE_ID_INTEL_EAGLELAKE_IG, "Eaglelake",
1682 &intel_i965_driver , &g4x_gtt_driver },
1683 { PCI_DEVICE_ID_INTEL_Q45_IG, "Q45/Q43",
1684 &intel_i965_driver , &g4x_gtt_driver },
1685 { PCI_DEVICE_ID_INTEL_G45_IG, "G45/G43",
1686 &intel_i965_driver , &g4x_gtt_driver },
1687 { PCI_DEVICE_ID_INTEL_B43_IG, "B43",
1688 &intel_i965_driver , &g4x_gtt_driver },
1689 { PCI_DEVICE_ID_INTEL_B43_1_IG, "B43",
1690 &intel_i965_driver , &g4x_gtt_driver },
1691 { PCI_DEVICE_ID_INTEL_G41_IG, "G41",
1692 &intel_i965_driver , &g4x_gtt_driver },
1693 { PCI_DEVICE_ID_INTEL_IRONLAKE_D_IG,
1694 "HD Graphics", &intel_i965_driver , &ironlake_gtt_driver },
1695 { PCI_DEVICE_ID_INTEL_IRONLAKE_M_IG,
1696 "HD Graphics", &intel_i965_driver , &ironlake_gtt_driver },
1697 { PCI_DEVICE_ID_INTEL_SANDYBRIDGE_GT1_IG,
1698 "Sandybridge", &intel_gen6_driver , &sandybridge_gtt_driver },
1699 { PCI_DEVICE_ID_INTEL_SANDYBRIDGE_GT2_IG,
1700 "Sandybridge", &intel_gen6_driver , &sandybridge_gtt_driver },
1701 { PCI_DEVICE_ID_INTEL_SANDYBRIDGE_GT2_PLUS_IG,
1702 "Sandybridge", &intel_gen6_driver , &sandybridge_gtt_driver },
1703 { PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_GT1_IG,
1704 "Sandybridge", &intel_gen6_driver , &sandybridge_gtt_driver },
1705 { PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_GT2_IG,
1706 "Sandybridge", &intel_gen6_driver , &sandybridge_gtt_driver },
1707 { PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_GT2_PLUS_IG,
1708 "Sandybridge", &intel_gen6_driver , &sandybridge_gtt_driver },
1709 { PCI_DEVICE_ID_INTEL_SANDYBRIDGE_S_IG,
1710 "Sandybridge", &intel_gen6_driver , &sandybridge_gtt_driver },
1711 { 0, NULL, NULL }
1712 };
1713
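/*
 * Look up the integrated graphics PCI device matching the given chip
 * id (preferring function 0) and stash a reference in intel_private.
 */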
1714 static int find_gmch(u16 device)
1715 {
1716 struct pci_dev *gmch_device;
1717
1718 gmch_device = pci_get_device(PCI_VENDOR_ID_INTEL, device, NULL);
1719 if (gmch_device && PCI_FUNC(gmch_device->devfn) != 0) {
1720 gmch_device = pci_get_device(PCI_VENDOR_ID_INTEL,
1721 device, gmch_device);
1722 }
1723
1724 if (!gmch_device)
1725 return 0;
1726
1727 intel_private.pcidev = gmch_device;
1728 return 1;
1729 }
1730
1731 int intel_gmch_probe(struct pci_dev *pdev,
1732 struct agp_bridge_data *bridge)
1733 {
1734 int i, mask;
1735 bridge->driver = NULL;
1736
1737 for (i = 0; intel_gtt_chipsets[i].name != NULL; i++) {
1738 if (find_gmch(intel_gtt_chipsets[i].gmch_chip_id)) {
1739 bridge->driver =
1740 intel_gtt_chipsets[i].gmch_driver;
1741 intel_private.driver =
1742 intel_gtt_chipsets[i].gtt_driver;
1743 break;
1744 }
1745 }
1746
1747 if (!bridge->driver)
1748 return 0;
1749
1750 bridge->dev_private_data = &intel_private;
1751 bridge->dev = pdev;
1752
1753 intel_private.bridge_dev = pci_dev_get(pdev);
1754
1755 dev_info(&pdev->dev, "Intel %s Chipset\n", intel_gtt_chipsets[i].name);
1756
1757 if (bridge->driver->mask_memory == intel_gen6_mask_memory)
1758 mask = 40;
1759 else if (bridge->driver->mask_memory == intel_i965_mask_memory)
1760 mask = 36;
1761 else
1762 mask = 32;
1763
1764 if (pci_set_dma_mask(intel_private.pcidev, DMA_BIT_MASK(mask)))
1765 dev_err(&intel_private.pcidev->dev,
1766 "set gfx device dma mask %d-bit failed!\n", mask);
1767 else
1768 pci_set_consistent_dma_mask(intel_private.pcidev,
1769 DMA_BIT_MASK(mask));
1770
1771 if (bridge->driver == &intel_810_driver)
1772 return 1;
1773
1774 if (intel_gtt_init() != 0)
1775 return 0;
1776
1777 return 1;
1778 }
1779 EXPORT_SYMBOL(intel_gmch_probe);
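/*
 * Rough usage sketch, for illustration only (the real call site for
 * this probe hook is the intel-agp bridge driver):
 *
 *	if (intel_gmch_probe(pdev, bridge)) {
 *		struct intel_gtt *gtt = intel_gtt_get();
 *
 *		use gtt->gtt_total_entries, gtt->gtt_stolen_entries, ...
 *	}
 *	...
 *	intel_gmch_remove(pdev);
 */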
1780
1781 struct intel_gtt *intel_gtt_get(void)
1782 {
1783 return &intel_private.base;
1784 }
1785 EXPORT_SYMBOL(intel_gtt_get);
1786
1787 void intel_gmch_remove(struct pci_dev *pdev)
1788 {
1789 if (intel_private.pcidev)
1790 pci_dev_put(intel_private.pcidev);
1791 if (intel_private.bridge_dev)
1792 pci_dev_put(intel_private.bridge_dev);
1793 }
1794 EXPORT_SYMBOL(intel_gmch_remove);
1795
1796 MODULE_AUTHOR("Dave Jones <davej@redhat.com>");
1797 MODULE_LICENSE("GPL and additional rights");