Revert "UBI: use mtd->writebufsize to set minimal I/O unit size"
drivers/char/agp/intel-gtt.c
1 /*
2 * Intel GTT (Graphics Translation Table) routines
3 *
4 * Caveat: This driver implements the linux agp interface, but this is far from
5 * an agp driver! GTT support ended up here for purely historical reasons: The
6 * old userspace intel graphics drivers needed an interface to map memory into
7 * the GTT. And the drm provides a default interface for graphic devices sitting
8 * on an agp port. So it made sense to fake the GTT support as an agp port to
9 * avoid having to create a new api.
10 *
11 * With gem this does not make much sense anymore, just needlessly complicates
12 * the code. But as long as the old graphics stack is still supported, it's stuck
13 * here.
14 *
15 * /fairy-tale-mode off
16 */
17
18 #include <linux/module.h>
19 #include <linux/pci.h>
20 #include <linux/init.h>
21 #include <linux/kernel.h>
22 #include <linux/pagemap.h>
23 #include <linux/agp_backend.h>
24 #include <asm/smp.h>
25 #include "agp.h"
26 #include "intel-agp.h"
27 #include <drm/intel-gtt.h>
28
29 /*
30 * If we have Intel graphics, we're not going to have anything other than
31 * an Intel IOMMU. So make the correct use of the PCI DMA API contingent
32 * on the Intel IOMMU support (CONFIG_DMAR).
33 * Only newer chipsets need to bother with this, of course.
34 */
35 #ifdef CONFIG_DMAR
36 #define USE_PCI_DMA_API 1
37 #else
38 #define USE_PCI_DMA_API 0
39 #endif
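/* Note: when USE_PCI_DMA_API is 0, intel_gtt_init() below leaves
 * base.needs_dmar unset, so the insert paths skip pci_map_sg()/pci_map_page()
 * and write page_to_phys() addresses straight into the GTT. */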
40
41 struct intel_gtt_driver {
42 unsigned int gen : 8;
43 unsigned int is_g33 : 1;
44 unsigned int is_pineview : 1;
45 unsigned int is_ironlake : 1;
46 unsigned int has_pgtbl_enable : 1;
47 unsigned int dma_mask_size : 8;
48 /* Chipset specific GTT setup */
49 int (*setup)(void);
50 /* This should undo anything done in ->setup() save the unmapping
51 * of the mmio register file, that's done in the generic code. */
52 void (*cleanup)(void);
53 void (*write_entry)(dma_addr_t addr, unsigned int entry, unsigned int flags);
54 /* Flags is a more or less chipset specific opaque value.
55 * For chipsets that need to support old ums (non-gem) code, this
56 * needs to be identical to the various supported agp memory types! */
57 bool (*check_flags)(unsigned int flags);
58 void (*chipset_flush)(void);
59 };
60
61 static struct _intel_private {
62 struct intel_gtt base;
63 const struct intel_gtt_driver *driver;
64 struct pci_dev *pcidev; /* device one */
65 struct pci_dev *bridge_dev;
66 u8 __iomem *registers;
67 phys_addr_t gtt_bus_addr;
68 phys_addr_t gma_bus_addr;
69 u32 PGETBL_save;
70 u32 __iomem *gtt; /* I915G */
71 int num_dcache_entries;
72 union {
73 void __iomem *i9xx_flush_page;
74 void *i8xx_flush_page;
75 };
76 char *i81x_gtt_table;
77 struct page *i8xx_page;
78 struct resource ifp_resource;
79 int resource_valid;
80 struct page *scratch_page;
81 dma_addr_t scratch_page_dma;
82 } intel_private;
83
84 #define INTEL_GTT_GEN intel_private.driver->gen
85 #define IS_G33 intel_private.driver->is_g33
86 #define IS_PINEVIEW intel_private.driver->is_pineview
87 #define IS_IRONLAKE intel_private.driver->is_ironlake
88 #define HAS_PGTBL_EN intel_private.driver->has_pgtbl_enable
89
90 int intel_gtt_map_memory(struct page **pages, unsigned int num_entries,
91 struct scatterlist **sg_list, int *num_sg)
92 {
93 struct sg_table st;
94 struct scatterlist *sg;
95 int i;
96
97 if (*sg_list)
98 return 0; /* already mapped (e.g. on resume) */
99
100 DBG("try mapping %lu pages\n", (unsigned long)num_entries);
101
102 if (sg_alloc_table(&st, num_entries, GFP_KERNEL))
103 goto err;
104
105 *sg_list = sg = st.sgl;
106
107 for (i = 0 ; i < num_entries; i++, sg = sg_next(sg))
108 sg_set_page(sg, pages[i], PAGE_SIZE, 0);
109
110 *num_sg = pci_map_sg(intel_private.pcidev, *sg_list,
111 num_entries, PCI_DMA_BIDIRECTIONAL);
112 if (unlikely(!*num_sg))
113 goto err;
114
115 return 0;
116
117 err:
118 sg_free_table(&st);
119 return -ENOMEM;
120 }
121 EXPORT_SYMBOL(intel_gtt_map_memory);
122
123 void intel_gtt_unmap_memory(struct scatterlist *sg_list, int num_sg)
124 {
125 struct sg_table st;
126 DBG("try unmapping %lu sg entries\n", (unsigned long)num_sg);
127
128 pci_unmap_sg(intel_private.pcidev, sg_list,
129 num_sg, PCI_DMA_BIDIRECTIONAL);
130
131 st.sgl = sg_list;
132 st.orig_nents = st.nents = num_sg;
133
134 sg_free_table(&st);
135 }
136 EXPORT_SYMBOL(intel_gtt_unmap_memory);
137
138 static void intel_fake_agp_enable(struct agp_bridge_data *bridge, u32 mode)
139 {
140 return;
141 }
142
143 /* Exists to support ARGB cursors */
144 static struct page *i8xx_alloc_pages(void)
145 {
146 struct page *page;
147
148 page = alloc_pages(GFP_KERNEL | GFP_DMA32, 2);
149 if (page == NULL)
150 return NULL;
151
152 if (set_pages_uc(page, 4) < 0) {
153 set_pages_wb(page, 4);
154 __free_pages(page, 2);
155 return NULL;
156 }
157 get_page(page);
158 atomic_inc(&agp_bridge->current_memory_agp);
159 return page;
160 }
161
162 static void i8xx_destroy_pages(struct page *page)
163 {
164 if (page == NULL)
165 return;
166
167 set_pages_wb(page, 4);
168 put_page(page);
169 __free_pages(page, 2);
170 atomic_dec(&agp_bridge->current_memory_agp);
171 }
172
173 #define I810_GTT_ORDER 4
174 static int i810_setup(void)
175 {
176 u32 reg_addr;
177 char *gtt_table;
178
179 /* i81x does not preallocate the gtt. It's always 64kb in size. */
180 gtt_table = alloc_gatt_pages(I810_GTT_ORDER);
181 if (gtt_table == NULL)
182 return -ENOMEM;
183 intel_private.i81x_gtt_table = gtt_table;
184
185 pci_read_config_dword(intel_private.pcidev, I810_MMADDR, &reg_addr);
186 reg_addr &= 0xfff80000;
187
188 intel_private.registers = ioremap(reg_addr, KB(64));
189 if (!intel_private.registers)
190 return -ENOMEM;
191
192 writel(virt_to_phys(gtt_table) | I810_PGETBL_ENABLED,
193 intel_private.registers+I810_PGETBL_CTL);
194
195 intel_private.gtt_bus_addr = reg_addr + I810_PTE_BASE;
196
197 if ((readl(intel_private.registers+I810_DRAM_CTL)
198 & I810_DRAM_ROW_0) == I810_DRAM_ROW_0_SDRAM) {
199 dev_info(&intel_private.pcidev->dev,
200 "detected 4MB dedicated video ram\n");
201 intel_private.num_dcache_entries = 1024;
202 }
203
204 return 0;
205 }
206
207 static void i810_cleanup(void)
208 {
209 writel(0, intel_private.registers+I810_PGETBL_CTL);
210 free_gatt_pages(intel_private.i81x_gtt_table, I810_GTT_ORDER);
211 }
212
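/* The i810 can carve out dedicated display-cache ("dcache") memory:
 * i810_setup() detects 4MB of it and exposes 1024 entries (1024 4KB pages).
 * Dcache pages are addressed linearly, so entry i is simply programmed with
 * i << PAGE_SHIFT, and i810_write_entry() adds I810_PTE_LOCAL for the
 * AGP_DCACHE_MEMORY type. */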
213 static int i810_insert_dcache_entries(struct agp_memory *mem, off_t pg_start,
214 int type)
215 {
216 int i;
217
218 if ((pg_start + mem->page_count)
219 > intel_private.num_dcache_entries)
220 return -EINVAL;
221
222 if (!mem->is_flushed)
223 global_cache_flush();
224
225 for (i = pg_start; i < (pg_start + mem->page_count); i++) {
226 dma_addr_t addr = i << PAGE_SHIFT;
227 intel_private.driver->write_entry(addr,
228 i, type);
229 }
230 readl(intel_private.gtt+i-1); /* PCI posting. */
231
232 return 0;
233 }
234
235 /*
236 * The i810/i830 requires a physical address to program its mouse
237 * pointer into hardware.
238 * However the Xserver still writes to it through the agp aperture.
239 */
240 static struct agp_memory *alloc_agpphysmem_i8xx(size_t pg_count, int type)
241 {
242 struct agp_memory *new;
243 struct page *page;
244
245 switch (pg_count) {
246 case 1: page = agp_bridge->driver->agp_alloc_page(agp_bridge);
247 break;
248 case 4:
249 /* kludge to get 4 physical pages for ARGB cursor */
250 page = i8xx_alloc_pages();
251 break;
252 default:
253 return NULL;
254 }
255
256 if (page == NULL)
257 return NULL;
258
259 new = agp_create_memory(pg_count);
260 if (new == NULL)
261 return NULL;
262
263 new->pages[0] = page;
264 if (pg_count == 4) {
265 /* kludge to get 4 physical pages for ARGB cursor */
266 new->pages[1] = new->pages[0] + 1;
267 new->pages[2] = new->pages[1] + 1;
268 new->pages[3] = new->pages[2] + 1;
269 }
270 new->page_count = pg_count;
271 new->num_scratch_pages = pg_count;
272 new->type = AGP_PHYS_MEMORY;
273 new->physical = page_to_phys(new->pages[0]);
274 return new;
275 }
276
277 static void intel_i810_free_by_type(struct agp_memory *curr)
278 {
279 agp_free_key(curr->key);
280 if (curr->type == AGP_PHYS_MEMORY) {
281 if (curr->page_count == 4)
282 i8xx_destroy_pages(curr->pages[0]);
283 else {
284 agp_bridge->driver->agp_destroy_page(curr->pages[0],
285 AGP_PAGE_DESTROY_UNMAP);
286 agp_bridge->driver->agp_destroy_page(curr->pages[0],
287 AGP_PAGE_DESTROY_FREE);
288 }
289 agp_free_page_array(curr);
290 }
291 kfree(curr);
292 }
293
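/* The scratch page backs every GTT entry that has no real memory behind it
 * (see intel_fake_agp_configure() and intel_gtt_clear_range()), so stray GPU
 * accesses hit a harmless, uncached page instead of arbitrary memory. */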
294 static int intel_gtt_setup_scratch_page(void)
295 {
296 struct page *page;
297 dma_addr_t dma_addr;
298
299 page = alloc_page(GFP_KERNEL | GFP_DMA32 | __GFP_ZERO);
300 if (page == NULL)
301 return -ENOMEM;
302 get_page(page);
303 set_pages_uc(page, 1);
304
305 if (intel_private.base.needs_dmar) {
306 dma_addr = pci_map_page(intel_private.pcidev, page, 0,
307 PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
308 if (pci_dma_mapping_error(intel_private.pcidev, dma_addr))
309 return -EINVAL;
310
311 intel_private.scratch_page_dma = dma_addr;
312 } else
313 intel_private.scratch_page_dma = page_to_phys(page);
314
315 intel_private.scratch_page = page;
316
317 return 0;
318 }
319
320 static void i810_write_entry(dma_addr_t addr, unsigned int entry,
321 unsigned int flags)
322 {
323 u32 pte_flags = I810_PTE_VALID;
324
325 switch (flags) {
326 case AGP_DCACHE_MEMORY:
327 pte_flags |= I810_PTE_LOCAL;
328 break;
329 case AGP_USER_CACHED_MEMORY:
330 pte_flags |= I830_PTE_SYSTEM_CACHED;
331 break;
332 }
333
334 writel(addr | pte_flags, intel_private.gtt + entry);
335 }
336
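/* Fake AGP aperture sizes: {aperture size in MB, GTT entries, page order}.
 * The entry count is aperture bytes / PAGE_SIZE (e.g. 32MB / 4KB = 8192), and
 * the page order is that of the matching translation table: 8192 entries *
 * 4 bytes = 32KB = 2^3 pages. */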
337 static const struct aper_size_info_fixed intel_fake_agp_sizes[] = {
338 {32, 8192, 3},
339 {64, 16384, 4},
340 {128, 32768, 5},
341 {256, 65536, 6},
342 {512, 131072, 7},
343 };
344
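/* "Stolen" memory is main RAM the BIOS reserves for the integrated GPU at
 * boot. Its size is not reported through a BAR, so it has to be decoded from
 * the chipset-specific GMCH control word read below. */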
345 static unsigned int intel_gtt_stolen_size(void)
346 {
347 u16 gmch_ctrl;
348 u8 rdct;
349 int local = 0;
350 static const int ddt[4] = { 0, 16, 32, 64 };
351 unsigned int stolen_size = 0;
352
353 if (INTEL_GTT_GEN == 1)
354 return 0; /* no stolen mem on i81x */
355
356 pci_read_config_word(intel_private.bridge_dev,
357 I830_GMCH_CTRL, &gmch_ctrl);
358
359 if (intel_private.bridge_dev->device == PCI_DEVICE_ID_INTEL_82830_HB ||
360 intel_private.bridge_dev->device == PCI_DEVICE_ID_INTEL_82845G_HB) {
361 switch (gmch_ctrl & I830_GMCH_GMS_MASK) {
362 case I830_GMCH_GMS_STOLEN_512:
363 stolen_size = KB(512);
364 break;
365 case I830_GMCH_GMS_STOLEN_1024:
366 stolen_size = MB(1);
367 break;
368 case I830_GMCH_GMS_STOLEN_8192:
369 stolen_size = MB(8);
370 break;
371 case I830_GMCH_GMS_LOCAL:
372 rdct = readb(intel_private.registers+I830_RDRAM_CHANNEL_TYPE);
373 stolen_size = (I830_RDRAM_ND(rdct) + 1) *
374 MB(ddt[I830_RDRAM_DDT(rdct)]);
375 local = 1;
376 break;
377 default:
378 stolen_size = 0;
379 break;
380 }
381 } else if (INTEL_GTT_GEN == 6) {
382 /*
383 * SandyBridge has new memory control reg at 0x50.w
384 */
385 u16 snb_gmch_ctl;
386 pci_read_config_word(intel_private.pcidev, SNB_GMCH_CTRL, &snb_gmch_ctl);
387 switch (snb_gmch_ctl & SNB_GMCH_GMS_STOLEN_MASK) {
388 case SNB_GMCH_GMS_STOLEN_32M:
389 stolen_size = MB(32);
390 break;
391 case SNB_GMCH_GMS_STOLEN_64M:
392 stolen_size = MB(64);
393 break;
394 case SNB_GMCH_GMS_STOLEN_96M:
395 stolen_size = MB(96);
396 break;
397 case SNB_GMCH_GMS_STOLEN_128M:
398 stolen_size = MB(128);
399 break;
400 case SNB_GMCH_GMS_STOLEN_160M:
401 stolen_size = MB(160);
402 break;
403 case SNB_GMCH_GMS_STOLEN_192M:
404 stolen_size = MB(192);
405 break;
406 case SNB_GMCH_GMS_STOLEN_224M:
407 stolen_size = MB(224);
408 break;
409 case SNB_GMCH_GMS_STOLEN_256M:
410 stolen_size = MB(256);
411 break;
412 case SNB_GMCH_GMS_STOLEN_288M:
413 stolen_size = MB(288);
414 break;
415 case SNB_GMCH_GMS_STOLEN_320M:
416 stolen_size = MB(320);
417 break;
418 case SNB_GMCH_GMS_STOLEN_352M:
419 stolen_size = MB(352);
420 break;
421 case SNB_GMCH_GMS_STOLEN_384M:
422 stolen_size = MB(384);
423 break;
424 case SNB_GMCH_GMS_STOLEN_416M:
425 stolen_size = MB(416);
426 break;
427 case SNB_GMCH_GMS_STOLEN_448M:
428 stolen_size = MB(448);
429 break;
430 case SNB_GMCH_GMS_STOLEN_480M:
431 stolen_size = MB(480);
432 break;
433 case SNB_GMCH_GMS_STOLEN_512M:
434 stolen_size = MB(512);
435 break;
436 }
437 } else {
438 switch (gmch_ctrl & I855_GMCH_GMS_MASK) {
439 case I855_GMCH_GMS_STOLEN_1M:
440 stolen_size = MB(1);
441 break;
442 case I855_GMCH_GMS_STOLEN_4M:
443 stolen_size = MB(4);
444 break;
445 case I855_GMCH_GMS_STOLEN_8M:
446 stolen_size = MB(8);
447 break;
448 case I855_GMCH_GMS_STOLEN_16M:
449 stolen_size = MB(16);
450 break;
451 case I855_GMCH_GMS_STOLEN_32M:
452 stolen_size = MB(32);
453 break;
454 case I915_GMCH_GMS_STOLEN_48M:
455 stolen_size = MB(48);
456 break;
457 case I915_GMCH_GMS_STOLEN_64M:
458 stolen_size = MB(64);
459 break;
460 case G33_GMCH_GMS_STOLEN_128M:
461 stolen_size = MB(128);
462 break;
463 case G33_GMCH_GMS_STOLEN_256M:
464 stolen_size = MB(256);
465 break;
466 case INTEL_GMCH_GMS_STOLEN_96M:
467 stolen_size = MB(96);
468 break;
469 case INTEL_GMCH_GMS_STOLEN_160M:
470 stolen_size = MB(160);
471 break;
472 case INTEL_GMCH_GMS_STOLEN_224M:
473 stolen_size = MB(224);
474 break;
475 case INTEL_GMCH_GMS_STOLEN_352M:
476 stolen_size = MB(352);
477 break;
478 default:
479 stolen_size = 0;
480 break;
481 }
482 }
483
484 if (stolen_size > 0) {
485 dev_info(&intel_private.bridge_dev->dev, "detected %dK %s memory\n",
486 stolen_size / KB(1), local ? "local" : "stolen");
487 } else {
488 dev_info(&intel_private.bridge_dev->dev,
489 "no pre-allocated video memory detected\n");
490 stolen_size = 0;
491 }
492
493 return stolen_size;
494 }
495
496 static void i965_adjust_pgetbl_size(unsigned int size_flag)
497 {
498 u32 pgetbl_ctl, pgetbl_ctl2;
499
500 /* ensure that ppgtt is disabled */
501 pgetbl_ctl2 = readl(intel_private.registers+I965_PGETBL_CTL2);
502 pgetbl_ctl2 &= ~I810_PGETBL_ENABLED;
503 writel(pgetbl_ctl2, intel_private.registers+I965_PGETBL_CTL2);
504
505 /* write the new ggtt size */
506 pgetbl_ctl = readl(intel_private.registers+I810_PGETBL_CTL);
507 pgetbl_ctl &= ~I965_PGETBL_SIZE_MASK;
508 pgetbl_ctl |= size_flag;
509 writel(pgetbl_ctl, intel_private.registers+I810_PGETBL_CTL);
510 }
511
512 static unsigned int i965_gtt_total_entries(void)
513 {
514 int size;
515 u32 pgetbl_ctl;
516 u16 gmch_ctl;
517
518 pci_read_config_word(intel_private.bridge_dev,
519 I830_GMCH_CTRL, &gmch_ctl);
520
521 if (INTEL_GTT_GEN == 5) {
522 switch (gmch_ctl & G4x_GMCH_SIZE_MASK) {
523 case G4x_GMCH_SIZE_1M:
524 case G4x_GMCH_SIZE_VT_1M:
525 i965_adjust_pgetbl_size(I965_PGETBL_SIZE_1MB);
526 break;
527 case G4x_GMCH_SIZE_VT_1_5M:
528 i965_adjust_pgetbl_size(I965_PGETBL_SIZE_1_5MB);
529 break;
530 case G4x_GMCH_SIZE_2M:
531 case G4x_GMCH_SIZE_VT_2M:
532 i965_adjust_pgetbl_size(I965_PGETBL_SIZE_2MB);
533 break;
534 }
535 }
536
537 pgetbl_ctl = readl(intel_private.registers+I810_PGETBL_CTL);
538
539 switch (pgetbl_ctl & I965_PGETBL_SIZE_MASK) {
540 case I965_PGETBL_SIZE_128KB:
541 size = KB(128);
542 break;
543 case I965_PGETBL_SIZE_256KB:
544 size = KB(256);
545 break;
546 case I965_PGETBL_SIZE_512KB:
547 size = KB(512);
548 break;
549 /* GTT pagetable sizes bigger than 512KB are not possible on G33! */
550 case I965_PGETBL_SIZE_1MB:
551 size = KB(1024);
552 break;
553 case I965_PGETBL_SIZE_2MB:
554 size = KB(2048);
555 break;
556 case I965_PGETBL_SIZE_1_5MB:
557 size = KB(1024 + 512);
558 break;
559 default:
560 dev_info(&intel_private.pcidev->dev,
561 "unknown page table size, assuming 512KB\n");
562 size = KB(512);
563 }
564
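/* Each GTT entry is 4 bytes, so a table of `size` bytes holds size/4
 * entries; e.g. a 512KB table maps 128K pages, i.e. 512MB of GTT space. */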
565 return size/4;
566 }
567
568 static unsigned int intel_gtt_total_entries(void)
569 {
570 int size;
571
572 if (IS_G33 || INTEL_GTT_GEN == 4 || INTEL_GTT_GEN == 5)
573 return i965_gtt_total_entries();
574 else if (INTEL_GTT_GEN == 6) {
575 u16 snb_gmch_ctl;
576
577 pci_read_config_word(intel_private.pcidev, SNB_GMCH_CTRL, &snb_gmch_ctl);
578 switch (snb_gmch_ctl & SNB_GTT_SIZE_MASK) {
579 default:
580 case SNB_GTT_SIZE_0M:
581 printk(KERN_ERR "Bad GTT size mask: 0x%04x.\n", snb_gmch_ctl);
582 size = MB(0);
583 break;
584 case SNB_GTT_SIZE_1M:
585 size = MB(1);
586 break;
587 case SNB_GTT_SIZE_2M:
588 size = MB(2);
589 break;
590 }
591 return size/4;
592 } else {
593 /* On previous hardware, the GTT size was just what was
594 * required to map the aperture.
595 */
596 return intel_private.base.gtt_mappable_entries;
597 }
598 }
599
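/* Mappable entries cover only the slice of the GTT that is visible through
 * the PCI aperture (the CPU-visible window), i.e. aperture bytes divided by
 * PAGE_SIZE; gtt_total_entries can be larger on newer chipsets. */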
600 static unsigned int intel_gtt_mappable_entries(void)
601 {
602 unsigned int aperture_size;
603
604 if (INTEL_GTT_GEN == 1) {
605 u32 smram_miscc;
606
607 pci_read_config_dword(intel_private.bridge_dev,
608 I810_SMRAM_MISCC, &smram_miscc);
609
610 if ((smram_miscc & I810_GFX_MEM_WIN_SIZE)
611 == I810_GFX_MEM_WIN_32M)
612 aperture_size = MB(32);
613 else
614 aperture_size = MB(64);
615 } else if (INTEL_GTT_GEN == 2) {
616 u16 gmch_ctrl;
617
618 pci_read_config_word(intel_private.bridge_dev,
619 I830_GMCH_CTRL, &gmch_ctrl);
620
621 if ((gmch_ctrl & I830_GMCH_MEM_MASK) == I830_GMCH_MEM_64M)
622 aperture_size = MB(64);
623 else
624 aperture_size = MB(128);
625 } else {
626 /* 9xx supports large sizes, just look at the length */
627 aperture_size = pci_resource_len(intel_private.pcidev, 2);
628 }
629
630 return aperture_size >> PAGE_SHIFT;
631 }
632
633 static void intel_gtt_teardown_scratch_page(void)
634 {
635 set_pages_wb(intel_private.scratch_page, 1);
636 pci_unmap_page(intel_private.pcidev, intel_private.scratch_page_dma,
637 PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
638 put_page(intel_private.scratch_page);
639 __free_page(intel_private.scratch_page);
640 }
641
642 static void intel_gtt_cleanup(void)
643 {
644 intel_private.driver->cleanup();
645
646 iounmap(intel_private.gtt);
647 iounmap(intel_private.registers);
648
649 intel_gtt_teardown_scratch_page();
650 }
651
652 static int intel_gtt_init(void)
653 {
654 u32 gtt_map_size;
655 int ret;
656
657 ret = intel_private.driver->setup();
658 if (ret != 0)
659 return ret;
660
661 intel_private.base.gtt_mappable_entries = intel_gtt_mappable_entries();
662 intel_private.base.gtt_total_entries = intel_gtt_total_entries();
663
664 /* save the PGETBL reg for resume */
665 intel_private.PGETBL_save =
666 readl(intel_private.registers+I810_PGETBL_CTL)
667 & ~I810_PGETBL_ENABLED;
668 /* we only ever restore the register when enabling the PGTBL... */
669 if (HAS_PGTBL_EN)
670 intel_private.PGETBL_save |= I810_PGETBL_ENABLED;
671
672 dev_info(&intel_private.bridge_dev->dev,
673 "detected gtt size: %dK total, %dK mappable\n",
674 intel_private.base.gtt_total_entries * 4,
675 intel_private.base.gtt_mappable_entries * 4);
676
677 gtt_map_size = intel_private.base.gtt_total_entries * 4;
678
679 intel_private.gtt = ioremap(intel_private.gtt_bus_addr,
680 gtt_map_size);
681 if (!intel_private.gtt) {
682 intel_private.driver->cleanup();
683 iounmap(intel_private.registers);
684 return -ENOMEM;
685 }
686
687 global_cache_flush(); /* FIXME: ? */
688
689 intel_private.base.stolen_size = intel_gtt_stolen_size();
690
691 intel_private.base.needs_dmar = USE_PCI_DMA_API && INTEL_GTT_GEN > 2;
692
693 ret = intel_gtt_setup_scratch_page();
694 if (ret != 0) {
695 intel_gtt_cleanup();
696 return ret;
697 }
698
699 return 0;
700 }
701
702 static int intel_fake_agp_fetch_size(void)
703 {
704 int num_sizes = ARRAY_SIZE(intel_fake_agp_sizes);
705 unsigned int aper_size;
706 int i;
707
708 aper_size = (intel_private.base.gtt_mappable_entries << PAGE_SHIFT)
709 / MB(1);
710
711 for (i = 0; i < num_sizes; i++) {
712 if (aper_size == intel_fake_agp_sizes[i].size) {
713 agp_bridge->current_size =
714 (void *) (intel_fake_agp_sizes + i);
715 return aper_size;
716 }
717 }
718
719 return 0;
720 }
721
722 static void i830_cleanup(void)
723 {
724 if (intel_private.i8xx_flush_page) {
725 kunmap(intel_private.i8xx_flush_page);
726 intel_private.i8xx_flush_page = NULL;
727 }
728
729 __free_page(intel_private.i8xx_page);
730 intel_private.i8xx_page = NULL;
731 }
732
733 static void intel_i830_setup_flush(void)
734 {
735 /* return if we've already set the flush mechanism up */
736 if (intel_private.i8xx_page)
737 return;
738
739 intel_private.i8xx_page = alloc_page(GFP_KERNEL);
740 if (!intel_private.i8xx_page)
741 return;
742
743 intel_private.i8xx_flush_page = kmap(intel_private.i8xx_page);
744 if (!intel_private.i8xx_flush_page)
745 i830_cleanup();
746 }
747
748 /* The chipset_flush interface needs to get data that has already been
749 * flushed out of the CPU all the way out to main memory, because the GPU
750 * doesn't snoop those buffers.
751 *
752 * The 8xx series doesn't have the same lovely interface for flushing the
753 * chipset write buffers that the later chips do. According to the 865
754 * specs, it's 64 octwords, or 1KB. So, to get those previous things in
755 * that buffer out, we just fill 1KB and clflush it out, on the assumption
756 * that it'll push whatever was in there out. It appears to work.
757 */
758 static void i830_chipset_flush(void)
759 {
760 unsigned int *pg = intel_private.i8xx_flush_page;
761
762 memset(pg, 0, 1024);
763
764 if (cpu_has_clflush)
765 clflush_cache_range(pg, 1024);
766 else if (wbinvd_on_all_cpus() != 0)
767 printk(KERN_ERR "Timed out waiting for cache flush.\n");
768 }
769
770 static void i830_write_entry(dma_addr_t addr, unsigned int entry,
771 unsigned int flags)
772 {
773 u32 pte_flags = I810_PTE_VALID;
774
775 if (flags == AGP_USER_CACHED_MEMORY)
776 pte_flags |= I830_PTE_SYSTEM_CACHED;
777
778 writel(addr | pte_flags, intel_private.gtt + entry);
779 }
780
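/* Enabling the GTT: latch the aperture base from the GMADDR BAR, make sure
 * the GMCH is enabled on gen2, then rewrite the saved PGETBL_CTL value with
 * the enable bit set so the hardware walks our page table again (this also
 * covers the resume path). */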
781 static bool intel_enable_gtt(void)
782 {
783 u32 gma_addr;
784 u8 __iomem *reg;
785
786 if (INTEL_GTT_GEN <= 2)
787 pci_read_config_dword(intel_private.pcidev, I810_GMADDR,
788 &gma_addr);
789 else
790 pci_read_config_dword(intel_private.pcidev, I915_GMADDR,
791 &gma_addr);
792
793 intel_private.gma_bus_addr = (gma_addr & PCI_BASE_ADDRESS_MEM_MASK);
794
795 if (INTEL_GTT_GEN >= 6)
796 return true;
797
798 if (INTEL_GTT_GEN == 2) {
799 u16 gmch_ctrl;
800
801 pci_read_config_word(intel_private.bridge_dev,
802 I830_GMCH_CTRL, &gmch_ctrl);
803 gmch_ctrl |= I830_GMCH_ENABLED;
804 pci_write_config_word(intel_private.bridge_dev,
805 I830_GMCH_CTRL, gmch_ctrl);
806
807 pci_read_config_word(intel_private.bridge_dev,
808 I830_GMCH_CTRL, &gmch_ctrl);
809 if ((gmch_ctrl & I830_GMCH_ENABLED) == 0) {
810 dev_err(&intel_private.pcidev->dev,
811 "failed to enable the GTT: GMCH_CTRL=%x\n",
812 gmch_ctrl);
813 return false;
814 }
815 }
816
817 /* On the resume path we may be adjusting the PGTBL value, so
818 * be paranoid and flush all chipset write buffers...
819 */
820 if (INTEL_GTT_GEN >= 3)
821 writel(0, intel_private.registers+GFX_FLSH_CNTL);
822
823 reg = intel_private.registers+I810_PGETBL_CTL;
824 writel(intel_private.PGETBL_save, reg);
825 if (HAS_PGTBL_EN && (readl(reg) & I810_PGETBL_ENABLED) == 0) {
826 dev_err(&intel_private.pcidev->dev,
827 "failed to enable the GTT: PGETBL=%x [expected %x]\n",
828 readl(reg), intel_private.PGETBL_save);
829 return false;
830 }
831
832 if (INTEL_GTT_GEN >= 3)
833 writel(0, intel_private.registers+GFX_FLSH_CNTL);
834
835 return true;
836 }
837
838 static int i830_setup(void)
839 {
840 u32 reg_addr;
841
842 pci_read_config_dword(intel_private.pcidev, I810_MMADDR, &reg_addr);
843 reg_addr &= 0xfff80000;
844
845 intel_private.registers = ioremap(reg_addr, KB(64));
846 if (!intel_private.registers)
847 return -ENOMEM;
848
849 intel_private.gtt_bus_addr = reg_addr + I810_PTE_BASE;
850
851 intel_i830_setup_flush();
852
853 return 0;
854 }
855
856 static int intel_fake_agp_create_gatt_table(struct agp_bridge_data *bridge)
857 {
858 agp_bridge->gatt_table_real = NULL;
859 agp_bridge->gatt_table = NULL;
860 agp_bridge->gatt_bus_addr = 0;
861
862 return 0;
863 }
864
865 static int intel_fake_agp_free_gatt_table(struct agp_bridge_data *bridge)
866 {
867 return 0;
868 }
869
870 static int intel_fake_agp_configure(void)
871 {
872 int i;
873
874 if (!intel_enable_gtt())
875 return -EIO;
876
877 agp_bridge->gart_bus_addr = intel_private.gma_bus_addr;
878
879 for (i = 0; i < intel_private.base.gtt_total_entries; i++) {
880 intel_private.driver->write_entry(intel_private.scratch_page_dma,
881 i, 0);
882 }
883 readl(intel_private.gtt+i-1); /* PCI Posting. */
884
885 global_cache_flush();
886
887 return 0;
888 }
889
890 static bool i830_check_flags(unsigned int flags)
891 {
892 switch (flags) {
893 case 0:
894 case AGP_PHYS_MEMORY:
895 case AGP_USER_CACHED_MEMORY:
896 case AGP_USER_MEMORY:
897 return true;
898 }
899
900 return false;
901 }
902
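/* Illustrative sketch (not a caller in this file) of how the exported GTT
 * API fits together when needs_dmar is set, e.g. from a gem-style driver:
 *
 *	struct scatterlist *sg_list = NULL;
 *	int num_sg;
 *
 *	if (intel_gtt_map_memory(pages, num_pages, &sg_list, &num_sg) == 0) {
 *		intel_gtt_insert_sg_entries(sg_list, num_sg, first_entry, flags);
 *		... GPU uses the mapping ...
 *		intel_gtt_clear_range(first_entry, num_pages);
 *		intel_gtt_unmap_memory(sg_list, num_sg);
 *	}
 */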
903 void intel_gtt_insert_sg_entries(struct scatterlist *sg_list,
904 unsigned int sg_len,
905 unsigned int pg_start,
906 unsigned int flags)
907 {
908 struct scatterlist *sg;
909 unsigned int len, m;
910 int i, j;
911
912 j = pg_start;
913
914 /* sg may merge pages, but we have to separate
915 * per-page addr for GTT */
916 for_each_sg(sg_list, sg, sg_len, i) {
917 len = sg_dma_len(sg) >> PAGE_SHIFT;
918 for (m = 0; m < len; m++) {
919 dma_addr_t addr = sg_dma_address(sg) + (m << PAGE_SHIFT);
920 intel_private.driver->write_entry(addr,
921 j, flags);
922 j++;
923 }
924 }
925 readl(intel_private.gtt+j-1); /* PCI posting. */
926 }
927 EXPORT_SYMBOL(intel_gtt_insert_sg_entries);
928
929 void intel_gtt_insert_pages(unsigned int first_entry, unsigned int num_entries,
930 struct page **pages, unsigned int flags)
931 {
932 int i, j;
933
934 for (i = 0, j = first_entry; i < num_entries; i++, j++) {
935 dma_addr_t addr = page_to_phys(pages[i]);
936 intel_private.driver->write_entry(addr,
937 j, flags);
938 }
939 readl(intel_private.gtt+j-1); /* PCI posting. */
940 }
941 EXPORT_SYMBOL(intel_gtt_insert_pages);
942
943 static int intel_fake_agp_insert_entries(struct agp_memory *mem,
944 off_t pg_start, int type)
945 {
946 int ret = -EINVAL;
947
948 if (INTEL_GTT_GEN == 1 && type == AGP_DCACHE_MEMORY)
949 return i810_insert_dcache_entries(mem, pg_start, type);
950
951 if (mem->page_count == 0)
952 goto out;
953
954 if (pg_start + mem->page_count > intel_private.base.gtt_total_entries)
955 goto out_err;
956
957 if (type != mem->type)
958 goto out_err;
959
960 if (!intel_private.driver->check_flags(type))
961 goto out_err;
962
963 if (!mem->is_flushed)
964 global_cache_flush();
965
966 if (intel_private.base.needs_dmar) {
967 ret = intel_gtt_map_memory(mem->pages, mem->page_count,
968 &mem->sg_list, &mem->num_sg);
969 if (ret != 0)
970 return ret;
971
972 intel_gtt_insert_sg_entries(mem->sg_list, mem->num_sg,
973 pg_start, type);
974 } else
975 intel_gtt_insert_pages(pg_start, mem->page_count, mem->pages,
976 type);
977
978 out:
979 ret = 0;
980 out_err:
981 mem->is_flushed = true;
982 return ret;
983 }
984
985 void intel_gtt_clear_range(unsigned int first_entry, unsigned int num_entries)
986 {
987 unsigned int i;
988
989 for (i = first_entry; i < (first_entry + num_entries); i++) {
990 intel_private.driver->write_entry(intel_private.scratch_page_dma,
991 i, 0);
992 }
993 readl(intel_private.gtt+i-1); /* PCI posting. */
994 }
995 EXPORT_SYMBOL(intel_gtt_clear_range);
996
997 static int intel_fake_agp_remove_entries(struct agp_memory *mem,
998 off_t pg_start, int type)
999 {
1000 if (mem->page_count == 0)
1001 return 0;
1002
1003 intel_gtt_clear_range(pg_start, mem->page_count);
1004
1005 if (intel_private.base.needs_dmar) {
1006 intel_gtt_unmap_memory(mem->sg_list, mem->num_sg);
1007 mem->sg_list = NULL;
1008 mem->num_sg = 0;
1009 }
1010
1011 return 0;
1012 }
1013
1014 static struct agp_memory *intel_fake_agp_alloc_by_type(size_t pg_count,
1015 int type)
1016 {
1017 struct agp_memory *new;
1018
1019 if (type == AGP_DCACHE_MEMORY && INTEL_GTT_GEN == 1) {
1020 if (pg_count != intel_private.num_dcache_entries)
1021 return NULL;
1022
1023 new = agp_create_memory(1);
1024 if (new == NULL)
1025 return NULL;
1026
1027 new->type = AGP_DCACHE_MEMORY;
1028 new->page_count = pg_count;
1029 new->num_scratch_pages = 0;
1030 agp_free_page_array(new);
1031 return new;
1032 }
1033 if (type == AGP_PHYS_MEMORY)
1034 return alloc_agpphysmem_i8xx(pg_count, type);
1035 /* always return NULL for other allocation types for now */
1036 return NULL;
1037 }
1038
1039 static int intel_alloc_chipset_flush_resource(void)
1040 {
1041 int ret;
1042 ret = pci_bus_alloc_resource(intel_private.bridge_dev->bus, &intel_private.ifp_resource, PAGE_SIZE,
1043 PAGE_SIZE, PCIBIOS_MIN_MEM, 0,
1044 pcibios_align_resource, intel_private.bridge_dev);
1045
1046 return ret;
1047 }
1048
1049 static void intel_i915_setup_chipset_flush(void)
1050 {
1051 int ret;
1052 u32 temp;
1053
1054 pci_read_config_dword(intel_private.bridge_dev, I915_IFPADDR, &temp);
1055 if (!(temp & 0x1)) {
1056 intel_alloc_chipset_flush_resource();
1057 intel_private.resource_valid = 1;
1058 pci_write_config_dword(intel_private.bridge_dev, I915_IFPADDR, (intel_private.ifp_resource.start & 0xffffffff) | 0x1);
1059 } else {
1060 temp &= ~1;
1061
1062 intel_private.resource_valid = 1;
1063 intel_private.ifp_resource.start = temp;
1064 intel_private.ifp_resource.end = temp + PAGE_SIZE;
1065 ret = request_resource(&iomem_resource, &intel_private.ifp_resource);
1066 /* some BIOSes reserve this area in a PnP resource, some don't */
1067 if (ret)
1068 intel_private.resource_valid = 0;
1069 }
1070 }
1071
1072 static void intel_i965_g33_setup_chipset_flush(void)
1073 {
1074 u32 temp_hi, temp_lo;
1075 int ret;
1076
1077 pci_read_config_dword(intel_private.bridge_dev, I965_IFPADDR + 4, &temp_hi);
1078 pci_read_config_dword(intel_private.bridge_dev, I965_IFPADDR, &temp_lo);
1079
1080 if (!(temp_lo & 0x1)) {
1081
1082 intel_alloc_chipset_flush_resource();
1083
1084 intel_private.resource_valid = 1;
1085 pci_write_config_dword(intel_private.bridge_dev, I965_IFPADDR + 4,
1086 upper_32_bits(intel_private.ifp_resource.start));
1087 pci_write_config_dword(intel_private.bridge_dev, I965_IFPADDR, (intel_private.ifp_resource.start & 0xffffffff) | 0x1);
1088 } else {
1089 u64 l64;
1090
1091 temp_lo &= ~0x1;
1092 l64 = ((u64)temp_hi << 32) | temp_lo;
1093
1094 intel_private.resource_valid = 1;
1095 intel_private.ifp_resource.start = l64;
1096 intel_private.ifp_resource.end = l64 + PAGE_SIZE;
1097 ret = request_resource(&iomem_resource, &intel_private.ifp_resource);
1098 /* some BIOSes reserve this area in a PnP resource, some don't */
1099 if (ret)
1100 intel_private.resource_valid = 0;
1101 }
1102 }
1103
1104 static void intel_i9xx_setup_flush(void)
1105 {
1106 /* return if already configured */
1107 if (intel_private.ifp_resource.start)
1108 return;
1109
1110 if (INTEL_GTT_GEN == 6)
1111 return;
1112
1113 /* setup a resource for this object */
1114 intel_private.ifp_resource.name = "Intel Flush Page";
1115 intel_private.ifp_resource.flags = IORESOURCE_MEM;
1116
1117 /* Setup chipset flush for 915 */
1118 if (IS_G33 || INTEL_GTT_GEN >= 4) {
1119 intel_i965_g33_setup_chipset_flush();
1120 } else {
1121 intel_i915_setup_chipset_flush();
1122 }
1123
1124 if (intel_private.ifp_resource.start)
1125 intel_private.i9xx_flush_page = ioremap_nocache(intel_private.ifp_resource.start, PAGE_SIZE);
1126 if (!intel_private.i9xx_flush_page)
1127 dev_err(&intel_private.pcidev->dev,
1128 "can't ioremap flush page - no chipset flushing\n");
1129 }
1130
1131 static void i9xx_cleanup(void)
1132 {
1133 if (intel_private.i9xx_flush_page)
1134 iounmap(intel_private.i9xx_flush_page);
1135 if (intel_private.resource_valid)
1136 release_resource(&intel_private.ifp_resource);
1137 intel_private.ifp_resource.start = 0;
1138 intel_private.resource_valid = 0;
1139 }
1140
1141 static void i9xx_chipset_flush(void)
1142 {
1143 if (intel_private.i9xx_flush_page)
1144 writel(1, intel_private.i9xx_flush_page);
1145 }
1146
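/* i965-class PTEs are still 32 bits wide, but physical addresses can be up
 * to 36 bits: address bits 35:32 are stored in PTE bits 7:4. That is what
 * the (addr >> 28) & 0xf0 below does; e.g. addr = 0x3_4000_0000 contributes
 * 0x30 to the entry. */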
1147 static void i965_write_entry(dma_addr_t addr,
1148 unsigned int entry,
1149 unsigned int flags)
1150 {
1151 u32 pte_flags;
1152
1153 pte_flags = I810_PTE_VALID;
1154 if (flags == AGP_USER_CACHED_MEMORY)
1155 pte_flags |= I830_PTE_SYSTEM_CACHED;
1156
1157 /* Shift high bits down */
1158 addr |= (addr >> 28) & 0xf0;
1159 writel(addr | pte_flags, intel_private.gtt + entry);
1160 }
1161
1162 static bool gen6_check_flags(unsigned int flags)
1163 {
1164 return true;
1165 }
1166
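/* Sandybridge PTEs keep the cacheability type in the low flag bits and, as
 * noted below, physical address bits 39:32 in PTE bits 11:4. The
 * (addr >> 28) & 0xff0 shuffle moves those bits into place; e.g.
 * addr = 0x12_3456_7000 contributes 0x120. */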
1167 static void gen6_write_entry(dma_addr_t addr, unsigned int entry,
1168 unsigned int flags)
1169 {
1170 unsigned int type_mask = flags & ~AGP_USER_CACHED_MEMORY_GFDT;
1171 unsigned int gfdt = flags & AGP_USER_CACHED_MEMORY_GFDT;
1172 u32 pte_flags;
1173
1174 if (type_mask == AGP_USER_MEMORY)
1175 pte_flags = GEN6_PTE_UNCACHED | I810_PTE_VALID;
1176 else if (type_mask == AGP_USER_CACHED_MEMORY_LLC_MLC) {
1177 pte_flags = GEN6_PTE_LLC_MLC | I810_PTE_VALID;
1178 if (gfdt)
1179 pte_flags |= GEN6_PTE_GFDT;
1180 } else { /* set 'normal'/'cached' to LLC by default */
1181 pte_flags = GEN6_PTE_LLC | I810_PTE_VALID;
1182 if (gfdt)
1183 pte_flags |= GEN6_PTE_GFDT;
1184 }
1185
1186 /* gen6 has bit11-4 for physical addr bit39-32 */
1187 addr |= (addr >> 28) & 0xff0;
1188 writel(addr | pte_flags, intel_private.gtt + entry);
1189 }
1190
1191 static void gen6_cleanup(void)
1192 {
1193 }
1194
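/* GTT location on i9xx parts: gen3 exposes it through its own PTEADDR BAR,
 * while gen4+ place it at a fixed offset inside the MMIO BAR (512KB on gen4,
 * 2MB on gen5/gen6), which is what the switch below selects. */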
1195 static int i9xx_setup(void)
1196 {
1197 u32 reg_addr;
1198
1199 pci_read_config_dword(intel_private.pcidev, I915_MMADDR, &reg_addr);
1200
1201 reg_addr &= 0xfff80000;
1202
1203 intel_private.registers = ioremap(reg_addr, 128 * 4096);
1204 if (!intel_private.registers)
1205 return -ENOMEM;
1206
1207 if (INTEL_GTT_GEN == 3) {
1208 u32 gtt_addr;
1209
1210 pci_read_config_dword(intel_private.pcidev,
1211 I915_PTEADDR, &gtt_addr);
1212 intel_private.gtt_bus_addr = gtt_addr;
1213 } else {
1214 u32 gtt_offset;
1215
1216 switch (INTEL_GTT_GEN) {
1217 case 5:
1218 case 6:
1219 gtt_offset = MB(2);
1220 break;
1221 case 4:
1222 default:
1223 gtt_offset = KB(512);
1224 break;
1225 }
1226 intel_private.gtt_bus_addr = reg_addr + gtt_offset;
1227 }
1228
1229 intel_i9xx_setup_flush();
1230
1231 return 0;
1232 }
1233
1234 static const struct agp_bridge_driver intel_fake_agp_driver = {
1235 .owner = THIS_MODULE,
1236 .size_type = FIXED_APER_SIZE,
1237 .aperture_sizes = intel_fake_agp_sizes,
1238 .num_aperture_sizes = ARRAY_SIZE(intel_fake_agp_sizes),
1239 .configure = intel_fake_agp_configure,
1240 .fetch_size = intel_fake_agp_fetch_size,
1241 .cleanup = intel_gtt_cleanup,
1242 .agp_enable = intel_fake_agp_enable,
1243 .cache_flush = global_cache_flush,
1244 .create_gatt_table = intel_fake_agp_create_gatt_table,
1245 .free_gatt_table = intel_fake_agp_free_gatt_table,
1246 .insert_memory = intel_fake_agp_insert_entries,
1247 .remove_memory = intel_fake_agp_remove_entries,
1248 .alloc_by_type = intel_fake_agp_alloc_by_type,
1249 .free_by_type = intel_i810_free_by_type,
1250 .agp_alloc_page = agp_generic_alloc_page,
1251 .agp_alloc_pages = agp_generic_alloc_pages,
1252 .agp_destroy_page = agp_generic_destroy_page,
1253 .agp_destroy_pages = agp_generic_destroy_pages,
1254 };
1255
1256 static const struct intel_gtt_driver i81x_gtt_driver = {
1257 .gen = 1,
1258 .has_pgtbl_enable = 1,
1259 .dma_mask_size = 32,
1260 .setup = i810_setup,
1261 .cleanup = i810_cleanup,
1262 .check_flags = i830_check_flags,
1263 .write_entry = i810_write_entry,
1264 };
1265 static const struct intel_gtt_driver i8xx_gtt_driver = {
1266 .gen = 2,
1267 .has_pgtbl_enable = 1,
1268 .setup = i830_setup,
1269 .cleanup = i830_cleanup,
1270 .write_entry = i830_write_entry,
1271 .dma_mask_size = 32,
1272 .check_flags = i830_check_flags,
1273 .chipset_flush = i830_chipset_flush,
1274 };
1275 static const struct intel_gtt_driver i915_gtt_driver = {
1276 .gen = 3,
1277 .has_pgtbl_enable = 1,
1278 .setup = i9xx_setup,
1279 .cleanup = i9xx_cleanup,
1280 /* i945 is the last gpu to need phys mem (for overlay and cursors). */
1281 .write_entry = i830_write_entry,
1282 .dma_mask_size = 32,
1283 .check_flags = i830_check_flags,
1284 .chipset_flush = i9xx_chipset_flush,
1285 };
1286 static const struct intel_gtt_driver g33_gtt_driver = {
1287 .gen = 3,
1288 .is_g33 = 1,
1289 .setup = i9xx_setup,
1290 .cleanup = i9xx_cleanup,
1291 .write_entry = i965_write_entry,
1292 .dma_mask_size = 36,
1293 .check_flags = i830_check_flags,
1294 .chipset_flush = i9xx_chipset_flush,
1295 };
1296 static const struct intel_gtt_driver pineview_gtt_driver = {
1297 .gen = 3,
1298 .is_pineview = 1, .is_g33 = 1,
1299 .setup = i9xx_setup,
1300 .cleanup = i9xx_cleanup,
1301 .write_entry = i965_write_entry,
1302 .dma_mask_size = 36,
1303 .check_flags = i830_check_flags,
1304 .chipset_flush = i9xx_chipset_flush,
1305 };
1306 static const struct intel_gtt_driver i965_gtt_driver = {
1307 .gen = 4,
1308 .has_pgtbl_enable = 1,
1309 .setup = i9xx_setup,
1310 .cleanup = i9xx_cleanup,
1311 .write_entry = i965_write_entry,
1312 .dma_mask_size = 36,
1313 .check_flags = i830_check_flags,
1314 .chipset_flush = i9xx_chipset_flush,
1315 };
1316 static const struct intel_gtt_driver g4x_gtt_driver = {
1317 .gen = 5,
1318 .setup = i9xx_setup,
1319 .cleanup = i9xx_cleanup,
1320 .write_entry = i965_write_entry,
1321 .dma_mask_size = 36,
1322 .check_flags = i830_check_flags,
1323 .chipset_flush = i9xx_chipset_flush,
1324 };
1325 static const struct intel_gtt_driver ironlake_gtt_driver = {
1326 .gen = 5,
1327 .is_ironlake = 1,
1328 .setup = i9xx_setup,
1329 .cleanup = i9xx_cleanup,
1330 .write_entry = i965_write_entry,
1331 .dma_mask_size = 36,
1332 .check_flags = i830_check_flags,
1333 .chipset_flush = i9xx_chipset_flush,
1334 };
1335 static const struct intel_gtt_driver sandybridge_gtt_driver = {
1336 .gen = 6,
1337 .setup = i9xx_setup,
1338 .cleanup = gen6_cleanup,
1339 .write_entry = gen6_write_entry,
1340 .dma_mask_size = 40,
1341 .check_flags = gen6_check_flags,
1342 .chipset_flush = i9xx_chipset_flush,
1343 };
1344
1345 /* Table to describe Intel GMCH and AGP/PCIE GART drivers. find_gmch()
1346 * matches each gmch_chip_id against the installed hardware; the matching
1347 * entry's gtt_driver is then used.
1348 */
1349 static const struct intel_gtt_driver_description {
1350 unsigned int gmch_chip_id;
1351 char *name;
1352 const struct intel_gtt_driver *gtt_driver;
1353 } intel_gtt_chipsets[] = {
1354 { PCI_DEVICE_ID_INTEL_82810_IG1, "i810",
1355 &i81x_gtt_driver},
1356 { PCI_DEVICE_ID_INTEL_82810_IG3, "i810",
1357 &i81x_gtt_driver},
1358 { PCI_DEVICE_ID_INTEL_82810E_IG, "i810",
1359 &i81x_gtt_driver},
1360 { PCI_DEVICE_ID_INTEL_82815_CGC, "i815",
1361 &i81x_gtt_driver},
1362 { PCI_DEVICE_ID_INTEL_82830_CGC, "830M",
1363 &i8xx_gtt_driver},
1364 { PCI_DEVICE_ID_INTEL_82845G_IG, "845G",
1365 &i8xx_gtt_driver},
1366 { PCI_DEVICE_ID_INTEL_82854_IG, "854",
1367 &i8xx_gtt_driver},
1368 { PCI_DEVICE_ID_INTEL_82855GM_IG, "855GM",
1369 &i8xx_gtt_driver},
1370 { PCI_DEVICE_ID_INTEL_82865_IG, "865",
1371 &i8xx_gtt_driver},
1372 { PCI_DEVICE_ID_INTEL_E7221_IG, "E7221 (i915)",
1373 &i915_gtt_driver },
1374 { PCI_DEVICE_ID_INTEL_82915G_IG, "915G",
1375 &i915_gtt_driver },
1376 { PCI_DEVICE_ID_INTEL_82915GM_IG, "915GM",
1377 &i915_gtt_driver },
1378 { PCI_DEVICE_ID_INTEL_82945G_IG, "945G",
1379 &i915_gtt_driver },
1380 { PCI_DEVICE_ID_INTEL_82945GM_IG, "945GM",
1381 &i915_gtt_driver },
1382 { PCI_DEVICE_ID_INTEL_82945GME_IG, "945GME",
1383 &i915_gtt_driver },
1384 { PCI_DEVICE_ID_INTEL_82946GZ_IG, "946GZ",
1385 &i965_gtt_driver },
1386 { PCI_DEVICE_ID_INTEL_82G35_IG, "G35",
1387 &i965_gtt_driver },
1388 { PCI_DEVICE_ID_INTEL_82965Q_IG, "965Q",
1389 &i965_gtt_driver },
1390 { PCI_DEVICE_ID_INTEL_82965G_IG, "965G",
1391 &i965_gtt_driver },
1392 { PCI_DEVICE_ID_INTEL_82965GM_IG, "965GM",
1393 &i965_gtt_driver },
1394 { PCI_DEVICE_ID_INTEL_82965GME_IG, "965GME/GLE",
1395 &i965_gtt_driver },
1396 { PCI_DEVICE_ID_INTEL_G33_IG, "G33",
1397 &g33_gtt_driver },
1398 { PCI_DEVICE_ID_INTEL_Q35_IG, "Q35",
1399 &g33_gtt_driver },
1400 { PCI_DEVICE_ID_INTEL_Q33_IG, "Q33",
1401 &g33_gtt_driver },
1402 { PCI_DEVICE_ID_INTEL_PINEVIEW_M_IG, "GMA3150",
1403 &pineview_gtt_driver },
1404 { PCI_DEVICE_ID_INTEL_PINEVIEW_IG, "GMA3150",
1405 &pineview_gtt_driver },
1406 { PCI_DEVICE_ID_INTEL_GM45_IG, "GM45",
1407 &g4x_gtt_driver },
1408 { PCI_DEVICE_ID_INTEL_EAGLELAKE_IG, "Eaglelake",
1409 &g4x_gtt_driver },
1410 { PCI_DEVICE_ID_INTEL_Q45_IG, "Q45/Q43",
1411 &g4x_gtt_driver },
1412 { PCI_DEVICE_ID_INTEL_G45_IG, "G45/G43",
1413 &g4x_gtt_driver },
1414 { PCI_DEVICE_ID_INTEL_B43_IG, "B43",
1415 &g4x_gtt_driver },
1416 { PCI_DEVICE_ID_INTEL_B43_1_IG, "B43",
1417 &g4x_gtt_driver },
1418 { PCI_DEVICE_ID_INTEL_G41_IG, "G41",
1419 &g4x_gtt_driver },
1420 { PCI_DEVICE_ID_INTEL_IRONLAKE_D_IG,
1421 "HD Graphics", &ironlake_gtt_driver },
1422 { PCI_DEVICE_ID_INTEL_IRONLAKE_M_IG,
1423 "HD Graphics", &ironlake_gtt_driver },
1424 { PCI_DEVICE_ID_INTEL_SANDYBRIDGE_GT1_IG,
1425 "Sandybridge", &sandybridge_gtt_driver },
1426 { PCI_DEVICE_ID_INTEL_SANDYBRIDGE_GT2_IG,
1427 "Sandybridge", &sandybridge_gtt_driver },
1428 { PCI_DEVICE_ID_INTEL_SANDYBRIDGE_GT2_PLUS_IG,
1429 "Sandybridge", &sandybridge_gtt_driver },
1430 { PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_GT1_IG,
1431 "Sandybridge", &sandybridge_gtt_driver },
1432 { PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_GT2_IG,
1433 "Sandybridge", &sandybridge_gtt_driver },
1434 { PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_GT2_PLUS_IG,
1435 "Sandybridge", &sandybridge_gtt_driver },
1436 { PCI_DEVICE_ID_INTEL_SANDYBRIDGE_S_IG,
1437 "Sandybridge", &sandybridge_gtt_driver },
1438 { 0, NULL, NULL }
1439 };
1440
1441 static int find_gmch(u16 device)
1442 {
1443 struct pci_dev *gmch_device;
1444
1445 gmch_device = pci_get_device(PCI_VENDOR_ID_INTEL, device, NULL);
1446 if (gmch_device && PCI_FUNC(gmch_device->devfn) != 0) {
1447 gmch_device = pci_get_device(PCI_VENDOR_ID_INTEL,
1448 device, gmch_device);
1449 }
1450
1451 if (!gmch_device)
1452 return 0;
1453
1454 intel_private.pcidev = gmch_device;
1455 return 1;
1456 }
1457
1458 int intel_gmch_probe(struct pci_dev *pdev,
1459 struct agp_bridge_data *bridge)
1460 {
1461 int i, mask;
1462 intel_private.driver = NULL;
1463
1464 for (i = 0; intel_gtt_chipsets[i].name != NULL; i++) {
1465 if (find_gmch(intel_gtt_chipsets[i].gmch_chip_id)) {
1466 intel_private.driver =
1467 intel_gtt_chipsets[i].gtt_driver;
1468 break;
1469 }
1470 }
1471
1472 if (!intel_private.driver)
1473 return 0;
1474
1475 bridge->driver = &intel_fake_agp_driver;
1476 bridge->dev_private_data = &intel_private;
1477 bridge->dev = pdev;
1478
1479 intel_private.bridge_dev = pci_dev_get(pdev);
1480
1481 dev_info(&pdev->dev, "Intel %s Chipset\n", intel_gtt_chipsets[i].name);
1482
1483 mask = intel_private.driver->dma_mask_size;
1484 if (pci_set_dma_mask(intel_private.pcidev, DMA_BIT_MASK(mask)))
1485 dev_err(&intel_private.pcidev->dev,
1486 "set gfx device dma mask %d-bit failed!\n", mask);
1487 else
1488 pci_set_consistent_dma_mask(intel_private.pcidev,
1489 DMA_BIT_MASK(mask));
1490
1491 /*if (bridge->driver == &intel_810_driver)
1492 return 1;*/
1493
1494 if (intel_gtt_init() != 0)
1495 return 0;
1496
1497 return 1;
1498 }
1499 EXPORT_SYMBOL(intel_gmch_probe);
1500
1501 const struct intel_gtt *intel_gtt_get(void)
1502 {
1503 return &intel_private.base;
1504 }
1505 EXPORT_SYMBOL(intel_gtt_get);
1506
1507 void intel_gtt_chipset_flush(void)
1508 {
1509 if (intel_private.driver->chipset_flush)
1510 intel_private.driver->chipset_flush();
1511 }
1512 EXPORT_SYMBOL(intel_gtt_chipset_flush);
1513
1514 void intel_gmch_remove(struct pci_dev *pdev)
1515 {
1516 if (intel_private.pcidev)
1517 pci_dev_put(intel_private.pcidev);
1518 if (intel_private.bridge_dev)
1519 pci_dev_put(intel_private.bridge_dev);
1520 }
1521 EXPORT_SYMBOL(intel_gmch_remove);
1522
1523 MODULE_AUTHOR("Dave Jones <davej@redhat.com>");
1524 MODULE_LICENSE("GPL and additional rights");