2 * Intel GTT (Graphics Translation Table) routines
4 * Caveat: This driver implements the linux agp interface, but this is far from
5 * a agp driver! GTT support ended up here for purely historical reasons: The
6 * old userspace intel graphics drivers needed an interface to map memory into
7 * the GTT. And the drm provides a default interface for graphic devices sitting
8 * on an agp port. So it made sense to fake the GTT support as an agp port to
9 * avoid having to create a new api.
11 * With gem this does not make much sense anymore, just needlessly complicates
12 * the code. But as long as the old graphics stack is still support, it's stuck
15 * /fairy-tale-mode off
19 * If we have Intel graphics, we're not going to have anything other than
20 * an Intel IOMMU. So make the correct use of the PCI DMA API contingent
21 * on the Intel IOMMU support (CONFIG_DMAR).
22 * Only newer chipsets need to bother with this, of course.
25 #define USE_PCI_DMA_API 1
/* Max amount of stolen space, anything above will be returned to Linux */
int intel_max_stolen = 32 * 1024 * 1024;
EXPORT_SYMBOL(intel_max_stolen);
32 static const struct aper_size_info_fixed intel_i810_sizes
[] =
35 /* The 32M mode still requires a 64k gatt */
39 #define AGP_DCACHE_MEMORY 1
40 #define AGP_PHYS_MEMORY 2
41 #define INTEL_AGP_CACHED_MEMORY 3
43 static struct gatt_mask intel_i810_masks
[] =
45 {.mask
= I810_PTE_VALID
, .type
= 0},
46 {.mask
= (I810_PTE_VALID
| I810_PTE_LOCAL
), .type
= AGP_DCACHE_MEMORY
},
47 {.mask
= I810_PTE_VALID
, .type
= 0},
48 {.mask
= I810_PTE_VALID
| I830_PTE_SYSTEM_CACHED
,
49 .type
= INTEL_AGP_CACHED_MEMORY
}
52 #define INTEL_AGP_UNCACHED_MEMORY 0
53 #define INTEL_AGP_CACHED_MEMORY_LLC 1
54 #define INTEL_AGP_CACHED_MEMORY_LLC_GFDT 2
55 #define INTEL_AGP_CACHED_MEMORY_LLC_MLC 3
56 #define INTEL_AGP_CACHED_MEMORY_LLC_MLC_GFDT 4
58 static struct gatt_mask intel_gen6_masks
[] =
60 {.mask
= I810_PTE_VALID
| GEN6_PTE_UNCACHED
,
61 .type
= INTEL_AGP_UNCACHED_MEMORY
},
62 {.mask
= I810_PTE_VALID
| GEN6_PTE_LLC
,
63 .type
= INTEL_AGP_CACHED_MEMORY_LLC
},
64 {.mask
= I810_PTE_VALID
| GEN6_PTE_LLC
| GEN6_PTE_GFDT
,
65 .type
= INTEL_AGP_CACHED_MEMORY_LLC_GFDT
},
66 {.mask
= I810_PTE_VALID
| GEN6_PTE_LLC_MLC
,
67 .type
= INTEL_AGP_CACHED_MEMORY_LLC_MLC
},
68 {.mask
= I810_PTE_VALID
| GEN6_PTE_LLC_MLC
| GEN6_PTE_GFDT
,
69 .type
= INTEL_AGP_CACHED_MEMORY_LLC_MLC_GFDT
},
72 static struct _intel_private
{
73 struct pci_dev
*pcidev
; /* device one */
74 u8 __iomem
*registers
;
75 u32 __iomem
*gtt
; /* I915G */
76 int num_dcache_entries
;
77 /* gtt_entries is the number of gtt entries that are already mapped
78 * to stolen memory. Stolen memory is larger than the memory mapped
79 * through gtt_entries, as it includes some reserved space for the BIOS
80 * popup and for the GTT.
82 int gtt_entries
; /* i830+ */
85 void __iomem
*i9xx_flush_page
;
86 void *i8xx_flush_page
;
88 struct page
*i8xx_page
;
89 struct resource ifp_resource
;
93 #ifdef USE_PCI_DMA_API
94 static int intel_agp_map_page(struct page
*page
, dma_addr_t
*ret
)
96 *ret
= pci_map_page(intel_private
.pcidev
, page
, 0,
97 PAGE_SIZE
, PCI_DMA_BIDIRECTIONAL
);
98 if (pci_dma_mapping_error(intel_private
.pcidev
, *ret
))
103 static void intel_agp_unmap_page(struct page
*page
, dma_addr_t dma
)
105 pci_unmap_page(intel_private
.pcidev
, dma
,
106 PAGE_SIZE
, PCI_DMA_BIDIRECTIONAL
);
109 static void intel_agp_free_sglist(struct agp_memory
*mem
)
113 st
.sgl
= mem
->sg_list
;
114 st
.orig_nents
= st
.nents
= mem
->page_count
;
122 static int intel_agp_map_memory(struct agp_memory
*mem
)
125 struct scatterlist
*sg
;
128 DBG("try mapping %lu pages\n", (unsigned long)mem
->page_count
);
130 if (sg_alloc_table(&st
, mem
->page_count
, GFP_KERNEL
))
133 mem
->sg_list
= sg
= st
.sgl
;
135 for (i
= 0 ; i
< mem
->page_count
; i
++, sg
= sg_next(sg
))
136 sg_set_page(sg
, mem
->pages
[i
], PAGE_SIZE
, 0);
138 mem
->num_sg
= pci_map_sg(intel_private
.pcidev
, mem
->sg_list
,
139 mem
->page_count
, PCI_DMA_BIDIRECTIONAL
);
140 if (unlikely(!mem
->num_sg
))
150 static void intel_agp_unmap_memory(struct agp_memory
*mem
)
152 DBG("try unmapping %lu pages\n", (unsigned long)mem
->page_count
);
154 pci_unmap_sg(intel_private
.pcidev
, mem
->sg_list
,
155 mem
->page_count
, PCI_DMA_BIDIRECTIONAL
);
156 intel_agp_free_sglist(mem
);
159 static void intel_agp_insert_sg_entries(struct agp_memory
*mem
,
160 off_t pg_start
, int mask_type
)
162 struct scatterlist
*sg
;
167 WARN_ON(!mem
->num_sg
);
169 if (mem
->num_sg
== mem
->page_count
) {
170 for_each_sg(mem
->sg_list
, sg
, mem
->page_count
, i
) {
171 writel(agp_bridge
->driver
->mask_memory(agp_bridge
,
172 sg_dma_address(sg
), mask_type
),
173 intel_private
.gtt
+j
);
177 /* sg may merge pages, but we have to separate
178 * per-page addr for GTT */
181 for_each_sg(mem
->sg_list
, sg
, mem
->num_sg
, i
) {
182 len
= sg_dma_len(sg
) / PAGE_SIZE
;
183 for (m
= 0; m
< len
; m
++) {
184 writel(agp_bridge
->driver
->mask_memory(agp_bridge
,
185 sg_dma_address(sg
) + m
* PAGE_SIZE
,
187 intel_private
.gtt
+j
);
192 readl(intel_private
.gtt
+j
-1);
197 static void intel_agp_insert_sg_entries(struct agp_memory
*mem
,
198 off_t pg_start
, int mask_type
)
202 for (i
= 0, j
= pg_start
; i
< mem
->page_count
; i
++, j
++) {
203 writel(agp_bridge
->driver
->mask_memory(agp_bridge
,
204 page_to_phys(mem
->pages
[i
]), mask_type
),
205 intel_private
.gtt
+j
);
208 readl(intel_private
.gtt
+j
-1);
213 static int intel_i810_fetch_size(void)
216 struct aper_size_info_fixed
*values
;
218 pci_read_config_dword(agp_bridge
->dev
, I810_SMRAM_MISCC
, &smram_miscc
);
219 values
= A_SIZE_FIX(agp_bridge
->driver
->aperture_sizes
);
221 if ((smram_miscc
& I810_GMS
) == I810_GMS_DISABLE
) {
222 dev_warn(&agp_bridge
->dev
->dev
, "i810 is disabled\n");
225 if ((smram_miscc
& I810_GFX_MEM_WIN_SIZE
) == I810_GFX_MEM_WIN_32M
) {
226 agp_bridge
->current_size
= (void *) (values
+ 1);
227 agp_bridge
->aperture_size_idx
= 1;
228 return values
[1].size
;
230 agp_bridge
->current_size
= (void *) (values
);
231 agp_bridge
->aperture_size_idx
= 0;
232 return values
[0].size
;
238 static int intel_i810_configure(void)
240 struct aper_size_info_fixed
*current_size
;
244 current_size
= A_SIZE_FIX(agp_bridge
->current_size
);
246 if (!intel_private
.registers
) {
247 pci_read_config_dword(intel_private
.pcidev
, I810_MMADDR
, &temp
);
250 intel_private
.registers
= ioremap(temp
, 128 * 4096);
251 if (!intel_private
.registers
) {
252 dev_err(&intel_private
.pcidev
->dev
,
253 "can't remap memory\n");
258 if ((readl(intel_private
.registers
+I810_DRAM_CTL
)
259 & I810_DRAM_ROW_0
) == I810_DRAM_ROW_0_SDRAM
) {
260 /* This will need to be dynamically assigned */
261 dev_info(&intel_private
.pcidev
->dev
,
262 "detected 4MB dedicated video ram\n");
263 intel_private
.num_dcache_entries
= 1024;
265 pci_read_config_dword(intel_private
.pcidev
, I810_GMADDR
, &temp
);
266 agp_bridge
->gart_bus_addr
= (temp
& PCI_BASE_ADDRESS_MEM_MASK
);
267 writel(agp_bridge
->gatt_bus_addr
| I810_PGETBL_ENABLED
, intel_private
.registers
+I810_PGETBL_CTL
);
268 readl(intel_private
.registers
+I810_PGETBL_CTL
); /* PCI Posting. */
270 if (agp_bridge
->driver
->needs_scratch_page
) {
271 for (i
= 0; i
< current_size
->num_entries
; i
++) {
272 writel(agp_bridge
->scratch_page
, intel_private
.registers
+I810_PTE_BASE
+(i
*4));
274 readl(intel_private
.registers
+I810_PTE_BASE
+((i
-1)*4)); /* PCI posting. */
276 global_cache_flush();
280 static void intel_i810_cleanup(void)
282 writel(0, intel_private
.registers
+I810_PGETBL_CTL
);
283 readl(intel_private
.registers
); /* PCI Posting. */
284 iounmap(intel_private
.registers
);
287 static void intel_i810_agp_enable(struct agp_bridge_data
*bridge
, u32 mode
)
292 /* Exists to support ARGB cursors */
293 static struct page
*i8xx_alloc_pages(void)
297 page
= alloc_pages(GFP_KERNEL
| GFP_DMA32
, 2);
301 if (set_pages_uc(page
, 4) < 0) {
302 set_pages_wb(page
, 4);
303 __free_pages(page
, 2);
307 atomic_inc(&agp_bridge
->current_memory_agp
);
311 static void i8xx_destroy_pages(struct page
*page
)
316 set_pages_wb(page
, 4);
318 __free_pages(page
, 2);
319 atomic_dec(&agp_bridge
->current_memory_agp
);
322 static int intel_i830_type_to_mask_type(struct agp_bridge_data
*bridge
,
325 if (type
< AGP_USER_TYPES
)
327 else if (type
== AGP_USER_CACHED_MEMORY
)
328 return INTEL_AGP_CACHED_MEMORY
;
333 static int intel_gen6_type_to_mask_type(struct agp_bridge_data
*bridge
,
336 unsigned int type_mask
= type
& ~AGP_USER_CACHED_MEMORY_GFDT
;
337 unsigned int gfdt
= type
& AGP_USER_CACHED_MEMORY_GFDT
;
339 if (type_mask
== AGP_USER_UNCACHED_MEMORY
)
340 return INTEL_AGP_UNCACHED_MEMORY
;
341 else if (type_mask
== AGP_USER_CACHED_MEMORY_LLC_MLC
)
342 return gfdt
? INTEL_AGP_CACHED_MEMORY_LLC_MLC_GFDT
:
343 INTEL_AGP_CACHED_MEMORY_LLC_MLC
;
344 else /* set 'normal'/'cached' to LLC by default */
345 return gfdt
? INTEL_AGP_CACHED_MEMORY_LLC_GFDT
:
346 INTEL_AGP_CACHED_MEMORY_LLC
;
350 static int intel_i810_insert_entries(struct agp_memory
*mem
, off_t pg_start
,
353 int i
, j
, num_entries
;
358 if (mem
->page_count
== 0)
361 temp
= agp_bridge
->current_size
;
362 num_entries
= A_SIZE_FIX(temp
)->num_entries
;
364 if ((pg_start
+ mem
->page_count
) > num_entries
)
368 for (j
= pg_start
; j
< (pg_start
+ mem
->page_count
); j
++) {
369 if (!PGE_EMPTY(agp_bridge
, readl(agp_bridge
->gatt_table
+j
))) {
375 if (type
!= mem
->type
)
378 mask_type
= agp_bridge
->driver
->agp_type_to_mask_type(agp_bridge
, type
);
381 case AGP_DCACHE_MEMORY
:
382 if (!mem
->is_flushed
)
383 global_cache_flush();
384 for (i
= pg_start
; i
< (pg_start
+ mem
->page_count
); i
++) {
385 writel((i
*4096)|I810_PTE_LOCAL
|I810_PTE_VALID
,
386 intel_private
.registers
+I810_PTE_BASE
+(i
*4));
388 readl(intel_private
.registers
+I810_PTE_BASE
+((i
-1)*4));
390 case AGP_PHYS_MEMORY
:
391 case AGP_NORMAL_MEMORY
:
392 if (!mem
->is_flushed
)
393 global_cache_flush();
394 for (i
= 0, j
= pg_start
; i
< mem
->page_count
; i
++, j
++) {
395 writel(agp_bridge
->driver
->mask_memory(agp_bridge
,
396 page_to_phys(mem
->pages
[i
]), mask_type
),
397 intel_private
.registers
+I810_PTE_BASE
+(j
*4));
399 readl(intel_private
.registers
+I810_PTE_BASE
+((j
-1)*4));
408 mem
->is_flushed
= true;
412 static int intel_i810_remove_entries(struct agp_memory
*mem
, off_t pg_start
,
417 if (mem
->page_count
== 0)
420 for (i
= pg_start
; i
< (mem
->page_count
+ pg_start
); i
++) {
421 writel(agp_bridge
->scratch_page
, intel_private
.registers
+I810_PTE_BASE
+(i
*4));
423 readl(intel_private
.registers
+I810_PTE_BASE
+((i
-1)*4));
429 * The i810/i830 requires a physical address to program its mouse
430 * pointer into hardware.
431 * However the Xserver still writes to it through the agp aperture.
433 static struct agp_memory
*alloc_agpphysmem_i8xx(size_t pg_count
, int type
)
435 struct agp_memory
*new;
439 case 1: page
= agp_bridge
->driver
->agp_alloc_page(agp_bridge
);
442 /* kludge to get 4 physical pages for ARGB cursor */
443 page
= i8xx_alloc_pages();
452 new = agp_create_memory(pg_count
);
456 new->pages
[0] = page
;
458 /* kludge to get 4 physical pages for ARGB cursor */
459 new->pages
[1] = new->pages
[0] + 1;
460 new->pages
[2] = new->pages
[1] + 1;
461 new->pages
[3] = new->pages
[2] + 1;
463 new->page_count
= pg_count
;
464 new->num_scratch_pages
= pg_count
;
465 new->type
= AGP_PHYS_MEMORY
;
466 new->physical
= page_to_phys(new->pages
[0]);
470 static struct agp_memory
*intel_i810_alloc_by_type(size_t pg_count
, int type
)
472 struct agp_memory
*new;
474 if (type
== AGP_DCACHE_MEMORY
) {
475 if (pg_count
!= intel_private
.num_dcache_entries
)
478 new = agp_create_memory(1);
482 new->type
= AGP_DCACHE_MEMORY
;
483 new->page_count
= pg_count
;
484 new->num_scratch_pages
= 0;
485 agp_free_page_array(new);
488 if (type
== AGP_PHYS_MEMORY
)
489 return alloc_agpphysmem_i8xx(pg_count
, type
);
493 static void intel_i810_free_by_type(struct agp_memory
*curr
)
495 agp_free_key(curr
->key
);
496 if (curr
->type
== AGP_PHYS_MEMORY
) {
497 if (curr
->page_count
== 4)
498 i8xx_destroy_pages(curr
->pages
[0]);
500 agp_bridge
->driver
->agp_destroy_page(curr
->pages
[0],
501 AGP_PAGE_DESTROY_UNMAP
);
502 agp_bridge
->driver
->agp_destroy_page(curr
->pages
[0],
503 AGP_PAGE_DESTROY_FREE
);
505 agp_free_page_array(curr
);
510 static unsigned long intel_i810_mask_memory(struct agp_bridge_data
*bridge
,
511 dma_addr_t addr
, int type
)
513 /* Type checking must be done elsewhere */
514 return addr
| bridge
->driver
->masks
[type
].mask
;
517 static struct aper_size_info_fixed intel_i830_sizes
[] =
520 /* The 64M mode still requires a 128k gatt */
526 static void intel_i830_init_gtt_entries(void)
532 static const int ddt
[4] = { 0, 16, 32, 64 };
533 int size
; /* reserved space (in kb) at the top of stolen memory */
535 pci_read_config_word(agp_bridge
->dev
, I830_GMCH_CTRL
, &gmch_ctrl
);
539 pgetbl_ctl
= readl(intel_private
.registers
+I810_PGETBL_CTL
);
541 /* The 965 has a field telling us the size of the GTT,
542 * which may be larger than what is necessary to map the
545 switch (pgetbl_ctl
& I965_PGETBL_SIZE_MASK
) {
546 case I965_PGETBL_SIZE_128KB
:
549 case I965_PGETBL_SIZE_256KB
:
552 case I965_PGETBL_SIZE_512KB
:
555 case I965_PGETBL_SIZE_1MB
:
558 case I965_PGETBL_SIZE_2MB
:
561 case I965_PGETBL_SIZE_1_5MB
:
565 dev_info(&intel_private
.pcidev
->dev
,
566 "unknown page table size, assuming 512KB\n");
569 size
+= 4; /* add in BIOS popup space */
570 } else if (IS_G33
&& !IS_PINEVIEW
) {
571 /* G33's GTT size defined in gmch_ctrl */
572 switch (gmch_ctrl
& G33_PGETBL_SIZE_MASK
) {
573 case G33_PGETBL_SIZE_1M
:
576 case G33_PGETBL_SIZE_2M
:
580 dev_info(&agp_bridge
->dev
->dev
,
581 "unknown page table size 0x%x, assuming 512KB\n",
582 (gmch_ctrl
& G33_PGETBL_SIZE_MASK
));
586 } else if (IS_G4X
|| IS_PINEVIEW
) {
587 /* On 4 series hardware, GTT stolen is separate from graphics
588 * stolen, ignore it in stolen gtt entries counting. However,
589 * 4KB of the stolen memory doesn't get mapped to the GTT.
593 /* On previous hardware, the GTT size was just what was
594 * required to map the aperture.
596 size
= agp_bridge
->driver
->fetch_size() + 4;
599 if (agp_bridge
->dev
->device
== PCI_DEVICE_ID_INTEL_82830_HB
||
600 agp_bridge
->dev
->device
== PCI_DEVICE_ID_INTEL_82845G_HB
) {
601 switch (gmch_ctrl
& I830_GMCH_GMS_MASK
) {
602 case I830_GMCH_GMS_STOLEN_512
:
603 gtt_entries
= KB(512) - KB(size
);
605 case I830_GMCH_GMS_STOLEN_1024
:
606 gtt_entries
= MB(1) - KB(size
);
608 case I830_GMCH_GMS_STOLEN_8192
:
609 gtt_entries
= MB(8) - KB(size
);
611 case I830_GMCH_GMS_LOCAL
:
612 rdct
= readb(intel_private
.registers
+I830_RDRAM_CHANNEL_TYPE
);
613 gtt_entries
= (I830_RDRAM_ND(rdct
) + 1) *
614 MB(ddt
[I830_RDRAM_DDT(rdct
)]);
623 * SandyBridge has new memory control reg at 0x50.w
626 pci_read_config_word(intel_private
.pcidev
, SNB_GMCH_CTRL
, &snb_gmch_ctl
);
627 switch (snb_gmch_ctl
& SNB_GMCH_GMS_STOLEN_MASK
) {
628 case SNB_GMCH_GMS_STOLEN_32M
:
629 gtt_entries
= MB(32) - KB(size
);
631 case SNB_GMCH_GMS_STOLEN_64M
:
632 gtt_entries
= MB(64) - KB(size
);
634 case SNB_GMCH_GMS_STOLEN_96M
:
635 gtt_entries
= MB(96) - KB(size
);
637 case SNB_GMCH_GMS_STOLEN_128M
:
638 gtt_entries
= MB(128) - KB(size
);
640 case SNB_GMCH_GMS_STOLEN_160M
:
641 gtt_entries
= MB(160) - KB(size
);
643 case SNB_GMCH_GMS_STOLEN_192M
:
644 gtt_entries
= MB(192) - KB(size
);
646 case SNB_GMCH_GMS_STOLEN_224M
:
647 gtt_entries
= MB(224) - KB(size
);
649 case SNB_GMCH_GMS_STOLEN_256M
:
650 gtt_entries
= MB(256) - KB(size
);
652 case SNB_GMCH_GMS_STOLEN_288M
:
653 gtt_entries
= MB(288) - KB(size
);
655 case SNB_GMCH_GMS_STOLEN_320M
:
656 gtt_entries
= MB(320) - KB(size
);
658 case SNB_GMCH_GMS_STOLEN_352M
:
659 gtt_entries
= MB(352) - KB(size
);
661 case SNB_GMCH_GMS_STOLEN_384M
:
662 gtt_entries
= MB(384) - KB(size
);
664 case SNB_GMCH_GMS_STOLEN_416M
:
665 gtt_entries
= MB(416) - KB(size
);
667 case SNB_GMCH_GMS_STOLEN_448M
:
668 gtt_entries
= MB(448) - KB(size
);
670 case SNB_GMCH_GMS_STOLEN_480M
:
671 gtt_entries
= MB(480) - KB(size
);
673 case SNB_GMCH_GMS_STOLEN_512M
:
674 gtt_entries
= MB(512) - KB(size
);
678 switch (gmch_ctrl
& I855_GMCH_GMS_MASK
) {
679 case I855_GMCH_GMS_STOLEN_1M
:
680 gtt_entries
= MB(1) - KB(size
);
682 case I855_GMCH_GMS_STOLEN_4M
:
683 gtt_entries
= MB(4) - KB(size
);
685 case I855_GMCH_GMS_STOLEN_8M
:
686 gtt_entries
= MB(8) - KB(size
);
688 case I855_GMCH_GMS_STOLEN_16M
:
689 gtt_entries
= MB(16) - KB(size
);
691 case I855_GMCH_GMS_STOLEN_32M
:
692 gtt_entries
= MB(32) - KB(size
);
694 case I915_GMCH_GMS_STOLEN_48M
:
695 /* Check it's really I915G */
696 if (IS_I915
|| IS_I965
|| IS_G33
|| IS_G4X
)
697 gtt_entries
= MB(48) - KB(size
);
701 case I915_GMCH_GMS_STOLEN_64M
:
702 /* Check it's really I915G */
703 if (IS_I915
|| IS_I965
|| IS_G33
|| IS_G4X
)
704 gtt_entries
= MB(64) - KB(size
);
708 case G33_GMCH_GMS_STOLEN_128M
:
709 if (IS_G33
|| IS_I965
|| IS_G4X
)
710 gtt_entries
= MB(128) - KB(size
);
714 case G33_GMCH_GMS_STOLEN_256M
:
715 if (IS_G33
|| IS_I965
|| IS_G4X
)
716 gtt_entries
= MB(256) - KB(size
);
720 case INTEL_GMCH_GMS_STOLEN_96M
:
721 if (IS_I965
|| IS_G4X
)
722 gtt_entries
= MB(96) - KB(size
);
726 case INTEL_GMCH_GMS_STOLEN_160M
:
727 if (IS_I965
|| IS_G4X
)
728 gtt_entries
= MB(160) - KB(size
);
732 case INTEL_GMCH_GMS_STOLEN_224M
:
733 if (IS_I965
|| IS_G4X
)
734 gtt_entries
= MB(224) - KB(size
);
738 case INTEL_GMCH_GMS_STOLEN_352M
:
739 if (IS_I965
|| IS_G4X
)
740 gtt_entries
= MB(352) - KB(size
);
749 if (!local
&& gtt_entries
> intel_max_stolen
) {
750 dev_info(&agp_bridge
->dev
->dev
,
751 "detected %dK stolen memory, trimming to %dK\n",
752 gtt_entries
/ KB(1), intel_max_stolen
/ KB(1));
753 gtt_entries
= intel_max_stolen
/ KB(4);
754 } else if (gtt_entries
> 0) {
755 dev_info(&agp_bridge
->dev
->dev
, "detected %dK %s memory\n",
756 gtt_entries
/ KB(1), local
? "local" : "stolen");
757 gtt_entries
/= KB(4);
759 dev_info(&agp_bridge
->dev
->dev
,
760 "no pre-allocated video memory detected\n");
764 intel_private
.gtt_entries
= gtt_entries
;
767 static void intel_i830_fini_flush(void)
769 kunmap(intel_private
.i8xx_page
);
770 intel_private
.i8xx_flush_page
= NULL
;
771 unmap_page_from_agp(intel_private
.i8xx_page
);
773 __free_page(intel_private
.i8xx_page
);
774 intel_private
.i8xx_page
= NULL
;
777 static void intel_i830_setup_flush(void)
779 /* return if we've already set the flush mechanism up */
780 if (intel_private
.i8xx_page
)
783 intel_private
.i8xx_page
= alloc_page(GFP_KERNEL
| __GFP_ZERO
| GFP_DMA32
);
784 if (!intel_private
.i8xx_page
)
787 intel_private
.i8xx_flush_page
= kmap(intel_private
.i8xx_page
);
788 if (!intel_private
.i8xx_flush_page
)
789 intel_i830_fini_flush();
792 /* The chipset_flush interface needs to get data that has already been
793 * flushed out of the CPU all the way out to main memory, because the GPU
794 * doesn't snoop those buffers.
796 * The 8xx series doesn't have the same lovely interface for flushing the
797 * chipset write buffers that the later chips do. According to the 865
798 * specs, it's 64 octwords, or 1KB. So, to get those previous things in
799 * that buffer out, we just fill 1KB and clflush it out, on the assumption
800 * that it'll push whatever was in there out. It appears to work.
802 static void intel_i830_chipset_flush(struct agp_bridge_data
*bridge
)
804 unsigned int *pg
= intel_private
.i8xx_flush_page
;
809 clflush_cache_range(pg
, 1024);
810 else if (wbinvd_on_all_cpus() != 0)
811 printk(KERN_ERR
"Timed out waiting for cache flush.\n");
814 /* The intel i830 automatically initializes the agp aperture during POST.
815 * Use the memory already set aside for in the GTT.
817 static int intel_i830_create_gatt_table(struct agp_bridge_data
*bridge
)
820 struct aper_size_info_fixed
*size
;
824 size
= agp_bridge
->current_size
;
825 page_order
= size
->page_order
;
826 num_entries
= size
->num_entries
;
827 agp_bridge
->gatt_table_real
= NULL
;
829 pci_read_config_dword(intel_private
.pcidev
, I810_MMADDR
, &temp
);
832 intel_private
.registers
= ioremap(temp
, 128 * 4096);
833 if (!intel_private
.registers
)
836 temp
= readl(intel_private
.registers
+I810_PGETBL_CTL
) & 0xfffff000;
837 global_cache_flush(); /* FIXME: ?? */
839 /* we have to call this as early as possible after the MMIO base address is known */
840 intel_i830_init_gtt_entries();
841 if (intel_private
.gtt_entries
== 0) {
842 iounmap(intel_private
.registers
);
846 agp_bridge
->gatt_table
= NULL
;
848 agp_bridge
->gatt_bus_addr
= temp
;
/* Return the gatt table to a sane state. Use the top of stolen
 * memory for the GTT.  Nothing to free here since the table lives in
 * stolen memory owned by the BIOS. */
static int intel_i830_free_gatt_table(struct agp_bridge_data *bridge)
{
	return 0;
}
861 static int intel_i830_fetch_size(void)
864 struct aper_size_info_fixed
*values
;
866 values
= A_SIZE_FIX(agp_bridge
->driver
->aperture_sizes
);
868 if (agp_bridge
->dev
->device
!= PCI_DEVICE_ID_INTEL_82830_HB
&&
869 agp_bridge
->dev
->device
!= PCI_DEVICE_ID_INTEL_82845G_HB
) {
870 /* 855GM/852GM/865G has 128MB aperture size */
871 agp_bridge
->current_size
= (void *) values
;
872 agp_bridge
->aperture_size_idx
= 0;
873 return values
[0].size
;
876 pci_read_config_word(agp_bridge
->dev
, I830_GMCH_CTRL
, &gmch_ctrl
);
878 if ((gmch_ctrl
& I830_GMCH_MEM_MASK
) == I830_GMCH_MEM_128M
) {
879 agp_bridge
->current_size
= (void *) values
;
880 agp_bridge
->aperture_size_idx
= 0;
881 return values
[0].size
;
883 agp_bridge
->current_size
= (void *) (values
+ 1);
884 agp_bridge
->aperture_size_idx
= 1;
885 return values
[1].size
;
891 static int intel_i830_configure(void)
893 struct aper_size_info_fixed
*current_size
;
898 current_size
= A_SIZE_FIX(agp_bridge
->current_size
);
900 pci_read_config_dword(intel_private
.pcidev
, I810_GMADDR
, &temp
);
901 agp_bridge
->gart_bus_addr
= (temp
& PCI_BASE_ADDRESS_MEM_MASK
);
903 pci_read_config_word(agp_bridge
->dev
, I830_GMCH_CTRL
, &gmch_ctrl
);
904 gmch_ctrl
|= I830_GMCH_ENABLED
;
905 pci_write_config_word(agp_bridge
->dev
, I830_GMCH_CTRL
, gmch_ctrl
);
907 writel(agp_bridge
->gatt_bus_addr
|I810_PGETBL_ENABLED
, intel_private
.registers
+I810_PGETBL_CTL
);
908 readl(intel_private
.registers
+I810_PGETBL_CTL
); /* PCI Posting. */
910 if (agp_bridge
->driver
->needs_scratch_page
) {
911 for (i
= intel_private
.gtt_entries
; i
< current_size
->num_entries
; i
++) {
912 writel(agp_bridge
->scratch_page
, intel_private
.registers
+I810_PTE_BASE
+(i
*4));
914 readl(intel_private
.registers
+I810_PTE_BASE
+((i
-1)*4)); /* PCI Posting. */
917 global_cache_flush();
919 intel_i830_setup_flush();
923 static void intel_i830_cleanup(void)
925 iounmap(intel_private
.registers
);
928 static int intel_i830_insert_entries(struct agp_memory
*mem
, off_t pg_start
,
931 int i
, j
, num_entries
;
936 if (mem
->page_count
== 0)
939 temp
= agp_bridge
->current_size
;
940 num_entries
= A_SIZE_FIX(temp
)->num_entries
;
942 if (pg_start
< intel_private
.gtt_entries
) {
943 dev_printk(KERN_DEBUG
, &intel_private
.pcidev
->dev
,
944 "pg_start == 0x%.8lx, intel_private.gtt_entries == 0x%.8x\n",
945 pg_start
, intel_private
.gtt_entries
);
947 dev_info(&intel_private
.pcidev
->dev
,
948 "trying to insert into local/stolen memory\n");
952 if ((pg_start
+ mem
->page_count
) > num_entries
)
955 /* The i830 can't check the GTT for entries since its read only,
956 * depend on the caller to make the correct offset decisions.
959 if (type
!= mem
->type
)
962 mask_type
= agp_bridge
->driver
->agp_type_to_mask_type(agp_bridge
, type
);
964 if (mask_type
!= 0 && mask_type
!= AGP_PHYS_MEMORY
&&
965 mask_type
!= INTEL_AGP_CACHED_MEMORY
)
968 if (!mem
->is_flushed
)
969 global_cache_flush();
971 for (i
= 0, j
= pg_start
; i
< mem
->page_count
; i
++, j
++) {
972 writel(agp_bridge
->driver
->mask_memory(agp_bridge
,
973 page_to_phys(mem
->pages
[i
]), mask_type
),
974 intel_private
.registers
+I810_PTE_BASE
+(j
*4));
976 readl(intel_private
.registers
+I810_PTE_BASE
+((j
-1)*4));
981 mem
->is_flushed
= true;
985 static int intel_i830_remove_entries(struct agp_memory
*mem
, off_t pg_start
,
990 if (mem
->page_count
== 0)
993 if (pg_start
< intel_private
.gtt_entries
) {
994 dev_info(&intel_private
.pcidev
->dev
,
995 "trying to disable local/stolen memory\n");
999 for (i
= pg_start
; i
< (mem
->page_count
+ pg_start
); i
++) {
1000 writel(agp_bridge
->scratch_page
, intel_private
.registers
+I810_PTE_BASE
+(i
*4));
1002 readl(intel_private
.registers
+I810_PTE_BASE
+((i
-1)*4));
1007 static struct agp_memory
*intel_i830_alloc_by_type(size_t pg_count
, int type
)
1009 if (type
== AGP_PHYS_MEMORY
)
1010 return alloc_agpphysmem_i8xx(pg_count
, type
);
1011 /* always return NULL for other allocation types for now */
1015 static int intel_alloc_chipset_flush_resource(void)
1018 ret
= pci_bus_alloc_resource(agp_bridge
->dev
->bus
, &intel_private
.ifp_resource
, PAGE_SIZE
,
1019 PAGE_SIZE
, PCIBIOS_MIN_MEM
, 0,
1020 pcibios_align_resource
, agp_bridge
->dev
);
1025 static void intel_i915_setup_chipset_flush(void)
1030 pci_read_config_dword(agp_bridge
->dev
, I915_IFPADDR
, &temp
);
1031 if (!(temp
& 0x1)) {
1032 intel_alloc_chipset_flush_resource();
1033 intel_private
.resource_valid
= 1;
1034 pci_write_config_dword(agp_bridge
->dev
, I915_IFPADDR
, (intel_private
.ifp_resource
.start
& 0xffffffff) | 0x1);
1038 intel_private
.resource_valid
= 1;
1039 intel_private
.ifp_resource
.start
= temp
;
1040 intel_private
.ifp_resource
.end
= temp
+ PAGE_SIZE
;
1041 ret
= request_resource(&iomem_resource
, &intel_private
.ifp_resource
);
1042 /* some BIOSes reserve this area in a pnp some don't */
1044 intel_private
.resource_valid
= 0;
1048 static void intel_i965_g33_setup_chipset_flush(void)
1050 u32 temp_hi
, temp_lo
;
1053 pci_read_config_dword(agp_bridge
->dev
, I965_IFPADDR
+ 4, &temp_hi
);
1054 pci_read_config_dword(agp_bridge
->dev
, I965_IFPADDR
, &temp_lo
);
1056 if (!(temp_lo
& 0x1)) {
1058 intel_alloc_chipset_flush_resource();
1060 intel_private
.resource_valid
= 1;
1061 pci_write_config_dword(agp_bridge
->dev
, I965_IFPADDR
+ 4,
1062 upper_32_bits(intel_private
.ifp_resource
.start
));
1063 pci_write_config_dword(agp_bridge
->dev
, I965_IFPADDR
, (intel_private
.ifp_resource
.start
& 0xffffffff) | 0x1);
1068 l64
= ((u64
)temp_hi
<< 32) | temp_lo
;
1070 intel_private
.resource_valid
= 1;
1071 intel_private
.ifp_resource
.start
= l64
;
1072 intel_private
.ifp_resource
.end
= l64
+ PAGE_SIZE
;
1073 ret
= request_resource(&iomem_resource
, &intel_private
.ifp_resource
);
1074 /* some BIOSes reserve this area in a pnp some don't */
1076 intel_private
.resource_valid
= 0;
1080 static void intel_i9xx_setup_flush(void)
1082 /* return if already configured */
1083 if (intel_private
.ifp_resource
.start
)
1089 /* setup a resource for this object */
1090 intel_private
.ifp_resource
.name
= "Intel Flush Page";
1091 intel_private
.ifp_resource
.flags
= IORESOURCE_MEM
;
1093 /* Setup chipset flush for 915 */
1094 if (IS_I965
|| IS_G33
|| IS_G4X
) {
1095 intel_i965_g33_setup_chipset_flush();
1097 intel_i915_setup_chipset_flush();
1100 if (intel_private
.ifp_resource
.start
)
1101 intel_private
.i9xx_flush_page
= ioremap_nocache(intel_private
.ifp_resource
.start
, PAGE_SIZE
);
1102 if (!intel_private
.i9xx_flush_page
)
1103 dev_err(&intel_private
.pcidev
->dev
,
1104 "can't ioremap flush page - no chipset flushing\n");
1107 static int intel_i9xx_configure(void)
1109 struct aper_size_info_fixed
*current_size
;
1114 current_size
= A_SIZE_FIX(agp_bridge
->current_size
);
1116 pci_read_config_dword(intel_private
.pcidev
, I915_GMADDR
, &temp
);
1118 agp_bridge
->gart_bus_addr
= (temp
& PCI_BASE_ADDRESS_MEM_MASK
);
1120 pci_read_config_word(agp_bridge
->dev
, I830_GMCH_CTRL
, &gmch_ctrl
);
1121 gmch_ctrl
|= I830_GMCH_ENABLED
;
1122 pci_write_config_word(agp_bridge
->dev
, I830_GMCH_CTRL
, gmch_ctrl
);
1124 writel(agp_bridge
->gatt_bus_addr
|I810_PGETBL_ENABLED
, intel_private
.registers
+I810_PGETBL_CTL
);
1125 readl(intel_private
.registers
+I810_PGETBL_CTL
); /* PCI Posting. */
1127 if (agp_bridge
->driver
->needs_scratch_page
) {
1128 for (i
= intel_private
.gtt_entries
; i
< intel_private
.gtt_total_size
; i
++) {
1129 writel(agp_bridge
->scratch_page
, intel_private
.gtt
+i
);
1131 readl(intel_private
.gtt
+i
-1); /* PCI Posting. */
1134 global_cache_flush();
1136 intel_i9xx_setup_flush();
1141 static void intel_i915_cleanup(void)
1143 if (intel_private
.i9xx_flush_page
)
1144 iounmap(intel_private
.i9xx_flush_page
);
1145 if (intel_private
.resource_valid
)
1146 release_resource(&intel_private
.ifp_resource
);
1147 intel_private
.ifp_resource
.start
= 0;
1148 intel_private
.resource_valid
= 0;
1149 iounmap(intel_private
.gtt
);
1150 iounmap(intel_private
.registers
);
1153 static void intel_i915_chipset_flush(struct agp_bridge_data
*bridge
)
1155 if (intel_private
.i9xx_flush_page
)
1156 writel(1, intel_private
.i9xx_flush_page
);
1159 static int intel_i915_insert_entries(struct agp_memory
*mem
, off_t pg_start
,
1167 if (mem
->page_count
== 0)
1170 temp
= agp_bridge
->current_size
;
1171 num_entries
= A_SIZE_FIX(temp
)->num_entries
;
1173 if (pg_start
< intel_private
.gtt_entries
) {
1174 dev_printk(KERN_DEBUG
, &intel_private
.pcidev
->dev
,
1175 "pg_start == 0x%.8lx, intel_private.gtt_entries == 0x%.8x\n",
1176 pg_start
, intel_private
.gtt_entries
);
1178 dev_info(&intel_private
.pcidev
->dev
,
1179 "trying to insert into local/stolen memory\n");
1183 if ((pg_start
+ mem
->page_count
) > num_entries
)
1186 /* The i915 can't check the GTT for entries since it's read only;
1187 * depend on the caller to make the correct offset decisions.
1190 if (type
!= mem
->type
)
1193 mask_type
= agp_bridge
->driver
->agp_type_to_mask_type(agp_bridge
, type
);
1195 if (!IS_SNB
&& mask_type
!= 0 && mask_type
!= AGP_PHYS_MEMORY
&&
1196 mask_type
!= INTEL_AGP_CACHED_MEMORY
)
1199 if (!mem
->is_flushed
)
1200 global_cache_flush();
1202 intel_agp_insert_sg_entries(mem
, pg_start
, mask_type
);
1207 mem
->is_flushed
= true;
1211 static int intel_i915_remove_entries(struct agp_memory
*mem
, off_t pg_start
,
1216 if (mem
->page_count
== 0)
1219 if (pg_start
< intel_private
.gtt_entries
) {
1220 dev_info(&intel_private
.pcidev
->dev
,
1221 "trying to disable local/stolen memory\n");
1225 for (i
= pg_start
; i
< (mem
->page_count
+ pg_start
); i
++)
1226 writel(agp_bridge
->scratch_page
, intel_private
.gtt
+i
);
1228 readl(intel_private
.gtt
+i
-1);
1233 /* Return the aperture size by just checking the resource length. The effect
1234 * described in the spec of the MSAC registers is just changing of the
1237 static int intel_i9xx_fetch_size(void)
1239 int num_sizes
= ARRAY_SIZE(intel_i830_sizes
);
1240 int aper_size
; /* size in megabytes */
1243 aper_size
= pci_resource_len(intel_private
.pcidev
, 2) / MB(1);
1245 for (i
= 0; i
< num_sizes
; i
++) {
1246 if (aper_size
== intel_i830_sizes
[i
].size
) {
1247 agp_bridge
->current_size
= intel_i830_sizes
+ i
;
1255 static int intel_i915_get_gtt_size(void)
1262 /* G33's GTT size defined in gmch_ctrl */
1263 pci_read_config_word(agp_bridge
->dev
, I830_GMCH_CTRL
, &gmch_ctrl
);
1264 switch (gmch_ctrl
& I830_GMCH_GMS_MASK
) {
1265 case I830_GMCH_GMS_STOLEN_512
:
1268 case I830_GMCH_GMS_STOLEN_1024
:
1271 case I830_GMCH_GMS_STOLEN_8192
:
1275 dev_info(&agp_bridge
->dev
->dev
,
1276 "unknown page table size 0x%x, assuming 512KB\n",
1277 (gmch_ctrl
& I830_GMCH_GMS_MASK
));
1281 /* On previous hardware, the GTT size was just what was
1282 * required to map the aperture.
1284 size
= agp_bridge
->driver
->fetch_size();
1290 /* The intel i915 automatically initializes the agp aperture during POST.
1291 * Use the memory already set aside for in the GTT.
1293 static int intel_i915_create_gatt_table(struct agp_bridge_data
*bridge
)
1296 struct aper_size_info_fixed
*size
;
1301 size
= agp_bridge
->current_size
;
1302 page_order
= size
->page_order
;
1303 num_entries
= size
->num_entries
;
1304 agp_bridge
->gatt_table_real
= NULL
;
1306 pci_read_config_dword(intel_private
.pcidev
, I915_MMADDR
, &temp
);
1307 pci_read_config_dword(intel_private
.pcidev
, I915_PTEADDR
, &temp2
);
1309 gtt_map_size
= intel_i915_get_gtt_size();
1311 intel_private
.gtt
= ioremap(temp2
, gtt_map_size
);
1312 if (!intel_private
.gtt
)
1315 intel_private
.gtt_total_size
= gtt_map_size
/ 4;
1319 intel_private
.registers
= ioremap(temp
, 128 * 4096);
1320 if (!intel_private
.registers
) {
1321 iounmap(intel_private
.gtt
);
1325 temp
= readl(intel_private
.registers
+I810_PGETBL_CTL
) & 0xfffff000;
1326 global_cache_flush(); /* FIXME: ? */
1328 /* we have to call this as early as possible after the MMIO base address is known */
1329 intel_i830_init_gtt_entries();
1330 if (intel_private
.gtt_entries
== 0) {
1331 iounmap(intel_private
.gtt
);
1332 iounmap(intel_private
.registers
);
1336 agp_bridge
->gatt_table
= NULL
;
1338 agp_bridge
->gatt_bus_addr
= temp
;
1344 * The i965 supports 36-bit physical addresses, but to keep
1345 * the format of the GTT the same, the bits that don't fit
1346 * in a 32-bit word are shifted down to bits 4..7.
1348 * Gcc is smart enough to notice that "(addr >> 28) & 0xf0"
1349 * is always zero on 32-bit architectures, so no need to make
1352 static unsigned long intel_i965_mask_memory(struct agp_bridge_data
*bridge
,
1353 dma_addr_t addr
, int type
)
1355 /* Shift high bits down */
1356 addr
|= (addr
>> 28) & 0xf0;
1358 /* Type checking must be done elsewhere */
1359 return addr
| bridge
->driver
->masks
[type
].mask
;
1362 static unsigned long intel_gen6_mask_memory(struct agp_bridge_data
*bridge
,
1363 dma_addr_t addr
, int type
)
1365 /* gen6 has bit11-4 for physical addr bit39-32 */
1366 addr
|= (addr
>> 28) & 0xff0;
1368 /* Type checking must be done elsewhere */
1369 return addr
| bridge
->driver
->masks
[type
].mask
;
1372 static void intel_i965_get_gtt_range(int *gtt_offset
, int *gtt_size
)
1376 switch (agp_bridge
->dev
->device
) {
1377 case PCI_DEVICE_ID_INTEL_GM45_HB
:
1378 case PCI_DEVICE_ID_INTEL_EAGLELAKE_HB
:
1379 case PCI_DEVICE_ID_INTEL_Q45_HB
:
1380 case PCI_DEVICE_ID_INTEL_G45_HB
:
1381 case PCI_DEVICE_ID_INTEL_G41_HB
:
1382 case PCI_DEVICE_ID_INTEL_B43_HB
:
1383 case PCI_DEVICE_ID_INTEL_IRONLAKE_D_HB
:
1384 case PCI_DEVICE_ID_INTEL_IRONLAKE_M_HB
:
1385 case PCI_DEVICE_ID_INTEL_IRONLAKE_MA_HB
:
1386 case PCI_DEVICE_ID_INTEL_IRONLAKE_MC2_HB
:
1387 *gtt_offset
= *gtt_size
= MB(2);
1389 case PCI_DEVICE_ID_INTEL_SANDYBRIDGE_HB
:
1390 case PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_HB
:
1391 case PCI_DEVICE_ID_INTEL_SANDYBRIDGE_S_HB
:
1392 *gtt_offset
= MB(2);
1394 pci_read_config_word(intel_private
.pcidev
, SNB_GMCH_CTRL
, &snb_gmch_ctl
);
1395 switch (snb_gmch_ctl
& SNB_GTT_SIZE_MASK
) {
1397 case SNB_GTT_SIZE_0M
:
1398 printk(KERN_ERR
"Bad GTT size mask: 0x%04x.\n", snb_gmch_ctl
);
1401 case SNB_GTT_SIZE_1M
:
1404 case SNB_GTT_SIZE_2M
:
1410 *gtt_offset
= *gtt_size
= KB(512);
1414 /* The intel i965 automatically initializes the agp aperture during POST.
1415 * Use the memory already set aside for in the GTT.
1417 static int intel_i965_create_gatt_table(struct agp_bridge_data
*bridge
)
1420 struct aper_size_info_fixed
*size
;
1423 int gtt_offset
, gtt_size
;
1425 size
= agp_bridge
->current_size
;
1426 page_order
= size
->page_order
;
1427 num_entries
= size
->num_entries
;
1428 agp_bridge
->gatt_table_real
= NULL
;
1430 pci_read_config_dword(intel_private
.pcidev
, I915_MMADDR
, &temp
);
1434 intel_i965_get_gtt_range(>t_offset
, >t_size
);
1436 intel_private
.gtt
= ioremap((temp
+ gtt_offset
) , gtt_size
);
1438 if (!intel_private
.gtt
)
1441 intel_private
.gtt_total_size
= gtt_size
/ 4;
1443 intel_private
.registers
= ioremap(temp
, 128 * 4096);
1444 if (!intel_private
.registers
) {
1445 iounmap(intel_private
.gtt
);
1449 temp
= readl(intel_private
.registers
+I810_PGETBL_CTL
) & 0xfffff000;
1450 global_cache_flush(); /* FIXME: ? */
1452 /* we have to call this as early as possible after the MMIO base address is known */
1453 intel_i830_init_gtt_entries();
1454 if (intel_private
.gtt_entries
== 0) {
1455 iounmap(intel_private
.gtt
);
1456 iounmap(intel_private
.registers
);
1460 agp_bridge
->gatt_table
= NULL
;
1462 agp_bridge
->gatt_bus_addr
= temp
;
/* Bridge driver for i810/i815-class hardware: the GATT is allocated by
 * the generic agp code, while insert/remove and type allocation use the
 * i810-specific hooks (dcache and physical memory types). */
static const struct agp_bridge_driver intel_810_driver = {
	.owner			= THIS_MODULE,
	.aperture_sizes		= intel_i810_sizes,
	.size_type		= FIXED_APER_SIZE,
	.num_aperture_sizes	= 2,
	.needs_scratch_page	= true,
	.configure		= intel_i810_configure,
	.fetch_size		= intel_i810_fetch_size,
	.cleanup		= intel_i810_cleanup,
	.mask_memory		= intel_i810_mask_memory,
	.masks			= intel_i810_masks,
	.agp_enable		= intel_i810_agp_enable,
	.cache_flush		= global_cache_flush,
	/* i810 has no BIOS-provided GTT, so the generic GATT paths apply. */
	.create_gatt_table	= agp_generic_create_gatt_table,
	.free_gatt_table	= agp_generic_free_gatt_table,
	.insert_memory		= intel_i810_insert_entries,
	.remove_memory		= intel_i810_remove_entries,
	.alloc_by_type		= intel_i810_alloc_by_type,
	.free_by_type		= intel_i810_free_by_type,
	.agp_alloc_page		= agp_generic_alloc_page,
	.agp_alloc_pages	= agp_generic_alloc_pages,
	.agp_destroy_page	= agp_generic_destroy_page,
	.agp_destroy_pages	= agp_generic_destroy_pages,
	.agp_type_to_mask_type	= agp_generic_type_to_mask_type,
};
/* Bridge driver for i830/i845/i85x/i865-class hardware: GATT creation and
 * teardown use the i830 paths, PTE masking is shared with i810, and a
 * chipset flush callback is provided. */
static const struct agp_bridge_driver intel_830_driver = {
	.owner			= THIS_MODULE,
	.aperture_sizes		= intel_i830_sizes,
	.size_type		= FIXED_APER_SIZE,
	.num_aperture_sizes	= 4,
	.needs_scratch_page	= true,
	.configure		= intel_i830_configure,
	.fetch_size		= intel_i830_fetch_size,
	.cleanup		= intel_i830_cleanup,
	.mask_memory		= intel_i810_mask_memory,
	.masks			= intel_i810_masks,
	.agp_enable		= intel_i810_agp_enable,
	.cache_flush		= global_cache_flush,
	.create_gatt_table	= intel_i830_create_gatt_table,
	.free_gatt_table	= intel_i830_free_gatt_table,
	.insert_memory		= intel_i830_insert_entries,
	.remove_memory		= intel_i830_remove_entries,
	.alloc_by_type		= intel_i830_alloc_by_type,
	.free_by_type		= intel_i810_free_by_type,
	.agp_alloc_page		= agp_generic_alloc_page,
	.agp_alloc_pages	= agp_generic_alloc_pages,
	.agp_destroy_page	= agp_generic_destroy_page,
	.agp_destroy_pages	= agp_generic_destroy_pages,
	.agp_type_to_mask_type	= intel_i830_type_to_mask_type,
	.chipset_flush		= intel_i830_chipset_flush,
};
/* Bridge driver for i915-class hardware: uses the BIOS-initialized GTT
 * (intel_i915_create_gatt_table) and, when the PCI DMA API is in use,
 * the intel_agp_map/unmap helpers for page mapping. */
static const struct agp_bridge_driver intel_915_driver = {
	.owner			= THIS_MODULE,
	.aperture_sizes		= intel_i830_sizes,
	.size_type		= FIXED_APER_SIZE,
	.num_aperture_sizes	= 4,
	.needs_scratch_page	= true,
	.configure		= intel_i9xx_configure,
	.fetch_size		= intel_i9xx_fetch_size,
	.cleanup		= intel_i915_cleanup,
	.mask_memory		= intel_i810_mask_memory,
	.masks			= intel_i810_masks,
	.agp_enable		= intel_i810_agp_enable,
	.cache_flush		= global_cache_flush,
	.create_gatt_table	= intel_i915_create_gatt_table,
	.free_gatt_table	= intel_i830_free_gatt_table,
	.insert_memory		= intel_i915_insert_entries,
	.remove_memory		= intel_i915_remove_entries,
	.alloc_by_type		= intel_i830_alloc_by_type,
	.free_by_type		= intel_i810_free_by_type,
	.agp_alloc_page		= agp_generic_alloc_page,
	.agp_alloc_pages	= agp_generic_alloc_pages,
	.agp_destroy_page	= agp_generic_destroy_page,
	.agp_destroy_pages	= agp_generic_destroy_pages,
	.agp_type_to_mask_type	= intel_i830_type_to_mask_type,
	.chipset_flush		= intel_i915_chipset_flush,
#ifdef USE_PCI_DMA_API
	.agp_map_page		= intel_agp_map_page,
	.agp_unmap_page		= intel_agp_unmap_page,
	.agp_map_memory		= intel_agp_map_memory,
	.agp_unmap_memory	= intel_agp_unmap_memory,
#endif
};
/* Bridge driver for i965-class hardware: like the i915 driver but with
 * the i965 GATT setup and the 36-bit-aware PTE mask function. */
static const struct agp_bridge_driver intel_i965_driver = {
	.owner			= THIS_MODULE,
	.aperture_sizes		= intel_i830_sizes,
	.size_type		= FIXED_APER_SIZE,
	.num_aperture_sizes	= 4,
	.needs_scratch_page	= true,
	.configure		= intel_i9xx_configure,
	.fetch_size		= intel_i9xx_fetch_size,
	.cleanup		= intel_i915_cleanup,
	.mask_memory		= intel_i965_mask_memory,
	.masks			= intel_i810_masks,
	.agp_enable		= intel_i810_agp_enable,
	.cache_flush		= global_cache_flush,
	.create_gatt_table	= intel_i965_create_gatt_table,
	.free_gatt_table	= intel_i830_free_gatt_table,
	.insert_memory		= intel_i915_insert_entries,
	.remove_memory		= intel_i915_remove_entries,
	.alloc_by_type		= intel_i830_alloc_by_type,
	.free_by_type		= intel_i810_free_by_type,
	.agp_alloc_page		= agp_generic_alloc_page,
	.agp_alloc_pages	= agp_generic_alloc_pages,
	.agp_destroy_page	= agp_generic_destroy_page,
	.agp_destroy_pages	= agp_generic_destroy_pages,
	.agp_type_to_mask_type	= intel_i830_type_to_mask_type,
	.chipset_flush		= intel_i915_chipset_flush,
#ifdef USE_PCI_DMA_API
	.agp_map_page		= intel_agp_map_page,
	.agp_unmap_page		= intel_agp_unmap_page,
	.agp_map_memory		= intel_agp_map_memory,
	.agp_unmap_memory	= intel_agp_unmap_memory,
#endif
};
/* Bridge driver for Sandybridge (gen6) hardware: uses the i965 GATT
 * setup plus gen6-specific PTE masks, mask table, and type mapping. */
static const struct agp_bridge_driver intel_gen6_driver = {
	.owner			= THIS_MODULE,
	.aperture_sizes		= intel_i830_sizes,
	.size_type		= FIXED_APER_SIZE,
	.num_aperture_sizes	= 4,
	.needs_scratch_page	= true,
	.configure		= intel_i9xx_configure,
	.fetch_size		= intel_i9xx_fetch_size,
	.cleanup		= intel_i915_cleanup,
	.mask_memory		= intel_gen6_mask_memory,
	.masks			= intel_gen6_masks,
	.agp_enable		= intel_i810_agp_enable,
	.cache_flush		= global_cache_flush,
	.create_gatt_table	= intel_i965_create_gatt_table,
	.free_gatt_table	= intel_i830_free_gatt_table,
	.insert_memory		= intel_i915_insert_entries,
	.remove_memory		= intel_i915_remove_entries,
	.alloc_by_type		= intel_i830_alloc_by_type,
	.free_by_type		= intel_i810_free_by_type,
	.agp_alloc_page		= agp_generic_alloc_page,
	.agp_alloc_pages	= agp_generic_alloc_pages,
	.agp_destroy_page	= agp_generic_destroy_page,
	.agp_destroy_pages	= agp_generic_destroy_pages,
	.agp_type_to_mask_type	= intel_gen6_type_to_mask_type,
	.chipset_flush		= intel_i915_chipset_flush,
#ifdef USE_PCI_DMA_API
	.agp_map_page		= intel_agp_map_page,
	.agp_unmap_page		= intel_agp_unmap_page,
	.agp_map_memory		= intel_agp_map_memory,
	.agp_unmap_memory	= intel_agp_unmap_memory,
#endif
};
1619 static const struct agp_bridge_driver intel_g33_driver
= {
1620 .owner
= THIS_MODULE
,
1621 .aperture_sizes
= intel_i830_sizes
,
1622 .size_type
= FIXED_APER_SIZE
,
1623 .num_aperture_sizes
= 4,
1624 .needs_scratch_page
= true,
1625 .configure
= intel_i9xx_configure
,
1626 .fetch_size
= intel_i9xx_fetch_size
,
1627 .cleanup
= intel_i915_cleanup
,
1628 .mask_memory
= intel_i965_mask_memory
,
1629 .masks
= intel_i810_masks
,
1630 .agp_enable
= intel_i810_agp_enable
,
1631 .cache_flush
= global_cache_flush
,
1632 .create_gatt_table
= intel_i915_create_gatt_table
,
1633 .free_gatt_table
= intel_i830_free_gatt_table
,
1634 .insert_memory
= intel_i915_insert_entries
,
1635 .remove_memory
= intel_i915_remove_entries
,
1636 .alloc_by_type
= intel_i830_alloc_by_type
,
1637 .free_by_type
= intel_i810_free_by_type
,
1638 .agp_alloc_page
= agp_generic_alloc_page
,
1639 .agp_alloc_pages
= agp_generic_alloc_pages
,
1640 .agp_destroy_page
= agp_generic_destroy_page
,
1641 .agp_destroy_pages
= agp_generic_destroy_pages
,
1642 .agp_type_to_mask_type
= intel_i830_type_to_mask_type
,
1643 .chipset_flush
= intel_i915_chipset_flush
,
1644 #ifdef USE_PCI_DMA_API
1645 .agp_map_page
= intel_agp_map_page
,
1646 .agp_unmap_page
= intel_agp_unmap_page
,
1647 .agp_map_memory
= intel_agp_map_memory
,
1648 .agp_unmap_memory
= intel_agp_unmap_memory
,