/*
 * Copyright © 2008-2012 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Eric Anholt <eric@anholt.net>
 *    Chris Wilson <chris@chris-wilson.co.uk>
 *
 */
#include <drm/drmP.h>
#include <drm/i915_drm.h>
#include "i915_drv.h"

#define KB(x) ((x) * 1024)
#define MB(x) (KB(x) * 1024)
/*
 * The BIOS typically reserves some of the system's memory for the exclusive
 * use of the integrated graphics. This memory is no longer available for
 * use by the OS, so the user finds that the system has less memory
 * available than was installed. We refer to this memory as stolen.
 *
 * The BIOS will allocate its framebuffer from the stolen memory. Our
 * goal is to try to reuse that object for our own fbcon, which must always
 * be available for panics. Anything else we can reuse the stolen memory
 * for is a boon.
 */
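
/*
 * i915_gem_stolen_insert_node_in_range() hands out a block from the stolen
 * drm_mm allocator, constrained to the caller-supplied [start, end) range.
 * All access to the allocator is serialised by dev_priv->mm.stolen_lock.
 */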
int i915_gem_stolen_insert_node_in_range(struct drm_i915_private *dev_priv,
					 struct drm_mm_node *node, u64 size,
					 unsigned alignment, u64 start, u64 end)
{
	int ret;

	if (!drm_mm_initialized(&dev_priv->mm.stolen))
		return -ENODEV;

	/* See the comment at the drm_mm_init() call for more about this check.
	 * WaSkipStolenMemoryFirstPage:bdw,chv (incomplete) */
	if (INTEL_INFO(dev_priv)->gen == 8 && start < 4096)
		start = 4096;

	mutex_lock(&dev_priv->mm.stolen_lock);
	ret = drm_mm_insert_node_in_range(&dev_priv->mm.stolen, node, size,
					  alignment, start, end,
					  DRM_MM_SEARCH_DEFAULT);
	mutex_unlock(&dev_priv->mm.stolen_lock);

	return ret;
}
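
/*
 * i915_gem_stolen_insert_node() is the common wrapper around the range
 * version above: it lets the allocation land anywhere inside the usable
 * portion of stolen memory (below ggtt->stolen_usable_size).
 */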
int i915_gem_stolen_insert_node(struct drm_i915_private *dev_priv,
				struct drm_mm_node *node, u64 size,
				unsigned alignment)
{
	struct i915_ggtt *ggtt = &dev_priv->ggtt;

	return i915_gem_stolen_insert_node_in_range(dev_priv, node, size,
						    alignment, 0,
						    ggtt->stolen_usable_size);
}
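
/*
 * i915_gem_stolen_remove_node() gives a previously allocated block back to
 * the stolen allocator, again under the stolen_lock.
 */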
void i915_gem_stolen_remove_node(struct drm_i915_private *dev_priv,
				 struct drm_mm_node *node)
{
	mutex_lock(&dev_priv->mm.stolen_lock);
	drm_mm_remove_node(node);
	mutex_unlock(&dev_priv->mm.stolen_lock);
}
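
/*
 * i915_stolen_to_physical() works out the physical address of the stolen
 * region for the running platform and verifies that nothing else in the
 * system has already claimed that range; a return value of 0 means the
 * base could not be determined (or the range is unusable) and stolen
 * memory will not be used.
 */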
static unsigned long i915_stolen_to_physical(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct i915_ggtt *ggtt = &dev_priv->ggtt;
	struct resource *r;
	u32 base;

	/* Almost universally we can find the Graphics Base of Stolen Memory
	 * at register BSM (0x5c) in the igfx configuration space. On a few
	 * (desktop) machines this is also mirrored in the bridge device at
	 * different locations, or in the MCHBAR.
	 *
	 * On 865 we just check the TOUD register.
	 *
	 * On 830/845/85x the stolen memory base isn't available in any
	 * register. We need to calculate it as TOM-TSEG_SIZE-stolen_size.
	 */
	base = 0;
	if (INTEL_INFO(dev)->gen >= 3) {
		u32 bsm;

		pci_read_config_dword(dev->pdev, BSM, &bsm);

		base = bsm & BSM_MASK;
	} else if (IS_I865G(dev)) {
		/*
		 * FIXME is the graphics stolen memory region
		 * always at TOUD? Ie. is it always the last
		 * one to be allocated by the BIOS?
		 */
		pci_bus_read_config_word(dev->pdev->bus, PCI_DEVFN(0, 0),
	} else if (IS_I85X(dev)) {
		pci_bus_read_config_byte(dev->pdev->bus, PCI_DEVFN(0, 0),

		if (tmp & TSEG_ENABLE)
			tseg_size = MB(1);

		pci_bus_read_config_byte(dev->pdev->bus, PCI_DEVFN(0, 1),

		base = tom - tseg_size - ggtt->stolen_size;
	} else if (IS_845G(dev)) {
		pci_bus_read_config_byte(dev->pdev->bus, PCI_DEVFN(0, 0),

		if (tmp & TSEG_ENABLE) {
			switch (tmp & I845_TSEG_SIZE_MASK) {
			case I845_TSEG_SIZE_512K:
				tseg_size = KB(512);
				break;
			case I845_TSEG_SIZE_1M:
				tseg_size = MB(1);
				break;
			}
		}

		pci_bus_read_config_byte(dev->pdev->bus, PCI_DEVFN(0, 0),

		base = tom - tseg_size - ggtt->stolen_size;
	} else if (IS_I830(dev)) {
		pci_bus_read_config_byte(dev->pdev->bus, PCI_DEVFN(0, 0),

		if (tmp & TSEG_ENABLE) {
			if (tmp & I830_TSEG_SIZE_1M)
				tseg_size = MB(1);
			else
				tseg_size = KB(512);
		}

		pci_bus_read_config_byte(dev->pdev->bus, PCI_DEVFN(0, 0),

		base = tom - tseg_size - ggtt->stolen_size;
	}

	/* make sure we don't clobber the GTT if it's within stolen memory */
	if (INTEL_INFO(dev)->gen <= 4 && !IS_G33(dev) && !IS_G4X(dev)) {
		struct {
			u32 start, end;
		} stolen[2] = {
			{ .start = base, .end = base + ggtt->stolen_size, },
			{ .start = base, .end = base + ggtt->stolen_size, },
		};
		u64 ggtt_start, ggtt_end;
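
		/* The BIOS may have placed the GTT itself inside stolen
		 * memory. Read its base from PGTBL_CTL and work out where it
		 * ends; each GTT entry is 4 bytes on these older platforms.
		 */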
		ggtt_start = I915_READ(PGTBL_CTL);
		if (IS_GEN4(dev))
			ggtt_start = (ggtt_start & PGTBL_ADDRESS_LO_MASK) |
				     (ggtt_start & PGTBL_ADDRESS_HI_MASK) << 28;
		else
			ggtt_start &= PGTBL_ADDRESS_LO_MASK;
		ggtt_end = ggtt_start + ggtt_total_entries(ggtt) * 4;

		if (ggtt_start >= stolen[0].start && ggtt_start < stolen[0].end)
			stolen[0].end = ggtt_start;
		if (ggtt_end > stolen[1].start && ggtt_end <= stolen[1].end)
			stolen[1].start = ggtt_end;

		/* pick the larger of the two chunks */
		if (stolen[0].end - stolen[0].start >
		    stolen[1].end - stolen[1].start) {
			base = stolen[0].start;
			ggtt->stolen_size = stolen[0].end - stolen[0].start;
		} else {
			base = stolen[1].start;
			ggtt->stolen_size = stolen[1].end - stolen[1].start;
		}

		if (stolen[0].start != stolen[1].start ||
		    stolen[0].end != stolen[1].end) {
			DRM_DEBUG_KMS("GTT within stolen memory at 0x%llx-0x%llx\n",
				      (unsigned long long)ggtt_start,
				      (unsigned long long)ggtt_end - 1);
			DRM_DEBUG_KMS("Stolen memory adjusted to 0x%x-0x%x\n",
				      base, base + (u32)ggtt->stolen_size - 1);
		}
	}

	/* Verify that nothing else uses this physical address. Stolen
	 * memory should be reserved by the BIOS and hidden from the
	 * kernel. So if the region is already marked as busy, something
	 * is seriously wrong.
	 */
	r = devm_request_mem_region(dev->dev, base, ggtt->stolen_size,
				    "Graphics Stolen Memory");
	if (r == NULL) {
		/*
		 * One more attempt but this time requesting region from
		 * base + 1, as we have seen that this resolves the region
		 * conflict with the PCI Bus.
		 * This is a BIOS w/a: Some BIOS wrap stolen in the root
		 * PCI bus, but have an off-by-one error. Hence retry the
		 * reservation starting from 1 instead of 0.
		 */
		r = devm_request_mem_region(dev->dev, base + 1,
					    ggtt->stolen_size - 1,
					    "Graphics Stolen Memory");

		/*
		 * GEN3 firmware likes to smash pci bridges into the stolen
		 * range. Apparently this works.
		 */
		if (r == NULL && !IS_GEN3(dev)) {
			DRM_ERROR("conflict detected with stolen region: [0x%08x - 0x%08x]\n",
				  base, base + (uint32_t)ggtt->stolen_size);
			base = 0;
		}
	}

	return base;
}

void i915_gem_cleanup_stolen(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (!drm_mm_initialized(&dev_priv->mm.stolen))
		return;

	drm_mm_takedown(&dev_priv->mm.stolen);
}
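
/*
 * The *_get_stolen_reserved() helpers below decode the per-generation
 * STOLEN_RESERVED register to find the base and size of the chunk of
 * stolen memory that the BIOS/firmware keeps for its own use, which we
 * must therefore exclude from the usable stolen range.
 */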
static void g4x_get_stolen_reserved(struct drm_i915_private *dev_priv,
				    unsigned long *base, unsigned long *size)
{
	struct i915_ggtt *ggtt = &dev_priv->ggtt;
	uint32_t reg_val = I915_READ(IS_GM45(dev_priv) ?
				     CTG_STOLEN_RESERVED :
				     ELK_STOLEN_RESERVED);
	unsigned long stolen_top = dev_priv->mm.stolen_base +
				   ggtt->stolen_size;

	*base = (reg_val & G4X_STOLEN_RESERVED_ADDR2_MASK) << 16;

	WARN_ON((reg_val & G4X_STOLEN_RESERVED_ADDR1_MASK) < *base);

	/* On these platforms, the register doesn't have a size field, so the
	 * size is the distance between the base and the top of the stolen
	 * memory. We also have the genuine case where base is zero and
	 * there's nothing reserved. */
	if (*base == 0)
		*size = 0;
	else
		*size = stolen_top - *base;
}

static void gen6_get_stolen_reserved(struct drm_i915_private *dev_priv,
				     unsigned long *base, unsigned long *size)
{
	uint32_t reg_val = I915_READ(GEN6_STOLEN_RESERVED);

	*base = reg_val & GEN6_STOLEN_RESERVED_ADDR_MASK;

	switch (reg_val & GEN6_STOLEN_RESERVED_SIZE_MASK) {
	case GEN6_STOLEN_RESERVED_1M:
		*size = 1024 * 1024;
		break;
	case GEN6_STOLEN_RESERVED_512K:
		*size = 512 * 1024;
		break;
	case GEN6_STOLEN_RESERVED_256K:
		*size = 256 * 1024;
		break;
	case GEN6_STOLEN_RESERVED_128K:
		*size = 128 * 1024;
		break;
	default:
		*size = 1024 * 1024;
		MISSING_CASE(reg_val & GEN6_STOLEN_RESERVED_SIZE_MASK);
	}
}

static void gen7_get_stolen_reserved(struct drm_i915_private *dev_priv,
				     unsigned long *base, unsigned long *size)
{
	uint32_t reg_val = I915_READ(GEN6_STOLEN_RESERVED);

	*base = reg_val & GEN7_STOLEN_RESERVED_ADDR_MASK;

	switch (reg_val & GEN7_STOLEN_RESERVED_SIZE_MASK) {
	case GEN7_STOLEN_RESERVED_1M:
		*size = 1024 * 1024;
		break;
	case GEN7_STOLEN_RESERVED_256K:
		*size = 256 * 1024;
		break;
	default:
		*size = 1024 * 1024;
		MISSING_CASE(reg_val & GEN7_STOLEN_RESERVED_SIZE_MASK);
	}
}

static void gen8_get_stolen_reserved(struct drm_i915_private *dev_priv,
				     unsigned long *base, unsigned long *size)
{
	uint32_t reg_val = I915_READ(GEN6_STOLEN_RESERVED);

	*base = reg_val & GEN6_STOLEN_RESERVED_ADDR_MASK;

	switch (reg_val & GEN8_STOLEN_RESERVED_SIZE_MASK) {
	case GEN8_STOLEN_RESERVED_1M:
		*size = 1024 * 1024;
		break;
	case GEN8_STOLEN_RESERVED_2M:
		*size = 2 * 1024 * 1024;
		break;
	case GEN8_STOLEN_RESERVED_4M:
		*size = 4 * 1024 * 1024;
		break;
	case GEN8_STOLEN_RESERVED_8M:
		*size = 8 * 1024 * 1024;
		break;
	default:
		*size = 8 * 1024 * 1024;
		MISSING_CASE(reg_val & GEN8_STOLEN_RESERVED_SIZE_MASK);
	}
}

static void bdw_get_stolen_reserved(struct drm_i915_private *dev_priv,
				    unsigned long *base, unsigned long *size)
{
	struct i915_ggtt *ggtt = &dev_priv->ggtt;
	uint32_t reg_val = I915_READ(GEN6_STOLEN_RESERVED);
	unsigned long stolen_top;

	stolen_top = dev_priv->mm.stolen_base + ggtt->stolen_size;

	*base = reg_val & GEN6_STOLEN_RESERVED_ADDR_MASK;

	/* On these platforms, the register doesn't have a size field, so the
	 * size is the distance between the base and the top of the stolen
	 * memory. We also have the genuine case where base is zero and
	 * there's nothing reserved. */
	if (*base == 0)
		*size = 0;
	else
		*size = stolen_top - *base;
}
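
/*
 * i915_gem_init_stolen() is called at driver load: it locates the physical
 * base of stolen memory, subtracts whatever the firmware has reserved at
 * the top, and seeds the drm_mm allocator with the remaining usable range.
 */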
int i915_gem_init_stolen(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct i915_ggtt *ggtt = &dev_priv->ggtt;
	unsigned long reserved_total, reserved_base = 0, reserved_size;
	unsigned long stolen_top;

	mutex_init(&dev_priv->mm.stolen_lock);

#ifdef CONFIG_INTEL_IOMMU
	if (intel_iommu_gfx_mapped && INTEL_INFO(dev)->gen < 8) {
		DRM_INFO("DMAR active, disabling use of stolen memory\n");
		return 0;
	}
#endif

	if (ggtt->stolen_size == 0)
		return 0;

	dev_priv->mm.stolen_base = i915_stolen_to_physical(dev);
	if (dev_priv->mm.stolen_base == 0)
		return 0;

	stolen_top = dev_priv->mm.stolen_base + ggtt->stolen_size;

	switch (INTEL_INFO(dev_priv)->gen) {
	case 2:
	case 3:
		break;
	case 4:
		if (IS_G4X(dev))
			g4x_get_stolen_reserved(dev_priv, &reserved_base,
						&reserved_size);
		break;
	case 5:
		/* Assume the gen6 maximum for the older platforms. */
		reserved_size = 1024 * 1024;
		reserved_base = stolen_top - reserved_size;
		break;
	case 6:
		gen6_get_stolen_reserved(dev_priv, &reserved_base,
					 &reserved_size);
		break;
	case 7:
		gen7_get_stolen_reserved(dev_priv, &reserved_base,
					 &reserved_size);
		break;
	default:
		if (IS_BROADWELL(dev_priv) ||
		    IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev))
			bdw_get_stolen_reserved(dev_priv, &reserved_base,
						&reserved_size);
		else
			gen8_get_stolen_reserved(dev_priv, &reserved_base,
						 &reserved_size);
		break;
	}

	/* It is possible for the reserved base to be zero, but the register
	 * field for size doesn't have a zero option. */
	if (reserved_base == 0) {
		reserved_size = 0;
		reserved_base = stolen_top;
	}

	if (reserved_base < dev_priv->mm.stolen_base ||
	    reserved_base + reserved_size > stolen_top) {
		DRM_DEBUG_KMS("Stolen reserved area [0x%08lx - 0x%08lx] outside stolen memory [0x%08lx - 0x%08lx]\n",
			      reserved_base, reserved_base + reserved_size,
			      dev_priv->mm.stolen_base, stolen_top);
		return 0;
	}

	ggtt->stolen_reserved_base = reserved_base;
	ggtt->stolen_reserved_size = reserved_size;

	/* It is possible for the reserved area to end before the end of stolen
	 * memory, so just consider the start. */
	reserved_total = stolen_top - reserved_base;

	DRM_DEBUG_KMS("Memory reserved for graphics device: %zuK, usable: %luK\n",
		      ggtt->stolen_size >> 10,
		      (ggtt->stolen_size - reserved_total) >> 10);

	ggtt->stolen_usable_size = ggtt->stolen_size - reserved_total;

	/*
	 * Basic memrange allocator for stolen space.
	 *
	 * TODO: Notice that some platforms require us to not use the first page
	 * of the stolen memory but their BIOSes may still put the framebuffer
	 * on the first page. So we don't reserve this page for now because of
	 * that. Our current solution is to just prevent new nodes from being
	 * inserted on the first page - see the check we have at
	 * i915_gem_stolen_insert_node_in_range(). We may want to fix the fbcon
	 * to avoid the first page so that it can be reserved here instead.
	 */
	drm_mm_init(&dev_priv->mm.stolen, 0, ggtt->stolen_usable_size);

	return 0;
}
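
/*
 * i915_pages_create_for_stolen() builds the fake backing storage for a
 * stolen object; offset and size are relative to the start of stolen
 * memory and must lie entirely within ggtt->stolen_size.
 */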
static struct sg_table *
i915_pages_create_for_stolen(struct drm_device *dev,
			     u32 offset, u32 size)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct i915_ggtt *ggtt = &dev_priv->ggtt;
	struct sg_table *st;
	struct scatterlist *sg;

	DRM_DEBUG_DRIVER("offset=0x%x, size=%d\n", offset, size);
	BUG_ON(offset > ggtt->stolen_size - size);

	/* We hide that we have no struct page backing our stolen object
	 * by wrapping the contiguous physical allocation with a fake
	 * dma mapping in a single scatterlist.
	 */
	st = kmalloc(sizeof(*st), GFP_KERNEL);
	if (st == NULL)
		return NULL;

	if (sg_alloc_table(st, 1, GFP_KERNEL)) {
		kfree(st);
		return NULL;
	}

	sg = st->sgl;
	sg->offset = 0;
	sg->length = size;

	sg_dma_address(sg) = (dma_addr_t)dev_priv->mm.stolen_base + offset;
	sg_dma_len(sg) = size;

	return st;
}
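
/*
 * Stolen objects are created with their (fake) backing pages already in
 * place, so ->get_pages should never be reached for them; ->put_pages
 * simply tears down the single-entry sg_table built above.
 */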
static int i915_gem_object_get_pages_stolen(struct drm_i915_gem_object *obj)
{
	BUG();
	return -EINVAL;
}

static void i915_gem_object_put_pages_stolen(struct drm_i915_gem_object *obj)
{
	/* Should only be called during free */
	sg_free_table(obj->pages);
	kfree(obj->pages);
}

static void
i915_gem_object_release_stolen(struct drm_i915_gem_object *obj)
{
	struct drm_i915_private *dev_priv = obj->base.dev->dev_private;

	if (obj->stolen) {
		i915_gem_stolen_remove_node(dev_priv, obj->stolen);
		kfree(obj->stolen);
		obj->stolen = NULL;
	}
}

static const struct drm_i915_gem_object_ops i915_gem_object_stolen_ops = {
	.get_pages = i915_gem_object_get_pages_stolen,
	.put_pages = i915_gem_object_put_pages_stolen,
	.release = i915_gem_object_release_stolen,
};
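
/*
 * _i915_gem_object_create_stolen() wraps an already-allocated stolen
 * drm_mm_node in a GEM object: it creates the fake sg_table backing, pins
 * the pages, and marks the object CPU/GTT readable with the cache level
 * appropriate for the platform.
 */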
static struct drm_i915_gem_object *
_i915_gem_object_create_stolen(struct drm_device *dev,
			       struct drm_mm_node *stolen)
{
	struct drm_i915_gem_object *obj;

	obj = i915_gem_object_alloc(dev);
	if (obj == NULL)
		return NULL;

	drm_gem_private_object_init(dev, &obj->base, stolen->size);
	i915_gem_object_init(obj, &i915_gem_object_stolen_ops);

	obj->pages = i915_pages_create_for_stolen(dev,
						  stolen->start, stolen->size);
	if (obj->pages == NULL)
		goto cleanup;

	obj->get_page.sg = obj->pages->sgl;
	obj->get_page.last = 0;

	i915_gem_object_pin_pages(obj);
	obj->stolen = stolen;

	obj->base.read_domains = I915_GEM_DOMAIN_CPU | I915_GEM_DOMAIN_GTT;
	obj->cache_level = HAS_LLC(dev) ? I915_CACHE_LLC : I915_CACHE_NONE;

	return obj;

cleanup:
	i915_gem_object_free(obj);
	return NULL;
}
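
/*
 * i915_gem_object_create_stolen() is the entry point for grabbing a fresh,
 * page-aligned chunk of stolen memory and wrapping it in a GEM object; the
 * node is released again if object creation fails.
 */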
struct drm_i915_gem_object *
i915_gem_object_create_stolen(struct drm_device *dev, u32 size)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_i915_gem_object *obj;
	struct drm_mm_node *stolen;
	int ret;

	if (!drm_mm_initialized(&dev_priv->mm.stolen))
		return NULL;

	DRM_DEBUG_KMS("creating stolen object: size=%x\n", size);
	if (size == 0)
		return NULL;

	stolen = kzalloc(sizeof(*stolen), GFP_KERNEL);
	if (!stolen)
		return NULL;

	ret = i915_gem_stolen_insert_node(dev_priv, stolen, size, 4096);
	if (ret) {
		kfree(stolen);
		return NULL;
	}

	obj = _i915_gem_object_create_stolen(dev, stolen);
	if (obj)
		return obj;

	i915_gem_stolen_remove_node(dev_priv, stolen);
	kfree(stolen);
	return NULL;
}
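
/*
 * i915_gem_object_create_stolen_for_preallocated() wraps a region that the
 * firmware has already populated (e.g. the BIOS framebuffer). The caller
 * supplies the exact stolen offset and, optionally, the GGTT offset the
 * object must appear at; both are reserved rather than freshly allocated.
 */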
struct drm_i915_gem_object *
i915_gem_object_create_stolen_for_preallocated(struct drm_device *dev,
					       u32 stolen_offset,
					       u32 gtt_offset,
					       u32 size)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct i915_ggtt *ggtt = &dev_priv->ggtt;
	struct drm_i915_gem_object *obj;
	struct drm_mm_node *stolen;
	struct i915_vma *vma;
	int ret;

	if (!drm_mm_initialized(&dev_priv->mm.stolen))
		return NULL;

	lockdep_assert_held(&dev->struct_mutex);

	DRM_DEBUG_KMS("creating preallocated stolen object: stolen_offset=%x, gtt_offset=%x, size=%x\n",
		      stolen_offset, gtt_offset, size);

	/* KISS and expect everything to be page-aligned */
	if (WARN_ON(size == 0) || WARN_ON(size & 4095) ||
	    WARN_ON(stolen_offset & 4095))
		return NULL;

	stolen = kzalloc(sizeof(*stolen), GFP_KERNEL);
	if (!stolen)
		return NULL;

	stolen->start = stolen_offset;
	stolen->size = size;
	mutex_lock(&dev_priv->mm.stolen_lock);
	ret = drm_mm_reserve_node(&dev_priv->mm.stolen, stolen);
	mutex_unlock(&dev_priv->mm.stolen_lock);
	if (ret) {
		DRM_DEBUG_KMS("failed to allocate stolen space\n");
		kfree(stolen);
		return NULL;
	}

	obj = _i915_gem_object_create_stolen(dev, stolen);
	if (obj == NULL) {
		DRM_DEBUG_KMS("failed to allocate stolen object\n");
		i915_gem_stolen_remove_node(dev_priv, stolen);
		kfree(stolen);
		return NULL;
	}

	/* Some objects just need physical mem from stolen space */
	if (gtt_offset == I915_GTT_OFFSET_NONE)
		return obj;

	vma = i915_gem_obj_lookup_or_create_vma(obj, &ggtt->base);
	if (IS_ERR(vma)) {
		ret = PTR_ERR(vma);
		goto err;
	}

	/* To simplify the initialisation sequence between KMS and GTT,
	 * we allow construction of the stolen object prior to
	 * setting up the GTT space. The actual reservation will occur
	 * later.
	 */
	vma->node.start = gtt_offset;
	vma->node.size = size;
	if (drm_mm_initialized(&ggtt->base.mm)) {
		ret = drm_mm_reserve_node(&ggtt->base.mm, &vma->node);
		if (ret) {
			DRM_DEBUG_KMS("failed to allocate stolen GTT space\n");
			goto err;
		}

		vma->bound |= GLOBAL_BIND;
		__i915_vma_set_map_and_fenceable(vma);
		list_add_tail(&vma->vm_link, &ggtt->base.inactive_list);
	}

	list_add_tail(&obj->global_list, &dev_priv->mm.bound_list);
	i915_gem_object_pin_pages(obj);

	return obj;

err:
	drm_gem_object_unreference(&obj->base);
	return NULL;
}