/*
 * Copyright © 2008-2012 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Eric Anholt <eric@anholt.net>
 *    Chris Wilson <chris@chris-wilson.co.uk>
 *
 */
30 #include <drm/i915_drm.h>
/*
 * The BIOS typically reserves some of the system's memory for the exclusive
 * use of the integrated graphics. This memory is no longer available for
 * use by the OS and so the user finds that his system has less memory
 * available than he put in. We refer to this memory as stolen.
 *
 * The BIOS will allocate its framebuffer from the stolen memory. Our
 * goal is try to reuse that object for our own fbcon which must always
 * be available for panics. Anything else we can reuse the stolen memory
 * for is a boon.
 */
45 static unsigned long i915_stolen_to_physical(struct drm_device
*dev
)
47 struct drm_i915_private
*dev_priv
= dev
->dev_private
;
48 struct pci_dev
*pdev
= dev_priv
->bridge_dev
;
52 /* On the machines I have tested the Graphics Base of Stolen Memory
53 * is unreliable, so on those compute the base by subtracting the
54 * stolen memory from the Top of Low Usable DRAM which is where the
55 * BIOS places the graphics stolen memory.
57 * On gen2, the layout is slightly different with the Graphics Segment
58 * immediately following Top of Memory (or Top of Usable DRAM). Note
59 * it appears that TOUD is only reported by 865g, so we just use the
60 * top of memory as determined by the e820 probe.
62 * XXX gen2 requires an unavailable symbol and 945gm fails with
66 if (IS_VALLEYVIEW(dev
)) {
67 pci_read_config_dword(dev
->pdev
, 0x5c, &base
);
68 base
&= ~((1<<20) - 1);
69 } else if (INTEL_INFO(dev
)->gen
>= 6) {
70 /* Read Base Data of Stolen Memory Register (BDSM) directly.
71 * Note that there is also a MCHBAR miror at 0x1080c0 or
72 * we could use device 2:0x5c instead.
74 pci_read_config_dword(pdev
, 0xB0, &base
);
75 base
&= ~4095; /* lower bits used for locking register */
76 } else if (INTEL_INFO(dev
)->gen
> 3 || IS_G33(dev
)) {
77 /* Read Graphics Base of Stolen Memory directly */
78 pci_read_config_dword(pdev
, 0xA4, &base
);
80 } else if (IS_GEN3(dev
)) {
82 /* Stolen is immediately below Top of Low Usable DRAM */
83 pci_read_config_byte(pdev
, 0x9c, &val
);
84 base
= val
>> 3 << 27;
85 base
-= dev_priv
->mm
.gtt
->stolen_size
;
87 /* Stolen is immediately above Top of Memory */
88 base
= max_low_pfn_mapped
<< PAGE_SHIFT
;
95 /* Verify that nothing else uses this physical address. Stolen
96 * memory should be reserved by the BIOS and hidden from the
97 * kernel. So if the region is already marked as busy, something
100 r
= devm_request_mem_region(dev
->dev
, base
, dev_priv
->gtt
.stolen_size
,
101 "Graphics Stolen Memory");
103 DRM_ERROR("conflict detected with stolen region: [0x%08x - 0x%08x]\n",
104 base
, base
+ (uint32_t)dev_priv
->gtt
.stolen_size
);
111 static int i915_setup_compression(struct drm_device
*dev
, int size
)
113 struct drm_i915_private
*dev_priv
= dev
->dev_private
;
114 struct drm_mm_node
*compressed_fb
, *uninitialized_var(compressed_llb
);
116 /* Try to over-allocate to reduce reallocations and fragmentation */
117 compressed_fb
= drm_mm_search_free(&dev_priv
->mm
.stolen
,
118 size
<<= 1, 4096, 0);
120 compressed_fb
= drm_mm_search_free(&dev_priv
->mm
.stolen
,
121 size
>>= 1, 4096, 0);
123 compressed_fb
= drm_mm_get_block(compressed_fb
, size
, 4096);
127 if (HAS_PCH_SPLIT(dev
))
128 I915_WRITE(ILK_DPFC_CB_BASE
, compressed_fb
->start
);
129 else if (IS_GM45(dev
)) {
130 I915_WRITE(DPFC_CB_BASE
, compressed_fb
->start
);
132 compressed_llb
= drm_mm_search_free(&dev_priv
->mm
.stolen
,
135 compressed_llb
= drm_mm_get_block(compressed_llb
,
140 dev_priv
->fbc
.compressed_llb
= compressed_llb
;
142 I915_WRITE(FBC_CFB_BASE
,
143 dev_priv
->mm
.stolen_base
+ compressed_fb
->start
);
144 I915_WRITE(FBC_LL_BASE
,
145 dev_priv
->mm
.stolen_base
+ compressed_llb
->start
);
148 dev_priv
->fbc
.compressed_fb
= compressed_fb
;
149 dev_priv
->fbc
.size
= size
;
151 DRM_DEBUG_KMS("reserved %d bytes of contiguous stolen space for FBC\n",
157 drm_mm_put_block(compressed_fb
);
159 pr_info_once("drm: not enough stolen space for compressed buffer (need %d more bytes), disabling. Hint: you may be able to increase stolen memory size in the BIOS to avoid this.\n", size
);
163 int i915_gem_stolen_setup_compression(struct drm_device
*dev
, int size
)
165 struct drm_i915_private
*dev_priv
= dev
->dev_private
;
167 if (dev_priv
->mm
.stolen_base
== 0)
170 if (size
< dev_priv
->fbc
.size
)
173 /* Release any current block */
174 i915_gem_stolen_cleanup_compression(dev
);
176 return i915_setup_compression(dev
, size
);
179 void i915_gem_stolen_cleanup_compression(struct drm_device
*dev
)
181 struct drm_i915_private
*dev_priv
= dev
->dev_private
;
183 if (dev_priv
->fbc
.size
== 0)
186 if (dev_priv
->fbc
.compressed_fb
)
187 drm_mm_put_block(dev_priv
->fbc
.compressed_fb
);
189 if (dev_priv
->fbc
.compressed_llb
)
190 drm_mm_put_block(dev_priv
->fbc
.compressed_llb
);
192 dev_priv
->fbc
.size
= 0;
195 void i915_gem_cleanup_stolen(struct drm_device
*dev
)
197 struct drm_i915_private
*dev_priv
= dev
->dev_private
;
199 i915_gem_stolen_cleanup_compression(dev
);
200 drm_mm_takedown(&dev_priv
->mm
.stolen
);
203 int i915_gem_init_stolen(struct drm_device
*dev
)
205 struct drm_i915_private
*dev_priv
= dev
->dev_private
;
206 int bios_reserved
= 0;
208 dev_priv
->mm
.stolen_base
= i915_stolen_to_physical(dev
);
209 if (dev_priv
->mm
.stolen_base
== 0)
212 DRM_DEBUG_KMS("found %zd bytes of stolen memory at %08lx\n",
213 dev_priv
->gtt
.stolen_size
, dev_priv
->mm
.stolen_base
);
215 if (IS_VALLEYVIEW(dev
))
216 bios_reserved
= 1024*1024; /* top 1M on VLV/BYT */
218 if (WARN_ON(bios_reserved
> dev_priv
->gtt
.stolen_size
))
221 /* Basic memrange allocator for stolen space */
222 drm_mm_init(&dev_priv
->mm
.stolen
, 0, dev_priv
->gtt
.stolen_size
-
228 static struct sg_table
*
229 i915_pages_create_for_stolen(struct drm_device
*dev
,
230 u32 offset
, u32 size
)
232 struct drm_i915_private
*dev_priv
= dev
->dev_private
;
234 struct scatterlist
*sg
;
236 DRM_DEBUG_DRIVER("offset=0x%x, size=%d\n", offset
, size
);
237 BUG_ON(offset
> dev_priv
->gtt
.stolen_size
- size
);
239 /* We hide that we have no struct page backing our stolen object
240 * by wrapping the contiguous physical allocation with a fake
241 * dma mapping in a single scatterlist.
244 st
= kmalloc(sizeof(*st
), GFP_KERNEL
);
248 if (sg_alloc_table(st
, 1, GFP_KERNEL
)) {
257 sg_dma_address(sg
) = (dma_addr_t
)dev_priv
->mm
.stolen_base
+ offset
;
258 sg_dma_len(sg
) = size
;
263 static int i915_gem_object_get_pages_stolen(struct drm_i915_gem_object
*obj
)
269 static void i915_gem_object_put_pages_stolen(struct drm_i915_gem_object
*obj
)
271 /* Should only be called during free */
272 sg_free_table(obj
->pages
);
276 static const struct drm_i915_gem_object_ops i915_gem_object_stolen_ops
= {
277 .get_pages
= i915_gem_object_get_pages_stolen
,
278 .put_pages
= i915_gem_object_put_pages_stolen
,
281 static struct drm_i915_gem_object
*
282 _i915_gem_object_create_stolen(struct drm_device
*dev
,
283 struct drm_mm_node
*stolen
)
285 struct drm_i915_gem_object
*obj
;
287 obj
= i915_gem_object_alloc(dev
);
291 if (drm_gem_private_object_init(dev
, &obj
->base
, stolen
->size
))
294 i915_gem_object_init(obj
, &i915_gem_object_stolen_ops
);
296 obj
->pages
= i915_pages_create_for_stolen(dev
,
297 stolen
->start
, stolen
->size
);
298 if (obj
->pages
== NULL
)
301 obj
->has_dma_mapping
= true;
302 i915_gem_object_pin_pages(obj
);
303 obj
->stolen
= stolen
;
305 obj
->base
.write_domain
= I915_GEM_DOMAIN_GTT
;
306 obj
->base
.read_domains
= I915_GEM_DOMAIN_GTT
;
307 obj
->cache_level
= I915_CACHE_NONE
;
312 i915_gem_object_free(obj
);
316 struct drm_i915_gem_object
*
317 i915_gem_object_create_stolen(struct drm_device
*dev
, u32 size
)
319 struct drm_i915_private
*dev_priv
= dev
->dev_private
;
320 struct drm_i915_gem_object
*obj
;
321 struct drm_mm_node
*stolen
;
323 if (dev_priv
->mm
.stolen_base
== 0)
326 DRM_DEBUG_KMS("creating stolen object: size=%x\n", size
);
330 stolen
= drm_mm_search_free(&dev_priv
->mm
.stolen
, size
, 4096, 0);
332 stolen
= drm_mm_get_block(stolen
, size
, 4096);
336 obj
= _i915_gem_object_create_stolen(dev
, stolen
);
340 drm_mm_put_block(stolen
);
344 struct drm_i915_gem_object
*
345 i915_gem_object_create_stolen_for_preallocated(struct drm_device
*dev
,
350 struct drm_i915_private
*dev_priv
= dev
->dev_private
;
351 struct drm_i915_gem_object
*obj
;
352 struct drm_mm_node
*stolen
;
355 if (dev_priv
->mm
.stolen_base
== 0)
358 DRM_DEBUG_KMS("creating preallocated stolen object: stolen_offset=%x, gtt_offset=%x, size=%x\n",
359 stolen_offset
, gtt_offset
, size
);
361 /* KISS and expect everything to be page-aligned */
362 BUG_ON(stolen_offset
& 4095);
365 if (WARN_ON(size
== 0))
368 stolen
= kzalloc(sizeof(*stolen
), GFP_KERNEL
);
372 stolen
->start
= stolen_offset
;
374 ret
= drm_mm_reserve_node(&dev_priv
->mm
.stolen
, stolen
);
376 DRM_DEBUG_KMS("failed to allocate stolen space\n");
381 obj
= _i915_gem_object_create_stolen(dev
, stolen
);
383 DRM_DEBUG_KMS("failed to allocate stolen object\n");
384 drm_mm_put_block(stolen
);
388 /* Some objects just need physical mem from stolen space */
389 if (gtt_offset
== I915_GTT_OFFSET_NONE
)
392 /* To simplify the initialisation sequence between KMS and GTT,
393 * we allow construction of the stolen object prior to
394 * setting up the GTT space. The actual reservation will occur
397 obj
->gtt_space
.start
= gtt_offset
;
398 obj
->gtt_space
.size
= size
;
399 if (drm_mm_initialized(&dev_priv
->gtt
.base
.mm
)) {
400 ret
= drm_mm_reserve_node(&dev_priv
->gtt
.base
.mm
,
403 DRM_DEBUG_KMS("failed to allocate stolen GTT space\n");
408 obj
->has_global_gtt_mapping
= 1;
410 list_add_tail(&obj
->global_list
, &dev_priv
->mm
.bound_list
);
411 list_add_tail(&obj
->mm_list
, &dev_priv
->mm
.inactive_list
);
416 drm_gem_object_unreference(&obj
->base
);
421 i915_gem_object_release_stolen(struct drm_i915_gem_object
*obj
)
424 drm_mm_put_block(obj
->stolen
);