/*
 * Copyright © 2008-2012 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Eric Anholt <eric@anholt.net>
 *    Chris Wilson <chris@chris-wilson.co.uk>
 *
 */
30 #include <drm/i915_drm.h>
/*
 * The BIOS typically reserves some of the system's memory for the exclusive
 * use of the integrated graphics. This memory is no longer available for
 * use by the OS and so the user finds that his system has less memory
 * available than he put in. We refer to this memory as stolen.
 *
 * The BIOS will allocate its framebuffer from the stolen memory. Our
 * goal is try to reuse that object for our own fbcon which must always
 * be available for panics. Anything else we can reuse the stolen memory
 * for is a boon.
 */
45 static unsigned long i915_stolen_to_physical(struct drm_device
*dev
)
47 struct drm_i915_private
*dev_priv
= dev
->dev_private
;
48 struct pci_dev
*pdev
= dev_priv
->bridge_dev
;
51 /* On the machines I have tested the Graphics Base of Stolen Memory
52 * is unreliable, so on those compute the base by subtracting the
53 * stolen memory from the Top of Low Usable DRAM which is where the
54 * BIOS places the graphics stolen memory.
56 * On gen2, the layout is slightly different with the Graphics Segment
57 * immediately following Top of Memory (or Top of Usable DRAM). Note
58 * it appears that TOUD is only reported by 865g, so we just use the
59 * top of memory as determined by the e820 probe.
61 * XXX gen2 requires an unavailable symbol and 945gm fails with
65 if (INTEL_INFO(dev
)->gen
>= 6) {
66 /* Read Base Data of Stolen Memory Register (BDSM) directly.
67 * Note that there is also a MCHBAR miror at 0x1080c0 or
68 * we could use device 2:0x5c instead.
70 pci_read_config_dword(pdev
, 0xB0, &base
);
71 base
&= ~4095; /* lower bits used for locking register */
72 } else if (INTEL_INFO(dev
)->gen
> 3 || IS_G33(dev
)) {
73 /* Read Graphics Base of Stolen Memory directly */
74 pci_read_config_dword(pdev
, 0xA4, &base
);
76 } else if (IS_GEN3(dev
)) {
78 /* Stolen is immediately below Top of Low Usable DRAM */
79 pci_read_config_byte(pdev
, 0x9c, &val
);
80 base
= val
>> 3 << 27;
81 base
-= dev_priv
->mm
.gtt
->stolen_size
;
83 /* Stolen is immediately above Top of Memory */
84 base
= max_low_pfn_mapped
<< PAGE_SHIFT
;
91 static int i915_setup_compression(struct drm_device
*dev
, int size
)
93 struct drm_i915_private
*dev_priv
= dev
->dev_private
;
94 struct drm_mm_node
*compressed_fb
, *uninitialized_var(compressed_llb
);
96 /* Try to over-allocate to reduce reallocations and fragmentation */
97 compressed_fb
= drm_mm_search_free(&dev_priv
->mm
.stolen
,
100 compressed_fb
= drm_mm_search_free(&dev_priv
->mm
.stolen
,
101 size
>>= 1, 4096, 0);
103 compressed_fb
= drm_mm_get_block(compressed_fb
, size
, 4096);
107 if (HAS_PCH_SPLIT(dev
))
108 I915_WRITE(ILK_DPFC_CB_BASE
, compressed_fb
->start
);
109 else if (IS_GM45(dev
)) {
110 I915_WRITE(DPFC_CB_BASE
, compressed_fb
->start
);
112 compressed_llb
= drm_mm_search_free(&dev_priv
->mm
.stolen
,
115 compressed_llb
= drm_mm_get_block(compressed_llb
,
120 dev_priv
->compressed_llb
= compressed_llb
;
122 I915_WRITE(FBC_CFB_BASE
,
123 dev_priv
->mm
.stolen_base
+ compressed_fb
->start
);
124 I915_WRITE(FBC_LL_BASE
,
125 dev_priv
->mm
.stolen_base
+ compressed_llb
->start
);
128 dev_priv
->compressed_fb
= compressed_fb
;
129 dev_priv
->cfb_size
= size
;
131 DRM_DEBUG_KMS("reserved %d bytes of contiguous stolen space for FBC\n",
137 drm_mm_put_block(compressed_fb
);
142 int i915_gem_stolen_setup_compression(struct drm_device
*dev
, int size
)
144 struct drm_i915_private
*dev_priv
= dev
->dev_private
;
146 if (dev_priv
->mm
.stolen_base
== 0)
149 if (size
< dev_priv
->cfb_size
)
152 /* Release any current block */
153 i915_gem_stolen_cleanup_compression(dev
);
155 return i915_setup_compression(dev
, size
);
158 void i915_gem_stolen_cleanup_compression(struct drm_device
*dev
)
160 struct drm_i915_private
*dev_priv
= dev
->dev_private
;
162 if (dev_priv
->cfb_size
== 0)
165 if (dev_priv
->compressed_fb
)
166 drm_mm_put_block(dev_priv
->compressed_fb
);
168 if (dev_priv
->compressed_llb
)
169 drm_mm_put_block(dev_priv
->compressed_llb
);
171 dev_priv
->cfb_size
= 0;
174 void i915_gem_cleanup_stolen(struct drm_device
*dev
)
176 struct drm_i915_private
*dev_priv
= dev
->dev_private
;
178 i915_gem_stolen_cleanup_compression(dev
);
179 drm_mm_takedown(&dev_priv
->mm
.stolen
);
182 int i915_gem_init_stolen(struct drm_device
*dev
)
184 struct drm_i915_private
*dev_priv
= dev
->dev_private
;
186 dev_priv
->mm
.stolen_base
= i915_stolen_to_physical(dev
);
187 if (dev_priv
->mm
.stolen_base
== 0)
190 DRM_DEBUG_KMS("found %zd bytes of stolen memory at %08lx\n",
191 dev_priv
->gtt
.stolen_size
, dev_priv
->mm
.stolen_base
);
193 /* Basic memrange allocator for stolen space */
194 drm_mm_init(&dev_priv
->mm
.stolen
, 0, dev_priv
->gtt
.stolen_size
);
199 static struct sg_table
*
200 i915_pages_create_for_stolen(struct drm_device
*dev
,
201 u32 offset
, u32 size
)
203 struct drm_i915_private
*dev_priv
= dev
->dev_private
;
205 struct scatterlist
*sg
;
207 DRM_DEBUG_DRIVER("offset=0x%x, size=%d\n", offset
, size
);
208 BUG_ON(offset
> dev_priv
->gtt
.stolen_size
- size
);
210 /* We hide that we have no struct page backing our stolen object
211 * by wrapping the contiguous physical allocation with a fake
212 * dma mapping in a single scatterlist.
215 st
= kmalloc(sizeof(*st
), GFP_KERNEL
);
219 if (sg_alloc_table(st
, 1, GFP_KERNEL
)) {
228 sg_dma_address(sg
) = (dma_addr_t
)dev_priv
->mm
.stolen_base
+ offset
;
229 sg_dma_len(sg
) = size
;
234 static int i915_gem_object_get_pages_stolen(struct drm_i915_gem_object
*obj
)
240 static void i915_gem_object_put_pages_stolen(struct drm_i915_gem_object
*obj
)
242 /* Should only be called during free */
243 sg_free_table(obj
->pages
);
247 static const struct drm_i915_gem_object_ops i915_gem_object_stolen_ops
= {
248 .get_pages
= i915_gem_object_get_pages_stolen
,
249 .put_pages
= i915_gem_object_put_pages_stolen
,
252 static struct drm_i915_gem_object
*
253 _i915_gem_object_create_stolen(struct drm_device
*dev
,
254 struct drm_mm_node
*stolen
)
256 struct drm_i915_gem_object
*obj
;
258 obj
= i915_gem_object_alloc(dev
);
262 if (drm_gem_private_object_init(dev
, &obj
->base
, stolen
->size
))
265 i915_gem_object_init(obj
, &i915_gem_object_stolen_ops
);
267 obj
->pages
= i915_pages_create_for_stolen(dev
,
268 stolen
->start
, stolen
->size
);
269 if (obj
->pages
== NULL
)
272 obj
->has_dma_mapping
= true;
273 obj
->pages_pin_count
= 1;
274 obj
->stolen
= stolen
;
276 obj
->base
.write_domain
= I915_GEM_DOMAIN_GTT
;
277 obj
->base
.read_domains
= I915_GEM_DOMAIN_GTT
;
278 obj
->cache_level
= I915_CACHE_NONE
;
283 i915_gem_object_free(obj
);
287 struct drm_i915_gem_object
*
288 i915_gem_object_create_stolen(struct drm_device
*dev
, u32 size
)
290 struct drm_i915_private
*dev_priv
= dev
->dev_private
;
291 struct drm_i915_gem_object
*obj
;
292 struct drm_mm_node
*stolen
;
294 if (dev_priv
->mm
.stolen_base
== 0)
297 DRM_DEBUG_KMS("creating stolen object: size=%x\n", size
);
301 stolen
= drm_mm_search_free(&dev_priv
->mm
.stolen
, size
, 4096, 0);
303 stolen
= drm_mm_get_block(stolen
, size
, 4096);
307 obj
= _i915_gem_object_create_stolen(dev
, stolen
);
311 drm_mm_put_block(stolen
);
315 struct drm_i915_gem_object
*
316 i915_gem_object_create_stolen_for_preallocated(struct drm_device
*dev
,
321 struct drm_i915_private
*dev_priv
= dev
->dev_private
;
322 struct drm_i915_gem_object
*obj
;
323 struct drm_mm_node
*stolen
;
325 if (dev_priv
->mm
.stolen_base
== 0)
328 DRM_DEBUG_KMS("creating preallocated stolen object: stolen_offset=%x, gtt_offset=%x, size=%x\n",
329 stolen_offset
, gtt_offset
, size
);
331 /* KISS and expect everything to be page-aligned */
332 BUG_ON(stolen_offset
& 4095);
333 BUG_ON(gtt_offset
& 4095);
336 if (WARN_ON(size
== 0))
339 stolen
= drm_mm_create_block(&dev_priv
->mm
.stolen
,
342 if (stolen
== NULL
) {
343 DRM_DEBUG_KMS("failed to allocate stolen space\n");
347 obj
= _i915_gem_object_create_stolen(dev
, stolen
);
349 DRM_DEBUG_KMS("failed to allocate stolen object\n");
350 drm_mm_put_block(stolen
);
354 /* To simplify the initialisation sequence between KMS and GTT,
355 * we allow construction of the stolen object prior to
356 * setting up the GTT space. The actual reservation will occur
359 if (drm_mm_initialized(&dev_priv
->mm
.gtt_space
)) {
360 obj
->gtt_space
= drm_mm_create_block(&dev_priv
->mm
.gtt_space
,
363 if (obj
->gtt_space
== NULL
) {
364 DRM_DEBUG_KMS("failed to allocate stolen GTT space\n");
365 drm_gem_object_unreference(&obj
->base
);
369 obj
->gtt_space
= I915_GTT_RESERVED
;
371 obj
->gtt_offset
= gtt_offset
;
372 obj
->has_global_gtt_mapping
= 1;
374 list_add_tail(&obj
->gtt_list
, &dev_priv
->mm
.bound_list
);
375 list_add_tail(&obj
->mm_list
, &dev_priv
->mm
.inactive_list
);
381 i915_gem_object_release_stolen(struct drm_i915_gem_object
*obj
)
384 drm_mm_put_block(obj
->stolen
);