/*
 * Copyright © 2008 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Eric Anholt <eric@anholt.net>
 *
 */
#include <linux/string.h>
#include <linux/bitops.h>
#include <drm/drmP.h>
#include <drm/i915_drm.h>
#include "i915_drv.h"

/** @file i915_gem_tiling.c
 *
 * Support for managing tiling state of buffer objects.
 *
 * The idea behind tiling is to increase cache hit rates by rearranging
 * pixel data so that a group of pixel accesses are in the same cacheline.
 * Performance improvements from doing this on the back/depth buffer are on
 * the order of 30%.
 *
 * Intel architectures make this somewhat more complicated, though, by
 * adjustments made to addressing of data when the memory is in interleaved
 * mode (matched pairs of DIMMs) to improve memory bandwidth.
 * For interleaved memory, the CPU sends every sequential 64 bytes
 * to an alternate memory channel so it can get the bandwidth from both.
 *
 * The GPU also rearranges its accesses for increased bandwidth to interleaved
 * memory, and it matches what the CPU does for non-tiled.  However, when tiled
 * it does it a little differently, since one walks addresses not just in the
 * X direction but also Y.  So, along with alternating channels when bit
 * 6 of the address flips, it also alternates when other bits flip -- Bits 9
 * (every 512 bytes, an X tile scanline) and 10 (every two X tile scanlines)
 * are common to both the 915 and 965-class hardware.
 *
 * The CPU also sometimes XORs in higher bits as well, to improve
 * bandwidth doing strided access like we do so frequently in graphics.  This
 * is called "Channel XOR Randomization" in the MCH documentation.  The result
 * is that the CPU is XORing in either bit 11 or bit 17 to bit 6 of its address
 * decode.
 *
 * All of this bit 6 XORing has an effect on our memory management,
 * as we need to make sure that the 3d driver can correctly address object
 * contents.
 *
 * If we don't have interleaved memory, all tiling is safe and no swizzling is
 * required.
 *
 * When bit 17 is XORed in, we simply refuse to tile at all.  Bit
 * 17 is not just a page offset, so as we page an object out and back in,
 * individual pages in it will have different bit 17 addresses, resulting in
 * each 64 bytes being swapped with its neighbor!
 *
 * Otherwise, if interleaved, we have to tell the 3d driver what the address
 * swizzling it needs to do is, since it's writing with the CPU to the pages
 * (bit 6 and potentially bit 11 XORed in), and the GPU is reading from the
 * pages (bit 6, 9, and 10 XORed in), resulting in a cumulative bit swizzling
 * required by the CPU of XORing in bit 6, 9, 10, and potentially 11, in order
 * to match what the GPU expects.
 */
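
/*
 * Illustrative sketch only (a hypothetical helper, not part of this file or
 * the hardware documentation): for the common I915_BIT_6_SWIZZLE_9_10 mode,
 * the CPU would need to flip bit 6 of a linear offset whenever bits 9 and 10
 * disagree, i.e.
 *
 *	static inline u32 bit6_swizzle_9_10(u32 offset)
 *	{
 *		return offset ^ ((((offset >> 9) ^ (offset >> 10)) & 1) << 6);
 *	}
 *
 * The 9_10_11 and 9_10_17 modes additionally XOR bit 11 or bit 17 into bit 6.
 */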

/**
 * Detects bit 6 swizzling of address lookup between IGD access and CPU
 * access through main memory.
 */
void
i915_gem_detect_bit_6_swizzle(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	uint32_t swizzle_x = I915_BIT_6_SWIZZLE_UNKNOWN;
	uint32_t swizzle_y = I915_BIT_6_SWIZZLE_UNKNOWN;

	if (IS_VALLEYVIEW(dev)) {
		swizzle_x = I915_BIT_6_SWIZZLE_NONE;
		swizzle_y = I915_BIT_6_SWIZZLE_NONE;
	} else if (INTEL_INFO(dev)->gen >= 6) {
		uint32_t dimm_c0, dimm_c1;
		dimm_c0 = I915_READ(MAD_DIMM_C0);
		dimm_c1 = I915_READ(MAD_DIMM_C1);
		dimm_c0 &= MAD_DIMM_A_SIZE_MASK | MAD_DIMM_B_SIZE_MASK;
		dimm_c1 &= MAD_DIMM_A_SIZE_MASK | MAD_DIMM_B_SIZE_MASK;
		/* Enable swizzling when the channels are populated with
		 * identically sized dimms.  We don't need to check the 3rd
		 * channel because no cpu with gpu attached ships in that
		 * configuration.  Also, swizzling only makes sense for 2
		 * channels anyway. */
		if (dimm_c0 == dimm_c1) {
			swizzle_x = I915_BIT_6_SWIZZLE_9_10;
			swizzle_y = I915_BIT_6_SWIZZLE_9;
		} else {
			swizzle_x = I915_BIT_6_SWIZZLE_NONE;
			swizzle_y = I915_BIT_6_SWIZZLE_NONE;
		}
	} else if (IS_GEN5(dev)) {
		/* On Ironlake the GPU always uses the same swizzling
		 * setup, regardless of the DRAM configuration.
		 */
		swizzle_x = I915_BIT_6_SWIZZLE_9_10;
		swizzle_y = I915_BIT_6_SWIZZLE_9;
	} else if (IS_GEN2(dev)) {
		/* As far as we know, the 865 doesn't have these bit 6
		 * swizzling issues.
		 */
		swizzle_x = I915_BIT_6_SWIZZLE_NONE;
		swizzle_y = I915_BIT_6_SWIZZLE_NONE;
	} else if (IS_MOBILE(dev) || (IS_GEN3(dev) && !IS_G33(dev))) {
		uint32_t dcc;

		/* On 9xx chipsets, channel interleave by the CPU is
		 * determined by DCC.  For single-channel, neither the CPU
		 * nor the GPU do swizzling.  For dual channel interleaved,
		 * the GPU's interleave is bit 9 and 10 for X tiled, and bit
		 * 9 for Y tiled.  The CPU's interleave is independent, and
		 * can be based on either bit 11 (haven't seen this yet) or
		 * bit 17 (common).
		 */
		dcc = I915_READ(DCC);
		switch (dcc & DCC_ADDRESSING_MODE_MASK) {
		case DCC_ADDRESSING_MODE_SINGLE_CHANNEL:
		case DCC_ADDRESSING_MODE_DUAL_CHANNEL_ASYMMETRIC:
			swizzle_x = I915_BIT_6_SWIZZLE_NONE;
			swizzle_y = I915_BIT_6_SWIZZLE_NONE;
			break;
		case DCC_ADDRESSING_MODE_DUAL_CHANNEL_INTERLEAVED:
			if (dcc & DCC_CHANNEL_XOR_DISABLE) {
				/* This is the base swizzling by the GPU for
				 * tiled buffers.
				 */
				swizzle_x = I915_BIT_6_SWIZZLE_9_10;
				swizzle_y = I915_BIT_6_SWIZZLE_9;
			} else if ((dcc & DCC_CHANNEL_XOR_BIT_17) == 0) {
				/* Bit 11 swizzling by the CPU in addition. */
				swizzle_x = I915_BIT_6_SWIZZLE_9_10_11;
				swizzle_y = I915_BIT_6_SWIZZLE_9_11;
			} else {
				/* Bit 17 swizzling by the CPU in addition. */
				swizzle_x = I915_BIT_6_SWIZZLE_9_10_17;
				swizzle_y = I915_BIT_6_SWIZZLE_9_17;
			}
			break;
		}
		if (dcc == 0xffffffff) {
			DRM_ERROR("Couldn't read from MCHBAR.  "
				  "Disabling tiling.\n");
			swizzle_x = I915_BIT_6_SWIZZLE_UNKNOWN;
			swizzle_y = I915_BIT_6_SWIZZLE_UNKNOWN;
		}
	} else {
		/* The 965, G33, and newer, have a very flexible memory
		 * configuration.  It will enable dual-channel mode
		 * (interleaving) on as much memory as it can, and the GPU
		 * will additionally sometimes enable different bit 6
		 * swizzling for tiled objects from the CPU.
		 *
		 * Here's what I found on the G965:
		 *    slot fill         memory size  swizzling
		 * 0A   0B   1A   1B   1-ch   2-ch
		 * 512  0    0    0    512    0     O
		 * 512  0    512  0    16     1008  X
		 * 512  0    0    512  16     1008  X
		 * 0    512  0    512  16     1008  X
		 * 1024 1024 1024 0    2048   1024  O
		 *
		 * We could probably detect this based on either the DRB
		 * matching, which was the case for the swizzling required in
		 * the table above, or from the 1-ch value being less than
		 * the minimum size of a rank.
		 */
		if (I915_READ16(C0DRB3) != I915_READ16(C1DRB3)) {
			swizzle_x = I915_BIT_6_SWIZZLE_NONE;
			swizzle_y = I915_BIT_6_SWIZZLE_NONE;
		} else {
			swizzle_x = I915_BIT_6_SWIZZLE_9_10;
			swizzle_y = I915_BIT_6_SWIZZLE_9;
		}
	}

	dev_priv->mm.bit_6_swizzle_x = swizzle_x;
	dev_priv->mm.bit_6_swizzle_y = swizzle_y;
}

/* Check pitch constraints for all chips & tiling formats */
static bool
i915_tiling_ok(struct drm_device *dev, int stride, int size, int tiling_mode)
{
	int tile_width;

	/* Linear is always fine */
	if (tiling_mode == I915_TILING_NONE)
		return true;

	if (IS_GEN2(dev) ||
	    (tiling_mode == I915_TILING_Y && HAS_128_BYTE_Y_TILING(dev)))
		tile_width = 128;
	else
		tile_width = 512;

	/* check maximum stride & object size */
	/* i965+ stores the end address of the gtt mapping in the fence
	 * reg, so don't bother to check the size */
	if (INTEL_INFO(dev)->gen >= 7) {
		if (stride / 128 > GEN7_FENCE_MAX_PITCH_VAL)
			return false;
	} else if (INTEL_INFO(dev)->gen >= 4) {
		if (stride / 128 > I965_FENCE_MAX_PITCH_VAL)
			return false;
	} else {
		if (stride > 8192)
			return false;

		if (IS_GEN3(dev)) {
			if (size > I830_FENCE_MAX_SIZE_VAL << 20)
				return false;
		} else {
			if (size > I830_FENCE_MAX_SIZE_VAL << 19)
				return false;
		}
	}

	if (stride < tile_width)
		return false;

	/* 965+ just needs multiples of tile width */
	if (INTEL_INFO(dev)->gen >= 4) {
		if (stride & (tile_width - 1))
			return false;
		return true;
	}

	/* Pre-965 needs power of two tile widths */
	if (stride & (stride - 1))
		return false;

	return true;
}
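
/*
 * For example, a 4096-byte X-tiled object with a 512-byte stride passes the
 * checks above on every generation: 512 is a multiple of the tile width, a
 * power of two, and well below each maximum pitch.
 */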
257 | ||
a00b10c3 CW |
258 | /* Is the current GTT allocation valid for the change in tiling? */ |
259 | static bool | |
05394f39 | 260 | i915_gem_object_fence_ok(struct drm_i915_gem_object *obj, int tiling_mode) |
52dc7d32 | 261 | { |
a00b10c3 | 262 | u32 size; |
52dc7d32 CW |
263 | |
264 | if (tiling_mode == I915_TILING_NONE) | |
265 | return true; | |
266 | ||
05394f39 | 267 | if (INTEL_INFO(obj->base.dev)->gen >= 4) |
a6c45cf0 CW |
268 | return true; |
269 | ||
05394f39 | 270 | if (INTEL_INFO(obj->base.dev)->gen == 3) { |
f343c5f6 | 271 | if (i915_gem_obj_ggtt_offset(obj) & ~I915_FENCE_START_MASK) |
df153158 CW |
272 | return false; |
273 | } else { | |
f343c5f6 | 274 | if (i915_gem_obj_ggtt_offset(obj) & ~I830_FENCE_START_MASK) |
df153158 CW |
275 | return false; |
276 | } | |
277 | ||
0fa87796 | 278 | size = i915_gem_get_gtt_size(obj->base.dev, obj->base.size, tiling_mode); |
f343c5f6 | 279 | if (i915_gem_obj_ggtt_size(obj) != size) |
a6c45cf0 CW |
280 | return false; |
281 | ||
f343c5f6 | 282 | if (i915_gem_obj_ggtt_offset(obj) & (size - 1)) |
df153158 | 283 | return false; |
52dc7d32 CW |
284 | |
285 | return true; | |
286 | } | |
287 | ||
673a394b EA |
288 | /** |
289 | * Sets the tiling mode of an object, returning the required swizzling of | |
290 | * bit 6 of addresses in the object. | |
291 | */ | |
292 | int | |
293 | i915_gem_set_tiling(struct drm_device *dev, void *data, | |
05394f39 | 294 | struct drm_file *file) |
673a394b EA |
295 | { |
296 | struct drm_i915_gem_set_tiling *args = data; | |
297 | drm_i915_private_t *dev_priv = dev->dev_private; | |
05394f39 | 298 | struct drm_i915_gem_object *obj; |
47ae63e0 | 299 | int ret = 0; |
673a394b | 300 | |
05394f39 | 301 | obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle)); |
c8725226 | 302 | if (&obj->base == NULL) |
bf79cb91 | 303 | return -ENOENT; |
673a394b | 304 | |
05394f39 CW |
305 | if (!i915_tiling_ok(dev, |
306 | args->stride, obj->base.size, args->tiling_mode)) { | |
307 | drm_gem_object_unreference_unlocked(&obj->base); | |
0f973f27 | 308 | return -EINVAL; |
72daad40 | 309 | } |
0f973f27 | 310 | |
80075d49 | 311 | if (obj->pin_count || obj->framebuffer_references) { |
05394f39 | 312 | drm_gem_object_unreference_unlocked(&obj->base); |
31770bd4 DV |
313 | return -EBUSY; |
314 | } | |
315 | ||
673a394b | 316 | if (args->tiling_mode == I915_TILING_NONE) { |
673a394b | 317 | args->swizzle_mode = I915_BIT_6_SWIZZLE_NONE; |
52dc7d32 | 318 | args->stride = 0; |
673a394b EA |
319 | } else { |
320 | if (args->tiling_mode == I915_TILING_X) | |
321 | args->swizzle_mode = dev_priv->mm.bit_6_swizzle_x; | |
322 | else | |
323 | args->swizzle_mode = dev_priv->mm.bit_6_swizzle_y; | |
280b713b EA |
324 | |
325 | /* Hide bit 17 swizzling from the user. This prevents old Mesa | |
326 | * from aborting the application on sw fallbacks to bit 17, | |
327 | * and we use the pread/pwrite bit17 paths to swizzle for it. | |
328 | * If there was a user that was relying on the swizzle | |
329 | * information for drm_intel_bo_map()ed reads/writes this would | |
330 | * break it, but we don't have any of those. | |
331 | */ | |
332 | if (args->swizzle_mode == I915_BIT_6_SWIZZLE_9_17) | |
333 | args->swizzle_mode = I915_BIT_6_SWIZZLE_9; | |
334 | if (args->swizzle_mode == I915_BIT_6_SWIZZLE_9_10_17) | |
335 | args->swizzle_mode = I915_BIT_6_SWIZZLE_9_10; | |
336 | ||
673a394b EA |
337 | /* If we can't handle the swizzling, make it untiled. */ |
338 | if (args->swizzle_mode == I915_BIT_6_SWIZZLE_UNKNOWN) { | |
339 | args->tiling_mode = I915_TILING_NONE; | |
340 | args->swizzle_mode = I915_BIT_6_SWIZZLE_NONE; | |
52dc7d32 | 341 | args->stride = 0; |
673a394b EA |
342 | } |
343 | } | |
0f973f27 | 344 | |
52dc7d32 | 345 | mutex_lock(&dev->struct_mutex); |
05394f39 CW |
346 | if (args->tiling_mode != obj->tiling_mode || |
347 | args->stride != obj->stride) { | |
52dc7d32 CW |
348 | /* We need to rebind the object if its current allocation |
349 | * no longer meets the alignment restrictions for its new | |
350 | * tiling mode. Otherwise we can just leave it alone, but | |
1869b620 CW |
351 | * need to ensure that any fence register is updated before |
352 | * the next fenced (either through the GTT or by the BLT unit | |
353 | * on older GPUs) access. | |
5d82e3e6 CW |
354 | * |
355 | * After updating the tiling parameters, we then flag whether | |
356 | * we need to update an associated fence register. Note this | |
357 | * has to also include the unfenced register the GPU uses | |
358 | * whilst executing a fenced command for an untiled object. | |
0f973f27 | 359 | */ |
fe305198 | 360 | |
d9e86c0e | 361 | obj->map_and_fenceable = |
f343c5f6 | 362 | !i915_gem_obj_ggtt_bound(obj) || |
07fe0b12 BW |
363 | (i915_gem_obj_ggtt_offset(obj) + |
364 | obj->base.size <= dev_priv->gtt.mappable_end && | |
d9e86c0e | 365 | i915_gem_object_fence_ok(obj, args->tiling_mode)); |
52dc7d32 | 366 | |
467cffba CW |
367 | /* Rebind if we need a change of alignment */ |
368 | if (!obj->map_and_fenceable) { | |
07fe0b12 | 369 | u32 unfenced_align = |
d865110c ID |
370 | i915_gem_get_gtt_alignment(dev, obj->base.size, |
371 | args->tiling_mode, | |
372 | false); | |
07fe0b12 BW |
373 | if (i915_gem_obj_ggtt_offset(obj) & (unfenced_align - 1)) |
374 | ret = i915_gem_object_ggtt_unbind(obj); | |
467cffba CW |
375 | } |
376 | ||
377 | if (ret == 0) { | |
5d82e3e6 CW |
378 | obj->fence_dirty = |
379 | obj->fenced_gpu_access || | |
380 | obj->fence_reg != I915_FENCE_REG_NONE; | |
381 | ||
467cffba CW |
382 | obj->tiling_mode = args->tiling_mode; |
383 | obj->stride = args->stride; | |
1869b620 CW |
384 | |
385 | /* Force the fence to be reacquired for GTT access */ | |
386 | i915_gem_release_mmap(obj); | |
467cffba | 387 | } |
0f973f27 | 388 | } |
467cffba CW |
389 | /* we have to maintain this existing ABI... */ |
390 | args->stride = obj->stride; | |
391 | args->tiling_mode = obj->tiling_mode; | |
e9b73c67 CW |
392 | |
393 | /* Try to preallocate memory required to save swizzling on put-pages */ | |
394 | if (i915_gem_object_needs_bit17_swizzle(obj)) { | |
395 | if (obj->bit_17 == NULL) { | |
a1e22653 | 396 | obj->bit_17 = kcalloc(BITS_TO_LONGS(obj->base.size >> PAGE_SHIFT), |
e9b73c67 CW |
397 | sizeof(long), GFP_KERNEL); |
398 | } | |
399 | } else { | |
400 | kfree(obj->bit_17); | |
401 | obj->bit_17 = NULL; | |
402 | } | |
403 | ||
05394f39 | 404 | drm_gem_object_unreference(&obj->base); |
d6873102 | 405 | mutex_unlock(&dev->struct_mutex); |
673a394b | 406 | |
467cffba | 407 | return ret; |
673a394b EA |
408 | } |

/**
 * Returns the current tiling mode and required bit 6 swizzling for the object.
 */
int
i915_gem_get_tiling(struct drm_device *dev, void *data,
		    struct drm_file *file)
{
	struct drm_i915_gem_get_tiling *args = data;
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_i915_gem_object *obj;

	obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
	if (&obj->base == NULL)
		return -ENOENT;

	mutex_lock(&dev->struct_mutex);

	args->tiling_mode = obj->tiling_mode;
	switch (obj->tiling_mode) {
	case I915_TILING_X:
		args->swizzle_mode = dev_priv->mm.bit_6_swizzle_x;
		break;
	case I915_TILING_Y:
		args->swizzle_mode = dev_priv->mm.bit_6_swizzle_y;
		break;
	case I915_TILING_NONE:
		args->swizzle_mode = I915_BIT_6_SWIZZLE_NONE;
		break;
	default:
		DRM_ERROR("unknown tiling mode\n");
	}

	/* Hide bit 17 from the user -- see comment in i915_gem_set_tiling */
	if (args->swizzle_mode == I915_BIT_6_SWIZZLE_9_17)
		args->swizzle_mode = I915_BIT_6_SWIZZLE_9;
	if (args->swizzle_mode == I915_BIT_6_SWIZZLE_9_10_17)
		args->swizzle_mode = I915_BIT_6_SWIZZLE_9_10;

	drm_gem_object_unreference(&obj->base);
	mutex_unlock(&dev->struct_mutex);

	return 0;
}

/**
 * Swap every 64 bytes of this page around, to account for it having a new
 * bit 17 of its physical address and therefore being interpreted differently
 * by the GPU.
 */
static void
i915_gem_swizzle_page(struct page *page)
{
	char temp[64];
	char *vaddr;
	int i;

	vaddr = kmap(page);

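	/* Flipping bit 17 flips the effective bit 6 of the swizzled address,
	 * which selects the 64-byte half of each 128-byte block; swap the
	 * two halves of every 128-byte chunk to compensate.
	 */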
	for (i = 0; i < PAGE_SIZE; i += 128) {
		memcpy(temp, &vaddr[i], 64);
		memcpy(&vaddr[i], &vaddr[i + 64], 64);
		memcpy(&vaddr[i + 64], temp, 64);
	}

	kunmap(page);
}

void
i915_gem_object_do_bit_17_swizzle(struct drm_i915_gem_object *obj)
{
	struct sg_page_iter sg_iter;
	int i;

	if (obj->bit_17 == NULL)
		return;

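	/* Compare bit 17 of each page's current physical address against
	 * the value recorded in obj->bit_17 when the pages were last put,
	 * and fix up any page that has moved across a bit 17 boundary.
	 */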
	i = 0;
	for_each_sg_page(obj->pages->sgl, &sg_iter, obj->pages->nents, 0) {
		struct page *page = sg_page_iter_page(&sg_iter);
		char new_bit_17 = page_to_phys(page) >> 17;
		if ((new_bit_17 & 0x1) !=
		    (test_bit(i, obj->bit_17) != 0)) {
			i915_gem_swizzle_page(page);
			set_page_dirty(page);
		}
		i++;
	}
}

void
i915_gem_object_save_bit_17_swizzle(struct drm_i915_gem_object *obj)
{
	struct sg_page_iter sg_iter;
	int page_count = obj->base.size >> PAGE_SHIFT;
	int i;

	if (obj->bit_17 == NULL) {
		obj->bit_17 = kcalloc(BITS_TO_LONGS(page_count),
				      sizeof(long), GFP_KERNEL);
		if (obj->bit_17 == NULL) {
			DRM_ERROR("Failed to allocate memory for bit 17 "
				  "record\n");
			return;
		}
	}

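	/* Record the current bit 17 of each page's physical address so that
	 * i915_gem_object_do_bit_17_swizzle() can detect pages that return
	 * from swap at a different address.
	 */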
	i = 0;
	for_each_sg_page(obj->pages->sgl, &sg_iter, obj->pages->nents, 0) {
		if (page_to_phys(sg_page_iter_page(&sg_iter)) & (1 << 17))
			__set_bit(i, obj->bit_17);
		else
			__clear_bit(i, obj->bit_17);
		i++;
	}
}