/*
 * Copyright © 2008 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Eric Anholt <eric@anholt.net>
 *
 */

#include "linux/string.h"
#include "linux/bitops.h"
#include "drmP.h"
#include "drm.h"
#include "i915_drm.h"
#include "i915_drv.h"

/** @file i915_gem_tiling.c
 *
 * Support for managing the tiling state of buffer objects.
 *
 * The idea behind tiling is to increase cache hit rates by rearranging
 * pixel data so that a group of pixel accesses are in the same cacheline.
 * Performance improvements from doing this on the back/depth buffer are on
 * the order of 30%.
 *
 * Intel architectures make this somewhat more complicated, though, by
 * adjustments made to addressing of data when the memory is in interleaved
 * mode (matched pairs of DIMMs) to improve memory bandwidth.
 * For interleaved memory, the CPU sends every sequential 64 bytes
 * to an alternate memory channel so it can get the bandwidth from both.
 *
 * The GPU also rearranges its accesses for increased bandwidth to interleaved
 * memory, and it matches what the CPU does for non-tiled. However, when tiled
 * it does it a little differently, since one walks addresses not just in the
 * X direction but also Y. So, along with alternating channels when bit
 * 6 of the address flips, it also alternates when other bits flip -- bits 9
 * (every 512 bytes, an X tile scanline) and 10 (every two X tile scanlines)
 * are common to both the 915 and 965-class hardware.
 *
 * The CPU also sometimes XORs in higher bits as well, to improve
 * bandwidth doing strided access like we do so frequently in graphics. This
 * is called "Channel XOR Randomization" in the MCH documentation. The result
 * is that the CPU is XORing in either bit 11 or bit 17 to bit 6 of its address
 * decode.
 *
 * All of this bit 6 XORing has an effect on our memory management,
 * as we need to make sure that the 3d driver can correctly address object
 * contents.
 *
 * If we don't have interleaved memory, all tiling is safe and no swizzling is
 * required.
 *
 * When bit 17 is XORed in, we simply refuse to tile at all. Bit
 * 17 is not just a page offset, so as we page an object out and back in,
 * individual pages in it will have different bit 17 addresses, resulting in
 * each 64 bytes being swapped with its neighbor!
 *
 * Otherwise, if interleaved, we have to tell the 3d driver what the address
 * swizzling it needs to do is, since it's writing with the CPU to the pages
 * (bit 6 and potentially bit 11 XORed in), and the GPU is reading from the
 * pages (bits 6, 9, and 10 XORed in), resulting in a cumulative bit swizzling
 * required by the CPU of XORing in bits 6, 9, 10, and potentially 11, in order
 * to match what the GPU expects.
 */

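/*
 * Illustrative sketch only, not used by the driver: with the 9_10
 * swizzle mode described above, the CPU-visible address of tiled data
 * can be derived by folding bits 9 and 10 into bit 6. Modes that also
 * randomize on bit 11 or bit 17 fold those bits in the same way. The
 * helper name is hypothetical.
 */
static inline unsigned long
i915_gem_swizzle_addr_9_10(unsigned long addr)
{
	/* bit 6 ^= bit 9 ^ bit 10 */
	return addr ^ (((addr >> 3) ^ (addr >> 4)) & (1UL << 6));
}
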
/**
 * Detects bit 6 swizzling of address lookup between IGD access and CPU
 * access through main memory.
 */
void
i915_gem_detect_bit_6_swizzle(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	uint32_t swizzle_x = I915_BIT_6_SWIZZLE_UNKNOWN;
	uint32_t swizzle_y = I915_BIT_6_SWIZZLE_UNKNOWN;

	if (INTEL_INFO(dev)->gen >= 6) {
		uint32_t dimm_c0, dimm_c1;
		dimm_c0 = I915_READ(MAD_DIMM_C0);
		dimm_c1 = I915_READ(MAD_DIMM_C1);
		dimm_c0 &= MAD_DIMM_A_SIZE_MASK | MAD_DIMM_B_SIZE_MASK;
		dimm_c1 &= MAD_DIMM_A_SIZE_MASK | MAD_DIMM_B_SIZE_MASK;
		/* Enable swizzling when the channels are populated with
		 * identically sized DIMMs. We don't need to check the 3rd
		 * channel because no CPU with a GPU attached ships in that
		 * configuration. Also, swizzling only makes sense for 2
		 * channels anyway. */
		if (dimm_c0 == dimm_c1) {
			swizzle_x = I915_BIT_6_SWIZZLE_9_10;
			swizzle_y = I915_BIT_6_SWIZZLE_9;
		} else {
			swizzle_x = I915_BIT_6_SWIZZLE_NONE;
			swizzle_y = I915_BIT_6_SWIZZLE_NONE;
		}
	} else if (IS_GEN5(dev)) {
		/* On Ironlake, the GPU uses the same swizzling setup
		 * regardless of the DRAM configuration.
		 */
		swizzle_x = I915_BIT_6_SWIZZLE_9_10;
		swizzle_y = I915_BIT_6_SWIZZLE_9;
	} else if (IS_GEN2(dev)) {
		/* As far as we know, the 865 doesn't have these bit 6
		 * swizzling issues.
		 */
		swizzle_x = I915_BIT_6_SWIZZLE_NONE;
		swizzle_y = I915_BIT_6_SWIZZLE_NONE;
	} else if (IS_MOBILE(dev) || (IS_GEN3(dev) && !IS_G33(dev))) {
		uint32_t dcc;

		/* On 9xx chipsets, channel interleave by the CPU is
		 * determined by DCC. For single-channel, neither the CPU
		 * nor the GPU does swizzling. For dual channel interleaved,
		 * the GPU's interleave is bits 9 and 10 for X tiled, and bit
		 * 9 for Y tiled. The CPU's interleave is independent, and
		 * can be based on either bit 11 (haven't seen this yet) or
		 * bit 17 (common).
		 */
		dcc = I915_READ(DCC);
		switch (dcc & DCC_ADDRESSING_MODE_MASK) {
		case DCC_ADDRESSING_MODE_SINGLE_CHANNEL:
		case DCC_ADDRESSING_MODE_DUAL_CHANNEL_ASYMMETRIC:
			swizzle_x = I915_BIT_6_SWIZZLE_NONE;
			swizzle_y = I915_BIT_6_SWIZZLE_NONE;
			break;
		case DCC_ADDRESSING_MODE_DUAL_CHANNEL_INTERLEAVED:
			if (dcc & DCC_CHANNEL_XOR_DISABLE) {
				/* This is the base swizzling by the GPU for
				 * tiled buffers.
				 */
				swizzle_x = I915_BIT_6_SWIZZLE_9_10;
				swizzle_y = I915_BIT_6_SWIZZLE_9;
			} else if ((dcc & DCC_CHANNEL_XOR_BIT_17) == 0) {
				/* Bit 11 swizzling by the CPU in addition. */
				swizzle_x = I915_BIT_6_SWIZZLE_9_10_11;
				swizzle_y = I915_BIT_6_SWIZZLE_9_11;
			} else {
				/* Bit 17 swizzling by the CPU in addition. */
				swizzle_x = I915_BIT_6_SWIZZLE_9_10_17;
				swizzle_y = I915_BIT_6_SWIZZLE_9_17;
			}
			break;
		}
		if (dcc == 0xffffffff) {
			DRM_ERROR("Couldn't read from MCHBAR. "
				  "Disabling tiling.\n");
			swizzle_x = I915_BIT_6_SWIZZLE_UNKNOWN;
			swizzle_y = I915_BIT_6_SWIZZLE_UNKNOWN;
		}
	} else {
		/* The 965, G33, and newer, have a very flexible memory
		 * configuration. It will enable dual-channel mode
		 * (interleaving) on as much memory as it can, and the GPU
		 * will additionally sometimes enable different bit 6
		 * swizzling for tiled objects from the CPU.
		 *
		 * Here's what I found on the G965:
		 *    slot fill         memory size  swizzling
		 * 0A   0B   1A   1B   1-ch   2-ch
		 * 512  0    0    0    512    0     O
		 * 512  0    512  0    16     1008  X
		 * 512  0    0    512  16     1008  X
		 * 0    512  0    512  16     1008  X
		 * 1024 1024 1024 0    2048   1024  O
		 *
		 * We could probably detect this based on either the DRB
		 * matching, which was the case for the swizzling required in
		 * the table above, or from the 1-ch value being less than
		 * the minimum size of a rank.
		 */
		if (I915_READ16(C0DRB3) != I915_READ16(C1DRB3)) {
			swizzle_x = I915_BIT_6_SWIZZLE_NONE;
			swizzle_y = I915_BIT_6_SWIZZLE_NONE;
		} else {
			swizzle_x = I915_BIT_6_SWIZZLE_9_10;
			swizzle_y = I915_BIT_6_SWIZZLE_9;
		}
	}

	dev_priv->mm.bit_6_swizzle_x = swizzle_x;
	dev_priv->mm.bit_6_swizzle_y = swizzle_y;
}

/* Check pitch constraints for all chips & tiling formats */
static bool
i915_tiling_ok(struct drm_device *dev, int stride, int size, int tiling_mode)
{
	int tile_width;

	/* Linear is always fine */
	if (tiling_mode == I915_TILING_NONE)
		return true;

	if (IS_GEN2(dev) ||
	    (tiling_mode == I915_TILING_Y && HAS_128_BYTE_Y_TILING(dev)))
		tile_width = 128;
	else
		tile_width = 512;

	/* check maximum stride & object size */
	if (INTEL_INFO(dev)->gen >= 4) {
		/* i965 stores the end address of the gtt mapping in the fence
		 * reg, so don't bother to check the size */
		if (stride / 128 > I965_FENCE_MAX_PITCH_VAL)
			return false;
	} else {
		if (stride > 8192)
			return false;

		if (IS_GEN3(dev)) {
			if (size > I830_FENCE_MAX_SIZE_VAL << 20)
				return false;
		} else {
			if (size > I830_FENCE_MAX_SIZE_VAL << 19)
				return false;
		}
	}

	/* 965+ just needs multiples of tile width */
	if (INTEL_INFO(dev)->gen >= 4) {
		if (stride & (tile_width - 1))
			return false;
		return true;
	}

	/* Pre-965 needs power of two tile widths */
	if (stride < tile_width)
		return false;

	if (stride & (stride - 1))
		return false;

	return true;
}

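/*
 * Illustrative sketch only, not part of the driver: given the rules
 * checked above, a caller could round a linear pitch up to the nearest
 * stride that i915_tiling_ok() would accept. The helper name is
 * hypothetical; roundup_pow_of_two() comes from linux/log2.h.
 */
static inline int
i915_round_up_tiled_stride(struct drm_device *dev, int stride, int tile_width)
{
	/* 965+ only needs a multiple of the tile width */
	if (INTEL_INFO(dev)->gen >= 4)
		return ALIGN(stride, tile_width);

	/* Pre-965 needs a power of two no smaller than the tile width */
	if (stride < tile_width)
		stride = tile_width;
	return roundup_pow_of_two(stride);
}
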
/* Is the current GTT allocation valid for the change in tiling? */
static bool
i915_gem_object_fence_ok(struct drm_i915_gem_object *obj, int tiling_mode)
{
	u32 size;

	if (tiling_mode == I915_TILING_NONE)
		return true;

	if (INTEL_INFO(obj->base.dev)->gen >= 4)
		return true;

	if (INTEL_INFO(obj->base.dev)->gen == 3) {
		if (obj->gtt_offset & ~I915_FENCE_START_MASK)
			return false;
	} else {
		if (obj->gtt_offset & ~I830_FENCE_START_MASK)
			return false;
	}

	/*
	 * Previous chips need to be aligned to the size of the smallest
	 * fence register that can contain the object.
	 */
	if (INTEL_INFO(obj->base.dev)->gen == 3)
		size = 1024*1024;
	else
		size = 512*1024;

	while (size < obj->base.size)
		size <<= 1;

	if (obj->gtt_space->size != size)
		return false;

	if (obj->gtt_offset & (size - 1))
		return false;

	return true;
}

/**
 * Sets the tiling mode of an object, returning the required swizzling of
 * bit 6 of addresses in the object.
 */
int
i915_gem_set_tiling(struct drm_device *dev, void *data,
		    struct drm_file *file)
{
	struct drm_i915_gem_set_tiling *args = data;
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_i915_gem_object *obj;
	int ret = 0;

	obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
	if (&obj->base == NULL)
		return -ENOENT;

	if (!i915_tiling_ok(dev,
			    args->stride, obj->base.size, args->tiling_mode)) {
		drm_gem_object_unreference_unlocked(&obj->base);
		return -EINVAL;
	}

	if (obj->pin_count) {
		drm_gem_object_unreference_unlocked(&obj->base);
		return -EBUSY;
	}

	if (args->tiling_mode == I915_TILING_NONE) {
		args->swizzle_mode = I915_BIT_6_SWIZZLE_NONE;
		args->stride = 0;
	} else {
		if (args->tiling_mode == I915_TILING_X)
			args->swizzle_mode = dev_priv->mm.bit_6_swizzle_x;
		else
			args->swizzle_mode = dev_priv->mm.bit_6_swizzle_y;

		/* Hide bit 17 swizzling from the user. This prevents old Mesa
		 * from aborting the application on sw fallbacks to bit 17,
		 * and we use the pread/pwrite bit17 paths to swizzle for it.
		 * If there was a user that was relying on the swizzle
		 * information for drm_intel_bo_map()ed reads/writes this would
		 * break it, but we don't have any of those.
		 */
		if (args->swizzle_mode == I915_BIT_6_SWIZZLE_9_17)
			args->swizzle_mode = I915_BIT_6_SWIZZLE_9;
		if (args->swizzle_mode == I915_BIT_6_SWIZZLE_9_10_17)
			args->swizzle_mode = I915_BIT_6_SWIZZLE_9_10;

		/* If we can't handle the swizzling, make it untiled. */
		if (args->swizzle_mode == I915_BIT_6_SWIZZLE_UNKNOWN) {
			args->tiling_mode = I915_TILING_NONE;
			args->swizzle_mode = I915_BIT_6_SWIZZLE_NONE;
			args->stride = 0;
		}
	}

	mutex_lock(&dev->struct_mutex);
	if (args->tiling_mode != obj->tiling_mode ||
	    args->stride != obj->stride) {
		/* We need to rebind the object if its current allocation
		 * no longer meets the alignment restrictions for its new
		 * tiling mode. Otherwise we can just leave it alone, but
		 * need to ensure that any fence register is updated before
		 * the next fenced (either through the GTT or by the BLT unit
		 * on older GPUs) access.
		 *
		 * After updating the tiling parameters, we then flag whether
		 * we need to update an associated fence register. Note this
		 * has to also include the unfenced register the GPU uses
		 * whilst executing a fenced command for an untiled object.
		 */

		obj->map_and_fenceable =
			obj->gtt_space == NULL ||
			(obj->gtt_offset + obj->base.size <= dev_priv->mm.gtt_mappable_end &&
			 i915_gem_object_fence_ok(obj, args->tiling_mode));

		/* Rebind if we need a change of alignment */
		if (!obj->map_and_fenceable) {
			u32 unfenced_alignment =
				i915_gem_get_unfenced_gtt_alignment(dev,
								    obj->base.size,
								    args->tiling_mode);
			if (obj->gtt_offset & (unfenced_alignment - 1))
				ret = i915_gem_object_unbind(obj);
		}

		if (ret == 0) {
			obj->fence_dirty =
				obj->fenced_gpu_access ||
				obj->fence_reg != I915_FENCE_REG_NONE;

			obj->tiling_mode = args->tiling_mode;
			obj->stride = args->stride;

			/* Force the fence to be reacquired for GTT access */
			i915_gem_release_mmap(obj);
		}
	}
	/* we have to maintain this existing ABI... */
	args->stride = obj->stride;
	args->tiling_mode = obj->tiling_mode;
	drm_gem_object_unreference(&obj->base);
	mutex_unlock(&dev->struct_mutex);

	return ret;
}

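/*
 * Illustrative userspace usage only, not driver code: a client could
 * request X tiling on a buffer and read back the effective swizzle
 * through libdrm's drmIoctl(). Variable names here are hypothetical.
 *
 *	struct drm_i915_gem_set_tiling set_tiling = {
 *		.handle = bo_handle,
 *		.tiling_mode = I915_TILING_X,
 *		.stride = pitch,
 *	};
 *	if (drmIoctl(fd, DRM_IOCTL_I915_GEM_SET_TILING, &set_tiling) == 0)
 *		swizzle = set_tiling.swizzle_mode;
 */
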
/**
 * Returns the current tiling mode and required bit 6 swizzling for the object.
 */
int
i915_gem_get_tiling(struct drm_device *dev, void *data,
		    struct drm_file *file)
{
	struct drm_i915_gem_get_tiling *args = data;
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_i915_gem_object *obj;

	obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
	if (&obj->base == NULL)
		return -ENOENT;

	mutex_lock(&dev->struct_mutex);

	args->tiling_mode = obj->tiling_mode;
	switch (obj->tiling_mode) {
	case I915_TILING_X:
		args->swizzle_mode = dev_priv->mm.bit_6_swizzle_x;
		break;
	case I915_TILING_Y:
		args->swizzle_mode = dev_priv->mm.bit_6_swizzle_y;
		break;
	case I915_TILING_NONE:
		args->swizzle_mode = I915_BIT_6_SWIZZLE_NONE;
		break;
	default:
		DRM_ERROR("unknown tiling mode\n");
	}

	/* Hide bit 17 from the user -- see comment in i915_gem_set_tiling */
	if (args->swizzle_mode == I915_BIT_6_SWIZZLE_9_17)
		args->swizzle_mode = I915_BIT_6_SWIZZLE_9;
	if (args->swizzle_mode == I915_BIT_6_SWIZZLE_9_10_17)
		args->swizzle_mode = I915_BIT_6_SWIZZLE_9_10;

	drm_gem_object_unreference(&obj->base);
	mutex_unlock(&dev->struct_mutex);

	return 0;
}

/**
 * Swap every 64 bytes of this page around, to account for it having a new
 * bit 17 of its physical address and therefore being interpreted differently
 * by the GPU.
 */
static void
i915_gem_swizzle_page(struct page *page)
{
	char temp[64];
	char *vaddr;
	int i;

	vaddr = kmap(page);

	for (i = 0; i < PAGE_SIZE; i += 128) {
		memcpy(temp, &vaddr[i], 64);
		memcpy(&vaddr[i], &vaddr[i + 64], 64);
		memcpy(&vaddr[i + 64], temp, 64);
	}

	kunmap(page);
}

void
i915_gem_object_do_bit_17_swizzle(struct drm_i915_gem_object *obj)
{
	struct scatterlist *sg;
	int page_count = obj->base.size >> PAGE_SHIFT;
	int i;

	if (obj->bit_17 == NULL)
		return;

	for_each_sg(obj->pages->sgl, sg, page_count, i) {
		struct page *page = sg_page(sg);
		char new_bit_17 = page_to_phys(page) >> 17;
		if ((new_bit_17 & 0x1) !=
		    (test_bit(i, obj->bit_17) != 0)) {
			i915_gem_swizzle_page(page);
			set_page_dirty(page);
		}
	}
}

void
i915_gem_object_save_bit_17_swizzle(struct drm_i915_gem_object *obj)
{
	struct scatterlist *sg;
	int page_count = obj->base.size >> PAGE_SHIFT;
	int i;

	if (obj->bit_17 == NULL) {
		obj->bit_17 = kmalloc(BITS_TO_LONGS(page_count) *
				      sizeof(long), GFP_KERNEL);
		if (obj->bit_17 == NULL) {
			DRM_ERROR("Failed to allocate memory for bit 17 "
				  "record\n");
			return;
		}
	}

	for_each_sg(obj->pages->sgl, sg, page_count, i) {
		struct page *page = sg_page(sg);
		if (page_to_phys(page) & (1 << 17))
			__set_bit(i, obj->bit_17);
		else
			__clear_bit(i, obj->bit_17);
	}
}