Commit | Line | Data |
---|---|---|
54cf91dc CW |
1 | /* |
2 | * Copyright © 2008,2010 Intel Corporation | |
3 | * | |
4 | * Permission is hereby granted, free of charge, to any person obtaining a | |
5 | * copy of this software and associated documentation files (the "Software"), | |
6 | * to deal in the Software without restriction, including without limitation | |
7 | * the rights to use, copy, modify, merge, publish, distribute, sublicense, | |
8 | * and/or sell copies of the Software, and to permit persons to whom the | |
9 | * Software is furnished to do so, subject to the following conditions: | |
10 | * | |
11 | * The above copyright notice and this permission notice (including the next | |
12 | * paragraph) shall be included in all copies or substantial portions of the | |
13 | * Software. | |
14 | * | |
15 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | |
16 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | |
17 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL | |
18 | * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER | |
19 | * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING | |
20 | * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS | |
21 | * IN THE SOFTWARE. | |
22 | * | |
23 | * Authors: | |
24 | * Eric Anholt <eric@anholt.net> | |
25 | * Chris Wilson <chris@chris-wilson.co.uk> | |
26 | * | |
27 | */ | |
28 | ||
29 | #include "drmP.h" | |
30 | #include "drm.h" | |
31 | #include "i915_drm.h" | |
32 | #include "i915_drv.h" | |
33 | #include "i915_trace.h" | |
34 | #include "intel_drv.h" | |
35 | ||
36 | struct change_domains { | |
37 | uint32_t invalidate_domains; | |
38 | uint32_t flush_domains; | |
39 | uint32_t flush_rings; | |
40 | }; | |
41 | ||
42 | /* | |
43 | * Set the next domain for the specified object. This | |
44 | * may not actually perform the necessary flushing/invalidating though, |
45 | * as that may want to be batched with other set_domain operations | |
46 | * | |
47 | * This is (we hope) the only really tricky part of gem. The goal | |
48 | * is fairly simple -- track which caches hold bits of the object | |
49 | * and make sure they remain coherent. A few concrete examples may | |
50 | * help to explain how it works. For shorthand, we use the notation | |
51 | * (read_domains, write_domain), e.g. (CPU, CPU) to indicate |
52 | * a pair of read and write domain masks. | |
53 | * | |
54 | * Case 1: the batch buffer | |
55 | * | |
56 | * 1. Allocated | |
57 | * 2. Written by CPU | |
58 | * 3. Mapped to GTT | |
59 | * 4. Read by GPU | |
60 | * 5. Unmapped from GTT | |
61 | * 6. Freed | |
62 | * | |
63 | * Let's take these a step at a time | |
64 | * | |
65 | * 1. Allocated | |
66 | * Pages allocated from the kernel may still have | |
67 | * cache contents, so we set them to (CPU, CPU) always. | |
68 | * 2. Written by CPU (using pwrite) | |
69 | * The pwrite function calls set_domain (CPU, CPU) and | |
70 | * this function does nothing (as nothing changes) | |
71 | * 3. Mapped to GTT |
72 | * This function asserts that the object is not | |
73 | * currently in any GPU-based read or write domains | |
74 | * 4. Read by GPU | |
75 | * i915_gem_execbuffer calls set_domain (COMMAND, 0). | |
76 | * As write_domain is zero, this function adds in the | |
77 | * current read domains (CPU+COMMAND, 0). | |
78 | * flush_domains is set to CPU. | |
79 | * invalidate_domains is set to COMMAND | |
80 | * clflush is run to get data out of the CPU caches | |
81 | * then i915_dev_set_domain calls i915_gem_flush to | |
82 | * emit an MI_FLUSH and drm_agp_chipset_flush | |
83 | * 5. Unmapped from GTT | |
84 | * i915_gem_object_unbind calls set_domain (CPU, CPU) | |
85 | * flush_domains and invalidate_domains end up both zero | |
86 | * so no flushing/invalidating happens | |
87 | * 6. Freed | |
88 | * yay, done | |
89 | * | |
90 | * Case 2: The shared render buffer | |
91 | * | |
92 | * 1. Allocated | |
93 | * 2. Mapped to GTT | |
94 | * 3. Read/written by GPU | |
95 | * 4. set_domain to (CPU,CPU) | |
96 | * 5. Read/written by CPU | |
97 | * 6. Read/written by GPU | |
98 | * | |
99 | * 1. Allocated | |
100 | * Same as last example, (CPU, CPU) | |
101 | * 2. Mapped to GTT | |
102 | * Nothing changes (assertions find that it is not in the GPU) | |
103 | * 3. Read/written by GPU | |
104 | * execbuffer calls set_domain (RENDER, RENDER) | |
105 | * flush_domains gets CPU | |
106 | * invalidate_domains gets GPU | |
107 | * clflush (obj) | |
108 | * MI_FLUSH and drm_agp_chipset_flush | |
109 | * 4. set_domain (CPU, CPU) | |
110 | * flush_domains gets GPU | |
111 | * invalidate_domains gets CPU | |
112 | * wait_rendering (obj) to make sure all drawing is complete. | |
113 | * This will include an MI_FLUSH to get the data from GPU | |
114 | * to memory | |
115 | * clflush (obj) to invalidate the CPU cache | |
116 | * Another MI_FLUSH in i915_gem_flush (eliminate this somehow?) | |
117 | * 5. Read/written by CPU | |
118 | * cache lines are loaded and dirtied | |
119 | * 6. Read/written by GPU |
120 | * Same as last GPU access | |
121 | * | |
122 | * Case 3: The constant buffer | |
123 | * | |
124 | * 1. Allocated | |
125 | * 2. Written by CPU | |
126 | * 3. Read by GPU | |
127 | * 4. Updated (written) by CPU again | |
128 | * 5. Read by GPU | |
129 | * | |
130 | * 1. Allocated | |
131 | * (CPU, CPU) | |
132 | * 2. Written by CPU | |
133 | * (CPU, CPU) | |
134 | * 3. Read by GPU | |
135 | * (CPU+RENDER, 0) | |
136 | * flush_domains = CPU | |
137 | * invalidate_domains = RENDER | |
138 | * clflush (obj) | |
139 | * MI_FLUSH | |
140 | * drm_agp_chipset_flush | |
141 | * 4. Updated (written) by CPU again | |
142 | * (CPU, CPU) | |
143 | * flush_domains = 0 (no previous write domain) | |
144 | * invalidate_domains = 0 (no new read domains) | |
145 | * 5. Read by GPU | |
146 | * (CPU+RENDER, 0) | |
147 | * flush_domains = CPU | |
148 | * invalidate_domains = RENDER | |
149 | * clflush (obj) | |
150 | * MI_FLUSH | |
151 | * drm_agp_chipset_flush | |
152 | */ | |
153 | static void | |
154 | i915_gem_object_set_to_gpu_domain(struct drm_i915_gem_object *obj, | |
155 | struct intel_ring_buffer *ring, | |
156 | struct change_domains *cd) | |
157 | { | |
158 | uint32_t invalidate_domains = 0, flush_domains = 0; | |
159 | ||
160 | /* | |
161 | * If the object isn't moving to a new write domain, | |
162 | * let the object stay in multiple read domains | |
163 | */ | |
164 | if (obj->base.pending_write_domain == 0) | |
165 | obj->base.pending_read_domains |= obj->base.read_domains; | |
166 | ||
167 | /* | |
168 | * Flush the current write domain if | |
169 | * the new read domains don't match. Invalidate | |
170 | * any read domains which differ from the old | |
171 | * write domain | |
172 | */ | |
173 | if (obj->base.write_domain && | |
174 | (((obj->base.write_domain != obj->base.pending_read_domains || | |
175 | obj->ring != ring)) || | |
176 | (obj->fenced_gpu_access && !obj->pending_fenced_gpu_access))) { | |
177 | flush_domains |= obj->base.write_domain; | |
178 | invalidate_domains |= | |
179 | obj->base.pending_read_domains & ~obj->base.write_domain; | |
180 | } | |
181 | /* | |
182 | * Invalidate any read caches which may have | |
183 | * stale data. That is, any new read domains. | |
184 | */ | |
185 | invalidate_domains |= obj->base.pending_read_domains & ~obj->base.read_domains; | |
186 | if ((flush_domains | invalidate_domains) & I915_GEM_DOMAIN_CPU) | |
187 | i915_gem_clflush_object(obj); | |
188 | ||
189 | /* blow away mappings if mapped through GTT */ | |
190 | if ((flush_domains | invalidate_domains) & I915_GEM_DOMAIN_GTT) | |
191 | i915_gem_release_mmap(obj); | |
192 | ||
193 | /* The actual obj->write_domain will be updated with | |
194 | * pending_write_domain after we emit the accumulated flush for all | |
195 | * of our domain changes in execbuffers (which clears objects' | |
196 | * write_domains). So if we have a current write domain that we | |
197 | * aren't changing, set pending_write_domain to that. | |
198 | */ | |
199 | if (flush_domains == 0 && obj->base.pending_write_domain == 0) | |
200 | obj->base.pending_write_domain = obj->base.write_domain; | |
201 | ||
202 | cd->invalidate_domains |= invalidate_domains; | |
203 | cd->flush_domains |= flush_domains; | |
204 | if (flush_domains & I915_GEM_GPU_DOMAINS) | |
205 | cd->flush_rings |= obj->ring->id; | |
206 | if (invalidate_domains & I915_GEM_GPU_DOMAINS) | |
207 | cd->flush_rings |= ring->id; | |
208 | } | |
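/*
 * A minimal usage sketch (mirroring i915_gem_execbuffer_move_to_gpu()
 * further below): the domain transitions for every object in the
 * execbuffer are accumulated into one struct change_domains so that a
 * single combined flush can be emitted afterwards:
 *
 *	struct change_domains cd = { 0, 0, 0 };
 *
 *	list_for_each_entry(obj, objects, exec_list)
 *		i915_gem_object_set_to_gpu_domain(obj, ring, &cd);
 *	if (cd.invalidate_domains | cd.flush_domains)
 *		i915_gem_execbuffer_flush(ring->dev,
 *					  cd.invalidate_domains,
 *					  cd.flush_domains,
 *					  cd.flush_rings);
 */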
209 | ||
67731b87 CW |
210 | struct eb_objects { |
211 | int and; | |
212 | struct hlist_head buckets[0]; | |
213 | }; | |
214 | ||
215 | static struct eb_objects * | |
216 | eb_create(int size) | |
217 | { | |
218 | struct eb_objects *eb; | |
219 | int count = PAGE_SIZE / sizeof(struct hlist_head) / 2; | |
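	/*
	 * Pick the largest power-of-two bucket count that does not exceed
	 * the number of objects, capped at half a page's worth of
	 * hlist_heads.
	 */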
220 | while (count > size) | |
221 | count >>= 1; | |
222 | eb = kzalloc(count*sizeof(struct hlist_head) + | |
223 | sizeof(struct eb_objects), | |
224 | GFP_KERNEL); | |
225 | if (eb == NULL) | |
226 | return eb; | |
227 | ||
228 | eb->and = count - 1; | |
229 | return eb; | |
230 | } | |
231 | ||
232 | static void | |
233 | eb_reset(struct eb_objects *eb) | |
234 | { | |
235 | memset(eb->buckets, 0, (eb->and+1)*sizeof(struct hlist_head)); | |
236 | } | |
237 | ||
238 | static void | |
239 | eb_add_object(struct eb_objects *eb, struct drm_i915_gem_object *obj) | |
240 | { | |
241 | hlist_add_head(&obj->exec_node, | |
242 | &eb->buckets[obj->exec_handle & eb->and]); | |
243 | } | |
244 | ||
245 | static struct drm_i915_gem_object * | |
246 | eb_get_object(struct eb_objects *eb, unsigned long handle) | |
247 | { | |
248 | struct hlist_head *head; | |
249 | struct hlist_node *node; | |
250 | struct drm_i915_gem_object *obj; | |
251 | ||
252 | head = &eb->buckets[handle & eb->and]; | |
253 | hlist_for_each(node, head) { | |
254 | obj = hlist_entry(node, struct drm_i915_gem_object, exec_node); | |
255 | if (obj->exec_handle == handle) | |
256 | return obj; | |
257 | } | |
258 | ||
259 | return NULL; | |
260 | } | |
261 | ||
262 | static void | |
263 | eb_destroy(struct eb_objects *eb) | |
264 | { | |
265 | kfree(eb); | |
266 | } | |
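/*
 * A minimal lifecycle sketch for the handle->object hash table above,
 * following the callers later in this file: the table gives fast lookup
 * of already-referenced objects while processing relocations.
 *
 *	struct eb_objects *eb = eb_create(args->buffer_count);
 *
 *	for each exec entry i:
 *		obj->exec_handle = exec[i].handle;
 *		eb_add_object(eb, obj);
 *	...
 *	target = eb_get_object(eb, reloc->target_handle);
 *	...
 *	eb_destroy(eb);
 *
 * eb_reset() simply empties the buckets so that the table can be
 * repopulated after the objects are re-acquired in the slow
 * relocation path.
 */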
267 | ||
54cf91dc CW |
268 | static int |
269 | i915_gem_execbuffer_relocate_entry(struct drm_i915_gem_object *obj, | |
67731b87 | 270 | struct eb_objects *eb, |
54cf91dc CW |
271 | struct drm_i915_gem_relocation_entry *reloc) |
272 | { | |
273 | struct drm_device *dev = obj->base.dev; | |
274 | struct drm_gem_object *target_obj; | |
275 | uint32_t target_offset; | |
276 | int ret = -EINVAL; | |
277 | ||
67731b87 CW |
278 | /* we already hold a reference to all valid objects */
279 | target_obj = &eb_get_object(eb, reloc->target_handle)->base; | |
280 | if (unlikely(target_obj == NULL)) | |
54cf91dc CW |
281 | return -ENOENT; |
282 | ||
283 | target_offset = to_intel_bo(target_obj)->gtt_offset; | |
284 | ||
54cf91dc CW |
285 | /* The target buffer should have appeared before us in the |
286 | * exec_object list, so it should have a GTT space bound by now. | |
287 | */ | |
b8f7ab17 | 288 | if (unlikely(target_offset == 0)) { |
54cf91dc CW |
289 | DRM_ERROR("No GTT space found for object %d\n", |
290 | reloc->target_handle); | |
67731b87 | 291 | return ret; |
54cf91dc CW |
292 | } |
293 | ||
294 | /* Validate that the target is in a valid r/w GPU domain */ | |
b8f7ab17 | 295 | if (unlikely(reloc->write_domain & (reloc->write_domain - 1))) { |
54cf91dc CW |
296 | DRM_ERROR("reloc with multiple write domains: " |
297 | "obj %p target %d offset %d " | |
298 | "read %08x write %08x", | |
299 | obj, reloc->target_handle, | |
300 | (int) reloc->offset, | |
301 | reloc->read_domains, | |
302 | reloc->write_domain); | |
67731b87 | 303 | return ret; |
54cf91dc | 304 | } |
b8f7ab17 | 305 | if (unlikely((reloc->write_domain | reloc->read_domains) & I915_GEM_DOMAIN_CPU)) { |
54cf91dc CW |
306 | DRM_ERROR("reloc with read/write CPU domains: " |
307 | "obj %p target %d offset %d " | |
308 | "read %08x write %08x", | |
309 | obj, reloc->target_handle, | |
310 | (int) reloc->offset, | |
311 | reloc->read_domains, | |
312 | reloc->write_domain); | |
67731b87 | 313 | return ret; |
54cf91dc | 314 | } |
b8f7ab17 CW |
315 | if (unlikely(reloc->write_domain && target_obj->pending_write_domain && |
316 | reloc->write_domain != target_obj->pending_write_domain)) { | |
54cf91dc CW |
317 | DRM_ERROR("Write domain conflict: " |
318 | "obj %p target %d offset %d " | |
319 | "new %08x old %08x\n", | |
320 | obj, reloc->target_handle, | |
321 | (int) reloc->offset, | |
322 | reloc->write_domain, | |
323 | target_obj->pending_write_domain); | |
67731b87 | 324 | return ret; |
54cf91dc CW |
325 | } |
326 | ||
327 | target_obj->pending_read_domains |= reloc->read_domains; | |
328 | target_obj->pending_write_domain |= reloc->write_domain; | |
329 | ||
330 | /* If the relocation already has the right value in it, no | |
331 | * more work needs to be done. | |
332 | */ | |
333 | if (target_offset == reloc->presumed_offset) | |
67731b87 | 334 | return 0; |
54cf91dc CW |
335 | |
336 | /* Check that the relocation address is valid... */ | |
b8f7ab17 | 337 | if (unlikely(reloc->offset > obj->base.size - 4)) { |
54cf91dc CW |
338 | DRM_ERROR("Relocation beyond object bounds: " |
339 | "obj %p target %d offset %d size %d.\n", | |
340 | obj, reloc->target_handle, | |
341 | (int) reloc->offset, | |
342 | (int) obj->base.size); | |
67731b87 | 343 | return ret; |
54cf91dc | 344 | } |
b8f7ab17 | 345 | if (unlikely(reloc->offset & 3)) { |
54cf91dc CW |
346 | DRM_ERROR("Relocation not 4-byte aligned: " |
347 | "obj %p target %d offset %d.\n", | |
348 | obj, reloc->target_handle, | |
349 | (int) reloc->offset); | |
67731b87 | 350 | return ret; |
54cf91dc CW |
351 | } |
352 | ||
353 | /* and points to somewhere within the target object. */ | |
b8f7ab17 | 354 | if (unlikely(reloc->delta >= target_obj->size)) { |
54cf91dc CW |
355 | DRM_ERROR("Relocation beyond target object bounds: " |
356 | "obj %p target %d delta %d size %d.\n", | |
357 | obj, reloc->target_handle, | |
358 | (int) reloc->delta, | |
359 | (int) target_obj->size); | |
67731b87 | 360 | return ret; |
54cf91dc CW |
361 | } |
362 | ||
363 | reloc->delta += target_offset; | |
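	/*
	 * Perform the relocation write itself: if the object is still in
	 * the CPU write domain we can poke the value through the kernel
	 * page mapping, otherwise move it to the GTT domain and write
	 * through the aperture below.
	 */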
364 | if (obj->base.write_domain == I915_GEM_DOMAIN_CPU) { | |
365 | uint32_t page_offset = reloc->offset & ~PAGE_MASK; | |
366 | char *vaddr; | |
367 | ||
368 | vaddr = kmap_atomic(obj->pages[reloc->offset >> PAGE_SHIFT]); | |
369 | *(uint32_t *)(vaddr + page_offset) = reloc->delta; | |
370 | kunmap_atomic(vaddr); | |
371 | } else { | |
372 | struct drm_i915_private *dev_priv = dev->dev_private; | |
373 | uint32_t __iomem *reloc_entry; | |
374 | void __iomem *reloc_page; | |
375 | ||
376 | ret = i915_gem_object_set_to_gtt_domain(obj, 1); | |
377 | if (ret) | |
67731b87 | 378 | return ret; |
54cf91dc CW |
379 | |
380 | /* Map the page containing the relocation we're going to perform. */ | |
381 | reloc->offset += obj->gtt_offset; | |
382 | reloc_page = io_mapping_map_atomic_wc(dev_priv->mm.gtt_mapping, | |
383 | reloc->offset & PAGE_MASK); | |
384 | reloc_entry = (uint32_t __iomem *) | |
385 | (reloc_page + (reloc->offset & ~PAGE_MASK)); | |
386 | iowrite32(reloc->delta, reloc_entry); | |
387 | io_mapping_unmap_atomic(reloc_page); | |
388 | } | |
389 | ||
390 | /* and update the user's relocation entry */ | |
391 | reloc->presumed_offset = target_offset; | |
392 | ||
67731b87 | 393 | return 0; |
54cf91dc CW |
394 | } |
395 | ||
396 | static int | |
397 | i915_gem_execbuffer_relocate_object(struct drm_i915_gem_object *obj, | |
6fe4f140 | 398 | struct eb_objects *eb) |
54cf91dc CW |
399 | { |
400 | struct drm_i915_gem_relocation_entry __user *user_relocs; | |
6fe4f140 | 401 | struct drm_i915_gem_exec_object2 *entry = obj->exec_entry; |
54cf91dc CW |
402 | int i, ret; |
403 | ||
404 | user_relocs = (void __user *)(uintptr_t)entry->relocs_ptr; | |
405 | for (i = 0; i < entry->relocation_count; i++) { | |
406 | struct drm_i915_gem_relocation_entry reloc; | |
407 | ||
408 | if (__copy_from_user_inatomic(&reloc, | |
409 | user_relocs+i, | |
410 | sizeof(reloc))) | |
411 | return -EFAULT; | |
412 | ||
6fe4f140 | 413 | ret = i915_gem_execbuffer_relocate_entry(obj, eb, &reloc); |
54cf91dc CW |
414 | if (ret) |
415 | return ret; | |
416 | ||
417 | if (__copy_to_user_inatomic(&user_relocs[i].presumed_offset, | |
418 | &reloc.presumed_offset, | |
419 | sizeof(reloc.presumed_offset))) | |
420 | return -EFAULT; | |
421 | } | |
422 | ||
423 | return 0; | |
424 | } | |
425 | ||
426 | static int | |
427 | i915_gem_execbuffer_relocate_object_slow(struct drm_i915_gem_object *obj, | |
67731b87 | 428 | struct eb_objects *eb, |
54cf91dc CW |
429 | struct drm_i915_gem_relocation_entry *relocs) |
430 | { | |
6fe4f140 | 431 | const struct drm_i915_gem_exec_object2 *entry = obj->exec_entry; |
54cf91dc CW |
432 | int i, ret; |
433 | ||
434 | for (i = 0; i < entry->relocation_count; i++) { | |
6fe4f140 | 435 | ret = i915_gem_execbuffer_relocate_entry(obj, eb, &relocs[i]); |
54cf91dc CW |
436 | if (ret) |
437 | return ret; | |
438 | } | |
439 | ||
440 | return 0; | |
441 | } | |
442 | ||
443 | static int | |
444 | i915_gem_execbuffer_relocate(struct drm_device *dev, | |
67731b87 | 445 | struct eb_objects *eb, |
6fe4f140 | 446 | struct list_head *objects) |
54cf91dc | 447 | { |
432e58ed CW |
448 | struct drm_i915_gem_object *obj; |
449 | int ret; | |
54cf91dc | 450 | |
432e58ed | 451 | list_for_each_entry(obj, objects, exec_list) { |
6fe4f140 | 452 | ret = i915_gem_execbuffer_relocate_object(obj, eb); |
54cf91dc CW |
453 | if (ret) |
454 | return ret; | |
455 | } | |
456 | ||
457 | return 0; | |
458 | } | |
459 | ||
460 | static int | |
d9e86c0e | 461 | i915_gem_execbuffer_reserve(struct intel_ring_buffer *ring, |
54cf91dc | 462 | struct drm_file *file, |
6fe4f140 | 463 | struct list_head *objects) |
54cf91dc | 464 | { |
432e58ed | 465 | struct drm_i915_gem_object *obj; |
432e58ed | 466 | int ret, retry; |
9b3826bf | 467 | bool has_fenced_gpu_access = INTEL_INFO(ring->dev)->gen < 4; |
6fe4f140 CW |
468 | struct list_head ordered_objects; |
469 | ||
470 | INIT_LIST_HEAD(&ordered_objects); | |
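	/*
	 * Partition the list so that objects which must be bound into the
	 * mappable portion of the GTT (because relocations will be written
	 * into them through the aperture, or because they need a fence)
	 * are reserved first and get first pick of that limited range.
	 */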
471 | while (!list_empty(objects)) { | |
472 | struct drm_i915_gem_exec_object2 *entry; | |
473 | bool need_fence, need_mappable; | |
474 | ||
475 | obj = list_first_entry(objects, | |
476 | struct drm_i915_gem_object, | |
477 | exec_list); | |
478 | entry = obj->exec_entry; | |
479 | ||
480 | need_fence = | |
481 | has_fenced_gpu_access && | |
482 | entry->flags & EXEC_OBJECT_NEEDS_FENCE && | |
483 | obj->tiling_mode != I915_TILING_NONE; | |
484 | need_mappable = | |
485 | entry->relocation_count ? true : need_fence; | |
486 | ||
487 | if (need_mappable) | |
488 | list_move(&obj->exec_list, &ordered_objects); | |
489 | else | |
490 | list_move_tail(&obj->exec_list, &ordered_objects); | |
595dad76 CW |
491 | |
492 | obj->base.pending_read_domains = 0; | |
493 | obj->base.pending_write_domain = 0; | |
6fe4f140 CW |
494 | } |
495 | list_splice(&ordered_objects, objects); | |
54cf91dc CW |
496 | |
497 | /* Attempt to pin all of the buffers into the GTT. | |
498 | * This is done in 3 phases: | |
499 | * | |
500 | * 1a. Unbind all objects that do not match the GTT constraints for | |
501 | * the execbuffer (fenceable, mappable, alignment etc). | |
502 | * 1b. Increment pin count for already bound objects. | |
503 | * 2. Bind new objects. | |
504 | * 3. Decrement pin count. | |
505 | * | |
506 | * This avoids unnecessary unbinding of later objects in order to make |
507 | * room for the earlier objects *unless* we need to defragment. | |
508 | */ | |
509 | retry = 0; | |
510 | do { | |
511 | ret = 0; | |
512 | ||
513 | /* Unbind any ill-fitting objects or pin. */ | |
432e58ed | 514 | list_for_each_entry(obj, objects, exec_list) { |
6fe4f140 | 515 | struct drm_i915_gem_exec_object2 *entry = obj->exec_entry; |
54cf91dc | 516 | bool need_fence, need_mappable; |
6fe4f140 | 517 | if (!obj->gtt_space) |
54cf91dc CW |
518 | continue; |
519 | ||
520 | need_fence = | |
9b3826bf | 521 | has_fenced_gpu_access && |
54cf91dc CW |
522 | entry->flags & EXEC_OBJECT_NEEDS_FENCE && |
523 | obj->tiling_mode != I915_TILING_NONE; | |
524 | need_mappable = | |
525 | entry->relocation_count ? true : need_fence; | |
526 | ||
527 | if ((entry->alignment && obj->gtt_offset & (entry->alignment - 1)) || | |
528 | (need_mappable && !obj->map_and_fenceable)) | |
529 | ret = i915_gem_object_unbind(obj); | |
530 | else | |
531 | ret = i915_gem_object_pin(obj, | |
532 | entry->alignment, | |
533 | need_mappable); | |
432e58ed | 534 | if (ret) |
54cf91dc | 535 | goto err; |
432e58ed CW |
536 | |
537 | entry++; | |
54cf91dc CW |
538 | } |
539 | ||
540 | /* Bind fresh objects */ | |
432e58ed | 541 | list_for_each_entry(obj, objects, exec_list) { |
6fe4f140 | 542 | struct drm_i915_gem_exec_object2 *entry = obj->exec_entry; |
54cf91dc CW |
543 | bool need_fence; |
544 | ||
545 | need_fence = | |
9b3826bf | 546 | has_fenced_gpu_access && |
54cf91dc CW |
547 | entry->flags & EXEC_OBJECT_NEEDS_FENCE && |
548 | obj->tiling_mode != I915_TILING_NONE; | |
549 | ||
550 | if (!obj->gtt_space) { | |
551 | bool need_mappable = | |
552 | entry->relocation_count ? true : need_fence; | |
553 | ||
554 | ret = i915_gem_object_pin(obj, | |
555 | entry->alignment, | |
556 | need_mappable); | |
557 | if (ret) | |
558 | break; | |
559 | } | |
560 | ||
9b3826bf CW |
561 | if (has_fenced_gpu_access) { |
562 | if (need_fence) { | |
563 | ret = i915_gem_object_get_fence(obj, ring, 1); | |
564 | if (ret) | |
565 | break; | |
566 | } else if (entry->flags & EXEC_OBJECT_NEEDS_FENCE && | |
567 | obj->tiling_mode == I915_TILING_NONE) { | |
568 | /* XXX pipelined! */ | |
569 | ret = i915_gem_object_put_fence(obj); | |
570 | if (ret) | |
571 | break; | |
572 | } | |
573 | obj->pending_fenced_gpu_access = need_fence; | |
54cf91dc CW |
574 | } |
575 | ||
576 | entry->offset = obj->gtt_offset; | |
577 | } | |
578 | ||
432e58ed CW |
579 | /* Decrement pin count for bound objects */ |
580 | list_for_each_entry(obj, objects, exec_list) { | |
54cf91dc CW |
581 | if (obj->gtt_space) |
582 | i915_gem_object_unpin(obj); | |
583 | } | |
584 | ||
585 | if (ret != -ENOSPC || retry > 1) | |
586 | return ret; | |
587 | ||
588 | /* First attempt, just clear anything that is purgeable. | |
589 | * Second attempt, clear the entire GTT. | |
590 | */ | |
d9e86c0e | 591 | ret = i915_gem_evict_everything(ring->dev, retry == 0); |
54cf91dc CW |
592 | if (ret) |
593 | return ret; | |
594 | ||
595 | retry++; | |
596 | } while (1); | |
432e58ed CW |
597 | |
598 | err: | |
602606a4 CW |
599 | obj = list_entry(obj->exec_list.prev, |
600 | struct drm_i915_gem_object, | |
601 | exec_list); | |
432e58ed CW |
602 | while (objects != &obj->exec_list) { |
603 | if (obj->gtt_space) | |
604 | i915_gem_object_unpin(obj); | |
605 | ||
606 | obj = list_entry(obj->exec_list.prev, | |
607 | struct drm_i915_gem_object, | |
608 | exec_list); | |
609 | } | |
610 | ||
611 | return ret; | |
54cf91dc CW |
612 | } |
613 | ||
614 | static int | |
615 | i915_gem_execbuffer_relocate_slow(struct drm_device *dev, | |
616 | struct drm_file *file, | |
d9e86c0e | 617 | struct intel_ring_buffer *ring, |
432e58ed | 618 | struct list_head *objects, |
67731b87 | 619 | struct eb_objects *eb, |
432e58ed | 620 | struct drm_i915_gem_exec_object2 *exec, |
54cf91dc CW |
621 | int count) |
622 | { | |
623 | struct drm_i915_gem_relocation_entry *reloc; | |
432e58ed | 624 | struct drm_i915_gem_object *obj; |
dd6864a4 | 625 | int *reloc_offset; |
54cf91dc CW |
626 | int i, total, ret; |
627 | ||
67731b87 | 628 | /* We may process another execbuffer during the unlock... */ |
36cf1742 | 629 | while (!list_empty(objects)) { |
67731b87 CW |
630 | obj = list_first_entry(objects, |
631 | struct drm_i915_gem_object, | |
632 | exec_list); | |
633 | list_del_init(&obj->exec_list); | |
634 | drm_gem_object_unreference(&obj->base); | |
635 | } | |
636 | ||
54cf91dc CW |
637 | mutex_unlock(&dev->struct_mutex); |
638 | ||
639 | total = 0; | |
640 | for (i = 0; i < count; i++) | |
432e58ed | 641 | total += exec[i].relocation_count; |
54cf91dc | 642 | |
dd6864a4 | 643 | reloc_offset = drm_malloc_ab(count, sizeof(*reloc_offset)); |
54cf91dc | 644 | reloc = drm_malloc_ab(total, sizeof(*reloc)); |
dd6864a4 CW |
645 | if (reloc == NULL || reloc_offset == NULL) { |
646 | drm_free_large(reloc); | |
647 | drm_free_large(reloc_offset); | |
54cf91dc CW |
648 | mutex_lock(&dev->struct_mutex); |
649 | return -ENOMEM; | |
650 | } | |
651 | ||
652 | total = 0; | |
653 | for (i = 0; i < count; i++) { | |
654 | struct drm_i915_gem_relocation_entry __user *user_relocs; | |
655 | ||
432e58ed | 656 | user_relocs = (void __user *)(uintptr_t)exec[i].relocs_ptr; |
54cf91dc CW |
657 | |
658 | if (copy_from_user(reloc+total, user_relocs, | |
432e58ed | 659 | exec[i].relocation_count * sizeof(*reloc))) { |
54cf91dc CW |
660 | ret = -EFAULT; |
661 | mutex_lock(&dev->struct_mutex); | |
662 | goto err; | |
663 | } | |
664 | ||
dd6864a4 | 665 | reloc_offset[i] = total; |
432e58ed | 666 | total += exec[i].relocation_count; |
54cf91dc CW |
667 | } |
668 | ||
669 | ret = i915_mutex_lock_interruptible(dev); | |
670 | if (ret) { | |
671 | mutex_lock(&dev->struct_mutex); | |
672 | goto err; | |
673 | } | |
674 | ||
67731b87 | 675 | /* reacquire the objects */ |
67731b87 CW |
676 | eb_reset(eb); |
677 | for (i = 0; i < count; i++) { | |
67731b87 CW |
678 | obj = to_intel_bo(drm_gem_object_lookup(dev, file, |
679 | exec[i].handle)); | |
680 | if (obj == NULL) { | |
681 | DRM_ERROR("Invalid object handle %d at index %d\n", | |
682 | exec[i].handle, i); | |
683 | ret = -ENOENT; | |
684 | goto err; | |
685 | } | |
686 | ||
687 | list_add_tail(&obj->exec_list, objects); | |
688 | obj->exec_handle = exec[i].handle; | |
6fe4f140 | 689 | obj->exec_entry = &exec[i]; |
67731b87 CW |
690 | eb_add_object(eb, obj); |
691 | } | |
692 | ||
6fe4f140 | 693 | ret = i915_gem_execbuffer_reserve(ring, file, objects); |
54cf91dc CW |
694 | if (ret) |
695 | goto err; | |
696 | ||
432e58ed | 697 | list_for_each_entry(obj, objects, exec_list) { |
dd6864a4 | 698 | int offset = obj->exec_entry - exec; |
67731b87 | 699 | ret = i915_gem_execbuffer_relocate_object_slow(obj, eb, |
dd6864a4 | 700 | reloc + reloc_offset[offset]); |
54cf91dc CW |
701 | if (ret) |
702 | goto err; | |
54cf91dc CW |
703 | } |
704 | ||
705 | * Leave the user relocations as they are; this is the painfully slow path, |
706 | * and we want to avoid the complication of dropping the lock whilst | |
707 | * having buffers reserved in the aperture and so causing spurious | |
708 | * ENOSPC for random operations. | |
709 | */ | |
710 | ||
711 | err: | |
712 | drm_free_large(reloc); | |
dd6864a4 | 713 | drm_free_large(reloc_offset); |
54cf91dc CW |
714 | return ret; |
715 | } | |
716 | ||
88241785 | 717 | static int |
54cf91dc CW |
718 | i915_gem_execbuffer_flush(struct drm_device *dev, |
719 | uint32_t invalidate_domains, | |
720 | uint32_t flush_domains, | |
721 | uint32_t flush_rings) | |
722 | { | |
723 | drm_i915_private_t *dev_priv = dev->dev_private; | |
88241785 | 724 | int i, ret; |
54cf91dc CW |
725 | |
726 | if (flush_domains & I915_GEM_DOMAIN_CPU) | |
727 | intel_gtt_chipset_flush(); | |
728 | ||
63256ec5 CW |
729 | if (flush_domains & I915_GEM_DOMAIN_GTT) |
730 | wmb(); | |
731 | ||
54cf91dc | 732 | if ((flush_domains | invalidate_domains) & I915_GEM_GPU_DOMAINS) { |
1ec14ad3 | 733 | for (i = 0; i < I915_NUM_RINGS; i++) |
88241785 | 734 | if (flush_rings & (1 << i)) { |
db53a302 | 735 | ret = i915_gem_flush_ring(&dev_priv->ring[i], |
88241785 CW |
736 | invalidate_domains, |
737 | flush_domains); | |
738 | if (ret) | |
739 | return ret; | |
740 | } | |
54cf91dc | 741 | } |
88241785 CW |
742 | |
743 | return 0; | |
54cf91dc CW |
744 | } |
745 | ||
1ec14ad3 CW |
746 | static int |
747 | i915_gem_execbuffer_sync_rings(struct drm_i915_gem_object *obj, | |
748 | struct intel_ring_buffer *to) | |
749 | { | |
750 | struct intel_ring_buffer *from = obj->ring; | |
751 | u32 seqno; | |
752 | int ret, idx; | |
753 | ||
754 | if (from == NULL || to == from) | |
755 | return 0; | |
756 | ||
1591192d CW |
757 | /* XXX gpu semaphores are currently causing hard hangs on SNB mobile */ |
758 | if (INTEL_INFO(obj->base.dev)->gen < 6 || IS_MOBILE(obj->base.dev)) | |
1ec14ad3 CW |
759 | return i915_gem_object_wait_rendering(obj, true); |
760 | ||
761 | idx = intel_ring_sync_index(from, to); | |
762 | ||
763 | seqno = obj->last_rendering_seqno; | |
764 | if (seqno <= from->sync_seqno[idx]) | |
765 | return 0; | |
766 | ||
767 | if (seqno == from->outstanding_lazy_request) { | |
768 | struct drm_i915_gem_request *request; | |
769 | ||
770 | request = kzalloc(sizeof(*request), GFP_KERNEL); | |
771 | if (request == NULL) | |
772 | return -ENOMEM; | |
773 | ||
db53a302 | 774 | ret = i915_add_request(from, NULL, request); |
1ec14ad3 CW |
775 | if (ret) { |
776 | kfree(request); | |
777 | return ret; | |
778 | } | |
779 | ||
780 | seqno = request->seqno; | |
781 | } | |
782 | ||
783 | from->sync_seqno[idx] = seqno; | |
784 | return intel_ring_sync(to, from, seqno - 1); | |
785 | } | |
54cf91dc CW |
786 | |
787 | static int | |
432e58ed CW |
788 | i915_gem_execbuffer_move_to_gpu(struct intel_ring_buffer *ring, |
789 | struct list_head *objects) | |
54cf91dc | 790 | { |
432e58ed | 791 | struct drm_i915_gem_object *obj; |
54cf91dc | 792 | struct change_domains cd; |
432e58ed | 793 | int ret; |
54cf91dc CW |
794 | |
795 | cd.invalidate_domains = 0; | |
796 | cd.flush_domains = 0; | |
797 | cd.flush_rings = 0; | |
432e58ed CW |
798 | list_for_each_entry(obj, objects, exec_list) |
799 | i915_gem_object_set_to_gpu_domain(obj, ring, &cd); | |
54cf91dc CW |
800 | |
801 | if (cd.invalidate_domains | cd.flush_domains) { | |
88241785 CW |
802 | ret = i915_gem_execbuffer_flush(ring->dev, |
803 | cd.invalidate_domains, | |
804 | cd.flush_domains, | |
805 | cd.flush_rings); | |
806 | if (ret) | |
807 | return ret; | |
54cf91dc CW |
808 | } |
809 | ||
432e58ed | 810 | list_for_each_entry(obj, objects, exec_list) { |
1ec14ad3 CW |
811 | ret = i915_gem_execbuffer_sync_rings(obj, ring); |
812 | if (ret) | |
813 | return ret; | |
54cf91dc CW |
814 | } |
815 | ||
816 | return 0; | |
817 | } | |
818 | ||
432e58ed CW |
819 | static bool |
820 | i915_gem_check_execbuffer(struct drm_i915_gem_execbuffer2 *exec) | |
54cf91dc | 821 | { |
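	/* The batch offset and length must both be 8-byte (qword) aligned. */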
432e58ed | 822 | return ((exec->batch_start_offset | exec->batch_len) & 0x7) == 0; |
54cf91dc CW |
823 | } |
824 | ||
825 | static int | |
826 | validate_exec_list(struct drm_i915_gem_exec_object2 *exec, | |
827 | int count) | |
828 | { | |
829 | int i; | |
830 | ||
831 | for (i = 0; i < count; i++) { | |
832 | char __user *ptr = (char __user *)(uintptr_t)exec[i].relocs_ptr; | |
833 | int length; /* limited by fault_in_pages_readable() */ | |
834 | ||
835 | /* First check for malicious input causing overflow */ | |
836 | if (exec[i].relocation_count > | |
837 | INT_MAX / sizeof(struct drm_i915_gem_relocation_entry)) | |
838 | return -EINVAL; | |
839 | ||
840 | length = exec[i].relocation_count * | |
841 | sizeof(struct drm_i915_gem_relocation_entry); | |
842 | if (!access_ok(VERIFY_READ, ptr, length)) | |
843 | return -EFAULT; | |
844 | ||
845 | /* we may also need to update the presumed offsets */ | |
846 | if (!access_ok(VERIFY_WRITE, ptr, length)) | |
847 | return -EFAULT; | |
848 | ||
849 | if (fault_in_pages_readable(ptr, length)) | |
850 | return -EFAULT; | |
851 | } | |
852 | ||
853 | return 0; | |
854 | } | |
855 | ||
432e58ed CW |
856 | static int |
857 | i915_gem_execbuffer_wait_for_flips(struct intel_ring_buffer *ring, | |
858 | struct list_head *objects) | |
859 | { | |
860 | struct drm_i915_gem_object *obj; | |
861 | int flips; | |
862 | ||
863 | /* Check for any pending flips. As we only maintain a flip queue depth | |
864 | * of 1, we can simply insert a WAIT for the next display flip prior | |
865 | * to executing the batch and avoid stalling the CPU. | |
866 | */ | |
867 | flips = 0; | |
868 | list_for_each_entry(obj, objects, exec_list) { | |
869 | if (obj->base.write_domain) | |
870 | flips |= atomic_read(&obj->pending_flip); | |
871 | } | |
872 | if (flips) { | |
873 | int plane, flip_mask, ret; | |
874 | ||
875 | for (plane = 0; flips >> plane; plane++) { | |
876 | if (((flips >> plane) & 1) == 0) | |
877 | continue; | |
878 | ||
879 | if (plane) | |
880 | flip_mask = MI_WAIT_FOR_PLANE_B_FLIP; | |
881 | else | |
882 | flip_mask = MI_WAIT_FOR_PLANE_A_FLIP; | |
883 | ||
884 | ret = intel_ring_begin(ring, 2); | |
885 | if (ret) | |
886 | return ret; | |
887 | ||
888 | intel_ring_emit(ring, MI_WAIT_FOR_EVENT | flip_mask); | |
889 | intel_ring_emit(ring, MI_NOOP); | |
890 | intel_ring_advance(ring); | |
891 | } | |
892 | } | |
893 | ||
894 | return 0; | |
895 | } | |
896 | ||
897 | static void | |
898 | i915_gem_execbuffer_move_to_active(struct list_head *objects, | |
1ec14ad3 CW |
899 | struct intel_ring_buffer *ring, |
900 | u32 seqno) | |
432e58ed CW |
901 | { |
902 | struct drm_i915_gem_object *obj; | |
903 | ||
904 | list_for_each_entry(obj, objects, exec_list) { | |
db53a302 CW |
905 | u32 old_read = obj->base.read_domains; |
906 | u32 old_write = obj->base.write_domain; | |
907 | ||
908 | ||
432e58ed CW |
909 | obj->base.read_domains = obj->base.pending_read_domains; |
910 | obj->base.write_domain = obj->base.pending_write_domain; | |
911 | obj->fenced_gpu_access = obj->pending_fenced_gpu_access; | |
912 | ||
1ec14ad3 | 913 | i915_gem_object_move_to_active(obj, ring, seqno); |
432e58ed CW |
914 | if (obj->base.write_domain) { |
915 | obj->dirty = 1; | |
87ca9c8a | 916 | obj->pending_gpu_write = true; |
432e58ed CW |
917 | list_move_tail(&obj->gpu_write_list, |
918 | &ring->gpu_write_list); | |
919 | intel_mark_busy(ring->dev, obj); | |
920 | } | |
921 | ||
db53a302 | 922 | trace_i915_gem_object_change_domain(obj, old_read, old_write); |
432e58ed CW |
923 | } |
924 | } | |
925 | ||
54cf91dc CW |
926 | static void |
927 | i915_gem_execbuffer_retire_commands(struct drm_device *dev, | |
432e58ed | 928 | struct drm_file *file, |
54cf91dc CW |
929 | struct intel_ring_buffer *ring) |
930 | { | |
432e58ed | 931 | struct drm_i915_gem_request *request; |
b72f3acb | 932 | u32 invalidate; |
54cf91dc | 933 | |
432e58ed CW |
934 | /* |
935 | * Ensure that the commands in the batch buffer are | |
936 | * finished before the interrupt fires. | |
937 | * | |
938 | * The sampler always gets flushed on i965 (sigh). | |
939 | */ | |
b72f3acb | 940 | invalidate = I915_GEM_DOMAIN_COMMAND; |
54cf91dc | 941 | if (INTEL_INFO(dev)->gen >= 4) |
b72f3acb CW |
942 | invalidate |= I915_GEM_DOMAIN_SAMPLER; |
943 | if (ring->flush(ring, invalidate, 0)) { | |
db53a302 | 944 | i915_gem_next_request_seqno(ring); |
b72f3acb CW |
945 | return; |
946 | } | |
54cf91dc | 947 | |
432e58ed CW |
948 | /* Add a breadcrumb for the completion of the batch buffer */ |
949 | request = kzalloc(sizeof(*request), GFP_KERNEL); | |
db53a302 CW |
950 | if (request == NULL || i915_add_request(ring, file, request)) { |
951 | i915_gem_next_request_seqno(ring); | |
432e58ed CW |
952 | kfree(request); |
953 | } | |
954 | } | |
54cf91dc CW |
955 | |
956 | static int | |
957 | i915_gem_do_execbuffer(struct drm_device *dev, void *data, | |
958 | struct drm_file *file, | |
959 | struct drm_i915_gem_execbuffer2 *args, | |
432e58ed | 960 | struct drm_i915_gem_exec_object2 *exec) |
54cf91dc CW |
961 | { |
962 | drm_i915_private_t *dev_priv = dev->dev_private; | |
432e58ed | 963 | struct list_head objects; |
67731b87 | 964 | struct eb_objects *eb; |
54cf91dc CW |
965 | struct drm_i915_gem_object *batch_obj; |
966 | struct drm_clip_rect *cliprects = NULL; | |
54cf91dc | 967 | struct intel_ring_buffer *ring; |
c4e7a414 | 968 | u32 exec_start, exec_len; |
1ec14ad3 | 969 | u32 seqno; |
72bfa19c | 970 | int ret, mode, i; |
54cf91dc | 971 | |
432e58ed CW |
972 | if (!i915_gem_check_execbuffer(args)) { |
973 | DRM_ERROR("execbuf with invalid offset/length\n"); | |
974 | return -EINVAL; | |
975 | } | |
976 | ||
977 | ret = validate_exec_list(exec, args->buffer_count); | |
54cf91dc CW |
978 | if (ret) |
979 | return ret; | |
980 | ||
54cf91dc CW |
981 | switch (args->flags & I915_EXEC_RING_MASK) { |
982 | case I915_EXEC_DEFAULT: | |
983 | case I915_EXEC_RENDER: | |
1ec14ad3 | 984 | ring = &dev_priv->ring[RCS]; |
54cf91dc CW |
985 | break; |
986 | case I915_EXEC_BSD: | |
987 | if (!HAS_BSD(dev)) { | |
988 | DRM_ERROR("execbuf with invalid ring (BSD)\n"); | |
989 | return -EINVAL; | |
990 | } | |
1ec14ad3 | 991 | ring = &dev_priv->ring[VCS]; |
54cf91dc CW |
992 | break; |
993 | case I915_EXEC_BLT: | |
994 | if (!HAS_BLT(dev)) { | |
995 | DRM_ERROR("execbuf with invalid ring (BLT)\n"); | |
996 | return -EINVAL; | |
997 | } | |
1ec14ad3 | 998 | ring = &dev_priv->ring[BCS]; |
54cf91dc CW |
999 | break; |
1000 | default: | |
1001 | DRM_ERROR("execbuf with unknown ring: %d\n", | |
1002 | (int)(args->flags & I915_EXEC_RING_MASK)); | |
1003 | return -EINVAL; | |
1004 | } | |
1005 | ||
72bfa19c CW |
1006 | mode = args->flags & I915_EXEC_CONSTANTS_MASK; |
1007 | switch (mode) { | |
1008 | case I915_EXEC_CONSTANTS_REL_GENERAL: | |
1009 | case I915_EXEC_CONSTANTS_ABSOLUTE: | |
1010 | case I915_EXEC_CONSTANTS_REL_SURFACE: | |
1011 | if (ring == &dev_priv->ring[RCS] && | |
1012 | mode != dev_priv->relative_constants_mode) { | |
1013 | if (INTEL_INFO(dev)->gen < 4) | |
1014 | return -EINVAL; | |
1015 | ||
1016 | if (INTEL_INFO(dev)->gen > 5 && | |
1017 | mode == I915_EXEC_CONSTANTS_REL_SURFACE) | |
1018 | return -EINVAL; | |
1019 | ||
1020 | ret = intel_ring_begin(ring, 4); | |
1021 | if (ret) | |
1022 | return ret; | |
1023 | ||
1024 | intel_ring_emit(ring, MI_NOOP); | |
1025 | intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1)); | |
1026 | intel_ring_emit(ring, INSTPM); | |
1027 | intel_ring_emit(ring, | |
1028 | I915_EXEC_CONSTANTS_MASK << 16 | mode); | |
1029 | intel_ring_advance(ring); | |
1030 | ||
1031 | dev_priv->relative_constants_mode = mode; | |
1032 | } | |
1033 | break; | |
1034 | default: | |
1035 | DRM_ERROR("execbuf with unknown constants: %d\n", mode); | |
1036 | return -EINVAL; | |
1037 | } | |
1038 | ||
54cf91dc CW |
1039 | if (args->buffer_count < 1) { |
1040 | DRM_ERROR("execbuf with %d buffers\n", args->buffer_count); | |
1041 | return -EINVAL; | |
1042 | } | |
54cf91dc CW |
1043 | |
1044 | if (args->num_cliprects != 0) { | |
1ec14ad3 | 1045 | if (ring != &dev_priv->ring[RCS]) { |
c4e7a414 CW |
1046 | DRM_ERROR("clip rectangles are only valid with the render ring\n"); |
1047 | return -EINVAL; | |
1048 | } | |
1049 | ||
432e58ed | 1050 | cliprects = kmalloc(args->num_cliprects * sizeof(*cliprects), |
54cf91dc CW |
1051 | GFP_KERNEL); |
1052 | if (cliprects == NULL) { | |
1053 | ret = -ENOMEM; | |
1054 | goto pre_mutex_err; | |
1055 | } | |
1056 | ||
432e58ed CW |
1057 | if (copy_from_user(cliprects, |
1058 | (struct drm_clip_rect __user *)(uintptr_t) | |
1059 | args->cliprects_ptr, | |
1060 | sizeof(*cliprects)*args->num_cliprects)) { | |
54cf91dc CW |
1061 | ret = -EFAULT; |
1062 | goto pre_mutex_err; | |
1063 | } | |
1064 | } | |
1065 | ||
54cf91dc CW |
1066 | ret = i915_mutex_lock_interruptible(dev); |
1067 | if (ret) | |
1068 | goto pre_mutex_err; | |
1069 | ||
1070 | if (dev_priv->mm.suspended) { | |
1071 | mutex_unlock(&dev->struct_mutex); | |
1072 | ret = -EBUSY; | |
1073 | goto pre_mutex_err; | |
1074 | } | |
1075 | ||
67731b87 CW |
1076 | eb = eb_create(args->buffer_count); |
1077 | if (eb == NULL) { | |
1078 | mutex_unlock(&dev->struct_mutex); | |
1079 | ret = -ENOMEM; | |
1080 | goto pre_mutex_err; | |
1081 | } | |
1082 | ||
54cf91dc | 1083 | /* Look up object handles */ |
432e58ed | 1084 | INIT_LIST_HEAD(&objects); |
54cf91dc CW |
1085 | for (i = 0; i < args->buffer_count; i++) { |
1086 | struct drm_i915_gem_object *obj; | |
1087 | ||
432e58ed CW |
1088 | obj = to_intel_bo(drm_gem_object_lookup(dev, file, |
1089 | exec[i].handle)); | |
54cf91dc CW |
1090 | if (obj == NULL) { |
1091 | DRM_ERROR("Invalid object handle %d at index %d\n", | |
432e58ed | 1092 | exec[i].handle, i); |
54cf91dc | 1093 | /* prevent error path from reading uninitialized data */ |
54cf91dc CW |
1094 | ret = -ENOENT; |
1095 | goto err; | |
1096 | } | |
54cf91dc | 1097 | |
432e58ed CW |
1098 | if (!list_empty(&obj->exec_list)) { |
1099 | DRM_ERROR("Object %p [handle %d, index %d] appears more than once in object list\n", | |
1100 | obj, exec[i].handle, i); | |
54cf91dc CW |
1101 | ret = -EINVAL; |
1102 | goto err; | |
1103 | } | |
432e58ed CW |
1104 | |
1105 | list_add_tail(&obj->exec_list, &objects); | |
67731b87 | 1106 | obj->exec_handle = exec[i].handle; |
6fe4f140 | 1107 | obj->exec_entry = &exec[i]; |
67731b87 | 1108 | eb_add_object(eb, obj); |
54cf91dc CW |
1109 | } |
1110 | ||
6fe4f140 CW |
1111 | /* take note of the batch buffer before we might reorder the lists */ |
1112 | batch_obj = list_entry(objects.prev, | |
1113 | struct drm_i915_gem_object, | |
1114 | exec_list); | |
1115 | ||
54cf91dc | 1116 | /* Move the objects en-masse into the GTT, evicting if necessary. */ |
6fe4f140 | 1117 | ret = i915_gem_execbuffer_reserve(ring, file, &objects); |
54cf91dc CW |
1118 | if (ret) |
1119 | goto err; | |
1120 | ||
1121 | /* The objects are in their final locations, apply the relocations. */ | |
6fe4f140 | 1122 | ret = i915_gem_execbuffer_relocate(dev, eb, &objects); |
54cf91dc CW |
1123 | if (ret) { |
1124 | if (ret == -EFAULT) { | |
d9e86c0e | 1125 | ret = i915_gem_execbuffer_relocate_slow(dev, file, ring, |
67731b87 CW |
1126 | &objects, eb, |
1127 | exec, | |
54cf91dc CW |
1128 | args->buffer_count); |
1129 | BUG_ON(!mutex_is_locked(&dev->struct_mutex)); | |
1130 | } | |
1131 | if (ret) | |
1132 | goto err; | |
1133 | } | |
1134 | ||
1135 | /* Set the pending read domains for the batch buffer to COMMAND */ | |
54cf91dc CW |
1136 | if (batch_obj->base.pending_write_domain) { |
1137 | DRM_ERROR("Attempting to use self-modifying batch buffer\n"); | |
1138 | ret = -EINVAL; | |
1139 | goto err; | |
1140 | } | |
1141 | batch_obj->base.pending_read_domains |= I915_GEM_DOMAIN_COMMAND; | |
1142 | ||
432e58ed CW |
1143 | ret = i915_gem_execbuffer_move_to_gpu(ring, &objects); |
1144 | if (ret) | |
54cf91dc | 1145 | goto err; |
54cf91dc | 1146 | |
432e58ed | 1147 | ret = i915_gem_execbuffer_wait_for_flips(ring, &objects); |
54cf91dc CW |
1148 | if (ret) |
1149 | goto err; | |
1150 | ||
db53a302 | 1151 | seqno = i915_gem_next_request_seqno(ring); |
076e2c0e | 1152 | for (i = 0; i < ARRAY_SIZE(ring->sync_seqno); i++) { |
1ec14ad3 CW |
1153 | if (seqno < ring->sync_seqno[i]) { |
1154 | /* The GPU can not handle its semaphore value wrapping, | |
1155 | * so every billion or so execbuffers, we need to stall | |
1156 | * the GPU in order to reset the counters. | |
1157 | */ | |
1158 | ret = i915_gpu_idle(dev); | |
1159 | if (ret) | |
1160 | goto err; | |
1161 | ||
1162 | BUG_ON(ring->sync_seqno[i]); | |
1163 | } | |
1164 | } | |
1165 | ||
db53a302 CW |
1166 | trace_i915_gem_ring_dispatch(ring, seqno); |
1167 | ||
c4e7a414 CW |
1168 | exec_start = batch_obj->gtt_offset + args->batch_start_offset; |
1169 | exec_len = args->batch_len; | |
1170 | if (cliprects) { | |
1171 | for (i = 0; i < args->num_cliprects; i++) { | |
1172 | ret = i915_emit_box(dev, &cliprects[i], | |
1173 | args->DR1, args->DR4); | |
1174 | if (ret) | |
1175 | goto err; | |
1176 | ||
1177 | ret = ring->dispatch_execbuffer(ring, | |
1178 | exec_start, exec_len); | |
1179 | if (ret) | |
1180 | goto err; | |
1181 | } | |
1182 | } else { | |
1183 | ret = ring->dispatch_execbuffer(ring, exec_start, exec_len); | |
1184 | if (ret) | |
1185 | goto err; | |
1186 | } | |
54cf91dc | 1187 | |
1ec14ad3 | 1188 | i915_gem_execbuffer_move_to_active(&objects, ring, seqno); |
432e58ed | 1189 | i915_gem_execbuffer_retire_commands(dev, file, ring); |
54cf91dc CW |
1190 | |
1191 | err: | |
67731b87 | 1192 | eb_destroy(eb); |
432e58ed CW |
1193 | while (!list_empty(&objects)) { |
1194 | struct drm_i915_gem_object *obj; | |
1195 | ||
1196 | obj = list_first_entry(&objects, | |
1197 | struct drm_i915_gem_object, | |
1198 | exec_list); | |
1199 | list_del_init(&obj->exec_list); | |
1200 | drm_gem_object_unreference(&obj->base); | |
54cf91dc CW |
1201 | } |
1202 | ||
1203 | mutex_unlock(&dev->struct_mutex); | |
1204 | ||
1205 | pre_mutex_err: | |
54cf91dc | 1206 | kfree(cliprects); |
54cf91dc CW |
1207 | return ret; |
1208 | } | |
1209 | ||
1210 | /* | |
1211 | * Legacy execbuffer just creates an exec2 list from the original exec object | |
1212 | * list array and passes it to the real function. | |
1213 | */ | |
1214 | int | |
1215 | i915_gem_execbuffer(struct drm_device *dev, void *data, | |
1216 | struct drm_file *file) | |
1217 | { | |
1218 | struct drm_i915_gem_execbuffer *args = data; | |
1219 | struct drm_i915_gem_execbuffer2 exec2; | |
1220 | struct drm_i915_gem_exec_object *exec_list = NULL; | |
1221 | struct drm_i915_gem_exec_object2 *exec2_list = NULL; | |
1222 | int ret, i; | |
1223 | ||
54cf91dc CW |
1224 | if (args->buffer_count < 1) { |
1225 | DRM_ERROR("execbuf with %d buffers\n", args->buffer_count); | |
1226 | return -EINVAL; | |
1227 | } | |
1228 | ||
1229 | /* Copy in the exec list from userland */ | |
1230 | exec_list = drm_malloc_ab(sizeof(*exec_list), args->buffer_count); | |
1231 | exec2_list = drm_malloc_ab(sizeof(*exec2_list), args->buffer_count); | |
1232 | if (exec_list == NULL || exec2_list == NULL) { | |
1233 | DRM_ERROR("Failed to allocate exec list for %d buffers\n", | |
1234 | args->buffer_count); | |
1235 | drm_free_large(exec_list); | |
1236 | drm_free_large(exec2_list); | |
1237 | return -ENOMEM; | |
1238 | } | |
1239 | ret = copy_from_user(exec_list, | |
1240 | (struct drm_i915_relocation_entry __user *) | |
1241 | (uintptr_t) args->buffers_ptr, | |
1242 | sizeof(*exec_list) * args->buffer_count); | |
1243 | if (ret != 0) { | |
1244 | DRM_ERROR("copy %d exec entries failed %d\n", | |
1245 | args->buffer_count, ret); | |
1246 | drm_free_large(exec_list); | |
1247 | drm_free_large(exec2_list); | |
1248 | return -EFAULT; | |
1249 | } | |
1250 | ||
1251 | for (i = 0; i < args->buffer_count; i++) { | |
1252 | exec2_list[i].handle = exec_list[i].handle; | |
1253 | exec2_list[i].relocation_count = exec_list[i].relocation_count; | |
1254 | exec2_list[i].relocs_ptr = exec_list[i].relocs_ptr; | |
1255 | exec2_list[i].alignment = exec_list[i].alignment; | |
1256 | exec2_list[i].offset = exec_list[i].offset; | |
1257 | if (INTEL_INFO(dev)->gen < 4) | |
1258 | exec2_list[i].flags = EXEC_OBJECT_NEEDS_FENCE; | |
1259 | else | |
1260 | exec2_list[i].flags = 0; | |
1261 | } | |
1262 | ||
1263 | exec2.buffers_ptr = args->buffers_ptr; | |
1264 | exec2.buffer_count = args->buffer_count; | |
1265 | exec2.batch_start_offset = args->batch_start_offset; | |
1266 | exec2.batch_len = args->batch_len; | |
1267 | exec2.DR1 = args->DR1; | |
1268 | exec2.DR4 = args->DR4; | |
1269 | exec2.num_cliprects = args->num_cliprects; | |
1270 | exec2.cliprects_ptr = args->cliprects_ptr; | |
1271 | exec2.flags = I915_EXEC_RENDER; | |
1272 | ||
1273 | ret = i915_gem_do_execbuffer(dev, data, file, &exec2, exec2_list); | |
1274 | if (!ret) { | |
1275 | /* Copy the new buffer offsets back to the user's exec list. */ | |
1276 | for (i = 0; i < args->buffer_count; i++) | |
1277 | exec_list[i].offset = exec2_list[i].offset; | |
1278 | /* ... and back out to userspace */ | |
1279 | ret = copy_to_user((struct drm_i915_relocation_entry __user *) | |
1280 | (uintptr_t) args->buffers_ptr, | |
1281 | exec_list, | |
1282 | sizeof(*exec_list) * args->buffer_count); | |
1283 | if (ret) { | |
1284 | ret = -EFAULT; | |
1285 | DRM_ERROR("failed to copy %d exec entries " | |
1286 | "back to user (%d)\n", | |
1287 | args->buffer_count, ret); | |
1288 | } | |
1289 | } | |
1290 | ||
1291 | drm_free_large(exec_list); | |
1292 | drm_free_large(exec2_list); | |
1293 | return ret; | |
1294 | } | |
1295 | ||
1296 | int | |
1297 | i915_gem_execbuffer2(struct drm_device *dev, void *data, | |
1298 | struct drm_file *file) | |
1299 | { | |
1300 | struct drm_i915_gem_execbuffer2 *args = data; | |
1301 | struct drm_i915_gem_exec_object2 *exec2_list = NULL; | |
1302 | int ret; | |
1303 | ||
54cf91dc CW |
1304 | if (args->buffer_count < 1) { |
1305 | DRM_ERROR("execbuf2 with %d buffers\n", args->buffer_count); | |
1306 | return -EINVAL; | |
1307 | } | |
1308 | ||
1309 | exec2_list = drm_malloc_ab(sizeof(*exec2_list), args->buffer_count); | |
1310 | if (exec2_list == NULL) { | |
1311 | DRM_ERROR("Failed to allocate exec list for %d buffers\n", | |
1312 | args->buffer_count); | |
1313 | return -ENOMEM; | |
1314 | } | |
1315 | ret = copy_from_user(exec2_list, | |
1316 | (struct drm_i915_relocation_entry __user *) | |
1317 | (uintptr_t) args->buffers_ptr, | |
1318 | sizeof(*exec2_list) * args->buffer_count); | |
1319 | if (ret != 0) { | |
1320 | DRM_ERROR("copy %d exec entries failed %d\n", | |
1321 | args->buffer_count, ret); | |
1322 | drm_free_large(exec2_list); | |
1323 | return -EFAULT; | |
1324 | } | |
1325 | ||
1326 | ret = i915_gem_do_execbuffer(dev, data, file, args, exec2_list); | |
1327 | if (!ret) { | |
1328 | /* Copy the new buffer offsets back to the user's exec list. */ | |
1329 | ret = copy_to_user((struct drm_i915_relocation_entry __user *) | |
1330 | (uintptr_t) args->buffers_ptr, | |
1331 | exec2_list, | |
1332 | sizeof(*exec2_list) * args->buffer_count); | |
1333 | if (ret) { | |
1334 | ret = -EFAULT; | |
1335 | DRM_ERROR("failed to copy %d exec entries " | |
1336 | "back to user (%d)\n", | |
1337 | args->buffer_count, ret); | |
1338 | } | |
1339 | } | |
1340 | ||
1341 | drm_free_large(exec2_list); | |
1342 | return ret; | |
1343 | } |