drivers/gpu/drm/i915/i915_gem_execbuffer.c
/*
 * Copyright © 2008,2010 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Eric Anholt <eric@anholt.net>
 *    Chris Wilson <chris@chris-wilson.co.uk>
 *
 */

#include <drm/drmP.h>
#include <drm/i915_drm.h>
#include "i915_drv.h"
#include "i915_trace.h"
#include "intel_drv.h"
#include <linux/dma_remapping.h>

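/*
 * eb_objects tracks the objects referenced by one execbuffer call. Handles
 * are resolved either through a direct lookup table (eb->and < 0, with
 * -eb->and entries) or through a small hash table (eb->and >= 0 is the
 * bucket mask); see eb_create() below for how the mode is chosen.
 */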
struct eb_objects {
	struct list_head objects;
	int and;
	union {
		struct drm_i915_gem_object *lut[0];
		struct hlist_head buckets[0];
	};
};

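/*
 * Allocate the lookup structure. When userspace passes I915_EXEC_HANDLE_LUT
 * we first try a flat per-index table; if that allocation fails (or the
 * flag is absent) we fall back to a power-of-two hash table sized at most
 * half a page, shrunk so it is not grossly larger than the buffer count.
 */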
static struct eb_objects *
eb_create(struct drm_i915_gem_execbuffer2 *args)
{
	struct eb_objects *eb = NULL;

	if (args->flags & I915_EXEC_HANDLE_LUT) {
		int size = args->buffer_count;
		size *= sizeof(struct drm_i915_gem_object *);
		size += sizeof(struct eb_objects);
		eb = kmalloc(size, GFP_TEMPORARY | __GFP_NOWARN | __GFP_NORETRY);
	}

	if (eb == NULL) {
		int size = args->buffer_count;
		int count = PAGE_SIZE / sizeof(struct hlist_head) / 2;
		BUILD_BUG_ON_NOT_POWER_OF_2(PAGE_SIZE / sizeof(struct hlist_head));
		while (count > 2*size)
			count >>= 1;
		eb = kzalloc(count*sizeof(struct hlist_head) +
			     sizeof(struct eb_objects),
			     GFP_TEMPORARY);
		if (eb == NULL)
			return eb;

		eb->and = count - 1;
	} else
		eb->and = -args->buffer_count;

	INIT_LIST_HEAD(&eb->objects);
	return eb;
}

static void
eb_reset(struct eb_objects *eb)
{
	if (eb->and >= 0)
		memset(eb->buckets, 0, (eb->and+1)*sizeof(struct hlist_head));
}

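/*
 * Resolve every handle in the exec list to its GEM object under the file's
 * table_lock, taking a reference on each. A handle that appears twice in
 * the same execbuffer is rejected with -EINVAL; exec_list membership is
 * what detects the duplicate.
 */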
static int
eb_lookup_objects(struct eb_objects *eb,
		  struct drm_i915_gem_exec_object2 *exec,
		  const struct drm_i915_gem_execbuffer2 *args,
		  struct drm_file *file)
{
	int i;

	spin_lock(&file->table_lock);
	for (i = 0; i < args->buffer_count; i++) {
		struct drm_i915_gem_object *obj;

		obj = to_intel_bo(idr_find(&file->object_idr, exec[i].handle));
		if (obj == NULL) {
			spin_unlock(&file->table_lock);
			DRM_DEBUG("Invalid object handle %d at index %d\n",
				  exec[i].handle, i);
			return -ENOENT;
		}

		if (!list_empty(&obj->exec_list)) {
			spin_unlock(&file->table_lock);
			DRM_DEBUG("Object %p [handle %d, index %d] appears more than once in object list\n",
				  obj, exec[i].handle, i);
			return -EINVAL;
		}

		drm_gem_object_reference(&obj->base);
		list_add_tail(&obj->exec_list, &eb->objects);

		obj->exec_entry = &exec[i];
		if (eb->and < 0) {
			eb->lut[i] = obj;
		} else {
			uint32_t handle = args->flags & I915_EXEC_HANDLE_LUT ? i : exec[i].handle;
			obj->exec_handle = handle;
			hlist_add_head(&obj->exec_node,
				       &eb->buckets[handle & eb->and]);
		}
	}
	spin_unlock(&file->table_lock);

	return 0;
}

static struct drm_i915_gem_object *
eb_get_object(struct eb_objects *eb, unsigned long handle)
{
	if (eb->and < 0) {
		if (handle >= -eb->and)
			return NULL;
		return eb->lut[handle];
	} else {
		struct hlist_head *head;
		struct hlist_node *node;

		head = &eb->buckets[handle & eb->and];
		hlist_for_each(node, head) {
			struct drm_i915_gem_object *obj;

			obj = hlist_entry(node, struct drm_i915_gem_object, exec_node);
			if (obj->exec_handle == handle)
				return obj;
		}
		return NULL;
	}
}

static void
eb_destroy(struct eb_objects *eb)
{
	while (!list_empty(&eb->objects)) {
		struct drm_i915_gem_object *obj;

		obj = list_first_entry(&eb->objects,
				       struct drm_i915_gem_object,
				       exec_list);
		list_del_init(&obj->exec_list);
		drm_gem_object_unreference(&obj->base);
	}
	kfree(eb);
}

static inline int use_cpu_reloc(struct drm_i915_gem_object *obj)
{
	return (obj->base.write_domain == I915_GEM_DOMAIN_CPU ||
		!obj->map_and_fenceable ||
		obj->cache_level != I915_CACHE_NONE);
}

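/*
 * Apply a relocation through the CPU: move the object to the CPU write
 * domain, then poke the 32-bit value directly into the backing page via a
 * temporary atomic kmap.
 */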
static int
relocate_entry_cpu(struct drm_i915_gem_object *obj,
		   struct drm_i915_gem_relocation_entry *reloc)
{
	uint32_t page_offset = offset_in_page(reloc->offset);
	char *vaddr;
	int ret;

	ret = i915_gem_object_set_to_cpu_domain(obj, true);
	if (ret)
		return ret;

	vaddr = kmap_atomic(i915_gem_object_get_page(obj,
						     reloc->offset >> PAGE_SHIFT));
	*(uint32_t *)(vaddr + page_offset) = reloc->delta;
	kunmap_atomic(vaddr);

	return 0;
}

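/*
 * Apply a relocation through the GTT: flush the object to the GTT domain,
 * drop any fence, then write the value through an atomic write-combining
 * mapping of the aperture page that contains the relocation.
 */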
static int
relocate_entry_gtt(struct drm_i915_gem_object *obj,
		   struct drm_i915_gem_relocation_entry *reloc)
{
	struct drm_device *dev = obj->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t __iomem *reloc_entry;
	void __iomem *reloc_page;
	int ret;

	ret = i915_gem_object_set_to_gtt_domain(obj, true);
	if (ret)
		return ret;

	ret = i915_gem_object_put_fence(obj);
	if (ret)
		return ret;

	/* Map the page containing the relocation we're going to perform. */
	reloc->offset += i915_gem_obj_ggtt_offset(obj);
	reloc_page = io_mapping_map_atomic_wc(dev_priv->gtt.mappable,
					      reloc->offset & PAGE_MASK);
	reloc_entry = (uint32_t __iomem *)
		(reloc_page + offset_in_page(reloc->offset));
	iowrite32(reloc->delta, reloc_entry);
	io_mapping_unmap_atomic(reloc_page);

	return 0;
}

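/*
 * Process a single relocation entry: look up the target object, validate
 * the requested GPU domains and the relocation address, and if the
 * presumed offset is stale rewrite the dword via the CPU or GTT path.
 */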
static int
i915_gem_execbuffer_relocate_entry(struct drm_i915_gem_object *obj,
				   struct eb_objects *eb,
				   struct drm_i915_gem_relocation_entry *reloc,
				   struct i915_address_space *vm)
{
	struct drm_device *dev = obj->base.dev;
	struct drm_gem_object *target_obj;
	struct drm_i915_gem_object *target_i915_obj;
	uint32_t target_offset;
	int ret = -EINVAL;

	/* we already hold a reference to all valid objects */
	target_obj = &eb_get_object(eb, reloc->target_handle)->base;
	if (unlikely(target_obj == NULL))
		return -ENOENT;

	target_i915_obj = to_intel_bo(target_obj);
	target_offset = i915_gem_obj_ggtt_offset(target_i915_obj);

	/* Sandybridge PPGTT errata: We need a global gtt mapping for MI and
	 * pipe_control writes because the gpu doesn't properly redirect them
	 * through the ppgtt for non-secure batchbuffers. */
	if (unlikely(IS_GEN6(dev) &&
	    reloc->write_domain == I915_GEM_DOMAIN_INSTRUCTION &&
	    !target_i915_obj->has_global_gtt_mapping)) {
		i915_gem_gtt_bind_object(target_i915_obj,
					 target_i915_obj->cache_level);
	}

	/* Validate that the target is in a valid r/w GPU domain */
	if (unlikely(reloc->write_domain & (reloc->write_domain - 1))) {
		DRM_DEBUG("reloc with multiple write domains: "
			  "obj %p target %d offset %d "
			  "read %08x write %08x",
			  obj, reloc->target_handle,
			  (int) reloc->offset,
			  reloc->read_domains,
			  reloc->write_domain);
		return ret;
	}
	if (unlikely((reloc->write_domain | reloc->read_domains)
		     & ~I915_GEM_GPU_DOMAINS)) {
		DRM_DEBUG("reloc with read/write non-GPU domains: "
			  "obj %p target %d offset %d "
			  "read %08x write %08x",
			  obj, reloc->target_handle,
			  (int) reloc->offset,
			  reloc->read_domains,
			  reloc->write_domain);
		return ret;
	}

	target_obj->pending_read_domains |= reloc->read_domains;
	target_obj->pending_write_domain |= reloc->write_domain;

	/* If the relocation already has the right value in it, no
	 * more work needs to be done.
	 */
	if (target_offset == reloc->presumed_offset)
		return 0;

	/* Check that the relocation address is valid... */
	if (unlikely(reloc->offset > obj->base.size - 4)) {
		DRM_DEBUG("Relocation beyond object bounds: "
			  "obj %p target %d offset %d size %d.\n",
			  obj, reloc->target_handle,
			  (int) reloc->offset,
			  (int) obj->base.size);
		return ret;
	}
	if (unlikely(reloc->offset & 3)) {
		DRM_DEBUG("Relocation not 4-byte aligned: "
			  "obj %p target %d offset %d.\n",
			  obj, reloc->target_handle,
			  (int) reloc->offset);
		return ret;
	}

	/* We can't wait for rendering with pagefaults disabled */
	if (obj->active && in_atomic())
		return -EFAULT;

	reloc->delta += target_offset;
	if (use_cpu_reloc(obj))
		ret = relocate_entry_cpu(obj, reloc);
	else
		ret = relocate_entry_gtt(obj, reloc);

	if (ret)
		return ret;

	/* and update the user's relocation entry */
	reloc->presumed_offset = target_offset;

	return 0;
}

static int
i915_gem_execbuffer_relocate_object(struct drm_i915_gem_object *obj,
				    struct eb_objects *eb,
				    struct i915_address_space *vm)
{
#define N_RELOC(x) ((x) / sizeof(struct drm_i915_gem_relocation_entry))
	struct drm_i915_gem_relocation_entry stack_reloc[N_RELOC(512)];
	struct drm_i915_gem_relocation_entry __user *user_relocs;
	struct drm_i915_gem_exec_object2 *entry = obj->exec_entry;
	int remain, ret;

	user_relocs = to_user_ptr(entry->relocs_ptr);

	remain = entry->relocation_count;
	while (remain) {
		struct drm_i915_gem_relocation_entry *r = stack_reloc;
		int count = remain;
		if (count > ARRAY_SIZE(stack_reloc))
			count = ARRAY_SIZE(stack_reloc);
		remain -= count;

		if (__copy_from_user_inatomic(r, user_relocs, count*sizeof(r[0])))
			return -EFAULT;

		do {
			u64 offset = r->presumed_offset;

			ret = i915_gem_execbuffer_relocate_entry(obj, eb, r,
								 vm);
			if (ret)
				return ret;

			if (r->presumed_offset != offset &&
			    __copy_to_user_inatomic(&user_relocs->presumed_offset,
						    &r->presumed_offset,
						    sizeof(r->presumed_offset))) {
				return -EFAULT;
			}

			user_relocs++;
			r++;
		} while (--count);
	}

	return 0;
#undef N_RELOC
}

static int
i915_gem_execbuffer_relocate_object_slow(struct drm_i915_gem_object *obj,
					 struct eb_objects *eb,
					 struct drm_i915_gem_relocation_entry *relocs,
					 struct i915_address_space *vm)
{
	const struct drm_i915_gem_exec_object2 *entry = obj->exec_entry;
	int i, ret;

	for (i = 0; i < entry->relocation_count; i++) {
		ret = i915_gem_execbuffer_relocate_entry(obj, eb, &relocs[i],
							 vm);
		if (ret)
			return ret;
	}

	return 0;
}

static int
i915_gem_execbuffer_relocate(struct eb_objects *eb,
			     struct i915_address_space *vm)
{
	struct drm_i915_gem_object *obj;
	int ret = 0;

	/* This is the fast path and we cannot handle a pagefault whilst
	 * holding the struct mutex lest the user pass in the relocations
	 * contained within a mmaped bo. For in such a case the page
	 * fault handler would call i915_gem_fault() and we would try to
	 * acquire the struct mutex again. Obviously this is bad and so
	 * lockdep complains vehemently.
	 */
	pagefault_disable();
	list_for_each_entry(obj, &eb->objects, exec_list) {
		ret = i915_gem_execbuffer_relocate_object(obj, eb, vm);
		if (ret)
			break;
	}
	pagefault_enable();

	return ret;
}

#define  __EXEC_OBJECT_HAS_PIN (1<<31)
#define  __EXEC_OBJECT_HAS_FENCE (1<<30)

static int
need_reloc_mappable(struct drm_i915_gem_object *obj)
{
	struct drm_i915_gem_exec_object2 *entry = obj->exec_entry;
	return entry->relocation_count && !use_cpu_reloc(obj);
}

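/*
 * Pin one object into the address space for execution, acquiring a fence
 * register and an aliasing-ppgtt mapping when needed, and note whether its
 * offset moved so the caller knows relocations must be (re)applied.
 */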
static int
i915_gem_execbuffer_reserve_object(struct drm_i915_gem_object *obj,
				   struct intel_ring_buffer *ring,
				   struct i915_address_space *vm,
				   bool *need_reloc)
{
	struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
	struct drm_i915_gem_exec_object2 *entry = obj->exec_entry;
	bool has_fenced_gpu_access = INTEL_INFO(ring->dev)->gen < 4;
	bool need_fence, need_mappable;
	int ret;

	need_fence =
		has_fenced_gpu_access &&
		entry->flags & EXEC_OBJECT_NEEDS_FENCE &&
		obj->tiling_mode != I915_TILING_NONE;
	need_mappable = need_fence || need_reloc_mappable(obj);

	ret = i915_gem_object_pin(obj, vm, entry->alignment, need_mappable,
				  false);
	if (ret)
		return ret;

	entry->flags |= __EXEC_OBJECT_HAS_PIN;

	if (has_fenced_gpu_access) {
		if (entry->flags & EXEC_OBJECT_NEEDS_FENCE) {
			ret = i915_gem_object_get_fence(obj);
			if (ret)
				return ret;

			if (i915_gem_object_pin_fence(obj))
				entry->flags |= __EXEC_OBJECT_HAS_FENCE;

			obj->pending_fenced_gpu_access = true;
		}
	}

	/* Ensure ppgtt mapping exists if needed */
	if (dev_priv->mm.aliasing_ppgtt && !obj->has_aliasing_ppgtt_mapping) {
		i915_ppgtt_bind_object(dev_priv->mm.aliasing_ppgtt,
				       obj, obj->cache_level);

		obj->has_aliasing_ppgtt_mapping = 1;
	}

	if (entry->offset != i915_gem_obj_offset(obj, vm)) {
		entry->offset = i915_gem_obj_offset(obj, vm);
		*need_reloc = true;
	}

	if (entry->flags & EXEC_OBJECT_WRITE) {
		obj->base.pending_read_domains = I915_GEM_DOMAIN_RENDER;
		obj->base.pending_write_domain = I915_GEM_DOMAIN_RENDER;
	}

	if (entry->flags & EXEC_OBJECT_NEEDS_GTT &&
	    !obj->has_global_gtt_mapping)
		i915_gem_gtt_bind_object(obj, obj->cache_level);

	return 0;
}

static void
i915_gem_execbuffer_unreserve_object(struct drm_i915_gem_object *obj)
{
	struct drm_i915_gem_exec_object2 *entry;

	if (!i915_gem_obj_bound_any(obj))
		return;

	entry = obj->exec_entry;

	if (entry->flags & __EXEC_OBJECT_HAS_FENCE)
		i915_gem_object_unpin_fence(obj);

	if (entry->flags & __EXEC_OBJECT_HAS_PIN)
		i915_gem_object_unpin(obj);

	entry->flags &= ~(__EXEC_OBJECT_HAS_FENCE | __EXEC_OBJECT_HAS_PIN);
}

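/*
 * Reserve address space for the whole object list. Mappable/fenceable
 * objects are sorted to the front, then the loop below pins everything in
 * place, evicting on -ENOSPC and retrying once with a fully idled GTT.
 */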
static int
i915_gem_execbuffer_reserve(struct intel_ring_buffer *ring,
			    struct list_head *objects,
			    struct i915_address_space *vm,
			    bool *need_relocs)
{
	struct drm_i915_gem_object *obj;
	struct list_head ordered_objects;
	bool has_fenced_gpu_access = INTEL_INFO(ring->dev)->gen < 4;
	int retry;

	INIT_LIST_HEAD(&ordered_objects);
	while (!list_empty(objects)) {
		struct drm_i915_gem_exec_object2 *entry;
		bool need_fence, need_mappable;

		obj = list_first_entry(objects,
				       struct drm_i915_gem_object,
				       exec_list);
		entry = obj->exec_entry;

		need_fence =
			has_fenced_gpu_access &&
			entry->flags & EXEC_OBJECT_NEEDS_FENCE &&
			obj->tiling_mode != I915_TILING_NONE;
		need_mappable = need_fence || need_reloc_mappable(obj);

		if (need_mappable)
			list_move(&obj->exec_list, &ordered_objects);
		else
			list_move_tail(&obj->exec_list, &ordered_objects);

		obj->base.pending_read_domains = I915_GEM_GPU_DOMAINS & ~I915_GEM_DOMAIN_COMMAND;
		obj->base.pending_write_domain = 0;
		obj->pending_fenced_gpu_access = false;
	}
	list_splice(&ordered_objects, objects);

	/* Attempt to pin all of the buffers into the GTT.
	 * This is done in 3 phases:
	 *
	 * 1a. Unbind all objects that do not match the GTT constraints for
	 *     the execbuffer (fenceable, mappable, alignment etc).
	 * 1b. Increment pin count for already bound objects.
	 * 2.  Bind new objects.
	 * 3.  Decrement pin count.
	 *
	 * This avoids unnecessary unbinding of later objects in order to make
	 * room for the earlier objects *unless* we need to defragment.
	 */
	retry = 0;
	do {
		int ret = 0;

		/* Unbind any ill-fitting objects or pin. */
		list_for_each_entry(obj, objects, exec_list) {
			struct drm_i915_gem_exec_object2 *entry = obj->exec_entry;
			bool need_fence, need_mappable;
			u32 obj_offset;

			if (!i915_gem_obj_bound(obj, vm))
				continue;

			obj_offset = i915_gem_obj_offset(obj, vm);
			need_fence =
				has_fenced_gpu_access &&
				entry->flags & EXEC_OBJECT_NEEDS_FENCE &&
				obj->tiling_mode != I915_TILING_NONE;
			need_mappable = need_fence || need_reloc_mappable(obj);

			WARN_ON((need_mappable || need_fence) &&
				!i915_is_ggtt(vm));

			if ((entry->alignment &&
			     obj_offset & (entry->alignment - 1)) ||
			    (need_mappable && !obj->map_and_fenceable))
				ret = i915_vma_unbind(i915_gem_obj_to_vma(obj, vm));
			else
				ret = i915_gem_execbuffer_reserve_object(obj, ring, vm, need_relocs);
			if (ret)
				goto err;
		}

		/* Bind fresh objects */
		list_for_each_entry(obj, objects, exec_list) {
			if (i915_gem_obj_bound(obj, vm))
				continue;

			ret = i915_gem_execbuffer_reserve_object(obj, ring, vm, need_relocs);
			if (ret)
				goto err;
		}

err:		/* Decrement pin count for bound objects */
		list_for_each_entry(obj, objects, exec_list)
			i915_gem_execbuffer_unreserve_object(obj);

		if (ret != -ENOSPC || retry++)
			return ret;

		ret = i915_gem_evict_everything(ring->dev);
		if (ret)
			return ret;
	} while (1);
}

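/*
 * Slow path for relocations, entered when the atomic fast path faults:
 * drop struct_mutex, copy all relocation entries from userspace with
 * faulting allowed, invalidate the user's presumed offsets, then retake
 * the lock, re-look-up the objects and apply the relocations from the
 * kernel copy.
 */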
static int
i915_gem_execbuffer_relocate_slow(struct drm_device *dev,
				  struct drm_i915_gem_execbuffer2 *args,
				  struct drm_file *file,
				  struct intel_ring_buffer *ring,
				  struct eb_objects *eb,
				  struct drm_i915_gem_exec_object2 *exec,
				  struct i915_address_space *vm)
{
	struct drm_i915_gem_relocation_entry *reloc;
	struct drm_i915_gem_object *obj;
	bool need_relocs;
	int *reloc_offset;
	int i, total, ret;
	int count = args->buffer_count;

	/* We may process another execbuffer during the unlock... */
	while (!list_empty(&eb->objects)) {
		obj = list_first_entry(&eb->objects,
				       struct drm_i915_gem_object,
				       exec_list);
		list_del_init(&obj->exec_list);
		drm_gem_object_unreference(&obj->base);
	}

	mutex_unlock(&dev->struct_mutex);

	total = 0;
	for (i = 0; i < count; i++)
		total += exec[i].relocation_count;

	reloc_offset = drm_malloc_ab(count, sizeof(*reloc_offset));
	reloc = drm_malloc_ab(total, sizeof(*reloc));
	if (reloc == NULL || reloc_offset == NULL) {
		drm_free_large(reloc);
		drm_free_large(reloc_offset);
		mutex_lock(&dev->struct_mutex);
		return -ENOMEM;
	}

	total = 0;
	for (i = 0; i < count; i++) {
		struct drm_i915_gem_relocation_entry __user *user_relocs;
		u64 invalid_offset = (u64)-1;
		int j;

		user_relocs = to_user_ptr(exec[i].relocs_ptr);

		if (copy_from_user(reloc+total, user_relocs,
				   exec[i].relocation_count * sizeof(*reloc))) {
			ret = -EFAULT;
			mutex_lock(&dev->struct_mutex);
			goto err;
		}

		/* As we do not update the known relocation offsets after
		 * relocating (due to the complexities in lock handling),
		 * we need to mark them as invalid now so that we force the
		 * relocation processing next time. Just in case the target
		 * object is evicted and then rebound into its old
		 * presumed_offset before the next execbuffer - if that
		 * happened we would make the mistake of assuming that the
		 * relocations were valid.
		 */
		for (j = 0; j < exec[i].relocation_count; j++) {
			if (copy_to_user(&user_relocs[j].presumed_offset,
					 &invalid_offset,
					 sizeof(invalid_offset))) {
				ret = -EFAULT;
				mutex_lock(&dev->struct_mutex);
				goto err;
			}
		}

		reloc_offset[i] = total;
		total += exec[i].relocation_count;
	}

	ret = i915_mutex_lock_interruptible(dev);
	if (ret) {
		mutex_lock(&dev->struct_mutex);
		goto err;
	}

	/* reacquire the objects */
	eb_reset(eb);
	ret = eb_lookup_objects(eb, exec, args, file);
	if (ret)
		goto err;

	need_relocs = (args->flags & I915_EXEC_NO_RELOC) == 0;
	ret = i915_gem_execbuffer_reserve(ring, &eb->objects, vm, &need_relocs);
	if (ret)
		goto err;

	list_for_each_entry(obj, &eb->objects, exec_list) {
		int offset = obj->exec_entry - exec;
		ret = i915_gem_execbuffer_relocate_object_slow(obj, eb,
							       reloc + reloc_offset[offset],
							       vm);
		if (ret)
			goto err;
	}

	/* Leave the user relocations as are, this is the painfully slow path,
	 * and we want to avoid the complication of dropping the lock whilst
	 * having buffers reserved in the aperture and so causing spurious
	 * ENOSPC for random operations.
	 */

err:
	drm_free_large(reloc);
	drm_free_large(reloc_offset);
	return ret;
}

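/*
 * Make every object coherent for GPU execution: serialise against other
 * rings, clflush CPU-dirty objects, flush the chipset and GTT write
 * buffers, and invalidate the GPU caches before the batch runs.
 */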
static int
i915_gem_execbuffer_move_to_gpu(struct intel_ring_buffer *ring,
				struct list_head *objects)
{
	struct drm_i915_gem_object *obj;
	uint32_t flush_domains = 0;
	bool flush_chipset = false;
	int ret;

	list_for_each_entry(obj, objects, exec_list) {
		ret = i915_gem_object_sync(obj, ring);
		if (ret)
			return ret;

		if (obj->base.write_domain & I915_GEM_DOMAIN_CPU)
			flush_chipset |= i915_gem_clflush_object(obj, false);

		flush_domains |= obj->base.write_domain;
	}

	if (flush_chipset)
		i915_gem_chipset_flush(ring->dev);

	if (flush_domains & I915_GEM_DOMAIN_GTT)
		wmb();

	/* Unconditionally invalidate gpu caches and ensure that we do flush
	 * any residual writes from the previous batch.
	 */
	return intel_ring_invalidate_all_caches(ring);
}

static bool
i915_gem_check_execbuffer(struct drm_i915_gem_execbuffer2 *exec)
{
	if (exec->flags & __I915_EXEC_UNKNOWN_FLAGS)
		return false;

	return ((exec->batch_start_offset | exec->batch_len) & 0x7) == 0;
}

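/*
 * Sanity-check the user's exec list before taking any locks: reject
 * unknown per-object flags, guard against integer overflow of the total
 * relocation count, and verify (and optionally prefault) each relocation
 * array for write access, since presumed offsets may be written back.
 */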
static int
validate_exec_list(struct drm_i915_gem_exec_object2 *exec,
		   int count)
{
	int i;
	int relocs_total = 0;
	int relocs_max = INT_MAX / sizeof(struct drm_i915_gem_relocation_entry);

	for (i = 0; i < count; i++) {
		char __user *ptr = to_user_ptr(exec[i].relocs_ptr);
		int length; /* limited by fault_in_pages_readable() */

		if (exec[i].flags & __EXEC_OBJECT_UNKNOWN_FLAGS)
			return -EINVAL;

		/* First check for malicious input causing overflow in
		 * the worst case where we need to allocate the entire
		 * relocation tree as a single array.
		 */
		if (exec[i].relocation_count > relocs_max - relocs_total)
			return -EINVAL;
		relocs_total += exec[i].relocation_count;

		length = exec[i].relocation_count *
			sizeof(struct drm_i915_gem_relocation_entry);
		/*
		 * We must check that the entire relocation array is safe
		 * to read, but since we may need to update the presumed
		 * offsets during execution, check for full write access.
		 */
		if (!access_ok(VERIFY_WRITE, ptr, length))
			return -EFAULT;

		if (likely(!i915_prefault_disable)) {
			if (fault_in_multipages_readable(ptr, length))
				return -EFAULT;
		}
	}

	return 0;
}

static void
i915_gem_execbuffer_move_to_active(struct list_head *objects,
				   struct i915_address_space *vm,
				   struct intel_ring_buffer *ring)
{
	struct drm_i915_gem_object *obj;

	list_for_each_entry(obj, objects, exec_list) {
		u32 old_read = obj->base.read_domains;
		u32 old_write = obj->base.write_domain;

		obj->base.write_domain = obj->base.pending_write_domain;
		if (obj->base.write_domain == 0)
			obj->base.pending_read_domains |= obj->base.read_domains;
		obj->base.read_domains = obj->base.pending_read_domains;
		obj->fenced_gpu_access = obj->pending_fenced_gpu_access;

		/* FIXME: This lookup gets fixed later <-- danvet */
		list_move_tail(&i915_gem_obj_to_vma(obj, vm)->mm_list, &vm->active_list);
		i915_gem_object_move_to_active(obj, ring);
		if (obj->base.write_domain) {
			obj->dirty = 1;
			obj->last_write_seqno = intel_ring_get_seqno(ring);
			if (obj->pin_count) /* check for potential scanout */
				intel_mark_fb_busy(obj, ring);
		}

		trace_i915_gem_object_change_domain(obj, old_read, old_write);
	}
}

static void
i915_gem_execbuffer_retire_commands(struct drm_device *dev,
				    struct drm_file *file,
				    struct intel_ring_buffer *ring,
				    struct drm_i915_gem_object *obj)
{
	/* Unconditionally force add_request to emit a full flush. */
	ring->gpu_caches_dirty = true;

	/* Add a breadcrumb for the completion of the batch buffer */
	(void)__i915_add_request(ring, file, obj, NULL);
}

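/*
 * When userspace sets I915_EXEC_GEN7_SOL_RESET, zero the four
 * GEN7_SO_WRITE_OFFSET registers with MI_LOAD_REGISTER_IMM so streamout
 * writes for this batch start from offset 0 again. Only meaningful on the
 * gen7 render ring; a no-op everywhere else.
 */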
static int
i915_reset_gen7_sol_offsets(struct drm_device *dev,
			    struct intel_ring_buffer *ring)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	int ret, i;

	if (!IS_GEN7(dev) || ring != &dev_priv->ring[RCS])
		return 0;

	ret = intel_ring_begin(ring, 4 * 3);
	if (ret)
		return ret;

	for (i = 0; i < 4; i++) {
		intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1));
		intel_ring_emit(ring, GEN7_SO_WRITE_OFFSET(i));
		intel_ring_emit(ring, 0);
	}

	intel_ring_advance(ring);

	return 0;
}

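/*
 * Core execbuffer path shared by both ioctls: validate the flags, select
 * the ring, copy in any cliprects, reserve and relocate the objects, flush
 * them to the GPU and finally dispatch the batch, adding a request so the
 * work can be tracked and retired.
 */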
static int
i915_gem_do_execbuffer(struct drm_device *dev, void *data,
		       struct drm_file *file,
		       struct drm_i915_gem_execbuffer2 *args,
		       struct drm_i915_gem_exec_object2 *exec,
		       struct i915_address_space *vm)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct eb_objects *eb;
	struct drm_i915_gem_object *batch_obj;
	struct drm_clip_rect *cliprects = NULL;
	struct intel_ring_buffer *ring;
	u32 ctx_id = i915_execbuffer2_get_context_id(*args);
	u32 exec_start, exec_len;
	u32 mask, flags;
	int ret, mode, i;
	bool need_relocs;

	if (!i915_gem_check_execbuffer(args))
		return -EINVAL;

	ret = validate_exec_list(exec, args->buffer_count);
	if (ret)
		return ret;

	flags = 0;
	if (args->flags & I915_EXEC_SECURE) {
		if (!file->is_master || !capable(CAP_SYS_ADMIN))
			return -EPERM;

		flags |= I915_DISPATCH_SECURE;
	}
	if (args->flags & I915_EXEC_IS_PINNED)
		flags |= I915_DISPATCH_PINNED;

	switch (args->flags & I915_EXEC_RING_MASK) {
	case I915_EXEC_DEFAULT:
	case I915_EXEC_RENDER:
		ring = &dev_priv->ring[RCS];
		break;
	case I915_EXEC_BSD:
		ring = &dev_priv->ring[VCS];
		if (ctx_id != DEFAULT_CONTEXT_ID) {
			DRM_DEBUG("Ring %s doesn't support contexts\n",
				  ring->name);
			return -EPERM;
		}
		break;
	case I915_EXEC_BLT:
		ring = &dev_priv->ring[BCS];
		if (ctx_id != DEFAULT_CONTEXT_ID) {
			DRM_DEBUG("Ring %s doesn't support contexts\n",
				  ring->name);
			return -EPERM;
		}
		break;
	case I915_EXEC_VEBOX:
		ring = &dev_priv->ring[VECS];
		if (ctx_id != DEFAULT_CONTEXT_ID) {
			DRM_DEBUG("Ring %s doesn't support contexts\n",
				  ring->name);
			return -EPERM;
		}
		break;

	default:
		DRM_DEBUG("execbuf with unknown ring: %d\n",
			  (int)(args->flags & I915_EXEC_RING_MASK));
		return -EINVAL;
	}
	if (!intel_ring_initialized(ring)) {
		DRM_DEBUG("execbuf with invalid ring: %d\n",
			  (int)(args->flags & I915_EXEC_RING_MASK));
		return -EINVAL;
	}

	mode = args->flags & I915_EXEC_CONSTANTS_MASK;
	mask = I915_EXEC_CONSTANTS_MASK;
	switch (mode) {
	case I915_EXEC_CONSTANTS_REL_GENERAL:
	case I915_EXEC_CONSTANTS_ABSOLUTE:
	case I915_EXEC_CONSTANTS_REL_SURFACE:
		if (ring == &dev_priv->ring[RCS] &&
		    mode != dev_priv->relative_constants_mode) {
			if (INTEL_INFO(dev)->gen < 4)
				return -EINVAL;

			if (INTEL_INFO(dev)->gen > 5 &&
			    mode == I915_EXEC_CONSTANTS_REL_SURFACE)
				return -EINVAL;

			/* The HW changed the meaning on this bit on gen6 */
			if (INTEL_INFO(dev)->gen >= 6)
				mask &= ~I915_EXEC_CONSTANTS_REL_SURFACE;
		}
		break;
	default:
		DRM_DEBUG("execbuf with unknown constants: %d\n", mode);
		return -EINVAL;
	}

	if (args->buffer_count < 1) {
		DRM_DEBUG("execbuf with %d buffers\n", args->buffer_count);
		return -EINVAL;
	}

	if (args->num_cliprects != 0) {
		if (ring != &dev_priv->ring[RCS]) {
			DRM_DEBUG("clip rectangles are only valid with the render ring\n");
			return -EINVAL;
		}

		if (INTEL_INFO(dev)->gen >= 5) {
			DRM_DEBUG("clip rectangles are only valid on pre-gen5\n");
			return -EINVAL;
		}

		if (args->num_cliprects > UINT_MAX / sizeof(*cliprects)) {
			DRM_DEBUG("execbuf with %u cliprects\n",
				  args->num_cliprects);
			return -EINVAL;
		}

		cliprects = kmalloc(args->num_cliprects * sizeof(*cliprects),
				    GFP_KERNEL);
		if (cliprects == NULL) {
			ret = -ENOMEM;
			goto pre_mutex_err;
		}

		if (copy_from_user(cliprects,
				   to_user_ptr(args->cliprects_ptr),
				   sizeof(*cliprects)*args->num_cliprects)) {
			ret = -EFAULT;
			goto pre_mutex_err;
		}
	}

	ret = i915_mutex_lock_interruptible(dev);
	if (ret)
		goto pre_mutex_err;

	if (dev_priv->ums.mm_suspended) {
		mutex_unlock(&dev->struct_mutex);
		ret = -EBUSY;
		goto pre_mutex_err;
	}

	eb = eb_create(args);
	if (eb == NULL) {
		mutex_unlock(&dev->struct_mutex);
		ret = -ENOMEM;
		goto pre_mutex_err;
	}

	/* Look up object handles */
	ret = eb_lookup_objects(eb, exec, args, file);
	if (ret)
		goto err;

	/* take note of the batch buffer before we might reorder the lists */
	batch_obj = list_entry(eb->objects.prev,
			       struct drm_i915_gem_object,
			       exec_list);

	/* Move the objects en-masse into the GTT, evicting if necessary. */
	need_relocs = (args->flags & I915_EXEC_NO_RELOC) == 0;
	ret = i915_gem_execbuffer_reserve(ring, &eb->objects, vm, &need_relocs);
	if (ret)
		goto err;

	/* The objects are in their final locations, apply the relocations. */
	if (need_relocs)
		ret = i915_gem_execbuffer_relocate(eb, vm);
	if (ret) {
		if (ret == -EFAULT) {
			ret = i915_gem_execbuffer_relocate_slow(dev, args, file, ring,
								eb, exec, vm);
			BUG_ON(!mutex_is_locked(&dev->struct_mutex));
		}
		if (ret)
			goto err;
	}

	/* Set the pending read domains for the batch buffer to COMMAND */
	if (batch_obj->base.pending_write_domain) {
		DRM_DEBUG("Attempting to use self-modifying batch buffer\n");
		ret = -EINVAL;
		goto err;
	}
	batch_obj->base.pending_read_domains |= I915_GEM_DOMAIN_COMMAND;

	/* snb/ivb/vlv conflate the "batch in ppgtt" bit with the "non-secure
	 * batch" bit. Hence we need to pin secure batches into the global gtt.
	 * hsw should have this fixed, but let's be paranoid and do it
	 * unconditionally for now. */
	if (flags & I915_DISPATCH_SECURE && !batch_obj->has_global_gtt_mapping)
		i915_gem_gtt_bind_object(batch_obj, batch_obj->cache_level);

	ret = i915_gem_execbuffer_move_to_gpu(ring, &eb->objects);
	if (ret)
		goto err;

	ret = i915_switch_context(ring, file, ctx_id);
	if (ret)
		goto err;

	if (ring == &dev_priv->ring[RCS] &&
	    mode != dev_priv->relative_constants_mode) {
		ret = intel_ring_begin(ring, 4);
		if (ret)
			goto err;

		intel_ring_emit(ring, MI_NOOP);
		intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1));
		intel_ring_emit(ring, INSTPM);
		intel_ring_emit(ring, mask << 16 | mode);
		intel_ring_advance(ring);

		dev_priv->relative_constants_mode = mode;
	}

	if (args->flags & I915_EXEC_GEN7_SOL_RESET) {
		ret = i915_reset_gen7_sol_offsets(dev, ring);
		if (ret)
			goto err;
	}

	exec_start = i915_gem_obj_offset(batch_obj, vm) +
		args->batch_start_offset;
	exec_len = args->batch_len;
	if (cliprects) {
		for (i = 0; i < args->num_cliprects; i++) {
			ret = i915_emit_box(dev, &cliprects[i],
					    args->DR1, args->DR4);
			if (ret)
				goto err;

			ret = ring->dispatch_execbuffer(ring,
							exec_start, exec_len,
							flags);
			if (ret)
				goto err;
		}
	} else {
		ret = ring->dispatch_execbuffer(ring,
						exec_start, exec_len,
						flags);
		if (ret)
			goto err;
	}

	trace_i915_gem_ring_dispatch(ring, intel_ring_get_seqno(ring), flags);

	i915_gem_execbuffer_move_to_active(&eb->objects, vm, ring);
	i915_gem_execbuffer_retire_commands(dev, file, ring, batch_obj);

err:
	eb_destroy(eb);

	mutex_unlock(&dev->struct_mutex);

pre_mutex_err:
	kfree(cliprects);
	return ret;
}

/*
 * Legacy execbuffer just creates an exec2 list from the original exec object
 * list array and passes it to the real function.
 */
int
i915_gem_execbuffer(struct drm_device *dev, void *data,
		    struct drm_file *file)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_i915_gem_execbuffer *args = data;
	struct drm_i915_gem_execbuffer2 exec2;
	struct drm_i915_gem_exec_object *exec_list = NULL;
	struct drm_i915_gem_exec_object2 *exec2_list = NULL;
	int ret, i;

	if (args->buffer_count < 1) {
		DRM_DEBUG("execbuf with %d buffers\n", args->buffer_count);
		return -EINVAL;
	}

	/* Copy in the exec list from userland */
	exec_list = drm_malloc_ab(sizeof(*exec_list), args->buffer_count);
	exec2_list = drm_malloc_ab(sizeof(*exec2_list), args->buffer_count);
	if (exec_list == NULL || exec2_list == NULL) {
		DRM_DEBUG("Failed to allocate exec list for %d buffers\n",
			  args->buffer_count);
		drm_free_large(exec_list);
		drm_free_large(exec2_list);
		return -ENOMEM;
	}
	ret = copy_from_user(exec_list,
			     to_user_ptr(args->buffers_ptr),
			     sizeof(*exec_list) * args->buffer_count);
	if (ret != 0) {
		DRM_DEBUG("copy %d exec entries failed %d\n",
			  args->buffer_count, ret);
		drm_free_large(exec_list);
		drm_free_large(exec2_list);
		return -EFAULT;
	}

	for (i = 0; i < args->buffer_count; i++) {
		exec2_list[i].handle = exec_list[i].handle;
		exec2_list[i].relocation_count = exec_list[i].relocation_count;
		exec2_list[i].relocs_ptr = exec_list[i].relocs_ptr;
		exec2_list[i].alignment = exec_list[i].alignment;
		exec2_list[i].offset = exec_list[i].offset;
		if (INTEL_INFO(dev)->gen < 4)
			exec2_list[i].flags = EXEC_OBJECT_NEEDS_FENCE;
		else
			exec2_list[i].flags = 0;
	}

	exec2.buffers_ptr = args->buffers_ptr;
	exec2.buffer_count = args->buffer_count;
	exec2.batch_start_offset = args->batch_start_offset;
	exec2.batch_len = args->batch_len;
	exec2.DR1 = args->DR1;
	exec2.DR4 = args->DR4;
	exec2.num_cliprects = args->num_cliprects;
	exec2.cliprects_ptr = args->cliprects_ptr;
	exec2.flags = I915_EXEC_RENDER;
	i915_execbuffer2_set_context_id(exec2, 0);

	ret = i915_gem_do_execbuffer(dev, data, file, &exec2, exec2_list,
				     &dev_priv->gtt.base);
	if (!ret) {
		/* Copy the new buffer offsets back to the user's exec list. */
		for (i = 0; i < args->buffer_count; i++)
			exec_list[i].offset = exec2_list[i].offset;
		/* ... and back out to userspace */
		ret = copy_to_user(to_user_ptr(args->buffers_ptr),
				   exec_list,
				   sizeof(*exec_list) * args->buffer_count);
		if (ret) {
			ret = -EFAULT;
			DRM_DEBUG("failed to copy %d exec entries "
				  "back to user (%d)\n",
				  args->buffer_count, ret);
		}
	}

	drm_free_large(exec_list);
	drm_free_large(exec2_list);
	return ret;
}

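/*
 * For reference, a minimal (hypothetical) userspace invocation of this
 * ioctl looks like the sketch below; real buffer setup lives in libdrm,
 * and the handle and length values here are purely illustrative:
 *
 *	struct drm_i915_gem_exec_object2 obj = {
 *		.handle = batch_handle,		// GEM handle from GEM_CREATE
 *		.relocation_count = 0,
 *		.relocs_ptr = 0,
 *	};
 *	struct drm_i915_gem_execbuffer2 execbuf = {
 *		.buffers_ptr = (uintptr_t)&obj,
 *		.buffer_count = 1,		// batch must be the last entry
 *		.batch_start_offset = 0,	// must be 8-byte aligned
 *		.batch_len = batch_bytes,	// must be 8-byte aligned
 *		.flags = I915_EXEC_RENDER,
 *	};
 *	drmIoctl(fd, DRM_IOCTL_I915_GEM_EXECBUFFER2, &execbuf);
 */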
int
i915_gem_execbuffer2(struct drm_device *dev, void *data,
		     struct drm_file *file)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_i915_gem_execbuffer2 *args = data;
	struct drm_i915_gem_exec_object2 *exec2_list = NULL;
	int ret;

	if (args->buffer_count < 1 ||
	    args->buffer_count > UINT_MAX / sizeof(*exec2_list)) {
		DRM_DEBUG("execbuf2 with %d buffers\n", args->buffer_count);
		return -EINVAL;
	}

	exec2_list = kmalloc(sizeof(*exec2_list)*args->buffer_count,
			     GFP_TEMPORARY | __GFP_NOWARN | __GFP_NORETRY);
	if (exec2_list == NULL)
		exec2_list = drm_malloc_ab(sizeof(*exec2_list),
					   args->buffer_count);
	if (exec2_list == NULL) {
		DRM_DEBUG("Failed to allocate exec list for %d buffers\n",
			  args->buffer_count);
		return -ENOMEM;
	}
	ret = copy_from_user(exec2_list,
			     to_user_ptr(args->buffers_ptr),
			     sizeof(*exec2_list) * args->buffer_count);
	if (ret != 0) {
		DRM_DEBUG("copy %d exec entries failed %d\n",
			  args->buffer_count, ret);
		drm_free_large(exec2_list);
		return -EFAULT;
	}

	ret = i915_gem_do_execbuffer(dev, data, file, args, exec2_list,
				     &dev_priv->gtt.base);
	if (!ret) {
		/* Copy the new buffer offsets back to the user's exec list. */
		ret = copy_to_user(to_user_ptr(args->buffers_ptr),
				   exec2_list,
				   sizeof(*exec2_list) * args->buffer_count);
		if (ret) {
			ret = -EFAULT;
			DRM_DEBUG("failed to copy %d exec entries "
				  "back to user (%d)\n",
				  args->buffer_count, ret);
		}
	}

	drm_free_large(exec2_list);
	return ret;
}