drm/i915: Force CPU relocations if not GTT mapped
drivers/gpu/drm/i915/i915_gem_execbuffer.c
/*
 * Copyright © 2008,2010 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Eric Anholt <eric@anholt.net>
 *    Chris Wilson <chris@chris-wilson.co.uk>
 *
 */

#include <drm/drmP.h>
#include <drm/i915_drm.h>
#include "i915_drv.h"
#include "i915_trace.h"
#include "intel_drv.h"
#include <linux/dma_remapping.h>

#define __EXEC_OBJECT_HAS_PIN (1<<31)
#define __EXEC_OBJECT_HAS_FENCE (1<<30)
#define __EXEC_OBJECT_NEEDS_MAP (1<<29)
#define __EXEC_OBJECT_NEEDS_BIAS (1<<28)

#define BATCH_OFFSET_BIAS (256*1024)

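/*
 * Lookup table mapping execbuf handles back to vmas. The encoding is
 * chosen by eb_create(): when "and" is negative, handles index directly
 * into lut[] (which holds -and entries); otherwise "and" is a
 * power-of-two-minus-one mask selecting a bucket in buckets[].
 */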
struct eb_vmas {
        struct list_head vmas;
        int and;
        union {
                struct i915_vma *lut[0];
                struct hlist_head buckets[0];
        };
};

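/*
 * Allocate the lookup structure. With I915_EXEC_HANDLE_LUT userspace
 * promises that handles are densely numbered from 0, so a flat array
 * indexed by handle suffices (the opportunistic kmalloc may fail and
 * fall back below). Otherwise we build a hash table, shrinking the
 * bucket count towards 2 * buffer_count from an initial half-page.
 */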
static struct eb_vmas *
eb_create(struct drm_i915_gem_execbuffer2 *args)
{
        struct eb_vmas *eb = NULL;

        if (args->flags & I915_EXEC_HANDLE_LUT) {
                unsigned size = args->buffer_count;
                size *= sizeof(struct i915_vma *);
                size += sizeof(struct eb_vmas);
                eb = kmalloc(size, GFP_TEMPORARY | __GFP_NOWARN | __GFP_NORETRY);
        }

        if (eb == NULL) {
                unsigned size = args->buffer_count;
                unsigned count = PAGE_SIZE / sizeof(struct hlist_head) / 2;
                BUILD_BUG_ON_NOT_POWER_OF_2(PAGE_SIZE / sizeof(struct hlist_head));
                while (count > 2*size)
                        count >>= 1;
                eb = kzalloc(count*sizeof(struct hlist_head) +
                             sizeof(struct eb_vmas),
                             GFP_TEMPORARY);
                if (eb == NULL)
                        return eb;

                eb->and = count - 1;
        } else
                eb->and = -args->buffer_count;

        INIT_LIST_HEAD(&eb->vmas);
        return eb;
}

static void
eb_reset(struct eb_vmas *eb)
{
        if (eb->and >= 0)
                memset(eb->buckets, 0, (eb->and+1)*sizeof(struct hlist_head));
}

static int
eb_lookup_vmas(struct eb_vmas *eb,
               struct drm_i915_gem_exec_object2 *exec,
               const struct drm_i915_gem_execbuffer2 *args,
               struct i915_address_space *vm,
               struct drm_file *file)
{
        struct drm_i915_private *dev_priv = vm->dev->dev_private;
        struct drm_i915_gem_object *obj;
        struct list_head objects;
        int i, ret;

        INIT_LIST_HEAD(&objects);
        spin_lock(&file->table_lock);
        /* Grab a reference to the object and release the lock so we can lookup
         * or create the VMA without using GFP_ATOMIC */
        for (i = 0; i < args->buffer_count; i++) {
                obj = to_intel_bo(idr_find(&file->object_idr, exec[i].handle));
                if (obj == NULL) {
                        spin_unlock(&file->table_lock);
                        DRM_DEBUG("Invalid object handle %d at index %d\n",
                                  exec[i].handle, i);
                        ret = -ENOENT;
                        goto err;
                }

                if (!list_empty(&obj->obj_exec_link)) {
                        spin_unlock(&file->table_lock);
                        DRM_DEBUG("Object %p [handle %d, index %d] appears more than once in object list\n",
                                  obj, exec[i].handle, i);
                        ret = -EINVAL;
                        goto err;
                }

                drm_gem_object_reference(&obj->base);
                list_add_tail(&obj->obj_exec_link, &objects);
        }
        spin_unlock(&file->table_lock);

        i = 0;
        while (!list_empty(&objects)) {
                struct i915_vma *vma;
                struct i915_address_space *bind_vm = vm;

                if (exec[i].flags & EXEC_OBJECT_NEEDS_GTT &&
                    USES_FULL_PPGTT(vm->dev)) {
                        ret = -EINVAL;
                        goto err;
                }

                /* If we have secure dispatch, the batch buffer (the last
                 * object in the execbuffer list) must be bound through the
                 * GGTT VM.
                 */
                if (((args->flags & I915_EXEC_SECURE) &&
                    (i == (args->buffer_count - 1))))
                        bind_vm = &dev_priv->gtt.base;

                obj = list_first_entry(&objects,
                                       struct drm_i915_gem_object,
                                       obj_exec_link);

                /*
                 * NOTE: We can leak any vmas created here when something fails
                 * later on. But that's no issue since vma_unbind can deal with
                 * vmas which are not actually bound. And since only
                 * lookup_or_create exists as an interface to get at the vma
                 * from the (obj, vm) we don't run the risk of creating
                 * duplicated vmas for the same vm.
                 */
                vma = i915_gem_obj_lookup_or_create_vma(obj, bind_vm);
                if (IS_ERR(vma)) {
                        DRM_DEBUG("Failed to lookup VMA\n");
                        ret = PTR_ERR(vma);
                        goto err;
                }

                /* Transfer ownership from the objects list to the vmas list. */
                list_add_tail(&vma->exec_list, &eb->vmas);
                list_del_init(&obj->obj_exec_link);

                vma->exec_entry = &exec[i];
                if (eb->and < 0) {
                        eb->lut[i] = vma;
                } else {
                        uint32_t handle = args->flags & I915_EXEC_HANDLE_LUT ? i : exec[i].handle;
                        vma->exec_handle = handle;
                        hlist_add_head(&vma->exec_node,
                                       &eb->buckets[handle & eb->and]);
                }
                ++i;
        }

        return 0;

err:
        while (!list_empty(&objects)) {
                obj = list_first_entry(&objects,
                                       struct drm_i915_gem_object,
                                       obj_exec_link);
                list_del_init(&obj->obj_exec_link);
                drm_gem_object_unreference(&obj->base);
        }
        /*
         * Objects already transferred to the vmas list will be unreferenced by
         * eb_destroy.
         */

        return ret;
}

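/*
 * Reverse lookup of a handle to its vma, mirroring the encoding chosen
 * in eb_create(): direct index when eb->and is negative, otherwise a
 * walk of the hash bucket at (handle & eb->and).
 */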
static struct i915_vma *eb_get_vma(struct eb_vmas *eb, unsigned long handle)
{
        if (eb->and < 0) {
                if (handle >= -eb->and)
                        return NULL;
                return eb->lut[handle];
        } else {
                struct hlist_head *head;
                struct hlist_node *node;

                head = &eb->buckets[handle & eb->and];
                hlist_for_each(node, head) {
                        struct i915_vma *vma;

                        vma = hlist_entry(node, struct i915_vma, exec_node);
                        if (vma->exec_handle == handle)
                                return vma;
                }
                return NULL;
        }
}

static void
i915_gem_execbuffer_unreserve_vma(struct i915_vma *vma)
{
        struct drm_i915_gem_exec_object2 *entry;
        struct drm_i915_gem_object *obj = vma->obj;

        if (!drm_mm_node_allocated(&vma->node))
                return;

        entry = vma->exec_entry;

        if (entry->flags & __EXEC_OBJECT_HAS_FENCE)
                i915_gem_object_unpin_fence(obj);

        if (entry->flags & __EXEC_OBJECT_HAS_PIN)
                vma->pin_count--;

        entry->flags &= ~(__EXEC_OBJECT_HAS_FENCE | __EXEC_OBJECT_HAS_PIN);
}

static void eb_destroy(struct eb_vmas *eb)
{
        while (!list_empty(&eb->vmas)) {
                struct i915_vma *vma;

                vma = list_first_entry(&eb->vmas,
                                       struct i915_vma,
                                       exec_list);
                list_del_init(&vma->exec_list);
                i915_gem_execbuffer_unreserve_vma(vma);
                drm_gem_object_unreference(&vma->obj->base);
        }
        kfree(eb);
}

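/*
 * Choose between the CPU and GTT relocation paths. Per the patch
 * subject ("Force CPU relocations if not GTT mapped"), an object that
 * is not mappable through the GTT aperture (!obj->map_and_fenceable)
 * must be relocated via the CPU. The CPU path is also used on LLC
 * platforms, for objects in the CPU write domain and for cacheable
 * objects, where CPU writes are coherent and avoid an aperture mapping.
 */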
static inline int use_cpu_reloc(struct drm_i915_gem_object *obj)
{
        return (HAS_LLC(obj->base.dev) ||
                obj->base.write_domain == I915_GEM_DOMAIN_CPU ||
                !obj->map_and_fenceable ||
                obj->cache_level != I915_CACHE_NONE);
}

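/*
 * Apply a relocation through a kmap of the object's backing page.
 * On gen8+ addresses are 64-bit, so the upper dword may cross into the
 * following page and require a second kmap_atomic().
 */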
static int
relocate_entry_cpu(struct drm_i915_gem_object *obj,
                   struct drm_i915_gem_relocation_entry *reloc,
                   uint64_t target_offset)
{
        struct drm_device *dev = obj->base.dev;
        uint32_t page_offset = offset_in_page(reloc->offset);
        uint64_t delta = reloc->delta + target_offset;
        char *vaddr;
        int ret;

        ret = i915_gem_object_set_to_cpu_domain(obj, true);
        if (ret)
                return ret;

        vaddr = kmap_atomic(i915_gem_object_get_page(obj,
                                reloc->offset >> PAGE_SHIFT));
        *(uint32_t *)(vaddr + page_offset) = lower_32_bits(delta);

        if (INTEL_INFO(dev)->gen >= 8) {
                page_offset = offset_in_page(page_offset + sizeof(uint32_t));

                if (page_offset == 0) {
                        kunmap_atomic(vaddr);
                        vaddr = kmap_atomic(i915_gem_object_get_page(obj,
                            (reloc->offset + sizeof(uint32_t)) >> PAGE_SHIFT));
                }

                *(uint32_t *)(vaddr + page_offset) = upper_32_bits(delta);
        }

        kunmap_atomic(vaddr);

        return 0;
}

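/*
 * Apply a relocation through an atomic write-combining iomap of the GTT
 * aperture; the object must therefore be bound in the global GTT. As in
 * the CPU path, gen8+ must remap if the upper dword crosses a page
 * boundary.
 */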
static int
relocate_entry_gtt(struct drm_i915_gem_object *obj,
                   struct drm_i915_gem_relocation_entry *reloc,
                   uint64_t target_offset)
{
        struct drm_device *dev = obj->base.dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        uint64_t delta = reloc->delta + target_offset;
        uint32_t __iomem *reloc_entry;
        void __iomem *reloc_page;
        int ret;

        ret = i915_gem_object_set_to_gtt_domain(obj, true);
        if (ret)
                return ret;

        ret = i915_gem_object_put_fence(obj);
        if (ret)
                return ret;

        /* Map the page containing the relocation we're going to perform. */
        reloc->offset += i915_gem_obj_ggtt_offset(obj);
        reloc_page = io_mapping_map_atomic_wc(dev_priv->gtt.mappable,
                                              reloc->offset & PAGE_MASK);
        reloc_entry = (uint32_t __iomem *)
                (reloc_page + offset_in_page(reloc->offset));
        iowrite32(lower_32_bits(delta), reloc_entry);

        if (INTEL_INFO(dev)->gen >= 8) {
                reloc_entry += 1;

                if (offset_in_page(reloc->offset + sizeof(uint32_t)) == 0) {
                        io_mapping_unmap_atomic(reloc_page);
                        reloc_page = io_mapping_map_atomic_wc(
                                        dev_priv->gtt.mappable,
                                        reloc->offset + sizeof(uint32_t));
                        reloc_entry = reloc_page;
                }

                iowrite32(upper_32_bits(delta), reloc_entry);
        }

        io_mapping_unmap_atomic(reloc_page);

        return 0;
}

static int
i915_gem_execbuffer_relocate_entry(struct drm_i915_gem_object *obj,
                                   struct eb_vmas *eb,
                                   struct drm_i915_gem_relocation_entry *reloc)
{
        struct drm_device *dev = obj->base.dev;
        struct drm_gem_object *target_obj;
        struct drm_i915_gem_object *target_i915_obj;
        struct i915_vma *target_vma;
        uint64_t target_offset;
        int ret;

        /* we already hold a reference to all valid objects */
        target_vma = eb_get_vma(eb, reloc->target_handle);
        if (unlikely(target_vma == NULL))
                return -ENOENT;
        target_i915_obj = target_vma->obj;
        target_obj = &target_vma->obj->base;

        target_offset = target_vma->node.start;

        /* Sandybridge PPGTT errata: We need a global gtt mapping for MI and
         * pipe_control writes because the gpu doesn't properly redirect them
         * through the ppgtt for non_secure batchbuffers. */
        if (unlikely(IS_GEN6(dev) &&
            reloc->write_domain == I915_GEM_DOMAIN_INSTRUCTION &&
            !target_i915_obj->has_global_gtt_mapping)) {
                struct i915_vma *vma =
                        list_first_entry(&target_i915_obj->vma_list,
                                         typeof(*vma), vma_link);
                vma->bind_vma(vma, target_i915_obj->cache_level, GLOBAL_BIND);
        }

        /* Validate that the target is in a valid r/w GPU domain */
        if (unlikely(reloc->write_domain & (reloc->write_domain - 1))) {
                DRM_DEBUG("reloc with multiple write domains: "
                          "obj %p target %d offset %d "
                          "read %08x write %08x",
                          obj, reloc->target_handle,
                          (int) reloc->offset,
                          reloc->read_domains,
                          reloc->write_domain);
                return -EINVAL;
        }
        if (unlikely((reloc->write_domain | reloc->read_domains)
                     & ~I915_GEM_GPU_DOMAINS)) {
                DRM_DEBUG("reloc with read/write non-GPU domains: "
                          "obj %p target %d offset %d "
                          "read %08x write %08x",
                          obj, reloc->target_handle,
                          (int) reloc->offset,
                          reloc->read_domains,
                          reloc->write_domain);
                return -EINVAL;
        }

        target_obj->pending_read_domains |= reloc->read_domains;
        target_obj->pending_write_domain |= reloc->write_domain;

        /* If the relocation already has the right value in it, no
         * more work needs to be done.
         */
        if (target_offset == reloc->presumed_offset)
                return 0;

        /* Check that the relocation address is valid... */
        if (unlikely(reloc->offset >
                     obj->base.size - (INTEL_INFO(dev)->gen >= 8 ? 8 : 4))) {
                DRM_DEBUG("Relocation beyond object bounds: "
                          "obj %p target %d offset %d size %d.\n",
                          obj, reloc->target_handle,
                          (int) reloc->offset,
                          (int) obj->base.size);
                return -EINVAL;
        }
        if (unlikely(reloc->offset & 3)) {
                DRM_DEBUG("Relocation not 4-byte aligned: "
                          "obj %p target %d offset %d.\n",
                          obj, reloc->target_handle,
                          (int) reloc->offset);
                return -EINVAL;
        }

        /* We can't wait for rendering with pagefaults disabled */
        if (obj->active && in_atomic())
                return -EFAULT;

        if (use_cpu_reloc(obj))
                ret = relocate_entry_cpu(obj, reloc, target_offset);
        else
                ret = relocate_entry_gtt(obj, reloc, target_offset);

        if (ret)
                return ret;

        /* and update the user's relocation entry */
        reloc->presumed_offset = target_offset;

        return 0;
}

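/*
 * Fast-path relocation processing: copy the relocation entries from
 * userspace in stack-sized chunks and write back each presumed_offset
 * that changed. This runs with pagefaults disabled, hence the inatomic
 * copies; any fault bubbles up as -EFAULT and diverts us to the slow
 * path.
 */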
static int
i915_gem_execbuffer_relocate_vma(struct i915_vma *vma,
                                 struct eb_vmas *eb)
{
#define N_RELOC(x) ((x) / sizeof(struct drm_i915_gem_relocation_entry))
        struct drm_i915_gem_relocation_entry stack_reloc[N_RELOC(512)];
        struct drm_i915_gem_relocation_entry __user *user_relocs;
        struct drm_i915_gem_exec_object2 *entry = vma->exec_entry;
        int remain, ret;

        user_relocs = to_user_ptr(entry->relocs_ptr);

        remain = entry->relocation_count;
        while (remain) {
                struct drm_i915_gem_relocation_entry *r = stack_reloc;
                int count = remain;
                if (count > ARRAY_SIZE(stack_reloc))
                        count = ARRAY_SIZE(stack_reloc);
                remain -= count;

                if (__copy_from_user_inatomic(r, user_relocs, count*sizeof(r[0])))
                        return -EFAULT;

                do {
                        u64 offset = r->presumed_offset;

                        ret = i915_gem_execbuffer_relocate_entry(vma->obj, eb, r);
                        if (ret)
                                return ret;

                        if (r->presumed_offset != offset &&
                            __copy_to_user_inatomic(&user_relocs->presumed_offset,
                                                    &r->presumed_offset,
                                                    sizeof(r->presumed_offset))) {
                                return -EFAULT;
                        }

                        user_relocs++;
                        r++;
                } while (--count);
        }

        return 0;
#undef N_RELOC
}

static int
i915_gem_execbuffer_relocate_vma_slow(struct i915_vma *vma,
                                      struct eb_vmas *eb,
                                      struct drm_i915_gem_relocation_entry *relocs)
{
        const struct drm_i915_gem_exec_object2 *entry = vma->exec_entry;
        int i, ret;

        for (i = 0; i < entry->relocation_count; i++) {
                ret = i915_gem_execbuffer_relocate_entry(vma->obj, eb, &relocs[i]);
                if (ret)
                        return ret;
        }

        return 0;
}

static int
i915_gem_execbuffer_relocate(struct eb_vmas *eb)
{
        struct i915_vma *vma;
        int ret = 0;

        /* This is the fast path and we cannot handle a pagefault whilst
         * holding the struct mutex lest the user pass in the relocations
         * contained within a mmaped bo. In such a case the page fault
         * handler would call i915_gem_fault() and we would try to acquire
         * the struct mutex again. Obviously this is bad and so lockdep
         * complains vehemently.
         */
        pagefault_disable();
        list_for_each_entry(vma, &eb->vmas, exec_list) {
                ret = i915_gem_execbuffer_relocate_vma(vma, eb);
                if (ret)
                        break;
        }
        pagefault_enable();

        return ret;
}

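/*
 * Pin a single vma into its address space, translating the exec entry
 * flags into pin flags (mappable, global GTT, batch offset bias), and
 * grab a fence register if fenced GPU access is required. Any change of
 * offset is noted via *need_reloc so the relocations get processed.
 */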
static int
i915_gem_execbuffer_reserve_vma(struct i915_vma *vma,
                                struct intel_engine_cs *ring,
                                bool *need_reloc)
{
        struct drm_i915_gem_object *obj = vma->obj;
        struct drm_i915_gem_exec_object2 *entry = vma->exec_entry;
        bool has_fenced_gpu_access = INTEL_INFO(ring->dev)->gen < 4;
        uint64_t flags;
        int ret;

        flags = 0;
        if (entry->flags & __EXEC_OBJECT_NEEDS_MAP)
                flags |= PIN_MAPPABLE;
        if (entry->flags & EXEC_OBJECT_NEEDS_GTT)
                flags |= PIN_GLOBAL;
        if (entry->flags & __EXEC_OBJECT_NEEDS_BIAS)
                flags |= BATCH_OFFSET_BIAS | PIN_OFFSET_BIAS;

        ret = i915_gem_object_pin(obj, vma->vm, entry->alignment, flags);
        if (ret)
                return ret;

        entry->flags |= __EXEC_OBJECT_HAS_PIN;

        if (has_fenced_gpu_access) {
                if (entry->flags & EXEC_OBJECT_NEEDS_FENCE) {
                        ret = i915_gem_object_get_fence(obj);
                        if (ret)
                                return ret;

                        if (i915_gem_object_pin_fence(obj))
                                entry->flags |= __EXEC_OBJECT_HAS_FENCE;

                        obj->pending_fenced_gpu_access = true;
                }
        }

        if (entry->offset != vma->node.start) {
                entry->offset = vma->node.start;
                *need_reloc = true;
        }

        if (entry->flags & EXEC_OBJECT_WRITE) {
                obj->base.pending_read_domains = I915_GEM_DOMAIN_RENDER;
                obj->base.pending_write_domain = I915_GEM_DOMAIN_RENDER;
        }

        return 0;
}

static bool
need_reloc_mappable(struct i915_vma *vma)
{
        struct drm_i915_gem_exec_object2 *entry = vma->exec_entry;

        if (entry->relocation_count == 0)
                return false;

        if (!i915_is_ggtt(vma->vm))
                return false;

        /* See also use_cpu_reloc() */
        if (HAS_LLC(vma->obj->base.dev))
                return false;

        if (vma->obj->base.write_domain == I915_GEM_DOMAIN_CPU)
                return false;

        return true;
}

static bool
eb_vma_misplaced(struct i915_vma *vma)
{
        struct drm_i915_gem_exec_object2 *entry = vma->exec_entry;
        struct drm_i915_gem_object *obj = vma->obj;

        WARN_ON(entry->flags & __EXEC_OBJECT_NEEDS_MAP &&
                !i915_is_ggtt(vma->vm));

        if (entry->alignment &&
            vma->node.start & (entry->alignment - 1))
                return true;

        if (entry->flags & __EXEC_OBJECT_NEEDS_MAP && !obj->map_and_fenceable)
                return true;

        if (entry->flags & __EXEC_OBJECT_NEEDS_BIAS &&
            vma->node.start < BATCH_OFFSET_BIAS)
                return true;

        return false;
}

static int
i915_gem_execbuffer_reserve(struct intel_engine_cs *ring,
                            struct list_head *vmas,
                            bool *need_relocs)
{
        struct drm_i915_gem_object *obj;
        struct i915_vma *vma;
        struct i915_address_space *vm;
        struct list_head ordered_vmas;
        bool has_fenced_gpu_access = INTEL_INFO(ring->dev)->gen < 4;
        int retry;

        if (list_empty(vmas))
                return 0;

        i915_gem_retire_requests_ring(ring);

        vm = list_first_entry(vmas, struct i915_vma, exec_list)->vm;

        INIT_LIST_HEAD(&ordered_vmas);
        while (!list_empty(vmas)) {
                struct drm_i915_gem_exec_object2 *entry;
                bool need_fence, need_mappable;

                vma = list_first_entry(vmas, struct i915_vma, exec_list);
                obj = vma->obj;
                entry = vma->exec_entry;

                need_fence =
                        has_fenced_gpu_access &&
                        entry->flags & EXEC_OBJECT_NEEDS_FENCE &&
                        obj->tiling_mode != I915_TILING_NONE;
                need_mappable = need_fence || need_reloc_mappable(vma);

                if (need_mappable) {
                        entry->flags |= __EXEC_OBJECT_NEEDS_MAP;
                        list_move(&vma->exec_list, &ordered_vmas);
                } else
                        list_move_tail(&vma->exec_list, &ordered_vmas);

                obj->base.pending_read_domains = I915_GEM_GPU_DOMAINS & ~I915_GEM_DOMAIN_COMMAND;
                obj->base.pending_write_domain = 0;
                obj->pending_fenced_gpu_access = false;
        }
        list_splice(&ordered_vmas, vmas);

        /* Attempt to pin all of the buffers into the GTT.
         * This is done in 3 phases:
         *
         * 1a. Unbind all objects that do not match the GTT constraints for
         *     the execbuffer (fenceable, mappable, alignment etc).
         * 1b. Increment pin count for already bound objects.
         * 2.  Bind new objects.
         * 3.  Decrement pin count.
         *
         * This avoids unnecessary unbinding of later objects in order to make
         * room for the earlier objects *unless* we need to defragment.
         */
        retry = 0;
        do {
                int ret = 0;

                /* Unbind any ill-fitting objects or pin. */
                list_for_each_entry(vma, vmas, exec_list) {
                        if (!drm_mm_node_allocated(&vma->node))
                                continue;

                        if (eb_vma_misplaced(vma))
                                ret = i915_vma_unbind(vma);
                        else
                                ret = i915_gem_execbuffer_reserve_vma(vma, ring, need_relocs);
                        if (ret)
                                goto err;
                }

                /* Bind fresh objects */
                list_for_each_entry(vma, vmas, exec_list) {
                        if (drm_mm_node_allocated(&vma->node))
                                continue;

                        ret = i915_gem_execbuffer_reserve_vma(vma, ring, need_relocs);
                        if (ret)
                                goto err;
                }

err:
                if (ret != -ENOSPC || retry++)
                        return ret;

                /* Decrement pin count for bound objects */
                list_for_each_entry(vma, vmas, exec_list)
                        i915_gem_execbuffer_unreserve_vma(vma);

                ret = i915_gem_evict_vm(vm, true);
                if (ret)
                        return ret;
        } while (1);
}

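/*
 * Slow-path relocations: drop struct_mutex so that the relocation
 * entries can be copied from userspace with pagefaults enabled,
 * invalidate the user's presumed offsets, then retake the lock,
 * re-reserve the buffers and apply the relocations from the kernel
 * copy.
 */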
static int
i915_gem_execbuffer_relocate_slow(struct drm_device *dev,
                                  struct drm_i915_gem_execbuffer2 *args,
                                  struct drm_file *file,
                                  struct intel_engine_cs *ring,
                                  struct eb_vmas *eb,
                                  struct drm_i915_gem_exec_object2 *exec)
{
        struct drm_i915_gem_relocation_entry *reloc;
        struct i915_address_space *vm;
        struct i915_vma *vma;
        bool need_relocs;
        int *reloc_offset;
        int i, total, ret;
        unsigned count = args->buffer_count;

        if (WARN_ON(list_empty(&eb->vmas)))
                return 0;

        vm = list_first_entry(&eb->vmas, struct i915_vma, exec_list)->vm;

        /* We may process another execbuffer during the unlock... */
        while (!list_empty(&eb->vmas)) {
                vma = list_first_entry(&eb->vmas, struct i915_vma, exec_list);
                list_del_init(&vma->exec_list);
                i915_gem_execbuffer_unreserve_vma(vma);
                drm_gem_object_unreference(&vma->obj->base);
        }

        mutex_unlock(&dev->struct_mutex);

        total = 0;
        for (i = 0; i < count; i++)
                total += exec[i].relocation_count;

        reloc_offset = drm_malloc_ab(count, sizeof(*reloc_offset));
        reloc = drm_malloc_ab(total, sizeof(*reloc));
        if (reloc == NULL || reloc_offset == NULL) {
                drm_free_large(reloc);
                drm_free_large(reloc_offset);
                mutex_lock(&dev->struct_mutex);
                return -ENOMEM;
        }

        total = 0;
        for (i = 0; i < count; i++) {
                struct drm_i915_gem_relocation_entry __user *user_relocs;
                u64 invalid_offset = (u64)-1;
                int j;

                user_relocs = to_user_ptr(exec[i].relocs_ptr);

                if (copy_from_user(reloc+total, user_relocs,
                                   exec[i].relocation_count * sizeof(*reloc))) {
                        ret = -EFAULT;
                        mutex_lock(&dev->struct_mutex);
                        goto err;
                }

                /* As we do not update the known relocation offsets after
                 * relocating (due to the complexities in lock handling),
                 * we need to mark them as invalid now so that we force the
                 * relocation processing next time. Just in case the target
                 * object is evicted and then rebound into its old
                 * presumed_offset before the next execbuffer - if that
                 * happened we would make the mistake of assuming that the
                 * relocations were valid.
                 */
                for (j = 0; j < exec[i].relocation_count; j++) {
                        if (__copy_to_user(&user_relocs[j].presumed_offset,
                                           &invalid_offset,
                                           sizeof(invalid_offset))) {
                                ret = -EFAULT;
                                mutex_lock(&dev->struct_mutex);
                                goto err;
                        }
                }

                reloc_offset[i] = total;
                total += exec[i].relocation_count;
        }

        ret = i915_mutex_lock_interruptible(dev);
        if (ret) {
                mutex_lock(&dev->struct_mutex);
                goto err;
        }

        /* reacquire the objects */
        eb_reset(eb);
        ret = eb_lookup_vmas(eb, exec, args, vm, file);
        if (ret)
                goto err;

        need_relocs = (args->flags & I915_EXEC_NO_RELOC) == 0;
        ret = i915_gem_execbuffer_reserve(ring, &eb->vmas, &need_relocs);
        if (ret)
                goto err;

        list_for_each_entry(vma, &eb->vmas, exec_list) {
                int offset = vma->exec_entry - exec;
                ret = i915_gem_execbuffer_relocate_vma_slow(vma, eb,
                                                            reloc + reloc_offset[offset]);
                if (ret)
                        goto err;
        }

        /* Leave the user relocations as they are; this is the painfully slow
         * path, and we want to avoid the complication of dropping the lock
         * whilst having buffers reserved in the aperture and so causing
         * spurious ENOSPC for random operations.
         */

err:
        drm_free_large(reloc);
        drm_free_large(reloc_offset);
        return ret;
}

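/*
 * Make all buffers coherent for the GPU: sync against rendering on
 * other rings, clflush (and chipset-flush) any CPU-domain writes, fence
 * GTT writes with a wmb(), and invalidate the GPU caches before the
 * batch runs.
 */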
static int
i915_gem_execbuffer_move_to_gpu(struct intel_engine_cs *ring,
                                struct list_head *vmas)
{
        struct i915_vma *vma;
        uint32_t flush_domains = 0;
        bool flush_chipset = false;
        int ret;

        list_for_each_entry(vma, vmas, exec_list) {
                struct drm_i915_gem_object *obj = vma->obj;
                ret = i915_gem_object_sync(obj, ring);
                if (ret)
                        return ret;

                if (obj->base.write_domain & I915_GEM_DOMAIN_CPU)
                        flush_chipset |= i915_gem_clflush_object(obj, false);

                flush_domains |= obj->base.write_domain;
        }

        if (flush_chipset)
                i915_gem_chipset_flush(ring->dev);

        if (flush_domains & I915_GEM_DOMAIN_GTT)
                wmb();

        /* Unconditionally invalidate gpu caches and ensure that we do flush
         * any residual writes from the previous batch.
         */
        return intel_ring_invalidate_all_caches(ring);
}

static bool
i915_gem_check_execbuffer(struct drm_i915_gem_execbuffer2 *exec)
{
        if (exec->flags & __I915_EXEC_UNKNOWN_FLAGS)
                return false;

        return ((exec->batch_start_offset | exec->batch_len) & 0x7) == 0;
}

static int
validate_exec_list(struct drm_i915_gem_exec_object2 *exec,
                   int count)
{
        int i;
        unsigned relocs_total = 0;
        unsigned relocs_max = UINT_MAX / sizeof(struct drm_i915_gem_relocation_entry);

        for (i = 0; i < count; i++) {
                char __user *ptr = to_user_ptr(exec[i].relocs_ptr);
                int length; /* limited by fault_in_pages_readable() */

                if (exec[i].flags & __EXEC_OBJECT_UNKNOWN_FLAGS)
                        return -EINVAL;

                /* First check for malicious input causing overflow in
                 * the worst case where we need to allocate the entire
                 * relocation tree as a single array.
                 */
                if (exec[i].relocation_count > relocs_max - relocs_total)
                        return -EINVAL;
                relocs_total += exec[i].relocation_count;

                length = exec[i].relocation_count *
                        sizeof(struct drm_i915_gem_relocation_entry);
                /*
                 * We must check that the entire relocation array is safe
                 * to read, but since we may need to update the presumed
                 * offsets during execution, check for full write access.
                 */
                if (!access_ok(VERIFY_WRITE, ptr, length))
                        return -EFAULT;

                if (likely(!i915.prefault_disable)) {
                        if (fault_in_multipages_readable(ptr, length))
                                return -EFAULT;
                }
        }

        return 0;
}

static struct intel_context *
i915_gem_validate_context(struct drm_device *dev, struct drm_file *file,
                          struct intel_engine_cs *ring, const u32 ctx_id)
{
        struct intel_context *ctx = NULL;
        struct i915_ctx_hang_stats *hs;

        if (ring->id != RCS && ctx_id != DEFAULT_CONTEXT_HANDLE)
                return ERR_PTR(-EINVAL);

        ctx = i915_gem_context_get(file->driver_priv, ctx_id);
        if (IS_ERR(ctx))
                return ctx;

        hs = &ctx->hang_stats;
        if (hs->banned) {
                DRM_DEBUG("Context %u tried to submit while banned\n", ctx_id);
                return ERR_PTR(-EIO);
        }

        return ctx;
}

static void
i915_gem_execbuffer_move_to_active(struct list_head *vmas,
                                   struct intel_engine_cs *ring)
{
        struct i915_vma *vma;

        list_for_each_entry(vma, vmas, exec_list) {
                struct drm_i915_gem_object *obj = vma->obj;
                u32 old_read = obj->base.read_domains;
                u32 old_write = obj->base.write_domain;

                obj->base.write_domain = obj->base.pending_write_domain;
                if (obj->base.write_domain == 0)
                        obj->base.pending_read_domains |= obj->base.read_domains;
                obj->base.read_domains = obj->base.pending_read_domains;
                obj->fenced_gpu_access = obj->pending_fenced_gpu_access;

                i915_vma_move_to_active(vma, ring);
                if (obj->base.write_domain) {
                        obj->dirty = 1;
                        obj->last_write_seqno = intel_ring_get_seqno(ring);

                        intel_fb_obj_invalidate(obj, ring);

                        /* update for the implicit flush after a batch */
                        obj->base.write_domain &= ~I915_GEM_GPU_DOMAINS;
                }

                trace_i915_gem_object_change_domain(obj, old_read, old_write);
        }
}

static void
i915_gem_execbuffer_retire_commands(struct drm_device *dev,
                                    struct drm_file *file,
                                    struct intel_engine_cs *ring,
                                    struct drm_i915_gem_object *obj)
{
        /* Unconditionally force add_request to emit a full flush. */
        ring->gpu_caches_dirty = true;

        /* Add a breadcrumb for the completion of the batch buffer */
        (void)__i915_add_request(ring, file, obj, NULL);
}

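/*
 * For I915_EXEC_GEN7_SOL_RESET: zero the four GEN7_SO_WRITE_OFFSET
 * registers with MI_LOAD_REGISTER_IMM so streamout restarts from the
 * beginning of the buffers. Only valid on the gen7 render ring.
 */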
static int
i915_reset_gen7_sol_offsets(struct drm_device *dev,
                            struct intel_engine_cs *ring)
{
        struct drm_i915_private *dev_priv = dev->dev_private;
        int ret, i;

        if (!IS_GEN7(dev) || ring != &dev_priv->ring[RCS]) {
                DRM_DEBUG("sol reset is gen7/rcs only\n");
                return -EINVAL;
        }

        ret = intel_ring_begin(ring, 4 * 3);
        if (ret)
                return ret;

        for (i = 0; i < 4; i++) {
                intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1));
                intel_ring_emit(ring, GEN7_SO_WRITE_OFFSET(i));
                intel_ring_emit(ring, 0);
        }

        intel_ring_advance(ring);

        return 0;
}

static int
legacy_ringbuffer_submission(struct drm_device *dev, struct drm_file *file,
                             struct intel_engine_cs *ring,
                             struct intel_context *ctx,
                             struct drm_i915_gem_execbuffer2 *args,
                             struct list_head *vmas,
                             struct drm_i915_gem_object *batch_obj,
                             u64 exec_start, u32 flags)
{
        struct drm_clip_rect *cliprects = NULL;
        struct drm_i915_private *dev_priv = dev->dev_private;
        u64 exec_len;
        int instp_mode;
        u32 instp_mask;
        int i, ret = 0;

        if (args->num_cliprects != 0) {
                if (ring != &dev_priv->ring[RCS]) {
                        DRM_DEBUG("clip rectangles are only valid with the render ring\n");
                        return -EINVAL;
                }

                if (INTEL_INFO(dev)->gen >= 5) {
                        DRM_DEBUG("clip rectangles are only valid on pre-gen5\n");
                        return -EINVAL;
                }

                if (args->num_cliprects > UINT_MAX / sizeof(*cliprects)) {
                        DRM_DEBUG("execbuf with %u cliprects\n",
                                  args->num_cliprects);
                        return -EINVAL;
                }

                cliprects = kcalloc(args->num_cliprects,
                                    sizeof(*cliprects),
                                    GFP_KERNEL);
                if (cliprects == NULL) {
                        ret = -ENOMEM;
                        goto error;
                }

                if (copy_from_user(cliprects,
                                   to_user_ptr(args->cliprects_ptr),
                                   sizeof(*cliprects)*args->num_cliprects)) {
                        ret = -EFAULT;
                        goto error;
                }
        } else {
                if (args->DR4 == 0xffffffff) {
                        DRM_DEBUG("UXA submitting garbage DR4, fixing up\n");
                        args->DR4 = 0;
                }

                if (args->DR1 || args->DR4 || args->cliprects_ptr) {
                        DRM_DEBUG("0 cliprects but dirt in cliprects fields\n");
                        return -EINVAL;
                }
        }

        ret = i915_gem_execbuffer_move_to_gpu(ring, vmas);
        if (ret)
                goto error;

        ret = i915_switch_context(ring, ctx);
        if (ret)
                goto error;

        instp_mode = args->flags & I915_EXEC_CONSTANTS_MASK;
        instp_mask = I915_EXEC_CONSTANTS_MASK;
        switch (instp_mode) {
        case I915_EXEC_CONSTANTS_REL_GENERAL:
        case I915_EXEC_CONSTANTS_ABSOLUTE:
        case I915_EXEC_CONSTANTS_REL_SURFACE:
                if (instp_mode != 0 && ring != &dev_priv->ring[RCS]) {
                        DRM_DEBUG("non-0 rel constants mode on non-RCS\n");
                        ret = -EINVAL;
                        goto error;
                }

                if (instp_mode != dev_priv->relative_constants_mode) {
                        if (INTEL_INFO(dev)->gen < 4) {
                                DRM_DEBUG("no rel constants on pre-gen4\n");
                                ret = -EINVAL;
                                goto error;
                        }

                        if (INTEL_INFO(dev)->gen > 5 &&
                            instp_mode == I915_EXEC_CONSTANTS_REL_SURFACE) {
                                DRM_DEBUG("rel surface constants mode invalid on gen5+\n");
                                ret = -EINVAL;
                                goto error;
                        }

                        /* The HW changed the meaning of this bit on gen6 */
                        if (INTEL_INFO(dev)->gen >= 6)
                                instp_mask &= ~I915_EXEC_CONSTANTS_REL_SURFACE;
                }
                break;
        default:
                DRM_DEBUG("execbuf with unknown constants: %d\n", instp_mode);
                ret = -EINVAL;
                goto error;
        }

        if (ring == &dev_priv->ring[RCS] &&
            instp_mode != dev_priv->relative_constants_mode) {
                ret = intel_ring_begin(ring, 4);
                if (ret)
                        goto error;

                intel_ring_emit(ring, MI_NOOP);
                intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1));
                intel_ring_emit(ring, INSTPM);
                intel_ring_emit(ring, instp_mask << 16 | instp_mode);
                intel_ring_advance(ring);

                dev_priv->relative_constants_mode = instp_mode;
        }

        if (args->flags & I915_EXEC_GEN7_SOL_RESET) {
                ret = i915_reset_gen7_sol_offsets(dev, ring);
                if (ret)
                        goto error;
        }

        exec_len = args->batch_len;
        if (cliprects) {
                for (i = 0; i < args->num_cliprects; i++) {
                        ret = i915_emit_box(dev, &cliprects[i],
                                            args->DR1, args->DR4);
                        if (ret)
                                goto error;

                        ret = ring->dispatch_execbuffer(ring,
                                                        exec_start, exec_len,
                                                        flags);
                        if (ret)
                                goto error;
                }
        } else {
                ret = ring->dispatch_execbuffer(ring,
                                                exec_start, exec_len,
                                                flags);
                if (ret)
                        return ret;
        }

        trace_i915_gem_ring_dispatch(ring, intel_ring_get_seqno(ring), flags);

        i915_gem_execbuffer_move_to_active(vmas, ring);
        i915_gem_execbuffer_retire_commands(dev, file, ring, batch_obj);

error:
        kfree(cliprects);
        return ret;
}

/**
 * Find one BSD ring to dispatch the corresponding BSD command.
 * The ring id is returned.
 */
static int gen8_dispatch_bsd_ring(struct drm_device *dev,
                                  struct drm_file *file)
{
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct drm_i915_file_private *file_priv = file->driver_priv;

        /* Check whether the file_priv has already selected one ring */
        if (file_priv->bsd_ring)
                return file_priv->bsd_ring->id;
        else {
                /* If not, use the ping-pong mechanism to select one */
                int ring_id;

                mutex_lock(&dev->struct_mutex);
                if (dev_priv->mm.bsd_ring_dispatch_index == 0) {
                        ring_id = VCS;
                        dev_priv->mm.bsd_ring_dispatch_index = 1;
                } else {
                        ring_id = VCS2;
                        dev_priv->mm.bsd_ring_dispatch_index = 0;
                }
                file_priv->bsd_ring = &dev_priv->ring[ring_id];
                mutex_unlock(&dev->struct_mutex);
                return ring_id;
        }
}

static struct drm_i915_gem_object *
eb_get_batch(struct eb_vmas *eb)
{
        struct i915_vma *vma = list_entry(eb->vmas.prev, typeof(*vma), exec_list);

        /*
         * SNA is doing fancy tricks with compressing batch buffers, which leads
         * to negative relocation deltas. Usually that works out ok since the
         * relocate address is still positive, except when the batch is placed
         * very low in the GTT. Ensure this doesn't happen.
         *
         * Note that actual hangs have only been observed on gen7, but for
         * paranoia do it everywhere.
         */
        vma->exec_entry->flags |= __EXEC_OBJECT_NEEDS_BIAS;

        return vma->obj;
}

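/*
 * Main execbuffer path: validate the request, look up all objects and
 * their vmas, reserve them in the target address space, apply the
 * relocations (falling back to the slow path on -EFAULT), optionally
 * run the command parser over the batch, then dispatch it and queue a
 * request to retire it.
 */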
static int
i915_gem_do_execbuffer(struct drm_device *dev, void *data,
                       struct drm_file *file,
                       struct drm_i915_gem_execbuffer2 *args,
                       struct drm_i915_gem_exec_object2 *exec)
{
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct eb_vmas *eb;
        struct drm_i915_gem_object *batch_obj;
        struct intel_engine_cs *ring;
        struct intel_context *ctx;
        struct i915_address_space *vm;
        const u32 ctx_id = i915_execbuffer2_get_context_id(*args);
        u64 exec_start = args->batch_start_offset;
        u32 flags;
        int ret;
        bool need_relocs;

        if (!i915_gem_check_execbuffer(args))
                return -EINVAL;

        ret = validate_exec_list(exec, args->buffer_count);
        if (ret)
                return ret;

        flags = 0;
        if (args->flags & I915_EXEC_SECURE) {
                if (!file->is_master || !capable(CAP_SYS_ADMIN))
                        return -EPERM;

                flags |= I915_DISPATCH_SECURE;
        }
        if (args->flags & I915_EXEC_IS_PINNED)
                flags |= I915_DISPATCH_PINNED;

        if ((args->flags & I915_EXEC_RING_MASK) > LAST_USER_RING) {
                DRM_DEBUG("execbuf with unknown ring: %d\n",
                          (int)(args->flags & I915_EXEC_RING_MASK));
                return -EINVAL;
        }

        if ((args->flags & I915_EXEC_RING_MASK) == I915_EXEC_DEFAULT)
                ring = &dev_priv->ring[RCS];
        else if ((args->flags & I915_EXEC_RING_MASK) == I915_EXEC_BSD) {
                if (HAS_BSD2(dev)) {
                        int ring_id;
                        ring_id = gen8_dispatch_bsd_ring(dev, file);
                        ring = &dev_priv->ring[ring_id];
                } else
                        ring = &dev_priv->ring[VCS];
        } else
                ring = &dev_priv->ring[(args->flags & I915_EXEC_RING_MASK) - 1];

        if (!intel_ring_initialized(ring)) {
                DRM_DEBUG("execbuf with invalid ring: %d\n",
                          (int)(args->flags & I915_EXEC_RING_MASK));
                return -EINVAL;
        }

        if (args->buffer_count < 1) {
                DRM_DEBUG("execbuf with %d buffers\n", args->buffer_count);
                return -EINVAL;
        }

        intel_runtime_pm_get(dev_priv);

        ret = i915_mutex_lock_interruptible(dev);
        if (ret)
                goto pre_mutex_err;

        if (dev_priv->ums.mm_suspended) {
                mutex_unlock(&dev->struct_mutex);
                ret = -EBUSY;
                goto pre_mutex_err;
        }

        ctx = i915_gem_validate_context(dev, file, ring, ctx_id);
        if (IS_ERR(ctx)) {
                mutex_unlock(&dev->struct_mutex);
                ret = PTR_ERR(ctx);
                goto pre_mutex_err;
        }

        i915_gem_context_reference(ctx);

        vm = ctx->vm;
        if (!USES_FULL_PPGTT(dev))
                vm = &dev_priv->gtt.base;

        eb = eb_create(args);
        if (eb == NULL) {
                i915_gem_context_unreference(ctx);
                mutex_unlock(&dev->struct_mutex);
                ret = -ENOMEM;
                goto pre_mutex_err;
        }

        /* Look up object handles */
        ret = eb_lookup_vmas(eb, exec, args, vm, file);
        if (ret)
                goto err;

        /* take note of the batch buffer before we might reorder the lists */
        batch_obj = eb_get_batch(eb);

        /* Move the objects en-masse into the GTT, evicting if necessary. */
        need_relocs = (args->flags & I915_EXEC_NO_RELOC) == 0;
        ret = i915_gem_execbuffer_reserve(ring, &eb->vmas, &need_relocs);
        if (ret)
                goto err;

        /* The objects are in their final locations, apply the relocations. */
        if (need_relocs)
                ret = i915_gem_execbuffer_relocate(eb);
        if (ret) {
                if (ret == -EFAULT) {
                        ret = i915_gem_execbuffer_relocate_slow(dev, args, file, ring,
                                                                eb, exec);
                        BUG_ON(!mutex_is_locked(&dev->struct_mutex));
                }
                if (ret)
                        goto err;
        }

        /* Set the pending read domains for the batch buffer to COMMAND */
        if (batch_obj->base.pending_write_domain) {
                DRM_DEBUG("Attempting to use self-modifying batch buffer\n");
                ret = -EINVAL;
                goto err;
        }
        batch_obj->base.pending_read_domains |= I915_GEM_DOMAIN_COMMAND;

        if (i915_needs_cmd_parser(ring)) {
                ret = i915_parse_cmds(ring,
                                      batch_obj,
                                      args->batch_start_offset,
                                      file->is_master);
                if (ret)
                        goto err;

                /*
                 * XXX: Actually do this when enabling batch copy...
                 *
                 * Set the DISPATCH_SECURE bit to remove the NON_SECURE bit
                 * from MI_BATCH_BUFFER_START commands issued in the
                 * dispatch_execbuffer implementations. We specifically don't
                 * want that set when the command parser is enabled.
                 */
        }

        /* snb/ivb/vlv conflate the "batch in ppgtt" bit with the "non-secure
         * batch" bit. Hence we need to pin secure batches into the global gtt.
         * hsw should have this fixed, but bdw mucks it up again. */
        if (flags & I915_DISPATCH_SECURE &&
            !batch_obj->has_global_gtt_mapping) {
                /* When we have multiple VMs, we'll need to make sure that we
                 * allocate space first */
                struct i915_vma *vma = i915_gem_obj_to_ggtt(batch_obj);
                BUG_ON(!vma);
                vma->bind_vma(vma, batch_obj->cache_level, GLOBAL_BIND);
        }

        if (flags & I915_DISPATCH_SECURE)
                exec_start += i915_gem_obj_ggtt_offset(batch_obj);
        else
                exec_start += i915_gem_obj_offset(batch_obj, vm);

        ret = legacy_ringbuffer_submission(dev, file, ring, ctx,
                                           args, &eb->vmas, batch_obj,
                                           exec_start, flags);
        if (ret)
                goto err;

err:
        /* the request owns the ref now */
        i915_gem_context_unreference(ctx);
        eb_destroy(eb);

        mutex_unlock(&dev->struct_mutex);

pre_mutex_err:
        /* intel_gpu_busy should also get a ref, so it will free when the
         * device is really idle. */
        intel_runtime_pm_put(dev_priv);
        return ret;
}

/*
 * Legacy execbuffer just creates an exec2 list from the original exec object
 * list array and passes it to the real function.
 */
int
i915_gem_execbuffer(struct drm_device *dev, void *data,
                    struct drm_file *file)
{
        struct drm_i915_gem_execbuffer *args = data;
        struct drm_i915_gem_execbuffer2 exec2;
        struct drm_i915_gem_exec_object *exec_list = NULL;
        struct drm_i915_gem_exec_object2 *exec2_list = NULL;
        int ret, i;

        if (args->buffer_count < 1) {
                DRM_DEBUG("execbuf with %d buffers\n", args->buffer_count);
                return -EINVAL;
        }

        /* Copy in the exec list from userland */
        exec_list = drm_malloc_ab(sizeof(*exec_list), args->buffer_count);
        exec2_list = drm_malloc_ab(sizeof(*exec2_list), args->buffer_count);
        if (exec_list == NULL || exec2_list == NULL) {
                DRM_DEBUG("Failed to allocate exec list for %d buffers\n",
                          args->buffer_count);
                drm_free_large(exec_list);
                drm_free_large(exec2_list);
                return -ENOMEM;
        }
        ret = copy_from_user(exec_list,
                             to_user_ptr(args->buffers_ptr),
                             sizeof(*exec_list) * args->buffer_count);
        if (ret != 0) {
                DRM_DEBUG("copy %d exec entries failed %d\n",
                          args->buffer_count, ret);
                drm_free_large(exec_list);
                drm_free_large(exec2_list);
                return -EFAULT;
        }

        for (i = 0; i < args->buffer_count; i++) {
                exec2_list[i].handle = exec_list[i].handle;
                exec2_list[i].relocation_count = exec_list[i].relocation_count;
                exec2_list[i].relocs_ptr = exec_list[i].relocs_ptr;
                exec2_list[i].alignment = exec_list[i].alignment;
                exec2_list[i].offset = exec_list[i].offset;
                if (INTEL_INFO(dev)->gen < 4)
                        exec2_list[i].flags = EXEC_OBJECT_NEEDS_FENCE;
                else
                        exec2_list[i].flags = 0;
        }

        exec2.buffers_ptr = args->buffers_ptr;
        exec2.buffer_count = args->buffer_count;
        exec2.batch_start_offset = args->batch_start_offset;
        exec2.batch_len = args->batch_len;
        exec2.DR1 = args->DR1;
        exec2.DR4 = args->DR4;
        exec2.num_cliprects = args->num_cliprects;
        exec2.cliprects_ptr = args->cliprects_ptr;
        exec2.flags = I915_EXEC_RENDER;
        i915_execbuffer2_set_context_id(exec2, 0);

        ret = i915_gem_do_execbuffer(dev, data, file, &exec2, exec2_list);
        if (!ret) {
                struct drm_i915_gem_exec_object __user *user_exec_list =
                        to_user_ptr(args->buffers_ptr);

                /* Copy the new buffer offsets back to the user's exec list. */
                for (i = 0; i < args->buffer_count; i++) {
                        ret = __copy_to_user(&user_exec_list[i].offset,
                                             &exec2_list[i].offset,
                                             sizeof(user_exec_list[i].offset));
                        if (ret) {
                                ret = -EFAULT;
                                DRM_DEBUG("failed to copy %d exec entries "
                                          "back to user (%d)\n",
                                          args->buffer_count, ret);
                                break;
                        }
                }
        }

        drm_free_large(exec_list);
        drm_free_large(exec2_list);
        return ret;
}

int
i915_gem_execbuffer2(struct drm_device *dev, void *data,
                     struct drm_file *file)
{
        struct drm_i915_gem_execbuffer2 *args = data;
        struct drm_i915_gem_exec_object2 *exec2_list = NULL;
        int ret;

        if (args->buffer_count < 1 ||
            args->buffer_count > UINT_MAX / sizeof(*exec2_list)) {
                DRM_DEBUG("execbuf2 with %d buffers\n", args->buffer_count);
                return -EINVAL;
        }

        if (args->rsvd2 != 0) {
                DRM_DEBUG("dirty rsvd2 field\n");
                return -EINVAL;
        }

        exec2_list = kmalloc(sizeof(*exec2_list)*args->buffer_count,
                             GFP_TEMPORARY | __GFP_NOWARN | __GFP_NORETRY);
        if (exec2_list == NULL)
                exec2_list = drm_malloc_ab(sizeof(*exec2_list),
                                           args->buffer_count);
        if (exec2_list == NULL) {
                DRM_DEBUG("Failed to allocate exec list for %d buffers\n",
                          args->buffer_count);
                return -ENOMEM;
        }
        ret = copy_from_user(exec2_list,
                             to_user_ptr(args->buffers_ptr),
                             sizeof(*exec2_list) * args->buffer_count);
        if (ret != 0) {
                DRM_DEBUG("copy %d exec entries failed %d\n",
                          args->buffer_count, ret);
                drm_free_large(exec2_list);
                return -EFAULT;
        }

        ret = i915_gem_do_execbuffer(dev, data, file, args, exec2_list);
        if (!ret) {
                /* Copy the new buffer offsets back to the user's exec list. */
                struct drm_i915_gem_exec_object2 __user *user_exec_list =
                        to_user_ptr(args->buffers_ptr);
                int i;

                for (i = 0; i < args->buffer_count; i++) {
                        ret = __copy_to_user(&user_exec_list[i].offset,
                                             &exec2_list[i].offset,
                                             sizeof(user_exec_list[i].offset));
                        if (ret) {
                                ret = -EFAULT;
                                DRM_DEBUG("failed to copy %d exec entries "
                                          "back to user\n",
                                          args->buffer_count);
                                break;
                        }
                }
        }

        drm_free_large(exec2_list);
        return ret;
}