drm/i915: Reject NEEDS_GTT relocations with full ppgtt
drivers/gpu/drm/i915/i915_gem_execbuffer.c (deliverable/linux.git)
1 /*
2 * Copyright © 2008,2010 Intel Corporation
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21 * IN THE SOFTWARE.
22 *
23 * Authors:
24 * Eric Anholt <eric@anholt.net>
25 * Chris Wilson <chris@chris-wilson.co.uk>
26 *
27 */
28
29 #include <drm/drmP.h>
30 #include <drm/i915_drm.h>
31 #include "i915_drv.h"
32 #include "i915_trace.h"
33 #include "intel_drv.h"
34 #include <linux/dma_remapping.h>
35
36 #define __EXEC_OBJECT_HAS_PIN (1<<31)
37 #define __EXEC_OBJECT_HAS_FENCE (1<<30)
38
39 struct eb_vmas {
40 struct list_head vmas;
41 int and;
42 union {
43 struct i915_vma *lut[0];
44 struct hlist_head buckets[0];
45 };
46 };
47
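/* Allocate the handle->vma lookup structure: a flat LUT indexed by
* execbuffer position when userspace passes targets as indices
* (I915_EXEC_HANDLE_LUT), otherwise a small hash table keyed by the
* GEM handle. */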
48 static struct eb_vmas *
49 eb_create(struct drm_i915_gem_execbuffer2 *args)
50 {
51 struct eb_vmas *eb = NULL;
52
53 if (args->flags & I915_EXEC_HANDLE_LUT) {
54 unsigned size = args->buffer_count;
55 size *= sizeof(struct i915_vma *);
56 size += sizeof(struct eb_vmas);
57 eb = kmalloc(size, GFP_TEMPORARY | __GFP_NOWARN | __GFP_NORETRY);
58 }
59
60 if (eb == NULL) {
61 unsigned size = args->buffer_count;
62 unsigned count = PAGE_SIZE / sizeof(struct hlist_head) / 2;
63 BUILD_BUG_ON_NOT_POWER_OF_2(PAGE_SIZE / sizeof(struct hlist_head));
64 while (count > 2*size)
65 count >>= 1;
66 eb = kzalloc(count*sizeof(struct hlist_head) +
67 sizeof(struct eb_vmas),
68 GFP_TEMPORARY);
69 if (eb == NULL)
70 return eb;
71
72 eb->and = count - 1;
73 } else
74 eb->and = -args->buffer_count;
75
76 INIT_LIST_HEAD(&eb->vmas);
77 return eb;
78 }
79
80 static void
81 eb_reset(struct eb_vmas *eb)
82 {
83 if (eb->and >= 0)
84 memset(eb->buckets, 0, (eb->and+1)*sizeof(struct hlist_head));
85 }
86
87 static int
88 eb_lookup_vmas(struct eb_vmas *eb,
89 struct drm_i915_gem_exec_object2 *exec,
90 const struct drm_i915_gem_execbuffer2 *args,
91 struct i915_address_space *vm,
92 struct drm_file *file)
93 {
94 struct drm_i915_private *dev_priv = vm->dev->dev_private;
95 struct drm_i915_gem_object *obj;
96 struct list_head objects;
97 int i, ret = 0;
98
99 INIT_LIST_HEAD(&objects);
100 spin_lock(&file->table_lock);
101 /* Grab a reference to the object and release the lock so we can look up
102 * or create the VMA without using GFP_ATOMIC. */
103 for (i = 0; i < args->buffer_count; i++) {
104 obj = to_intel_bo(idr_find(&file->object_idr, exec[i].handle));
105 if (obj == NULL) {
106 spin_unlock(&file->table_lock);
107 DRM_DEBUG("Invalid object handle %d at index %d\n",
108 exec[i].handle, i);
109 ret = -ENOENT;
110 goto out;
111 }
112
113 if (!list_empty(&obj->obj_exec_link)) {
114 spin_unlock(&file->table_lock);
115 DRM_DEBUG("Object %p [handle %d, index %d] appears more than once in object list\n",
116 obj, exec[i].handle, i);
117 ret = -EINVAL;
118 goto out;
119 }
120
121 drm_gem_object_reference(&obj->base);
122 list_add_tail(&obj->obj_exec_link, &objects);
123 }
124 spin_unlock(&file->table_lock);
125
126 i = 0;
127 list_for_each_entry(obj, &objects, obj_exec_link) {
128 struct i915_vma *vma;
129 struct i915_address_space *bind_vm = vm;
130
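/* EXEC_OBJECT_NEEDS_GTT forces a global GTT binding, which cannot be
* honoured once the batch runs from a full per-context ppgtt, so
* reject the combination outright. */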
131 if (exec[i].flags & EXEC_OBJECT_NEEDS_GTT &&
132 USES_FULL_PPGTT(vm->dev)) {
133 ret = -EINVAL;
134 goto out;
135 }
136
137 /* If we have secure dispatch, or userspace assures us that it
138 * knows what it is doing, use the GGTT VM.
139 */
140 if (exec[i].flags & EXEC_OBJECT_NEEDS_GTT ||
141 ((args->flags & I915_EXEC_SECURE) &&
142 (i == (args->buffer_count - 1))))
143 bind_vm = &dev_priv->gtt.base;
144
145 /*
146 * NOTE: We can leak any vmas created here when something fails
147 * later on. But that's no issue since vma_unbind can deal with
148 * vmas which are not actually bound. And since only
149 * lookup_or_create exists as an interface to get at the vma
150 * from the (obj, vm) we don't run the risk of creating
151 * duplicated vmas for the same vm.
152 */
153 vma = i915_gem_obj_lookup_or_create_vma(obj, bind_vm);
154 if (IS_ERR(vma)) {
155 DRM_DEBUG("Failed to lookup VMA\n");
156 ret = PTR_ERR(vma);
157 goto out;
158 }
159
160 list_add_tail(&vma->exec_list, &eb->vmas);
161
162 vma->exec_entry = &exec[i];
163 if (eb->and < 0) {
164 eb->lut[i] = vma;
165 } else {
166 uint32_t handle = args->flags & I915_EXEC_HANDLE_LUT ? i : exec[i].handle;
167 vma->exec_handle = handle;
168 hlist_add_head(&vma->exec_node,
169 &eb->buckets[handle & eb->and]);
170 }
171 ++i;
172 }
173
174
175 out:
176 while (!list_empty(&objects)) {
177 obj = list_first_entry(&objects,
178 struct drm_i915_gem_object,
179 obj_exec_link);
180 list_del_init(&obj->obj_exec_link);
181 if (ret)
182 drm_gem_object_unreference(&obj->base);
183 }
184 return ret;
185 }
186
187 static struct i915_vma *eb_get_vma(struct eb_vmas *eb, unsigned long handle)
188 {
189 if (eb->and < 0) {
190 if (handle >= -eb->and)
191 return NULL;
192 return eb->lut[handle];
193 } else {
194 struct hlist_head *head;
195 struct hlist_node *node;
196
197 head = &eb->buckets[handle & eb->and];
198 hlist_for_each(node, head) {
199 struct i915_vma *vma;
200
201 vma = hlist_entry(node, struct i915_vma, exec_node);
202 if (vma->exec_handle == handle)
203 return vma;
204 }
205 return NULL;
206 }
207 }
208
209 static void
210 i915_gem_execbuffer_unreserve_vma(struct i915_vma *vma)
211 {
212 struct drm_i915_gem_exec_object2 *entry;
213 struct drm_i915_gem_object *obj = vma->obj;
214
215 if (!drm_mm_node_allocated(&vma->node))
216 return;
217
218 entry = vma->exec_entry;
219
220 if (entry->flags & __EXEC_OBJECT_HAS_FENCE)
221 i915_gem_object_unpin_fence(obj);
222
223 if (entry->flags & __EXEC_OBJECT_HAS_PIN)
224 vma->pin_count--;
225
226 entry->flags &= ~(__EXEC_OBJECT_HAS_FENCE | __EXEC_OBJECT_HAS_PIN);
227 }
228
229 static void eb_destroy(struct eb_vmas *eb)
230 {
231 while (!list_empty(&eb->vmas)) {
232 struct i915_vma *vma;
233
234 vma = list_first_entry(&eb->vmas,
235 struct i915_vma,
236 exec_list);
237 list_del_init(&vma->exec_list);
238 i915_gem_execbuffer_unreserve_vma(vma);
239 drm_gem_object_unreference(&vma->obj->base);
240 }
241 kfree(eb);
242 }
243
244 static inline int use_cpu_reloc(struct drm_i915_gem_object *obj)
245 {
246 return (HAS_LLC(obj->base.dev) ||
247 obj->base.write_domain == I915_GEM_DOMAIN_CPU ||
248 !obj->map_and_fenceable ||
249 obj->cache_level != I915_CACHE_NONE);
250 }
251
252 static int
253 relocate_entry_cpu(struct drm_i915_gem_object *obj,
254 struct drm_i915_gem_relocation_entry *reloc)
255 {
256 struct drm_device *dev = obj->base.dev;
257 uint32_t page_offset = offset_in_page(reloc->offset);
258 char *vaddr;
259 int ret = -EINVAL;
260
261 ret = i915_gem_object_set_to_cpu_domain(obj, true);
262 if (ret)
263 return ret;
264
265 vaddr = kmap_atomic(i915_gem_object_get_page(obj,
266 reloc->offset >> PAGE_SHIFT));
267 *(uint32_t *)(vaddr + page_offset) = reloc->delta;
268
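/* On gen8+ addresses are 64 bit, so also clear the upper dword of the
* relocation, remapping if it spills over into the next page. */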
269 if (INTEL_INFO(dev)->gen >= 8) {
270 page_offset = offset_in_page(page_offset + sizeof(uint32_t));
271
272 if (page_offset == 0) {
273 kunmap_atomic(vaddr);
274 vaddr = kmap_atomic(i915_gem_object_get_page(obj,
275 (reloc->offset + sizeof(uint32_t)) >> PAGE_SHIFT));
276 }
277
278 *(uint32_t *)(vaddr + page_offset) = 0;
279 }
280
281 kunmap_atomic(vaddr);
282
283 return 0;
284 }
285
286 static int
287 relocate_entry_gtt(struct drm_i915_gem_object *obj,
288 struct drm_i915_gem_relocation_entry *reloc)
289 {
290 struct drm_device *dev = obj->base.dev;
291 struct drm_i915_private *dev_priv = dev->dev_private;
292 uint32_t __iomem *reloc_entry;
293 void __iomem *reloc_page;
294 int ret = -EINVAL;
295
296 ret = i915_gem_object_set_to_gtt_domain(obj, true);
297 if (ret)
298 return ret;
299
300 ret = i915_gem_object_put_fence(obj);
301 if (ret)
302 return ret;
303
304 /* Map the page containing the relocation we're going to perform. */
305 reloc->offset += i915_gem_obj_ggtt_offset(obj);
306 reloc_page = io_mapping_map_atomic_wc(dev_priv->gtt.mappable,
307 reloc->offset & PAGE_MASK);
308 reloc_entry = (uint32_t __iomem *)
309 (reloc_page + offset_in_page(reloc->offset));
310 iowrite32(reloc->delta, reloc_entry);
311
312 if (INTEL_INFO(dev)->gen >= 8) {
313 reloc_entry += 1;
314
315 if (offset_in_page(reloc->offset + sizeof(uint32_t)) == 0) {
316 io_mapping_unmap_atomic(reloc_page);
317 reloc_page = io_mapping_map_atomic_wc(
318 dev_priv->gtt.mappable,
319 reloc->offset + sizeof(uint32_t));
320 reloc_entry = reloc_page;
321 }
322
323 iowrite32(0, reloc_entry);
324 }
325
326 io_mapping_unmap_atomic(reloc_page);
327
328 return 0;
329 }
330
331 static int
332 i915_gem_execbuffer_relocate_entry(struct drm_i915_gem_object *obj,
333 struct eb_vmas *eb,
334 struct drm_i915_gem_relocation_entry *reloc)
335 {
336 struct drm_device *dev = obj->base.dev;
337 struct drm_gem_object *target_obj;
338 struct drm_i915_gem_object *target_i915_obj;
339 struct i915_vma *target_vma;
340 uint32_t target_offset;
341 int ret = -EINVAL;
342
343 /* we already hold a reference to all valid objects */
344 target_vma = eb_get_vma(eb, reloc->target_handle);
345 if (unlikely(target_vma == NULL))
346 return -ENOENT;
347 target_i915_obj = target_vma->obj;
348 target_obj = &target_vma->obj->base;
349
350 target_offset = target_vma->node.start;
351
352 /* Sandybridge PPGTT errata: We need a global gtt mapping for MI and
353 * pipe_control writes because the gpu doesn't properly redirect them
354 * through the ppgtt for non-secure batchbuffers. */
355 if (unlikely(IS_GEN6(dev) &&
356 reloc->write_domain == I915_GEM_DOMAIN_INSTRUCTION &&
357 !target_i915_obj->has_global_gtt_mapping)) {
358 struct i915_vma *vma =
359 list_first_entry(&target_i915_obj->vma_list,
360 typeof(*vma), vma_link);
361 vma->bind_vma(vma, target_i915_obj->cache_level, GLOBAL_BIND);
362 }
363
364 /* Validate that the target is in a valid r/w GPU domain */
365 if (unlikely(reloc->write_domain & (reloc->write_domain - 1))) {
366 DRM_DEBUG("reloc with multiple write domains: "
367 "obj %p target %d offset %d "
368 "read %08x write %08x",
369 obj, reloc->target_handle,
370 (int) reloc->offset,
371 reloc->read_domains,
372 reloc->write_domain);
373 return ret;
374 }
375 if (unlikely((reloc->write_domain | reloc->read_domains)
376 & ~I915_GEM_GPU_DOMAINS)) {
377 DRM_DEBUG("reloc with read/write non-GPU domains: "
378 "obj %p target %d offset %d "
379 "read %08x write %08x",
380 obj, reloc->target_handle,
381 (int) reloc->offset,
382 reloc->read_domains,
383 reloc->write_domain);
384 return ret;
385 }
386
387 target_obj->pending_read_domains |= reloc->read_domains;
388 target_obj->pending_write_domain |= reloc->write_domain;
389
390 /* If the relocation already has the right value in it, no
391 * more work needs to be done.
392 */
393 if (target_offset == reloc->presumed_offset)
394 return 0;
395
396 /* Check that the relocation address is valid... */
397 if (unlikely(reloc->offset >
398 obj->base.size - (INTEL_INFO(dev)->gen >= 8 ? 8 : 4))) {
399 DRM_DEBUG("Relocation beyond object bounds: "
400 "obj %p target %d offset %d size %d.\n",
401 obj, reloc->target_handle,
402 (int) reloc->offset,
403 (int) obj->base.size);
404 return ret;
405 }
406 if (unlikely(reloc->offset & 3)) {
407 DRM_DEBUG("Relocation not 4-byte aligned: "
408 "obj %p target %d offset %d.\n",
409 obj, reloc->target_handle,
410 (int) reloc->offset);
411 return ret;
412 }
413
414 /* We can't wait for rendering with pagefaults disabled */
415 if (obj->active && in_atomic())
416 return -EFAULT;
417
418 reloc->delta += target_offset;
419 if (use_cpu_reloc(obj))
420 ret = relocate_entry_cpu(obj, reloc);
421 else
422 ret = relocate_entry_gtt(obj, reloc);
423
424 if (ret)
425 return ret;
426
427 /* and update the user's relocation entry */
428 reloc->presumed_offset = target_offset;
429
430 return 0;
431 }
432
433 static int
434 i915_gem_execbuffer_relocate_vma(struct i915_vma *vma,
435 struct eb_vmas *eb)
436 {
437 #define N_RELOC(x) ((x) / sizeof(struct drm_i915_gem_relocation_entry))
438 struct drm_i915_gem_relocation_entry stack_reloc[N_RELOC(512)];
439 struct drm_i915_gem_relocation_entry __user *user_relocs;
440 struct drm_i915_gem_exec_object2 *entry = vma->exec_entry;
441 int remain, ret;
442
443 user_relocs = to_user_ptr(entry->relocs_ptr);
444
445 remain = entry->relocation_count;
446 while (remain) {
447 struct drm_i915_gem_relocation_entry *r = stack_reloc;
448 int count = remain;
449 if (count > ARRAY_SIZE(stack_reloc))
450 count = ARRAY_SIZE(stack_reloc);
451 remain -= count;
452
453 if (__copy_from_user_inatomic(r, user_relocs, count*sizeof(r[0])))
454 return -EFAULT;
455
456 do {
457 u64 offset = r->presumed_offset;
458
459 ret = i915_gem_execbuffer_relocate_entry(vma->obj, eb, r);
460 if (ret)
461 return ret;
462
463 if (r->presumed_offset != offset &&
464 __copy_to_user_inatomic(&user_relocs->presumed_offset,
465 &r->presumed_offset,
466 sizeof(r->presumed_offset))) {
467 return -EFAULT;
468 }
469
470 user_relocs++;
471 r++;
472 } while (--count);
473 }
474
475 return 0;
476 #undef N_RELOC
477 }
478
479 static int
480 i915_gem_execbuffer_relocate_vma_slow(struct i915_vma *vma,
481 struct eb_vmas *eb,
482 struct drm_i915_gem_relocation_entry *relocs)
483 {
484 const struct drm_i915_gem_exec_object2 *entry = vma->exec_entry;
485 int i, ret;
486
487 for (i = 0; i < entry->relocation_count; i++) {
488 ret = i915_gem_execbuffer_relocate_entry(vma->obj, eb, &relocs[i]);
489 if (ret)
490 return ret;
491 }
492
493 return 0;
494 }
495
496 static int
497 i915_gem_execbuffer_relocate(struct eb_vmas *eb)
498 {
499 struct i915_vma *vma;
500 int ret = 0;
501
502 /* This is the fast path and we cannot handle a pagefault whilst
503 * holding the struct mutex lest the user pass in the relocations
504 * contained within an mmaped bo. In such a case the page
505 * fault handler would call i915_gem_fault() and we would try to
506 * acquire the struct mutex again. Obviously this is bad and so
507 * lockdep complains vehemently.
508 */
509 pagefault_disable();
510 list_for_each_entry(vma, &eb->vmas, exec_list) {
511 ret = i915_gem_execbuffer_relocate_vma(vma, eb);
512 if (ret)
513 break;
514 }
515 pagefault_enable();
516
517 return ret;
518 }
519
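/* GTT relocations are written through the CPU-visible aperture, so an
* object relocated that way must be bound in the mappable portion of
* the global GTT. */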
520 static int
521 need_reloc_mappable(struct i915_vma *vma)
522 {
523 struct drm_i915_gem_exec_object2 *entry = vma->exec_entry;
524 return entry->relocation_count && !use_cpu_reloc(vma->obj) &&
525 i915_is_ggtt(vma->vm);
526 }
527
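/* Reserve a single vma for execbuf: pin it (mappable if a fence or GTT
* relocation requires it), grab a fence register when requested and
* bind it into its address space. */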
528 static int
529 i915_gem_execbuffer_reserve_vma(struct i915_vma *vma,
530 struct intel_ring_buffer *ring,
531 bool *need_reloc)
532 {
533 struct drm_i915_gem_object *obj = vma->obj;
534 struct drm_i915_gem_exec_object2 *entry = vma->exec_entry;
535 bool has_fenced_gpu_access = INTEL_INFO(ring->dev)->gen < 4;
536 bool need_fence, need_mappable;
537 u32 flags = (entry->flags & EXEC_OBJECT_NEEDS_GTT) &&
538 !vma->obj->has_global_gtt_mapping ? GLOBAL_BIND : 0;
539 int ret;
540
541 need_fence =
542 has_fenced_gpu_access &&
543 entry->flags & EXEC_OBJECT_NEEDS_FENCE &&
544 obj->tiling_mode != I915_TILING_NONE;
545 need_mappable = need_fence || need_reloc_mappable(vma);
546
547 ret = i915_gem_object_pin(obj, vma->vm, entry->alignment, need_mappable,
548 false);
549 if (ret)
550 return ret;
551
552 entry->flags |= __EXEC_OBJECT_HAS_PIN;
553
554 if (has_fenced_gpu_access) {
555 if (entry->flags & EXEC_OBJECT_NEEDS_FENCE) {
556 ret = i915_gem_object_get_fence(obj);
557 if (ret)
558 return ret;
559
560 if (i915_gem_object_pin_fence(obj))
561 entry->flags |= __EXEC_OBJECT_HAS_FENCE;
562
563 obj->pending_fenced_gpu_access = true;
564 }
565 }
566
567 if (entry->offset != vma->node.start) {
568 entry->offset = vma->node.start;
569 *need_reloc = true;
570 }
571
572 if (entry->flags & EXEC_OBJECT_WRITE) {
573 obj->base.pending_read_domains = I915_GEM_DOMAIN_RENDER;
574 obj->base.pending_write_domain = I915_GEM_DOMAIN_RENDER;
575 }
576
577 vma->bind_vma(vma, obj->cache_level, flags);
578
579 return 0;
580 }
581
582 static int
583 i915_gem_execbuffer_reserve(struct intel_ring_buffer *ring,
584 struct list_head *vmas,
585 bool *need_relocs)
586 {
587 struct drm_i915_gem_object *obj;
588 struct i915_vma *vma;
589 struct i915_address_space *vm;
590 struct list_head ordered_vmas;
591 bool has_fenced_gpu_access = INTEL_INFO(ring->dev)->gen < 4;
592 int retry;
593
594 if (list_empty(vmas))
595 return 0;
596
597 vm = list_first_entry(vmas, struct i915_vma, exec_list)->vm;
598
599 INIT_LIST_HEAD(&ordered_vmas);
600 while (!list_empty(vmas)) {
601 struct drm_i915_gem_exec_object2 *entry;
602 bool need_fence, need_mappable;
603
604 vma = list_first_entry(vmas, struct i915_vma, exec_list);
605 obj = vma->obj;
606 entry = vma->exec_entry;
607
608 need_fence =
609 has_fenced_gpu_access &&
610 entry->flags & EXEC_OBJECT_NEEDS_FENCE &&
611 obj->tiling_mode != I915_TILING_NONE;
612 need_mappable = need_fence || need_reloc_mappable(vma);
613
614 if (need_mappable)
615 list_move(&vma->exec_list, &ordered_vmas);
616 else
617 list_move_tail(&vma->exec_list, &ordered_vmas);
618
619 obj->base.pending_read_domains = I915_GEM_GPU_DOMAINS & ~I915_GEM_DOMAIN_COMMAND;
620 obj->base.pending_write_domain = 0;
621 obj->pending_fenced_gpu_access = false;
622 }
623 list_splice(&ordered_vmas, vmas);
624
625 /* Attempt to pin all of the buffers into the GTT.
626 * This is done in 3 phases:
627 *
628 * 1a. Unbind all objects that do not match the GTT constraints for
629 * the execbuffer (fenceable, mappable, alignment etc).
630 * 1b. Increment pin count for already bound objects.
631 * 2. Bind new objects.
632 * 3. Decrement pin count.
633 *
634 * This avoids unnecessary unbinding of later objects in order to make
635 * room for the earlier objects *unless* we need to defragment.
636 */
637 retry = 0;
638 do {
639 int ret = 0;
640
641 /* Unbind any ill-fitting objects or pin. */
642 list_for_each_entry(vma, vmas, exec_list) {
643 struct drm_i915_gem_exec_object2 *entry = vma->exec_entry;
644 bool need_fence, need_mappable;
645
646 obj = vma->obj;
647
648 if (!drm_mm_node_allocated(&vma->node))
649 continue;
650
651 need_fence =
652 has_fenced_gpu_access &&
653 entry->flags & EXEC_OBJECT_NEEDS_FENCE &&
654 obj->tiling_mode != I915_TILING_NONE;
655 need_mappable = need_fence || need_reloc_mappable(vma);
656
657 WARN_ON((need_mappable || need_fence) &&
658 !i915_is_ggtt(vma->vm));
659
660 if ((entry->alignment &&
661 vma->node.start & (entry->alignment - 1)) ||
662 (need_mappable && !obj->map_and_fenceable))
663 ret = i915_vma_unbind(vma);
664 else
665 ret = i915_gem_execbuffer_reserve_vma(vma, ring, need_relocs);
666 if (ret)
667 goto err;
668 }
669
670 /* Bind fresh objects */
671 list_for_each_entry(vma, vmas, exec_list) {
672 if (drm_mm_node_allocated(&vma->node))
673 continue;
674
675 ret = i915_gem_execbuffer_reserve_vma(vma, ring, need_relocs);
676 if (ret)
677 goto err;
678 }
679
680 err:
681 if (ret != -ENOSPC || retry++)
682 return ret;
683
684 /* Decrement pin count for bound objects */
685 list_for_each_entry(vma, vmas, exec_list)
686 i915_gem_execbuffer_unreserve_vma(vma);
687
688 ret = i915_gem_evict_vm(vm, true);
689 if (ret)
690 return ret;
691 } while (1);
692 }
693
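/* Slow path: drop struct_mutex, copy all relocation entries from
* userspace with pagefaults enabled, then retake the lock, re-reserve
* the buffers and apply the relocations from the kernel copy. */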
694 static int
695 i915_gem_execbuffer_relocate_slow(struct drm_device *dev,
696 struct drm_i915_gem_execbuffer2 *args,
697 struct drm_file *file,
698 struct intel_ring_buffer *ring,
699 struct eb_vmas *eb,
700 struct drm_i915_gem_exec_object2 *exec)
701 {
702 struct drm_i915_gem_relocation_entry *reloc;
703 struct i915_address_space *vm;
704 struct i915_vma *vma;
705 bool need_relocs;
706 int *reloc_offset;
707 int i, total, ret;
708 unsigned count = args->buffer_count;
709
710 if (WARN_ON(list_empty(&eb->vmas)))
711 return 0;
712
713 vm = list_first_entry(&eb->vmas, struct i915_vma, exec_list)->vm;
714
715 /* We may process another execbuffer during the unlock... */
716 while (!list_empty(&eb->vmas)) {
717 vma = list_first_entry(&eb->vmas, struct i915_vma, exec_list);
718 list_del_init(&vma->exec_list);
719 i915_gem_execbuffer_unreserve_vma(vma);
720 drm_gem_object_unreference(&vma->obj->base);
721 }
722
723 mutex_unlock(&dev->struct_mutex);
724
725 total = 0;
726 for (i = 0; i < count; i++)
727 total += exec[i].relocation_count;
728
729 reloc_offset = drm_malloc_ab(count, sizeof(*reloc_offset));
730 reloc = drm_malloc_ab(total, sizeof(*reloc));
731 if (reloc == NULL || reloc_offset == NULL) {
732 drm_free_large(reloc);
733 drm_free_large(reloc_offset);
734 mutex_lock(&dev->struct_mutex);
735 return -ENOMEM;
736 }
737
738 total = 0;
739 for (i = 0; i < count; i++) {
740 struct drm_i915_gem_relocation_entry __user *user_relocs;
741 u64 invalid_offset = (u64)-1;
742 int j;
743
744 user_relocs = to_user_ptr(exec[i].relocs_ptr);
745
746 if (copy_from_user(reloc+total, user_relocs,
747 exec[i].relocation_count * sizeof(*reloc))) {
748 ret = -EFAULT;
749 mutex_lock(&dev->struct_mutex);
750 goto err;
751 }
752
753 /* As we do not update the known relocation offsets after
754 * relocating (due to the complexities in lock handling),
755 * we need to mark them as invalid now so that we force the
756 * relocation processing next time. This is just in case the target
757 * object is evicted and then rebound into its old
758 * presumed_offset before the next execbuffer; if that
759 * happened, we would make the mistake of assuming that the
760 * relocations were still valid.
761 */
762 for (j = 0; j < exec[i].relocation_count; j++) {
763 if (copy_to_user(&user_relocs[j].presumed_offset,
764 &invalid_offset,
765 sizeof(invalid_offset))) {
766 ret = -EFAULT;
767 mutex_lock(&dev->struct_mutex);
768 goto err;
769 }
770 }
771
772 reloc_offset[i] = total;
773 total += exec[i].relocation_count;
774 }
775
776 ret = i915_mutex_lock_interruptible(dev);
777 if (ret) {
778 mutex_lock(&dev->struct_mutex);
779 goto err;
780 }
781
782 /* reacquire the objects */
783 eb_reset(eb);
784 ret = eb_lookup_vmas(eb, exec, args, vm, file);
785 if (ret)
786 goto err;
787
788 need_relocs = (args->flags & I915_EXEC_NO_RELOC) == 0;
789 ret = i915_gem_execbuffer_reserve(ring, &eb->vmas, &need_relocs);
790 if (ret)
791 goto err;
792
793 list_for_each_entry(vma, &eb->vmas, exec_list) {
794 int offset = vma->exec_entry - exec;
795 ret = i915_gem_execbuffer_relocate_vma_slow(vma, eb,
796 reloc + reloc_offset[offset]);
797 if (ret)
798 goto err;
799 }
800
801 /* Leave the user relocations as they are; this is the painfully slow path,
802 * and we want to avoid the complication of dropping the lock whilst
803 * having buffers reserved in the aperture and so causing spurious
804 * ENOSPC for random operations.
805 */
806
807 err:
808 drm_free_large(reloc);
809 drm_free_large(reloc_offset);
810 return ret;
811 }
812
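/* Make the objects coherent for the GPU: sync against other rings,
* clflush CPU-dirty objects and invalidate the ring's caches. */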
813 static int
814 i915_gem_execbuffer_move_to_gpu(struct intel_ring_buffer *ring,
815 struct list_head *vmas)
816 {
817 struct i915_vma *vma;
818 uint32_t flush_domains = 0;
819 bool flush_chipset = false;
820 int ret;
821
822 list_for_each_entry(vma, vmas, exec_list) {
823 struct drm_i915_gem_object *obj = vma->obj;
824 ret = i915_gem_object_sync(obj, ring);
825 if (ret)
826 return ret;
827
828 if (obj->base.write_domain & I915_GEM_DOMAIN_CPU)
829 flush_chipset |= i915_gem_clflush_object(obj, false);
830
831 flush_domains |= obj->base.write_domain;
832 }
833
834 if (flush_chipset)
835 i915_gem_chipset_flush(ring->dev);
836
837 if (flush_domains & I915_GEM_DOMAIN_GTT)
838 wmb();
839
840 /* Unconditionally invalidate gpu caches and ensure that we do flush
841 * any residual writes from the previous batch.
842 */
843 return intel_ring_invalidate_all_caches(ring);
844 }
845
846 static bool
847 i915_gem_check_execbuffer(struct drm_i915_gem_execbuffer2 *exec)
848 {
849 if (exec->flags & __I915_EXEC_UNKNOWN_FLAGS)
850 return false;
851
852 return ((exec->batch_start_offset | exec->batch_len) & 0x7) == 0;
853 }
854
855 static int
856 validate_exec_list(struct drm_i915_gem_exec_object2 *exec,
857 int count)
858 {
859 int i;
860 unsigned relocs_total = 0;
861 unsigned relocs_max = UINT_MAX / sizeof(struct drm_i915_gem_relocation_entry);
862
863 for (i = 0; i < count; i++) {
864 char __user *ptr = to_user_ptr(exec[i].relocs_ptr);
865 int length; /* limited by fault_in_pages_readable() */
866
867 if (exec[i].flags & __EXEC_OBJECT_UNKNOWN_FLAGS)
868 return -EINVAL;
869
870 /* First check for malicious input causing overflow in
871 * the worst case where we need to allocate the entire
872 * relocation tree as a single array.
873 */
874 if (exec[i].relocation_count > relocs_max - relocs_total)
875 return -EINVAL;
876 relocs_total += exec[i].relocation_count;
877
878 length = exec[i].relocation_count *
879 sizeof(struct drm_i915_gem_relocation_entry);
880 /*
881 * We must check that the entire relocation array is safe
882 * to read, but since we may need to update the presumed
883 * offsets during execution, check for full write access.
884 */
885 if (!access_ok(VERIFY_WRITE, ptr, length))
886 return -EFAULT;
887
888 if (likely(!i915_prefault_disable)) {
889 if (fault_in_multipages_readable(ptr, length))
890 return -EFAULT;
891 }
892 }
893
894 return 0;
895 }
896
897 static struct i915_hw_context *
898 i915_gem_validate_context(struct drm_device *dev, struct drm_file *file,
899 struct intel_ring_buffer *ring, const u32 ctx_id)
900 {
901 struct i915_hw_context *ctx = NULL;
902 struct i915_ctx_hang_stats *hs;
903
904 if (ring->id != RCS && ctx_id != DEFAULT_CONTEXT_ID)
905 return ERR_PTR(-EINVAL);
906
907 ctx = i915_gem_context_get(file->driver_priv, ctx_id);
908 if (IS_ERR_OR_NULL(ctx))
909 return ctx;
910
911 hs = &ctx->hang_stats;
912 if (hs->banned) {
913 DRM_DEBUG("Context %u tried to submit while banned\n", ctx_id);
914 return ERR_PTR(-EIO);
915 }
916
917 return ctx;
918 }
919
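/* Commit the pending read/write domains computed during reservation
* and relocation, and mark each vma as active on the ring. */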
920 static void
921 i915_gem_execbuffer_move_to_active(struct list_head *vmas,
922 struct intel_ring_buffer *ring)
923 {
924 struct i915_vma *vma;
925
926 list_for_each_entry(vma, vmas, exec_list) {
927 struct drm_i915_gem_object *obj = vma->obj;
928 u32 old_read = obj->base.read_domains;
929 u32 old_write = obj->base.write_domain;
930
931 obj->base.write_domain = obj->base.pending_write_domain;
932 if (obj->base.write_domain == 0)
933 obj->base.pending_read_domains |= obj->base.read_domains;
934 obj->base.read_domains = obj->base.pending_read_domains;
935 obj->fenced_gpu_access = obj->pending_fenced_gpu_access;
936
937 i915_vma_move_to_active(vma, ring);
938 if (obj->base.write_domain) {
939 obj->dirty = 1;
940 obj->last_write_seqno = intel_ring_get_seqno(ring);
941 /* check for potential scanout */
942 if (i915_gem_obj_ggtt_bound(obj) &&
943 i915_gem_obj_to_ggtt(obj)->pin_count)
944 intel_mark_fb_busy(obj, ring);
945 }
946
947 trace_i915_gem_object_change_domain(obj, old_read, old_write);
948 }
949 }
950
951 static void
952 i915_gem_execbuffer_retire_commands(struct drm_device *dev,
953 struct drm_file *file,
954 struct intel_ring_buffer *ring,
955 struct drm_i915_gem_object *obj)
956 {
957 /* Unconditionally force add_request to emit a full flush. */
958 ring->gpu_caches_dirty = true;
959
960 /* Add a breadcrumb for the completion of the batch buffer */
961 (void)__i915_add_request(ring, file, obj, NULL);
962 }
963
964 static int
965 i915_reset_gen7_sol_offsets(struct drm_device *dev,
966 struct intel_ring_buffer *ring)
967 {
968 drm_i915_private_t *dev_priv = dev->dev_private;
969 int ret, i;
970
971 if (!IS_GEN7(dev) || ring != &dev_priv->ring[RCS])
972 return 0;
973
974 ret = intel_ring_begin(ring, 4 * 3);
975 if (ret)
976 return ret;
977
978 for (i = 0; i < 4; i++) {
979 intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1));
980 intel_ring_emit(ring, GEN7_SO_WRITE_OFFSET(i));
981 intel_ring_emit(ring, 0);
982 }
983
984 intel_ring_advance(ring);
985
986 return 0;
987 }
988
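/* Main execbuffer path: validate the arguments, look up and reserve
* all vmas, apply relocations (falling back to the slow path on
* -EFAULT), flush caches, switch context and dispatch the batch. */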
989 static int
990 i915_gem_do_execbuffer(struct drm_device *dev, void *data,
991 struct drm_file *file,
992 struct drm_i915_gem_execbuffer2 *args,
993 struct drm_i915_gem_exec_object2 *exec)
994 {
995 drm_i915_private_t *dev_priv = dev->dev_private;
996 struct eb_vmas *eb;
997 struct drm_i915_gem_object *batch_obj;
998 struct drm_clip_rect *cliprects = NULL;
999 struct intel_ring_buffer *ring;
1000 struct i915_hw_context *ctx;
1001 struct i915_address_space *vm;
1002 const u32 ctx_id = i915_execbuffer2_get_context_id(*args);
1003 u32 exec_start = args->batch_start_offset, exec_len;
1004 u32 mask, flags;
1005 int ret, mode, i;
1006 bool need_relocs;
1007
1008 if (!i915_gem_check_execbuffer(args))
1009 return -EINVAL;
1010
1011 ret = validate_exec_list(exec, args->buffer_count);
1012 if (ret)
1013 return ret;
1014
1015 flags = 0;
1016 if (args->flags & I915_EXEC_SECURE) {
1017 if (!file->is_master || !capable(CAP_SYS_ADMIN))
1018 return -EPERM;
1019
1020 flags |= I915_DISPATCH_SECURE;
1021 }
1022 if (args->flags & I915_EXEC_IS_PINNED)
1023 flags |= I915_DISPATCH_PINNED;
1024
1025 if ((args->flags & I915_EXEC_RING_MASK) > I915_NUM_RINGS) {
1026 DRM_DEBUG("execbuf with unknown ring: %d\n",
1027 (int)(args->flags & I915_EXEC_RING_MASK));
1028 return -EINVAL;
1029 }
1030
1031 if ((args->flags & I915_EXEC_RING_MASK) == I915_EXEC_DEFAULT)
1032 ring = &dev_priv->ring[RCS];
1033 else
1034 ring = &dev_priv->ring[(args->flags & I915_EXEC_RING_MASK) - 1];
1035
1036 if (!intel_ring_initialized(ring)) {
1037 DRM_DEBUG("execbuf with invalid ring: %d\n",
1038 (int)(args->flags & I915_EXEC_RING_MASK));
1039 return -EINVAL;
1040 }
1041
1042 mode = args->flags & I915_EXEC_CONSTANTS_MASK;
1043 mask = I915_EXEC_CONSTANTS_MASK;
1044 switch (mode) {
1045 case I915_EXEC_CONSTANTS_REL_GENERAL:
1046 case I915_EXEC_CONSTANTS_ABSOLUTE:
1047 case I915_EXEC_CONSTANTS_REL_SURFACE:
1048 if (ring == &dev_priv->ring[RCS] &&
1049 mode != dev_priv->relative_constants_mode) {
1050 if (INTEL_INFO(dev)->gen < 4)
1051 return -EINVAL;
1052
1053 if (INTEL_INFO(dev)->gen > 5 &&
1054 mode == I915_EXEC_CONSTANTS_REL_SURFACE)
1055 return -EINVAL;
1056
1057 /* The HW changed the meaning of this bit on gen6 */
1058 if (INTEL_INFO(dev)->gen >= 6)
1059 mask &= ~I915_EXEC_CONSTANTS_REL_SURFACE;
1060 }
1061 break;
1062 default:
1063 DRM_DEBUG("execbuf with unknown constants: %d\n", mode);
1064 return -EINVAL;
1065 }
1066
1067 if (args->buffer_count < 1) {
1068 DRM_DEBUG("execbuf with %d buffers\n", args->buffer_count);
1069 return -EINVAL;
1070 }
1071
1072 if (args->num_cliprects != 0) {
1073 if (ring != &dev_priv->ring[RCS]) {
1074 DRM_DEBUG("clip rectangles are only valid with the render ring\n");
1075 return -EINVAL;
1076 }
1077
1078 if (INTEL_INFO(dev)->gen >= 5) {
1079 DRM_DEBUG("clip rectangles are only valid on pre-gen5\n");
1080 return -EINVAL;
1081 }
1082
1083 if (args->num_cliprects > UINT_MAX / sizeof(*cliprects)) {
1084 DRM_DEBUG("execbuf with %u cliprects\n",
1085 args->num_cliprects);
1086 return -EINVAL;
1087 }
1088
1089 cliprects = kcalloc(args->num_cliprects,
1090 sizeof(*cliprects),
1091 GFP_KERNEL);
1092 if (cliprects == NULL) {
1093 ret = -ENOMEM;
1094 goto pre_mutex_err;
1095 }
1096
1097 if (copy_from_user(cliprects,
1098 to_user_ptr(args->cliprects_ptr),
1099 sizeof(*cliprects)*args->num_cliprects)) {
1100 ret = -EFAULT;
1101 goto pre_mutex_err;
1102 }
1103 }
1104
1105 ret = i915_mutex_lock_interruptible(dev);
1106 if (ret)
1107 goto pre_mutex_err;
1108
1109 if (dev_priv->ums.mm_suspended) {
1110 mutex_unlock(&dev->struct_mutex);
1111 ret = -EBUSY;
1112 goto pre_mutex_err;
1113 }
1114
1115 ctx = i915_gem_validate_context(dev, file, ring, ctx_id);
1116 if (IS_ERR_OR_NULL(ctx)) {
1117 mutex_unlock(&dev->struct_mutex);
1118 ret = PTR_ERR(ctx);
1119 goto pre_mutex_err;
1120 }
1121
1122 i915_gem_context_reference(ctx);
1123
1124 vm = ctx->vm;
1125 if (!USES_FULL_PPGTT(dev))
1126 vm = &dev_priv->gtt.base;
1127
1128 eb = eb_create(args);
1129 if (eb == NULL) {
1130 mutex_unlock(&dev->struct_mutex);
1131 ret = -ENOMEM;
1132 goto pre_mutex_err;
1133 }
1134
1135 /* Look up object handles */
1136 ret = eb_lookup_vmas(eb, exec, args, vm, file);
1137 if (ret)
1138 goto err;
1139
1140 /* take note of the batch buffer before we might reorder the lists */
1141 batch_obj = list_entry(eb->vmas.prev, struct i915_vma, exec_list)->obj;
1142
1143 /* Move the objects en-masse into the GTT, evicting if necessary. */
1144 need_relocs = (args->flags & I915_EXEC_NO_RELOC) == 0;
1145 ret = i915_gem_execbuffer_reserve(ring, &eb->vmas, &need_relocs);
1146 if (ret)
1147 goto err;
1148
1149 /* The objects are in their final locations, apply the relocations. */
1150 if (need_relocs)
1151 ret = i915_gem_execbuffer_relocate(eb);
1152 if (ret) {
1153 if (ret == -EFAULT) {
1154 ret = i915_gem_execbuffer_relocate_slow(dev, args, file, ring,
1155 eb, exec);
1156 BUG_ON(!mutex_is_locked(&dev->struct_mutex));
1157 }
1158 if (ret)
1159 goto err;
1160 }
1161
1162 /* Set the pending read domains for the batch buffer to COMMAND */
1163 if (batch_obj->base.pending_write_domain) {
1164 DRM_DEBUG("Attempting to use self-modifying batch buffer\n");
1165 ret = -EINVAL;
1166 goto err;
1167 }
1168 batch_obj->base.pending_read_domains |= I915_GEM_DOMAIN_COMMAND;
1169
1170 /* snb/ivb/vlv conflate the "batch in ppgtt" bit with the "non-secure
1171 * batch" bit. Hence we need to pin secure batches into the global gtt.
1172 * hsw should have this fixed, but bdw mucks it up again. */
1173 if (flags & I915_DISPATCH_SECURE &&
1174 !batch_obj->has_global_gtt_mapping) {
1175 /* When we have multiple VMs, we'll need to make sure that we
1176 * allocate space first */
1177 struct i915_vma *vma = i915_gem_obj_to_ggtt(batch_obj);
1178 BUG_ON(!vma);
1179 vma->bind_vma(vma, batch_obj->cache_level, GLOBAL_BIND);
1180 }
1181
1182 if (flags & I915_DISPATCH_SECURE)
1183 exec_start += i915_gem_obj_ggtt_offset(batch_obj);
1184 else
1185 exec_start += i915_gem_obj_offset(batch_obj, vm);
1186
1187 ret = i915_gem_execbuffer_move_to_gpu(ring, &eb->vmas);
1188 if (ret)
1189 goto err;
1190
1191 ret = i915_switch_context(ring, file, ctx);
1192 if (ret)
1193 goto err;
1194
1195 if (ring == &dev_priv->ring[RCS] &&
1196 mode != dev_priv->relative_constants_mode) {
1197 ret = intel_ring_begin(ring, 4);
1198 if (ret)
1199 goto err;
1200
1201 intel_ring_emit(ring, MI_NOOP);
1202 intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1));
1203 intel_ring_emit(ring, INSTPM);
1204 intel_ring_emit(ring, mask << 16 | mode);
1205 intel_ring_advance(ring);
1206
1207 dev_priv->relative_constants_mode = mode;
1208 }
1209
1210 if (args->flags & I915_EXEC_GEN7_SOL_RESET) {
1211 ret = i915_reset_gen7_sol_offsets(dev, ring);
1212 if (ret)
1213 goto err;
1214 }
1215
1216
1217 exec_len = args->batch_len;
1218 if (cliprects) {
1219 for (i = 0; i < args->num_cliprects; i++) {
1220 ret = i915_emit_box(dev, &cliprects[i],
1221 args->DR1, args->DR4);
1222 if (ret)
1223 goto err;
1224
1225 ret = ring->dispatch_execbuffer(ring,
1226 exec_start, exec_len,
1227 flags);
1228 if (ret)
1229 goto err;
1230 }
1231 } else {
1232 ret = ring->dispatch_execbuffer(ring,
1233 exec_start, exec_len,
1234 flags);
1235 if (ret)
1236 goto err;
1237 }
1238
1239 trace_i915_gem_ring_dispatch(ring, intel_ring_get_seqno(ring), flags);
1240
1241 i915_gem_execbuffer_move_to_active(&eb->vmas, ring);
1242 i915_gem_execbuffer_retire_commands(dev, file, ring, batch_obj);
1243
1244 err:
1245 /* the request owns the ref now */
1246 i915_gem_context_unreference(ctx);
1247 eb_destroy(eb);
1248
1249 mutex_unlock(&dev->struct_mutex);
1250
1251 pre_mutex_err:
1252 kfree(cliprects);
1253 return ret;
1254 }
1255
1256 /*
1257 * Legacy execbuffer just creates an exec2 list from the original exec object
1258 * list array and passes it to the real function.
1259 */
1260 int
1261 i915_gem_execbuffer(struct drm_device *dev, void *data,
1262 struct drm_file *file)
1263 {
1264 struct drm_i915_gem_execbuffer *args = data;
1265 struct drm_i915_gem_execbuffer2 exec2;
1266 struct drm_i915_gem_exec_object *exec_list = NULL;
1267 struct drm_i915_gem_exec_object2 *exec2_list = NULL;
1268 int ret, i;
1269
1270 if (args->buffer_count < 1) {
1271 DRM_DEBUG("execbuf with %d buffers\n", args->buffer_count);
1272 return -EINVAL;
1273 }
1274
1275 /* Copy in the exec list from userland */
1276 exec_list = drm_malloc_ab(sizeof(*exec_list), args->buffer_count);
1277 exec2_list = drm_malloc_ab(sizeof(*exec2_list), args->buffer_count);
1278 if (exec_list == NULL || exec2_list == NULL) {
1279 DRM_DEBUG("Failed to allocate exec list for %d buffers\n",
1280 args->buffer_count);
1281 drm_free_large(exec_list);
1282 drm_free_large(exec2_list);
1283 return -ENOMEM;
1284 }
1285 ret = copy_from_user(exec_list,
1286 to_user_ptr(args->buffers_ptr),
1287 sizeof(*exec_list) * args->buffer_count);
1288 if (ret != 0) {
1289 DRM_DEBUG("copy %d exec entries failed %d\n",
1290 args->buffer_count, ret);
1291 drm_free_large(exec_list);
1292 drm_free_large(exec2_list);
1293 return -EFAULT;
1294 }
1295
1296 for (i = 0; i < args->buffer_count; i++) {
1297 exec2_list[i].handle = exec_list[i].handle;
1298 exec2_list[i].relocation_count = exec_list[i].relocation_count;
1299 exec2_list[i].relocs_ptr = exec_list[i].relocs_ptr;
1300 exec2_list[i].alignment = exec_list[i].alignment;
1301 exec2_list[i].offset = exec_list[i].offset;
1302 if (INTEL_INFO(dev)->gen < 4)
1303 exec2_list[i].flags = EXEC_OBJECT_NEEDS_FENCE;
1304 else
1305 exec2_list[i].flags = 0;
1306 }
1307
1308 exec2.buffers_ptr = args->buffers_ptr;
1309 exec2.buffer_count = args->buffer_count;
1310 exec2.batch_start_offset = args->batch_start_offset;
1311 exec2.batch_len = args->batch_len;
1312 exec2.DR1 = args->DR1;
1313 exec2.DR4 = args->DR4;
1314 exec2.num_cliprects = args->num_cliprects;
1315 exec2.cliprects_ptr = args->cliprects_ptr;
1316 exec2.flags = I915_EXEC_RENDER;
1317 i915_execbuffer2_set_context_id(exec2, 0);
1318
1319 ret = i915_gem_do_execbuffer(dev, data, file, &exec2, exec2_list);
1320 if (!ret) {
1321 /* Copy the new buffer offsets back to the user's exec list. */
1322 for (i = 0; i < args->buffer_count; i++)
1323 exec_list[i].offset = exec2_list[i].offset;
1324 /* ... and back out to userspace */
1325 ret = copy_to_user(to_user_ptr(args->buffers_ptr),
1326 exec_list,
1327 sizeof(*exec_list) * args->buffer_count);
1328 if (ret) {
1329 ret = -EFAULT;
1330 DRM_DEBUG("failed to copy %d exec entries "
1331 "back to user (%d)\n",
1332 args->buffer_count, ret);
1333 }
1334 }
1335
1336 drm_free_large(exec_list);
1337 drm_free_large(exec2_list);
1338 return ret;
1339 }
1340
1341 int
1342 i915_gem_execbuffer2(struct drm_device *dev, void *data,
1343 struct drm_file *file)
1344 {
1345 struct drm_i915_gem_execbuffer2 *args = data;
1346 struct drm_i915_gem_exec_object2 *exec2_list = NULL;
1347 int ret;
1348
1349 if (args->buffer_count < 1 ||
1350 args->buffer_count > UINT_MAX / sizeof(*exec2_list)) {
1351 DRM_DEBUG("execbuf2 with %d buffers\n", args->buffer_count);
1352 return -EINVAL;
1353 }
1354
1355 exec2_list = kmalloc(sizeof(*exec2_list)*args->buffer_count,
1356 GFP_TEMPORARY | __GFP_NOWARN | __GFP_NORETRY);
1357 if (exec2_list == NULL)
1358 exec2_list = drm_malloc_ab(sizeof(*exec2_list),
1359 args->buffer_count);
1360 if (exec2_list == NULL) {
1361 DRM_DEBUG("Failed to allocate exec list for %d buffers\n",
1362 args->buffer_count);
1363 return -ENOMEM;
1364 }
1365 ret = copy_from_user(exec2_list,
1366 to_user_ptr(args->buffers_ptr),
1367 sizeof(*exec2_list) * args->buffer_count);
1368 if (ret != 0) {
1369 DRM_DEBUG("copy %d exec entries failed %d\n",
1370 args->buffer_count, ret);
1371 drm_free_large(exec2_list);
1372 return -EFAULT;
1373 }
1374
1375 ret = i915_gem_do_execbuffer(dev, data, file, args, exec2_list);
1376 if (!ret) {
1377 /* Copy the new buffer offsets back to the user's exec list. */
1378 ret = copy_to_user(to_user_ptr(args->buffers_ptr),
1379 exec2_list,
1380 sizeof(*exec2_list) * args->buffer_count);
1381 if (ret) {
1382 ret = -EFAULT;
1383 DRM_DEBUG("failed to copy %d exec entries "
1384 "back to user (%d)\n",
1385 args->buffer_count, ret);
1386 }
1387 }
1388
1389 drm_free_large(exec2_list);
1390 return ret;
1391 }