drm/i915: Infrastructure for supporting different GGTT views per object
drivers/gpu/drm/i915/i915_gem_execbuffer.c
/*
 * Copyright © 2008,2010 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Eric Anholt <eric@anholt.net>
 *    Chris Wilson <chris@chris-wilson.co.uk>
 *
 */

#include <drm/drmP.h>
#include <drm/i915_drm.h>
#include "i915_drv.h"
#include "i915_trace.h"
#include "intel_drv.h"
#include <linux/dma_remapping.h>

#define __EXEC_OBJECT_HAS_PIN (1<<31)
#define __EXEC_OBJECT_HAS_FENCE (1<<30)
#define __EXEC_OBJECT_NEEDS_MAP (1<<29)
#define __EXEC_OBJECT_NEEDS_BIAS (1<<28)

#define BATCH_OFFSET_BIAS (256*1024)

struct eb_vmas {
	struct list_head vmas;
	int and;
	union {
		struct i915_vma *lut[0];
		struct hlist_head buckets[0];
	};
};

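/*
 * Allocate the buffer-lookup structure. With I915_EXEC_HANDLE_LUT userspace
 * promises to reference buffers by their index in the exec list, so a flat
 * array suffices; otherwise fall back to a hash table sized to roughly half
 * a page of hlist heads, with eb->and holding the bucket mask. A negative
 * eb->and marks the direct-index (LUT) mode.
 */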
static struct eb_vmas *
eb_create(struct drm_i915_gem_execbuffer2 *args)
{
	struct eb_vmas *eb = NULL;

	if (args->flags & I915_EXEC_HANDLE_LUT) {
		unsigned size = args->buffer_count;
		size *= sizeof(struct i915_vma *);
		size += sizeof(struct eb_vmas);
		eb = kmalloc(size, GFP_TEMPORARY | __GFP_NOWARN | __GFP_NORETRY);
	}

	if (eb == NULL) {
		unsigned size = args->buffer_count;
		unsigned count = PAGE_SIZE / sizeof(struct hlist_head) / 2;
		BUILD_BUG_ON_NOT_POWER_OF_2(PAGE_SIZE / sizeof(struct hlist_head));
		while (count > 2*size)
			count >>= 1;
		eb = kzalloc(count*sizeof(struct hlist_head) +
			     sizeof(struct eb_vmas),
			     GFP_TEMPORARY);
		if (eb == NULL)
			return eb;

		eb->and = count - 1;
	} else
		eb->and = -args->buffer_count;

	INIT_LIST_HEAD(&eb->vmas);
	return eb;
}

static void
eb_reset(struct eb_vmas *eb)
{
	if (eb->and >= 0)
		memset(eb->buckets, 0, (eb->and+1)*sizeof(struct hlist_head));
}

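/*
 * Resolve each exec-object handle to a vma in the target address space.
 * This runs in two passes: first grab object references under the IDR
 * spinlock (no allocations allowed there), then drop the lock and look up
 * or create the per-vm vmas, transferring ownership onto eb->vmas as we go.
 */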
static int
eb_lookup_vmas(struct eb_vmas *eb,
	       struct drm_i915_gem_exec_object2 *exec,
	       const struct drm_i915_gem_execbuffer2 *args,
	       struct i915_address_space *vm,
	       struct drm_file *file)
{
	struct drm_i915_gem_object *obj;
	struct list_head objects;
	int i, ret;

	INIT_LIST_HEAD(&objects);
	spin_lock(&file->table_lock);
	/* Grab a reference to the object and release the lock so we can lookup
	 * or create the VMA without using GFP_ATOMIC */
	for (i = 0; i < args->buffer_count; i++) {
		obj = to_intel_bo(idr_find(&file->object_idr, exec[i].handle));
		if (obj == NULL) {
			spin_unlock(&file->table_lock);
			DRM_DEBUG("Invalid object handle %d at index %d\n",
				  exec[i].handle, i);
			ret = -ENOENT;
			goto err;
		}

		if (!list_empty(&obj->obj_exec_link)) {
			spin_unlock(&file->table_lock);
			DRM_DEBUG("Object %p [handle %d, index %d] appears more than once in object list\n",
				  obj, exec[i].handle, i);
			ret = -EINVAL;
			goto err;
		}

		WARN_ONCE(obj->base.dumb,
			  "GPU use of dumb buffer is illegal.\n");

		drm_gem_object_reference(&obj->base);
		list_add_tail(&obj->obj_exec_link, &objects);
	}
	spin_unlock(&file->table_lock);

	i = 0;
	while (!list_empty(&objects)) {
		struct i915_vma *vma;

		obj = list_first_entry(&objects,
				       struct drm_i915_gem_object,
				       obj_exec_link);

		/*
		 * NOTE: We can leak any vmas created here when something fails
		 * later on. But that's no issue since vma_unbind can deal with
		 * vmas which are not actually bound. And since only
		 * lookup_or_create exists as an interface to get at the vma
		 * from the (obj, vm) we don't run the risk of creating
		 * duplicated vmas for the same vm.
		 */
		vma = i915_gem_obj_lookup_or_create_vma(obj, vm);
		if (IS_ERR(vma)) {
			DRM_DEBUG("Failed to lookup VMA\n");
			ret = PTR_ERR(vma);
			goto err;
		}

		/* Transfer ownership from the objects list to the vmas list. */
		list_add_tail(&vma->exec_list, &eb->vmas);
		list_del_init(&obj->obj_exec_link);

		vma->exec_entry = &exec[i];
		if (eb->and < 0) {
			eb->lut[i] = vma;
		} else {
			uint32_t handle = args->flags & I915_EXEC_HANDLE_LUT ? i : exec[i].handle;
			vma->exec_handle = handle;
			hlist_add_head(&vma->exec_node,
				       &eb->buckets[handle & eb->and]);
		}
		++i;
	}

	return 0;

err:
	while (!list_empty(&objects)) {
		obj = list_first_entry(&objects,
				       struct drm_i915_gem_object,
				       obj_exec_link);
		list_del_init(&obj->obj_exec_link);
		drm_gem_object_unreference(&obj->base);
	}
	/*
	 * Objects already transferred to the vmas list will be unreferenced by
	 * eb_destroy.
	 */

	return ret;
}

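/* Map a user handle back to its vma, via the LUT or the hash buckets. */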
static struct i915_vma *eb_get_vma(struct eb_vmas *eb, unsigned long handle)
{
	if (eb->and < 0) {
		if (handle >= -eb->and)
			return NULL;
		return eb->lut[handle];
	} else {
		struct hlist_head *head;
		struct hlist_node *node;

		head = &eb->buckets[handle & eb->and];
		hlist_for_each(node, head) {
			struct i915_vma *vma;

			vma = hlist_entry(node, struct i915_vma, exec_node);
			if (vma->exec_handle == handle)
				return vma;
		}
		return NULL;
	}
}

static void
i915_gem_execbuffer_unreserve_vma(struct i915_vma *vma)
{
	struct drm_i915_gem_exec_object2 *entry;
	struct drm_i915_gem_object *obj = vma->obj;

	if (!drm_mm_node_allocated(&vma->node))
		return;

	entry = vma->exec_entry;

	if (entry->flags & __EXEC_OBJECT_HAS_FENCE)
		i915_gem_object_unpin_fence(obj);

	if (entry->flags & __EXEC_OBJECT_HAS_PIN)
		vma->pin_count--;

	entry->flags &= ~(__EXEC_OBJECT_HAS_FENCE | __EXEC_OBJECT_HAS_PIN);
}

static void eb_destroy(struct eb_vmas *eb)
{
	while (!list_empty(&eb->vmas)) {
		struct i915_vma *vma;

		vma = list_first_entry(&eb->vmas,
				       struct i915_vma,
				       exec_list);
		list_del_init(&vma->exec_list);
		i915_gem_execbuffer_unreserve_vma(vma);
		drm_gem_object_unreference(&vma->obj->base);
	}
	kfree(eb);
}

static inline int use_cpu_reloc(struct drm_i915_gem_object *obj)
{
	return (HAS_LLC(obj->base.dev) ||
		obj->base.write_domain == I915_GEM_DOMAIN_CPU ||
		!obj->map_and_fenceable ||
		obj->cache_level != I915_CACHE_NONE);
}

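/*
 * A relocation is performed by writing the final GTT address of the target
 * into the relocatee. Two write paths exist: a kmap-based CPU write for
 * coherent objects, and a write through the mappable GTT aperture
 * otherwise. On gen8+ addresses are 64-bit, so both paths must handle the
 * upper dword potentially spilling onto the next page.
 */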
static int
relocate_entry_cpu(struct drm_i915_gem_object *obj,
		   struct drm_i915_gem_relocation_entry *reloc,
		   uint64_t target_offset)
{
	struct drm_device *dev = obj->base.dev;
	uint32_t page_offset = offset_in_page(reloc->offset);
	uint64_t delta = reloc->delta + target_offset;
	char *vaddr;
	int ret;

	ret = i915_gem_object_set_to_cpu_domain(obj, true);
	if (ret)
		return ret;

	vaddr = kmap_atomic(i915_gem_object_get_page(obj,
				reloc->offset >> PAGE_SHIFT));
	*(uint32_t *)(vaddr + page_offset) = lower_32_bits(delta);

	if (INTEL_INFO(dev)->gen >= 8) {
		page_offset = offset_in_page(page_offset + sizeof(uint32_t));

		if (page_offset == 0) {
			kunmap_atomic(vaddr);
			vaddr = kmap_atomic(i915_gem_object_get_page(obj,
			    (reloc->offset + sizeof(uint32_t)) >> PAGE_SHIFT));
		}

		*(uint32_t *)(vaddr + page_offset) = upper_32_bits(delta);
	}

	kunmap_atomic(vaddr);

	return 0;
}

static int
relocate_entry_gtt(struct drm_i915_gem_object *obj,
		   struct drm_i915_gem_relocation_entry *reloc,
		   uint64_t target_offset)
{
	struct drm_device *dev = obj->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint64_t delta = reloc->delta + target_offset;
	uint64_t offset;
	void __iomem *reloc_page;
	int ret;

	ret = i915_gem_object_set_to_gtt_domain(obj, true);
	if (ret)
		return ret;

	ret = i915_gem_object_put_fence(obj);
	if (ret)
		return ret;

	/* Map the page containing the relocation we're going to perform. */
	offset = i915_gem_obj_ggtt_offset(obj);
	offset += reloc->offset;
	reloc_page = io_mapping_map_atomic_wc(dev_priv->gtt.mappable,
					      offset & PAGE_MASK);
	iowrite32(lower_32_bits(delta), reloc_page + offset_in_page(offset));

	if (INTEL_INFO(dev)->gen >= 8) {
		offset += sizeof(uint32_t);

		if (offset_in_page(offset) == 0) {
			io_mapping_unmap_atomic(reloc_page);
			reloc_page =
				io_mapping_map_atomic_wc(dev_priv->gtt.mappable,
							 offset);
		}

		iowrite32(upper_32_bits(delta),
			  reloc_page + offset_in_page(offset));
	}

	io_mapping_unmap_atomic(reloc_page);

	return 0;
}

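/*
 * Apply a single relocation: validate the target handle and requested
 * domains, skip the write entirely if the presumed offset is already
 * correct, and otherwise patch the object and update the entry's
 * presumed_offset to match.
 */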
static int
i915_gem_execbuffer_relocate_entry(struct drm_i915_gem_object *obj,
				   struct eb_vmas *eb,
				   struct drm_i915_gem_relocation_entry *reloc)
{
	struct drm_device *dev = obj->base.dev;
	struct drm_gem_object *target_obj;
	struct drm_i915_gem_object *target_i915_obj;
	struct i915_vma *target_vma;
	uint64_t target_offset;
	int ret;

	/* we already hold a reference to all valid objects */
	target_vma = eb_get_vma(eb, reloc->target_handle);
	if (unlikely(target_vma == NULL))
		return -ENOENT;
	target_i915_obj = target_vma->obj;
	target_obj = &target_vma->obj->base;

	target_offset = target_vma->node.start;

	/* Sandybridge PPGTT errata: We need a global gtt mapping for MI and
	 * pipe_control writes because the gpu doesn't properly redirect them
	 * through the ppgtt for non-secure batchbuffers. */
	if (unlikely(IS_GEN6(dev) &&
	    reloc->write_domain == I915_GEM_DOMAIN_INSTRUCTION &&
	    !(target_vma->bound & GLOBAL_BIND))) {
		ret = i915_vma_bind(target_vma, target_i915_obj->cache_level,
				    GLOBAL_BIND);
		if (WARN_ONCE(ret, "Unexpected failure to bind target VMA!"))
			return ret;
	}

	/* Validate that the target is in a valid r/w GPU domain */
	if (unlikely(reloc->write_domain & (reloc->write_domain - 1))) {
		DRM_DEBUG("reloc with multiple write domains: "
			  "obj %p target %d offset %d "
			  "read %08x write %08x",
			  obj, reloc->target_handle,
			  (int) reloc->offset,
			  reloc->read_domains,
			  reloc->write_domain);
		return -EINVAL;
	}
	if (unlikely((reloc->write_domain | reloc->read_domains)
		     & ~I915_GEM_GPU_DOMAINS)) {
		DRM_DEBUG("reloc with read/write non-GPU domains: "
			  "obj %p target %d offset %d "
			  "read %08x write %08x",
			  obj, reloc->target_handle,
			  (int) reloc->offset,
			  reloc->read_domains,
			  reloc->write_domain);
		return -EINVAL;
	}

	target_obj->pending_read_domains |= reloc->read_domains;
	target_obj->pending_write_domain |= reloc->write_domain;

	/* If the relocation already has the right value in it, no
	 * more work needs to be done.
	 */
	if (target_offset == reloc->presumed_offset)
		return 0;

	/* Check that the relocation address is valid... */
	if (unlikely(reloc->offset >
		obj->base.size - (INTEL_INFO(dev)->gen >= 8 ? 8 : 4))) {
		DRM_DEBUG("Relocation beyond object bounds: "
			  "obj %p target %d offset %d size %d.\n",
			  obj, reloc->target_handle,
			  (int) reloc->offset,
			  (int) obj->base.size);
		return -EINVAL;
	}
	if (unlikely(reloc->offset & 3)) {
		DRM_DEBUG("Relocation not 4-byte aligned: "
			  "obj %p target %d offset %d.\n",
			  obj, reloc->target_handle,
			  (int) reloc->offset);
		return -EINVAL;
	}

	/* We can't wait for rendering with pagefaults disabled */
	if (obj->active && in_atomic())
		return -EFAULT;

	if (use_cpu_reloc(obj))
		ret = relocate_entry_cpu(obj, reloc, target_offset);
	else
		ret = relocate_entry_gtt(obj, reloc, target_offset);

	if (ret)
		return ret;

	/* and update the user's relocation entry */
	reloc->presumed_offset = target_offset;

	return 0;
}

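/*
 * Walk a buffer's relocation list, copying entries from userspace in
 * stack-sized chunks. This runs with pagefaults disabled (see
 * i915_gem_execbuffer_relocate), hence the inatomic copies; -EFAULT here
 * sends the caller down the slow path with the mutex dropped.
 */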
static int
i915_gem_execbuffer_relocate_vma(struct i915_vma *vma,
				 struct eb_vmas *eb)
{
#define N_RELOC(x) ((x) / sizeof(struct drm_i915_gem_relocation_entry))
	struct drm_i915_gem_relocation_entry stack_reloc[N_RELOC(512)];
	struct drm_i915_gem_relocation_entry __user *user_relocs;
	struct drm_i915_gem_exec_object2 *entry = vma->exec_entry;
	int remain, ret;

	user_relocs = to_user_ptr(entry->relocs_ptr);

	remain = entry->relocation_count;
	while (remain) {
		struct drm_i915_gem_relocation_entry *r = stack_reloc;
		int count = remain;
		if (count > ARRAY_SIZE(stack_reloc))
			count = ARRAY_SIZE(stack_reloc);
		remain -= count;

		if (__copy_from_user_inatomic(r, user_relocs, count*sizeof(r[0])))
			return -EFAULT;

		do {
			u64 offset = r->presumed_offset;

			ret = i915_gem_execbuffer_relocate_entry(vma->obj, eb, r);
			if (ret)
				return ret;

			if (r->presumed_offset != offset &&
			    __copy_to_user_inatomic(&user_relocs->presumed_offset,
						    &r->presumed_offset,
						    sizeof(r->presumed_offset))) {
				return -EFAULT;
			}

			user_relocs++;
			r++;
		} while (--count);
	}

	return 0;
#undef N_RELOC
}

static int
i915_gem_execbuffer_relocate_vma_slow(struct i915_vma *vma,
				      struct eb_vmas *eb,
				      struct drm_i915_gem_relocation_entry *relocs)
{
	const struct drm_i915_gem_exec_object2 *entry = vma->exec_entry;
	int i, ret;

	for (i = 0; i < entry->relocation_count; i++) {
		ret = i915_gem_execbuffer_relocate_entry(vma->obj, eb, &relocs[i]);
		if (ret)
			return ret;
	}

	return 0;
}

static int
i915_gem_execbuffer_relocate(struct eb_vmas *eb)
{
	struct i915_vma *vma;
	int ret = 0;

	/* This is the fast path and we cannot handle a pagefault whilst
	 * holding the struct mutex lest the user pass in the relocations
	 * contained within a mmaped bo. In such a case the page fault
	 * handler would call i915_gem_fault() and we would try to
	 * acquire the struct mutex again. Obviously this is bad and so
	 * lockdep complains vehemently.
	 */
	pagefault_disable();
	list_for_each_entry(vma, &eb->vmas, exec_list) {
		ret = i915_gem_execbuffer_relocate_vma(vma, eb);
		if (ret)
			break;
	}
	pagefault_enable();

	return ret;
}

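/*
 * Pin one vma into its address space, translating the exec-entry flags
 * into pin flags (mappable GGTT for objects that need a mapping or fence,
 * an offset bias for batches), and acquire a fence register if the entry
 * needs one. Flags the caller for another relocation pass whenever the
 * node moved away from its presumed offset.
 */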
static int
i915_gem_execbuffer_reserve_vma(struct i915_vma *vma,
				struct intel_engine_cs *ring,
				bool *need_reloc)
{
	struct drm_i915_gem_object *obj = vma->obj;
	struct drm_i915_gem_exec_object2 *entry = vma->exec_entry;
	uint64_t flags;
	int ret;

	flags = 0;
	if (entry->flags & __EXEC_OBJECT_NEEDS_MAP)
		flags |= PIN_GLOBAL | PIN_MAPPABLE;
	if (entry->flags & EXEC_OBJECT_NEEDS_GTT)
		flags |= PIN_GLOBAL;
	if (entry->flags & __EXEC_OBJECT_NEEDS_BIAS)
		flags |= BATCH_OFFSET_BIAS | PIN_OFFSET_BIAS;

	ret = i915_gem_object_pin(obj, vma->vm, entry->alignment, flags);
	if (ret)
		return ret;

	entry->flags |= __EXEC_OBJECT_HAS_PIN;

	if (entry->flags & EXEC_OBJECT_NEEDS_FENCE) {
		ret = i915_gem_object_get_fence(obj);
		if (ret)
			return ret;

		if (i915_gem_object_pin_fence(obj))
			entry->flags |= __EXEC_OBJECT_HAS_FENCE;
	}

	if (entry->offset != vma->node.start) {
		entry->offset = vma->node.start;
		*need_reloc = true;
	}

	if (entry->flags & EXEC_OBJECT_WRITE) {
		obj->base.pending_read_domains = I915_GEM_DOMAIN_RENDER;
		obj->base.pending_write_domain = I915_GEM_DOMAIN_RENDER;
	}

	return 0;
}

static bool
need_reloc_mappable(struct i915_vma *vma)
{
	struct drm_i915_gem_exec_object2 *entry = vma->exec_entry;

	if (entry->relocation_count == 0)
		return false;

	if (!i915_is_ggtt(vma->vm))
		return false;

	/* See also use_cpu_reloc() */
	if (HAS_LLC(vma->obj->base.dev))
		return false;

	if (vma->obj->base.write_domain == I915_GEM_DOMAIN_CPU)
		return false;

	return true;
}

static bool
eb_vma_misplaced(struct i915_vma *vma)
{
	struct drm_i915_gem_exec_object2 *entry = vma->exec_entry;
	struct drm_i915_gem_object *obj = vma->obj;

	WARN_ON(entry->flags & __EXEC_OBJECT_NEEDS_MAP &&
		!i915_is_ggtt(vma->vm));

	if (entry->alignment &&
	    vma->node.start & (entry->alignment - 1))
		return true;

	if (entry->flags & __EXEC_OBJECT_NEEDS_MAP && !obj->map_and_fenceable)
		return true;

	if (entry->flags & __EXEC_OBJECT_NEEDS_BIAS &&
	    vma->node.start < BATCH_OFFSET_BIAS)
		return true;

	return false;
}

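/*
 * Reserve GTT space for the whole exec list. Buffers needing a mappable
 * GGTT mapping are sorted to the front to reduce aperture pressure, then
 * everything is pinned in place; on -ENOSPC all pins are dropped, the
 * address space is evicted wholesale and the loop retries once before
 * giving up.
 */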
static int
i915_gem_execbuffer_reserve(struct intel_engine_cs *ring,
			    struct list_head *vmas,
			    bool *need_relocs)
{
	struct drm_i915_gem_object *obj;
	struct i915_vma *vma;
	struct i915_address_space *vm;
	struct list_head ordered_vmas;
	bool has_fenced_gpu_access = INTEL_INFO(ring->dev)->gen < 4;
	int retry;

	i915_gem_retire_requests_ring(ring);

	vm = list_first_entry(vmas, struct i915_vma, exec_list)->vm;

	INIT_LIST_HEAD(&ordered_vmas);
	while (!list_empty(vmas)) {
		struct drm_i915_gem_exec_object2 *entry;
		bool need_fence, need_mappable;

		vma = list_first_entry(vmas, struct i915_vma, exec_list);
		obj = vma->obj;
		entry = vma->exec_entry;

		if (!has_fenced_gpu_access)
			entry->flags &= ~EXEC_OBJECT_NEEDS_FENCE;
		need_fence =
			entry->flags & EXEC_OBJECT_NEEDS_FENCE &&
			obj->tiling_mode != I915_TILING_NONE;
		need_mappable = need_fence || need_reloc_mappable(vma);

		if (need_mappable) {
			entry->flags |= __EXEC_OBJECT_NEEDS_MAP;
			list_move(&vma->exec_list, &ordered_vmas);
		} else
			list_move_tail(&vma->exec_list, &ordered_vmas);

		obj->base.pending_read_domains = I915_GEM_GPU_DOMAINS & ~I915_GEM_DOMAIN_COMMAND;
		obj->base.pending_write_domain = 0;
	}
	list_splice(&ordered_vmas, vmas);

	/* Attempt to pin all of the buffers into the GTT.
	 * This is done in 3 phases:
	 *
	 * 1a. Unbind all objects that do not match the GTT constraints for
	 *     the execbuffer (fenceable, mappable, alignment etc).
	 * 1b. Increment pin count for already bound objects.
	 * 2.  Bind new objects.
	 * 3.  Decrement pin count.
	 *
	 * This avoids unnecessary unbinding of later objects in order to make
	 * room for the earlier objects *unless* we need to defragment.
	 */
	retry = 0;
	do {
		int ret = 0;

		/* Unbind any ill-fitting objects or pin. */
		list_for_each_entry(vma, vmas, exec_list) {
			if (!drm_mm_node_allocated(&vma->node))
				continue;

			if (eb_vma_misplaced(vma))
				ret = i915_vma_unbind(vma);
			else
				ret = i915_gem_execbuffer_reserve_vma(vma, ring, need_relocs);
			if (ret)
				goto err;
		}

		/* Bind fresh objects */
		list_for_each_entry(vma, vmas, exec_list) {
			if (drm_mm_node_allocated(&vma->node))
				continue;

			ret = i915_gem_execbuffer_reserve_vma(vma, ring, need_relocs);
			if (ret)
				goto err;
		}

err:
		if (ret != -ENOSPC || retry++)
			return ret;

		/* Decrement pin count for bound objects */
		list_for_each_entry(vma, vmas, exec_list)
			i915_gem_execbuffer_unreserve_vma(vma);

		ret = i915_gem_evict_vm(vm, true);
		if (ret)
			return ret;
	} while (1);
}

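/*
 * Slow-path relocations: invoked when the atomic fast path faulted. Drops
 * the struct mutex, copies every relocation list into kernel memory (and
 * poisons the user's presumed offsets so a later fast path cannot trust
 * stale values), then retakes the lock, re-reserves the objects and
 * applies the relocations from the stable kernel copy.
 */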
static int
i915_gem_execbuffer_relocate_slow(struct drm_device *dev,
				  struct drm_i915_gem_execbuffer2 *args,
				  struct drm_file *file,
				  struct intel_engine_cs *ring,
				  struct eb_vmas *eb,
				  struct drm_i915_gem_exec_object2 *exec)
{
	struct drm_i915_gem_relocation_entry *reloc;
	struct i915_address_space *vm;
	struct i915_vma *vma;
	bool need_relocs;
	int *reloc_offset;
	int i, total, ret;
	unsigned count = args->buffer_count;

	vm = list_first_entry(&eb->vmas, struct i915_vma, exec_list)->vm;

	/* We may process another execbuffer during the unlock... */
	while (!list_empty(&eb->vmas)) {
		vma = list_first_entry(&eb->vmas, struct i915_vma, exec_list);
		list_del_init(&vma->exec_list);
		i915_gem_execbuffer_unreserve_vma(vma);
		drm_gem_object_unreference(&vma->obj->base);
	}

	mutex_unlock(&dev->struct_mutex);

	total = 0;
	for (i = 0; i < count; i++)
		total += exec[i].relocation_count;

	reloc_offset = drm_malloc_ab(count, sizeof(*reloc_offset));
	reloc = drm_malloc_ab(total, sizeof(*reloc));
	if (reloc == NULL || reloc_offset == NULL) {
		drm_free_large(reloc);
		drm_free_large(reloc_offset);
		mutex_lock(&dev->struct_mutex);
		return -ENOMEM;
	}

	total = 0;
	for (i = 0; i < count; i++) {
		struct drm_i915_gem_relocation_entry __user *user_relocs;
		u64 invalid_offset = (u64)-1;
		int j;

		user_relocs = to_user_ptr(exec[i].relocs_ptr);

		if (copy_from_user(reloc+total, user_relocs,
				   exec[i].relocation_count * sizeof(*reloc))) {
			ret = -EFAULT;
			mutex_lock(&dev->struct_mutex);
			goto err;
		}

		/* As we do not update the known relocation offsets after
		 * relocating (due to the complexities in lock handling),
		 * we need to mark them as invalid now so that we force the
		 * relocation processing next time. Just in case the target
		 * object is evicted and then rebound into its old
		 * presumed_offset before the next execbuffer - if that
		 * happened we would make the mistake of assuming that the
		 * relocations were valid.
		 */
		for (j = 0; j < exec[i].relocation_count; j++) {
			if (__copy_to_user(&user_relocs[j].presumed_offset,
					   &invalid_offset,
					   sizeof(invalid_offset))) {
				ret = -EFAULT;
				mutex_lock(&dev->struct_mutex);
				goto err;
			}
		}

		reloc_offset[i] = total;
		total += exec[i].relocation_count;
	}

	ret = i915_mutex_lock_interruptible(dev);
	if (ret) {
		mutex_lock(&dev->struct_mutex);
		goto err;
	}

	/* reacquire the objects */
	eb_reset(eb);
	ret = eb_lookup_vmas(eb, exec, args, vm, file);
	if (ret)
		goto err;

	need_relocs = (args->flags & I915_EXEC_NO_RELOC) == 0;
	ret = i915_gem_execbuffer_reserve(ring, &eb->vmas, &need_relocs);
	if (ret)
		goto err;

	list_for_each_entry(vma, &eb->vmas, exec_list) {
		int offset = vma->exec_entry - exec;
		ret = i915_gem_execbuffer_relocate_vma_slow(vma, eb,
							    reloc + reloc_offset[offset]);
		if (ret)
			goto err;
	}

	/* Leave the user relocations as are, this is the painfully slow path,
	 * and we want to avoid the complication of dropping the lock whilst
	 * having buffers reserved in the aperture and so causing spurious
	 * ENOSPC for random operations.
	 */

err:
	drm_free_large(reloc);
	drm_free_large(reloc_offset);
	return ret;
}

static int
i915_gem_execbuffer_move_to_gpu(struct intel_engine_cs *ring,
				struct list_head *vmas)
{
	struct i915_vma *vma;
	uint32_t flush_domains = 0;
	bool flush_chipset = false;
	int ret;

	list_for_each_entry(vma, vmas, exec_list) {
		struct drm_i915_gem_object *obj = vma->obj;
		ret = i915_gem_object_sync(obj, ring);
		if (ret)
			return ret;

		if (obj->base.write_domain & I915_GEM_DOMAIN_CPU)
			flush_chipset |= i915_gem_clflush_object(obj, false);

		flush_domains |= obj->base.write_domain;
	}

	if (flush_chipset)
		i915_gem_chipset_flush(ring->dev);

	if (flush_domains & I915_GEM_DOMAIN_GTT)
		wmb();

	/* Unconditionally invalidate gpu caches and ensure that we do flush
	 * any residual writes from the previous batch.
	 */
	return intel_ring_invalidate_all_caches(ring);
}

static bool
i915_gem_check_execbuffer(struct drm_i915_gem_execbuffer2 *exec)
{
	if (exec->flags & __I915_EXEC_UNKNOWN_FLAGS)
		return false;

	return ((exec->batch_start_offset | exec->batch_len) & 0x7) == 0;
}

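/*
 * Sanity-check the exec list before taking any locks: reject unknown or
 * disallowed per-object flags, guard the total relocation count against
 * overflow, and verify (and optionally prefault) the user relocation
 * arrays for write access, since presumed offsets get written back.
 */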
static int
validate_exec_list(struct drm_device *dev,
		   struct drm_i915_gem_exec_object2 *exec,
		   int count)
{
	unsigned relocs_total = 0;
	unsigned relocs_max = UINT_MAX / sizeof(struct drm_i915_gem_relocation_entry);
	unsigned invalid_flags;
	int i;

	invalid_flags = __EXEC_OBJECT_UNKNOWN_FLAGS;
	if (USES_FULL_PPGTT(dev))
		invalid_flags |= EXEC_OBJECT_NEEDS_GTT;

	for (i = 0; i < count; i++) {
		char __user *ptr = to_user_ptr(exec[i].relocs_ptr);
		int length; /* limited by fault_in_pages_readable() */

		if (exec[i].flags & invalid_flags)
			return -EINVAL;

		/* First check for malicious input causing overflow in
		 * the worst case where we need to allocate the entire
		 * relocation tree as a single array.
		 */
		if (exec[i].relocation_count > relocs_max - relocs_total)
			return -EINVAL;
		relocs_total += exec[i].relocation_count;

		length = exec[i].relocation_count *
			sizeof(struct drm_i915_gem_relocation_entry);
		/*
		 * We must check that the entire relocation array is safe
		 * to read, but since we may need to update the presumed
		 * offsets during execution, check for full write access.
		 */
		if (!access_ok(VERIFY_WRITE, ptr, length))
			return -EFAULT;

		if (likely(!i915.prefault_disable)) {
			if (fault_in_multipages_readable(ptr, length))
				return -EFAULT;
		}
	}

	return 0;
}

static struct intel_context *
i915_gem_validate_context(struct drm_device *dev, struct drm_file *file,
			  struct intel_engine_cs *ring, const u32 ctx_id)
{
	struct intel_context *ctx = NULL;
	struct i915_ctx_hang_stats *hs;

	if (ring->id != RCS && ctx_id != DEFAULT_CONTEXT_HANDLE)
		return ERR_PTR(-EINVAL);

	ctx = i915_gem_context_get(file->driver_priv, ctx_id);
	if (IS_ERR(ctx))
		return ctx;

	hs = &ctx->hang_stats;
	if (hs->banned) {
		DRM_DEBUG("Context %u tried to submit while banned\n", ctx_id);
		return ERR_PTR(-EIO);
	}

	if (i915.enable_execlists && !ctx->engine[ring->id].state) {
		int ret = intel_lr_context_deferred_create(ctx, ring);
		if (ret) {
			DRM_DEBUG("Could not create LRC %u: %d\n", ctx_id, ret);
			return ERR_PTR(ret);
		}
	}

	return ctx;
}

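/*
 * Commit the pending read/write domains on every buffer and move each vma
 * onto the ring's active list, recording the request for write and fence
 * tracking so retirement and domain management see this batch.
 */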
void
i915_gem_execbuffer_move_to_active(struct list_head *vmas,
				   struct intel_engine_cs *ring)
{
	struct drm_i915_gem_request *req = intel_ring_get_request(ring);
	struct i915_vma *vma;

	list_for_each_entry(vma, vmas, exec_list) {
		struct drm_i915_gem_exec_object2 *entry = vma->exec_entry;
		struct drm_i915_gem_object *obj = vma->obj;
		u32 old_read = obj->base.read_domains;
		u32 old_write = obj->base.write_domain;

		obj->base.write_domain = obj->base.pending_write_domain;
		if (obj->base.write_domain == 0)
			obj->base.pending_read_domains |= obj->base.read_domains;
		obj->base.read_domains = obj->base.pending_read_domains;

		i915_vma_move_to_active(vma, ring);
		if (obj->base.write_domain) {
			obj->dirty = 1;
			i915_gem_request_assign(&obj->last_write_req, req);

			intel_fb_obj_invalidate(obj, ring);

			/* update for the implicit flush after a batch */
			obj->base.write_domain &= ~I915_GEM_GPU_DOMAINS;
		}
		if (entry->flags & EXEC_OBJECT_NEEDS_FENCE) {
			i915_gem_request_assign(&obj->last_fenced_req, req);
			if (entry->flags & __EXEC_OBJECT_HAS_FENCE) {
				struct drm_i915_private *dev_priv = to_i915(ring->dev);
				list_move_tail(&dev_priv->fence_regs[obj->fence_reg].lru_list,
					       &dev_priv->mm.fence_list);
			}
		}

		trace_i915_gem_object_change_domain(obj, old_read, old_write);
	}
}

void
i915_gem_execbuffer_retire_commands(struct drm_device *dev,
				    struct drm_file *file,
				    struct intel_engine_cs *ring,
				    struct drm_i915_gem_object *obj)
{
	/* Unconditionally force add_request to emit a full flush. */
	ring->gpu_caches_dirty = true;

	/* Add a breadcrumb for the completion of the batch buffer */
	(void)__i915_add_request(ring, file, obj);
}

static int
i915_reset_gen7_sol_offsets(struct drm_device *dev,
			    struct intel_engine_cs *ring)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int ret, i;

	if (!IS_GEN7(dev) || ring != &dev_priv->ring[RCS]) {
		DRM_DEBUG("sol reset is gen7/rcs only\n");
		return -EINVAL;
	}

	ret = intel_ring_begin(ring, 4 * 3);
	if (ret)
		return ret;

	for (i = 0; i < 4; i++) {
		intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1));
		intel_ring_emit(ring, GEN7_SO_WRITE_OFFSET(i));
		intel_ring_emit(ring, 0);
	}

	intel_ring_advance(ring);

	return 0;
}

static int
i915_emit_box(struct intel_engine_cs *ring,
	      struct drm_clip_rect *box,
	      int DR1, int DR4)
{
	int ret;

	if (box->y2 <= box->y1 || box->x2 <= box->x1 ||
	    box->y2 <= 0 || box->x2 <= 0) {
		DRM_ERROR("Bad box %d,%d..%d,%d\n",
			  box->x1, box->y1, box->x2, box->y2);
		return -EINVAL;
	}

	if (INTEL_INFO(ring->dev)->gen >= 4) {
		ret = intel_ring_begin(ring, 4);
		if (ret)
			return ret;

		intel_ring_emit(ring, GFX_OP_DRAWRECT_INFO_I965);
		intel_ring_emit(ring, (box->x1 & 0xffff) | box->y1 << 16);
		intel_ring_emit(ring, ((box->x2 - 1) & 0xffff) | (box->y2 - 1) << 16);
		intel_ring_emit(ring, DR4);
	} else {
		ret = intel_ring_begin(ring, 6);
		if (ret)
			return ret;

		intel_ring_emit(ring, GFX_OP_DRAWRECT_INFO);
		intel_ring_emit(ring, DR1);
		intel_ring_emit(ring, (box->x1 & 0xffff) | box->y1 << 16);
		intel_ring_emit(ring, ((box->x2 - 1) & 0xffff) | (box->y2 - 1) << 16);
		intel_ring_emit(ring, DR4);
		intel_ring_emit(ring, 0);
	}
	intel_ring_advance(ring);

	return 0;
}

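/*
 * Legacy ringbuffer submission backend (the execlist path has its own,
 * reached via dev_priv->gt.do_execbuf). Validates cliprect and
 * constants-mode arguments, flushes the objects to the GPU domain,
 * switches context and finally dispatches the batch, once per cliprect
 * if any were given.
 */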
int
i915_gem_ringbuffer_submission(struct drm_device *dev, struct drm_file *file,
			       struct intel_engine_cs *ring,
			       struct intel_context *ctx,
			       struct drm_i915_gem_execbuffer2 *args,
			       struct list_head *vmas,
			       struct drm_i915_gem_object *batch_obj,
			       u64 exec_start, u32 flags)
{
	struct drm_clip_rect *cliprects = NULL;
	struct drm_i915_private *dev_priv = dev->dev_private;
	u64 exec_len;
	int instp_mode;
	u32 instp_mask;
	int i, ret = 0;

	if (args->num_cliprects != 0) {
		if (ring != &dev_priv->ring[RCS]) {
			DRM_DEBUG("clip rectangles are only valid with the render ring\n");
			return -EINVAL;
		}

		if (INTEL_INFO(dev)->gen >= 5) {
			DRM_DEBUG("clip rectangles are only valid on pre-gen5\n");
			return -EINVAL;
		}

		if (args->num_cliprects > UINT_MAX / sizeof(*cliprects)) {
			DRM_DEBUG("execbuf with %u cliprects\n",
				  args->num_cliprects);
			return -EINVAL;
		}

		cliprects = kcalloc(args->num_cliprects,
				    sizeof(*cliprects),
				    GFP_KERNEL);
		if (cliprects == NULL) {
			ret = -ENOMEM;
			goto error;
		}

		if (copy_from_user(cliprects,
				   to_user_ptr(args->cliprects_ptr),
				   sizeof(*cliprects)*args->num_cliprects)) {
			ret = -EFAULT;
			goto error;
		}
	} else {
		if (args->DR4 == 0xffffffff) {
			DRM_DEBUG("UXA submitting garbage DR4, fixing up\n");
			args->DR4 = 0;
		}

		if (args->DR1 || args->DR4 || args->cliprects_ptr) {
			DRM_DEBUG("0 cliprects but dirt in cliprects fields\n");
			return -EINVAL;
		}
	}

	ret = i915_gem_execbuffer_move_to_gpu(ring, vmas);
	if (ret)
		goto error;

	ret = i915_switch_context(ring, ctx);
	if (ret)
		goto error;

	instp_mode = args->flags & I915_EXEC_CONSTANTS_MASK;
	instp_mask = I915_EXEC_CONSTANTS_MASK;
	switch (instp_mode) {
	case I915_EXEC_CONSTANTS_REL_GENERAL:
	case I915_EXEC_CONSTANTS_ABSOLUTE:
	case I915_EXEC_CONSTANTS_REL_SURFACE:
		if (instp_mode != 0 && ring != &dev_priv->ring[RCS]) {
			DRM_DEBUG("non-0 rel constants mode on non-RCS\n");
			ret = -EINVAL;
			goto error;
		}

		if (instp_mode != dev_priv->relative_constants_mode) {
			if (INTEL_INFO(dev)->gen < 4) {
				DRM_DEBUG("no rel constants on pre-gen4\n");
				ret = -EINVAL;
				goto error;
			}

			if (INTEL_INFO(dev)->gen > 5 &&
			    instp_mode == I915_EXEC_CONSTANTS_REL_SURFACE) {
				DRM_DEBUG("rel surface constants mode invalid on gen5+\n");
				ret = -EINVAL;
				goto error;
			}

			/* The HW changed the meaning on this bit on gen6 */
			if (INTEL_INFO(dev)->gen >= 6)
				instp_mask &= ~I915_EXEC_CONSTANTS_REL_SURFACE;
		}
		break;
	default:
		DRM_DEBUG("execbuf with unknown constants: %d\n", instp_mode);
		ret = -EINVAL;
		goto error;
	}

	if (ring == &dev_priv->ring[RCS] &&
	    instp_mode != dev_priv->relative_constants_mode) {
		ret = intel_ring_begin(ring, 4);
		if (ret)
			goto error;

		intel_ring_emit(ring, MI_NOOP);
		intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1));
		intel_ring_emit(ring, INSTPM);
		intel_ring_emit(ring, instp_mask << 16 | instp_mode);
		intel_ring_advance(ring);

		dev_priv->relative_constants_mode = instp_mode;
	}

	if (args->flags & I915_EXEC_GEN7_SOL_RESET) {
		ret = i915_reset_gen7_sol_offsets(dev, ring);
		if (ret)
			goto error;
	}

	exec_len = args->batch_len;
	if (cliprects) {
		for (i = 0; i < args->num_cliprects; i++) {
			ret = i915_emit_box(ring, &cliprects[i],
					    args->DR1, args->DR4);
			if (ret)
				goto error;

			ret = ring->dispatch_execbuffer(ring,
							exec_start, exec_len,
							flags);
			if (ret)
				goto error;
		}
	} else {
		ret = ring->dispatch_execbuffer(ring,
						exec_start, exec_len,
						flags);
		if (ret)
			return ret;
	}

	trace_i915_gem_ring_dispatch(intel_ring_get_request(ring), flags);

	i915_gem_execbuffer_move_to_active(vmas, ring);
	i915_gem_execbuffer_retire_commands(dev, file, ring, batch_obj);

error:
	kfree(cliprects);
	return ret;
}

/**
 * Find one BSD ring to dispatch the corresponding BSD command.
 * The Ring ID is returned.
 */
static int gen8_dispatch_bsd_ring(struct drm_device *dev,
				  struct drm_file *file)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_i915_file_private *file_priv = file->driver_priv;

	/* Check whether the file_priv is using one ring */
	if (file_priv->bsd_ring)
		return file_priv->bsd_ring->id;
	else {
		/* If no, use the ping-pong mechanism to select one ring */
		int ring_id;

		mutex_lock(&dev->struct_mutex);
		if (dev_priv->mm.bsd_ring_dispatch_index == 0) {
			ring_id = VCS;
			dev_priv->mm.bsd_ring_dispatch_index = 1;
		} else {
			ring_id = VCS2;
			dev_priv->mm.bsd_ring_dispatch_index = 0;
		}
		file_priv->bsd_ring = &dev_priv->ring[ring_id];
		mutex_unlock(&dev->struct_mutex);
		return ring_id;
	}
}

static struct drm_i915_gem_object *
eb_get_batch(struct eb_vmas *eb)
{
	struct i915_vma *vma = list_entry(eb->vmas.prev, typeof(*vma), exec_list);

	/*
	 * SNA is doing fancy tricks with compressing batch buffers, which leads
	 * to negative relocation deltas. Usually that works out ok since the
	 * relocate address is still positive, except when the batch is placed
	 * very low in the GTT. Ensure this doesn't happen.
	 *
	 * Note that actual hangs have only been observed on gen7, but for
	 * paranoia do it everywhere.
	 */
	vma->exec_entry->flags |= __EXEC_OBJECT_NEEDS_BIAS;

	return vma->obj;
}

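/*
 * Common execbuffer entry point: validates the ioctl arguments, selects
 * the target ring and context (and thus the ppgtt or global address
 * space), reserves and relocates all buffers, optionally runs the command
 * parser, then hands off to the ringbuffer or execlist submission backend.
 */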
static int
i915_gem_do_execbuffer(struct drm_device *dev, void *data,
		       struct drm_file *file,
		       struct drm_i915_gem_execbuffer2 *args,
		       struct drm_i915_gem_exec_object2 *exec)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct eb_vmas *eb;
	struct drm_i915_gem_object *batch_obj;
	struct intel_engine_cs *ring;
	struct intel_context *ctx;
	struct i915_address_space *vm;
	const u32 ctx_id = i915_execbuffer2_get_context_id(*args);
	u64 exec_start = args->batch_start_offset;
	u32 flags;
	int ret;
	bool need_relocs;

	if (!i915_gem_check_execbuffer(args))
		return -EINVAL;

	ret = validate_exec_list(dev, exec, args->buffer_count);
	if (ret)
		return ret;

	flags = 0;
	if (args->flags & I915_EXEC_SECURE) {
		if (!file->is_master || !capable(CAP_SYS_ADMIN))
			return -EPERM;

		flags |= I915_DISPATCH_SECURE;
	}
	if (args->flags & I915_EXEC_IS_PINNED)
		flags |= I915_DISPATCH_PINNED;

	if ((args->flags & I915_EXEC_RING_MASK) > LAST_USER_RING) {
		DRM_DEBUG("execbuf with unknown ring: %d\n",
			  (int)(args->flags & I915_EXEC_RING_MASK));
		return -EINVAL;
	}

	if ((args->flags & I915_EXEC_RING_MASK) == I915_EXEC_DEFAULT)
		ring = &dev_priv->ring[RCS];
	else if ((args->flags & I915_EXEC_RING_MASK) == I915_EXEC_BSD) {
		if (HAS_BSD2(dev)) {
			int ring_id;
			ring_id = gen8_dispatch_bsd_ring(dev, file);
			ring = &dev_priv->ring[ring_id];
		} else
			ring = &dev_priv->ring[VCS];
	} else
		ring = &dev_priv->ring[(args->flags & I915_EXEC_RING_MASK) - 1];

	if (!intel_ring_initialized(ring)) {
		DRM_DEBUG("execbuf with invalid ring: %d\n",
			  (int)(args->flags & I915_EXEC_RING_MASK));
		return -EINVAL;
	}

	if (args->buffer_count < 1) {
		DRM_DEBUG("execbuf with %d buffers\n", args->buffer_count);
		return -EINVAL;
	}

	intel_runtime_pm_get(dev_priv);

	ret = i915_mutex_lock_interruptible(dev);
	if (ret)
		goto pre_mutex_err;

	ctx = i915_gem_validate_context(dev, file, ring, ctx_id);
	if (IS_ERR(ctx)) {
		mutex_unlock(&dev->struct_mutex);
		ret = PTR_ERR(ctx);
		goto pre_mutex_err;
	}

	i915_gem_context_reference(ctx);

	if (ctx->ppgtt)
		vm = &ctx->ppgtt->base;
	else
		vm = &dev_priv->gtt.base;

	eb = eb_create(args);
	if (eb == NULL) {
		i915_gem_context_unreference(ctx);
		mutex_unlock(&dev->struct_mutex);
		ret = -ENOMEM;
		goto pre_mutex_err;
	}

	/* Look up object handles */
	ret = eb_lookup_vmas(eb, exec, args, vm, file);
	if (ret)
		goto err;

	/* take note of the batch buffer before we might reorder the lists */
	batch_obj = eb_get_batch(eb);

	/* Move the objects en-masse into the GTT, evicting if necessary. */
	need_relocs = (args->flags & I915_EXEC_NO_RELOC) == 0;
	ret = i915_gem_execbuffer_reserve(ring, &eb->vmas, &need_relocs);
	if (ret)
		goto err;

	/* The objects are in their final locations, apply the relocations. */
	if (need_relocs)
		ret = i915_gem_execbuffer_relocate(eb);
	if (ret) {
		if (ret == -EFAULT) {
			ret = i915_gem_execbuffer_relocate_slow(dev, args, file, ring,
								eb, exec);
			BUG_ON(!mutex_is_locked(&dev->struct_mutex));
		}
		if (ret)
			goto err;
	}

	/* Set the pending read domains for the batch buffer to COMMAND */
	if (batch_obj->base.pending_write_domain) {
		DRM_DEBUG("Attempting to use self-modifying batch buffer\n");
		ret = -EINVAL;
		goto err;
	}
	batch_obj->base.pending_read_domains |= I915_GEM_DOMAIN_COMMAND;

	if (i915_needs_cmd_parser(ring)) {
		ret = i915_parse_cmds(ring,
				      batch_obj,
				      args->batch_start_offset,
				      file->is_master);
		if (ret) {
			if (ret != -EACCES)
				goto err;
		} else {
			/*
			 * XXX: Actually do this when enabling batch copy...
			 *
			 * Set the DISPATCH_SECURE bit to remove the NON_SECURE bit
			 * from MI_BATCH_BUFFER_START commands issued in the
			 * dispatch_execbuffer implementations. We specifically don't
			 * want that set when the command parser is enabled.
			 */
		}
	}

	/* snb/ivb/vlv conflate the "batch in ppgtt" bit with the "non-secure
	 * batch" bit. Hence we need to pin secure batches into the global gtt.
	 * hsw should have this fixed, but bdw mucks it up again. */
	if (flags & I915_DISPATCH_SECURE) {
		/*
		 * So on first glance it looks freaky that we pin the batch here
		 * outside of the reservation loop. But:
		 * - The batch is already pinned into the relevant ppgtt, so we
		 *   already have the backing storage fully allocated.
		 * - No other BO uses the global gtt (well contexts, but meh),
		 *   so we don't really have issues with multiple objects not
		 *   fitting due to fragmentation.
		 * So this is actually safe.
		 */
		ret = i915_gem_obj_ggtt_pin(batch_obj, 0, 0);
		if (ret)
			goto err;

		exec_start += i915_gem_obj_ggtt_offset(batch_obj);
	} else
		exec_start += i915_gem_obj_offset(batch_obj, vm);

	ret = dev_priv->gt.do_execbuf(dev, file, ring, ctx, args,
				      &eb->vmas, batch_obj, exec_start, flags);

	/*
	 * FIXME: We crucially rely upon the active tracking for the (ppgtt)
	 * batch vma for correctness. For less ugly and less fragility this
	 * needs to be adjusted to also track the ggtt batch vma properly as
	 * active.
	 */
	if (flags & I915_DISPATCH_SECURE)
		i915_gem_object_ggtt_unpin(batch_obj);
err:
	/* the request owns the ref now */
	i915_gem_context_unreference(ctx);
	eb_destroy(eb);

	mutex_unlock(&dev->struct_mutex);

pre_mutex_err:
	/* intel_gpu_busy should also get a ref, so it will free when the device
	 * is really idle. */
	intel_runtime_pm_put(dev_priv);
	return ret;
}

/*
 * Legacy execbuffer just creates an exec2 list from the original exec object
 * list array and passes it to the real function.
 */
int
i915_gem_execbuffer(struct drm_device *dev, void *data,
		    struct drm_file *file)
{
	struct drm_i915_gem_execbuffer *args = data;
	struct drm_i915_gem_execbuffer2 exec2;
	struct drm_i915_gem_exec_object *exec_list = NULL;
	struct drm_i915_gem_exec_object2 *exec2_list = NULL;
	int ret, i;

	if (args->buffer_count < 1) {
		DRM_DEBUG("execbuf with %d buffers\n", args->buffer_count);
		return -EINVAL;
	}

	/* Copy in the exec list from userland */
	exec_list = drm_malloc_ab(sizeof(*exec_list), args->buffer_count);
	exec2_list = drm_malloc_ab(sizeof(*exec2_list), args->buffer_count);
	if (exec_list == NULL || exec2_list == NULL) {
		DRM_DEBUG("Failed to allocate exec list for %d buffers\n",
			  args->buffer_count);
		drm_free_large(exec_list);
		drm_free_large(exec2_list);
		return -ENOMEM;
	}
	ret = copy_from_user(exec_list,
			     to_user_ptr(args->buffers_ptr),
			     sizeof(*exec_list) * args->buffer_count);
	if (ret != 0) {
		DRM_DEBUG("copy %d exec entries failed %d\n",
			  args->buffer_count, ret);
		drm_free_large(exec_list);
		drm_free_large(exec2_list);
		return -EFAULT;
	}

	for (i = 0; i < args->buffer_count; i++) {
		exec2_list[i].handle = exec_list[i].handle;
		exec2_list[i].relocation_count = exec_list[i].relocation_count;
		exec2_list[i].relocs_ptr = exec_list[i].relocs_ptr;
		exec2_list[i].alignment = exec_list[i].alignment;
		exec2_list[i].offset = exec_list[i].offset;
		if (INTEL_INFO(dev)->gen < 4)
			exec2_list[i].flags = EXEC_OBJECT_NEEDS_FENCE;
		else
			exec2_list[i].flags = 0;
	}

	exec2.buffers_ptr = args->buffers_ptr;
	exec2.buffer_count = args->buffer_count;
	exec2.batch_start_offset = args->batch_start_offset;
	exec2.batch_len = args->batch_len;
	exec2.DR1 = args->DR1;
	exec2.DR4 = args->DR4;
	exec2.num_cliprects = args->num_cliprects;
	exec2.cliprects_ptr = args->cliprects_ptr;
	exec2.flags = I915_EXEC_RENDER;
	i915_execbuffer2_set_context_id(exec2, 0);

	ret = i915_gem_do_execbuffer(dev, data, file, &exec2, exec2_list);
	if (!ret) {
		struct drm_i915_gem_exec_object __user *user_exec_list =
			to_user_ptr(args->buffers_ptr);

		/* Copy the new buffer offsets back to the user's exec list. */
		for (i = 0; i < args->buffer_count; i++) {
			ret = __copy_to_user(&user_exec_list[i].offset,
					     &exec2_list[i].offset,
					     sizeof(user_exec_list[i].offset));
			if (ret) {
				ret = -EFAULT;
				DRM_DEBUG("failed to copy %d exec entries "
					  "back to user (%d)\n",
					  args->buffer_count, ret);
				break;
			}
		}
	}

	drm_free_large(exec_list);
	drm_free_large(exec2_list);
	return ret;
}

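/*
 * A minimal userspace sketch of driving this ioctl through libdrm, assuming
 * an already-open DRM fd and a GEM handle whose contents end with
 * MI_BATCH_BUFFER_END (bo_handle and batch_bytes are illustrative names,
 * not values defined anywhere in this file):
 *
 *	struct drm_i915_gem_exec_object2 obj = { .handle = bo_handle };
 *	struct drm_i915_gem_execbuffer2 execbuf = {
 *		.buffers_ptr = (uintptr_t)&obj,
 *		.buffer_count = 1,		/- batch must be the last entry -/
 *		.batch_len = batch_bytes,	/- 8-byte aligned, see i915_gem_check_execbuffer() -/
 *		.flags = I915_EXEC_RENDER,
 *	};
 *	drmIoctl(fd, DRM_IOCTL_I915_GEM_EXECBUFFER2, &execbuf);
 *
 * (The /- -/ markers stand in for nested comments, which C forbids.)
 */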
int
i915_gem_execbuffer2(struct drm_device *dev, void *data,
		     struct drm_file *file)
{
	struct drm_i915_gem_execbuffer2 *args = data;
	struct drm_i915_gem_exec_object2 *exec2_list = NULL;
	int ret;

	if (args->buffer_count < 1 ||
	    args->buffer_count > UINT_MAX / sizeof(*exec2_list)) {
		DRM_DEBUG("execbuf2 with %d buffers\n", args->buffer_count);
		return -EINVAL;
	}

	if (args->rsvd2 != 0) {
		DRM_DEBUG("dirty rvsd2 field\n");
		return -EINVAL;
	}

	exec2_list = kmalloc(sizeof(*exec2_list)*args->buffer_count,
			     GFP_TEMPORARY | __GFP_NOWARN | __GFP_NORETRY);
	if (exec2_list == NULL)
		exec2_list = drm_malloc_ab(sizeof(*exec2_list),
					   args->buffer_count);
	if (exec2_list == NULL) {
		DRM_DEBUG("Failed to allocate exec list for %d buffers\n",
			  args->buffer_count);
		return -ENOMEM;
	}
	ret = copy_from_user(exec2_list,
			     to_user_ptr(args->buffers_ptr),
			     sizeof(*exec2_list) * args->buffer_count);
	if (ret != 0) {
		DRM_DEBUG("copy %d exec entries failed %d\n",
			  args->buffer_count, ret);
		drm_free_large(exec2_list);
		return -EFAULT;
	}

	ret = i915_gem_do_execbuffer(dev, data, file, args, exec2_list);
	if (!ret) {
		/* Copy the new buffer offsets back to the user's exec list. */
		struct drm_i915_gem_exec_object2 __user *user_exec_list =
			to_user_ptr(args->buffers_ptr);
		int i;

		for (i = 0; i < args->buffer_count; i++) {
			ret = __copy_to_user(&user_exec_list[i].offset,
					     &exec2_list[i].offset,
					     sizeof(user_exec_list[i].offset));
			if (ret) {
				ret = -EFAULT;
				DRM_DEBUG("failed to copy %d exec entries "
					  "back to user\n",
					  args->buffer_count);
				break;
			}
		}
	}

	drm_free_large(exec2_list);
	return ret;
}