drivers/gpu/drm/i915/i915_gem.c [deliverable/linux.git]
/*
 * Copyright © 2008-2015 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Eric Anholt <eric@anholt.net>
 *
 */

#include <drm/drmP.h>
#include <drm/drm_vma_manager.h>
#include <drm/i915_drm.h>
#include "i915_drv.h"
#include "i915_gem_dmabuf.h"
#include "i915_vgpu.h"
#include "i915_trace.h"
#include "intel_drv.h"
#include "intel_frontbuffer.h"
#include "intel_mocs.h"
#include <linux/reservation.h>
#include <linux/shmem_fs.h>
#include <linux/slab.h>
#include <linux/swap.h>
#include <linux/pci.h>
#include <linux/dma-buf.h>

static void i915_gem_object_flush_gtt_write_domain(struct drm_i915_gem_object *obj);
static void i915_gem_object_flush_cpu_write_domain(struct drm_i915_gem_object *obj);

static bool cpu_cache_is_coherent(struct drm_device *dev,
				  enum i915_cache_level level)
{
	return HAS_LLC(dev) || level != I915_CACHE_NONE;
}

static bool cpu_write_needs_clflush(struct drm_i915_gem_object *obj)
{
	if (obj->base.write_domain == I915_GEM_DOMAIN_CPU)
		return false;

	if (!cpu_cache_is_coherent(obj->base.dev, obj->cache_level))
		return true;

	return obj->pin_display;
}

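/*
 * Helpers for the GTT pread/pwrite fallback paths below: when an object
 * cannot be pinned into the mappable aperture in one go, we instead grab
 * a single spare page of GGTT address space and point it at each object
 * page in turn (see i915_gem_gtt_pread() and i915_gem_gtt_pwrite_fast()).
 */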
static int
insert_mappable_node(struct drm_i915_private *i915,
		     struct drm_mm_node *node, u32 size)
{
	memset(node, 0, sizeof(*node));
	return drm_mm_insert_node_in_range_generic(&i915->ggtt.base.mm, node,
						   size, 0, 0, 0,
						   i915->ggtt.mappable_end,
						   DRM_MM_SEARCH_DEFAULT,
						   DRM_MM_CREATE_DEFAULT);
}

static void
remove_mappable_node(struct drm_mm_node *node)
{
	drm_mm_remove_node(node);
}

/* some bookkeeping */
static void i915_gem_info_add_obj(struct drm_i915_private *dev_priv,
				  size_t size)
{
	spin_lock(&dev_priv->mm.object_stat_lock);
	dev_priv->mm.object_count++;
	dev_priv->mm.object_memory += size;
	spin_unlock(&dev_priv->mm.object_stat_lock);
}

static void i915_gem_info_remove_obj(struct drm_i915_private *dev_priv,
				     size_t size)
{
	spin_lock(&dev_priv->mm.object_stat_lock);
	dev_priv->mm.object_count--;
	dev_priv->mm.object_memory -= size;
	spin_unlock(&dev_priv->mm.object_stat_lock);
}

static int
i915_gem_wait_for_error(struct i915_gpu_error *error)
{
	int ret;

	if (!i915_reset_in_progress(error))
		return 0;

	/*
	 * Only wait 10 seconds for the gpu reset to complete to avoid hanging
	 * userspace. If it takes that long something really bad is going on and
	 * we should simply try to bail out and fail as gracefully as possible.
	 */
	ret = wait_event_interruptible_timeout(error->reset_queue,
					       !i915_reset_in_progress(error),
					       10*HZ);
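	/*
	 * wait_event_interruptible_timeout() returns 0 on timeout, a
	 * negative error if interrupted by a signal, and the number of
	 * jiffies remaining (> 0) if the condition became true - hence
	 * the three-way split below.
	 */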
	if (ret == 0) {
		DRM_ERROR("Timed out waiting for the gpu reset to complete\n");
		return -EIO;
	} else if (ret < 0) {
		return ret;
	} else {
		return 0;
	}
}

int i915_mutex_lock_interruptible(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	int ret;

	ret = i915_gem_wait_for_error(&dev_priv->gpu_error);
	if (ret)
		return ret;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	return 0;
}

int
i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data,
			    struct drm_file *file)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct i915_ggtt *ggtt = &dev_priv->ggtt;
	struct drm_i915_gem_get_aperture *args = data;
	struct i915_vma *vma;
	size_t pinned;

	pinned = 0;
	mutex_lock(&dev->struct_mutex);
	list_for_each_entry(vma, &ggtt->base.active_list, vm_link)
		if (i915_vma_is_pinned(vma))
			pinned += vma->node.size;
	list_for_each_entry(vma, &ggtt->base.inactive_list, vm_link)
		if (i915_vma_is_pinned(vma))
			pinned += vma->node.size;
	mutex_unlock(&dev->struct_mutex);

	args->aper_size = ggtt->base.total;
	args->aper_available_size = args->aper_size - pinned;

	return 0;
}

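/*
 * "phys" objects are backed by a physically contiguous drm_pci_alloc()
 * buffer rather than by shmemfs pages; the helpers below copy the shmemfs
 * contents in and out of that buffer. This is used where the hardware must
 * address the buffer physically (historically, e.g., cursors on the very
 * oldest platforms - see the callers of i915_gem_object_attach_phys()).
 */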
static int
i915_gem_object_get_pages_phys(struct drm_i915_gem_object *obj)
{
	struct address_space *mapping = obj->base.filp->f_mapping;
	char *vaddr = obj->phys_handle->vaddr;
	struct sg_table *st;
	struct scatterlist *sg;
	int i;

	if (WARN_ON(i915_gem_object_needs_bit17_swizzle(obj)))
		return -EINVAL;

	for (i = 0; i < obj->base.size / PAGE_SIZE; i++) {
		struct page *page;
		char *src;

		page = shmem_read_mapping_page(mapping, i);
		if (IS_ERR(page))
			return PTR_ERR(page);

		src = kmap_atomic(page);
		memcpy(vaddr, src, PAGE_SIZE);
		drm_clflush_virt_range(vaddr, PAGE_SIZE);
		kunmap_atomic(src);

		put_page(page);
		vaddr += PAGE_SIZE;
	}

	i915_gem_chipset_flush(to_i915(obj->base.dev));

	st = kmalloc(sizeof(*st), GFP_KERNEL);
	if (st == NULL)
		return -ENOMEM;

	if (sg_alloc_table(st, 1, GFP_KERNEL)) {
		kfree(st);
		return -ENOMEM;
	}

	sg = st->sgl;
	sg->offset = 0;
	sg->length = obj->base.size;

	sg_dma_address(sg) = obj->phys_handle->busaddr;
	sg_dma_len(sg) = obj->base.size;

	obj->pages = st;
	return 0;
}

static void
i915_gem_object_put_pages_phys(struct drm_i915_gem_object *obj)
{
	int ret;

	BUG_ON(obj->madv == __I915_MADV_PURGED);

	ret = i915_gem_object_set_to_cpu_domain(obj, true);
	if (WARN_ON(ret)) {
		/* In the event of a disaster, abandon all caches and
		 * hope for the best.
		 */
		obj->base.read_domains = obj->base.write_domain = I915_GEM_DOMAIN_CPU;
	}

	if (obj->madv == I915_MADV_DONTNEED)
		obj->dirty = 0;

	if (obj->dirty) {
		struct address_space *mapping = obj->base.filp->f_mapping;
		char *vaddr = obj->phys_handle->vaddr;
		int i;

		for (i = 0; i < obj->base.size / PAGE_SIZE; i++) {
			struct page *page;
			char *dst;

			page = shmem_read_mapping_page(mapping, i);
			if (IS_ERR(page))
				continue;

			dst = kmap_atomic(page);
			drm_clflush_virt_range(vaddr, PAGE_SIZE);
			memcpy(dst, vaddr, PAGE_SIZE);
			kunmap_atomic(dst);

			set_page_dirty(page);
			if (obj->madv == I915_MADV_WILLNEED)
				mark_page_accessed(page);
			put_page(page);
			vaddr += PAGE_SIZE;
		}
		obj->dirty = 0;
	}

	sg_free_table(obj->pages);
	kfree(obj->pages);
}

static void
i915_gem_object_release_phys(struct drm_i915_gem_object *obj)
{
	drm_pci_free(obj->base.dev, obj->phys_handle);
}

static const struct drm_i915_gem_object_ops i915_gem_phys_ops = {
	.get_pages = i915_gem_object_get_pages_phys,
	.put_pages = i915_gem_object_put_pages_phys,
	.release = i915_gem_object_release_phys,
};

int i915_gem_object_unbind(struct drm_i915_gem_object *obj)
{
	struct i915_vma *vma;
	LIST_HEAD(still_in_list);
	int ret;

	lockdep_assert_held(&obj->base.dev->struct_mutex);

	/* Closed vma are removed from the obj->vma_list - but they may
	 * still have an active binding on the object. To remove those we
	 * must wait for all rendering to complete to the object (as unbinding
	 * must anyway), and retire the requests.
	 */
	ret = i915_gem_object_wait_rendering(obj, false);
	if (ret)
		return ret;

	i915_gem_retire_requests(to_i915(obj->base.dev));

	while ((vma = list_first_entry_or_null(&obj->vma_list,
					       struct i915_vma,
					       obj_link))) {
		list_move_tail(&vma->obj_link, &still_in_list);
		ret = i915_vma_unbind(vma);
		if (ret)
			break;
	}
	list_splice(&still_in_list, &obj->vma_list);

	return ret;
}

/**
 * Ensures that all rendering to the object has completed and the object is
 * safe to unbind from the GTT or access from the CPU.
 * @obj: i915 gem object
 * @readonly: waiting for just read access or read-write access
 */
int
i915_gem_object_wait_rendering(struct drm_i915_gem_object *obj,
			       bool readonly)
{
	struct reservation_object *resv;
	struct i915_gem_active *active;
	unsigned long active_mask;
	int idx;

	lockdep_assert_held(&obj->base.dev->struct_mutex);

	if (!readonly) {
		active = obj->last_read;
		active_mask = i915_gem_object_get_active(obj);
	} else {
		active_mask = 1;
		active = &obj->last_write;
	}

	for_each_active(active_mask, idx) {
		int ret;

		ret = i915_gem_active_wait(&active[idx],
					   &obj->base.dev->struct_mutex);
		if (ret)
			return ret;
	}

	resv = i915_gem_object_get_dmabuf_resv(obj);
	if (resv) {
		long err;

		err = reservation_object_wait_timeout_rcu(resv, !readonly, true,
							  MAX_SCHEDULE_TIMEOUT);
		if (err < 0)
			return err;
	}

	return 0;
}

/* A nonblocking variant of the above wait. Must be called prior to
 * acquiring the mutex for the object, as the object state may change
 * during this call. A reference must be held by the caller for the object.
 */
static __must_check int
__unsafe_wait_rendering(struct drm_i915_gem_object *obj,
			struct intel_rps_client *rps,
			bool readonly)
{
	struct i915_gem_active *active;
	unsigned long active_mask;
	int idx;

	active_mask = __I915_BO_ACTIVE(obj);
	if (!active_mask)
		return 0;

	if (!readonly) {
		active = obj->last_read;
	} else {
		active_mask = 1;
		active = &obj->last_write;
	}

	for_each_active(active_mask, idx) {
		int ret;

		ret = i915_gem_active_wait_unlocked(&active[idx],
						    true, NULL, rps);
		if (ret)
			return ret;
	}

	return 0;
}

static struct intel_rps_client *to_rps_client(struct drm_file *file)
{
	struct drm_i915_file_private *fpriv = file->driver_priv;

	return &fpriv->rps;
}
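/*
 * The rps client tells the unlocked waits above which file is blocking,
 * so that the GPU frequency can be boosted on that client's behalf
 * (waitboosting) while it sleeps.
 */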
403
00731155
CW
404int
405i915_gem_object_attach_phys(struct drm_i915_gem_object *obj,
406 int align)
407{
408 drm_dma_handle_t *phys;
6a2c4232 409 int ret;
00731155
CW
410
411 if (obj->phys_handle) {
412 if ((unsigned long)obj->phys_handle->vaddr & (align -1))
413 return -EBUSY;
414
415 return 0;
416 }
417
418 if (obj->madv != I915_MADV_WILLNEED)
419 return -EFAULT;
420
421 if (obj->base.filp == NULL)
422 return -EINVAL;
423
4717ca9e
CW
424 ret = i915_gem_object_unbind(obj);
425 if (ret)
426 return ret;
427
428 ret = i915_gem_object_put_pages(obj);
6a2c4232
CW
429 if (ret)
430 return ret;
431
00731155
CW
432 /* create a new object */
433 phys = drm_pci_alloc(obj->base.dev, obj->base.size, align);
434 if (!phys)
435 return -ENOMEM;
436
00731155 437 obj->phys_handle = phys;
6a2c4232
CW
438 obj->ops = &i915_gem_phys_ops;
439
440 return i915_gem_object_get_pages(obj);
00731155
CW
441}
442
443static int
444i915_gem_phys_pwrite(struct drm_i915_gem_object *obj,
445 struct drm_i915_gem_pwrite *args,
446 struct drm_file *file_priv)
447{
448 struct drm_device *dev = obj->base.dev;
449 void *vaddr = obj->phys_handle->vaddr + args->offset;
3ed605bc 450 char __user *user_data = u64_to_user_ptr(args->data_ptr);
063e4e6b 451 int ret = 0;
6a2c4232
CW
452
453 /* We manually control the domain here and pretend that it
454 * remains coherent i.e. in the GTT domain, like shmem_pwrite.
455 */
456 ret = i915_gem_object_wait_rendering(obj, false);
457 if (ret)
458 return ret;
00731155 459
77a0d1ca 460 intel_fb_obj_invalidate(obj, ORIGIN_CPU);
00731155
CW
461 if (__copy_from_user_inatomic_nocache(vaddr, user_data, args->size)) {
462 unsigned long unwritten;
463
464 /* The physical object once assigned is fixed for the lifetime
465 * of the obj, so we can safely drop the lock and continue
466 * to access vaddr.
467 */
468 mutex_unlock(&dev->struct_mutex);
469 unwritten = copy_from_user(vaddr, user_data, args->size);
470 mutex_lock(&dev->struct_mutex);
063e4e6b
PZ
471 if (unwritten) {
472 ret = -EFAULT;
473 goto out;
474 }
00731155
CW
475 }
476
6a2c4232 477 drm_clflush_virt_range(vaddr, args->size);
c033666a 478 i915_gem_chipset_flush(to_i915(dev));
063e4e6b
PZ
479
480out:
de152b62 481 intel_fb_obj_flush(obj, false, ORIGIN_CPU);
063e4e6b 482 return ret;
00731155
CW
483}
484
42dcedd4
CW
485void *i915_gem_object_alloc(struct drm_device *dev)
486{
fac5e23e 487 struct drm_i915_private *dev_priv = to_i915(dev);
efab6d8d 488 return kmem_cache_zalloc(dev_priv->objects, GFP_KERNEL);
42dcedd4
CW
489}
490
491void i915_gem_object_free(struct drm_i915_gem_object *obj)
492{
fac5e23e 493 struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
efab6d8d 494 kmem_cache_free(dev_priv->objects, obj);
42dcedd4
CW
495}
496
ff72145b
DA
497static int
498i915_gem_create(struct drm_file *file,
499 struct drm_device *dev,
500 uint64_t size,
501 uint32_t *handle_p)
673a394b 502{
05394f39 503 struct drm_i915_gem_object *obj;
a1a2d1d3
PP
504 int ret;
505 u32 handle;
673a394b 506
ff72145b 507 size = roundup(size, PAGE_SIZE);
8ffc0246
CW
508 if (size == 0)
509 return -EINVAL;
673a394b
EA
510
511 /* Allocate the new object */
d37cd8a8 512 obj = i915_gem_object_create(dev, size);
fe3db79b
CW
513 if (IS_ERR(obj))
514 return PTR_ERR(obj);
673a394b 515
05394f39 516 ret = drm_gem_handle_create(file, &obj->base, &handle);
202f2fef 517 /* drop reference from allocate - handle holds it now */
34911fd3 518 i915_gem_object_put_unlocked(obj);
d861e338
DV
519 if (ret)
520 return ret;
202f2fef 521
ff72145b 522 *handle_p = handle;
673a394b
EA
523 return 0;
524}
525
ff72145b
DA
526int
527i915_gem_dumb_create(struct drm_file *file,
528 struct drm_device *dev,
529 struct drm_mode_create_dumb *args)
530{
531 /* have to work out size/pitch and return them */
de45eaf7 532 args->pitch = ALIGN(args->width * DIV_ROUND_UP(args->bpp, 8), 64);
ff72145b
DA
533 args->size = args->pitch * args->height;
534 return i915_gem_create(file, dev,
da6b51d0 535 args->size, &args->handle);
ff72145b
DA
536}
537
ff72145b
DA
538/**
539 * Creates a new mm object and returns a handle to it.
14bb2c11
TU
540 * @dev: drm device pointer
541 * @data: ioctl data blob
542 * @file: drm file pointer
ff72145b
DA
543 */
544int
545i915_gem_create_ioctl(struct drm_device *dev, void *data,
546 struct drm_file *file)
547{
548 struct drm_i915_gem_create *args = data;
63ed2cb2 549
ff72145b 550 return i915_gem_create(file, dev,
da6b51d0 551 args->size, &args->handle);
ff72145b
DA
552}
553
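/*
 * Swizzled copy helpers: on machines that fold physical address bit 17
 * into the tiled layout, each pair of adjacent 64-byte cachelines within
 * an affected page is swapped. The helpers below therefore copy at most
 * one cacheline at a time and XOR the GPU offset with 64 to address the
 * swapped partner line.
 */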
static inline int
__copy_to_user_swizzled(char __user *cpu_vaddr,
			const char *gpu_vaddr, int gpu_offset,
			int length)
{
	int ret, cpu_offset = 0;

	while (length > 0) {
		int cacheline_end = ALIGN(gpu_offset + 1, 64);
		int this_length = min(cacheline_end - gpu_offset, length);
		int swizzled_gpu_offset = gpu_offset ^ 64;

		ret = __copy_to_user(cpu_vaddr + cpu_offset,
				     gpu_vaddr + swizzled_gpu_offset,
				     this_length);
		if (ret)
			return ret + length;

		cpu_offset += this_length;
		gpu_offset += this_length;
		length -= this_length;
	}

	return 0;
}

static inline int
__copy_from_user_swizzled(char *gpu_vaddr, int gpu_offset,
			  const char __user *cpu_vaddr,
			  int length)
{
	int ret, cpu_offset = 0;

	while (length > 0) {
		int cacheline_end = ALIGN(gpu_offset + 1, 64);
		int this_length = min(cacheline_end - gpu_offset, length);
		int swizzled_gpu_offset = gpu_offset ^ 64;

		ret = __copy_from_user(gpu_vaddr + swizzled_gpu_offset,
				       cpu_vaddr + cpu_offset,
				       this_length);
		if (ret)
			return ret + length;

		cpu_offset += this_length;
		gpu_offset += this_length;
		length -= this_length;
	}

	return 0;
}

/*
 * Pins the specified object's pages and synchronizes the object with
 * GPU accesses. Sets needs_clflush to non-zero if the caller should
 * flush the object from the CPU cache.
 */
int i915_gem_obj_prepare_shmem_read(struct drm_i915_gem_object *obj,
				    unsigned int *needs_clflush)
{
	int ret;

	*needs_clflush = 0;

	if (!i915_gem_object_has_struct_page(obj))
		return -ENODEV;

	ret = i915_gem_object_wait_rendering(obj, true);
	if (ret)
		return ret;

	ret = i915_gem_object_get_pages(obj);
	if (ret)
		return ret;

	i915_gem_object_pin_pages(obj);

	i915_gem_object_flush_gtt_write_domain(obj);

	/* If we're not in the cpu read domain, set ourself into the gtt
	 * read domain and manually flush cachelines (if required). This
	 * optimizes for the case when the gpu will dirty the data
	 * anyway again before the next pread happens.
	 */
	if (!(obj->base.read_domains & I915_GEM_DOMAIN_CPU))
		*needs_clflush = !cpu_cache_is_coherent(obj->base.dev,
							obj->cache_level);

	if (*needs_clflush && !static_cpu_has(X86_FEATURE_CLFLUSH)) {
		ret = i915_gem_object_set_to_cpu_domain(obj, false);
		if (ret)
			goto err_unpin;

		*needs_clflush = 0;
	}

	/* return with the pages pinned */
	return 0;

err_unpin:
	i915_gem_object_unpin_pages(obj);
	return ret;
}

int i915_gem_obj_prepare_shmem_write(struct drm_i915_gem_object *obj,
				     unsigned int *needs_clflush)
{
	int ret;

	*needs_clflush = 0;
	if (!i915_gem_object_has_struct_page(obj))
		return -ENODEV;

	ret = i915_gem_object_wait_rendering(obj, false);
	if (ret)
		return ret;

	ret = i915_gem_object_get_pages(obj);
	if (ret)
		return ret;

	i915_gem_object_pin_pages(obj);

	i915_gem_object_flush_gtt_write_domain(obj);

	/* If we're not in the cpu write domain, set ourself into the
	 * gtt write domain and manually flush cachelines (as required).
	 * This optimizes for the case when the gpu will use the data
	 * right away and we therefore have to clflush anyway.
	 */
	if (obj->base.write_domain != I915_GEM_DOMAIN_CPU)
		*needs_clflush |= cpu_write_needs_clflush(obj) << 1;

	/* Same trick applies to invalidate partially written cachelines read
	 * before writing.
	 */
	if (!(obj->base.read_domains & I915_GEM_DOMAIN_CPU))
		*needs_clflush |= !cpu_cache_is_coherent(obj->base.dev,
							 obj->cache_level);

	if (*needs_clflush && !static_cpu_has(X86_FEATURE_CLFLUSH)) {
		ret = i915_gem_object_set_to_cpu_domain(obj, true);
		if (ret)
			goto err_unpin;

		*needs_clflush = 0;
	}

	if ((*needs_clflush & CLFLUSH_AFTER) == 0)
		obj->cache_dirty = true;

	intel_fb_obj_invalidate(obj, ORIGIN_CPU);
	obj->dirty = 1;
	/* return with the pages pinned */
	return 0;

err_unpin:
	i915_gem_object_unpin_pages(obj);
	return ret;
}
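/*
 * The needs_clflush value filled in above is a bitmask: CLFLUSH_BEFORE
 * (bit 0) asks the caller to flush stale cachelines before accessing the
 * pages, CLFLUSH_AFTER (bit 1, hence the "<< 1" above) to flush written
 * cachelines back out afterwards.
 */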

/* Per-page copy function for the shmem pread fastpath.
 * Flushes invalid cachelines before reading the target if
 * needs_clflush is set. */
static int
shmem_pread_fast(struct page *page, int shmem_page_offset, int page_length,
		 char __user *user_data,
		 bool page_do_bit17_swizzling, bool needs_clflush)
{
	char *vaddr;
	int ret;

	if (unlikely(page_do_bit17_swizzling))
		return -EINVAL;

	vaddr = kmap_atomic(page);
	if (needs_clflush)
		drm_clflush_virt_range(vaddr + shmem_page_offset,
				       page_length);
	ret = __copy_to_user_inatomic(user_data,
				      vaddr + shmem_page_offset,
				      page_length);
	kunmap_atomic(vaddr);

	return ret ? -EFAULT : 0;
}

static void
shmem_clflush_swizzled_range(char *addr, unsigned long length,
			     bool swizzled)
{
	if (unlikely(swizzled)) {
		unsigned long start = (unsigned long) addr;
		unsigned long end = (unsigned long) addr + length;

		/* For swizzling simply ensure that we always flush both
		 * channels. Lame, but simple and it works. Swizzled
		 * pwrite/pread is far from a hotpath - current userspace
		 * doesn't use it at all. */
		start = round_down(start, 128);
		end = round_up(end, 128);

		drm_clflush_virt_range((void *)start, end - start);
	} else {
		drm_clflush_virt_range(addr, length);
	}
}

/* Only difference to the fast-path function is that this can handle bit17
 * and uses non-atomic copy and kmap functions. */
static int
shmem_pread_slow(struct page *page, int shmem_page_offset, int page_length,
		 char __user *user_data,
		 bool page_do_bit17_swizzling, bool needs_clflush)
{
	char *vaddr;
	int ret;

	vaddr = kmap(page);
	if (needs_clflush)
		shmem_clflush_swizzled_range(vaddr + shmem_page_offset,
					     page_length,
					     page_do_bit17_swizzling);

	if (page_do_bit17_swizzling)
		ret = __copy_to_user_swizzled(user_data,
					      vaddr, shmem_page_offset,
					      page_length);
	else
		ret = __copy_to_user(user_data,
				     vaddr + shmem_page_offset,
				     page_length);
	kunmap(page);

	return ret ? -EFAULT : 0;
}

static inline unsigned long
slow_user_access(struct io_mapping *mapping,
		 uint64_t page_base, int page_offset,
		 char __user *user_data,
		 unsigned long length, bool pwrite)
{
	void __iomem *ioaddr;
	void *vaddr;
	uint64_t unwritten;

	ioaddr = io_mapping_map_wc(mapping, page_base, PAGE_SIZE);
	/* We can use the cpu mem copy function because this is X86. */
	vaddr = (void __force *)ioaddr + page_offset;
	if (pwrite)
		unwritten = __copy_from_user(vaddr, user_data, length);
	else
		unwritten = __copy_to_user(user_data, vaddr, length);

	io_mapping_unmap(ioaddr);
	return unwritten;
}

static int
i915_gem_gtt_pread(struct drm_device *dev,
		   struct drm_i915_gem_object *obj, uint64_t size,
		   uint64_t data_offset, uint64_t data_ptr)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct i915_ggtt *ggtt = &dev_priv->ggtt;
	struct i915_vma *vma;
	struct drm_mm_node node;
	char __user *user_data;
	uint64_t remain;
	uint64_t offset;
	int ret;

	vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0, PIN_MAPPABLE);
	if (!IS_ERR(vma)) {
		node.start = i915_ggtt_offset(vma);
		node.allocated = false;
		ret = i915_vma_put_fence(vma);
		if (ret) {
			i915_vma_unpin(vma);
			vma = ERR_PTR(ret);
		}
	}
	if (IS_ERR(vma)) {
		ret = insert_mappable_node(dev_priv, &node, PAGE_SIZE);
		if (ret)
			goto out;

		ret = i915_gem_object_get_pages(obj);
		if (ret) {
			remove_mappable_node(&node);
			goto out;
		}

		i915_gem_object_pin_pages(obj);
	}

	ret = i915_gem_object_set_to_gtt_domain(obj, false);
	if (ret)
		goto out_unpin;

	user_data = u64_to_user_ptr(data_ptr);
	remain = size;
	offset = data_offset;

	mutex_unlock(&dev->struct_mutex);
	if (likely(!i915.prefault_disable)) {
		ret = fault_in_multipages_writeable(user_data, remain);
		if (ret) {
			mutex_lock(&dev->struct_mutex);
			goto out_unpin;
		}
	}

	while (remain > 0) {
		/* Operation in this page
		 *
		 * page_base = page offset within aperture
		 * page_offset = offset within page
		 * page_length = bytes to copy for this page
		 */
		u32 page_base = node.start;
		unsigned page_offset = offset_in_page(offset);
		unsigned page_length = PAGE_SIZE - page_offset;
		page_length = remain < page_length ? remain : page_length;
		if (node.allocated) {
			wmb();
			ggtt->base.insert_page(&ggtt->base,
					       i915_gem_object_get_dma_address(obj, offset >> PAGE_SHIFT),
					       node.start,
					       I915_CACHE_NONE, 0);
			wmb();
		} else {
			page_base += offset & PAGE_MASK;
		}
		/* This is a slow read/write as it tries to read from
		 * and write to user memory which may result into page
		 * faults, and so we cannot perform this under struct_mutex.
		 */
		if (slow_user_access(&ggtt->mappable, page_base,
				     page_offset, user_data,
				     page_length, false)) {
			ret = -EFAULT;
			break;
		}

		remain -= page_length;
		user_data += page_length;
		offset += page_length;
	}

	mutex_lock(&dev->struct_mutex);
	if (ret == 0 && (obj->base.read_domains & I915_GEM_DOMAIN_GTT) == 0) {
		/* The user has modified the object whilst we tried
		 * reading from it, and we now have no idea what domain
		 * the pages should be in. As we have just been touching
		 * them directly, flush everything back to the GTT
		 * domain.
		 */
		ret = i915_gem_object_set_to_gtt_domain(obj, false);
	}

out_unpin:
	if (node.allocated) {
		wmb();
		ggtt->base.clear_range(&ggtt->base,
				       node.start, node.size,
				       true);
		i915_gem_object_unpin_pages(obj);
		remove_mappable_node(&node);
	} else {
		i915_vma_unpin(vma);
	}
out:
	return ret;
}

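/*
 * The shmem pread/pwrite paths below share a pattern: attempt a per-page
 * fast path under struct_mutex using atomic kmaps (which must not fault),
 * and if that fails drop the mutex, prefault the user pages, and retry
 * with the sleeping kmap()-based slow path.
 */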
static int
i915_gem_shmem_pread(struct drm_device *dev,
		     struct drm_i915_gem_object *obj,
		     struct drm_i915_gem_pread *args,
		     struct drm_file *file)
{
	char __user *user_data;
	ssize_t remain;
	loff_t offset;
	int shmem_page_offset, page_length, ret = 0;
	int obj_do_bit17_swizzling, page_do_bit17_swizzling;
	int prefaulted = 0;
	int needs_clflush = 0;
	struct sg_page_iter sg_iter;

	ret = i915_gem_obj_prepare_shmem_read(obj, &needs_clflush);
	if (ret)
		return ret;

	obj_do_bit17_swizzling = i915_gem_object_needs_bit17_swizzle(obj);
	user_data = u64_to_user_ptr(args->data_ptr);
	offset = args->offset;
	remain = args->size;

	for_each_sg_page(obj->pages->sgl, &sg_iter, obj->pages->nents,
			 offset >> PAGE_SHIFT) {
		struct page *page = sg_page_iter_page(&sg_iter);

		if (remain <= 0)
			break;

		/* Operation in this page
		 *
		 * shmem_page_offset = offset within page in shmem file
		 * page_length = bytes to copy for this page
		 */
		shmem_page_offset = offset_in_page(offset);
		page_length = remain;
		if ((shmem_page_offset + page_length) > PAGE_SIZE)
			page_length = PAGE_SIZE - shmem_page_offset;

		page_do_bit17_swizzling = obj_do_bit17_swizzling &&
			(page_to_phys(page) & (1 << 17)) != 0;

		ret = shmem_pread_fast(page, shmem_page_offset, page_length,
				       user_data, page_do_bit17_swizzling,
				       needs_clflush);
		if (ret == 0)
			goto next_page;

		mutex_unlock(&dev->struct_mutex);

		if (likely(!i915.prefault_disable) && !prefaulted) {
			ret = fault_in_multipages_writeable(user_data, remain);
			/* Userspace is tricking us, but we've already clobbered
			 * its pages with the prefault and promised to write the
			 * data up to the first fault. Hence ignore any errors
			 * and just continue. */
			(void)ret;
			prefaulted = 1;
		}

		ret = shmem_pread_slow(page, shmem_page_offset, page_length,
				       user_data, page_do_bit17_swizzling,
				       needs_clflush);

		mutex_lock(&dev->struct_mutex);

		if (ret)
			goto out;

next_page:
		remain -= page_length;
		user_data += page_length;
		offset += page_length;
	}

out:
	i915_gem_obj_finish_shmem_access(obj);

	return ret;
}

/**
 * Reads data from the object referenced by handle.
 * @dev: drm device pointer
 * @data: ioctl data blob
 * @file: drm file pointer
 *
 * On error, the contents of *data are undefined.
 */
int
i915_gem_pread_ioctl(struct drm_device *dev, void *data,
		     struct drm_file *file)
{
	struct drm_i915_gem_pread *args = data;
	struct drm_i915_gem_object *obj;
	int ret = 0;

	if (args->size == 0)
		return 0;

	if (!access_ok(VERIFY_WRITE,
		       u64_to_user_ptr(args->data_ptr),
		       args->size))
		return -EFAULT;

	obj = i915_gem_object_lookup(file, args->handle);
	if (!obj)
		return -ENOENT;

	/* Bounds check source.  */
	if (args->offset > obj->base.size ||
	    args->size > obj->base.size - args->offset) {
		ret = -EINVAL;
		goto err;
	}

	trace_i915_gem_object_pread(obj, args->offset, args->size);

	ret = __unsafe_wait_rendering(obj, to_rps_client(file), true);
	if (ret)
		goto err;

	ret = i915_mutex_lock_interruptible(dev);
	if (ret)
		goto err;

	ret = i915_gem_shmem_pread(dev, obj, args, file);

	/* pread for non shmem backed objects */
	if (ret == -EFAULT || ret == -ENODEV) {
		intel_runtime_pm_get(to_i915(dev));
		ret = i915_gem_gtt_pread(dev, obj, args->size,
					 args->offset, args->data_ptr);
		intel_runtime_pm_put(to_i915(dev));
	}

	i915_gem_object_put(obj);
	mutex_unlock(&dev->struct_mutex);

	return ret;

err:
	i915_gem_object_put_unlocked(obj);
	return ret;
}

/* This is the fast write path which cannot handle
 * page faults in the source data
 */

static inline int
fast_user_write(struct io_mapping *mapping,
		loff_t page_base, int page_offset,
		char __user *user_data,
		int length)
{
	void __iomem *vaddr_atomic;
	void *vaddr;
	unsigned long unwritten;

	vaddr_atomic = io_mapping_map_atomic_wc(mapping, page_base);
	/* We can use the cpu mem copy function because this is X86. */
	vaddr = (void __force*)vaddr_atomic + page_offset;
	unwritten = __copy_from_user_inatomic_nocache(vaddr,
						      user_data, length);
	io_mapping_unmap_atomic(vaddr_atomic);
	return unwritten;
}

/**
 * This is the fast pwrite path, where we copy the data directly from the
 * user into the GTT, uncached.
 * @i915: i915 device private data
 * @obj: i915 gem object
 * @args: pwrite arguments structure
 * @file: drm file pointer
 */
static int
i915_gem_gtt_pwrite_fast(struct drm_i915_private *i915,
			 struct drm_i915_gem_object *obj,
			 struct drm_i915_gem_pwrite *args,
			 struct drm_file *file)
{
	struct i915_ggtt *ggtt = &i915->ggtt;
	struct drm_device *dev = obj->base.dev;
	struct i915_vma *vma;
	struct drm_mm_node node;
	uint64_t remain, offset;
	char __user *user_data;
	int ret;
	bool hit_slow_path = false;

	if (i915_gem_object_is_tiled(obj))
		return -EFAULT;

	vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0,
				       PIN_MAPPABLE | PIN_NONBLOCK);
	if (!IS_ERR(vma)) {
		node.start = i915_ggtt_offset(vma);
		node.allocated = false;
		ret = i915_vma_put_fence(vma);
		if (ret) {
			i915_vma_unpin(vma);
			vma = ERR_PTR(ret);
		}
	}
	if (IS_ERR(vma)) {
		ret = insert_mappable_node(i915, &node, PAGE_SIZE);
		if (ret)
			goto out;

		ret = i915_gem_object_get_pages(obj);
		if (ret) {
			remove_mappable_node(&node);
			goto out;
		}

		i915_gem_object_pin_pages(obj);
	}

	ret = i915_gem_object_set_to_gtt_domain(obj, true);
	if (ret)
		goto out_unpin;

	intel_fb_obj_invalidate(obj, ORIGIN_CPU);
	obj->dirty = true;

	user_data = u64_to_user_ptr(args->data_ptr);
	offset = args->offset;
	remain = args->size;
	while (remain) {
		/* Operation in this page
		 *
		 * page_base = page offset within aperture
		 * page_offset = offset within page
		 * page_length = bytes to copy for this page
		 */
		u32 page_base = node.start;
		unsigned page_offset = offset_in_page(offset);
		unsigned page_length = PAGE_SIZE - page_offset;
		page_length = remain < page_length ? remain : page_length;
		if (node.allocated) {
			wmb(); /* flush the write before we modify the GGTT */
			ggtt->base.insert_page(&ggtt->base,
					       i915_gem_object_get_dma_address(obj, offset >> PAGE_SHIFT),
					       node.start, I915_CACHE_NONE, 0);
			wmb(); /* flush modifications to the GGTT (insert_page) */
		} else {
			page_base += offset & PAGE_MASK;
		}
		/* If we get a fault while copying data, then (presumably) our
		 * source page isn't available.  Return the error and we'll
		 * retry in the slow path.
		 * If the object is non-shmem backed, we retry again with the
		 * path that handles page fault.
		 */
		if (fast_user_write(&ggtt->mappable, page_base,
				    page_offset, user_data, page_length)) {
			hit_slow_path = true;
			mutex_unlock(&dev->struct_mutex);
			if (slow_user_access(&ggtt->mappable,
					     page_base,
					     page_offset, user_data,
					     page_length, true)) {
				ret = -EFAULT;
				mutex_lock(&dev->struct_mutex);
				goto out_flush;
			}

			mutex_lock(&dev->struct_mutex);
		}

		remain -= page_length;
		user_data += page_length;
		offset += page_length;
	}

out_flush:
	if (hit_slow_path) {
		if (ret == 0 &&
		    (obj->base.read_domains & I915_GEM_DOMAIN_GTT) == 0) {
			/* The user has modified the object whilst we tried
			 * reading from it, and we now have no idea what domain
			 * the pages should be in. As we have just been touching
			 * them directly, flush everything back to the GTT
			 * domain.
			 */
			ret = i915_gem_object_set_to_gtt_domain(obj, false);
		}
	}

	intel_fb_obj_flush(obj, false, ORIGIN_CPU);
out_unpin:
	if (node.allocated) {
		wmb();
		ggtt->base.clear_range(&ggtt->base,
				       node.start, node.size,
				       true);
		i915_gem_object_unpin_pages(obj);
		remove_mappable_node(&node);
	} else {
		i915_vma_unpin(vma);
	}
out:
	return ret;
}

/* Per-page copy function for the shmem pwrite fastpath.
 * Flushes invalid cachelines before writing to the target if
 * needs_clflush_before is set and flushes out any written cachelines after
 * writing if needs_clflush is set. */
static int
shmem_pwrite_fast(struct page *page, int shmem_page_offset, int page_length,
		  char __user *user_data,
		  bool page_do_bit17_swizzling,
		  bool needs_clflush_before,
		  bool needs_clflush_after)
{
	char *vaddr;
	int ret;

	if (unlikely(page_do_bit17_swizzling))
		return -EINVAL;

	vaddr = kmap_atomic(page);
	if (needs_clflush_before)
		drm_clflush_virt_range(vaddr + shmem_page_offset,
				       page_length);
	ret = __copy_from_user_inatomic(vaddr + shmem_page_offset,
					user_data, page_length);
	if (needs_clflush_after)
		drm_clflush_virt_range(vaddr + shmem_page_offset,
				       page_length);
	kunmap_atomic(vaddr);

	return ret ? -EFAULT : 0;
}

/* Only difference to the fast-path function is that this can handle bit17
 * and uses non-atomic copy and kmap functions. */
static int
shmem_pwrite_slow(struct page *page, int shmem_page_offset, int page_length,
		  char __user *user_data,
		  bool page_do_bit17_swizzling,
		  bool needs_clflush_before,
		  bool needs_clflush_after)
{
	char *vaddr;
	int ret;

	vaddr = kmap(page);
	if (unlikely(needs_clflush_before || page_do_bit17_swizzling))
		shmem_clflush_swizzled_range(vaddr + shmem_page_offset,
					     page_length,
					     page_do_bit17_swizzling);
	if (page_do_bit17_swizzling)
		ret = __copy_from_user_swizzled(vaddr, shmem_page_offset,
						user_data,
						page_length);
	else
		ret = __copy_from_user(vaddr + shmem_page_offset,
				       user_data,
				       page_length);
	if (needs_clflush_after)
		shmem_clflush_swizzled_range(vaddr + shmem_page_offset,
					     page_length,
					     page_do_bit17_swizzling);
	kunmap(page);

	return ret ? -EFAULT : 0;
}

static int
i915_gem_shmem_pwrite(struct drm_device *dev,
		      struct drm_i915_gem_object *obj,
		      struct drm_i915_gem_pwrite *args,
		      struct drm_file *file)
{
	ssize_t remain;
	loff_t offset;
	char __user *user_data;
	int shmem_page_offset, page_length, ret = 0;
	int obj_do_bit17_swizzling, page_do_bit17_swizzling;
	int hit_slowpath = 0;
	unsigned int needs_clflush;
	struct sg_page_iter sg_iter;

	ret = i915_gem_obj_prepare_shmem_write(obj, &needs_clflush);
	if (ret)
		return ret;

	obj_do_bit17_swizzling = i915_gem_object_needs_bit17_swizzle(obj);
	user_data = u64_to_user_ptr(args->data_ptr);
	offset = args->offset;
	remain = args->size;

	for_each_sg_page(obj->pages->sgl, &sg_iter, obj->pages->nents,
			 offset >> PAGE_SHIFT) {
		struct page *page = sg_page_iter_page(&sg_iter);
		int partial_cacheline_write;

		if (remain <= 0)
			break;

		/* Operation in this page
		 *
		 * shmem_page_offset = offset within page in shmem file
		 * page_length = bytes to copy for this page
		 */
		shmem_page_offset = offset_in_page(offset);

		page_length = remain;
		if ((shmem_page_offset + page_length) > PAGE_SIZE)
			page_length = PAGE_SIZE - shmem_page_offset;

		/* If we don't overwrite a cacheline completely we need to be
		 * careful to have up-to-date data by first clflushing. Don't
		 * overcomplicate things and flush the entire patch. */
		partial_cacheline_write = needs_clflush & CLFLUSH_BEFORE &&
			((shmem_page_offset | page_length)
				& (boot_cpu_data.x86_clflush_size - 1));

		page_do_bit17_swizzling = obj_do_bit17_swizzling &&
			(page_to_phys(page) & (1 << 17)) != 0;

		ret = shmem_pwrite_fast(page, shmem_page_offset, page_length,
					user_data, page_do_bit17_swizzling,
					partial_cacheline_write,
					needs_clflush & CLFLUSH_AFTER);
		if (ret == 0)
			goto next_page;

		hit_slowpath = 1;
		mutex_unlock(&dev->struct_mutex);
		ret = shmem_pwrite_slow(page, shmem_page_offset, page_length,
					user_data, page_do_bit17_swizzling,
					partial_cacheline_write,
					needs_clflush & CLFLUSH_AFTER);

		mutex_lock(&dev->struct_mutex);

		if (ret)
			goto out;

next_page:
		remain -= page_length;
		user_data += page_length;
		offset += page_length;
	}

out:
	i915_gem_obj_finish_shmem_access(obj);

	if (hit_slowpath) {
		/*
		 * Fixup: Flush cpu caches in case we didn't flush the dirty
		 * cachelines in-line while writing and the object moved
		 * out of the cpu write domain while we've dropped the lock.
		 */
		if (!(needs_clflush & CLFLUSH_AFTER) &&
		    obj->base.write_domain != I915_GEM_DOMAIN_CPU) {
			if (i915_gem_clflush_object(obj, obj->pin_display))
				needs_clflush |= CLFLUSH_AFTER;
		}
	}

	if (needs_clflush & CLFLUSH_AFTER)
		i915_gem_chipset_flush(to_i915(dev));

	intel_fb_obj_flush(obj, false, ORIGIN_CPU);
	return ret;
}

/**
 * Writes data to the object referenced by handle.
 * @dev: drm device
 * @data: ioctl data blob
 * @file: drm file
 *
 * On error, the contents of the buffer that were to be modified are undefined.
 */
int
i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
		      struct drm_file *file)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct drm_i915_gem_pwrite *args = data;
	struct drm_i915_gem_object *obj;
	int ret;

	if (args->size == 0)
		return 0;

	if (!access_ok(VERIFY_READ,
		       u64_to_user_ptr(args->data_ptr),
		       args->size))
		return -EFAULT;

	if (likely(!i915.prefault_disable)) {
		ret = fault_in_multipages_readable(u64_to_user_ptr(args->data_ptr),
						   args->size);
		if (ret)
			return -EFAULT;
	}

	obj = i915_gem_object_lookup(file, args->handle);
	if (!obj)
		return -ENOENT;

	/* Bounds check destination. */
	if (args->offset > obj->base.size ||
	    args->size > obj->base.size - args->offset) {
		ret = -EINVAL;
		goto err;
	}

	trace_i915_gem_object_pwrite(obj, args->offset, args->size);

	ret = __unsafe_wait_rendering(obj, to_rps_client(file), false);
	if (ret)
		goto err;

	intel_runtime_pm_get(dev_priv);

	ret = i915_mutex_lock_interruptible(dev);
	if (ret)
		goto err_rpm;

	ret = -EFAULT;
	/* We can only do the GTT pwrite on untiled buffers, as otherwise
	 * it would end up going through the fenced access, and we'll get
	 * different detiling behavior between reading and writing.
	 * pread/pwrite currently are reading and writing from the CPU
	 * perspective, requiring manual detiling by the client.
	 */
	if (!i915_gem_object_has_struct_page(obj) ||
	    cpu_write_needs_clflush(obj)) {
		ret = i915_gem_gtt_pwrite_fast(dev_priv, obj, args, file);
		/* Note that the gtt paths might fail with non-page-backed user
		 * pointers (e.g. gtt mappings when moving data between
		 * textures). Fallback to the shmem path in that case. */
	}

	if (ret == -EFAULT || ret == -ENOSPC) {
		if (obj->phys_handle)
			ret = i915_gem_phys_pwrite(obj, args, file);
		else
			ret = i915_gem_shmem_pwrite(dev, obj, args, file);
	}

	i915_gem_object_put(obj);
	mutex_unlock(&dev->struct_mutex);
	intel_runtime_pm_put(dev_priv);

	return ret;

err_rpm:
	intel_runtime_pm_put(dev_priv);
err:
	i915_gem_object_put_unlocked(obj);
	return ret;
}

static inline enum fb_op_origin
write_origin(struct drm_i915_gem_object *obj, unsigned domain)
{
	return (domain == I915_GEM_DOMAIN_GTT ?
		obj->frontbuffer_ggtt_origin : ORIGIN_CPU);
}
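/*
 * GTT-domain writes are normally tracked as ORIGIN_GTT, but once the
 * object has been mapped through an untracked (WC) CPU mmap the object's
 * frontbuffer_ggtt_origin is downgraded to ORIGIN_CPU - see
 * i915_gem_mmap_ioctl() and i915_gem_fault() below.
 */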

/**
 * Called when user space prepares to use an object with the CPU, either
 * through the mmap ioctl's mapping or a GTT mapping.
 * @dev: drm device
 * @data: ioctl data blob
 * @file: drm file
 */
int
i915_gem_set_domain_ioctl(struct drm_device *dev, void *data,
			  struct drm_file *file)
{
	struct drm_i915_gem_set_domain *args = data;
	struct drm_i915_gem_object *obj;
	uint32_t read_domains = args->read_domains;
	uint32_t write_domain = args->write_domain;
	int ret;

	/* Only handle setting domains to types used by the CPU. */
	if ((write_domain | read_domains) & I915_GEM_GPU_DOMAINS)
		return -EINVAL;

	/* Having something in the write domain implies it's in the read
	 * domain, and only that read domain.  Enforce that in the request.
	 */
	if (write_domain != 0 && read_domains != write_domain)
		return -EINVAL;

	obj = i915_gem_object_lookup(file, args->handle);
	if (!obj)
		return -ENOENT;

	/* Try to flush the object off the GPU without holding the lock.
	 * We will repeat the flush holding the lock in the normal manner
	 * to catch cases where we are gazumped.
	 */
	ret = __unsafe_wait_rendering(obj, to_rps_client(file), !write_domain);
	if (ret)
		goto err;

	ret = i915_mutex_lock_interruptible(dev);
	if (ret)
		goto err;

	if (read_domains & I915_GEM_DOMAIN_GTT)
		ret = i915_gem_object_set_to_gtt_domain(obj, write_domain != 0);
	else
		ret = i915_gem_object_set_to_cpu_domain(obj, write_domain != 0);

	if (write_domain != 0)
		intel_fb_obj_invalidate(obj, write_origin(obj, write_domain));

	i915_gem_object_put(obj);
	mutex_unlock(&dev->struct_mutex);
	return ret;

err:
	i915_gem_object_put_unlocked(obj);
	return ret;
}

/**
 * Called when user space has done writes to this buffer
 * @dev: drm device
 * @data: ioctl data blob
 * @file: drm file
 */
int
i915_gem_sw_finish_ioctl(struct drm_device *dev, void *data,
			 struct drm_file *file)
{
	struct drm_i915_gem_sw_finish *args = data;
	struct drm_i915_gem_object *obj;
	int err = 0;

	obj = i915_gem_object_lookup(file, args->handle);
	if (!obj)
		return -ENOENT;

	/* Pinned buffers may be scanout, so flush the cache */
	if (READ_ONCE(obj->pin_display)) {
		err = i915_mutex_lock_interruptible(dev);
		if (!err) {
			i915_gem_object_flush_cpu_write_domain(obj);
			mutex_unlock(&dev->struct_mutex);
		}
	}

	i915_gem_object_put_unlocked(obj);
	return err;
}

/**
 * i915_gem_mmap_ioctl - Maps the contents of an object, returning the address
 *			 it is mapped to.
 * @dev: drm device
 * @data: ioctl data blob
 * @file: drm file
 *
 * While the mapping holds a reference on the contents of the object, it doesn't
 * imply a ref on the object itself.
 *
 * IMPORTANT:
 *
 * DRM driver writers who look at this function as an example for how to do GEM
 * mmap support, please don't implement mmap support like here. The modern way
 * to implement DRM mmap support is with an mmap offset ioctl (like
 * i915_gem_mmap_gtt) and then using the mmap syscall on the DRM fd directly.
 * That way debug tooling like valgrind will understand what's going on, hiding
 * the mmap call in a driver private ioctl will break that. The i915 driver only
 * does cpu mmaps this way because we didn't know better.
 */
int
i915_gem_mmap_ioctl(struct drm_device *dev, void *data,
		    struct drm_file *file)
{
	struct drm_i915_gem_mmap *args = data;
	struct drm_i915_gem_object *obj;
	unsigned long addr;

	if (args->flags & ~(I915_MMAP_WC))
		return -EINVAL;

	if (args->flags & I915_MMAP_WC && !boot_cpu_has(X86_FEATURE_PAT))
		return -ENODEV;

	obj = i915_gem_object_lookup(file, args->handle);
	if (!obj)
		return -ENOENT;

	/* prime objects have no backing filp to GEM mmap
	 * pages from.
	 */
	if (!obj->base.filp) {
		i915_gem_object_put_unlocked(obj);
		return -EINVAL;
	}

	addr = vm_mmap(obj->base.filp, 0, args->size,
		       PROT_READ | PROT_WRITE, MAP_SHARED,
		       args->offset);
	if (args->flags & I915_MMAP_WC) {
		struct mm_struct *mm = current->mm;
		struct vm_area_struct *vma;

		if (down_write_killable(&mm->mmap_sem)) {
			i915_gem_object_put_unlocked(obj);
			return -EINTR;
		}
		vma = find_vma(mm, addr);
		if (vma)
			vma->vm_page_prot =
				pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
		else
			addr = -ENOMEM;
		up_write(&mm->mmap_sem);

		/* This may race, but that's ok, it only gets set */
		WRITE_ONCE(obj->frontbuffer_ggtt_origin, ORIGIN_CPU);
	}
	i915_gem_object_put_unlocked(obj);
	if (IS_ERR((void *)addr))
		return addr;

	args->addr_ptr = (uint64_t) addr;

	return 0;
}

static unsigned int tile_row_pages(struct drm_i915_gem_object *obj)
{
	u64 size;

	size = i915_gem_object_get_stride(obj);
	size *= i915_gem_object_get_tiling(obj) == I915_TILING_Y ? 32 : 8;

	return size >> PAGE_SHIFT;
}
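/*
 * A tile row is 8 scanlines tall for X-tiling and 32 for Y-tiling, so
 * stride * rows above is the size in bytes of one complete row of tiles;
 * the fault handler below rounds its partial-view chunks to this so that
 * a fenceable view always covers whole tile rows.
 */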

/**
 * i915_gem_mmap_gtt_version - report the current feature set for GTT mmaps
 *
 * A history of the GTT mmap interface:
 *
 * 0 - Everything had to fit into the GTT. Both parties of a memcpy had to
 *     be aligned and suitable for fencing, and still fit into the available
 *     mappable space left by the pinned display objects. A classic problem
 *     we called the page-fault-of-doom where we would ping-pong between
 *     two objects that could not fit inside the GTT and so the memcpy
 *     would page one object in at the expense of the other between every
 *     single byte.
 *
 * 1 - Objects can be any size, and have any compatible fencing (X, Y, or none
 *     as set via i915_gem_set_tiling() [DRM_I915_GEM_SET_TILING]). If the
 *     object is too large for the available space (or simply too large
 *     for the mappable aperture!), a view is created instead and faulted
 *     into userspace. (This view is aligned and sized appropriately for
 *     fenced access.)
 *
 * Restrictions:
 *
 * * snoopable objects cannot be accessed via the GTT. It can cause machine
 *   hangs on some architectures, corruption on others. An attempt to service
 *   a GTT page fault from a snoopable object will generate a SIGBUS.
 *
 * * the object must be able to fit into RAM (physical memory, though not
 *   limited to the mappable aperture).
 *
 *
 * Caveats:
 *
 * * a new GTT page fault will synchronize rendering from the GPU and flush
 *   all data to system memory. Subsequent access will not be synchronized.
 *
 * * all mappings are revoked on runtime device suspend.
 *
 * * there are only 8, 16 or 32 fence registers to share between all users
 *   (older machines require fence register for display and blitter access
 *   as well). Contention of the fence registers will cause the previous users
 *   to be unmapped and any new access will generate new page faults.
 *
 * * running out of memory while servicing a fault may generate a SIGBUS,
 *   rather than the expected SIGSEGV.
 */
int i915_gem_mmap_gtt_version(void)
{
	return 1;
}
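/*
 * Userspace can query the version above through the getparam ioctl - a
 * minimal sketch (error handling omitted; fd is an open i915 DRM fd):
 *
 *	int version = 0;
 *	struct drm_i915_getparam gp = {
 *		.param = I915_PARAM_MMAP_GTT_VERSION,
 *		.value = &version,
 *	};
 *	ioctl(fd, DRM_IOCTL_I915_GETPARAM, &gp);
 */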
1731
de151cf6
JB
1732/**
1733 * i915_gem_fault - fault a page into the GTT
058d88c4 1734 * @area: CPU VMA in question
d9072a3e 1735 * @vmf: fault info
de151cf6
JB
1736 *
1737 * The fault handler is set up by drm_gem_mmap() when a object is GTT mapped
1738 * from userspace. The fault handler takes care of binding the object to
1739 * the GTT (if needed), allocating and programming a fence register (again,
1740 * only if needed based on whether the old reg is still valid or the object
1741 * is tiled) and inserting a new PTE into the faulting process.
1742 *
1743 * Note that the faulting process may involve evicting existing objects
1744 * from the GTT and/or fence registers to make room. So performance may
1745 * suffer if the GTT working set is large or there are few fence registers
1746 * left.
4cc69075
CW
1747 *
1748 * The current feature set supported by i915_gem_fault() and thus GTT mmaps
1749 * is exposed via I915_PARAM_MMAP_GTT_VERSION (see i915_gem_mmap_gtt_version).
de151cf6 1750 */
058d88c4 1751int i915_gem_fault(struct vm_area_struct *area, struct vm_fault *vmf)
de151cf6 1752{
03af84fe 1753#define MIN_CHUNK_PAGES ((1 << 20) >> PAGE_SHIFT) /* 1 MiB */
058d88c4 1754 struct drm_i915_gem_object *obj = to_intel_bo(area->vm_private_data);
05394f39 1755 struct drm_device *dev = obj->base.dev;
72e96d64
JL
1756 struct drm_i915_private *dev_priv = to_i915(dev);
1757 struct i915_ggtt *ggtt = &dev_priv->ggtt;
b8f9096d 1758 bool write = !!(vmf->flags & FAULT_FLAG_WRITE);
058d88c4 1759 struct i915_vma *vma;
de151cf6 1760 pgoff_t page_offset;
82118877 1761 unsigned int flags;
b8f9096d 1762 int ret;
f65c9168 1763
de151cf6 1764 /* We don't use vmf->pgoff since that has the fake offset */
058d88c4 1765 page_offset = ((unsigned long)vmf->virtual_address - area->vm_start) >>
de151cf6
JB
1766 PAGE_SHIFT;
1767
db53a302
CW
1768 trace_i915_gem_object_fault(obj, page_offset, true, write);
1769
6e4930f6 1770 /* Try to flush the object off the GPU first without holding the lock.
b8f9096d 1771 * Upon acquiring the lock, we will perform our sanity checks and then
6e4930f6
CW
1772 * repeat the flush holding the lock in the normal manner to catch cases
1773 * where we are gazumped.
1774 */
b8f9096d 1775 ret = __unsafe_wait_rendering(obj, NULL, !write);
6e4930f6 1776 if (ret)
b8f9096d
CW
1777 goto err;
1778
1779 intel_runtime_pm_get(dev_priv);
1780
1781 ret = i915_mutex_lock_interruptible(dev);
1782 if (ret)
1783 goto err_rpm;
6e4930f6 1784
eb119bd6
CW
1785 /* Access to snoopable pages through the GTT is incoherent. */
1786 if (obj->cache_level != I915_CACHE_NONE && !HAS_LLC(dev)) {
ddeff6ee 1787 ret = -EFAULT;
b8f9096d 1788 goto err_unlock;
eb119bd6
CW
1789 }
1790
82118877
CW
1791 /* If the object is smaller than a couple of partial vma, it is
1792 * not worth only creating a single partial vma - we may as well
1793 * clear enough space for the full object.
1794 */
1795 flags = PIN_MAPPABLE;
1796 if (obj->base.size > 2 * MIN_CHUNK_PAGES << PAGE_SHIFT)
1797 flags |= PIN_NONBLOCK | PIN_NONFAULT;
1798
a61007a8 1799 /* Now pin it into the GTT as needed */
82118877 1800 vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0, flags);
a61007a8
CW
1801 if (IS_ERR(vma)) {
1802 struct i915_ggtt_view view;
03af84fe
CW
1803 unsigned int chunk_size;
1804
a61007a8 1805 /* Use a partial view if it is bigger than available space */
03af84fe
CW
1806 chunk_size = MIN_CHUNK_PAGES;
1807 if (i915_gem_object_is_tiled(obj))
1808 chunk_size = max(chunk_size, tile_row_pages(obj));
e7ded2d7 1809
c5ad54cf
JL
1810 memset(&view, 0, sizeof(view));
1811 view.type = I915_GGTT_VIEW_PARTIAL;
1812 view.params.partial.offset = rounddown(page_offset, chunk_size);
1813 view.params.partial.size =
a61007a8 1814 min_t(unsigned int, chunk_size,
058d88c4 1815 (area->vm_end - area->vm_start) / PAGE_SIZE -
c5ad54cf 1816 view.params.partial.offset);
c5ad54cf 1817
aa136d9d
CW
1818 /* If the partial covers the entire object, just create a
1819 * normal VMA.
1820 */
1821 if (chunk_size >= obj->base.size >> PAGE_SHIFT)
1822 view.type = I915_GGTT_VIEW_NORMAL;
1823
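 /*
  * Worked example (hypothetical numbers): a fault at page 1000 of a
  * 16 MiB (4096 page) untiled object uses chunk_size = 256, so the
  * partial view is rounded down to page 768 and covers pages 768-1023.
  * As 256 < 4096, the view remains I915_GGTT_VIEW_PARTIAL here.
  */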
50349247
CW
1824 /* Userspace is now writing through an untracked VMA, abandon
1825 * all hope that the hardware is able to track future writes.
1826 */
1827 obj->frontbuffer_ggtt_origin = ORIGIN_CPU;
1828
a61007a8
CW
1829 vma = i915_gem_object_ggtt_pin(obj, &view, 0, 0, PIN_MAPPABLE);
1830 }
058d88c4
CW
1831 if (IS_ERR(vma)) {
1832 ret = PTR_ERR(vma);
b8f9096d 1833 goto err_unlock;
058d88c4 1834 }
4a684a41 1835
c9839303
CW
1836 ret = i915_gem_object_set_to_gtt_domain(obj, write);
1837 if (ret)
b8f9096d 1838 goto err_unpin;
74898d7e 1839
49ef5294 1840 ret = i915_vma_get_fence(vma);
d9e86c0e 1841 if (ret)
b8f9096d 1842 goto err_unpin;
7d1c4804 1843
b90b91d8 1844 /* Finally, remap it using the new GTT offset */
c58305af
CW
1845 ret = remap_io_mapping(area,
1846 area->vm_start + (vma->ggtt_view.params.partial.offset << PAGE_SHIFT),
1847 (ggtt->mappable_base + vma->node.start) >> PAGE_SHIFT,
1848 min_t(u64, vma->size, area->vm_end - area->vm_start),
1849 &ggtt->mappable);
1850 if (ret)
1851 goto err_unpin;
a61007a8
CW
1852
1853 obj->fault_mappable = true;
b8f9096d 1854err_unpin:
058d88c4 1855 __i915_vma_unpin(vma);
b8f9096d 1856err_unlock:
de151cf6 1857 mutex_unlock(&dev->struct_mutex);
b8f9096d
CW
1858err_rpm:
1859 intel_runtime_pm_put(dev_priv);
1860err:
de151cf6 1861 switch (ret) {
d9bc7e9f 1862 case -EIO:
2232f031
DV
1863 /*
1864 * We eat errors when the gpu is terminally wedged to avoid
1865 * userspace unduly crashing (gl has no provisions for mmaps to
1866 * fail). But any other -EIO isn't ours (e.g. swap in failure)
1867 * and so needs to be reported.
1868 */
1869 if (!i915_terminally_wedged(&dev_priv->gpu_error)) {
f65c9168
PZ
1870 ret = VM_FAULT_SIGBUS;
1871 break;
1872 }
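 /* fall through - a terminally wedged GPU is treated like a hung one */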
045e769a 1873 case -EAGAIN:
571c608d
DV
1874 /*
1875 * EAGAIN means the gpu is hung and we'll wait for the error
1876 * handler to reset everything when re-faulting in
1877 * i915_mutex_lock_interruptible.
d9bc7e9f 1878 */
c715089f
CW
1879 case 0:
1880 case -ERESTARTSYS:
bed636ab 1881 case -EINTR:
e79e0fe3
DR
1882 case -EBUSY:
1883 /*
1884 * EBUSY is ok: this just means that another thread
1885 * already did the job.
1886 */
f65c9168
PZ
1887 ret = VM_FAULT_NOPAGE;
1888 break;
de151cf6 1889 case -ENOMEM:
f65c9168
PZ
1890 ret = VM_FAULT_OOM;
1891 break;
a7c2e1aa 1892 case -ENOSPC:
45d67817 1893 case -EFAULT:
f65c9168
PZ
1894 ret = VM_FAULT_SIGBUS;
1895 break;
de151cf6 1896 default:
a7c2e1aa 1897 WARN_ONCE(ret, "unhandled error in i915_gem_fault: %i\n", ret);
f65c9168
PZ
1898 ret = VM_FAULT_SIGBUS;
1899 break;
de151cf6 1900 }
f65c9168 1901 return ret;
de151cf6
JB
1902}
1903
901782b2
CW
1904/**
1905 * i915_gem_release_mmap - remove physical page mappings
1906 * @obj: obj in question
1907 *
af901ca1 1908 * Preserve the reservation of the mmapping with the DRM core code, but
901782b2
CW
1909 * relinquish ownership of the pages back to the system.
1910 *
1911 * It is vital that we remove the page mapping if we have mapped a tiled
1912 * object through the GTT and then lose the fence register due to
1913 * resource pressure. Similarly if the object has been moved out of the
1914 * aperture, then pages mapped into userspace must be revoked. Removing the
1915 * mapping will then trigger a page fault on the next user access, allowing
1916 * fixup by i915_gem_fault().
1917 */
d05ca301 1918void
05394f39 1919i915_gem_release_mmap(struct drm_i915_gem_object *obj)
901782b2 1920{
349f2ccf
CW
1921 /* Serialisation between user GTT access and our code depends upon
1922 * revoking the CPU's PTE whilst the mutex is held. The next user
1923 * pagefault then has to wait until we release the mutex.
1924 */
1925 lockdep_assert_held(&obj->base.dev->struct_mutex);
1926
6299f992
CW
1927 if (!obj->fault_mappable)
1928 return;
901782b2 1929
6796cb16
DH
1930 drm_vma_node_unmap(&obj->base.vma_node,
1931 obj->base.dev->anon_inode->i_mapping);
349f2ccf
CW
1932
1933 /* Ensure that the CPU's PTEs are revoked and there are no outstanding
1934 * memory transactions from userspace before we return. The TLB
1935 * flushing implied by the PTE revocation above *should* be
1936 * sufficient, an extra barrier here just provides us with a bit
1937 * of paranoid documentation about our requirement to serialise
1938 * memory writes before touching registers / GSM.
1939 */
1940 wmb();
1941
6299f992 1942 obj->fault_mappable = false;
901782b2
CW
1943}
1944
eedd10f4
CW
1945void
1946i915_gem_release_all_mmaps(struct drm_i915_private *dev_priv)
1947{
1948 struct drm_i915_gem_object *obj;
1949
1950 list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list)
1951 i915_gem_release_mmap(obj);
1952}
1953
ad1a7d20
CW
1954/**
1955 * i915_gem_get_ggtt_size - return required global GTT size for an object
a9f1481f 1956 * @dev_priv: i915 device
ad1a7d20
CW
1957 * @size: object size
1958 * @tiling_mode: tiling mode
1959 *
1960 * Return the required global GTT size for an object, taking into account
1961 * potential fence register mapping.
1962 */
a9f1481f
CW
1963u64 i915_gem_get_ggtt_size(struct drm_i915_private *dev_priv,
1964 u64 size, int tiling_mode)
92b88aeb 1965{
ad1a7d20 1966 u64 ggtt_size;
92b88aeb 1967
ad1a7d20
CW
1968 GEM_BUG_ON(size == 0);
1969
a9f1481f 1970 if (INTEL_GEN(dev_priv) >= 4 ||
e28f8711
CW
1971 tiling_mode == I915_TILING_NONE)
1972 return size;
92b88aeb
CW
1973
1974 /* Previous chips need a power-of-two fence region when tiling */
a9f1481f 1975 if (IS_GEN3(dev_priv))
ad1a7d20 1976 ggtt_size = 1024*1024;
92b88aeb 1977 else
ad1a7d20 1978 ggtt_size = 512*1024;
92b88aeb 1979
ad1a7d20
CW
1980 while (ggtt_size < size)
1981 ggtt_size <<= 1;
92b88aeb 1982
ad1a7d20 1983 return ggtt_size;
92b88aeb
CW
1984}
1985
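/*
 * Worked example (illustrative): on gen3 a 1.5 MiB tiled object starts
 * from the 1 MiB minimum and doubles until the object fits, so it
 * occupies a 2 MiB fence region; on gen4+ the size is returned
 * unchanged, as no power-of-two fence region is required.
 */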
de151cf6 1986/**
ad1a7d20 1987 * i915_gem_get_ggtt_alignment - return required global GTT alignment
a9f1481f 1988 * @dev_priv: i915 device
14bb2c11
TU
1989 * @size: object size
1990 * @tiling_mode: tiling mode
ad1a7d20 1991 * @fenced: is fenced alignment required or not
de151cf6 1992 *
ad1a7d20 1993 * Return the required global GTT alignment for an object, taking into account
5e783301 1994 * potential fence register mapping.
de151cf6 1995 */
a9f1481f 1996u64 i915_gem_get_ggtt_alignment(struct drm_i915_private *dev_priv, u64 size,
ad1a7d20 1997 int tiling_mode, bool fenced)
de151cf6 1998{
ad1a7d20
CW
1999 GEM_BUG_ON(size == 0);
2000
de151cf6
JB
2001 /*
2002 * Minimum alignment is 4k (GTT page size), but might be greater
2003 * if a fence register is needed for the object.
2004 */
a9f1481f 2005 if (INTEL_GEN(dev_priv) >= 4 || (!fenced && IS_G33(dev_priv)) ||
e28f8711 2006 tiling_mode == I915_TILING_NONE)
de151cf6
JB
2007 return 4096;
2008
a00b10c3
CW
2009 /*
2010 * Previous chips need to be aligned to the size of the smallest
2011 * fence register that can contain the object.
2012 */
a9f1481f 2013 return i915_gem_get_ggtt_size(dev_priv, size, tiling_mode);
a00b10c3
CW
2014}
2015
d8cb5086
CW
2016static int i915_gem_object_create_mmap_offset(struct drm_i915_gem_object *obj)
2017{
fac5e23e 2018 struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
f3f6184c 2019 int err;
da494d7c 2020
f3f6184c
CW
2021 err = drm_gem_create_mmap_offset(&obj->base);
2022 if (!err)
2023 return 0;
d8cb5086 2024
f3f6184c
CW
2025 /* We can idle the GPU locklessly to flush stale objects, but in order
2026 * to claim that space for ourselves, we need to take the big
2027 * struct_mutex to free the requests+objects and allocate our slot.
d8cb5086 2028 */
f3f6184c
CW
2029 err = i915_gem_wait_for_idle(dev_priv, true);
2030 if (err)
2031 return err;
2032
2033 err = i915_mutex_lock_interruptible(&dev_priv->drm);
2034 if (!err) {
2035 i915_gem_retire_requests(dev_priv);
2036 err = drm_gem_create_mmap_offset(&obj->base);
2037 mutex_unlock(&dev_priv->drm.struct_mutex);
2038 }
da494d7c 2039
f3f6184c 2040 return err;
d8cb5086
CW
2041}
2042
2043static void i915_gem_object_free_mmap_offset(struct drm_i915_gem_object *obj)
2044{
d8cb5086
CW
2045 drm_gem_free_mmap_offset(&obj->base);
2046}
2047
da6b51d0 2048int
ff72145b
DA
2049i915_gem_mmap_gtt(struct drm_file *file,
2050 struct drm_device *dev,
da6b51d0 2051 uint32_t handle,
ff72145b 2052 uint64_t *offset)
de151cf6 2053{
05394f39 2054 struct drm_i915_gem_object *obj;
de151cf6
JB
2055 int ret;
2056
03ac0642 2057 obj = i915_gem_object_lookup(file, handle);
f3f6184c
CW
2058 if (!obj)
2059 return -ENOENT;
ab18282d 2060
d8cb5086 2061 ret = i915_gem_object_create_mmap_offset(obj);
f3f6184c
CW
2062 if (ret == 0)
2063 *offset = drm_vma_node_offset_addr(&obj->base.vma_node);
de151cf6 2064
f3f6184c 2065 i915_gem_object_put_unlocked(obj);
1d7cfea1 2066 return ret;
de151cf6
JB
2067}
2068
ff72145b
DA
2069/**
2070 * i915_gem_mmap_gtt_ioctl - prepare an object for GTT mmap'ing
2071 * @dev: DRM device
2072 * @data: GTT mapping ioctl data
2073 * @file: GEM object info
2074 *
2075 * Simply returns the fake offset to userspace so it can mmap it.
2076 * The mmap call will end up in drm_gem_mmap(), which will set things
2077 * up so we can get faults in the handler above.
2078 *
2079 * The fault handler will take care of binding the object into the GTT
2080 * (since it may have been evicted to make room for something), allocating
2081 * a fence register, and mapping the appropriate aperture address into
2082 * userspace.
2083 */
2084int
2085i915_gem_mmap_gtt_ioctl(struct drm_device *dev, void *data,
2086 struct drm_file *file)
2087{
2088 struct drm_i915_gem_mmap_gtt *args = data;
2089
da6b51d0 2090 return i915_gem_mmap_gtt(file, dev, args->handle, &args->offset);
ff72145b
DA
2091}
2092
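/*
 * Userspace sketch (hedged example; 'fd', 'handle' and 'size' are assumed
 * to describe an open DRM fd and an existing GEM object): the ioctl only
 * hands back the fake offset, and the actual CPU mapping comes from a
 * regular mmap() of the DRM fd at that offset, which routes the first
 * access into i915_gem_fault():
 *
 *	struct drm_i915_gem_mmap_gtt arg = { .handle = handle };
 *	void *ptr = MAP_FAILED;
 *	if (ioctl(fd, DRM_IOCTL_I915_GEM_MMAP_GTT, &arg) == 0)
 *		ptr = mmap(NULL, size, PROT_READ | PROT_WRITE,
 *			   MAP_SHARED, fd, arg.offset);
 */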
225067ee
DV
2093/* Immediately discard the backing storage */
2094static void
2095i915_gem_object_truncate(struct drm_i915_gem_object *obj)
e5281ccd 2096{
4d6294bf 2097 i915_gem_object_free_mmap_offset(obj);
1286ff73 2098
4d6294bf
CW
2099 if (obj->base.filp == NULL)
2100 return;
e5281ccd 2101
225067ee
DV
2102 /* Our goal here is to return as much of the memory as
2103 * is possible back to the system as we are called from OOM.
2104 * To do this we must instruct the shmfs to drop all of its
2105 * backing pages, *now*.
2106 */
5537252b 2107 shmem_truncate_range(file_inode(obj->base.filp), 0, (loff_t)-1);
225067ee
DV
2108 obj->madv = __I915_MADV_PURGED;
2109}
e5281ccd 2110
5537252b
CW
2111/* Try to discard unwanted pages */
2112static void
2113i915_gem_object_invalidate(struct drm_i915_gem_object *obj)
225067ee 2114{
5537252b
CW
2115 struct address_space *mapping;
2116
2117 switch (obj->madv) {
2118 case I915_MADV_DONTNEED:
2119 i915_gem_object_truncate(obj);
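 /* fall through - truncation leaves the object marked purged */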
2120 case __I915_MADV_PURGED:
2121 return;
2122 }
2123
2124 if (obj->base.filp == NULL)
2125 return;
2126
93c76a3d 2127 mapping = obj->base.filp->f_mapping;
5537252b 2128 invalidate_mapping_pages(mapping, 0, (loff_t)-1);
e5281ccd
CW
2129}
2130
5cdf5881 2131static void
05394f39 2132i915_gem_object_put_pages_gtt(struct drm_i915_gem_object *obj)
673a394b 2133{
85d1225e
DG
2134 struct sgt_iter sgt_iter;
2135 struct page *page;
90797e6d 2136 int ret;
1286ff73 2137
05394f39 2138 BUG_ON(obj->madv == __I915_MADV_PURGED);
673a394b 2139
6c085a72 2140 ret = i915_gem_object_set_to_cpu_domain(obj, true);
f4457ae7 2141 if (WARN_ON(ret)) {
6c085a72
CW
2142 /* In the event of a disaster, abandon all caches and
2143 * hope for the best.
2144 */
2c22569b 2145 i915_gem_clflush_object(obj, true);
6c085a72
CW
2146 obj->base.read_domains = obj->base.write_domain = I915_GEM_DOMAIN_CPU;
2147 }
2148
e2273302
ID
2149 i915_gem_gtt_finish_object(obj);
2150
6dacfd2f 2151 if (i915_gem_object_needs_bit17_swizzle(obj))
280b713b
EA
2152 i915_gem_object_save_bit_17_swizzle(obj);
2153
05394f39
CW
2154 if (obj->madv == I915_MADV_DONTNEED)
2155 obj->dirty = 0;
3ef94daa 2156
85d1225e 2157 for_each_sgt_page(page, sgt_iter, obj->pages) {
05394f39 2158 if (obj->dirty)
9da3da66 2159 set_page_dirty(page);
3ef94daa 2160
05394f39 2161 if (obj->madv == I915_MADV_WILLNEED)
9da3da66 2162 mark_page_accessed(page);
3ef94daa 2163
09cbfeaf 2164 put_page(page);
3ef94daa 2165 }
05394f39 2166 obj->dirty = 0;
673a394b 2167
9da3da66
CW
2168 sg_free_table(obj->pages);
2169 kfree(obj->pages);
37e680a1 2170}
6c085a72 2171
dd624afd 2172int
37e680a1
CW
2173i915_gem_object_put_pages(struct drm_i915_gem_object *obj)
2174{
2175 const struct drm_i915_gem_object_ops *ops = obj->ops;
2176
2f745ad3 2177 if (obj->pages == NULL)
37e680a1
CW
2178 return 0;
2179
a5570178
CW
2180 if (obj->pages_pin_count)
2181 return -EBUSY;
2182
15717de2 2183 GEM_BUG_ON(obj->bind_count);
3e123027 2184
a2165e31
CW
2185 /* ->put_pages might need to allocate memory for the bit17 swizzle
2186 * array, hence protect them from being reaped by removing them from gtt
2187 * lists early. */
35c20a60 2188 list_del(&obj->global_list);
a2165e31 2189
0a798eb9 2190 if (obj->mapping) {
4b30cb23
CW
2191 void *ptr;
2192
2193 ptr = ptr_mask_bits(obj->mapping);
2194 if (is_vmalloc_addr(ptr))
2195 vunmap(ptr);
fb8621d3 2196 else
4b30cb23
CW
2197 kunmap(kmap_to_page(ptr));
2198
0a798eb9
CW
2199 obj->mapping = NULL;
2200 }
2201
37e680a1 2202 ops->put_pages(obj);
05394f39 2203 obj->pages = NULL;
37e680a1 2204
5537252b 2205 i915_gem_object_invalidate(obj);
6c085a72
CW
2206
2207 return 0;
2208}
2209
37e680a1 2210static int
6c085a72 2211i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj)
e5281ccd 2212{
fac5e23e 2213 struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
e5281ccd
CW
2214 int page_count, i;
2215 struct address_space *mapping;
9da3da66
CW
2216 struct sg_table *st;
2217 struct scatterlist *sg;
85d1225e 2218 struct sgt_iter sgt_iter;
e5281ccd 2219 struct page *page;
90797e6d 2220 unsigned long last_pfn = 0; /* suppress gcc warning */
e2273302 2221 int ret;
6c085a72 2222 gfp_t gfp;
e5281ccd 2223
6c085a72
CW
2224 /* Assert that the object is not currently in any GPU domain. As it
2225 * wasn't in the GTT, there shouldn't be any way it could have been in
2226 * a GPU cache.
2227 */
2228 BUG_ON(obj->base.read_domains & I915_GEM_GPU_DOMAINS);
2229 BUG_ON(obj->base.write_domain & I915_GEM_GPU_DOMAINS);
2230
9da3da66
CW
2231 st = kmalloc(sizeof(*st), GFP_KERNEL);
2232 if (st == NULL)
2233 return -ENOMEM;
2234
05394f39 2235 page_count = obj->base.size / PAGE_SIZE;
9da3da66 2236 if (sg_alloc_table(st, page_count, GFP_KERNEL)) {
9da3da66 2237 kfree(st);
e5281ccd 2238 return -ENOMEM;
9da3da66 2239 }
e5281ccd 2240
9da3da66
CW
2241 /* Get the list of pages out of our struct file. They'll be pinned
2242 * at this point until we release them.
2243 *
2244 * Fail silently without starting the shrinker
2245 */
93c76a3d 2246 mapping = obj->base.filp->f_mapping;
c62d2555 2247 gfp = mapping_gfp_constraint(mapping, ~(__GFP_IO | __GFP_RECLAIM));
d0164adc 2248 gfp |= __GFP_NORETRY | __GFP_NOWARN;
90797e6d
ID
2249 sg = st->sgl;
2250 st->nents = 0;
2251 for (i = 0; i < page_count; i++) {
6c085a72
CW
2252 page = shmem_read_mapping_page_gfp(mapping, i, gfp);
2253 if (IS_ERR(page)) {
21ab4e74
CW
2254 i915_gem_shrink(dev_priv,
2255 page_count,
2256 I915_SHRINK_BOUND |
2257 I915_SHRINK_UNBOUND |
2258 I915_SHRINK_PURGEABLE);
6c085a72
CW
2259 page = shmem_read_mapping_page_gfp(mapping, i, gfp);
2260 }
2261 if (IS_ERR(page)) {
2262 /* We've tried hard to allocate the memory by reaping
2263 * our own buffer, now let the real VM do its job and
2264 * go down in flames if truly OOM.
2265 */
6c085a72 2266 i915_gem_shrink_all(dev_priv);
f461d1be 2267 page = shmem_read_mapping_page(mapping, i);
e2273302
ID
2268 if (IS_ERR(page)) {
2269 ret = PTR_ERR(page);
6c085a72 2270 goto err_pages;
e2273302 2271 }
6c085a72 2272 }
426729dc
KRW
2273#ifdef CONFIG_SWIOTLB
2274 if (swiotlb_nr_tbl()) {
2275 st->nents++;
2276 sg_set_page(sg, page, PAGE_SIZE, 0);
2277 sg = sg_next(sg);
2278 continue;
2279 }
2280#endif
90797e6d
ID
2281 if (!i || page_to_pfn(page) != last_pfn + 1) {
2282 if (i)
2283 sg = sg_next(sg);
2284 st->nents++;
2285 sg_set_page(sg, page, PAGE_SIZE, 0);
2286 } else {
2287 sg->length += PAGE_SIZE;
2288 }
2289 last_pfn = page_to_pfn(page);
3bbbe706
DV
2290
2291 /* Check that the i965g/gm workaround works. */
2292 WARN_ON((gfp & __GFP_DMA32) && (last_pfn >= 0x00100000UL));
e5281ccd 2293 }
426729dc
KRW
2294#ifdef CONFIG_SWIOTLB
2295 if (!swiotlb_nr_tbl())
2296#endif
2297 sg_mark_end(sg);
74ce6b6c
CW
2298 obj->pages = st;
2299
e2273302
ID
2300 ret = i915_gem_gtt_prepare_object(obj);
2301 if (ret)
2302 goto err_pages;
2303
6dacfd2f 2304 if (i915_gem_object_needs_bit17_swizzle(obj))
e5281ccd
CW
2305 i915_gem_object_do_bit_17_swizzle(obj);
2306
3e510a8e 2307 if (i915_gem_object_is_tiled(obj) &&
656bfa3a
DV
2308 dev_priv->quirks & QUIRK_PIN_SWIZZLED_PAGES)
2309 i915_gem_object_pin_pages(obj);
2310
e5281ccd
CW
2311 return 0;
2312
2313err_pages:
90797e6d 2314 sg_mark_end(sg);
85d1225e
DG
2315 for_each_sgt_page(page, sgt_iter, st)
2316 put_page(page);
9da3da66
CW
2317 sg_free_table(st);
2318 kfree(st);
0820baf3
CW
2319
2320 /* shmemfs first checks if there is enough memory to allocate the page
2321 * and reports ENOSPC should there be insufficient, along with the usual
2322 * ENOMEM for a genuine allocation failure.
2323 *
2324 * We use ENOSPC in our driver to mean that we have run out of aperture
2325 * space and so want to translate the error from shmemfs back to our
2326 * usual understanding of ENOMEM.
2327 */
e2273302
ID
2328 if (ret == -ENOSPC)
2329 ret = -ENOMEM;
2330
2331 return ret;
673a394b
EA
2332}
2333
37e680a1
CW
2334/* Ensure that the associated pages are gathered from the backing storage
2335 * and pinned into our object. i915_gem_object_get_pages() may be called
2336 * multiple times before they are released by a single call to
2337 * i915_gem_object_put_pages() - once the pages are no longer referenced
2338 * either as a result of memory pressure (reaping pages under the shrinker)
2339 * or as the object is itself released.
2340 */
2341int
2342i915_gem_object_get_pages(struct drm_i915_gem_object *obj)
2343{
fac5e23e 2344 struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
37e680a1
CW
2345 const struct drm_i915_gem_object_ops *ops = obj->ops;
2346 int ret;
2347
2f745ad3 2348 if (obj->pages)
37e680a1
CW
2349 return 0;
2350
43e28f09 2351 if (obj->madv != I915_MADV_WILLNEED) {
bd9b6a4e 2352 DRM_DEBUG("Attempting to obtain a purgeable object\n");
8c99e57d 2353 return -EFAULT;
43e28f09
CW
2354 }
2355
a5570178
CW
2356 BUG_ON(obj->pages_pin_count);
2357
37e680a1
CW
2358 ret = ops->get_pages(obj);
2359 if (ret)
2360 return ret;
2361
35c20a60 2362 list_add_tail(&obj->global_list, &dev_priv->mm.unbound_list);
ee286370
CW
2363
2364 obj->get_page.sg = obj->pages->sgl;
2365 obj->get_page.last = 0;
2366
37e680a1 2367 return 0;
673a394b
EA
2368}
2369
dd6034c6 2370/* The 'mapping' part of i915_gem_object_pin_map() below */
d31d7cb1
CW
2371static void *i915_gem_object_map(const struct drm_i915_gem_object *obj,
2372 enum i915_map_type type)
dd6034c6
DG
2373{
2374 unsigned long n_pages = obj->base.size >> PAGE_SHIFT;
2375 struct sg_table *sgt = obj->pages;
85d1225e
DG
2376 struct sgt_iter sgt_iter;
2377 struct page *page;
b338fa47
DG
2378 struct page *stack_pages[32];
2379 struct page **pages = stack_pages;
dd6034c6 2380 unsigned long i = 0;
d31d7cb1 2381 pgprot_t pgprot;
dd6034c6
DG
2382 void *addr;
2383
2384 /* A single page can always be kmapped */
d31d7cb1 2385 if (n_pages == 1 && type == I915_MAP_WB)
dd6034c6
DG
2386 return kmap(sg_page(sgt->sgl));
2387
b338fa47
DG
2388 if (n_pages > ARRAY_SIZE(stack_pages)) {
2389 /* Too big for stack -- allocate temporary array instead */
2390 pages = drm_malloc_gfp(n_pages, sizeof(*pages), GFP_TEMPORARY);
2391 if (!pages)
2392 return NULL;
2393 }
dd6034c6 2394
85d1225e
DG
2395 for_each_sgt_page(page, sgt_iter, sgt)
2396 pages[i++] = page;
dd6034c6
DG
2397
2398 /* Check that we have the expected number of pages */
2399 GEM_BUG_ON(i != n_pages);
2400
d31d7cb1
CW
2401 switch (type) {
2402 case I915_MAP_WB:
2403 pgprot = PAGE_KERNEL;
2404 break;
2405 case I915_MAP_WC:
2406 pgprot = pgprot_writecombine(PAGE_KERNEL_IO);
2407 break;
2408 }
2409 addr = vmap(pages, n_pages, 0, pgprot);
dd6034c6 2410
b338fa47
DG
2411 if (pages != stack_pages)
2412 drm_free_large(pages);
dd6034c6
DG
2413
2414 return addr;
2415}
2416
2417/* get, pin, and map the pages of the object into kernel space */
d31d7cb1
CW
2418void *i915_gem_object_pin_map(struct drm_i915_gem_object *obj,
2419 enum i915_map_type type)
0a798eb9 2420{
d31d7cb1
CW
2421 enum i915_map_type has_type;
2422 bool pinned;
2423 void *ptr;
0a798eb9
CW
2424 int ret;
2425
2426 lockdep_assert_held(&obj->base.dev->struct_mutex);
d31d7cb1 2427 GEM_BUG_ON(!i915_gem_object_has_struct_page(obj));
0a798eb9
CW
2428
2429 ret = i915_gem_object_get_pages(obj);
2430 if (ret)
2431 return ERR_PTR(ret);
2432
2433 i915_gem_object_pin_pages(obj);
d31d7cb1 2434 pinned = obj->pages_pin_count > 1;
0a798eb9 2435
d31d7cb1
CW
2436 ptr = ptr_unpack_bits(obj->mapping, has_type);
2437 if (ptr && has_type != type) {
2438 if (pinned) {
2439 ret = -EBUSY;
2440 goto err;
0a798eb9 2441 }
d31d7cb1
CW
2442
2443 if (is_vmalloc_addr(ptr))
2444 vunmap(ptr);
2445 else
2446 kunmap(kmap_to_page(ptr));
2447
2448 ptr = obj->mapping = NULL;
0a798eb9
CW
2449 }
2450
d31d7cb1
CW
2451 if (!ptr) {
2452 ptr = i915_gem_object_map(obj, type);
2453 if (!ptr) {
2454 ret = -ENOMEM;
2455 goto err;
2456 }
2457
2458 obj->mapping = ptr_pack_bits(ptr, type);
2459 }
2460
2461 return ptr;
2462
2463err:
2464 i915_gem_object_unpin_pages(obj);
2465 return ERR_PTR(ret);
0a798eb9
CW
2466}
2467
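/*
 * Usage sketch (hedged; 'data' and 'len' stand in for a caller buffer,
 * and this assumes the caller holds struct_mutex as asserted above and
 * uses i915_gem_object_unpin_map() as the counterpart):
 *
 *	void *vaddr = i915_gem_object_pin_map(obj, I915_MAP_WB);
 *	if (IS_ERR(vaddr))
 *		return PTR_ERR(vaddr);
 *	memcpy(vaddr, data, len);
 *	i915_gem_object_unpin_map(obj);
 */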
b4716185 2468static void
fa545cbf
CW
2469i915_gem_object_retire__write(struct i915_gem_active *active,
2470 struct drm_i915_gem_request *request)
e2d05a8b 2471{
fa545cbf
CW
2472 struct drm_i915_gem_object *obj =
2473 container_of(active, struct drm_i915_gem_object, last_write);
b4716185 2474
de152b62 2475 intel_fb_obj_flush(obj, true, ORIGIN_CS);
e2d05a8b
BW
2476}
2477
caea7476 2478static void
fa545cbf
CW
2479i915_gem_object_retire__read(struct i915_gem_active *active,
2480 struct drm_i915_gem_request *request)
ce44b0ea 2481{
fa545cbf
CW
2482 int idx = request->engine->id;
2483 struct drm_i915_gem_object *obj =
2484 container_of(active, struct drm_i915_gem_object, last_read[idx]);
ce44b0ea 2485
573adb39 2486 GEM_BUG_ON(!i915_gem_object_has_active_engine(obj, idx));
b4716185 2487
573adb39
CW
2488 i915_gem_object_clear_active(obj, idx);
2489 if (i915_gem_object_is_active(obj))
b4716185 2490 return;
caea7476 2491
6c246959
CW
2492 /* Bump our place on the bound list to keep it roughly in LRU order
2493 * so that we don't steal from recently used but inactive objects
2494 * (unless we are forced to ofc!)
2495 */
b0decaf7
CW
2496 if (obj->bind_count)
2497 list_move_tail(&obj->global_list,
2498 &request->i915->mm.bound_list);
caea7476 2499
f8c417cd 2500 i915_gem_object_put(obj);
c8725f3d
CW
2501}
2502
7b4d3a16 2503static bool i915_context_is_banned(const struct i915_gem_context *ctx)
be62acb4 2504{
44e2c070 2505 unsigned long elapsed;
be62acb4 2506
44e2c070 2507 if (ctx->hang_stats.banned)
be62acb4
MK
2508 return true;
2509
7b4d3a16 2510 elapsed = get_seconds() - ctx->hang_stats.guilty_ts;
676fa572
CW
2511 if (ctx->hang_stats.ban_period_seconds &&
2512 elapsed <= ctx->hang_stats.ban_period_seconds) {
7b4d3a16
CW
2513 DRM_DEBUG("context hanging too fast, banning!\n");
2514 return true;
be62acb4
MK
2515 }
2516
2517 return false;
2518}
2519
7b4d3a16 2520static void i915_set_reset_status(struct i915_gem_context *ctx,
b6b0fac0 2521 const bool guilty)
aa60c664 2522{
7b4d3a16 2523 struct i915_ctx_hang_stats *hs = &ctx->hang_stats;
44e2c070
MK
2524
2525 if (guilty) {
7b4d3a16 2526 hs->banned = i915_context_is_banned(ctx);
44e2c070
MK
2527 hs->batch_active++;
2528 hs->guilty_ts = get_seconds();
2529 } else {
2530 hs->batch_pending++;
aa60c664
MK
2531 }
2532}
2533
8d9fc7fd 2534struct drm_i915_gem_request *
0bc40be8 2535i915_gem_find_active_request(struct intel_engine_cs *engine)
9375e446 2536{
4db080f9
CW
2537 struct drm_i915_gem_request *request;
2538
f69a02c9
CW
2539 /* We are called by the error capture and reset at a random
2540 * point in time. In particular, note that neither is crucially
2541 * ordered with an interrupt. After a hang, the GPU is dead and we
2542 * assume that no more writes can happen (we waited long enough for
2543 * all writes that were in transaction to be flushed) - adding an
2544 * extra delay for a recent interrupt is pointless. Hence, we do
2545 * not need an engine->irq_seqno_barrier() before the seqno reads.
2546 */
efdf7c06 2547 list_for_each_entry(request, &engine->request_list, link) {
f69a02c9 2548 if (i915_gem_request_completed(request))
4db080f9 2549 continue;
aa60c664 2550
b6b0fac0 2551 return request;
4db080f9 2552 }
b6b0fac0
MK
2553
2554 return NULL;
2555}
2556
7b4d3a16 2557static void i915_gem_reset_engine_status(struct intel_engine_cs *engine)
b6b0fac0
MK
2558{
2559 struct drm_i915_gem_request *request;
2560 bool ring_hung;
2561
0bc40be8 2562 request = i915_gem_find_active_request(engine);
b6b0fac0
MK
2563 if (request == NULL)
2564 return;
2565
0bc40be8 2566 ring_hung = engine->hangcheck.score >= HANGCHECK_SCORE_RING_HUNG;
b6b0fac0 2567
7b4d3a16 2568 i915_set_reset_status(request->ctx, ring_hung);
efdf7c06 2569 list_for_each_entry_continue(request, &engine->request_list, link)
7b4d3a16 2570 i915_set_reset_status(request->ctx, false);
4db080f9 2571}
aa60c664 2572
7b4d3a16 2573static void i915_gem_reset_engine_cleanup(struct intel_engine_cs *engine)
4db080f9 2574{
dcff85c8 2575 struct drm_i915_gem_request *request;
7e37f889 2576 struct intel_ring *ring;
608c1a52 2577
70c2a24d
CW
2578 /* Ensure the irq handler finishes and is not run again. */
2579 tasklet_kill(&engine->irq_tasklet);
2580
c4b0930b
CW
2581 /* Mark all pending requests as complete so that any concurrent
2582 * (lockless) lookup doesn't try and wait upon the request as we
2583 * reset it.
2584 */
87b723a1 2585 intel_engine_init_seqno(engine, engine->last_submitted_seqno);
c4b0930b 2586
dcb4c12a
OM
2587 /*
2588 * Clear the execlists queue up before freeing the requests, as those
2589 * are the ones that keep the context and ringbuffer backing objects
2590 * pinned in place.
2591 */
dcb4c12a 2592
7de1691a 2593 if (i915.enable_execlists) {
70c2a24d
CW
2594 spin_lock(&engine->execlist_lock);
2595 INIT_LIST_HEAD(&engine->execlist_queue);
2596 i915_gem_request_put(engine->execlist_port[0].request);
2597 i915_gem_request_put(engine->execlist_port[1].request);
2598 memset(engine->execlist_port, 0, sizeof(engine->execlist_port));
2599 spin_unlock(&engine->execlist_lock);
dcb4c12a
OM
2600 }
2601
1d62beea
BW
2602 /*
2603 * We must free the requests after all the corresponding objects have
2604 * been moved off active lists, which is the same order as the normal
2605 * retire_requests function uses. This is important as objects may hold
2606 * implicit references on things like e.g. ppgtt address spaces through
2607 * the request.
2608 */
87b723a1
CW
2609 request = i915_gem_active_raw(&engine->last_request,
2610 &engine->i915->drm.struct_mutex);
dcff85c8 2611 if (request)
05235c53 2612 i915_gem_request_retire_upto(request);
dcff85c8 2613 GEM_BUG_ON(intel_engine_is_active(engine));
608c1a52
CW
2614
2615 /* Having flushed all requests from all queues, we know that all
2616 * ringbuffers must now be empty. However, since we do not reclaim
2617 * all space when retiring the request (to prevent HEADs colliding
2618 * with rapid ringbuffer wraparound) the amount of available space
2619 * upon reset is less than when we start. Do one more pass over
2620 * all the ringbuffers to reset last_retired_head.
2621 */
7e37f889
CW
2622 list_for_each_entry(ring, &engine->buffers, link) {
2623 ring->last_retired_head = ring->tail;
2624 intel_ring_update_space(ring);
608c1a52 2625 }
2ed53a94 2626
b913b33c 2627 engine->i915->gt.active_engines &= ~intel_engine_flag(engine);
673a394b
EA
2628}
2629
069efc1d 2630void i915_gem_reset(struct drm_device *dev)
673a394b 2631{
fac5e23e 2632 struct drm_i915_private *dev_priv = to_i915(dev);
e2f80391 2633 struct intel_engine_cs *engine;
673a394b 2634
4db080f9
CW
2635 /*
2636 * Before we free the objects from the requests, we need to inspect
2637 * them to find the guilty party. As the requests only borrow
2638 * their reference to the objects, the inspection must be done first.
2639 */
b4ac5afc 2640 for_each_engine(engine, dev_priv)
7b4d3a16 2641 i915_gem_reset_engine_status(engine);
4db080f9 2642
b4ac5afc 2643 for_each_engine(engine, dev_priv)
7b4d3a16 2644 i915_gem_reset_engine_cleanup(engine);
b913b33c 2645 mod_delayed_work(dev_priv->wq, &dev_priv->gt.idle_work, 0);
dfaae392 2646
acce9ffa
BW
2647 i915_gem_context_reset(dev);
2648
19b2dbde 2649 i915_gem_restore_fences(dev);
673a394b
EA
2650}
2651
75ef9da2 2652static void
673a394b
EA
2653i915_gem_retire_work_handler(struct work_struct *work)
2654{
b29c19b6 2655 struct drm_i915_private *dev_priv =
67d97da3 2656 container_of(work, typeof(*dev_priv), gt.retire_work.work);
91c8a326 2657 struct drm_device *dev = &dev_priv->drm;
673a394b 2658
891b48cf 2659 /* Come back later if the device is busy... */
b29c19b6 2660 if (mutex_trylock(&dev->struct_mutex)) {
67d97da3 2661 i915_gem_retire_requests(dev_priv);
b29c19b6 2662 mutex_unlock(&dev->struct_mutex);
673a394b 2663 }
67d97da3
CW
2664
2665 /* Keep the retire handler running until we are finally idle.
2666 * We do not need to do this test under locking as in the worst-case
2667 * we queue the retire worker once too often.
2668 */
c9615613
CW
2669 if (READ_ONCE(dev_priv->gt.awake)) {
2670 i915_queue_hangcheck(dev_priv);
67d97da3
CW
2671 queue_delayed_work(dev_priv->wq,
2672 &dev_priv->gt.retire_work,
bcb45086 2673 round_jiffies_up_relative(HZ));
c9615613 2674 }
b29c19b6 2675}
0a58705b 2676
b29c19b6
CW
2677static void
2678i915_gem_idle_work_handler(struct work_struct *work)
2679{
2680 struct drm_i915_private *dev_priv =
67d97da3 2681 container_of(work, typeof(*dev_priv), gt.idle_work.work);
91c8a326 2682 struct drm_device *dev = &dev_priv->drm;
b4ac5afc 2683 struct intel_engine_cs *engine;
67d97da3
CW
2684 bool rearm_hangcheck;
2685
2686 if (!READ_ONCE(dev_priv->gt.awake))
2687 return;
2688
2689 if (READ_ONCE(dev_priv->gt.active_engines))
2690 return;
2691
2692 rearm_hangcheck =
2693 cancel_delayed_work_sync(&dev_priv->gpu_error.hangcheck_work);
2694
2695 if (!mutex_trylock(&dev->struct_mutex)) {
2696 /* Currently busy, come back later */
2697 mod_delayed_work(dev_priv->wq,
2698 &dev_priv->gt.idle_work,
2699 msecs_to_jiffies(50));
2700 goto out_rearm;
2701 }
2702
2703 if (dev_priv->gt.active_engines)
2704 goto out_unlock;
b29c19b6 2705
b4ac5afc 2706 for_each_engine(engine, dev_priv)
67d97da3 2707 i915_gem_batch_pool_fini(&engine->batch_pool);
35c94185 2708
67d97da3
CW
2709 GEM_BUG_ON(!dev_priv->gt.awake);
2710 dev_priv->gt.awake = false;
2711 rearm_hangcheck = false;
30ecad77 2712
67d97da3
CW
2713 if (INTEL_GEN(dev_priv) >= 6)
2714 gen6_rps_idle(dev_priv);
2715 intel_runtime_pm_put(dev_priv);
2716out_unlock:
2717 mutex_unlock(&dev->struct_mutex);
b29c19b6 2718
67d97da3
CW
2719out_rearm:
2720 if (rearm_hangcheck) {
2721 GEM_BUG_ON(!dev_priv->gt.awake);
2722 i915_queue_hangcheck(dev_priv);
35c94185 2723 }
673a394b
EA
2724}
2725
b1f788c6
CW
2726void i915_gem_close_object(struct drm_gem_object *gem, struct drm_file *file)
2727{
2728 struct drm_i915_gem_object *obj = to_intel_bo(gem);
2729 struct drm_i915_file_private *fpriv = file->driver_priv;
2730 struct i915_vma *vma, *vn;
2731
2732 mutex_lock(&obj->base.dev->struct_mutex);
2733 list_for_each_entry_safe(vma, vn, &obj->vma_list, obj_link)
2734 if (vma->vm->file == fpriv)
2735 i915_vma_close(vma);
2736 mutex_unlock(&obj->base.dev->struct_mutex);
2737}
2738
23ba4fd0
BW
2739/**
2740 * i915_gem_wait_ioctl - implements DRM_IOCTL_I915_GEM_WAIT
14bb2c11
TU
2741 * @dev: drm device pointer
2742 * @data: ioctl data blob
2743 * @file: drm file pointer
23ba4fd0
BW
2744 *
2745 * Returns 0 if successful, else an error is returned with the remaining time in
2746 * the timeout parameter.
2747 * -ETIME: object is still busy after timeout
2748 * -ERESTARTSYS: signal interrupted the wait
2749 * -ENOENT: object doesn't exist
2750 * Also possible, but rare:
2751 * -EAGAIN: GPU wedged
2752 * -ENOMEM: damn
2753 * -ENODEV: Internal IRQ fail
2754 * -E?: The add request failed
2755 *
2756 * The wait ioctl with a timeout of 0 reimplements the busy ioctl. With any
2757 * non-zero timeout parameter the wait ioctl will wait for the given number of
2758 * nanoseconds on an object becoming unbusy. Since the wait itself does so
2759 * without holding struct_mutex the object may become re-busied before this
2760 * function completes. A similar but shorter race condition exists in the busy
2761 * ioctl.
2762 */
2763int
2764i915_gem_wait_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
2765{
2766 struct drm_i915_gem_wait *args = data;
033d549b 2767 struct intel_rps_client *rps = to_rps_client(file);
23ba4fd0 2768 struct drm_i915_gem_object *obj;
033d549b
CW
2769 unsigned long active;
2770 int idx, ret = 0;
23ba4fd0 2771
11b5d511
DV
2772 if (args->flags != 0)
2773 return -EINVAL;
2774
03ac0642 2775 obj = i915_gem_object_lookup(file, args->bo_handle);
033d549b 2776 if (!obj)
23ba4fd0 2777 return -ENOENT;
23ba4fd0 2778
033d549b
CW
2779 active = __I915_BO_ACTIVE(obj);
2780 for_each_active(active, idx) {
2781 s64 *timeout = args->timeout_ns >= 0 ? &args->timeout_ns : NULL;
2782 ret = i915_gem_active_wait_unlocked(&obj->last_read[idx], true,
2783 timeout, rps);
2784 if (ret)
2785 break;
b4716185
CW
2786 }
2787
033d549b 2788 i915_gem_object_put_unlocked(obj);
ff865885 2789 return ret;
23ba4fd0
BW
2790}
2791
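/*
 * Userspace sketch (hedged example; 'fd' and 'handle' as in the mmap_gtt
 * sketch above): wait up to 100ms for the object to go idle. Per the
 * comment above, the remaining time is reported back in timeout_ns, and
 * a timeout of 0 degenerates into a non-blocking busy check:
 *
 *	struct drm_i915_gem_wait wait = {
 *		.bo_handle = handle,
 *		.timeout_ns = 100 * 1000 * 1000,
 *	};
 *	int err = ioctl(fd, DRM_IOCTL_I915_GEM_WAIT, &wait);
 */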
b4716185 2792static int
fa545cbf 2793__i915_gem_object_sync(struct drm_i915_gem_request *to,
8e637178 2794 struct drm_i915_gem_request *from)
b4716185 2795{
b4716185
CW
2796 int ret;
2797
8e637178 2798 if (to->engine == from->engine)
b4716185
CW
2799 return 0;
2800
39df9190 2801 if (!i915.semaphores) {
776f3236
CW
2802 ret = i915_wait_request(from,
2803 from->i915->mm.interruptible,
2804 NULL,
2805 NO_WAITBOOST);
b4716185
CW
2806 if (ret)
2807 return ret;
b4716185 2808 } else {
8e637178 2809 int idx = intel_engine_sync_index(from->engine, to->engine);
ddf07be7 2810 if (from->fence.seqno <= from->engine->semaphore.sync_seqno[idx])
b4716185
CW
2811 return 0;
2812
8e637178 2813 trace_i915_gem_ring_sync_to(to, from);
ddf07be7 2814 ret = to->engine->semaphore.sync_to(to, from);
b4716185
CW
2815 if (ret)
2816 return ret;
2817
ddf07be7 2818 from->engine->semaphore.sync_seqno[idx] = from->fence.seqno;
b4716185
CW
2819 }
2820
2821 return 0;
2822}
2823
5816d648
BW
2824/**
2825 * i915_gem_object_sync - sync an object to a ring.
2826 *
2827 * @obj: object which may be in use on another ring.
8e637178 2828 * @to: request we are wishing to use
5816d648
BW
2829 *
2830 * This code is meant to abstract object synchronization with the GPU.
8e637178
CW
2831 * Conceptually we serialise writes between engines inside the GPU.
2832 * We only allow one engine to write into a buffer at any time, but
2833 * multiple readers. To ensure each has a coherent view of memory, we must:
b4716185
CW
2834 *
2835 * - If there is an outstanding write request to the object, the new
2836 * request must wait for it to complete (either CPU or in hw, requests
2837 * on the same ring will be naturally ordered).
2838 *
2839 * - If we are a write request (pending_write_domain is set), the new
2840 * request must wait for outstanding read requests to complete.
5816d648
BW
2841 *
2842 * Returns 0 if successful, else propagates up the lower layer error.
2843 */
2911a35b
BW
2844int
2845i915_gem_object_sync(struct drm_i915_gem_object *obj,
8e637178 2846 struct drm_i915_gem_request *to)
2911a35b 2847{
8cac6f6c
CW
2848 struct i915_gem_active *active;
2849 unsigned long active_mask;
2850 int idx;
41c52415 2851
8cac6f6c 2852 lockdep_assert_held(&obj->base.dev->struct_mutex);
2911a35b 2853
573adb39 2854 active_mask = i915_gem_object_get_active(obj);
8cac6f6c
CW
2855 if (!active_mask)
2856 return 0;
27c01aae 2857
8cac6f6c
CW
2858 if (obj->base.pending_write_domain) {
2859 active = obj->last_read;
b4716185 2860 } else {
8cac6f6c
CW
2861 active_mask = 1;
2862 active = &obj->last_write;
b4716185 2863 }
8cac6f6c
CW
2864
2865 for_each_active(active_mask, idx) {
2866 struct drm_i915_gem_request *request;
2867 int ret;
2868
2869 request = i915_gem_active_peek(&active[idx],
2870 &obj->base.dev->struct_mutex);
2871 if (!request)
2872 continue;
2873
fa545cbf 2874 ret = __i915_gem_object_sync(to, request);
b4716185
CW
2875 if (ret)
2876 return ret;
2877 }
2911a35b 2878
b4716185 2879 return 0;
2911a35b
BW
2880}
2881
8ef8561f
CW
2882static void __i915_vma_iounmap(struct i915_vma *vma)
2883{
20dfbde4 2884 GEM_BUG_ON(i915_vma_is_pinned(vma));
8ef8561f
CW
2885
2886 if (vma->iomap == NULL)
2887 return;
2888
2889 io_mapping_unmap(vma->iomap);
2890 vma->iomap = NULL;
2891}
2892
df0e9a28 2893int i915_vma_unbind(struct i915_vma *vma)
673a394b 2894{
07fe0b12 2895 struct drm_i915_gem_object *obj = vma->obj;
b0decaf7 2896 unsigned long active;
43e28f09 2897 int ret;
673a394b 2898
b0decaf7
CW
2899 /* First wait upon any activity as retiring the request may
2900 * have side-effects such as unpinning or even unbinding this vma.
2901 */
2902 active = i915_vma_get_active(vma);
df0e9a28 2903 if (active) {
b0decaf7
CW
2904 int idx;
2905
b1f788c6
CW
2906 /* When a closed VMA is retired, it is unbound - eek.
2907 * In order to prevent it from being recursively closed,
2908 * take a pin on the vma so that the second unbind is
2909 * aborted.
2910 */
20dfbde4 2911 __i915_vma_pin(vma);
b1f788c6 2912
b0decaf7
CW
2913 for_each_active(active, idx) {
2914 ret = i915_gem_active_retire(&vma->last_read[idx],
2915 &vma->vm->dev->struct_mutex);
2916 if (ret)
b1f788c6 2917 break;
b0decaf7
CW
2918 }
2919
20dfbde4 2920 __i915_vma_unpin(vma);
b1f788c6
CW
2921 if (ret)
2922 return ret;
2923
b0decaf7
CW
2924 GEM_BUG_ON(i915_vma_is_active(vma));
2925 }
2926
20dfbde4 2927 if (i915_vma_is_pinned(vma))
b0decaf7
CW
2928 return -EBUSY;
2929
b1f788c6
CW
2930 if (!drm_mm_node_allocated(&vma->node))
2931 goto destroy;
433544bd 2932
15717de2
CW
2933 GEM_BUG_ON(obj->bind_count == 0);
2934 GEM_BUG_ON(!obj->pages);
c4670ad0 2935
05a20d09 2936 if (i915_vma_is_map_and_fenceable(vma)) {
8b1bc9b4 2937 /* release the fence reg _after_ flushing */
49ef5294 2938 ret = i915_vma_put_fence(vma);
8b1bc9b4
DV
2939 if (ret)
2940 return ret;
8ef8561f 2941
cd3127d6
CW
2942 /* Force a pagefault for domain tracking on next user access */
2943 i915_gem_release_mmap(obj);
2944
8ef8561f 2945 __i915_vma_iounmap(vma);
05a20d09 2946 vma->flags &= ~I915_VMA_CAN_FENCE;
8b1bc9b4 2947 }
96b47b65 2948
50e046b6
CW
2949 if (likely(!vma->vm->closed)) {
2950 trace_i915_vma_unbind(vma);
2951 vma->vm->unbind_vma(vma);
2952 }
3272db53 2953 vma->flags &= ~(I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND);
6f65e29a 2954
50e046b6
CW
2955 drm_mm_remove_node(&vma->node);
2956 list_move_tail(&vma->vm_link, &vma->vm->unbound_list);
2957
05a20d09
CW
2958 if (vma->pages != obj->pages) {
2959 GEM_BUG_ON(!vma->pages);
2960 sg_free_table(vma->pages);
2961 kfree(vma->pages);
fe14d5f4 2962 }
247177dd 2963 vma->pages = NULL;
673a394b 2964
2f633156 2965 /* Since the unbound list is global, only move to that list if
b93dab6e 2966 * no more VMAs exist. */
15717de2
CW
2967 if (--obj->bind_count == 0)
2968 list_move_tail(&obj->global_list,
2969 &to_i915(obj->base.dev)->mm.unbound_list);
673a394b 2970
70903c3b
CW
2971 /* And finally now the object is completely decoupled from this vma,
2972 * we can drop its hold on the backing storage and allow it to be
2973 * reaped by the shrinker.
2974 */
2975 i915_gem_object_unpin_pages(obj);
2976
b1f788c6 2977destroy:
3272db53 2978 if (unlikely(i915_vma_is_closed(vma)))
b1f788c6
CW
2979 i915_vma_destroy(vma);
2980
88241785 2981 return 0;
54cf91dc
CW
2982}
2983
dcff85c8
CW
2984int i915_gem_wait_for_idle(struct drm_i915_private *dev_priv,
2985 bool interruptible)
4df2faf4 2986{
e2f80391 2987 struct intel_engine_cs *engine;
b4ac5afc 2988 int ret;
4df2faf4 2989
b4ac5afc 2990 for_each_engine(engine, dev_priv) {
62e63007
CW
2991 if (engine->last_context == NULL)
2992 continue;
2993
dcff85c8 2994 ret = intel_engine_idle(engine, interruptible);
1ec14ad3
CW
2995 if (ret)
2996 return ret;
2997 }
4df2faf4 2998
8a1a49f9 2999 return 0;
4df2faf4
DV
3000}
3001
4144f9b5 3002static bool i915_gem_valid_gtt_space(struct i915_vma *vma,
42d6ab48
CW
3003 unsigned long cache_level)
3004{
4144f9b5 3005 struct drm_mm_node *gtt_space = &vma->node;
42d6ab48
CW
3006 struct drm_mm_node *other;
3007
4144f9b5
CW
3008 /*
3009 * On some machines we have to be careful when putting differing types
3010 * of snoopable memory together to avoid the prefetcher crossing memory
3011 * domains and dying. During vm initialisation, we decide whether or not
3012 * these constraints apply and set the drm_mm.color_adjust
3013 * appropriately.
42d6ab48 3014 */
4144f9b5 3015 if (vma->vm->mm.color_adjust == NULL)
42d6ab48
CW
3016 return true;
3017
c6cfb325 3018 if (!drm_mm_node_allocated(gtt_space))
42d6ab48
CW
3019 return true;
3020
3021 if (list_empty(&gtt_space->node_list))
3022 return true;
3023
3024 other = list_entry(gtt_space->node_list.prev, struct drm_mm_node, node_list);
3025 if (other->allocated && !other->hole_follows && other->color != cache_level)
3026 return false;
3027
3028 other = list_entry(gtt_space->node_list.next, struct drm_mm_node, node_list);
3029 if (other->allocated && !gtt_space->hole_follows && other->color != cache_level)
3030 return false;
3031
3032 return true;
3033}
3034
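/*
 * Illustration (hypothetical layout): with color_adjust set, nodes of
 * differing cache level must be separated by a hole, which is what the
 * neighbour checks above enforce:
 *
 *	[ LLC ][ hole ][ UC ]	-> valid
 *	[ LLC ][ UC ]		-> rejected
 */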
673a394b 3035/**
59bfa124
CW
3036 * i915_vma_insert - finds a slot for the vma in its address space
3037 * @vma: the vma
91b2db6f 3038 * @size: requested size in bytes (can be larger than the VMA)
59bfa124 3039 * @alignment: required alignment
14bb2c11 3040 * @flags: mask of PIN_* flags to use
59bfa124
CW
3041 *
3042 * First we try to allocate some free space that meets the requirements for
3043 * the VMA. Failiing that, if the flags permit, it will evict an old VMA,
3044 * preferrably the oldest idle entry to make room for the new VMA.
3045 *
3046 * Returns:
3047 * 0 on success, negative error code otherwise.
673a394b 3048 */
59bfa124
CW
3049static int
3050i915_vma_insert(struct i915_vma *vma, u64 size, u64 alignment, u64 flags)
673a394b 3051{
59bfa124
CW
3052 struct drm_i915_private *dev_priv = to_i915(vma->vm->dev);
3053 struct drm_i915_gem_object *obj = vma->obj;
de180033 3054 u64 start, end;
07f73f69 3055 int ret;
673a394b 3056
3272db53 3057 GEM_BUG_ON(vma->flags & (I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND));
59bfa124 3058 GEM_BUG_ON(drm_mm_node_allocated(&vma->node));
de180033
CW
3059
3060 size = max(size, vma->size);
3061 if (flags & PIN_MAPPABLE)
3e510a8e
CW
3062 size = i915_gem_get_ggtt_size(dev_priv, size,
3063 i915_gem_object_get_tiling(obj));
de180033 3064
d8923dcf
CW
3065 alignment = max(max(alignment, vma->display_alignment),
3066 i915_gem_get_ggtt_alignment(dev_priv, size,
3067 i915_gem_object_get_tiling(obj),
3068 flags & PIN_MAPPABLE));
a00b10c3 3069
101b506a 3070 start = flags & PIN_OFFSET_BIAS ? flags & PIN_OFFSET_MASK : 0;
de180033
CW
3071
3072 end = vma->vm->total;
101b506a 3073 if (flags & PIN_MAPPABLE)
91b2db6f 3074 end = min_t(u64, end, dev_priv->ggtt.mappable_end);
101b506a 3075 if (flags & PIN_ZONE_4G)
48ea1e32 3076 end = min_t(u64, end, (1ULL << 32) - PAGE_SIZE);
101b506a 3077
91e6711e
JL
3078 /* If binding the object/GGTT view requires more space than the entire
3079 * aperture has, reject it early before evicting everything in a vain
3080 * attempt to find space.
654fc607 3081 */
91e6711e 3082 if (size > end) {
de180033 3083 DRM_DEBUG("Attempting to bind an object larger than the aperture: request=%llu [object=%zd] > %s aperture=%llu\n",
91b2db6f 3084 size, obj->base.size,
1ec9e26d 3085 flags & PIN_MAPPABLE ? "mappable" : "total",
d23db88c 3086 end);
59bfa124 3087 return -E2BIG;
654fc607
CW
3088 }
3089
37e680a1 3090 ret = i915_gem_object_get_pages(obj);
6c085a72 3091 if (ret)
59bfa124 3092 return ret;
6c085a72 3093
fbdda6fb
CW
3094 i915_gem_object_pin_pages(obj);
3095
506a8e87 3096 if (flags & PIN_OFFSET_FIXED) {
59bfa124 3097 u64 offset = flags & PIN_OFFSET_MASK;
de180033 3098 if (offset & (alignment - 1) || offset > end - size) {
506a8e87 3099 ret = -EINVAL;
de180033 3100 goto err_unpin;
506a8e87 3101 }
de180033 3102
506a8e87
CW
3103 vma->node.start = offset;
3104 vma->node.size = size;
3105 vma->node.color = obj->cache_level;
de180033 3106 ret = drm_mm_reserve_node(&vma->vm->mm, &vma->node);
506a8e87
CW
3107 if (ret) {
3108 ret = i915_gem_evict_for_vma(vma);
3109 if (ret == 0)
de180033
CW
3110 ret = drm_mm_reserve_node(&vma->vm->mm, &vma->node);
3111 if (ret)
3112 goto err_unpin;
506a8e87 3113 }
101b506a 3114 } else {
de180033
CW
3115 u32 search_flag, alloc_flag;
3116
506a8e87
CW
3117 if (flags & PIN_HIGH) {
3118 search_flag = DRM_MM_SEARCH_BELOW;
3119 alloc_flag = DRM_MM_CREATE_TOP;
3120 } else {
3121 search_flag = DRM_MM_SEARCH_DEFAULT;
3122 alloc_flag = DRM_MM_CREATE_DEFAULT;
3123 }
101b506a 3124
954c4691
CW
3125 /* We only allocate in PAGE_SIZE/GTT_PAGE_SIZE (4096) chunks,
3126 * so we know that we always have a minimum alignment of 4096.
3127 * The drm_mm range manager is optimised to return results
3128 * with zero alignment, so where possible use the optimal
3129 * path.
3130 */
3131 if (alignment <= 4096)
3132 alignment = 0;
3133
0a9ae0d7 3134search_free:
de180033
CW
3135 ret = drm_mm_insert_node_in_range_generic(&vma->vm->mm,
3136 &vma->node,
506a8e87
CW
3137 size, alignment,
3138 obj->cache_level,
3139 start, end,
3140 search_flag,
3141 alloc_flag);
3142 if (ret) {
de180033 3143 ret = i915_gem_evict_something(vma->vm, size, alignment,
506a8e87
CW
3144 obj->cache_level,
3145 start, end,
3146 flags);
3147 if (ret == 0)
3148 goto search_free;
9731129c 3149
de180033 3150 goto err_unpin;
506a8e87 3151 }
673a394b 3152 }
37508589 3153 GEM_BUG_ON(!i915_gem_valid_gtt_space(vma, obj->cache_level));
673a394b 3154
35c20a60 3155 list_move_tail(&obj->global_list, &dev_priv->mm.bound_list);
de180033 3156 list_move_tail(&vma->vm_link, &vma->vm->inactive_list);
15717de2 3157 obj->bind_count++;
bf1a1092 3158
59bfa124 3159 return 0;
2f633156 3160
bc6bc15b 3161err_unpin:
2f633156 3162 i915_gem_object_unpin_pages(obj);
59bfa124 3163 return ret;
673a394b
EA
3164}
3165
000433b6 3166bool
2c22569b
CW
3167i915_gem_clflush_object(struct drm_i915_gem_object *obj,
3168 bool force)
673a394b 3169{
673a394b
EA
3170 /* If we don't have a page list set up, then we're not pinned
3171 * to GPU, and we can ignore the cache flush because it'll happen
3172 * again at bind time.
3173 */
05394f39 3174 if (obj->pages == NULL)
000433b6 3175 return false;
673a394b 3176
769ce464
ID
3177 /*
3178 * Stolen memory is always coherent with the GPU as it is explicitly
3179 * marked as wc by the system, or the system is cache-coherent.
3180 */
6a2c4232 3181 if (obj->stolen || obj->phys_handle)
000433b6 3182 return false;
769ce464 3183
9c23f7fc
CW
3184 /* If the GPU is snooping the contents of the CPU cache,
3185 * we do not need to manually clear the CPU cache lines. However,
3186 * the caches are only snooped when the render cache is
3187 * flushed/invalidated. As we always have to emit invalidations
3188 * and flushes when moving into and out of the RENDER domain, correct
3189 * snooping behaviour occurs naturally as the result of our domain
3190 * tracking.
3191 */
0f71979a
CW
3192 if (!force && cpu_cache_is_coherent(obj->base.dev, obj->cache_level)) {
3193 obj->cache_dirty = true;
000433b6 3194 return false;
0f71979a 3195 }
9c23f7fc 3196
1c5d22f7 3197 trace_i915_gem_object_clflush(obj);
9da3da66 3198 drm_clflush_sg(obj->pages);
0f71979a 3199 obj->cache_dirty = false;
000433b6
CW
3200
3201 return true;
e47c68e9
EA
3202}
3203
3204/** Flushes the GTT write domain for the object if it's dirty. */
3205static void
05394f39 3206i915_gem_object_flush_gtt_write_domain(struct drm_i915_gem_object *obj)
e47c68e9 3207{
3b5724d7 3208 struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
1c5d22f7 3209
05394f39 3210 if (obj->base.write_domain != I915_GEM_DOMAIN_GTT)
e47c68e9
EA
3211 return;
3212
63256ec5 3213 /* No actual flushing is required for the GTT write domain. Writes
3b5724d7 3214 * to it "immediately" go to main memory as far as we know, so there's
e47c68e9 3215 * no chipset flush. It also doesn't land in render cache.
63256ec5
CW
3216 *
3217 * However, we do have to enforce the order so that all writes through
3218 * the GTT land before any writes to the device, such as updates to
3219 * the GATT itself.
3b5724d7
CW
3220 *
3221 * We also have to wait a bit for the writes to land from the GTT.
3222 * An uncached read (i.e. mmio) seems to be ideal for the round-trip
3223 * timing. This issue has only been observed when switching quickly
3224 * between GTT writes and CPU reads from inside the kernel on recent hw,
3225 * and it appears to only affect discrete GTT blocks (i.e. on LLC
3226 * system agents we cannot reproduce this behaviour).
e47c68e9 3227 */
63256ec5 3228 wmb();
3b5724d7
CW
3229 if (INTEL_GEN(dev_priv) >= 6 && !HAS_LLC(dev_priv))
3230 POSTING_READ(RING_ACTHD(dev_priv->engine[RCS].mmio_base));
63256ec5 3231
d243ad82 3232 intel_fb_obj_flush(obj, false, write_origin(obj, I915_GEM_DOMAIN_GTT));
f99d7069 3233
b0dc465f 3234 obj->base.write_domain = 0;
1c5d22f7 3235 trace_i915_gem_object_change_domain(obj,
05394f39 3236 obj->base.read_domains,
b0dc465f 3237 I915_GEM_DOMAIN_GTT);
e47c68e9
EA
3238}
3239
3240/** Flushes the CPU write domain for the object if it's dirty. */
3241static void
e62b59e4 3242i915_gem_object_flush_cpu_write_domain(struct drm_i915_gem_object *obj)
e47c68e9 3243{
05394f39 3244 if (obj->base.write_domain != I915_GEM_DOMAIN_CPU)
e47c68e9
EA
3245 return;
3246
e62b59e4 3247 if (i915_gem_clflush_object(obj, obj->pin_display))
c033666a 3248 i915_gem_chipset_flush(to_i915(obj->base.dev));
000433b6 3249
de152b62 3250 intel_fb_obj_flush(obj, false, ORIGIN_CPU);
f99d7069 3251
b0dc465f 3252 obj->base.write_domain = 0;
1c5d22f7 3253 trace_i915_gem_object_change_domain(obj,
05394f39 3254 obj->base.read_domains,
b0dc465f 3255 I915_GEM_DOMAIN_CPU);
e47c68e9
EA
3256}
3257
383d5823
CW
3258static void i915_gem_object_bump_inactive_ggtt(struct drm_i915_gem_object *obj)
3259{
3260 struct i915_vma *vma;
3261
3262 list_for_each_entry(vma, &obj->vma_list, obj_link) {
3263 if (!i915_vma_is_ggtt(vma))
3264 continue;
3265
3266 if (i915_vma_is_active(vma))
3267 continue;
3268
3269 if (!drm_mm_node_allocated(&vma->node))
3270 continue;
3271
3272 list_move_tail(&vma->vm_link, &vma->vm->inactive_list);
3273 }
3274}
3275
2ef7eeaa
EA
3276/**
3277 * Moves a single object to the GTT read, and possibly write domain.
14bb2c11
TU
3278 * @obj: object to act on
3279 * @write: ask for write access or read only
2ef7eeaa
EA
3280 *
3281 * This function returns when the move is complete, including waiting on
3282 * flushes to occur.
3283 */
79e53945 3284int
2021746e 3285i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj, bool write)
2ef7eeaa 3286{
1c5d22f7 3287 uint32_t old_write_domain, old_read_domains;
e47c68e9 3288 int ret;
2ef7eeaa 3289
0201f1ec 3290 ret = i915_gem_object_wait_rendering(obj, !write);
88241785
CW
3291 if (ret)
3292 return ret;
3293
c13d87ea
CW
3294 if (obj->base.write_domain == I915_GEM_DOMAIN_GTT)
3295 return 0;
3296
43566ded
CW
3297 /* Flush and acquire obj->pages so that we are coherent through
3298 * direct access in memory with previous cached writes through
3299 * shmemfs and that our cache domain tracking remains valid.
3300 * For example, if the obj->filp was moved to swap without us
3301 * being notified and releasing the pages, we would mistakenly
3302 * continue to assume that the obj remained out of the CPU cached
3303 * domain.
3304 */
3305 ret = i915_gem_object_get_pages(obj);
3306 if (ret)
3307 return ret;
3308
e62b59e4 3309 i915_gem_object_flush_cpu_write_domain(obj);
1c5d22f7 3310
d0a57789
CW
3311 /* Serialise direct access to this object with the barriers for
3312 * coherent writes from the GPU, by effectively invalidating the
3313 * GTT domain upon first access.
3314 */
3315 if ((obj->base.read_domains & I915_GEM_DOMAIN_GTT) == 0)
3316 mb();
3317
05394f39
CW
3318 old_write_domain = obj->base.write_domain;
3319 old_read_domains = obj->base.read_domains;
1c5d22f7 3320
e47c68e9
EA
3321 /* It should now be out of any other write domains, and we can update
3322 * the domain values for our changes.
3323 */
05394f39
CW
3324 BUG_ON((obj->base.write_domain & ~I915_GEM_DOMAIN_GTT) != 0);
3325 obj->base.read_domains |= I915_GEM_DOMAIN_GTT;
e47c68e9 3326 if (write) {
05394f39
CW
3327 obj->base.read_domains = I915_GEM_DOMAIN_GTT;
3328 obj->base.write_domain = I915_GEM_DOMAIN_GTT;
3329 obj->dirty = 1;
2ef7eeaa
EA
3330 }
3331
1c5d22f7
CW
3332 trace_i915_gem_object_change_domain(obj,
3333 old_read_domains,
3334 old_write_domain);
3335
8325a09d 3336 /* And bump the LRU for this access */
383d5823 3337 i915_gem_object_bump_inactive_ggtt(obj);
8325a09d 3338
e47c68e9
EA
3339 return 0;
3340}
3341
ef55f92a
CW
3342/**
3343 * Changes the cache-level of an object across all VMA.
14bb2c11
TU
3344 * @obj: object to act on
3345 * @cache_level: new cache level to set for the object
ef55f92a
CW
3346 *
3347 * After this function returns, the object will be in the new cache-level
3348 * across all GTT and the contents of the backing storage will be coherent,
3349 * with respect to the new cache-level. In order to keep the backing storage
3350 * coherent for all users, we only allow a single cache level to be set
3351 * globally on the object and prevent it from being changed whilst the
3352 * hardware is reading from the object. That is if the object is currently
3353 * on the scanout it will be set to uncached (or equivalent display
3354 * cache coherency) and all non-MOCS GPU access will also be uncached so
3355 * that all direct access to the scanout remains coherent.
3356 */
e4ffd173
CW
3357int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj,
3358 enum i915_cache_level cache_level)
3359{
aa653a68 3360 struct i915_vma *vma;
ed75a55b 3361 int ret = 0;
e4ffd173
CW
3362
3363 if (obj->cache_level == cache_level)
ed75a55b 3364 goto out;
e4ffd173 3365
ef55f92a
CW
3366 /* Inspect the list of currently bound VMA and unbind any that would
3367 * be invalid given the new cache-level. This is principally to
3368 * catch the issue of the CS prefetch crossing page boundaries and
3369 * reading an invalid PTE on older architectures.
3370 */
aa653a68
CW
3371restart:
3372 list_for_each_entry(vma, &obj->vma_list, obj_link) {
ef55f92a
CW
3373 if (!drm_mm_node_allocated(&vma->node))
3374 continue;
3375
20dfbde4 3376 if (i915_vma_is_pinned(vma)) {
ef55f92a
CW
3377 DRM_DEBUG("can not change the cache level of pinned objects\n");
3378 return -EBUSY;
3379 }
3380
aa653a68
CW
3381 if (i915_gem_valid_gtt_space(vma, cache_level))
3382 continue;
3383
3384 ret = i915_vma_unbind(vma);
3385 if (ret)
3386 return ret;
3387
3388 /* As unbinding may affect other elements in the
3389 * obj->vma_list (due to side-effects from retiring
3390 * an active vma), play safe and restart the iterator.
3391 */
3392 goto restart;
42d6ab48
CW
3393 }
3394
ef55f92a
CW
3395 /* We can reuse the existing drm_mm nodes but need to change the
3396 * cache-level on the PTE. We could simply unbind them all and
3397 * rebind with the correct cache-level on next use. However since
3398 * we already have a valid slot, dma mapping, pages etc, we may as
3399 * rewrite the PTE in the belief that doing so tramples upon less
3400 * state and so involves less work.
3401 */
15717de2 3402 if (obj->bind_count) {
ef55f92a
CW
3403 /* Before we change the PTE, the GPU must not be accessing it.
3404 * If we wait upon the object, we know that all the bound
3405 * VMA are no longer active.
3406 */
2e2f351d 3407 ret = i915_gem_object_wait_rendering(obj, false);
e4ffd173
CW
3408 if (ret)
3409 return ret;
3410
aa653a68 3411 if (!HAS_LLC(obj->base.dev) && cache_level != I915_CACHE_NONE) {
ef55f92a
CW
3412 /* Access to snoopable pages through the GTT is
3413 * incoherent and on some machines causes a hard
3414 * lockup. Relinquish the CPU mmaping to force
3415 * userspace to refault in the pages and we can
3416 * then double check if the GTT mapping is still
3417 * valid for that pointer access.
3418 */
3419 i915_gem_release_mmap(obj);
3420
3421 /* As we no longer need a fence for GTT access,
3422 * we can relinquish it now (and so prevent having
3423 * to steal a fence from someone else on the next
3424 * fence request). Note GPU activity would have
3425 * dropped the fence as all snoopable access is
3426 * supposed to be linear.
3427 */
49ef5294
CW
3428 list_for_each_entry(vma, &obj->vma_list, obj_link) {
3429 ret = i915_vma_put_fence(vma);
3430 if (ret)
3431 return ret;
3432 }
ef55f92a
CW
3433 } else {
3434 /* We either have incoherent backing store and
3435 * so no GTT access or the architecture is fully
3436 * coherent. In such cases, existing GTT mmaps
3437 * ignore the cache bit in the PTE and we can
3438 * rewrite it without confusing the GPU or having
3439 * to force userspace to fault back in its mmaps.
3440 */
e4ffd173
CW
3441 }
3442
1c7f4bca 3443 list_for_each_entry(vma, &obj->vma_list, obj_link) {
ef55f92a
CW
3444 if (!drm_mm_node_allocated(&vma->node))
3445 continue;
3446
3447 ret = i915_vma_bind(vma, cache_level, PIN_UPDATE);
3448 if (ret)
3449 return ret;
3450 }
e4ffd173
CW
3451 }
3452
1c7f4bca 3453 list_for_each_entry(vma, &obj->vma_list, obj_link)
2c22569b
CW
3454 vma->node.color = cache_level;
3455 obj->cache_level = cache_level;
3456
ed75a55b 3457out:
ef55f92a
CW
3458 /* Flush the dirty CPU caches to the backing storage so that the
3459 * object is now coherent at its new cache level (with respect
3460 * to the access domain).
3461 */
b50a5371 3462 if (obj->cache_dirty && cpu_write_needs_clflush(obj)) {
0f71979a 3463 if (i915_gem_clflush_object(obj, true))
c033666a 3464 i915_gem_chipset_flush(to_i915(obj->base.dev));
e4ffd173
CW
3465 }
3466
e4ffd173
CW
3467 return 0;
3468}
3469
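/*
 * Illustrative sketch (not part of the driver): making an object coherent
 * for scanout on a write-through capable platform. The enum constants are
 * real; the surrounding caller code is hypothetical and mirrors what
 * pin_to_display_plane() below actually does.
 *
 *	enum i915_cache_level level =
 *		HAS_WT(dev) ? I915_CACHE_WT : I915_CACHE_NONE;
 *
 *	ret = i915_gem_object_set_cache_level(obj, level);
 *	/- on success, every bound VMA carries PTEs with the new cache
 *	 - bits and any dirty CPU cachelines have been flushed
 */
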
int i915_gem_get_caching_ioctl(struct drm_device *dev, void *data,
			       struct drm_file *file)
{
	struct drm_i915_gem_caching *args = data;
	struct drm_i915_gem_object *obj;

	obj = i915_gem_object_lookup(file, args->handle);
	if (!obj)
		return -ENOENT;

	switch (obj->cache_level) {
	case I915_CACHE_LLC:
	case I915_CACHE_L3_LLC:
		args->caching = I915_CACHING_CACHED;
		break;

	case I915_CACHE_WT:
		args->caching = I915_CACHING_DISPLAY;
		break;

	default:
		args->caching = I915_CACHING_NONE;
		break;
	}

	i915_gem_object_put_unlocked(obj);
	return 0;
}

int i915_gem_set_caching_ioctl(struct drm_device *dev, void *data,
			       struct drm_file *file)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct drm_i915_gem_caching *args = data;
	struct drm_i915_gem_object *obj;
	enum i915_cache_level level;
	int ret;

	switch (args->caching) {
	case I915_CACHING_NONE:
		level = I915_CACHE_NONE;
		break;
	case I915_CACHING_CACHED:
		/*
		 * Due to a HW issue on BXT A stepping, GPU stores via a
		 * snooped mapping may leave stale data in a corresponding CPU
		 * cacheline, whereas normally such cachelines would get
		 * invalidated.
		 */
		if (!HAS_LLC(dev) && !HAS_SNOOP(dev))
			return -ENODEV;

		level = I915_CACHE_LLC;
		break;
	case I915_CACHING_DISPLAY:
		level = HAS_WT(dev) ? I915_CACHE_WT : I915_CACHE_NONE;
		break;
	default:
		return -EINVAL;
	}

	intel_runtime_pm_get(dev_priv);

	ret = i915_mutex_lock_interruptible(dev);
	if (ret)
		goto rpm_put;

	obj = i915_gem_object_lookup(file, args->handle);
	if (!obj) {
		ret = -ENOENT;
		goto unlock;
	}

	ret = i915_gem_object_set_cache_level(obj, level);

	i915_gem_object_put(obj);
unlock:
	mutex_unlock(&dev->struct_mutex);
rpm_put:
	intel_runtime_pm_put(dev_priv);

	return ret;
}

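/*
 * Illustrative sketch (not part of the driver): how userspace reaches this
 * entry point, assuming the standard uapi definitions from i915_drm.h, the
 * libdrm drmIoctl() wrapper, and a hypothetical already-open drm_fd.
 *
 *	struct drm_i915_gem_caching arg = {
 *		.handle = bo_handle,
 *		.caching = I915_CACHING_CACHED,
 *	};
 *	if (drmIoctl(drm_fd, DRM_IOCTL_I915_GEM_SET_CACHING, &arg))
 *		/- ENODEV here means snooping is unsupported on this part
 */
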
/*
 * Prepare buffer for display plane (scanout, cursors, etc).
 * Can be called from an uninterruptible phase (modesetting) and allows
 * any flushes to be pipelined (for pageflips).
 */
struct i915_vma *
i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj,
				     u32 alignment,
				     const struct i915_ggtt_view *view)
{
	struct i915_vma *vma;
	u32 old_read_domains, old_write_domain;
	int ret;

	/* Mark the pin_display early so that we account for the
	 * display coherency whilst setting up the cache domains.
	 */
	obj->pin_display++;

	/* The display engine is not coherent with the LLC cache on gen6. As
	 * a result, we make sure that the pinning that is about to occur is
	 * done with uncached PTEs. This is lowest common denominator for all
	 * chipsets.
	 *
	 * However for gen6+, we could do better by using the GFDT bit instead
	 * of uncaching, which would allow us to flush all the LLC-cached data
	 * with that bit in the PTE to main memory with just one PIPE_CONTROL.
	 */
	ret = i915_gem_object_set_cache_level(obj,
					      HAS_WT(obj->base.dev) ? I915_CACHE_WT : I915_CACHE_NONE);
	if (ret) {
		vma = ERR_PTR(ret);
		goto err_unpin_display;
	}

	/* As the user may map the buffer once pinned in the display plane
	 * (e.g. libkms for the bootup splash), we have to ensure that we
	 * always use map_and_fenceable for all scanout buffers. However,
	 * it may simply be too big to fit into mappable, in which case
	 * put it anyway and hope that userspace can cope (but always first
	 * try to preserve the existing ABI).
	 */
	vma = ERR_PTR(-ENOSPC);
	if (view->type == I915_GGTT_VIEW_NORMAL)
		vma = i915_gem_object_ggtt_pin(obj, view, 0, alignment,
					       PIN_MAPPABLE | PIN_NONBLOCK);
	if (IS_ERR(vma))
		vma = i915_gem_object_ggtt_pin(obj, view, 0, alignment, 0);
	if (IS_ERR(vma))
		goto err_unpin_display;

	vma->display_alignment = max_t(u64, vma->display_alignment, alignment);

	WARN_ON(obj->pin_display > i915_vma_pin_count(vma));

	i915_gem_object_flush_cpu_write_domain(obj);

	old_write_domain = obj->base.write_domain;
	old_read_domains = obj->base.read_domains;

	/* It should now be out of any other write domains, and we can update
	 * the domain values for our changes.
	 */
	obj->base.write_domain = 0;
	obj->base.read_domains |= I915_GEM_DOMAIN_GTT;

	trace_i915_gem_object_change_domain(obj,
					    old_read_domains,
					    old_write_domain);

	return vma;

err_unpin_display:
	obj->pin_display--;
	return vma;
}

void
i915_gem_object_unpin_from_display_plane(struct i915_vma *vma)
{
	if (WARN_ON(vma->obj->pin_display == 0))
		return;

	if (--vma->obj->pin_display == 0)
		vma->display_alignment = 0;

	/* Bump the LRU to try and avoid premature eviction whilst flipping */
	if (!i915_vma_is_active(vma))
		list_move_tail(&vma->vm_link, &vma->vm->inactive_list);

	i915_vma_unpin(vma);
	WARN_ON(vma->obj->pin_display > i915_vma_pin_count(vma));
}

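/*
 * Illustrative sketch (not part of the driver): the pin/unpin pairing a
 * display consumer is expected to follow. Error handling is simplified.
 *
 *	vma = i915_gem_object_pin_to_display_plane(obj, alignment, &view);
 *	if (IS_ERR(vma))
 *		return PTR_ERR(vma);
 *	/- ... program the plane to scan out from i915_ggtt_offset(vma) ...
 *	i915_gem_object_unpin_from_display_plane(vma);
 */
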
/**
 * Moves a single object to the CPU read, and possibly write domain.
 * @obj: object to act on
 * @write: requesting write or read-only access
 *
 * This function returns when the move is complete, including waiting on
 * flushes to occur.
 */
int
i915_gem_object_set_to_cpu_domain(struct drm_i915_gem_object *obj, bool write)
{
	uint32_t old_write_domain, old_read_domains;
	int ret;

	ret = i915_gem_object_wait_rendering(obj, !write);
	if (ret)
		return ret;

	if (obj->base.write_domain == I915_GEM_DOMAIN_CPU)
		return 0;

	i915_gem_object_flush_gtt_write_domain(obj);

	old_write_domain = obj->base.write_domain;
	old_read_domains = obj->base.read_domains;

	/* Flush the CPU cache if it's still invalid. */
	if ((obj->base.read_domains & I915_GEM_DOMAIN_CPU) == 0) {
		i915_gem_clflush_object(obj, false);

		obj->base.read_domains |= I915_GEM_DOMAIN_CPU;
	}

	/* It should now be out of any other write domains, and we can update
	 * the domain values for our changes.
	 */
	BUG_ON((obj->base.write_domain & ~I915_GEM_DOMAIN_CPU) != 0);

	/* If we're writing through the CPU, then the GPU read domains will
	 * need to be invalidated at next use.
	 */
	if (write) {
		obj->base.read_domains = I915_GEM_DOMAIN_CPU;
		obj->base.write_domain = I915_GEM_DOMAIN_CPU;
	}

	trace_i915_gem_object_change_domain(obj,
					    old_read_domains,
					    old_write_domain);

	return 0;
}

/* Throttle our rendering by waiting until the ring has completed our requests
 * emitted over 20 msec ago.
 *
 * Note that if we were to use the current jiffies each time around the loop,
 * we wouldn't escape the function with any frames outstanding if the time to
 * render a frame was over 20ms.
 *
 * This should get us reasonable parallelism between CPU and GPU but also
 * relatively low latency when blocking on a particular request to finish.
 */
static int
i915_gem_ring_throttle(struct drm_device *dev, struct drm_file *file)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct drm_i915_file_private *file_priv = file->driver_priv;
	unsigned long recent_enough = jiffies - DRM_I915_THROTTLE_JIFFIES;
	struct drm_i915_gem_request *request, *target = NULL;
	int ret;

	ret = i915_gem_wait_for_error(&dev_priv->gpu_error);
	if (ret)
		return ret;

	/* ABI: return -EIO if already wedged */
	if (i915_terminally_wedged(&dev_priv->gpu_error))
		return -EIO;

	spin_lock(&file_priv->mm.lock);
	list_for_each_entry(request, &file_priv->mm.request_list, client_list) {
		if (time_after_eq(request->emitted_jiffies, recent_enough))
			break;

		/*
		 * Note that the request might not have been submitted yet,
		 * in which case emitted_jiffies will be zero.
		 */
		if (!request->emitted_jiffies)
			continue;

		target = request;
	}
	if (target)
		i915_gem_request_get(target);
	spin_unlock(&file_priv->mm.lock);

	if (target == NULL)
		return 0;

	ret = i915_wait_request(target, true, NULL, NULL);
	i915_gem_request_put(target);

	return ret;
}

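/*
 * Worked example (assumption: DRM_I915_THROTTLE_JIFFIES encodes the 20ms
 * window described above). With HZ=250, 20ms is 5 jiffies, so at
 * jiffies=1000 we get recent_enough=995: requests emitted at jiffy 995 or
 * later terminate the scan, and the newest request emitted *before* that
 * becomes the wait target, bounding the client to roughly one throttle
 * window of outstanding work.
 */
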
static bool
i915_vma_misplaced(struct i915_vma *vma, u64 size, u64 alignment, u64 flags)
{
	if (!drm_mm_node_allocated(&vma->node))
		return false;

	if (vma->node.size < size)
		return true;

	if (alignment && vma->node.start & (alignment - 1))
		return true;

	if (flags & PIN_MAPPABLE && !i915_vma_is_map_and_fenceable(vma))
		return true;

	if (flags & PIN_OFFSET_BIAS &&
	    vma->node.start < (flags & PIN_OFFSET_MASK))
		return true;

	if (flags & PIN_OFFSET_FIXED &&
	    vma->node.start != (flags & PIN_OFFSET_MASK))
		return true;

	return false;
}

void __i915_vma_set_map_and_fenceable(struct i915_vma *vma)
{
	struct drm_i915_gem_object *obj = vma->obj;
	struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
	bool mappable, fenceable;
	u32 fence_size, fence_alignment;

	fence_size = i915_gem_get_ggtt_size(dev_priv,
					    vma->size,
					    i915_gem_object_get_tiling(obj));
	fence_alignment = i915_gem_get_ggtt_alignment(dev_priv,
						      vma->size,
						      i915_gem_object_get_tiling(obj),
						      true);

	fenceable = (vma->node.size == fence_size &&
		     (vma->node.start & (fence_alignment - 1)) == 0);

	mappable = (vma->node.start + fence_size <=
		    dev_priv->ggtt.mappable_end);

	if (mappable && fenceable)
		vma->flags |= I915_VMA_CAN_FENCE;
	else
		vma->flags &= ~I915_VMA_CAN_FENCE;
}

int __i915_vma_do_pin(struct i915_vma *vma,
		      u64 size, u64 alignment, u64 flags)
{
	unsigned int bound = vma->flags;
	int ret;

	GEM_BUG_ON((flags & (PIN_GLOBAL | PIN_USER)) == 0);
	GEM_BUG_ON((flags & PIN_GLOBAL) && !i915_vma_is_ggtt(vma));

	if (WARN_ON(bound & I915_VMA_PIN_OVERFLOW)) {
		ret = -EBUSY;
		goto err;
	}

	if ((bound & I915_VMA_BIND_MASK) == 0) {
		ret = i915_vma_insert(vma, size, alignment, flags);
		if (ret)
			goto err;
	}

	ret = i915_vma_bind(vma, vma->obj->cache_level, flags);
	if (ret)
		goto err;

	if ((bound ^ vma->flags) & I915_VMA_GLOBAL_BIND)
		__i915_vma_set_map_and_fenceable(vma);

	GEM_BUG_ON(i915_vma_misplaced(vma, size, alignment, flags));
	return 0;

err:
	__i915_vma_unpin(vma);
	return ret;
}

struct i915_vma *
i915_gem_object_ggtt_pin(struct drm_i915_gem_object *obj,
			 const struct i915_ggtt_view *view,
			 u64 size,
			 u64 alignment,
			 u64 flags)
{
	struct i915_address_space *vm = &to_i915(obj->base.dev)->ggtt.base;
	struct i915_vma *vma;
	int ret;

	vma = i915_gem_obj_lookup_or_create_vma(obj, vm, view);
	if (IS_ERR(vma))
		return vma;

	if (i915_vma_misplaced(vma, size, alignment, flags)) {
		if (flags & PIN_NONBLOCK &&
		    (i915_vma_is_pinned(vma) || i915_vma_is_active(vma)))
			return ERR_PTR(-ENOSPC);

		WARN(i915_vma_is_pinned(vma),
		     "bo is already pinned in ggtt with incorrect alignment:"
		     " offset=%08x, req.alignment=%llx,"
		     " req.map_and_fenceable=%d, vma->map_and_fenceable=%d\n",
		     i915_ggtt_offset(vma), alignment,
		     !!(flags & PIN_MAPPABLE),
		     i915_vma_is_map_and_fenceable(vma));
		ret = i915_vma_unbind(vma);
		if (ret)
			return ERR_PTR(ret);
	}

	ret = i915_vma_pin(vma, size, alignment, flags | PIN_GLOBAL);
	if (ret)
		return ERR_PTR(ret);

	return vma;
}

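/*
 * Illustrative sketch (not part of the driver): pinning an object into the
 * mappable aperture with a non-blocking first attempt, then releasing it.
 * Assumes a NULL view selects the normal GGTT view, as other callers do.
 *
 *	vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0,
 *				       PIN_MAPPABLE | PIN_NONBLOCK);
 *	if (IS_ERR(vma))
 *		vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0, 0);
 *	if (IS_ERR(vma))
 *		return PTR_ERR(vma);
 *	/- ... access through the aperture ...
 *	i915_vma_unpin(vma);
 */
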
static __always_inline unsigned int __busy_read_flag(unsigned int id)
{
	/* Note that we could alias engines in the execbuf API, but
	 * that would be very unwise as it prevents userspace from
	 * fine control over engine selection. Ahem.
	 *
	 * This should be something like EXEC_MAX_ENGINE instead of
	 * I915_NUM_ENGINES.
	 */
	BUILD_BUG_ON(I915_NUM_ENGINES > 16);
	return 0x10000 << id;
}

static __always_inline unsigned int __busy_write_id(unsigned int id)
{
	/* The uABI guarantees an active writer is also amongst the read
	 * engines. This would be true if we accessed the activity tracking
	 * under the lock, but as we perform the lookup of the object and
	 * its activity locklessly we can not guarantee that the last_write
	 * being active implies that we have set the same engine flag from
	 * last_read - hence we always set both read and write busy for
	 * last_write.
	 */
	return id | __busy_read_flag(id);
}

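/*
 * Worked example of the resulting bit layout: for engine id 1 the read
 * flag is 0x10000 << 1 == 0x20000, and the write id is 1 | 0x20000 ==
 * 0x20001. Userspace therefore sees the writer's engine id in the low
 * 16 bits of args->busy and one read bit per engine in the high bits.
 */
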
static __always_inline unsigned int
__busy_set_if_active(const struct i915_gem_active *active,
		     unsigned int (*flag)(unsigned int id))
{
	struct drm_i915_gem_request *request;

	request = rcu_dereference(active->request);
	if (!request || i915_gem_request_completed(request))
		return 0;

	/* This is racy. See __i915_gem_active_get_rcu() for a detailed
	 * discussion of how to handle the race correctly, but for reporting
	 * the busy state we err on the side of potentially reporting the
	 * wrong engine as being busy (but we guarantee that the result
	 * is at least self-consistent).
	 *
	 * As we use SLAB_DESTROY_BY_RCU, the request may be reallocated
	 * whilst we are inspecting it, even under the RCU read lock as we are.
	 * This means that there is a small window for the engine and/or the
	 * seqno to have been overwritten. The seqno will always be in the
	 * future compared to the intended, and so we know that if that
	 * seqno is idle (on whatever engine) our request is idle and the
	 * return 0 above is correct.
	 *
	 * The issue is that if the engine is switched, it is just as likely
	 * to report that it is busy (but since the switch happened, we know
	 * the request should be idle). So there is a small chance that a busy
	 * result is actually the wrong engine.
	 *
	 * So why don't we care?
	 *
	 * For starters, the busy ioctl is a heuristic that is by definition
	 * racy. Even with perfect serialisation in the driver, the hardware
	 * state is constantly advancing - the state we report to the user
	 * is stale.
	 *
	 * The critical information for the busy-ioctl is whether the object
	 * is idle as userspace relies on that to detect whether its next
	 * access will stall, or if it has missed submitting commands to
	 * the hardware allowing the GPU to stall. We never generate a
	 * false-positive for idleness, thus busy-ioctl is reliable at the
	 * most fundamental level, and we maintain the guarantee that a
	 * busy object left to itself will eventually become idle (and stay
	 * idle!).
	 *
	 * We allow ourselves the leeway of potentially misreporting the busy
	 * state because that is an optimisation heuristic that is constantly
	 * in flux. Being quickly able to detect the busy/idle state is much
	 * more important than accurate logging of exactly which engines were
	 * busy.
	 *
	 * For accuracy in reporting the engine, we could use
	 *
	 *	result = 0;
	 *	request = __i915_gem_active_get_rcu(active);
	 *	if (request) {
	 *		if (!i915_gem_request_completed(request))
	 *			result = flag(request->engine->exec_id);
	 *		i915_gem_request_put(request);
	 *	}
	 *
	 * but that still remains susceptible to both hardware and userspace
	 * races. So we accept making the result of that race slightly worse,
	 * given the rarity of the race and its low impact on the result.
	 */
	return flag(READ_ONCE(request->engine->exec_id));
}

static __always_inline unsigned int
busy_check_reader(const struct i915_gem_active *active)
{
	return __busy_set_if_active(active, __busy_read_flag);
}

static __always_inline unsigned int
busy_check_writer(const struct i915_gem_active *active)
{
	return __busy_set_if_active(active, __busy_write_id);
}

int
i915_gem_busy_ioctl(struct drm_device *dev, void *data,
		    struct drm_file *file)
{
	struct drm_i915_gem_busy *args = data;
	struct drm_i915_gem_object *obj;
	unsigned long active;

	obj = i915_gem_object_lookup(file, args->handle);
	if (!obj)
		return -ENOENT;

	args->busy = 0;
	active = __I915_BO_ACTIVE(obj);
	if (active) {
		int idx;

		/* Yes, the lookups are intentionally racy.
		 *
		 * First, we cannot simply rely on __I915_BO_ACTIVE. We have
		 * to regard the value as stale and as our ABI guarantees
		 * forward progress, we confirm the status of each active
		 * request with the hardware.
		 *
		 * Even though we guard the pointer lookup by RCU, that only
		 * guarantees that the pointer and its contents remain
		 * dereferenceable and does *not* mean that the request we
		 * have is the same as the one being tracked by the object.
		 *
		 * Consider that we lookup the request just as it is being
		 * retired and freed. We take a local copy of the pointer,
		 * but before we add its engine into the busy set, the other
		 * thread reallocates it and assigns it to a task on another
		 * engine with a fresh and incomplete seqno. Guarding against
		 * that requires careful serialisation and reference counting,
		 * i.e. using __i915_gem_active_get_request_rcu(). We don't,
		 * instead we expect that if the result is busy, which engines
		 * are busy is not completely reliable - we only guarantee
		 * that the object was busy.
		 */
		rcu_read_lock();

		for_each_active(active, idx)
			args->busy |= busy_check_reader(&obj->last_read[idx]);

		/* For ABI sanity, we only care that the write engine is in
		 * the set of read engines. This should be ensured by the
		 * ordering of setting last_read/last_write in
		 * i915_vma_move_to_active(), and then in reverse in retire.
		 * However, for good measure, we always report the last_write
		 * request as a busy read as well as being a busy write.
		 *
		 * We don't care that the set of active read/write engines
		 * may change during construction of the result, as it is
		 * equally liable to change before userspace can inspect
		 * the result.
		 */
		args->busy |= busy_check_writer(&obj->last_write);

		rcu_read_unlock();
	}

	i915_gem_object_put_unlocked(obj);
	return 0;
}

int
i915_gem_throttle_ioctl(struct drm_device *dev, void *data,
			struct drm_file *file_priv)
{
	return i915_gem_ring_throttle(dev, file_priv);
}

int
i915_gem_madvise_ioctl(struct drm_device *dev, void *data,
		       struct drm_file *file_priv)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct drm_i915_gem_madvise *args = data;
	struct drm_i915_gem_object *obj;
	int ret;

	switch (args->madv) {
	case I915_MADV_DONTNEED:
	case I915_MADV_WILLNEED:
		break;
	default:
		return -EINVAL;
	}

	ret = i915_mutex_lock_interruptible(dev);
	if (ret)
		return ret;

	obj = i915_gem_object_lookup(file_priv, args->handle);
	if (!obj) {
		ret = -ENOENT;
		goto unlock;
	}

	if (obj->pages &&
	    i915_gem_object_is_tiled(obj) &&
	    dev_priv->quirks & QUIRK_PIN_SWIZZLED_PAGES) {
		if (obj->madv == I915_MADV_WILLNEED)
			i915_gem_object_unpin_pages(obj);
		if (args->madv == I915_MADV_WILLNEED)
			i915_gem_object_pin_pages(obj);
	}

	if (obj->madv != __I915_MADV_PURGED)
		obj->madv = args->madv;

	/* if the object is no longer attached, discard its backing storage */
	if (obj->madv == I915_MADV_DONTNEED && obj->pages == NULL)
		i915_gem_object_truncate(obj);

	args->retained = obj->madv != __I915_MADV_PURGED;

	i915_gem_object_put(obj);
unlock:
	mutex_unlock(&dev->struct_mutex);
	return ret;
}

void i915_gem_object_init(struct drm_i915_gem_object *obj,
			  const struct drm_i915_gem_object_ops *ops)
{
	int i;

	INIT_LIST_HEAD(&obj->global_list);
	for (i = 0; i < I915_NUM_ENGINES; i++)
		init_request_active(&obj->last_read[i],
				    i915_gem_object_retire__read);
	init_request_active(&obj->last_write,
			    i915_gem_object_retire__write);
	INIT_LIST_HEAD(&obj->obj_exec_link);
	INIT_LIST_HEAD(&obj->vma_list);
	INIT_LIST_HEAD(&obj->batch_pool_link);

	obj->ops = ops;

	obj->frontbuffer_ggtt_origin = ORIGIN_GTT;
	obj->madv = I915_MADV_WILLNEED;

	i915_gem_info_add_obj(to_i915(obj->base.dev), obj->base.size);
}

static const struct drm_i915_gem_object_ops i915_gem_object_ops = {
	.flags = I915_GEM_OBJECT_HAS_STRUCT_PAGE,
	.get_pages = i915_gem_object_get_pages_gtt,
	.put_pages = i915_gem_object_put_pages_gtt,
};

struct drm_i915_gem_object *i915_gem_object_create(struct drm_device *dev,
						   size_t size)
{
	struct drm_i915_gem_object *obj;
	struct address_space *mapping;
	gfp_t mask;
	int ret;

	obj = i915_gem_object_alloc(dev);
	if (obj == NULL)
		return ERR_PTR(-ENOMEM);

	ret = drm_gem_object_init(dev, &obj->base, size);
	if (ret)
		goto fail;

	mask = GFP_HIGHUSER | __GFP_RECLAIMABLE;
	if (IS_CRESTLINE(dev) || IS_BROADWATER(dev)) {
		/* 965gm cannot relocate objects above 4GiB. */
		mask &= ~__GFP_HIGHMEM;
		mask |= __GFP_DMA32;
	}

	mapping = obj->base.filp->f_mapping;
	mapping_set_gfp_mask(mapping, mask);

	i915_gem_object_init(obj, &i915_gem_object_ops);

	obj->base.write_domain = I915_GEM_DOMAIN_CPU;
	obj->base.read_domains = I915_GEM_DOMAIN_CPU;

	if (HAS_LLC(dev)) {
		/* On some devices, we can have the GPU use the LLC (the CPU
		 * cache) for about a 10% performance improvement
		 * compared to uncached. Graphics requests other than
		 * display scanout are coherent with the CPU in
		 * accessing this cache. This means in this mode we
		 * don't need to clflush on the CPU side, and on the
		 * GPU side we only need to flush internal caches to
		 * get data visible to the CPU.
		 *
		 * However, we maintain the display planes as UC, and so
		 * need to rebind when first used as such.
		 */
		obj->cache_level = I915_CACHE_LLC;
	} else
		obj->cache_level = I915_CACHE_NONE;

	trace_i915_gem_object_create(obj);

	return obj;

fail:
	i915_gem_object_free(obj);

	return ERR_PTR(ret);
}

static bool discard_backing_storage(struct drm_i915_gem_object *obj)
{
	/* If we are the last user of the backing storage (be it shmemfs
	 * pages or stolen etc), we know that the pages are going to be
	 * immediately released. In this case, we can then skip copying
	 * back the contents from the GPU.
	 */

	if (obj->madv != I915_MADV_WILLNEED)
		return false;

	if (obj->base.filp == NULL)
		return true;

	/* At first glance, this looks racy, but then again so would be
	 * userspace racing mmap against close. However, the first external
	 * reference to the filp can only be obtained through the
	 * i915_gem_mmap_ioctl() which safeguards us against the user
	 * acquiring such a reference whilst we are in the middle of
	 * freeing the object.
	 */
	return atomic_long_read(&obj->base.filp->f_count) == 1;
}

void i915_gem_free_object(struct drm_gem_object *gem_obj)
{
	struct drm_i915_gem_object *obj = to_intel_bo(gem_obj);
	struct drm_device *dev = obj->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct i915_vma *vma, *next;

	intel_runtime_pm_get(dev_priv);

	trace_i915_gem_object_destroy(obj);

	/* All file-owned VMA should have been released by this point through
	 * i915_gem_close_object(), or earlier by i915_gem_context_close().
	 * However, the object may also be bound into the global GTT (e.g.
	 * older GPUs without per-process support, or for direct access through
	 * the GTT either for the user or for scanout). Those VMA still need to
	 * be unbound now.
	 */
	list_for_each_entry_safe(vma, next, &obj->vma_list, obj_link) {
		GEM_BUG_ON(!i915_vma_is_ggtt(vma));
		GEM_BUG_ON(i915_vma_is_active(vma));
		vma->flags &= ~I915_VMA_PIN_MASK;
		i915_vma_close(vma);
	}
	GEM_BUG_ON(obj->bind_count);

	/* Stolen objects don't hold a ref, but do hold pin count. Fix that up
	 * before progressing. */
	if (obj->stolen)
		i915_gem_object_unpin_pages(obj);

	WARN_ON(atomic_read(&obj->frontbuffer_bits));

	if (obj->pages && obj->madv == I915_MADV_WILLNEED &&
	    dev_priv->quirks & QUIRK_PIN_SWIZZLED_PAGES &&
	    i915_gem_object_is_tiled(obj))
		i915_gem_object_unpin_pages(obj);

	if (WARN_ON(obj->pages_pin_count))
		obj->pages_pin_count = 0;
	if (discard_backing_storage(obj))
		obj->madv = I915_MADV_DONTNEED;
	i915_gem_object_put_pages(obj);

	BUG_ON(obj->pages);

	if (obj->base.import_attach)
		drm_prime_gem_destroy(&obj->base, NULL);

	if (obj->ops->release)
		obj->ops->release(obj);

	drm_gem_object_release(&obj->base);
	i915_gem_info_remove_obj(dev_priv, obj->base.size);

	kfree(obj->bit_17);
	i915_gem_object_free(obj);

	intel_runtime_pm_put(dev_priv);
}

int i915_gem_suspend(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	int ret;

	intel_suspend_gt_powersave(dev_priv);

	mutex_lock(&dev->struct_mutex);

	/* We have to flush all the executing contexts to main memory so
	 * that they can be saved in the hibernation image. To ensure the
	 * last context image is coherent, we have to switch away from it.
	 * That leaves the dev_priv->kernel_context still active when
	 * we actually suspend, and its image in memory may not match the GPU
	 * state. Fortunately, the kernel_context is disposable and we do
	 * not rely on its state.
	 */
	ret = i915_gem_switch_to_kernel_context(dev_priv);
	if (ret)
		goto err;

	ret = i915_gem_wait_for_idle(dev_priv, true);
	if (ret)
		goto err;

	i915_gem_retire_requests(dev_priv);

	i915_gem_context_lost(dev_priv);
	mutex_unlock(&dev->struct_mutex);

	cancel_delayed_work_sync(&dev_priv->gpu_error.hangcheck_work);
	cancel_delayed_work_sync(&dev_priv->gt.retire_work);
	flush_delayed_work(&dev_priv->gt.idle_work);

	/* Assert that we successfully flushed all the work and
	 * reset the GPU back to its idle, low power state.
	 */
	WARN_ON(dev_priv->gt.awake);

	return 0;

err:
	mutex_unlock(&dev->struct_mutex);
	return ret;
}

void i915_gem_resume(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);

	mutex_lock(&dev->struct_mutex);
	i915_gem_restore_gtt_mappings(dev);

	/* As we didn't flush the kernel context before suspend, we cannot
	 * guarantee that the context image is complete. So let's just reset
	 * it and start again.
	 */
	if (i915.enable_execlists)
		intel_lr_context_reset(dev_priv, dev_priv->kernel_context);

	mutex_unlock(&dev->struct_mutex);
}

void i915_gem_init_swizzling(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);

	if (INTEL_INFO(dev)->gen < 5 ||
	    dev_priv->mm.bit_6_swizzle_x == I915_BIT_6_SWIZZLE_NONE)
		return;

	I915_WRITE(DISP_ARB_CTL, I915_READ(DISP_ARB_CTL) |
				 DISP_TILE_SURFACE_SWIZZLING);

	if (IS_GEN5(dev))
		return;

	I915_WRITE(TILECTL, I915_READ(TILECTL) | TILECTL_SWZCTL);
	if (IS_GEN6(dev))
		I915_WRITE(ARB_MODE, _MASKED_BIT_ENABLE(ARB_MODE_SWIZZLE_SNB));
	else if (IS_GEN7(dev))
		I915_WRITE(ARB_MODE, _MASKED_BIT_ENABLE(ARB_MODE_SWIZZLE_IVB));
	else if (IS_GEN8(dev))
		I915_WRITE(GAMTARBMODE, _MASKED_BIT_ENABLE(ARB_MODE_SWIZZLE_BDW));
	else
		BUG();
}

static void init_unused_ring(struct drm_device *dev, u32 base)
{
	struct drm_i915_private *dev_priv = to_i915(dev);

	I915_WRITE(RING_CTL(base), 0);
	I915_WRITE(RING_HEAD(base), 0);
	I915_WRITE(RING_TAIL(base), 0);
	I915_WRITE(RING_START(base), 0);
}

static void init_unused_rings(struct drm_device *dev)
{
	if (IS_I830(dev)) {
		init_unused_ring(dev, PRB1_BASE);
		init_unused_ring(dev, SRB0_BASE);
		init_unused_ring(dev, SRB1_BASE);
		init_unused_ring(dev, SRB2_BASE);
		init_unused_ring(dev, SRB3_BASE);
	} else if (IS_GEN2(dev)) {
		init_unused_ring(dev, SRB0_BASE);
		init_unused_ring(dev, SRB1_BASE);
	} else if (IS_GEN3(dev)) {
		init_unused_ring(dev, PRB1_BASE);
		init_unused_ring(dev, PRB2_BASE);
	}
}

int
i915_gem_init_hw(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_engine_cs *engine;
	int ret;

	/* Double layer security blanket, see i915_gem_init() */
	intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);

	if (HAS_EDRAM(dev) && INTEL_GEN(dev_priv) < 9)
		I915_WRITE(HSW_IDICR, I915_READ(HSW_IDICR) | IDIHASHMSK(0xf));

	if (IS_HASWELL(dev))
		I915_WRITE(MI_PREDICATE_RESULT_2, IS_HSW_GT3(dev) ?
			   LOWER_SLICE_ENABLED : LOWER_SLICE_DISABLED);

	if (HAS_PCH_NOP(dev)) {
		if (IS_IVYBRIDGE(dev)) {
			u32 temp = I915_READ(GEN7_MSG_CTL);
			temp &= ~(WAIT_FOR_PCH_FLR_ACK | WAIT_FOR_PCH_RESET_ACK);
			I915_WRITE(GEN7_MSG_CTL, temp);
		} else if (INTEL_INFO(dev)->gen >= 7) {
			u32 temp = I915_READ(HSW_NDE_RSTWRN_OPT);
			temp &= ~RESET_PCH_HANDSHAKE_ENABLE;
			I915_WRITE(HSW_NDE_RSTWRN_OPT, temp);
		}
	}

	i915_gem_init_swizzling(dev);

	/*
	 * At least 830 can leave some of the unused rings
	 * "active" (ie. head != tail) after resume which
	 * will prevent c3 entry. Make sure all unused rings
	 * are totally idle.
	 */
	init_unused_rings(dev);

	BUG_ON(!dev_priv->kernel_context);

	ret = i915_ppgtt_init_hw(dev);
	if (ret) {
		DRM_ERROR("PPGTT enable HW failed %d\n", ret);
		goto out;
	}

	/* Need to do basic initialisation of all rings first: */
	for_each_engine(engine, dev_priv) {
		ret = engine->init_hw(engine);
		if (ret)
			goto out;
	}

	intel_mocs_init_l3cc_table(dev);

	/* We can't enable contexts until all firmware is loaded */
	ret = intel_guc_setup(dev);
	if (ret)
		goto out;

out:
	intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
	return ret;
}

bool intel_sanitize_semaphores(struct drm_i915_private *dev_priv, int value)
{
	if (INTEL_INFO(dev_priv)->gen < 6)
		return false;

	/* TODO: make semaphores and Execlists play nicely together */
	if (i915.enable_execlists)
		return false;

	if (value >= 0)
		return value;

#ifdef CONFIG_INTEL_IOMMU
	/* Enable semaphores on SNB when IO remapping is off */
	if (INTEL_INFO(dev_priv)->gen == 6 && intel_iommu_gfx_mapped)
		return false;
#endif

	return true;
}

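/*
 * Decision summary for the helper above (value is presumably the
 * i915.semaphores module parameter, where -1 means "auto"):
 *
 *	gen < 6					-> false
 *	execlists enabled			-> false
 *	value explicitly 0 or 1			-> value
 *	gen == 6 with IOMMU remapping active	-> false
 *	otherwise				-> true
 */
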
int i915_gem_init(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	int ret;

	mutex_lock(&dev->struct_mutex);

	if (!i915.enable_execlists) {
		dev_priv->gt.cleanup_engine = intel_engine_cleanup;
	} else {
		dev_priv->gt.cleanup_engine = intel_logical_ring_cleanup;
	}

	/* This is just a security blanket to placate dragons.
	 * On some systems, we very sporadically observe that the first TLBs
	 * used by the CS may be stale, despite us poking the TLB reset. If
	 * we hold the forcewake during initialisation these problems
	 * just magically go away.
	 */
	intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);

	i915_gem_init_userptr(dev_priv);

	ret = i915_gem_init_ggtt(dev_priv);
	if (ret)
		goto out_unlock;

	ret = i915_gem_context_init(dev);
	if (ret)
		goto out_unlock;

	ret = intel_engines_init(dev);
	if (ret)
		goto out_unlock;

	ret = i915_gem_init_hw(dev);
	if (ret == -EIO) {
		/* Allow engine initialisation to fail by marking the GPU as
		 * wedged. But we only want to do this where the GPU is angry,
		 * for all other failure, such as an allocation failure, bail.
		 */
		DRM_ERROR("Failed to initialize GPU, declaring it wedged\n");
		set_bit(I915_WEDGED, &dev_priv->gpu_error.flags);
		ret = 0;
	}

out_unlock:
	intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
	mutex_unlock(&dev->struct_mutex);

	return ret;
}

void
i915_gem_cleanup_engines(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_engine_cs *engine;

	for_each_engine(engine, dev_priv)
		dev_priv->gt.cleanup_engine(engine);
}

static void
init_engine_lists(struct intel_engine_cs *engine)
{
	INIT_LIST_HEAD(&engine->request_list);
}

void
i915_gem_load_init_fences(struct drm_i915_private *dev_priv)
{
	struct drm_device *dev = &dev_priv->drm;
	int i;

	if (INTEL_INFO(dev_priv)->gen >= 7 && !IS_VALLEYVIEW(dev_priv) &&
	    !IS_CHERRYVIEW(dev_priv))
		dev_priv->num_fence_regs = 32;
	else if (INTEL_INFO(dev_priv)->gen >= 4 || IS_I945G(dev_priv) ||
		 IS_I945GM(dev_priv) || IS_G33(dev_priv))
		dev_priv->num_fence_regs = 16;
	else
		dev_priv->num_fence_regs = 8;

	if (intel_vgpu_active(dev_priv))
		dev_priv->num_fence_regs =
				I915_READ(vgtif_reg(avail_rs.fence_num));

	/* Initialize fence registers to zero */
	for (i = 0; i < dev_priv->num_fence_regs; i++) {
		struct drm_i915_fence_reg *fence = &dev_priv->fence_regs[i];

		fence->i915 = dev_priv;
		fence->id = i;
		list_add_tail(&fence->link, &dev_priv->mm.fence_list);
	}
	i915_gem_restore_fences(dev);

	i915_gem_detect_bit_6_swizzle(dev);
}

void
i915_gem_load_init(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	int i;

	dev_priv->objects =
		kmem_cache_create("i915_gem_object",
				  sizeof(struct drm_i915_gem_object), 0,
				  SLAB_HWCACHE_ALIGN,
				  NULL);
	dev_priv->vmas =
		kmem_cache_create("i915_gem_vma",
				  sizeof(struct i915_vma), 0,
				  SLAB_HWCACHE_ALIGN,
				  NULL);
	dev_priv->requests =
		kmem_cache_create("i915_gem_request",
				  sizeof(struct drm_i915_gem_request), 0,
				  SLAB_HWCACHE_ALIGN |
				  SLAB_RECLAIM_ACCOUNT |
				  SLAB_DESTROY_BY_RCU,
				  NULL);

	INIT_LIST_HEAD(&dev_priv->context_list);
	INIT_LIST_HEAD(&dev_priv->mm.unbound_list);
	INIT_LIST_HEAD(&dev_priv->mm.bound_list);
	INIT_LIST_HEAD(&dev_priv->mm.fence_list);
	for (i = 0; i < I915_NUM_ENGINES; i++)
		init_engine_lists(&dev_priv->engine[i]);
	INIT_DELAYED_WORK(&dev_priv->gt.retire_work,
			  i915_gem_retire_work_handler);
	INIT_DELAYED_WORK(&dev_priv->gt.idle_work,
			  i915_gem_idle_work_handler);
	init_waitqueue_head(&dev_priv->gpu_error.wait_queue);
	init_waitqueue_head(&dev_priv->gpu_error.reset_queue);

	dev_priv->relative_constants_mode = I915_EXEC_CONSTANTS_REL_GENERAL;

	init_waitqueue_head(&dev_priv->pending_flip_queue);

	dev_priv->mm.interruptible = true;

	atomic_set(&dev_priv->mm.bsd_engine_dispatch_index, 0);

	spin_lock_init(&dev_priv->fb_tracking.lock);
}

void i915_gem_load_cleanup(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);

	kmem_cache_destroy(dev_priv->requests);
	kmem_cache_destroy(dev_priv->vmas);
	kmem_cache_destroy(dev_priv->objects);

	/* And ensure that our DESTROY_BY_RCU slabs are truly destroyed */
	rcu_barrier();
}

int i915_gem_freeze_late(struct drm_i915_private *dev_priv)
{
	struct drm_i915_gem_object *obj;

	/* Called just before we write the hibernation image.
	 *
	 * We need to update the domain tracking to reflect that the CPU
	 * will be accessing all the pages to create and restore from the
	 * hibernation, and so upon restoration those pages will be in the
	 * CPU domain.
	 *
	 * To make sure the hibernation image contains the latest state,
	 * we update that state just before writing out the image.
	 */

	list_for_each_entry(obj, &dev_priv->mm.unbound_list, global_list) {
		obj->base.read_domains = I915_GEM_DOMAIN_CPU;
		obj->base.write_domain = I915_GEM_DOMAIN_CPU;
	}

	list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) {
		obj->base.read_domains = I915_GEM_DOMAIN_CPU;
		obj->base.write_domain = I915_GEM_DOMAIN_CPU;
	}

	return 0;
}

void i915_gem_release(struct drm_device *dev, struct drm_file *file)
{
	struct drm_i915_file_private *file_priv = file->driver_priv;
	struct drm_i915_gem_request *request;

	/* Clean up our request list when the client is going away, so that
	 * later retire_requests won't dereference our soon-to-be-gone
	 * file_priv.
	 */
	spin_lock(&file_priv->mm.lock);
	list_for_each_entry(request, &file_priv->mm.request_list, client_list)
		request->file_priv = NULL;
	spin_unlock(&file_priv->mm.lock);

	if (!list_empty(&file_priv->rps.link)) {
		spin_lock(&to_i915(dev)->rps.client_lock);
		list_del(&file_priv->rps.link);
		spin_unlock(&to_i915(dev)->rps.client_lock);
	}
}

int i915_gem_open(struct drm_device *dev, struct drm_file *file)
{
	struct drm_i915_file_private *file_priv;
	int ret;

	DRM_DEBUG_DRIVER("\n");

	file_priv = kzalloc(sizeof(*file_priv), GFP_KERNEL);
	if (!file_priv)
		return -ENOMEM;

	file->driver_priv = file_priv;
	file_priv->dev_priv = to_i915(dev);
	file_priv->file = file;
	INIT_LIST_HEAD(&file_priv->rps.link);

	spin_lock_init(&file_priv->mm.lock);
	INIT_LIST_HEAD(&file_priv->mm.request_list);

	file_priv->bsd_engine = -1;

	ret = i915_gem_context_open(dev, file);
	if (ret)
		kfree(file_priv);

	return ret;
}

/**
 * i915_gem_track_fb - update frontbuffer tracking
 * @old: current GEM buffer for the frontbuffer slots
 * @new: new GEM buffer for the frontbuffer slots
 * @frontbuffer_bits: bitmask of frontbuffer slots
 *
 * This updates the frontbuffer tracking bits @frontbuffer_bits by clearing them
 * from @old and setting them in @new. Both @old and @new can be NULL.
 */
void i915_gem_track_fb(struct drm_i915_gem_object *old,
		       struct drm_i915_gem_object *new,
		       unsigned frontbuffer_bits)
{
	/* Control of individual bits within the mask are guarded by
	 * the owning plane->mutex, i.e. we can never see concurrent
	 * manipulation of individual bits. But since the bitfield as a whole
	 * is updated using RMW, we need to use atomics in order to update
	 * the bits.
	 */
	BUILD_BUG_ON(INTEL_FRONTBUFFER_BITS_PER_PIPE * I915_MAX_PIPES >
		     sizeof(atomic_t) * BITS_PER_BYTE);

	if (old) {
		WARN_ON(!(atomic_read(&old->frontbuffer_bits) & frontbuffer_bits));
		atomic_andnot(frontbuffer_bits, &old->frontbuffer_bits);
	}

	if (new) {
		WARN_ON(atomic_read(&new->frontbuffer_bits) & frontbuffer_bits);
		atomic_or(frontbuffer_bits, &new->frontbuffer_bits);
	}
}

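/*
 * Illustrative sketch (not part of the driver): during a page flip the
 * tracking bits migrate from the old framebuffer's object to the new one.
 * The old_fb_obj/new_fb_obj variables are hypothetical shorthand.
 *
 *	i915_gem_track_fb(old_fb_obj, new_fb_obj,
 *			  INTEL_FRONTBUFFER_PRIMARY(pipe));
 */
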
/* Like i915_gem_object_get_page(), but mark the returned page dirty */
struct page *
i915_gem_object_get_dirty_page(struct drm_i915_gem_object *obj, int n)
{
	struct page *page;

	/* Only default objects have per-page dirty tracking */
	if (WARN_ON(!i915_gem_object_has_struct_page(obj)))
		return NULL;

	page = i915_gem_object_get_page(obj, n);
	set_page_dirty(page);
	return page;
}

/* Allocate a new GEM object and fill it with the supplied data */
struct drm_i915_gem_object *
i915_gem_object_create_from_data(struct drm_device *dev,
				 const void *data, size_t size)
{
	struct drm_i915_gem_object *obj;
	struct sg_table *sg;
	size_t bytes;
	int ret;

	obj = i915_gem_object_create(dev, round_up(size, PAGE_SIZE));
	if (IS_ERR(obj))
		return obj;

	ret = i915_gem_object_set_to_cpu_domain(obj, true);
	if (ret)
		goto fail;

	ret = i915_gem_object_get_pages(obj);
	if (ret)
		goto fail;

	i915_gem_object_pin_pages(obj);
	sg = obj->pages;
	bytes = sg_copy_from_buffer(sg->sgl, sg->nents, (void *)data, size);
	obj->dirty = 1;		/* Backing store is now out of date */
	i915_gem_object_unpin_pages(obj);

	if (WARN_ON(bytes != size)) {
		DRM_ERROR("Incomplete copy, wrote %zu of %zu", bytes, size);
		ret = -EFAULT;
		goto fail;
	}

	return obj;

fail:
	i915_gem_object_put(obj);
	return ERR_PTR(ret);
}
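
/*
 * Illustrative sketch (not part of the driver): a typical use is uploading
 * a firmware blob into a GEM object, with fw assumed to be a struct
 * firmware obtained from request_firmware().
 *
 *	obj = i915_gem_object_create_from_data(dev, fw->data, fw->size);
 *	if (IS_ERR(obj))
 *		return PTR_ERR(obj);
 */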