/*
 * Copyright © 2008-2015 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Eric Anholt <eric@anholt.net>
 */
#include <drm/drm_vma_manager.h>
#include <drm/i915_drm.h>
#include "i915_gem_dmabuf.h"
#include "i915_vgpu.h"
#include "i915_trace.h"
#include "intel_drv.h"
#include "intel_mocs.h"
#include <linux/reservation.h>
#include <linux/shmem_fs.h>
#include <linux/slab.h>
#include <linux/swap.h>
#include <linux/pci.h>
#include <linux/dma-buf.h>
static void i915_gem_object_flush_gtt_write_domain(struct drm_i915_gem_object *obj);
static void i915_gem_object_flush_cpu_write_domain(struct drm_i915_gem_object *obj);

static bool cpu_cache_is_coherent(struct drm_device *dev,
				  enum i915_cache_level level)
{
	return HAS_LLC(dev) || level != I915_CACHE_NONE;
}
static bool cpu_write_needs_clflush(struct drm_i915_gem_object *obj)
{
	if (obj->base.write_domain == I915_GEM_DOMAIN_CPU)
		return false;

	if (!cpu_cache_is_coherent(obj->base.dev, obj->cache_level))
		return true;

	return obj->pin_display;
}
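/*
 * In practice the check above matters most for scanout: on a non-LLC part a
 * CPU write to an uncached buffer lingers in the CPU cache until it is
 * clflushed, so e.g. a framebuffer (pin_display) filled via pwrite must be
 * flushed before the display engine reads it from memory.
 */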
static int
insert_mappable_node(struct drm_i915_private *i915,
		     struct drm_mm_node *node, u32 size)
{
	memset(node, 0, sizeof(*node));
	return drm_mm_insert_node_in_range_generic(&i915->ggtt.base.mm, node,
						   size, 0, 0, 0,
						   i915->ggtt.mappable_end,
						   DRM_MM_SEARCH_DEFAULT,
						   DRM_MM_CREATE_DEFAULT);
}

static void
remove_mappable_node(struct drm_mm_node *node)
{
	drm_mm_remove_node(node);
}
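/*
 * The node reserved above is a single spare page of mappable GGTT space; the
 * pread/pwrite fallback paths below use it as a sliding window, rebinding it
 * to each page of the object in turn when the object itself could not be
 * pinned into the mappable aperture.
 */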
/* some bookkeeping */
static void i915_gem_info_add_obj(struct drm_i915_private *dev_priv,
				  u64 size)
{
	spin_lock(&dev_priv->mm.object_stat_lock);
	dev_priv->mm.object_count++;
	dev_priv->mm.object_memory += size;
	spin_unlock(&dev_priv->mm.object_stat_lock);
}

static void i915_gem_info_remove_obj(struct drm_i915_private *dev_priv,
				     u64 size)
{
	spin_lock(&dev_priv->mm.object_stat_lock);
	dev_priv->mm.object_count--;
	dev_priv->mm.object_memory -= size;
	spin_unlock(&dev_priv->mm.object_stat_lock);
}
static int
i915_gem_wait_for_error(struct i915_gpu_error *error)
{
	int ret;

	if (!i915_reset_in_progress(error))
		return 0;

	/*
	 * Only wait 10 seconds for the gpu reset to complete to avoid hanging
	 * userspace. If it takes that long something really bad is going on and
	 * we should simply try to bail out and fail as gracefully as possible.
	 */
	ret = wait_event_interruptible_timeout(error->reset_queue,
					       !i915_reset_in_progress(error),
					       10*HZ);
	if (ret == 0) {
		DRM_ERROR("Timed out waiting for the gpu reset to complete\n");
		return -EIO;
	} else if (ret < 0) {
		return ret;
	} else {
		return 0;
	}
}
int i915_mutex_lock_interruptible(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	int ret;

	ret = i915_gem_wait_for_error(&dev_priv->gpu_error);
	if (ret)
		return ret;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	return 0;
}
int
i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data,
			    struct drm_file *file)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct i915_ggtt *ggtt = &dev_priv->ggtt;
	struct drm_i915_gem_get_aperture *args = data;
	struct i915_vma *vma;
	u64 pinned;

	pinned = 0;
	mutex_lock(&dev->struct_mutex);
	list_for_each_entry(vma, &ggtt->base.active_list, vm_link)
		if (i915_vma_is_pinned(vma))
			pinned += vma->node.size;
	list_for_each_entry(vma, &ggtt->base.inactive_list, vm_link)
		if (i915_vma_is_pinned(vma))
			pinned += vma->node.size;
	mutex_unlock(&dev->struct_mutex);

	args->aper_size = ggtt->base.total;
	args->aper_available_size = args->aper_size - pinned;

	return 0;
}
static int
i915_gem_object_get_pages_phys(struct drm_i915_gem_object *obj)
{
	struct address_space *mapping = file_inode(obj->base.filp)->i_mapping;
	char *vaddr = obj->phys_handle->vaddr;
	struct sg_table *st;
	struct scatterlist *sg;
	int i;

	if (WARN_ON(i915_gem_object_needs_bit17_swizzle(obj)))
		return -EINVAL;

	for (i = 0; i < obj->base.size / PAGE_SIZE; i++) {
		struct page *page;
		char *src;

		page = shmem_read_mapping_page(mapping, i);
		if (IS_ERR(page))
			return PTR_ERR(page);

		src = kmap_atomic(page);
		memcpy(vaddr, src, PAGE_SIZE);
		drm_clflush_virt_range(vaddr, PAGE_SIZE);
		kunmap_atomic(src);

		put_page(page);
		vaddr += PAGE_SIZE;
	}

	i915_gem_chipset_flush(to_i915(obj->base.dev));

	st = kmalloc(sizeof(*st), GFP_KERNEL);
	if (st == NULL)
		return -ENOMEM;

	if (sg_alloc_table(st, 1, GFP_KERNEL)) {
		kfree(st);
		return -ENOMEM;
	}

	sg = st->sgl;
	sg->offset = 0;
	sg->length = obj->base.size;

	sg_dma_address(sg) = obj->phys_handle->busaddr;
	sg_dma_len(sg) = obj->base.size;

	obj->pages = st;
	return 0;
}
static void
i915_gem_object_put_pages_phys(struct drm_i915_gem_object *obj)
{
	int ret;

	BUG_ON(obj->madv == __I915_MADV_PURGED);

	ret = i915_gem_object_set_to_cpu_domain(obj, true);
	if (WARN_ON(ret)) {
		/* In the event of a disaster, abandon all caches and
		 * hope for the best.
		 */
		obj->base.read_domains = obj->base.write_domain = I915_GEM_DOMAIN_CPU;
	}

	if (obj->madv == I915_MADV_DONTNEED)
		obj->dirty = 0;

	if (obj->dirty) {
		struct address_space *mapping = file_inode(obj->base.filp)->i_mapping;
		char *vaddr = obj->phys_handle->vaddr;
		int i;

		for (i = 0; i < obj->base.size / PAGE_SIZE; i++) {
			struct page *page;
			char *dst;

			page = shmem_read_mapping_page(mapping, i);
			if (IS_ERR(page))
				continue;

			dst = kmap_atomic(page);
			drm_clflush_virt_range(vaddr, PAGE_SIZE);
			memcpy(dst, vaddr, PAGE_SIZE);
			kunmap_atomic(dst);

			set_page_dirty(page);
			if (obj->madv == I915_MADV_WILLNEED)
				mark_page_accessed(page);
			put_page(page);
			vaddr += PAGE_SIZE;
		}
		obj->dirty = 0;
	}

	sg_free_table(obj->pages);
	kfree(obj->pages);
}
static void
i915_gem_object_release_phys(struct drm_i915_gem_object *obj)
{
	drm_pci_free(obj->base.dev, obj->phys_handle);
}

static const struct drm_i915_gem_object_ops i915_gem_phys_ops = {
	.get_pages = i915_gem_object_get_pages_phys,
	.put_pages = i915_gem_object_put_pages_phys,
	.release = i915_gem_object_release_phys,
};
int
i915_gem_object_unbind(struct drm_i915_gem_object *obj)
{
	struct i915_vma *vma;
	LIST_HEAD(still_in_list);
	int ret = 0;

	/* The vma will only be freed if it is marked as closed, and if we wait
	 * upon rendering to the vma, we may unbind anything in the list.
	 */
	while ((vma = list_first_entry_or_null(&obj->vma_list,
					       struct i915_vma,
					       obj_link))) {
		list_move_tail(&vma->obj_link, &still_in_list);
		ret = i915_vma_unbind(vma);
		if (ret)
			break;
	}
	list_splice(&still_in_list, &obj->vma_list);

	return ret;
}
int
i915_gem_object_attach_phys(struct drm_i915_gem_object *obj,
			    int align)
{
	drm_dma_handle_t *phys;
	int ret;

	if (obj->phys_handle) {
		if ((unsigned long)obj->phys_handle->vaddr & (align - 1))
			return -EBUSY;

		return 0;
	}

	if (obj->madv != I915_MADV_WILLNEED)
		return -EFAULT;

	if (obj->base.filp == NULL)
		return -EINVAL;

	ret = i915_gem_object_unbind(obj);
	if (ret)
		return ret;

	ret = i915_gem_object_put_pages(obj);
	if (ret)
		return ret;

	/* create a new object */
	phys = drm_pci_alloc(obj->base.dev, obj->base.size, align);
	if (!phys)
		return -ENOMEM;

	obj->phys_handle = phys;
	obj->ops = &i915_gem_phys_ops;

	return i915_gem_object_get_pages(obj);
}
static int
i915_gem_phys_pwrite(struct drm_i915_gem_object *obj,
		     struct drm_i915_gem_pwrite *args,
		     struct drm_file *file_priv)
{
	struct drm_device *dev = obj->base.dev;
	void *vaddr = obj->phys_handle->vaddr + args->offset;
	char __user *user_data = u64_to_user_ptr(args->data_ptr);
	int ret;

	/* We manually control the domain here and pretend that it
	 * remains coherent i.e. in the GTT domain, like shmem_pwrite.
	 */
	ret = i915_gem_object_wait_rendering(obj, false);
	if (ret)
		return ret;

	intel_fb_obj_invalidate(obj, ORIGIN_CPU);
	if (__copy_from_user_inatomic_nocache(vaddr, user_data, args->size)) {
		unsigned long unwritten;

		/* The physical object once assigned is fixed for the lifetime
		 * of the obj, so we can safely drop the lock and continue
		 * to access vaddr.
		 */
		mutex_unlock(&dev->struct_mutex);
		unwritten = copy_from_user(vaddr, user_data, args->size);
		mutex_lock(&dev->struct_mutex);
		if (unwritten) {
			ret = -EFAULT;
			goto out;
		}
	}

	drm_clflush_virt_range(vaddr, args->size);
	i915_gem_chipset_flush(to_i915(dev));

out:
	intel_fb_obj_flush(obj, false, ORIGIN_CPU);
	return ret;
}
void *i915_gem_object_alloc(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	return kmem_cache_zalloc(dev_priv->objects, GFP_KERNEL);
}

void i915_gem_object_free(struct drm_i915_gem_object *obj)
{
	struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
	kmem_cache_free(dev_priv->objects, obj);
}
static int
i915_gem_create(struct drm_file *file,
		struct drm_device *dev,
		uint64_t size,
		uint32_t *handle_p)
{
	struct drm_i915_gem_object *obj;
	int ret;
	u32 handle;

	size = roundup(size, PAGE_SIZE);
	if (size == 0)
		return -EINVAL;

	/* Allocate the new object */
	obj = i915_gem_object_create(dev, size);
	if (IS_ERR(obj))
		return PTR_ERR(obj);

	ret = drm_gem_handle_create(file, &obj->base, &handle);
	/* drop reference from allocate - handle holds it now */
	i915_gem_object_put_unlocked(obj);
	if (ret)
		return ret;

	*handle_p = handle;
	return 0;
}
int
i915_gem_dumb_create(struct drm_file *file,
		     struct drm_device *dev,
		     struct drm_mode_create_dumb *args)
{
	/* have to work out size/pitch and return them */
	args->pitch = ALIGN(args->width * DIV_ROUND_UP(args->bpp, 8), 64);
	args->size = args->pitch * args->height;
	return i915_gem_create(file, dev,
			       args->size, &args->handle);
}
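/*
 * Example of the dumb-buffer size computation above: a 1920x1080, 32bpp
 * request gives pitch = ALIGN(1920 * 4, 64) = 7680 bytes and
 * size = 7680 * 1080 = 8294400 bytes, which i915_gem_create() then rounds up
 * to a whole number of pages.
 */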
/**
 * Creates a new mm object and returns a handle to it.
 * @dev: drm device pointer
 * @data: ioctl data blob
 * @file: drm file pointer
 */
int
i915_gem_create_ioctl(struct drm_device *dev, void *data,
		      struct drm_file *file)
{
	struct drm_i915_gem_create *args = data;

	return i915_gem_create(file, dev,
			       args->size, &args->handle);
}
static inline int
__copy_to_user_swizzled(char __user *cpu_vaddr,
			const char *gpu_vaddr, int gpu_offset,
			int length)
{
	int ret, cpu_offset = 0;

	while (length > 0) {
		int cacheline_end = ALIGN(gpu_offset + 1, 64);
		int this_length = min(cacheline_end - gpu_offset, length);
		int swizzled_gpu_offset = gpu_offset ^ 64;

		ret = __copy_to_user(cpu_vaddr + cpu_offset,
				     gpu_vaddr + swizzled_gpu_offset,
				     this_length);
		if (ret)
			return ret + length;

		cpu_offset += this_length;
		gpu_offset += this_length;
		length -= this_length;
	}

	return 0;
}
static inline int
__copy_from_user_swizzled(char *gpu_vaddr, int gpu_offset,
			  const char __user *cpu_vaddr,
			  int length)
{
	int ret, cpu_offset = 0;

	while (length > 0) {
		int cacheline_end = ALIGN(gpu_offset + 1, 64);
		int this_length = min(cacheline_end - gpu_offset, length);
		int swizzled_gpu_offset = gpu_offset ^ 64;

		ret = __copy_from_user(gpu_vaddr + swizzled_gpu_offset,
				       cpu_vaddr + cpu_offset,
				       this_length);
		if (ret)
			return ret + length;

		cpu_offset += this_length;
		gpu_offset += this_length;
		length -= this_length;
	}

	return 0;
}
/*
 * Pins the specified object's pages and synchronizes the object with
 * GPU accesses. Sets needs_clflush to non-zero if the caller should
 * flush the object from the CPU cache.
 */
int i915_gem_obj_prepare_shmem_read(struct drm_i915_gem_object *obj,
				    int *needs_clflush)
{
	int ret;

	*needs_clflush = 0;

	if (WARN_ON(!i915_gem_object_has_struct_page(obj)))
		return -EINVAL;

	ret = i915_gem_object_wait_rendering(obj, true);
	if (ret)
		return ret;

	if (!(obj->base.read_domains & I915_GEM_DOMAIN_CPU)) {
		/* If we're not in the cpu read domain, set ourself into the gtt
		 * read domain and manually flush cachelines (if required). This
		 * optimizes for the case when the gpu will dirty the data
		 * anyway again before the next pread happens. */
		*needs_clflush = !cpu_cache_is_coherent(obj->base.dev,
							obj->cache_level);
	}

	ret = i915_gem_object_get_pages(obj);
	if (ret)
		return ret;

	i915_gem_object_pin_pages(obj);

	return ret;
}
/* Per-page copy function for the shmem pread fastpath.
 * Flushes invalid cachelines before reading the target if
 * needs_clflush is set. */
static int
shmem_pread_fast(struct page *page, int shmem_page_offset, int page_length,
		 char __user *user_data,
		 bool page_do_bit17_swizzling, bool needs_clflush)
{
	char *vaddr;
	int ret;

	if (unlikely(page_do_bit17_swizzling))
		return -EINVAL;

	vaddr = kmap_atomic(page);
	if (needs_clflush)
		drm_clflush_virt_range(vaddr + shmem_page_offset,
				       page_length);
	ret = __copy_to_user_inatomic(user_data,
				      vaddr + shmem_page_offset,
				      page_length);
	kunmap_atomic(vaddr);

	return ret ? -EFAULT : 0;
}
static void
shmem_clflush_swizzled_range(char *addr, unsigned long length,
			     bool swizzled)
{
	if (unlikely(swizzled)) {
		unsigned long start = (unsigned long) addr;
		unsigned long end = (unsigned long) addr + length;

		/* For swizzling simply ensure that we always flush both
		 * channels. Lame, but simple and it works. Swizzled
		 * pwrite/pread is far from a hotpath - current userspace
		 * doesn't use it at all. */
		start = round_down(start, 128);
		end = round_up(end, 128);

		drm_clflush_virt_range((void *)start, end - start);
	} else {
		drm_clflush_virt_range(addr, length);
	}
}
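/*
 * For example, a swizzled flush of 32 bytes starting at offset 0x30 is
 * widened to the whole 0x00-0x7f span, so both 64-byte halves that the
 * swizzle may have exchanged are written back, regardless of which half the
 * caller thought it touched.
 */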
/* Only difference to the fast-path function is that this can handle bit17
 * and uses non-atomic copy and kmap functions. */
static int
shmem_pread_slow(struct page *page, int shmem_page_offset, int page_length,
		 char __user *user_data,
		 bool page_do_bit17_swizzling, bool needs_clflush)
{
	char *vaddr;
	int ret;

	vaddr = kmap(page);
	if (needs_clflush)
		shmem_clflush_swizzled_range(vaddr + shmem_page_offset,
					     page_length,
					     page_do_bit17_swizzling);

	if (page_do_bit17_swizzling)
		ret = __copy_to_user_swizzled(user_data,
					      vaddr, shmem_page_offset,
					      page_length);
	else
		ret = __copy_to_user(user_data,
				     vaddr + shmem_page_offset,
				     page_length);
	kunmap(page);

	return ret ? -EFAULT : 0;
}
static inline unsigned long
slow_user_access(struct io_mapping *mapping,
		 uint64_t page_base, int page_offset,
		 char __user *user_data,
		 unsigned long length, bool pwrite)
{
	void __iomem *ioaddr;
	void *vaddr;
	unsigned long unwritten;

	ioaddr = io_mapping_map_wc(mapping, page_base, PAGE_SIZE);
	/* We can use the cpu mem copy function because this is X86. */
	vaddr = (void __force *)ioaddr + page_offset;
	if (pwrite)
		unwritten = __copy_from_user(vaddr, user_data, length);
	else
		unwritten = __copy_to_user(user_data, vaddr, length);

	io_mapping_unmap(ioaddr);
	return unwritten;
}
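/*
 * slow_user_access() is the struct_mutex-free counterpart of
 * fast_user_write() further below: it uses a non-atomic write-combining
 * mapping and copy routines that may fault and sleep, so callers drop
 * dev->struct_mutex first and are then free to take page faults on the
 * userspace buffer.
 */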
static int
i915_gem_gtt_pread(struct drm_device *dev,
		   struct drm_i915_gem_object *obj, uint64_t size,
		   uint64_t data_offset, uint64_t data_ptr)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct i915_ggtt *ggtt = &dev_priv->ggtt;
	struct drm_mm_node node;
	char __user *user_data;
	uint64_t remain;
	uint64_t offset;
	int ret;

	ret = i915_gem_object_ggtt_pin(obj, NULL, 0, 0, PIN_MAPPABLE);
	if (ret) {
		ret = insert_mappable_node(dev_priv, &node, PAGE_SIZE);
		if (ret)
			goto out;

		ret = i915_gem_object_get_pages(obj);
		if (ret) {
			remove_mappable_node(&node);
			goto out;
		}

		i915_gem_object_pin_pages(obj);
	} else {
		node.start = i915_gem_obj_ggtt_offset(obj);
		node.allocated = false;
		ret = i915_gem_object_put_fence(obj);
		if (ret)
			goto out_unpin;
	}

	ret = i915_gem_object_set_to_gtt_domain(obj, false);
	if (ret)
		goto out_unpin;

	user_data = u64_to_user_ptr(data_ptr);
	remain = size;
	offset = data_offset;

	mutex_unlock(&dev->struct_mutex);
	if (likely(!i915.prefault_disable)) {
		ret = fault_in_multipages_writeable(user_data, remain);
		if (ret) {
			mutex_lock(&dev->struct_mutex);
			goto out_unpin;
		}
	}

	while (remain > 0) {
		/* Operation in this page
		 *
		 * page_base = page offset within aperture
		 * page_offset = offset within page
		 * page_length = bytes to copy for this page
		 */
		u32 page_base = node.start;
		unsigned page_offset = offset_in_page(offset);
		unsigned page_length = PAGE_SIZE - page_offset;
		page_length = remain < page_length ? remain : page_length;
		if (node.allocated) {
			wmb();
			ggtt->base.insert_page(&ggtt->base,
					       i915_gem_object_get_dma_address(obj, offset >> PAGE_SHIFT),
					       node.start,
					       I915_CACHE_NONE, 0);
			wmb();
		} else {
			page_base += offset & PAGE_MASK;
		}
		/* This is a slow read/write as it tries to read from
		 * and write to user memory which may result into page
		 * faults, and so we cannot perform this under struct_mutex.
		 */
		if (slow_user_access(ggtt->mappable, page_base,
				     page_offset, user_data,
				     page_length, false)) {
			ret = -EFAULT;
			break;
		}

		remain -= page_length;
		user_data += page_length;
		offset += page_length;
	}

	mutex_lock(&dev->struct_mutex);
	if (ret == 0 && (obj->base.read_domains & I915_GEM_DOMAIN_GTT) == 0) {
		/* The user has modified the object whilst we tried
		 * reading from it, and we now have no idea what domain
		 * the pages should be in. As we have just been touching
		 * them directly, flush everything back to the GTT
		 * domain.
		 */
		ret = i915_gem_object_set_to_gtt_domain(obj, false);
	}

out_unpin:
	if (node.allocated) {
		wmb();
		ggtt->base.clear_range(&ggtt->base,
				       node.start, node.size,
				       true);
		i915_gem_object_unpin_pages(obj);
		remove_mappable_node(&node);
	} else {
		i915_gem_object_ggtt_unpin(obj);
	}
out:
	return ret;
}
static int
i915_gem_shmem_pread(struct drm_device *dev,
		     struct drm_i915_gem_object *obj,
		     struct drm_i915_gem_pread *args,
		     struct drm_file *file)
{
	char __user *user_data;
	ssize_t remain;
	loff_t offset;
	int shmem_page_offset, page_length, ret = 0;
	int obj_do_bit17_swizzling, page_do_bit17_swizzling;
	int prefaulted = 0;
	int needs_clflush = 0;
	struct sg_page_iter sg_iter;

	if (!i915_gem_object_has_struct_page(obj))
		return -ENODEV;

	user_data = u64_to_user_ptr(args->data_ptr);
	remain = args->size;

	obj_do_bit17_swizzling = i915_gem_object_needs_bit17_swizzle(obj);

	ret = i915_gem_obj_prepare_shmem_read(obj, &needs_clflush);
	if (ret)
		return ret;

	offset = args->offset;

	for_each_sg_page(obj->pages->sgl, &sg_iter, obj->pages->nents,
			 offset >> PAGE_SHIFT) {
		struct page *page = sg_page_iter_page(&sg_iter);

		if (remain <= 0)
			break;

		/* Operation in this page
		 *
		 * shmem_page_offset = offset within page in shmem file
		 * page_length = bytes to copy for this page
		 */
		shmem_page_offset = offset_in_page(offset);
		page_length = remain;
		if ((shmem_page_offset + page_length) > PAGE_SIZE)
			page_length = PAGE_SIZE - shmem_page_offset;

		page_do_bit17_swizzling = obj_do_bit17_swizzling &&
			(page_to_phys(page) & (1 << 17)) != 0;

		ret = shmem_pread_fast(page, shmem_page_offset, page_length,
				       user_data, page_do_bit17_swizzling,
				       needs_clflush);
		if (ret == 0)
			goto next_page;

		mutex_unlock(&dev->struct_mutex);

		if (likely(!i915.prefault_disable) && !prefaulted) {
			ret = fault_in_multipages_writeable(user_data, remain);
			/* Userspace is tricking us, but we've already clobbered
			 * its pages with the prefault and promised to write the
			 * data up to the first fault. Hence ignore any errors
			 * and just continue. */
			(void)ret;
			prefaulted = 1;
		}

		ret = shmem_pread_slow(page, shmem_page_offset, page_length,
				       user_data, page_do_bit17_swizzling,
				       needs_clflush);

		mutex_lock(&dev->struct_mutex);

		if (ret)
			goto out;

next_page:
		remain -= page_length;
		user_data += page_length;
		offset += page_length;
	}

out:
	i915_gem_object_unpin_pages(obj);

	return ret;
}
/**
 * Reads data from the object referenced by handle.
 * @dev: drm device pointer
 * @data: ioctl data blob
 * @file: drm file pointer
 *
 * On error, the contents of *data are undefined.
 */
int
i915_gem_pread_ioctl(struct drm_device *dev, void *data,
		     struct drm_file *file)
{
	struct drm_i915_gem_pread *args = data;
	struct drm_i915_gem_object *obj;
	int ret;

	if (!access_ok(VERIFY_WRITE,
		       u64_to_user_ptr(args->data_ptr),
		       args->size))
		return -EFAULT;

	ret = i915_mutex_lock_interruptible(dev);
	if (ret)
		return ret;

	obj = i915_gem_object_lookup(file, args->handle);
	if (!obj) {
		ret = -ENOENT;
		goto unlock;
	}

	/* Bounds check source.  */
	if (args->offset > obj->base.size ||
	    args->size > obj->base.size - args->offset) {
		ret = -EINVAL;
		goto out;
	}

	trace_i915_gem_object_pread(obj, args->offset, args->size);

	ret = i915_gem_shmem_pread(dev, obj, args, file);

	/* pread for non shmem backed objects */
	if (ret == -EFAULT || ret == -ENODEV) {
		intel_runtime_pm_get(to_i915(dev));
		ret = i915_gem_gtt_pread(dev, obj, args->size,
					 args->offset, args->data_ptr);
		intel_runtime_pm_put(to_i915(dev));
	}

out:
	i915_gem_object_put(obj);
unlock:
	mutex_unlock(&dev->struct_mutex);

	return ret;
}
/* This is the fast write path which cannot handle
 * page faults in the source data
 */
static inline int
fast_user_write(struct io_mapping *mapping,
		loff_t page_base, int page_offset,
		char __user *user_data,
		int length)
{
	void __iomem *vaddr_atomic;
	void *vaddr;
	unsigned long unwritten;

	vaddr_atomic = io_mapping_map_atomic_wc(mapping, page_base);
	/* We can use the cpu mem copy function because this is X86. */
	vaddr = (void __force *)vaddr_atomic + page_offset;
	unwritten = __copy_from_user_inatomic_nocache(vaddr,
						      user_data, length);
	io_mapping_unmap_atomic(vaddr_atomic);
	return unwritten;
}
/**
 * This is the fast pwrite path, where we copy the data directly from the
 * user into the GTT, uncached.
 * @i915: i915 device private data
 * @obj: i915 gem object
 * @args: pwrite arguments structure
 * @file: drm file pointer
 */
static int
i915_gem_gtt_pwrite_fast(struct drm_i915_private *i915,
			 struct drm_i915_gem_object *obj,
			 struct drm_i915_gem_pwrite *args,
			 struct drm_file *file)
{
	struct i915_ggtt *ggtt = &i915->ggtt;
	struct drm_device *dev = obj->base.dev;
	struct drm_mm_node node;
	uint64_t remain, offset;
	char __user *user_data;
	int ret;
	bool hit_slow_path = false;

	if (obj->tiling_mode != I915_TILING_NONE)
		return -EFAULT;

	ret = i915_gem_object_ggtt_pin(obj, NULL, 0, 0,
				       PIN_MAPPABLE | PIN_NONBLOCK);
	if (ret) {
		ret = insert_mappable_node(i915, &node, PAGE_SIZE);
		if (ret)
			goto out;

		ret = i915_gem_object_get_pages(obj);
		if (ret) {
			remove_mappable_node(&node);
			goto out;
		}

		i915_gem_object_pin_pages(obj);
	} else {
		node.start = i915_gem_obj_ggtt_offset(obj);
		node.allocated = false;
		ret = i915_gem_object_put_fence(obj);
		if (ret)
			goto out_unpin;
	}

	ret = i915_gem_object_set_to_gtt_domain(obj, true);
	if (ret)
		goto out_unpin;

	intel_fb_obj_invalidate(obj, ORIGIN_GTT);

	user_data = u64_to_user_ptr(args->data_ptr);
	offset = args->offset;
	remain = args->size;
	while (remain) {
		/* Operation in this page
		 *
		 * page_base = page offset within aperture
		 * page_offset = offset within page
		 * page_length = bytes to copy for this page
		 */
		u32 page_base = node.start;
		unsigned page_offset = offset_in_page(offset);
		unsigned page_length = PAGE_SIZE - page_offset;
		page_length = remain < page_length ? remain : page_length;
		if (node.allocated) {
			wmb(); /* flush the write before we modify the GGTT */
			ggtt->base.insert_page(&ggtt->base,
					       i915_gem_object_get_dma_address(obj, offset >> PAGE_SHIFT),
					       node.start, I915_CACHE_NONE, 0);
			wmb(); /* flush modifications to the GGTT (insert_page) */
		} else {
			page_base += offset & PAGE_MASK;
		}
		/* If we get a fault while copying data, then (presumably) our
		 * source page isn't available. Return the error and we'll
		 * retry in the slow path.
		 * If the object is non-shmem backed, we retry again with the
		 * path that handles page fault.
		 */
		if (fast_user_write(ggtt->mappable, page_base,
				    page_offset, user_data, page_length)) {
			hit_slow_path = true;
			mutex_unlock(&dev->struct_mutex);
			if (slow_user_access(ggtt->mappable,
					     page_base,
					     page_offset, user_data,
					     page_length, true)) {
				ret = -EFAULT;
				mutex_lock(&dev->struct_mutex);
				goto out_flush;
			}

			mutex_lock(&dev->struct_mutex);
		}

		remain -= page_length;
		user_data += page_length;
		offset += page_length;
	}

out_flush:
	if (hit_slow_path) {
		if (ret == 0 &&
		    (obj->base.read_domains & I915_GEM_DOMAIN_GTT) == 0) {
			/* The user has modified the object whilst we tried
			 * reading from it, and we now have no idea what domain
			 * the pages should be in. As we have just been touching
			 * them directly, flush everything back to the GTT
			 * domain.
			 */
			ret = i915_gem_object_set_to_gtt_domain(obj, false);
		}
	}

	intel_fb_obj_flush(obj, false, ORIGIN_GTT);
out_unpin:
	if (node.allocated) {
		wmb();
		ggtt->base.clear_range(&ggtt->base,
				       node.start, node.size,
				       true);
		i915_gem_object_unpin_pages(obj);
		remove_mappable_node(&node);
	} else {
		i915_gem_object_ggtt_unpin(obj);
	}
out:
	return ret;
}
/* Per-page copy function for the shmem pwrite fastpath.
 * Flushes invalid cachelines before writing to the target if
 * needs_clflush_before is set and flushes out any written cachelines after
 * writing if needs_clflush is set. */
static int
shmem_pwrite_fast(struct page *page, int shmem_page_offset, int page_length,
		  char __user *user_data,
		  bool page_do_bit17_swizzling,
		  bool needs_clflush_before,
		  bool needs_clflush_after)
{
	char *vaddr;
	int ret;

	if (unlikely(page_do_bit17_swizzling))
		return -EINVAL;

	vaddr = kmap_atomic(page);
	if (needs_clflush_before)
		drm_clflush_virt_range(vaddr + shmem_page_offset,
				       page_length);
	ret = __copy_from_user_inatomic(vaddr + shmem_page_offset,
					user_data, page_length);
	if (needs_clflush_after)
		drm_clflush_virt_range(vaddr + shmem_page_offset,
				       page_length);
	kunmap_atomic(vaddr);

	return ret ? -EFAULT : 0;
}
/* Only difference to the fast-path function is that this can handle bit17
 * and uses non-atomic copy and kmap functions. */
static int
shmem_pwrite_slow(struct page *page, int shmem_page_offset, int page_length,
		  char __user *user_data,
		  bool page_do_bit17_swizzling,
		  bool needs_clflush_before,
		  bool needs_clflush_after)
{
	char *vaddr;
	int ret;

	vaddr = kmap(page);
	if (unlikely(needs_clflush_before || page_do_bit17_swizzling))
		shmem_clflush_swizzled_range(vaddr + shmem_page_offset,
					     page_length,
					     page_do_bit17_swizzling);
	if (page_do_bit17_swizzling)
		ret = __copy_from_user_swizzled(vaddr, shmem_page_offset,
						user_data,
						page_length);
	else
		ret = __copy_from_user(vaddr + shmem_page_offset,
				       user_data,
				       page_length);
	if (needs_clflush_after)
		shmem_clflush_swizzled_range(vaddr + shmem_page_offset,
					     page_length,
					     page_do_bit17_swizzling);
	kunmap(page);

	return ret ? -EFAULT : 0;
}
static int
i915_gem_shmem_pwrite(struct drm_device *dev,
		      struct drm_i915_gem_object *obj,
		      struct drm_i915_gem_pwrite *args,
		      struct drm_file *file)
{
	ssize_t remain;
	loff_t offset;
	char __user *user_data;
	int shmem_page_offset, page_length, ret = 0;
	int obj_do_bit17_swizzling, page_do_bit17_swizzling;
	int hit_slowpath = 0;
	int needs_clflush_after = 0;
	int needs_clflush_before = 0;
	struct sg_page_iter sg_iter;

	user_data = u64_to_user_ptr(args->data_ptr);
	remain = args->size;

	obj_do_bit17_swizzling = i915_gem_object_needs_bit17_swizzle(obj);

	ret = i915_gem_object_wait_rendering(obj, false);
	if (ret)
		return ret;

	if (obj->base.write_domain != I915_GEM_DOMAIN_CPU) {
		/* If we're not in the cpu write domain, set ourself into the gtt
		 * write domain and manually flush cachelines (if required). This
		 * optimizes for the case when the gpu will use the data
		 * right away and we therefore have to clflush anyway. */
		needs_clflush_after = cpu_write_needs_clflush(obj);
	}
	/* Same trick applies to invalidate partially written cachelines read
	 * before writing. */
	if ((obj->base.read_domains & I915_GEM_DOMAIN_CPU) == 0)
		needs_clflush_before =
			!cpu_cache_is_coherent(dev, obj->cache_level);

	ret = i915_gem_object_get_pages(obj);
	if (ret)
		return ret;

	intel_fb_obj_invalidate(obj, ORIGIN_CPU);

	i915_gem_object_pin_pages(obj);

	offset = args->offset;

	for_each_sg_page(obj->pages->sgl, &sg_iter, obj->pages->nents,
			 offset >> PAGE_SHIFT) {
		struct page *page = sg_page_iter_page(&sg_iter);
		int partial_cacheline_write;

		if (remain <= 0)
			break;

		/* Operation in this page
		 *
		 * shmem_page_offset = offset within page in shmem file
		 * page_length = bytes to copy for this page
		 */
		shmem_page_offset = offset_in_page(offset);

		page_length = remain;
		if ((shmem_page_offset + page_length) > PAGE_SIZE)
			page_length = PAGE_SIZE - shmem_page_offset;

		/* If we don't overwrite a cacheline completely we need to be
		 * careful to have up-to-date data by first clflushing. Don't
		 * overcomplicate things and flush the entire patch. */
		partial_cacheline_write = needs_clflush_before &&
			((shmem_page_offset | page_length)
				& (boot_cpu_data.x86_clflush_size - 1));
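		/* Example: with a 64-byte clflush line, a 100-byte write
		 * starting at page offset 0x10 straddles partial cachelines
		 * at both ends and so needs the pre-write flush, whereas a
		 * 64-byte write at offset 0x40 covers its cacheline exactly
		 * and can skip it. */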
		page_do_bit17_swizzling = obj_do_bit17_swizzling &&
			(page_to_phys(page) & (1 << 17)) != 0;

		ret = shmem_pwrite_fast(page, shmem_page_offset, page_length,
					user_data, page_do_bit17_swizzling,
					partial_cacheline_write,
					needs_clflush_after);
		if (ret == 0)
			goto next_page;

		hit_slowpath = 1;
		mutex_unlock(&dev->struct_mutex);
		ret = shmem_pwrite_slow(page, shmem_page_offset, page_length,
					user_data, page_do_bit17_swizzling,
					partial_cacheline_write,
					needs_clflush_after);

		mutex_lock(&dev->struct_mutex);

		if (ret)
			goto out;

next_page:
		remain -= page_length;
		user_data += page_length;
		offset += page_length;
	}

out:
	i915_gem_object_unpin_pages(obj);

	if (hit_slowpath) {
		/*
		 * Fixup: Flush cpu caches in case we didn't flush the dirty
		 * cachelines in-line while writing and the object moved
		 * out of the cpu write domain while we've dropped the lock.
		 */
		if (!needs_clflush_after &&
		    obj->base.write_domain != I915_GEM_DOMAIN_CPU) {
			if (i915_gem_clflush_object(obj, obj->pin_display))
				needs_clflush_after = true;
		}
	}

	if (needs_clflush_after)
		i915_gem_chipset_flush(to_i915(dev));
	else
		obj->cache_dirty = true;

	intel_fb_obj_flush(obj, false, ORIGIN_CPU);
	return ret;
}
/**
 * Writes data to the object referenced by handle.
 * @dev: drm device pointer
 * @data: ioctl data blob
 * @file: drm file pointer
 *
 * On error, the contents of the buffer that were to be modified are undefined.
 */
int
i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
		      struct drm_file *file)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct drm_i915_gem_pwrite *args = data;
	struct drm_i915_gem_object *obj;
	int ret;

	if (args->size == 0)
		return 0;

	if (!access_ok(VERIFY_READ,
		       u64_to_user_ptr(args->data_ptr),
		       args->size))
		return -EFAULT;

	if (likely(!i915.prefault_disable)) {
		ret = fault_in_multipages_readable(u64_to_user_ptr(args->data_ptr),
						   args->size);
		if (ret)
			return -EFAULT;
	}

	intel_runtime_pm_get(dev_priv);

	ret = i915_mutex_lock_interruptible(dev);
	if (ret)
		goto put_rpm;

	obj = i915_gem_object_lookup(file, args->handle);
	if (!obj) {
		ret = -ENOENT;
		goto unlock;
	}

	/* Bounds check destination. */
	if (args->offset > obj->base.size ||
	    args->size > obj->base.size - args->offset) {
		ret = -EINVAL;
		goto out;
	}

	trace_i915_gem_object_pwrite(obj, args->offset, args->size);

	ret = -EFAULT;
	/* We can only do the GTT pwrite on untiled buffers, as otherwise
	 * it would end up going through the fenced access, and we'll get
	 * different detiling behavior between reading and writing.
	 * pread/pwrite currently are reading and writing from the CPU
	 * perspective, requiring manual detiling by the client.
	 */
	if (!i915_gem_object_has_struct_page(obj) ||
	    cpu_write_needs_clflush(obj)) {
		ret = i915_gem_gtt_pwrite_fast(dev_priv, obj, args, file);
		/* Note that the gtt paths might fail with non-page-backed user
		 * pointers (e.g. gtt mappings when moving data between
		 * textures). Fallback to the shmem path in that case. */
	}

	if (ret == -EFAULT || ret == -ENOSPC) {
		if (obj->phys_handle)
			ret = i915_gem_phys_pwrite(obj, args, file);
		else if (i915_gem_object_has_struct_page(obj))
			ret = i915_gem_shmem_pwrite(dev, obj, args, file);
	}

out:
	i915_gem_object_put(obj);
unlock:
	mutex_unlock(&dev->struct_mutex);
put_rpm:
	intel_runtime_pm_put(dev_priv);

	return ret;
}
/**
 * Ensures that all rendering to the object has completed and the object is
 * safe to unbind from the GTT or access from the CPU.
 * @obj: i915 gem object
 * @readonly: waiting for read access or write
 */
int
i915_gem_object_wait_rendering(struct drm_i915_gem_object *obj,
			       bool readonly)
{
	struct reservation_object *resv;
	struct i915_gem_active *active;
	unsigned long active_mask;
	int idx;

	lockdep_assert_held(&obj->base.dev->struct_mutex);

	if (!readonly) {
		active = obj->last_read;
		active_mask = obj->active;
	} else {
		active_mask = 1;
		active = &obj->last_write;
	}

	for_each_active(active_mask, idx) {
		int ret;

		ret = i915_gem_active_wait(&active[idx],
					   &obj->base.dev->struct_mutex);
		if (ret)
			return ret;
	}

	resv = i915_gem_object_get_dmabuf_resv(obj);
	if (resv) {
		long err;

		err = reservation_object_wait_timeout_rcu(resv, !readonly, true,
							   MAX_SCHEDULE_TIMEOUT);
		if (err < 0)
			return err;
	}

	return 0;
}
/* A nonblocking variant of the above wait. This is a highly dangerous routine
 * as the object state may change during this call.
 */
static __must_check int
i915_gem_object_wait_rendering__nonblocking(struct drm_i915_gem_object *obj,
					    struct intel_rps_client *rps,
					    bool readonly)
{
	struct drm_device *dev = obj->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct drm_i915_gem_request *requests[I915_NUM_ENGINES];
	struct i915_gem_active *active;
	unsigned long active_mask;
	int ret, i, n = 0;

	BUG_ON(!mutex_is_locked(&dev->struct_mutex));
	BUG_ON(!dev_priv->mm.interruptible);

	active_mask = obj->active;
	if (!active_mask)
		return 0;

	if (!readonly) {
		active = obj->last_read;
	} else {
		active_mask = 1;
		active = &obj->last_write;
	}

	for_each_active(active_mask, i) {
		struct drm_i915_gem_request *req;

		req = i915_gem_active_get(&active[i],
					  &obj->base.dev->struct_mutex);
		if (req)
			requests[n++] = req;
	}

	mutex_unlock(&dev->struct_mutex);
	ret = 0;
	for (i = 0; ret == 0 && i < n; i++)
		ret = i915_wait_request(requests[i], true, NULL, rps);
	mutex_lock(&dev->struct_mutex);

	for (i = 0; i < n; i++)
		i915_gem_request_put(requests[i]);

	return ret;
}
static struct intel_rps_client *to_rps_client(struct drm_file *file)
{
	struct drm_i915_file_private *fpriv = file->driver_priv;

	return &fpriv->rps;
}

static enum fb_op_origin
write_origin(struct drm_i915_gem_object *obj, unsigned domain)
{
	return domain == I915_GEM_DOMAIN_GTT && !obj->has_wc_mmap ?
	       ORIGIN_GTT : ORIGIN_CPU;
}
/**
 * Called when user space prepares to use an object with the CPU, either
 * through the mmap ioctl's mapping or a GTT mapping.
 * @dev: drm device
 * @data: ioctl data blob
 * @file: drm file
 */
int
i915_gem_set_domain_ioctl(struct drm_device *dev, void *data,
			  struct drm_file *file)
{
	struct drm_i915_gem_set_domain *args = data;
	struct drm_i915_gem_object *obj;
	uint32_t read_domains = args->read_domains;
	uint32_t write_domain = args->write_domain;
	int ret;

	/* Only handle setting domains to types used by the CPU. */
	if (write_domain & I915_GEM_GPU_DOMAINS)
		return -EINVAL;

	if (read_domains & I915_GEM_GPU_DOMAINS)
		return -EINVAL;

	/* Having something in the write domain implies it's in the read
	 * domain, and only that read domain. Enforce that in the request.
	 */
	if (write_domain != 0 && read_domains != write_domain)
		return -EINVAL;

	ret = i915_mutex_lock_interruptible(dev);
	if (ret)
		return ret;

	obj = i915_gem_object_lookup(file, args->handle);
	if (!obj) {
		ret = -ENOENT;
		goto unlock;
	}

	/* Try to flush the object off the GPU without holding the lock.
	 * We will repeat the flush holding the lock in the normal manner
	 * to catch cases where we are gazumped.
	 */
	ret = i915_gem_object_wait_rendering__nonblocking(obj,
							  to_rps_client(file),
							  !write_domain);
	if (ret)
		goto unref;

	if (read_domains & I915_GEM_DOMAIN_GTT)
		ret = i915_gem_object_set_to_gtt_domain(obj, write_domain != 0);
	else
		ret = i915_gem_object_set_to_cpu_domain(obj, write_domain != 0);

	if (write_domain != 0)
		intel_fb_obj_invalidate(obj, write_origin(obj, write_domain));

unref:
	i915_gem_object_put(obj);
unlock:
	mutex_unlock(&dev->struct_mutex);
	return ret;
}
/**
 * Called when user space has done writes to this buffer
 * @dev: drm device
 * @data: ioctl data blob
 * @file: drm file
 */
int
i915_gem_sw_finish_ioctl(struct drm_device *dev, void *data,
			 struct drm_file *file)
{
	struct drm_i915_gem_sw_finish *args = data;
	struct drm_i915_gem_object *obj;
	int ret = 0;

	ret = i915_mutex_lock_interruptible(dev);
	if (ret)
		return ret;

	obj = i915_gem_object_lookup(file, args->handle);
	if (!obj) {
		ret = -ENOENT;
		goto unlock;
	}

	/* Pinned buffers may be scanout, so flush the cache */
	if (obj->pin_display)
		i915_gem_object_flush_cpu_write_domain(obj);

	i915_gem_object_put(obj);
unlock:
	mutex_unlock(&dev->struct_mutex);
	return ret;
}
/**
 * i915_gem_mmap_ioctl - Maps the contents of an object, returning the address
 *			 it is mapped to.
 * @dev: drm device
 * @data: ioctl data blob
 * @file: drm file
 *
 * While the mapping holds a reference on the contents of the object, it doesn't
 * imply a ref on the object itself.
 *
 * IMPORTANT:
 *
 * DRM driver writers who look a this function as an example for how to do GEM
 * mmap support, please don't implement mmap support like here. The modern way
 * to implement DRM mmap support is with an mmap offset ioctl (like
 * i915_gem_mmap_gtt) and then using the mmap syscall on the DRM fd directly.
 * That way debug tooling like valgrind will understand what's going on, hiding
 * the mmap call in a driver private ioctl will break that. The i915 driver only
 * does cpu mmaps this way because we didn't know better.
 */
int
i915_gem_mmap_ioctl(struct drm_device *dev, void *data,
		    struct drm_file *file)
{
	struct drm_i915_gem_mmap *args = data;
	struct drm_i915_gem_object *obj;
	unsigned long addr;

	if (args->flags & ~(I915_MMAP_WC))
		return -EINVAL;

	if (args->flags & I915_MMAP_WC && !boot_cpu_has(X86_FEATURE_PAT))
		return -ENODEV;

	obj = i915_gem_object_lookup(file, args->handle);
	if (!obj)
		return -ENOENT;

	/* prime objects have no backing filp to GEM mmap
	 * pages from.
	 */
	if (!obj->base.filp) {
		i915_gem_object_put_unlocked(obj);
		return -EINVAL;
	}

	addr = vm_mmap(obj->base.filp, 0, args->size,
		       PROT_READ | PROT_WRITE, MAP_SHARED,
		       args->offset);
	if (args->flags & I915_MMAP_WC) {
		struct mm_struct *mm = current->mm;
		struct vm_area_struct *vma;

		if (down_write_killable(&mm->mmap_sem)) {
			i915_gem_object_put_unlocked(obj);
			return -EINTR;
		}
		vma = find_vma(mm, addr);
		if (vma)
			vma->vm_page_prot =
				pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
		else
			addr = -ENOMEM;
		up_write(&mm->mmap_sem);

		/* This may race, but that's ok, it only gets set */
		WRITE_ONCE(obj->has_wc_mmap, true);
	}
	i915_gem_object_put_unlocked(obj);
	if (IS_ERR((void *)addr))
		return addr;

	args->addr_ptr = (uint64_t) addr;

	return 0;
}
/**
 * i915_gem_fault - fault a page into the GTT
 * @vma: VMA in question
 * @vmf: fault info
 *
 * The fault handler is set up by drm_gem_mmap() when a object is GTT mapped
 * from userspace. The fault handler takes care of binding the object to
 * the GTT (if needed), allocating and programming a fence register (again,
 * only if needed based on whether the old reg is still valid or the object
 * is tiled) and inserting a new PTE into the faulting process.
 *
 * Note that the faulting process may involve evicting existing objects
 * from the GTT and/or fence registers to make room. So performance may
 * suffer if the GTT working set is large or there are few fence registers
 * left.
 */
int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct drm_i915_gem_object *obj = to_intel_bo(vma->vm_private_data);
	struct drm_device *dev = obj->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct i915_ggtt *ggtt = &dev_priv->ggtt;
	struct i915_ggtt_view view = i915_ggtt_view_normal;
	pgoff_t page_offset;
	unsigned long pfn;
	int ret = 0;
	bool write = !!(vmf->flags & FAULT_FLAG_WRITE);

	intel_runtime_pm_get(dev_priv);

	/* We don't use vmf->pgoff since that has the fake offset */
	page_offset = ((unsigned long)vmf->virtual_address - vma->vm_start) >>
		PAGE_SHIFT;

	ret = i915_mutex_lock_interruptible(dev);
	if (ret)
		goto out;

	trace_i915_gem_object_fault(obj, page_offset, true, write);

	/* Try to flush the object off the GPU first without holding the lock.
	 * Upon reacquiring the lock, we will perform our sanity checks and then
	 * repeat the flush holding the lock in the normal manner to catch cases
	 * where we are gazumped.
	 */
	ret = i915_gem_object_wait_rendering__nonblocking(obj, NULL, !write);
	if (ret)
		goto unlock;

	/* Access to snoopable pages through the GTT is incoherent. */
	if (obj->cache_level != I915_CACHE_NONE && !HAS_LLC(dev)) {
		ret = -EFAULT;
		goto unlock;
	}

	/* Use a partial view if the object is bigger than the aperture. */
	if (obj->base.size >= ggtt->mappable_end &&
	    obj->tiling_mode == I915_TILING_NONE) {
		static const unsigned int chunk_size = 256; // 1 MiB

		memset(&view, 0, sizeof(view));
		view.type = I915_GGTT_VIEW_PARTIAL;
		view.params.partial.offset = rounddown(page_offset, chunk_size);
		view.params.partial.size =
			min_t(unsigned int,
			      chunk_size,
			      (vma->vm_end - vma->vm_start)/PAGE_SIZE -
			      view.params.partial.offset);
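		/* e.g. faulting at page 5000 of a large object picks a
		 * 256-page (1 MiB) window starting at page 4864, the largest
		 * chunk_size multiple below the faulting page, clamped so it
		 * does not run past the end of the vma. */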
	}

	/* Now pin it into the GTT if needed */
	ret = i915_gem_object_ggtt_pin(obj, &view, 0, 0, PIN_MAPPABLE);
	if (ret)
		goto unlock;

	ret = i915_gem_object_set_to_gtt_domain(obj, write);
	if (ret)
		goto unpin;

	ret = i915_gem_object_get_fence(obj);
	if (ret)
		goto unpin;

	/* Finally, remap it using the new GTT offset */
	pfn = ggtt->mappable_base +
		i915_gem_obj_ggtt_offset_view(obj, &view);
	pfn >>= PAGE_SHIFT;

	if (unlikely(view.type == I915_GGTT_VIEW_PARTIAL)) {
		/* Overriding existing pages in partial view does not cause
		 * us any trouble as TLBs are still valid because the fault
		 * is due to userspace losing part of the mapping or never
		 * having accessed it before (at this partials' range).
		 */
		unsigned long base = vma->vm_start +
			(view.params.partial.offset << PAGE_SHIFT);
		unsigned int i;

		for (i = 0; i < view.params.partial.size; i++) {
			ret = vm_insert_pfn(vma, base + i * PAGE_SIZE, pfn + i);
			if (ret)
				break;
		}

		obj->fault_mappable = true;
	} else {
		if (!obj->fault_mappable) {
			unsigned long size = min_t(unsigned long,
						   vma->vm_end - vma->vm_start,
						   obj->base.size);
			int i;

			for (i = 0; i < size >> PAGE_SHIFT; i++) {
				ret = vm_insert_pfn(vma,
						    (unsigned long)vma->vm_start + i * PAGE_SIZE,
						    pfn + i);
				if (ret)
					break;
			}

			obj->fault_mappable = true;
		} else
			ret = vm_insert_pfn(vma,
					    (unsigned long)vmf->virtual_address,
					    pfn + page_offset);
	}
unpin:
	i915_gem_object_ggtt_unpin_view(obj, &view);
unlock:
	mutex_unlock(&dev->struct_mutex);
out:
	switch (ret) {
	case -EIO:
		/*
		 * We eat errors when the gpu is terminally wedged to avoid
		 * userspace unduly crashing (gl has no provisions for mmaps to
		 * fail). But any other -EIO isn't ours (e.g. swap in failure)
		 * and so needs to be reported.
		 */
		if (!i915_terminally_wedged(&dev_priv->gpu_error)) {
			ret = VM_FAULT_SIGBUS;
			break;
		}
	case -EAGAIN:
		/*
		 * EAGAIN means the gpu is hung and we'll wait for the error
		 * handler to reset everything when re-faulting in
		 * i915_mutex_lock_interruptible.
		 */
	case 0:
	case -ERESTARTSYS:
	case -EINTR:
	case -EBUSY:
		/*
		 * EBUSY is ok: this just means that another thread
		 * already did the job.
		 */
		ret = VM_FAULT_NOPAGE;
		break;
	case -ENOMEM:
		ret = VM_FAULT_OOM;
		break;
	case -ENOSPC:
	case -EFAULT:
		ret = VM_FAULT_SIGBUS;
		break;
	default:
		WARN_ONCE(ret, "unhandled error in i915_gem_fault: %i\n", ret);
		ret = VM_FAULT_SIGBUS;
		break;
	}

	intel_runtime_pm_put(dev_priv);
	return ret;
}
/**
 * i915_gem_release_mmap - remove physical page mappings
 * @obj: obj in question
 *
 * Preserve the reservation of the mmapping with the DRM core code, but
 * relinquish ownership of the pages back to the system.
 *
 * It is vital that we remove the page mapping if we have mapped a tiled
 * object through the GTT and then lose the fence register due to
 * resource pressure. Similarly if the object has been moved out of the
 * aperture, then pages mapped into userspace must be revoked. Removing the
 * mapping will then trigger a page fault on the next user access, allowing
 * fixup by i915_gem_fault().
 */
void
i915_gem_release_mmap(struct drm_i915_gem_object *obj)
{
	/* Serialisation between user GTT access and our code depends upon
	 * revoking the CPU's PTE whilst the mutex is held. The next user
	 * pagefault then has to wait until we release the mutex.
	 */
	lockdep_assert_held(&obj->base.dev->struct_mutex);

	if (!obj->fault_mappable)
		return;

	drm_vma_node_unmap(&obj->base.vma_node,
			   obj->base.dev->anon_inode->i_mapping);

	/* Ensure that the CPU's PTE are revoked and there are not outstanding
	 * memory transactions from userspace before we return. The TLB
	 * flushing implied above by changing the PTE above *should* be
	 * sufficient, an extra barrier here just provides us with a bit
	 * of paranoid documentation about our requirement to serialise
	 * memory writes before touching registers / GSM.
	 */
	wmb();

	obj->fault_mappable = false;
}
void
i915_gem_release_all_mmaps(struct drm_i915_private *dev_priv)
{
	struct drm_i915_gem_object *obj;

	list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list)
		i915_gem_release_mmap(obj);
}
/**
 * i915_gem_get_ggtt_size - return required global GTT size for an object
 * @dev_priv: i915 device
 * @size: object size
 * @tiling_mode: tiling mode
 *
 * Return the required global GTT size for an object, taking into account
 * potential fence register mapping.
 */
u64 i915_gem_get_ggtt_size(struct drm_i915_private *dev_priv,
			   u64 size, int tiling_mode)
{
	u64 ggtt_size;

	GEM_BUG_ON(size == 0);

	if (INTEL_GEN(dev_priv) >= 4 ||
	    tiling_mode == I915_TILING_NONE)
		return size;

	/* Previous chips need a power-of-two fence region when tiling */
	if (IS_GEN3(dev_priv))
		ggtt_size = 1024*1024;
	else
		ggtt_size = 512*1024;

	while (ggtt_size < size)
		ggtt_size <<= 1;

	return ggtt_size;
}
/**
 * i915_gem_get_ggtt_alignment - return required global GTT alignment
 * @dev_priv: i915 device
 * @size: object size
 * @tiling_mode: tiling mode
 * @fenced: is fenced alignment required or not
 *
 * Return the required global GTT alignment for an object, taking into account
 * potential fence register mapping.
 */
u64 i915_gem_get_ggtt_alignment(struct drm_i915_private *dev_priv, u64 size,
				int tiling_mode, bool fenced)
{
	GEM_BUG_ON(size == 0);

	/*
	 * Minimum alignment is 4k (GTT page size), but might be greater
	 * if a fence register is needed for the object.
	 */
	if (INTEL_GEN(dev_priv) >= 4 || (!fenced && IS_G33(dev_priv)) ||
	    tiling_mode == I915_TILING_NONE)
		return 4096;

	/*
	 * Previous chips need to be aligned to the size of the smallest
	 * fence register that can contain the object.
	 */
	return i915_gem_get_ggtt_size(dev_priv, size, tiling_mode);
}
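/*
 * Worked example for the two helpers above: on gen3 a 700 KiB tiled object
 * needs a fence region rounded up to the next power of two, so
 * i915_gem_get_ggtt_size() returns 1 MiB and the fenced alignment follows
 * suit; on gen4+ the same object only needs its natural size and 4 KiB
 * alignment.
 */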
static int i915_gem_object_create_mmap_offset(struct drm_i915_gem_object *obj)
{
	struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
	int ret;

	dev_priv->mm.shrinker_no_lock_stealing = true;

	ret = drm_gem_create_mmap_offset(&obj->base);
	if (ret != -ENOSPC)
		goto out;

	/* Badly fragmented mmap space? The only way we can recover
	 * space is by destroying unwanted objects. We can't randomly release
	 * mmap_offsets as userspace expects them to be persistent for the
	 * lifetime of the objects. The closest we can is to release the
	 * offsets on purgeable objects by truncating it and marking it purged,
	 * which prevents userspace from ever using that object again.
	 */
	i915_gem_shrink(dev_priv,
			obj->base.size >> PAGE_SHIFT,
			I915_SHRINK_BOUND |
			I915_SHRINK_UNBOUND |
			I915_SHRINK_PURGEABLE);
	ret = drm_gem_create_mmap_offset(&obj->base);
	if (ret != -ENOSPC)
		goto out;

	i915_gem_shrink_all(dev_priv);
	ret = drm_gem_create_mmap_offset(&obj->base);
out:
	dev_priv->mm.shrinker_no_lock_stealing = false;

	return ret;
}

static void i915_gem_object_free_mmap_offset(struct drm_i915_gem_object *obj)
{
	drm_gem_free_mmap_offset(&obj->base);
}
int
i915_gem_mmap_gtt(struct drm_file *file,
		  struct drm_device *dev,
		  uint32_t handle,
		  uint64_t *offset)
{
	struct drm_i915_gem_object *obj;
	int ret;

	ret = i915_mutex_lock_interruptible(dev);
	if (ret)
		return ret;

	obj = i915_gem_object_lookup(file, handle);
	if (!obj) {
		ret = -ENOENT;
		goto unlock;
	}

	if (obj->madv != I915_MADV_WILLNEED) {
		DRM_DEBUG("Attempting to mmap a purgeable buffer\n");
		ret = -EFAULT;
		goto out;
	}

	ret = i915_gem_object_create_mmap_offset(obj);
	if (ret)
		goto out;

	*offset = drm_vma_node_offset_addr(&obj->base.vma_node);

out:
	i915_gem_object_put(obj);
unlock:
	mutex_unlock(&dev->struct_mutex);
	return ret;
}
/**
 * i915_gem_mmap_gtt_ioctl - prepare an object for GTT mmap'ing
 * @dev: DRM device
 * @data: GTT mapping ioctl data
 * @file: GEM object info
 *
 * Simply returns the fake offset to userspace so it can mmap it.
 * The mmap call will end up in drm_gem_mmap(), which will set things
 * up so we can get faults in the handler above.
 *
 * The fault handler will take care of binding the object into the GTT
 * (since it may have been evicted to make room for something), allocating
 * a fence register, and mapping the appropriate aperture address into
 * userspace.
 */
int
i915_gem_mmap_gtt_ioctl(struct drm_device *dev, void *data,
			struct drm_file *file)
{
	struct drm_i915_gem_mmap_gtt *args = data;

	return i915_gem_mmap_gtt(file, dev, args->handle, &args->offset);
}
/* Immediately discard the backing storage */
static void
i915_gem_object_truncate(struct drm_i915_gem_object *obj)
{
	i915_gem_object_free_mmap_offset(obj);

	if (obj->base.filp == NULL)
		return;

	/* Our goal here is to return as much of the memory as
	 * is possible back to the system as we are called from OOM.
	 * To do this we must instruct the shmfs to drop all of its
	 * backing pages, *now*.
	 */
	shmem_truncate_range(file_inode(obj->base.filp), 0, (loff_t)-1);
	obj->madv = __I915_MADV_PURGED;
}
/* Try to discard unwanted pages */
static void
i915_gem_object_invalidate(struct drm_i915_gem_object *obj)
{
	struct address_space *mapping;

	switch (obj->madv) {
	case I915_MADV_DONTNEED:
		i915_gem_object_truncate(obj);
	case __I915_MADV_PURGED:
		return;
	}

	if (obj->base.filp == NULL)
		return;

	mapping = file_inode(obj->base.filp)->i_mapping,
	invalidate_mapping_pages(mapping, 0, (loff_t)-1);
}
static void
i915_gem_object_put_pages_gtt(struct drm_i915_gem_object *obj)
{
	struct sgt_iter sgt_iter;
	struct page *page;
	int ret;

	BUG_ON(obj->madv == __I915_MADV_PURGED);

	ret = i915_gem_object_set_to_cpu_domain(obj, true);
	if (WARN_ON(ret)) {
		/* In the event of a disaster, abandon all caches and
		 * hope for the best.
		 */
		i915_gem_clflush_object(obj, true);
		obj->base.read_domains = obj->base.write_domain = I915_GEM_DOMAIN_CPU;
	}

	i915_gem_gtt_finish_object(obj);

	if (i915_gem_object_needs_bit17_swizzle(obj))
		i915_gem_object_save_bit_17_swizzle(obj);

	if (obj->madv == I915_MADV_DONTNEED)
		obj->dirty = 0;

	for_each_sgt_page(page, sgt_iter, obj->pages) {
		if (obj->dirty)
			set_page_dirty(page);

		if (obj->madv == I915_MADV_WILLNEED)
			mark_page_accessed(page);

		put_page(page);
	}
	obj->dirty = 0;

	sg_free_table(obj->pages);
	kfree(obj->pages);
}
int
i915_gem_object_put_pages(struct drm_i915_gem_object *obj)
{
	const struct drm_i915_gem_object_ops *ops = obj->ops;

	if (obj->pages == NULL)
		return 0;

	if (obj->pages_pin_count)
		return -EBUSY;

	GEM_BUG_ON(obj->bind_count);

	/* ->put_pages might need to allocate memory for the bit17 swizzle
	 * array, hence protect them from being reaped by removing them from gtt
	 * lists early. */
	list_del(&obj->global_list);

	if (obj->mapping) {
		if (is_vmalloc_addr(obj->mapping))
			vunmap(obj->mapping);
		else
			kunmap(kmap_to_page(obj->mapping));
		obj->mapping = NULL;
	}

	ops->put_pages(obj);
	obj->pages = NULL;

	i915_gem_object_invalidate(obj);

	return 0;
}
static int
i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj)
{
	struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
	int page_count, i;
	struct address_space *mapping;
	struct sg_table *st;
	struct scatterlist *sg;
	struct sgt_iter sgt_iter;
	struct page *page;
	unsigned long last_pfn = 0;	/* suppress gcc warning */
	int ret;
	gfp_t gfp;

	/* Assert that the object is not currently in any GPU domain. As it
	 * wasn't in the GTT, there shouldn't be any way it could have been in
	 * a GPU cache
	 */
	BUG_ON(obj->base.read_domains & I915_GEM_GPU_DOMAINS);
	BUG_ON(obj->base.write_domain & I915_GEM_GPU_DOMAINS);

	st = kmalloc(sizeof(*st), GFP_KERNEL);
	if (st == NULL)
		return -ENOMEM;

	page_count = obj->base.size / PAGE_SIZE;
	if (sg_alloc_table(st, page_count, GFP_KERNEL)) {
		kfree(st);
		return -ENOMEM;
	}

	/* Get the list of pages out of our struct file. They'll be pinned
	 * at this point until we release them.
	 *
	 * Fail silently without starting the shrinker
	 */
	mapping = file_inode(obj->base.filp)->i_mapping;
	gfp = mapping_gfp_constraint(mapping, ~(__GFP_IO | __GFP_RECLAIM));
	gfp |= __GFP_NORETRY | __GFP_NOWARN;
	sg = st->sgl;
	st->nents = 0;
	for (i = 0; i < page_count; i++) {
		page = shmem_read_mapping_page_gfp(mapping, i, gfp);
		if (IS_ERR(page)) {
			i915_gem_shrink(dev_priv,
					page_count,
					I915_SHRINK_BOUND |
					I915_SHRINK_UNBOUND |
					I915_SHRINK_PURGEABLE);
			page = shmem_read_mapping_page_gfp(mapping, i, gfp);
		}
		if (IS_ERR(page)) {
			/* We've tried hard to allocate the memory by reaping
			 * our own buffer, now let the real VM do its job and
			 * go down in flames if truly OOM.
			 */
			i915_gem_shrink_all(dev_priv);
			page = shmem_read_mapping_page(mapping, i);
			if (IS_ERR(page)) {
				ret = PTR_ERR(page);
				goto err_pages;
			}
		}
#ifdef CONFIG_SWIOTLB
		if (swiotlb_nr_tbl()) {
			st->nents++;
			sg_set_page(sg, page, PAGE_SIZE, 0);
			sg = sg_next(sg);
			continue;
		}
#endif
		if (!i || page_to_pfn(page) != last_pfn + 1) {
			if (i)
				sg = sg_next(sg);
			st->nents++;
			sg_set_page(sg, page, PAGE_SIZE, 0);
		} else {
			sg->length += PAGE_SIZE;
		}
		last_pfn = page_to_pfn(page);

		/* Check that the i965g/gm workaround works. */
		WARN_ON((gfp & __GFP_DMA32) && (last_pfn >= 0x00100000UL));
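		/* pfn 0x00100000 is the 4 GiB boundary (with 4 KiB pages), so
		 * the check above fires if a __GFP_DMA32 allocation ever
		 * hands back memory above 4 GiB. */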
	}
#ifdef CONFIG_SWIOTLB
	if (!swiotlb_nr_tbl())
#endif
		sg_mark_end(sg);
	obj->pages = st;

	ret = i915_gem_gtt_prepare_object(obj);
	if (ret)
		goto err_pages;

	if (i915_gem_object_needs_bit17_swizzle(obj))
		i915_gem_object_do_bit_17_swizzle(obj);

	if (obj->tiling_mode != I915_TILING_NONE &&
	    dev_priv->quirks & QUIRK_PIN_SWIZZLED_PAGES)
		i915_gem_object_pin_pages(obj);

	return 0;

err_pages:
	sg_mark_end(sg);
	for_each_sgt_page(page, sgt_iter, st)
		put_page(page);
	sg_free_table(st);
	kfree(st);

	/* shmemfs first checks if there is enough memory to allocate the page
	 * and reports ENOSPC should there be insufficient, along with the usual
	 * ENOMEM for a genuine allocation failure.
	 *
	 * We use ENOSPC in our driver to mean that we have run out of aperture
	 * space and so want to translate the error from shmemfs back to our
	 * usual understanding of ENOMEM.
	 */
	if (ret == -ENOSPC)
		ret = -ENOMEM;

	return ret;
}
/* Ensure that the associated pages are gathered from the backing storage
 * and pinned into our object. i915_gem_object_get_pages() may be called
 * multiple times before they are released by a single call to
 * i915_gem_object_put_pages() - once the pages are no longer referenced
 * either as a result of memory pressure (reaping pages under the shrinker)
 * or as the object is itself released.
 */
int
i915_gem_object_get_pages(struct drm_i915_gem_object *obj)
{
	struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
	const struct drm_i915_gem_object_ops *ops = obj->ops;
	int ret;

	if (obj->pages)
		return 0;

	if (obj->madv != I915_MADV_WILLNEED) {
		DRM_DEBUG("Attempting to obtain a purgeable object\n");
		return -EFAULT;
	}

	BUG_ON(obj->pages_pin_count);

	ret = ops->get_pages(obj);
	if (ret)
		return ret;

	list_add_tail(&obj->global_list, &dev_priv->mm.unbound_list);

	obj->get_page.sg = obj->pages->sgl;
	obj->get_page.last = 0;

	return 0;
}
/* The 'mapping' part of i915_gem_object_pin_map() below */
static void *i915_gem_object_map(const struct drm_i915_gem_object *obj)
{
	unsigned long n_pages = obj->base.size >> PAGE_SHIFT;
	struct sg_table *sgt = obj->pages;
	struct sgt_iter sgt_iter;
	struct page *page;
	struct page *stack_pages[32];
	struct page **pages = stack_pages;
	unsigned long i = 0;
	void *addr;

	/* A single page can always be kmapped */
	if (n_pages == 1)
		return kmap(sg_page(sgt->sgl));

	if (n_pages > ARRAY_SIZE(stack_pages)) {
		/* Too big for stack -- allocate temporary array instead */
		pages = drm_malloc_gfp(n_pages, sizeof(*pages), GFP_TEMPORARY);
		if (!pages)
			return NULL;
	}

	for_each_sgt_page(page, sgt_iter, sgt)
		pages[i++] = page;

	/* Check that we have the expected number of pages */
	GEM_BUG_ON(i != n_pages);

	addr = vmap(pages, n_pages, 0, PAGE_KERNEL);

	if (pages != stack_pages)
		drm_free_large(pages);

	return addr;
}

/* get, pin, and map the pages of the object into kernel space */
void *i915_gem_object_pin_map(struct drm_i915_gem_object *obj)
{
	int ret;

	lockdep_assert_held(&obj->base.dev->struct_mutex);

	ret = i915_gem_object_get_pages(obj);
	if (ret)
		return ERR_PTR(ret);

	i915_gem_object_pin_pages(obj);

	if (!obj->mapping) {
		obj->mapping = i915_gem_object_map(obj);
		if (!obj->mapping) {
			i915_gem_object_unpin_pages(obj);
			return ERR_PTR(-ENOMEM);
		}
	}

	return obj->mapping;
}
static void
i915_gem_object_retire__write(struct i915_gem_active *active,
			      struct drm_i915_gem_request *request)
{
	struct drm_i915_gem_object *obj =
		container_of(active, struct drm_i915_gem_object, last_write);

	intel_fb_obj_flush(obj, true, ORIGIN_CS);
}

static void
i915_gem_object_retire__read(struct i915_gem_active *active,
			     struct drm_i915_gem_request *request)
{
	int idx = request->engine->id;
	struct drm_i915_gem_object *obj =
		container_of(active, struct drm_i915_gem_object, last_read[idx]);

	GEM_BUG_ON((obj->active & (1 << idx)) == 0);

	obj->active &= ~(1 << idx);
	if (obj->active)
		return;

	/* Bump our place on the bound list to keep it roughly in LRU order
	 * so that we don't steal from recently used but inactive objects
	 * (unless we are forced to ofc!)
	 */
	if (obj->bind_count)
		list_move_tail(&obj->global_list,
			       &request->i915->mm.bound_list);

	i915_gem_object_put(obj);
}
static bool i915_context_is_banned(const struct i915_gem_context *ctx)
{
	unsigned long elapsed;

	if (ctx->hang_stats.banned)
		return true;

	elapsed = get_seconds() - ctx->hang_stats.guilty_ts;
	if (ctx->hang_stats.ban_period_seconds &&
	    elapsed <= ctx->hang_stats.ban_period_seconds) {
		DRM_DEBUG("context hanging too fast, banning!\n");
		return true;
	}

	return false;
}
static void i915_set_reset_status(struct i915_gem_context *ctx,
				  const bool guilty)
{
	struct i915_ctx_hang_stats *hs = &ctx->hang_stats;

	if (guilty) {
		hs->banned = i915_context_is_banned(ctx);
		hs->batch_active++;
		hs->guilty_ts = get_seconds();
	} else {
		hs->batch_pending++;
	}
}
struct drm_i915_gem_request *
i915_gem_find_active_request(struct intel_engine_cs *engine)
{
	struct drm_i915_gem_request *request;

	/* We are called by the error capture and reset at a random
	 * point in time. In particular, note that neither is crucially
	 * ordered with an interrupt. After a hang, the GPU is dead and we
	 * assume that no more writes can happen (we waited long enough for
	 * all writes that were in transaction to be flushed) - adding an
	 * extra delay for a recent interrupt is pointless. Hence, we do
	 * not need an engine->irq_seqno_barrier() before the seqno reads.
	 */
	list_for_each_entry(request, &engine->request_list, link) {
		if (i915_gem_request_completed(request))
			continue;

		return request;
	}

	return NULL;
}
2438 static void i915_gem_reset_engine_status(struct intel_engine_cs
*engine
)
2440 struct drm_i915_gem_request
*request
;
2443 request
= i915_gem_find_active_request(engine
);
2444 if (request
== NULL
)
2447 ring_hung
= engine
->hangcheck
.score
>= HANGCHECK_SCORE_RING_HUNG
;
2449 i915_set_reset_status(request
->ctx
, ring_hung
);
2450 list_for_each_entry_continue(request
, &engine
->request_list
, link
)
2451 i915_set_reset_status(request
->ctx
, false);
static void i915_gem_reset_engine_cleanup(struct intel_engine_cs *engine)
{
	struct intel_ring *ring;

	/* Mark all pending requests as complete so that any concurrent
	 * (lockless) lookup doesn't try and wait upon the request as we
	 * reset it.
	 */
	intel_engine_init_seqno(engine, engine->last_submitted_seqno);

	/*
	 * Clear the execlists queue up before freeing the requests, as those
	 * are the ones that keep the context and ringbuffer backing objects
	 * pinned in place.
	 */
	if (i915.enable_execlists) {
		/* Ensure irq handler finishes or is cancelled. */
		tasklet_kill(&engine->irq_tasklet);

		intel_execlists_cancel_requests(engine);
	}

	/*
	 * We must free the requests after all the corresponding objects have
	 * been moved off active lists. Which is the same order as the normal
	 * retire_requests function does. This is important if object hold
	 * implicit references on things like e.g. ppgtt address spaces through
	 * the request.
	 */
	if (!list_empty(&engine->request_list)) {
		struct drm_i915_gem_request *request;

		request = list_last_entry(&engine->request_list,
					  struct drm_i915_gem_request,
					  link);

		i915_gem_request_retire_upto(request);
	}

	/* Having flushed all requests from all queues, we know that all
	 * ringbuffers must now be empty. However, since we do not reclaim
	 * all space when retiring the request (to prevent HEADs colliding
	 * with rapid ringbuffer wraparound) the amount of available space
	 * upon reset is less than when we start. Do one more pass over
	 * all the ringbuffers to reset last_retired_head.
	 */
	list_for_each_entry(ring, &engine->buffers, link) {
		ring->last_retired_head = ring->tail;
		intel_ring_update_space(ring);
	}

	engine->i915->gt.active_engines &= ~intel_engine_flag(engine);
}
void i915_gem_reset(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_engine_cs *engine;

	/*
	 * Before we free the objects from the requests, we need to inspect
	 * them for finding the guilty party. As the requests only borrow
	 * their reference to the objects, the inspection must be done first.
	 */
	for_each_engine(engine, dev_priv)
		i915_gem_reset_engine_status(engine);

	for_each_engine(engine, dev_priv)
		i915_gem_reset_engine_cleanup(engine);
	mod_delayed_work(dev_priv->wq, &dev_priv->gt.idle_work, 0);

	i915_gem_context_reset(dev);

	i915_gem_restore_fences(dev);
}
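
/*
 * The two workers below cooperate: the retire worker runs periodically while
 * the GPU is awake to retire completed requests, and the idle worker runs
 * once activity has stopped to park the GPU and drop the RPS and runtime-pm
 * references again.
 */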
static void
i915_gem_retire_work_handler(struct work_struct *work)
{
	struct drm_i915_private *dev_priv =
		container_of(work, typeof(*dev_priv), gt.retire_work.work);
	struct drm_device *dev = &dev_priv->drm;

	/* Come back later if the device is busy... */
	if (mutex_trylock(&dev->struct_mutex)) {
		i915_gem_retire_requests(dev_priv);
		mutex_unlock(&dev->struct_mutex);
	}

	/* Keep the retire handler running until we are finally idle.
	 * We do not need to do this test under locking as in the worst-case
	 * we queue the retire worker once too often.
	 */
	if (READ_ONCE(dev_priv->gt.awake)) {
		i915_queue_hangcheck(dev_priv);
		queue_delayed_work(dev_priv->wq,
				   &dev_priv->gt.retire_work,
				   round_jiffies_up_relative(HZ));
	}
}
static void
i915_gem_idle_work_handler(struct work_struct *work)
{
	struct drm_i915_private *dev_priv =
		container_of(work, typeof(*dev_priv), gt.idle_work.work);
	struct drm_device *dev = &dev_priv->drm;
	struct intel_engine_cs *engine;
	unsigned int stuck_engines;
	bool rearm_hangcheck;

	if (!READ_ONCE(dev_priv->gt.awake))
		return;

	if (READ_ONCE(dev_priv->gt.active_engines))
		return;

	rearm_hangcheck =
		cancel_delayed_work_sync(&dev_priv->gpu_error.hangcheck_work);

	if (!mutex_trylock(&dev->struct_mutex)) {
		/* Currently busy, come back later */
		mod_delayed_work(dev_priv->wq,
				 &dev_priv->gt.idle_work,
				 msecs_to_jiffies(50));
		goto out_rearm;
	}

	if (dev_priv->gt.active_engines)
		goto out_unlock;

	for_each_engine(engine, dev_priv)
		i915_gem_batch_pool_fini(&engine->batch_pool);

	GEM_BUG_ON(!dev_priv->gt.awake);
	dev_priv->gt.awake = false;
	rearm_hangcheck = false;

	/* As we have disabled hangcheck, we need to unstick any waiters still
	 * hanging around. However, as we may be racing against the interrupt
	 * handler or the waiters themselves, we skip enabling the fake-irq.
	 */
	stuck_engines = intel_kick_waiters(dev_priv);
	if (unlikely(stuck_engines))
		DRM_DEBUG_DRIVER("kicked stuck waiters (%x)...missed irq?\n",
				 stuck_engines);

	if (INTEL_GEN(dev_priv) >= 6)
		gen6_rps_idle(dev_priv);
	intel_runtime_pm_put(dev_priv);
out_unlock:
	mutex_unlock(&dev->struct_mutex);

out_rearm:
	if (rearm_hangcheck) {
		GEM_BUG_ON(!dev_priv->gt.awake);
		i915_queue_hangcheck(dev_priv);
	}
}
void i915_gem_close_object(struct drm_gem_object *gem, struct drm_file *file)
{
	struct drm_i915_gem_object *obj = to_intel_bo(gem);
	struct drm_i915_file_private *fpriv = file->driver_priv;
	struct i915_vma *vma, *vn;

	mutex_lock(&obj->base.dev->struct_mutex);
	list_for_each_entry_safe(vma, vn, &obj->vma_list, obj_link)
		if (vma->vm->file == fpriv)
			i915_vma_close(vma);
	mutex_unlock(&obj->base.dev->struct_mutex);
}
/**
 * i915_gem_wait_ioctl - implements DRM_IOCTL_I915_GEM_WAIT
 * @dev: drm device pointer
 * @data: ioctl data blob
 * @file: drm file pointer
 *
 * Returns 0 if successful, else an error is returned with the remaining time in
 * the timeout parameter.
 *  -ETIME: object is still busy after timeout
 *  -ERESTARTSYS: signal interrupted the wait
 *  -ENOENT: object doesn't exist
 * Also possible, but rare:
 *  -EAGAIN: GPU wedged
 *  -ENODEV: Internal IRQ fail
 *  -E?: The add request failed
 *
 * The wait ioctl with a timeout of 0 reimplements the busy ioctl. With any
 * non-zero timeout parameter the wait ioctl will wait for the given number of
 * nanoseconds on an object becoming unbusy. Since the wait itself does so
 * without holding struct_mutex the object may become re-busied before this
 * function completes. A similar but shorter race condition exists in the busy
 * ioctl.
 */
int
i915_gem_wait_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
{
	struct drm_i915_gem_wait *args = data;
	struct drm_i915_gem_object *obj;
	struct drm_i915_gem_request *requests[I915_NUM_ENGINES];
	int i, n = 0;
	int ret;

	if (args->flags != 0)
		return -EINVAL;

	ret = i915_mutex_lock_interruptible(dev);
	if (ret)
		return ret;

	obj = i915_gem_object_lookup(file, args->bo_handle);
	if (!obj) {
		mutex_unlock(&dev->struct_mutex);
		return -ENOENT;
	}

	for (i = 0; i < I915_NUM_ENGINES; i++) {
		struct drm_i915_gem_request *req;

		req = i915_gem_active_get(&obj->last_read[i],
					  &obj->base.dev->struct_mutex);
		if (req)
			requests[n++] = req;
	}

	i915_gem_object_put(obj);
	mutex_unlock(&dev->struct_mutex);

	for (i = 0; i < n; i++) {
		ret = i915_wait_request(requests[i], true,
					args->timeout_ns > 0 ? &args->timeout_ns : NULL,
					to_rps_client(file));
		i915_gem_request_put(requests[i]);
	}

	return ret;
}
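
/*
 * Illustrative sketch only of how userspace typically reaches this ioctl
 * (libdrm's drmIoctl() is shown as an assumption; a zero timeout turns the
 * call into a non-blocking busy query):
 *
 *	struct drm_i915_gem_wait wait = {
 *		.bo_handle = handle,
 *		.timeout_ns = 0,
 *	};
 *	drmIoctl(fd, DRM_IOCTL_I915_GEM_WAIT, &wait);
 */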
static int
__i915_gem_object_sync(struct drm_i915_gem_request *to,
		       struct drm_i915_gem_request *from)
{
	int ret;

	if (to->engine == from->engine)
		return 0;

	if (!i915.semaphores) {
		ret = i915_wait_request(from,
					from->i915->mm.interruptible,
					NULL,
					NO_WAITBOOST);
		if (ret)
			return ret;
	} else {
		int idx = intel_engine_sync_index(from->engine, to->engine);
		if (from->fence.seqno <= from->engine->semaphore.sync_seqno[idx])
			return 0;

		trace_i915_gem_ring_sync_to(to, from);
		ret = to->engine->semaphore.sync_to(to, from);
		if (ret)
			return ret;

		from->engine->semaphore.sync_seqno[idx] = from->fence.seqno;
	}

	return 0;
}
/**
 * i915_gem_object_sync - sync an object to a ring.
 *
 * @obj: object which may be in use on another ring.
 * @to: request we are wishing to use
 *
 * This code is meant to abstract object synchronization with the GPU.
 * Conceptually we serialise writes between engines inside the GPU.
 * We only allow one engine to write into a buffer at any time, but
 * multiple readers. To ensure each has a coherent view of memory, we must:
 *
 * - If there is an outstanding write request to the object, the new
 *   request must wait for it to complete (either CPU or in hw, requests
 *   on the same ring will be naturally ordered).
 *
 * - If we are a write request (pending_write_domain is set), the new
 *   request must wait for outstanding read requests to complete.
 *
 * Returns 0 if successful, else propagates up the lower layer error.
 */
int
i915_gem_object_sync(struct drm_i915_gem_object *obj,
		     struct drm_i915_gem_request *to)
{
	struct i915_gem_active *active;
	unsigned long active_mask;
	int idx;

	lockdep_assert_held(&obj->base.dev->struct_mutex);

	active_mask = obj->active;
	if (!active_mask)
		return 0;

	if (obj->base.pending_write_domain) {
		active = obj->last_read;
	} else {
		active_mask = 1;
		active = &obj->last_write;
	}

	for_each_active(active_mask, idx) {
		struct drm_i915_gem_request *request;
		int ret;

		request = i915_gem_active_peek(&active[idx],
					       &obj->base.dev->struct_mutex);
		if (!request)
			continue;

		ret = __i915_gem_object_sync(to, request);
		if (ret)
			return ret;
	}

	return 0;
}
static void i915_gem_object_finish_gtt(struct drm_i915_gem_object *obj)
{
	u32 old_write_domain, old_read_domains;

	/* Force a pagefault for domain tracking on next user access */
	i915_gem_release_mmap(obj);

	if ((obj->base.read_domains & I915_GEM_DOMAIN_GTT) == 0)
		return;

	old_read_domains = obj->base.read_domains;
	old_write_domain = obj->base.write_domain;

	obj->base.read_domains &= ~I915_GEM_DOMAIN_GTT;
	obj->base.write_domain &= ~I915_GEM_DOMAIN_GTT;

	trace_i915_gem_object_change_domain(obj,
					    old_read_domains,
					    old_write_domain);
}

static void __i915_vma_iounmap(struct i915_vma *vma)
{
	GEM_BUG_ON(i915_vma_is_pinned(vma));

	if (vma->iomap == NULL)
		return;

	io_mapping_unmap(vma->iomap);
	vma->iomap = NULL;
}
int i915_vma_unbind(struct i915_vma *vma)
{
	struct drm_i915_gem_object *obj = vma->obj;
	unsigned long active;
	int ret;

	/* First wait upon any activity as retiring the request may
	 * have side-effects such as unpinning or even unbinding this vma.
	 */
	active = i915_vma_get_active(vma);
	if (active) {
		int idx;

		/* When a closed VMA is retired, it is unbound - eek.
		 * In order to prevent it from being recursively closed,
		 * take a pin on the vma so that the second unbind is
		 * aborted.
		 */
		__i915_vma_pin(vma);

		for_each_active(active, idx) {
			ret = i915_gem_active_retire(&vma->last_read[idx],
						     &vma->vm->dev->struct_mutex);
			if (ret)
				break;
		}

		__i915_vma_unpin(vma);
		if (ret)
			return ret;

		GEM_BUG_ON(i915_vma_is_active(vma));
	}

	if (i915_vma_is_pinned(vma))
		return -EBUSY;

	if (!drm_mm_node_allocated(&vma->node))
		goto destroy;

	GEM_BUG_ON(obj->bind_count == 0);
	GEM_BUG_ON(!obj->pages);

	if (i915_vma_is_ggtt(vma) &&
	    vma->ggtt_view.type == I915_GGTT_VIEW_NORMAL) {
		i915_gem_object_finish_gtt(obj);

		/* release the fence reg _after_ flushing */
		ret = i915_gem_object_put_fence(obj);
		if (ret)
			return ret;

		__i915_vma_iounmap(vma);
	}

	if (likely(!vma->vm->closed)) {
		trace_i915_vma_unbind(vma);
		vma->vm->unbind_vma(vma);
	}
	vma->flags &= ~(I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND);

	drm_mm_remove_node(&vma->node);
	list_move_tail(&vma->vm_link, &vma->vm->unbound_list);

	if (i915_vma_is_ggtt(vma)) {
		if (vma->ggtt_view.type == I915_GGTT_VIEW_NORMAL) {
			obj->map_and_fenceable = false;
		} else if (vma->ggtt_view.pages) {
			sg_free_table(vma->ggtt_view.pages);
			kfree(vma->ggtt_view.pages);
		}
		vma->ggtt_view.pages = NULL;
	}

	/* Since the unbound list is global, only move to that list if
	 * no more VMAs exist.
	 */
	if (--obj->bind_count == 0)
		list_move_tail(&obj->global_list,
			       &to_i915(obj->base.dev)->mm.unbound_list);

	/* And finally now the object is completely decoupled from this vma,
	 * we can drop its hold on the backing storage and allow it to be
	 * reaped by the shrinker.
	 */
	i915_gem_object_unpin_pages(obj);

destroy:
	if (unlikely(i915_vma_is_closed(vma)))
		i915_vma_destroy(vma);

	return 0;
}
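
/*
 * Note for the function below: i915_gem_wait_for_idle() idles every engine
 * that has run a context, and must be called with struct_mutex held, as
 * asserted on entry.
 */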
int i915_gem_wait_for_idle(struct drm_i915_private *dev_priv)
{
	struct intel_engine_cs *engine;
	int ret;

	lockdep_assert_held(&dev_priv->drm.struct_mutex);

	for_each_engine(engine, dev_priv) {
		if (engine->last_context == NULL)
			continue;

		ret = intel_engine_idle(engine);
		if (ret)
			return ret;
	}

	return 0;
}
static bool i915_gem_valid_gtt_space(struct i915_vma *vma,
				     unsigned long cache_level)
{
	struct drm_mm_node *gtt_space = &vma->node;
	struct drm_mm_node *other;

	/*
	 * On some machines we have to be careful when putting differing types
	 * of snoopable memory together to avoid the prefetcher crossing memory
	 * domains and dying. During vm initialisation, we decide whether or not
	 * these constraints apply and set the drm_mm.color_adjust
	 * appropriately.
	 */
	if (vma->vm->mm.color_adjust == NULL)
		return true;

	if (!drm_mm_node_allocated(gtt_space))
		return true;

	if (list_empty(&gtt_space->node_list))
		return true;

	other = list_entry(gtt_space->node_list.prev, struct drm_mm_node, node_list);
	if (other->allocated && !other->hole_follows && other->color != cache_level)
		return false;

	other = list_entry(gtt_space->node_list.next, struct drm_mm_node, node_list);
	if (other->allocated && !gtt_space->hole_follows && other->color != cache_level)
		return false;

	return true;
}
/**
 * i915_vma_insert - finds a slot for the vma in its address space
 * @size: requested size in bytes (can be larger than the VMA)
 * @alignment: required alignment
 * @flags: mask of PIN_* flags to use
 *
 * First we try to allocate some free space that meets the requirements for
 * the VMA. Failing that, if the flags permit, it will evict an old VMA,
 * preferably the oldest idle entry to make room for the new VMA.
 *
 * Returns:
 * 0 on success, negative error code otherwise.
 */
static int
i915_vma_insert(struct i915_vma *vma, u64 size, u64 alignment, u64 flags)
{
	struct drm_i915_private *dev_priv = to_i915(vma->vm->dev);
	struct drm_i915_gem_object *obj = vma->obj;
	u64 start, end;
	u64 min_alignment;
	int ret;

	GEM_BUG_ON(vma->flags & (I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND));
	GEM_BUG_ON(drm_mm_node_allocated(&vma->node));

	size = max(size, vma->size);
	if (flags & PIN_MAPPABLE)
		size = i915_gem_get_ggtt_size(dev_priv, size, obj->tiling_mode);

	min_alignment =
		i915_gem_get_ggtt_alignment(dev_priv, size, obj->tiling_mode,
					    flags & PIN_MAPPABLE);
	if (alignment == 0)
		alignment = min_alignment;
	if (alignment & (min_alignment - 1)) {
		DRM_DEBUG("Invalid object alignment requested %llu, minimum %llu\n",
			  alignment, min_alignment);
		return -EINVAL;
	}

	start = flags & PIN_OFFSET_BIAS ? flags & PIN_OFFSET_MASK : 0;

	end = vma->vm->total;
	if (flags & PIN_MAPPABLE)
		end = min_t(u64, end, dev_priv->ggtt.mappable_end);
	if (flags & PIN_ZONE_4G)
		end = min_t(u64, end, (1ULL << 32) - PAGE_SIZE);

	/* If binding the object/GGTT view requires more space than the entire
	 * aperture has, reject it early before evicting everything in a vain
	 * attempt to find space.
	 */
	if (size > end) {
		DRM_DEBUG("Attempting to bind an object larger than the aperture: request=%llu [object=%zd] > %s aperture=%llu\n",
			  size, obj->base.size,
			  flags & PIN_MAPPABLE ? "mappable" : "total",
			  end);
		return -E2BIG;
	}

	ret = i915_gem_object_get_pages(obj);
	if (ret)
		return ret;

	i915_gem_object_pin_pages(obj);

	if (flags & PIN_OFFSET_FIXED) {
		u64 offset = flags & PIN_OFFSET_MASK;
		if (offset & (alignment - 1) || offset > end - size) {
			ret = -EINVAL;
			goto err_unpin;
		}

		vma->node.start = offset;
		vma->node.size = size;
		vma->node.color = obj->cache_level;
		ret = drm_mm_reserve_node(&vma->vm->mm, &vma->node);
		if (ret) {
			ret = i915_gem_evict_for_vma(vma);
			if (ret == 0)
				ret = drm_mm_reserve_node(&vma->vm->mm, &vma->node);
			if (ret)
				goto err_unpin;
		}
	} else {
		u32 search_flag, alloc_flag;

		if (flags & PIN_HIGH) {
			search_flag = DRM_MM_SEARCH_BELOW;
			alloc_flag = DRM_MM_CREATE_TOP;
		} else {
			search_flag = DRM_MM_SEARCH_DEFAULT;
			alloc_flag = DRM_MM_CREATE_DEFAULT;
		}

		/* We only allocate in PAGE_SIZE/GTT_PAGE_SIZE (4096) chunks,
		 * so we know that we always have a minimum alignment of 4096.
		 * The drm_mm range manager is optimised to return results
		 * with zero alignment, so where possible use the optimal
		 * path.
		 */
		if (alignment <= 4096)
			alignment = 0;

search_free:
		ret = drm_mm_insert_node_in_range_generic(&vma->vm->mm,
							  &vma->node,
							  size, alignment,
							  obj->cache_level,
							  start, end,
							  search_flag,
							  alloc_flag);
		if (ret) {
			ret = i915_gem_evict_something(vma->vm, size, alignment,
						       obj->cache_level,
						       start, end,
						       flags);
			if (ret == 0)
				goto search_free;

			goto err_unpin;
		}
	}
	GEM_BUG_ON(!i915_gem_valid_gtt_space(vma, obj->cache_level));

	list_move_tail(&obj->global_list, &dev_priv->mm.bound_list);
	list_move_tail(&vma->vm_link, &vma->vm->inactive_list);
	obj->bind_count++;

	return 0;

err_unpin:
	i915_gem_object_unpin_pages(obj);
	return ret;
}
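
/*
 * Note for the function below: i915_gem_clflush_object() reports whether it
 * actually flushed the CPU cachelines, so its callers only follow up with a
 * chipset flush when a clflush really happened.
 */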
bool
i915_gem_clflush_object(struct drm_i915_gem_object *obj,
			bool force)
{
	/* If we don't have a page list set up, then we're not pinned
	 * to GPU, and we can ignore the cache flush because it'll happen
	 * again at bind time.
	 */
	if (obj->pages == NULL)
		return false;

	/*
	 * Stolen memory is always coherent with the GPU as it is explicitly
	 * marked as wc by the system, or the system is cache-coherent.
	 */
	if (obj->stolen || obj->phys_handle)
		return false;

	/* If the GPU is snooping the contents of the CPU cache,
	 * we do not need to manually clear the CPU cache lines. However,
	 * the caches are only snooped when the render cache is
	 * flushed/invalidated. As we always have to emit invalidations
	 * and flushes when moving into and out of the RENDER domain, correct
	 * snooping behaviour occurs naturally as the result of our domain
	 * tracking.
	 */
	if (!force && cpu_cache_is_coherent(obj->base.dev, obj->cache_level)) {
		obj->cache_dirty = true;
		return false;
	}

	trace_i915_gem_object_clflush(obj);
	drm_clflush_sg(obj->pages);
	obj->cache_dirty = false;

	return true;
}
/** Flushes the GTT write domain for the object if it's dirty. */
static void
i915_gem_object_flush_gtt_write_domain(struct drm_i915_gem_object *obj)
{
	uint32_t old_write_domain;

	if (obj->base.write_domain != I915_GEM_DOMAIN_GTT)
		return;

	/* No actual flushing is required for the GTT write domain. Writes
	 * to it immediately go to main memory as far as we know, so there's
	 * no chipset flush. It also doesn't land in render cache.
	 *
	 * However, we do have to enforce the order so that all writes through
	 * the GTT land before any writes to the device, such as updates to
	 * the GATT itself.
	 */
	wmb();

	old_write_domain = obj->base.write_domain;
	obj->base.write_domain = 0;

	intel_fb_obj_flush(obj, false, ORIGIN_GTT);

	trace_i915_gem_object_change_domain(obj,
					    obj->base.read_domains,
					    old_write_domain);
}

/** Flushes the CPU write domain for the object if it's dirty. */
static void
i915_gem_object_flush_cpu_write_domain(struct drm_i915_gem_object *obj)
{
	uint32_t old_write_domain;

	if (obj->base.write_domain != I915_GEM_DOMAIN_CPU)
		return;

	if (i915_gem_clflush_object(obj, obj->pin_display))
		i915_gem_chipset_flush(to_i915(obj->base.dev));

	old_write_domain = obj->base.write_domain;
	obj->base.write_domain = 0;

	intel_fb_obj_flush(obj, false, ORIGIN_CPU);

	trace_i915_gem_object_change_domain(obj,
					    obj->base.read_domains,
					    old_write_domain);
}
/**
 * Moves a single object to the GTT read, and possibly write domain.
 * @obj: object to act on
 * @write: ask for write access or read only
 *
 * This function returns when the move is complete, including waiting on
 * flushes to occur.
 */
int
i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj, bool write)
{
	uint32_t old_write_domain, old_read_domains;
	struct i915_vma *vma;
	int ret;

	ret = i915_gem_object_wait_rendering(obj, !write);
	if (ret)
		return ret;

	if (obj->base.write_domain == I915_GEM_DOMAIN_GTT)
		return 0;

	/* Flush and acquire obj->pages so that we are coherent through
	 * direct access in memory with previous cached writes through
	 * shmemfs and that our cache domain tracking remains valid.
	 * For example, if the obj->filp was moved to swap without us
	 * being notified and releasing the pages, we would mistakenly
	 * continue to assume that the obj remained out of the CPU cached
	 * domain.
	 */
	ret = i915_gem_object_get_pages(obj);
	if (ret)
		return ret;

	i915_gem_object_flush_cpu_write_domain(obj);

	/* Serialise direct access to this object with the barriers for
	 * coherent writes from the GPU, by effectively invalidating the
	 * GTT domain upon first access.
	 */
	if ((obj->base.read_domains & I915_GEM_DOMAIN_GTT) == 0)
		mb();

	old_write_domain = obj->base.write_domain;
	old_read_domains = obj->base.read_domains;

	/* It should now be out of any other write domains, and we can update
	 * the domain values for our changes.
	 */
	BUG_ON((obj->base.write_domain & ~I915_GEM_DOMAIN_GTT) != 0);
	obj->base.read_domains |= I915_GEM_DOMAIN_GTT;
	if (write) {
		obj->base.read_domains = I915_GEM_DOMAIN_GTT;
		obj->base.write_domain = I915_GEM_DOMAIN_GTT;
		obj->dirty = 1;
	}

	trace_i915_gem_object_change_domain(obj,
					    old_read_domains,
					    old_write_domain);

	/* And bump the LRU for this access */
	vma = i915_gem_obj_to_ggtt(obj);
	if (vma &&
	    drm_mm_node_allocated(&vma->node) &&
	    !i915_vma_is_active(vma))
		list_move_tail(&vma->vm_link, &vma->vm->inactive_list);

	return 0;
}
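
/*
 * Illustrative sketch only of how callers pair the domain change above with
 * their access (the write itself, e.g. through a GTT mmap, is elided):
 *
 *	ret = i915_gem_object_set_to_gtt_domain(obj, true);
 *	if (ret)
 *		return ret;
 *	... write through the GTT mapping ...
 */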
/**
 * Changes the cache-level of an object across all VMA.
 * @obj: object to act on
 * @cache_level: new cache level to set for the object
 *
 * After this function returns, the object will be in the new cache-level
 * across all GTT and the contents of the backing storage will be coherent,
 * with respect to the new cache-level. In order to keep the backing storage
 * coherent for all users, we only allow a single cache level to be set
 * globally on the object and prevent it from being changed whilst the
 * hardware is reading from the object. That is if the object is currently
 * on the scanout it will be set to uncached (or equivalent display
 * cache coherency) and all non-MOCS GPU access will also be uncached so
 * that all direct access to the scanout remains coherent.
 */
int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj,
				    enum i915_cache_level cache_level)
{
	struct i915_vma *vma;
	int ret;

	if (obj->cache_level == cache_level)
		goto out;

	/* Inspect the list of currently bound VMA and unbind any that would
	 * be invalid given the new cache-level. This is principally to
	 * catch the issue of the CS prefetch crossing page boundaries and
	 * reading an invalid PTE on older architectures.
	 */
restart:
	list_for_each_entry(vma, &obj->vma_list, obj_link) {
		if (!drm_mm_node_allocated(&vma->node))
			continue;

		if (i915_vma_is_pinned(vma)) {
			DRM_DEBUG("can not change the cache level of pinned objects\n");
			return -EBUSY;
		}

		if (i915_gem_valid_gtt_space(vma, cache_level))
			continue;

		ret = i915_vma_unbind(vma);
		if (ret)
			return ret;

		/* As unbinding may affect other elements in the
		 * obj->vma_list (due to side-effects from retiring
		 * an active vma), play safe and restart the iterator.
		 */
		goto restart;
	}

	/* We can reuse the existing drm_mm nodes but need to change the
	 * cache-level on the PTE. We could simply unbind them all and
	 * rebind with the correct cache-level on next use. However since
	 * we already have a valid slot, dma mapping, pages etc, we may as well
	 * rewrite the PTE in the belief that doing so tramples upon less
	 * state and so involves less work.
	 */
	if (obj->bind_count) {
		/* Before we change the PTE, the GPU must not be accessing it.
		 * If we wait upon the object, we know that all the bound
		 * VMA are no longer active.
		 */
		ret = i915_gem_object_wait_rendering(obj, false);
		if (ret)
			return ret;

		if (!HAS_LLC(obj->base.dev) && cache_level != I915_CACHE_NONE) {
			/* Access to snoopable pages through the GTT is
			 * incoherent and on some machines causes a hard
			 * lockup. Relinquish the CPU mmaping to force
			 * userspace to refault in the pages and we can
			 * then double check if the GTT mapping is still
			 * valid for that pointer access.
			 */
			i915_gem_release_mmap(obj);

			/* As we no longer need a fence for GTT access,
			 * we can relinquish it now (and so prevent having
			 * to steal a fence from someone else on the next
			 * fence request). Note GPU activity would have
			 * dropped the fence as all snoopable access is
			 * supposed to be linear.
			 */
			ret = i915_gem_object_put_fence(obj);
			if (ret)
				return ret;
		} else {
			/* We either have incoherent backing store and
			 * so no GTT access or the architecture is fully
			 * coherent. In such cases, existing GTT mmaps
			 * ignore the cache bit in the PTE and we can
			 * rewrite it without confusing the GPU or having
			 * to force userspace to fault back in its mmaps.
			 */
		}

		list_for_each_entry(vma, &obj->vma_list, obj_link) {
			if (!drm_mm_node_allocated(&vma->node))
				continue;

			ret = i915_vma_bind(vma, cache_level, PIN_UPDATE);
			if (ret)
				return ret;
		}
	}

	list_for_each_entry(vma, &obj->vma_list, obj_link)
		vma->node.color = cache_level;
	obj->cache_level = cache_level;

out:
	/* Flush the dirty CPU caches to the backing storage so that the
	 * object is now coherent at its new cache level (with respect
	 * to the access domain).
	 */
	if (obj->cache_dirty && cpu_write_needs_clflush(obj)) {
		if (i915_gem_clflush_object(obj, true))
			i915_gem_chipset_flush(to_i915(obj->base.dev));
	}

	return 0;
}
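
/*
 * The two caching ioctls below translate between the uapi I915_CACHING_*
 * values and the internal i915_cache_level enum consumed by
 * i915_gem_object_set_cache_level().
 */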
int i915_gem_get_caching_ioctl(struct drm_device *dev, void *data,
			       struct drm_file *file)
{
	struct drm_i915_gem_caching *args = data;
	struct drm_i915_gem_object *obj;

	obj = i915_gem_object_lookup(file, args->handle);
	if (!obj)
		return -ENOENT;

	switch (obj->cache_level) {
	case I915_CACHE_LLC:
	case I915_CACHE_L3_LLC:
		args->caching = I915_CACHING_CACHED;
		break;

	case I915_CACHE_WT:
		args->caching = I915_CACHING_DISPLAY;
		break;

	default:
		args->caching = I915_CACHING_NONE;
		break;
	}

	i915_gem_object_put_unlocked(obj);
	return 0;
}

int i915_gem_set_caching_ioctl(struct drm_device *dev, void *data,
			       struct drm_file *file)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct drm_i915_gem_caching *args = data;
	struct drm_i915_gem_object *obj;
	enum i915_cache_level level;
	int ret;

	switch (args->caching) {
	case I915_CACHING_NONE:
		level = I915_CACHE_NONE;
		break;
	case I915_CACHING_CACHED:
		/*
		 * Due to a HW issue on BXT A stepping, GPU stores via a
		 * snooped mapping may leave stale data in a corresponding CPU
		 * cacheline, whereas normally such cachelines would get
		 * invalidated.
		 */
		if (!HAS_LLC(dev) && !HAS_SNOOP(dev))
			return -ENODEV;

		level = I915_CACHE_LLC;
		break;
	case I915_CACHING_DISPLAY:
		level = HAS_WT(dev) ? I915_CACHE_WT : I915_CACHE_NONE;
		break;
	default:
		return -EINVAL;
	}

	intel_runtime_pm_get(dev_priv);

	ret = i915_mutex_lock_interruptible(dev);
	if (ret)
		goto rpm_put;

	obj = i915_gem_object_lookup(file, args->handle);
	if (!obj) {
		ret = -ENOENT;
		goto unlock;
	}

	ret = i915_gem_object_set_cache_level(obj, level);

	i915_gem_object_put(obj);
unlock:
	mutex_unlock(&dev->struct_mutex);
rpm_put:
	intel_runtime_pm_put(dev_priv);

	return ret;
}
/*
 * Prepare buffer for display plane (scanout, cursors, etc).
 * Can be called from an uninterruptible phase (modesetting) and allows
 * any flushes to be pipelined (for pageflips).
 */
int
i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj,
				     u32 alignment,
				     const struct i915_ggtt_view *view)
{
	u32 old_read_domains, old_write_domain;
	int ret;

	/* Mark the pin_display early so that we account for the
	 * display coherency whilst setting up the cache domains.
	 */
	obj->pin_display++;

	/* The display engine is not coherent with the LLC cache on gen6. As
	 * a result, we make sure that the pinning that is about to occur is
	 * done with uncached PTEs. This is lowest common denominator for all
	 * chipsets.
	 *
	 * However for gen6+, we could do better by using the GFDT bit instead
	 * of uncaching, which would allow us to flush all the LLC-cached data
	 * with that bit in the PTE to main memory with just one PIPE_CONTROL.
	 */
	ret = i915_gem_object_set_cache_level(obj,
					      HAS_WT(obj->base.dev) ?
					      I915_CACHE_WT : I915_CACHE_NONE);
	if (ret)
		goto err_unpin_display;

	/* As the user may map the buffer once pinned in the display plane
	 * (e.g. libkms for the bootup splash), we have to ensure that we
	 * always use map_and_fenceable for all scanout buffers.
	 */
	ret = i915_gem_object_ggtt_pin(obj, view, 0, alignment,
				       view->type == I915_GGTT_VIEW_NORMAL ?
				       PIN_MAPPABLE : 0);
	if (ret)
		goto err_unpin_display;

	i915_gem_object_flush_cpu_write_domain(obj);

	old_write_domain = obj->base.write_domain;
	old_read_domains = obj->base.read_domains;

	/* It should now be out of any other write domains, and we can update
	 * the domain values for our changes.
	 */
	obj->base.write_domain = 0;
	obj->base.read_domains |= I915_GEM_DOMAIN_GTT;

	trace_i915_gem_object_change_domain(obj,
					    old_read_domains,
					    old_write_domain);

	return 0;

err_unpin_display:
	obj->pin_display--;
	return ret;
}

void
i915_gem_object_unpin_from_display_plane(struct drm_i915_gem_object *obj,
					 const struct i915_ggtt_view *view)
{
	if (WARN_ON(obj->pin_display == 0))
		return;

	i915_gem_object_ggtt_unpin_view(obj, view);

	obj->pin_display--;
}
/**
 * Moves a single object to the CPU read, and possibly write domain.
 * @obj: object to act on
 * @write: requesting write or read-only access
 *
 * This function returns when the move is complete, including waiting on
 * flushes to occur.
 */
int
i915_gem_object_set_to_cpu_domain(struct drm_i915_gem_object *obj, bool write)
{
	uint32_t old_write_domain, old_read_domains;
	int ret;

	ret = i915_gem_object_wait_rendering(obj, !write);
	if (ret)
		return ret;

	if (obj->base.write_domain == I915_GEM_DOMAIN_CPU)
		return 0;

	i915_gem_object_flush_gtt_write_domain(obj);

	old_write_domain = obj->base.write_domain;
	old_read_domains = obj->base.read_domains;

	/* Flush the CPU cache if it's still invalid. */
	if ((obj->base.read_domains & I915_GEM_DOMAIN_CPU) == 0) {
		i915_gem_clflush_object(obj, false);

		obj->base.read_domains |= I915_GEM_DOMAIN_CPU;
	}

	/* It should now be out of any other write domains, and we can update
	 * the domain values for our changes.
	 */
	BUG_ON((obj->base.write_domain & ~I915_GEM_DOMAIN_CPU) != 0);

	/* If we're writing through the CPU, then the GPU read domains will
	 * need to be invalidated at next use.
	 */
	if (write) {
		obj->base.read_domains = I915_GEM_DOMAIN_CPU;
		obj->base.write_domain = I915_GEM_DOMAIN_CPU;
	}

	trace_i915_gem_object_change_domain(obj,
					    old_read_domains,
					    old_write_domain);

	return 0;
}
/* Throttle our rendering by waiting until the ring has completed our requests
 * emitted over 20 msec ago.
 *
 * Note that if we were to use the current jiffies each time around the loop,
 * we wouldn't escape the function with any frames outstanding if the time to
 * render a frame was over 20ms.
 *
 * This should get us reasonable parallelism between CPU and GPU but also
 * relatively low latency when blocking on a particular request to finish.
 */
static int
i915_gem_ring_throttle(struct drm_device *dev, struct drm_file *file)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct drm_i915_file_private *file_priv = file->driver_priv;
	unsigned long recent_enough = jiffies - DRM_I915_THROTTLE_JIFFIES;
	struct drm_i915_gem_request *request, *target = NULL;
	int ret;

	ret = i915_gem_wait_for_error(&dev_priv->gpu_error);
	if (ret)
		return ret;

	/* ABI: return -EIO if already wedged */
	if (i915_terminally_wedged(&dev_priv->gpu_error))
		return -EIO;

	spin_lock(&file_priv->mm.lock);
	list_for_each_entry(request, &file_priv->mm.request_list, client_list) {
		if (time_after_eq(request->emitted_jiffies, recent_enough))
			break;

		/*
		 * Note that the request might not have been submitted yet.
		 * In which case emitted_jiffies will be zero.
		 */
		if (!request->emitted_jiffies)
			continue;

		target = request;
	}
	if (target)
		i915_gem_request_get(target);
	spin_unlock(&file_priv->mm.lock);

	if (target == NULL)
		return 0;

	ret = i915_wait_request(target, true, NULL, NULL);
	i915_gem_request_put(target);

	return ret;
}
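
/*
 * Note for the function below: i915_vma_misplaced() reports whether an
 * already-bound VMA violates the size, alignment or placement constraints
 * requested by the current pin, in which case the caller unbinds and rebinds
 * it (see i915_gem_object_ggtt_pin()).
 */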
static bool
i915_vma_misplaced(struct i915_vma *vma, u64 size, u64 alignment, u64 flags)
{
	struct drm_i915_gem_object *obj = vma->obj;

	if (!drm_mm_node_allocated(&vma->node))
		return false;

	if (vma->node.size < size)
		return true;

	if (alignment && vma->node.start & (alignment - 1))
		return true;

	if (flags & PIN_MAPPABLE && !obj->map_and_fenceable)
		return true;

	if (flags & PIN_OFFSET_BIAS &&
	    vma->node.start < (flags & PIN_OFFSET_MASK))
		return true;

	if (flags & PIN_OFFSET_FIXED &&
	    vma->node.start != (flags & PIN_OFFSET_MASK))
		return true;

	return false;
}

void __i915_vma_set_map_and_fenceable(struct i915_vma *vma)
{
	struct drm_i915_gem_object *obj = vma->obj;
	struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
	bool mappable, fenceable;
	u32 fence_size, fence_alignment;

	fence_size = i915_gem_get_ggtt_size(dev_priv,
					    obj->base.size,
					    obj->tiling_mode);
	fence_alignment = i915_gem_get_ggtt_alignment(dev_priv,
						      obj->base.size,
						      obj->tiling_mode,
						      true);

	fenceable = (vma->node.size == fence_size &&
		     (vma->node.start & (fence_alignment - 1)) == 0);

	mappable = (vma->node.start + fence_size <=
		    dev_priv->ggtt.mappable_end);

	obj->map_and_fenceable = mappable && fenceable;
}
int __i915_vma_do_pin(struct i915_vma *vma,
		      u64 size, u64 alignment, u64 flags)
{
	unsigned int bound = vma->flags;
	int ret;

	GEM_BUG_ON((flags & (PIN_GLOBAL | PIN_USER)) == 0);
	GEM_BUG_ON((flags & PIN_GLOBAL) && !i915_vma_is_ggtt(vma));

	if (WARN_ON(bound & I915_VMA_PIN_OVERFLOW)) {
		ret = -EBUSY;
		goto err;
	}

	if ((bound & I915_VMA_BIND_MASK) == 0) {
		ret = i915_vma_insert(vma, size, alignment, flags);
		if (ret)
			goto err;
	}

	ret = i915_vma_bind(vma, vma->obj->cache_level, flags);
	if (ret)
		goto err;

	if ((bound ^ vma->flags) & I915_VMA_GLOBAL_BIND)
		__i915_vma_set_map_and_fenceable(vma);

	GEM_BUG_ON(i915_vma_misplaced(vma, size, alignment, flags));
	return 0;

err:
	__i915_vma_unpin(vma);
	return ret;
}

int
i915_gem_object_ggtt_pin(struct drm_i915_gem_object *obj,
			 const struct i915_ggtt_view *view,
			 u64 size,
			 u64 alignment,
			 u64 flags)
{
	struct i915_vma *vma;
	int ret;

	if (!view)
		view = &i915_ggtt_view_normal;

	vma = i915_gem_obj_lookup_or_create_ggtt_vma(obj, view);
	if (IS_ERR(vma))
		return PTR_ERR(vma);

	if (i915_vma_misplaced(vma, size, alignment, flags)) {
		if (flags & PIN_NONBLOCK &&
		    (i915_vma_is_pinned(vma) || i915_vma_is_active(vma)))
			return -ENOSPC;

		WARN(i915_vma_is_pinned(vma),
		     "bo is already pinned in ggtt with incorrect alignment:"
		     " offset=%08x %08x, req.alignment=%llx, req.map_and_fenceable=%d,"
		     " obj->map_and_fenceable=%d\n",
		     upper_32_bits(vma->node.start),
		     lower_32_bits(vma->node.start),
		     alignment,
		     !!(flags & PIN_MAPPABLE),
		     obj->map_and_fenceable);
		ret = i915_vma_unbind(vma);
		if (ret)
			return ret;
	}

	return i915_vma_pin(vma, size, alignment, flags | PIN_GLOBAL);
}
void
i915_gem_object_ggtt_unpin_view(struct drm_i915_gem_object *obj,
				const struct i915_ggtt_view *view)
{
	i915_vma_unpin(i915_gem_obj_to_ggtt_view(obj, view));
}

int
i915_gem_busy_ioctl(struct drm_device *dev, void *data,
		    struct drm_file *file)
{
	struct drm_i915_gem_busy *args = data;
	struct drm_i915_gem_object *obj;
	int ret;

	ret = i915_mutex_lock_interruptible(dev);
	if (ret)
		return ret;

	obj = i915_gem_object_lookup(file, args->handle);
	if (!obj) {
		ret = -ENOENT;
		goto unlock;
	}

	/* Count all active objects as busy, even if they are currently not used
	 * by the gpu. Users of this interface expect objects to eventually
	 * become non-busy without any further actions.
	 */
	args->busy = 0;
	if (obj->active) {
		struct drm_i915_gem_request *req;
		int i;

		for (i = 0; i < I915_NUM_ENGINES; i++) {
			req = i915_gem_active_peek(&obj->last_read[i],
						   &obj->base.dev->struct_mutex);
			if (req)
				args->busy |= 1 << (16 + req->engine->exec_id);
		}
		req = i915_gem_active_peek(&obj->last_write,
					   &obj->base.dev->struct_mutex);
		if (req)
			args->busy |= req->engine->exec_id;
	}

	i915_gem_object_put(obj);
unlock:
	mutex_unlock(&dev->struct_mutex);
	return ret;
}

int
i915_gem_throttle_ioctl(struct drm_device *dev, void *data,
			struct drm_file *file_priv)
{
	return i915_gem_ring_throttle(dev, file_priv);
}
int
i915_gem_madvise_ioctl(struct drm_device *dev, void *data,
		       struct drm_file *file_priv)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct drm_i915_gem_madvise *args = data;
	struct drm_i915_gem_object *obj;
	int ret;

	switch (args->madv) {
	case I915_MADV_DONTNEED:
	case I915_MADV_WILLNEED:
		break;
	default:
		return -EINVAL;
	}

	ret = i915_mutex_lock_interruptible(dev);
	if (ret)
		return ret;

	obj = i915_gem_object_lookup(file_priv, args->handle);
	if (!obj) {
		ret = -ENOENT;
		goto unlock;
	}

	if (i915_gem_obj_is_pinned(obj)) {
		ret = -EINVAL;
		goto out;
	}

	if (obj->pages &&
	    obj->tiling_mode != I915_TILING_NONE &&
	    dev_priv->quirks & QUIRK_PIN_SWIZZLED_PAGES) {
		if (obj->madv == I915_MADV_WILLNEED)
			i915_gem_object_unpin_pages(obj);
		if (args->madv == I915_MADV_WILLNEED)
			i915_gem_object_pin_pages(obj);
	}

	if (obj->madv != __I915_MADV_PURGED)
		obj->madv = args->madv;

	/* if the object is no longer attached, discard its backing storage */
	if (obj->madv == I915_MADV_DONTNEED && obj->pages == NULL)
		i915_gem_object_truncate(obj);

	args->retained = obj->madv != __I915_MADV_PURGED;

out:
	i915_gem_object_put(obj);
unlock:
	mutex_unlock(&dev->struct_mutex);
	return ret;
}
void i915_gem_object_init(struct drm_i915_gem_object *obj,
			  const struct drm_i915_gem_object_ops *ops)
{
	int i;

	INIT_LIST_HEAD(&obj->global_list);
	for (i = 0; i < I915_NUM_ENGINES; i++)
		init_request_active(&obj->last_read[i],
				    i915_gem_object_retire__read);
	init_request_active(&obj->last_write,
			    i915_gem_object_retire__write);
	init_request_active(&obj->last_fence, NULL);
	INIT_LIST_HEAD(&obj->obj_exec_link);
	INIT_LIST_HEAD(&obj->vma_list);
	INIT_LIST_HEAD(&obj->batch_pool_link);

	obj->ops = ops;

	obj->fence_reg = I915_FENCE_REG_NONE;
	obj->madv = I915_MADV_WILLNEED;

	i915_gem_info_add_obj(to_i915(obj->base.dev), obj->base.size);
}

static const struct drm_i915_gem_object_ops i915_gem_object_ops = {
	.flags = I915_GEM_OBJECT_HAS_STRUCT_PAGE,
	.get_pages = i915_gem_object_get_pages_gtt,
	.put_pages = i915_gem_object_put_pages_gtt,
};
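
/*
 * Note for the function below: i915_gem_object_create() allocates a
 * shmemfs-backed GEM object and starts it in the CPU read/write domain, with
 * the default cache level chosen by whether the device can share its LLC
 * with the GPU.
 */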
struct drm_i915_gem_object *i915_gem_object_create(struct drm_device *dev,
						   u64 size)
{
	struct drm_i915_gem_object *obj;
	struct address_space *mapping;
	gfp_t mask;
	int ret;

	obj = i915_gem_object_alloc(dev);
	if (obj == NULL)
		return ERR_PTR(-ENOMEM);

	ret = drm_gem_object_init(dev, &obj->base, size);
	if (ret)
		goto fail;

	mask = GFP_HIGHUSER | __GFP_RECLAIMABLE;
	if (IS_CRESTLINE(dev) || IS_BROADWATER(dev)) {
		/* 965gm cannot relocate objects above 4GiB. */
		mask &= ~__GFP_HIGHMEM;
		mask |= __GFP_DMA32;
	}

	mapping = file_inode(obj->base.filp)->i_mapping;
	mapping_set_gfp_mask(mapping, mask);

	i915_gem_object_init(obj, &i915_gem_object_ops);

	obj->base.write_domain = I915_GEM_DOMAIN_CPU;
	obj->base.read_domains = I915_GEM_DOMAIN_CPU;

	if (HAS_LLC(dev)) {
		/* On some devices, we can have the GPU use the LLC (the CPU
		 * cache) for about a 10% performance improvement
		 * compared to uncached. Graphics requests other than
		 * display scanout are coherent with the CPU in
		 * accessing this cache. This means in this mode we
		 * don't need to clflush on the CPU side, and on the
		 * GPU side we only need to flush internal caches to
		 * get data visible to the CPU.
		 *
		 * However, we maintain the display planes as UC, and so
		 * need to rebind when first used as such.
		 */
		obj->cache_level = I915_CACHE_LLC;
	} else
		obj->cache_level = I915_CACHE_NONE;

	trace_i915_gem_object_create(obj);

	return obj;

fail:
	i915_gem_object_free(obj);

	return ERR_PTR(ret);
}

static bool discard_backing_storage(struct drm_i915_gem_object *obj)
{
	/* If we are the last user of the backing storage (be it shmemfs
	 * pages or stolen etc), we know that the pages are going to be
	 * immediately released. In this case, we can then skip copying
	 * back the contents from the GPU.
	 */
	if (obj->madv != I915_MADV_WILLNEED)
		return false;

	if (obj->base.filp == NULL)
		return true;

	/* At first glance, this looks racy, but then again so would be
	 * userspace racing mmap against close. However, the first external
	 * reference to the filp can only be obtained through the
	 * i915_gem_mmap_ioctl() which safeguards us against the user
	 * acquiring such a reference whilst we are in the middle of
	 * freeing the object.
	 */
	return atomic_long_read(&obj->base.filp->f_count) == 1;
}
void i915_gem_free_object(struct drm_gem_object *gem_obj)
{
	struct drm_i915_gem_object *obj = to_intel_bo(gem_obj);
	struct drm_device *dev = obj->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct i915_vma *vma, *next;

	intel_runtime_pm_get(dev_priv);

	trace_i915_gem_object_destroy(obj);

	/* All file-owned VMA should have been released by this point through
	 * i915_gem_close_object(), or earlier by i915_gem_context_close().
	 * However, the object may also be bound into the global GTT (e.g.
	 * older GPUs without per-process support, or for direct access through
	 * the GTT either for the user or for scanout). Those VMA still need to
	 * be unbound now.
	 */
	list_for_each_entry_safe(vma, next, &obj->vma_list, obj_link) {
		GEM_BUG_ON(!i915_vma_is_ggtt(vma));
		GEM_BUG_ON(i915_vma_is_active(vma));
		vma->flags &= ~I915_VMA_PIN_MASK;
		i915_vma_close(vma);
	}
	GEM_BUG_ON(obj->bind_count);

	/* Stolen objects don't hold a ref, but do hold pin count. Fix that up
	 * before progressing.
	 */
	if (obj->stolen)
		i915_gem_object_unpin_pages(obj);

	WARN_ON(obj->frontbuffer_bits);

	if (obj->pages && obj->madv == I915_MADV_WILLNEED &&
	    dev_priv->quirks & QUIRK_PIN_SWIZZLED_PAGES &&
	    obj->tiling_mode != I915_TILING_NONE)
		i915_gem_object_unpin_pages(obj);

	if (WARN_ON(obj->pages_pin_count))
		obj->pages_pin_count = 0;
	if (discard_backing_storage(obj))
		obj->madv = I915_MADV_DONTNEED;
	i915_gem_object_put_pages(obj);

	if (obj->base.import_attach)
		drm_prime_gem_destroy(&obj->base, NULL);

	if (obj->ops->release)
		obj->ops->release(obj);

	drm_gem_object_release(&obj->base);
	i915_gem_info_remove_obj(dev_priv, obj->base.size);

	i915_gem_object_free(obj);

	intel_runtime_pm_put(dev_priv);
}
struct i915_vma *i915_gem_obj_to_vma(struct drm_i915_gem_object *obj,
				     struct i915_address_space *vm)
{
	struct i915_vma *vma;
	list_for_each_entry(vma, &obj->vma_list, obj_link) {
		if (vma->ggtt_view.type == I915_GGTT_VIEW_NORMAL &&
		    vma->vm == vm)
			return vma;
	}
	return NULL;
}

struct i915_vma *i915_gem_obj_to_ggtt_view(struct drm_i915_gem_object *obj,
					   const struct i915_ggtt_view *view)
{
	struct i915_vma *vma;

	list_for_each_entry(vma, &obj->vma_list, obj_link)
		if (i915_vma_is_ggtt(vma) &&
		    i915_ggtt_view_equal(&vma->ggtt_view, view))
			return vma;
	return NULL;
}
static void
i915_gem_stop_engines(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_engine_cs *engine;

	for_each_engine(engine, dev_priv)
		dev_priv->gt.stop_engine(engine);
}

int
i915_gem_suspend(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	int ret;

	intel_suspend_gt_powersave(dev_priv);

	mutex_lock(&dev->struct_mutex);

	/* We have to flush all the executing contexts to main memory so
	 * that they can be saved in the hibernation image. To ensure the last
	 * context image is coherent, we have to switch away from it. That
	 * leaves the dev_priv->kernel_context still active when
	 * we actually suspend, and its image in memory may not match the GPU
	 * state. Fortunately, the kernel_context is disposable and we do
	 * not rely on its state.
	 */
	ret = i915_gem_switch_to_kernel_context(dev_priv);
	if (ret)
		goto err;

	ret = i915_gem_wait_for_idle(dev_priv);
	if (ret)
		goto err;

	i915_gem_retire_requests(dev_priv);

	/* Note that rather than stopping the engines, all we have to do
	 * is assert that every RING_HEAD == RING_TAIL (all execution complete)
	 * and similar for all logical context images (to ensure they are
	 * all ready for hibernation).
	 */
	i915_gem_stop_engines(dev);
	i915_gem_context_lost(dev_priv);
	mutex_unlock(&dev->struct_mutex);

	cancel_delayed_work_sync(&dev_priv->gpu_error.hangcheck_work);
	cancel_delayed_work_sync(&dev_priv->gt.retire_work);
	flush_delayed_work(&dev_priv->gt.idle_work);

	/* Assert that we successfully flushed all the work and
	 * reset the GPU back to its idle, low power state.
	 */
	WARN_ON(dev_priv->gt.awake);

	return 0;

err:
	mutex_unlock(&dev->struct_mutex);
	return ret;
}
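
/*
 * Note for the function below: i915_gem_resume() restores the GTT mappings
 * and, under execlists, resets the kernel context image that was deliberately
 * left unflushed by i915_gem_suspend() above.
 */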
void i915_gem_resume(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);

	mutex_lock(&dev->struct_mutex);
	i915_gem_restore_gtt_mappings(dev);

	/* As we didn't flush the kernel context before suspend, we cannot
	 * guarantee that the context image is complete. So let's just reset
	 * it and start again.
	 */
	if (i915.enable_execlists)
		intel_lr_context_reset(dev_priv, dev_priv->kernel_context);

	mutex_unlock(&dev->struct_mutex);
}

void i915_gem_init_swizzling(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);

	if (INTEL_INFO(dev)->gen < 5 ||
	    dev_priv->mm.bit_6_swizzle_x == I915_BIT_6_SWIZZLE_NONE)
		return;

	I915_WRITE(DISP_ARB_CTL, I915_READ(DISP_ARB_CTL) |
				 DISP_TILE_SURFACE_SWIZZLING);

	if (IS_GEN5(dev))
		return;

	I915_WRITE(TILECTL, I915_READ(TILECTL) | TILECTL_SWZCTL);
	if (IS_GEN6(dev))
		I915_WRITE(ARB_MODE, _MASKED_BIT_ENABLE(ARB_MODE_SWIZZLE_SNB));
	else if (IS_GEN7(dev))
		I915_WRITE(ARB_MODE, _MASKED_BIT_ENABLE(ARB_MODE_SWIZZLE_IVB));
	else if (IS_GEN8(dev))
		I915_WRITE(GAMTARBMODE, _MASKED_BIT_ENABLE(ARB_MODE_SWIZZLE_BDW));
}
static void init_unused_ring(struct drm_device *dev, u32 base)
{
	struct drm_i915_private *dev_priv = to_i915(dev);

	I915_WRITE(RING_CTL(base), 0);
	I915_WRITE(RING_HEAD(base), 0);
	I915_WRITE(RING_TAIL(base), 0);
	I915_WRITE(RING_START(base), 0);
}

static void init_unused_rings(struct drm_device *dev)
{
	if (IS_I830(dev)) {
		init_unused_ring(dev, PRB1_BASE);
		init_unused_ring(dev, SRB0_BASE);
		init_unused_ring(dev, SRB1_BASE);
		init_unused_ring(dev, SRB2_BASE);
		init_unused_ring(dev, SRB3_BASE);
	} else if (IS_GEN2(dev)) {
		init_unused_ring(dev, SRB0_BASE);
		init_unused_ring(dev, SRB1_BASE);
	} else if (IS_GEN3(dev)) {
		init_unused_ring(dev, PRB1_BASE);
		init_unused_ring(dev, PRB2_BASE);
	}
}
int
i915_gem_init_hw(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_engine_cs *engine;
	int ret;

	/* Double layer security blanket, see i915_gem_init() */
	intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);

	if (HAS_EDRAM(dev) && INTEL_GEN(dev_priv) < 9)
		I915_WRITE(HSW_IDICR, I915_READ(HSW_IDICR) | IDIHASHMSK(0xf));

	if (IS_HASWELL(dev))
		I915_WRITE(MI_PREDICATE_RESULT_2, IS_HSW_GT3(dev) ?
			   LOWER_SLICE_ENABLED : LOWER_SLICE_DISABLED);

	if (HAS_PCH_NOP(dev)) {
		if (IS_IVYBRIDGE(dev)) {
			u32 temp = I915_READ(GEN7_MSG_CTL);
			temp &= ~(WAIT_FOR_PCH_FLR_ACK | WAIT_FOR_PCH_RESET_ACK);
			I915_WRITE(GEN7_MSG_CTL, temp);
		} else if (INTEL_INFO(dev)->gen >= 7) {
			u32 temp = I915_READ(HSW_NDE_RSTWRN_OPT);
			temp &= ~RESET_PCH_HANDSHAKE_ENABLE;
			I915_WRITE(HSW_NDE_RSTWRN_OPT, temp);
		}
	}

	i915_gem_init_swizzling(dev);

	/*
	 * At least 830 can leave some of the unused rings
	 * "active" (ie. head != tail) after resume which
	 * will prevent c3 entry. Make sure all unused rings
	 * are totally idle.
	 */
	init_unused_rings(dev);

	BUG_ON(!dev_priv->kernel_context);

	ret = i915_ppgtt_init_hw(dev);
	if (ret) {
		DRM_ERROR("PPGTT enable HW failed %d\n", ret);
		goto out;
	}

	/* Need to do basic initialisation of all rings first: */
	for_each_engine(engine, dev_priv) {
		ret = engine->init_hw(engine);
		if (ret)
			goto out;
	}

	intel_mocs_init_l3cc_table(dev);

	/* We can't enable contexts until all firmware is loaded */
	ret = intel_guc_setup(dev);

out:
	intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
	return ret;
}

bool intel_sanitize_semaphores(struct drm_i915_private *dev_priv, int value)
{
	if (INTEL_INFO(dev_priv)->gen < 6)
		return false;

	if (value >= 0)
		return value;

	/* TODO: make semaphores and Execlists play nicely together */
	if (i915.enable_execlists)
		return false;

#ifdef CONFIG_INTEL_IOMMU
	/* Enable semaphores on SNB when IO remapping is off */
	if (INTEL_INFO(dev_priv)->gen == 6 && intel_iommu_gfx_mapped)
		return false;
#endif

	return true;
}
int i915_gem_init(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	int ret;

	mutex_lock(&dev->struct_mutex);

	if (!i915.enable_execlists) {
		dev_priv->gt.cleanup_engine = intel_engine_cleanup;
		dev_priv->gt.stop_engine = intel_engine_stop;
	} else {
		dev_priv->gt.cleanup_engine = intel_logical_ring_cleanup;
		dev_priv->gt.stop_engine = intel_logical_ring_stop;
	}

	/* This is just a security blanket to placate dragons.
	 * On some systems, we very sporadically observe that the first TLBs
	 * used by the CS may be stale, despite us poking the TLB reset. If
	 * we hold the forcewake during initialisation these problems
	 * just magically go away.
	 */
	intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);

	i915_gem_init_userptr(dev_priv);

	ret = i915_gem_init_ggtt(dev_priv);
	if (ret)
		goto out_unlock;

	ret = i915_gem_context_init(dev);
	if (ret)
		goto out_unlock;

	ret = intel_engines_init(dev);
	if (ret)
		goto out_unlock;

	ret = i915_gem_init_hw(dev);
	if (ret == -EIO) {
		/* Allow engine initialisation to fail by marking the GPU as
		 * wedged. But we only want to do this where the GPU is angry,
		 * for all other failure, such as an allocation failure, bail.
		 */
		DRM_ERROR("Failed to initialize GPU, declaring it wedged\n");
		atomic_or(I915_WEDGED, &dev_priv->gpu_error.reset_counter);
		ret = 0;
	}

out_unlock:
	intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
	mutex_unlock(&dev->struct_mutex);

	return ret;
}

void
i915_gem_cleanup_engines(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_engine_cs *engine;

	for_each_engine(engine, dev_priv)
		dev_priv->gt.cleanup_engine(engine);
}

static void
init_engine_lists(struct intel_engine_cs *engine)
{
	INIT_LIST_HEAD(&engine->request_list);
}
void
i915_gem_load_init_fences(struct drm_i915_private *dev_priv)
{
	struct drm_device *dev = &dev_priv->drm;

	if (INTEL_INFO(dev_priv)->gen >= 7 && !IS_VALLEYVIEW(dev_priv) &&
	    !IS_CHERRYVIEW(dev_priv))
		dev_priv->num_fence_regs = 32;
	else if (INTEL_INFO(dev_priv)->gen >= 4 || IS_I945G(dev_priv) ||
		 IS_I945GM(dev_priv) || IS_G33(dev_priv))
		dev_priv->num_fence_regs = 16;
	else
		dev_priv->num_fence_regs = 8;

	if (intel_vgpu_active(dev_priv))
		dev_priv->num_fence_regs =
				I915_READ(vgtif_reg(avail_rs.fence_num));

	/* Initialize fence registers to zero */
	i915_gem_restore_fences(dev);

	i915_gem_detect_bit_6_swizzle(dev);
}

void
i915_gem_load_init(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	int i;

	dev_priv->objects =
		kmem_cache_create("i915_gem_object",
				  sizeof(struct drm_i915_gem_object), 0,
				  SLAB_HWCACHE_ALIGN,
				  NULL);
	dev_priv->vmas =
		kmem_cache_create("i915_gem_vma",
				  sizeof(struct i915_vma), 0,
				  SLAB_HWCACHE_ALIGN,
				  NULL);
	dev_priv->requests =
		kmem_cache_create("i915_gem_request",
				  sizeof(struct drm_i915_gem_request), 0,
				  SLAB_HWCACHE_ALIGN,
				  NULL);

	INIT_LIST_HEAD(&dev_priv->context_list);
	INIT_LIST_HEAD(&dev_priv->mm.unbound_list);
	INIT_LIST_HEAD(&dev_priv->mm.bound_list);
	INIT_LIST_HEAD(&dev_priv->mm.fence_list);
	for (i = 0; i < I915_NUM_ENGINES; i++)
		init_engine_lists(&dev_priv->engine[i]);
	for (i = 0; i < I915_MAX_NUM_FENCES; i++)
		INIT_LIST_HEAD(&dev_priv->fence_regs[i].lru_list);
	INIT_DELAYED_WORK(&dev_priv->gt.retire_work,
			  i915_gem_retire_work_handler);
	INIT_DELAYED_WORK(&dev_priv->gt.idle_work,
			  i915_gem_idle_work_handler);
	init_waitqueue_head(&dev_priv->gpu_error.wait_queue);
	init_waitqueue_head(&dev_priv->gpu_error.reset_queue);

	dev_priv->relative_constants_mode = I915_EXEC_CONSTANTS_REL_GENERAL;

	init_waitqueue_head(&dev_priv->pending_flip_queue);

	dev_priv->mm.interruptible = true;

	mutex_init(&dev_priv->fb_tracking.lock);
}
void i915_gem_load_cleanup(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);

	kmem_cache_destroy(dev_priv->requests);
	kmem_cache_destroy(dev_priv->vmas);
	kmem_cache_destroy(dev_priv->objects);
}

int i915_gem_freeze_late(struct drm_i915_private *dev_priv)
{
	struct drm_i915_gem_object *obj;

	/* Called just before we write the hibernation image.
	 *
	 * We need to update the domain tracking to reflect that the CPU
	 * will be accessing all the pages to create and restore from the
	 * hibernation, and so upon restoration those pages will be in the
	 * CPU domain.
	 *
	 * To make sure the hibernation image contains the latest state,
	 * we update that state just before writing out the image.
	 */

	list_for_each_entry(obj, &dev_priv->mm.unbound_list, global_list) {
		obj->base.read_domains = I915_GEM_DOMAIN_CPU;
		obj->base.write_domain = I915_GEM_DOMAIN_CPU;
	}

	list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) {
		obj->base.read_domains = I915_GEM_DOMAIN_CPU;
		obj->base.write_domain = I915_GEM_DOMAIN_CPU;
	}

	return 0;
}
void i915_gem_release(struct drm_device *dev, struct drm_file *file)
{
	struct drm_i915_file_private *file_priv = file->driver_priv;
	struct drm_i915_gem_request *request;

	/* Clean up our request list when the client is going away, so that
	 * later retire_requests won't dereference our soon-to-be-gone
	 * file_priv.
	 */
	spin_lock(&file_priv->mm.lock);
	list_for_each_entry(request, &file_priv->mm.request_list, client_list)
		request->file_priv = NULL;
	spin_unlock(&file_priv->mm.lock);

	if (!list_empty(&file_priv->rps.link)) {
		spin_lock(&to_i915(dev)->rps.client_lock);
		list_del(&file_priv->rps.link);
		spin_unlock(&to_i915(dev)->rps.client_lock);
	}
}

int i915_gem_open(struct drm_device *dev, struct drm_file *file)
{
	struct drm_i915_file_private *file_priv;
	int ret;

	DRM_DEBUG_DRIVER("\n");

	file_priv = kzalloc(sizeof(*file_priv), GFP_KERNEL);
	if (!file_priv)
		return -ENOMEM;

	file->driver_priv = file_priv;
	file_priv->dev_priv = to_i915(dev);
	file_priv->file = file;
	INIT_LIST_HEAD(&file_priv->rps.link);

	spin_lock_init(&file_priv->mm.lock);
	INIT_LIST_HEAD(&file_priv->mm.request_list);

	file_priv->bsd_engine = -1;

	ret = i915_gem_context_open(dev, file);
	if (ret)
		kfree(file_priv);

	return ret;
}
/**
 * i915_gem_track_fb - update frontbuffer tracking
 * @old: current GEM buffer for the frontbuffer slots
 * @new: new GEM buffer for the frontbuffer slots
 * @frontbuffer_bits: bitmask of frontbuffer slots
 *
 * This updates the frontbuffer tracking bits @frontbuffer_bits by clearing them
 * from @old and setting them in @new. Both @old and @new can be NULL.
 */
void i915_gem_track_fb(struct drm_i915_gem_object *old,
		       struct drm_i915_gem_object *new,
		       unsigned frontbuffer_bits)
{
	if (old) {
		WARN_ON(!mutex_is_locked(&old->base.dev->struct_mutex));
		WARN_ON(!(old->frontbuffer_bits & frontbuffer_bits));
		old->frontbuffer_bits &= ~frontbuffer_bits;
	}

	if (new) {
		WARN_ON(!mutex_is_locked(&new->base.dev->struct_mutex));
		WARN_ON(new->frontbuffer_bits & frontbuffer_bits);
		new->frontbuffer_bits |= frontbuffer_bits;
	}
}
/* All the new VM stuff */
u64 i915_gem_obj_offset(struct drm_i915_gem_object *o,
			struct i915_address_space *vm)
{
	struct drm_i915_private *dev_priv = to_i915(o->base.dev);
	struct i915_vma *vma;

	WARN_ON(vm == &dev_priv->mm.aliasing_ppgtt->base);

	list_for_each_entry(vma, &o->vma_list, obj_link) {
		if (i915_vma_is_ggtt(vma) &&
		    vma->ggtt_view.type != I915_GGTT_VIEW_NORMAL)
			continue;
		if (vma->vm == vm)
			return vma->node.start;
	}

	WARN(1, "%s vma for this object not found.\n",
	     i915_is_ggtt(vm) ? "global" : "ppgtt");
	return -1;
}

u64 i915_gem_obj_ggtt_offset_view(struct drm_i915_gem_object *o,
				  const struct i915_ggtt_view *view)
{
	struct i915_vma *vma;

	list_for_each_entry(vma, &o->vma_list, obj_link)
		if (i915_vma_is_ggtt(vma) &&
		    i915_ggtt_view_equal(&vma->ggtt_view, view))
			return vma->node.start;

	WARN(1, "global vma for this object not found. (view=%u)\n", view->type);
	return -1;
}
bool i915_gem_obj_bound(struct drm_i915_gem_object *o,
			struct i915_address_space *vm)
{
	struct i915_vma *vma;

	list_for_each_entry(vma, &o->vma_list, obj_link) {
		if (i915_vma_is_ggtt(vma) &&
		    vma->ggtt_view.type != I915_GGTT_VIEW_NORMAL)
			continue;
		if (vma->vm == vm && drm_mm_node_allocated(&vma->node))
			return true;
	}

	return false;
}

bool i915_gem_obj_ggtt_bound_view(struct drm_i915_gem_object *o,
				  const struct i915_ggtt_view *view)
{
	struct i915_vma *vma;

	list_for_each_entry(vma, &o->vma_list, obj_link)
		if (i915_vma_is_ggtt(vma) &&
		    i915_ggtt_view_equal(&vma->ggtt_view, view) &&
		    drm_mm_node_allocated(&vma->node))
			return true;

	return false;
}

unsigned long i915_gem_obj_ggtt_size(struct drm_i915_gem_object *o)
{
	struct i915_vma *vma;

	GEM_BUG_ON(list_empty(&o->vma_list));

	list_for_each_entry(vma, &o->vma_list, obj_link) {
		if (i915_vma_is_ggtt(vma) &&
		    vma->ggtt_view.type == I915_GGTT_VIEW_NORMAL)
			return vma->node.size;
	}

	return 0;
}

bool i915_gem_obj_is_pinned(struct drm_i915_gem_object *obj)
{
	struct i915_vma *vma;
	list_for_each_entry(vma, &obj->vma_list, obj_link)
		if (i915_vma_is_pinned(vma))
			return true;

	return false;
}
/* Like i915_gem_object_get_page(), but mark the returned page dirty */
struct page *
i915_gem_object_get_dirty_page(struct drm_i915_gem_object *obj, int n)
{
	struct page *page;

	/* Only default objects have per-page dirty tracking */
	if (WARN_ON(!i915_gem_object_has_struct_page(obj)))
		return NULL;

	page = i915_gem_object_get_page(obj, n);
	set_page_dirty(page);
	return page;
}

/* Allocate a new GEM object and fill it with the supplied data */
struct drm_i915_gem_object *
i915_gem_object_create_from_data(struct drm_device *dev,
				 const void *data, size_t size)
{
	struct drm_i915_gem_object *obj;
	struct sg_table *sg;
	size_t bytes;
	int ret;

	obj = i915_gem_object_create(dev, round_up(size, PAGE_SIZE));
	if (IS_ERR(obj))
		return obj;

	ret = i915_gem_object_set_to_cpu_domain(obj, true);
	if (ret)
		goto fail;

	ret = i915_gem_object_get_pages(obj);
	if (ret)
		goto fail;

	i915_gem_object_pin_pages(obj);
	sg = obj->pages;
	bytes = sg_copy_from_buffer(sg->sgl, sg->nents, (void *)data, size);
	obj->dirty = 1;		/* Backing store is now out of date */
	i915_gem_object_unpin_pages(obj);

	if (WARN_ON(bytes != size)) {
		DRM_ERROR("Incomplete copy, wrote %zu of %zu", bytes, size);
		ret = -EFAULT;
		goto fail;
	}

	return obj;

fail:
	i915_gem_object_put(obj);
	return ERR_PTR(ret);
}