/*
 * Copyright © 2008 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Eric Anholt <eric@anholt.net>
 *
 */
#include <linux/swap.h>
#include <linux/pci.h>

#define I915_GEM_GPU_DOMAINS	(~(I915_GEM_DOMAIN_CPU | I915_GEM_DOMAIN_GTT))
static void
i915_gem_object_set_to_gpu_domain(struct drm_gem_object *obj,
                                  uint32_t read_domains,
                                  uint32_t write_domain);
static void i915_gem_object_flush_gpu_write_domain(struct drm_gem_object *obj);
static void i915_gem_object_flush_gtt_write_domain(struct drm_gem_object *obj);
static void i915_gem_object_flush_cpu_write_domain(struct drm_gem_object *obj);
static int i915_gem_object_set_to_cpu_domain(struct drm_gem_object *obj,
                                             int write);
static int i915_gem_object_set_cpu_read_domain_range(struct drm_gem_object *obj,
                                                      uint64_t offset,
                                                      uint64_t size);
static void i915_gem_object_set_to_full_cpu_read_domain(struct drm_gem_object *obj);
static int i915_gem_object_get_page_list(struct drm_gem_object *obj);
static void i915_gem_object_free_page_list(struct drm_gem_object *obj);
static int i915_gem_object_wait_rendering(struct drm_gem_object *obj);
static int i915_gem_object_bind_to_gtt(struct drm_gem_object *obj,
                                       unsigned alignment);
static int i915_gem_object_get_fence_reg(struct drm_gem_object *obj, bool write);
static void i915_gem_clear_fence_reg(struct drm_gem_object *obj);
static int i915_gem_evict_something(struct drm_device *dev);
static int i915_gem_phys_pwrite(struct drm_device *dev, struct drm_gem_object *obj,
                                struct drm_i915_gem_pwrite *args,
                                struct drm_file *file_priv);
int i915_gem_do_init(struct drm_device *dev, unsigned long start,
                     unsigned long end)
{
    drm_i915_private_t *dev_priv = dev->dev_private;

    if (start >= end ||
        (start & (PAGE_SIZE - 1)) != 0 ||
        (end & (PAGE_SIZE - 1)) != 0) {
        return -EINVAL;
    }

    drm_mm_init(&dev_priv->mm.gtt_space, start,
                end - start);

    dev->gtt_total = (uint32_t) (end - start);

    return 0;
}
int
i915_gem_init_ioctl(struct drm_device *dev, void *data,
                    struct drm_file *file_priv)
{
    struct drm_i915_gem_init *args = data;
    int ret;

    mutex_lock(&dev->struct_mutex);
    ret = i915_gem_do_init(dev, args->gtt_start, args->gtt_end);
    mutex_unlock(&dev->struct_mutex);

    return ret;
}
int
i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data,
                            struct drm_file *file_priv)
{
    struct drm_i915_gem_get_aperture *args = data;

    if (!(dev->driver->driver_features & DRIVER_GEM))
        return -ENODEV;

    args->aper_size = dev->gtt_total;
    args->aper_available_size = (args->aper_size -
                                 atomic_read(&dev->pin_memory));

    return 0;
}
/**
 * Creates a new mm object and returns a handle to it.
 */
int
i915_gem_create_ioctl(struct drm_device *dev, void *data,
                      struct drm_file *file_priv)
{
    struct drm_i915_gem_create *args = data;
    struct drm_gem_object *obj;
    int ret;
    u32 handle;

    args->size = roundup(args->size, PAGE_SIZE);

    /* Allocate the new object */
    obj = drm_gem_object_alloc(dev, args->size);
    if (obj == NULL)
        return -ENOMEM;

    ret = drm_gem_handle_create(file_priv, obj, &handle);
    mutex_lock(&dev->struct_mutex);
    drm_gem_object_handle_unreference(obj);
    mutex_unlock(&dev->struct_mutex);

    if (ret)
        return ret;

    args->handle = handle;

    return 0;
}
/**
 * Reads data from the object referenced by handle.
 *
 * On error, the contents of *data are undefined.
 */
int
i915_gem_pread_ioctl(struct drm_device *dev, void *data,
                     struct drm_file *file_priv)
{
    struct drm_i915_gem_pread *args = data;
    struct drm_gem_object *obj;
    struct drm_i915_gem_object *obj_priv;
    ssize_t read;
    loff_t offset;
    int ret;

    obj = drm_gem_object_lookup(dev, file_priv, args->handle);
    if (obj == NULL)
        return -EBADF;
    obj_priv = obj->driver_private;

    /* Bounds check source.
     *
     * XXX: This could use review for overflow issues...
     */
    if (args->offset > obj->size || args->size > obj->size ||
        args->offset + args->size > obj->size) {
        drm_gem_object_unreference(obj);
        return -EINVAL;
    }

    mutex_lock(&dev->struct_mutex);

    ret = i915_gem_object_set_cpu_read_domain_range(obj, args->offset,
                                                    args->size);
    if (ret != 0) {
        drm_gem_object_unreference(obj);
        mutex_unlock(&dev->struct_mutex);
        return ret;
    }

    offset = args->offset;

    read = vfs_read(obj->filp, (char __user *)(uintptr_t)args->data_ptr,
                    args->size, &offset);
    if (read != args->size) {
        drm_gem_object_unreference(obj);
        mutex_unlock(&dev->struct_mutex);
        if (read < 0)
            return read;
        else
            return -EINVAL;
    }

    drm_gem_object_unreference(obj);
    mutex_unlock(&dev->struct_mutex);

    return 0;
}
/* This is the fast write path which cannot handle
 * page faults in the source data
 */

static inline int
fast_user_write(struct io_mapping *mapping,
                loff_t page_base, int page_offset,
                char __user *user_data,
                int length)
{
    char *vaddr_atomic;
    unsigned long unwritten;

    vaddr_atomic = io_mapping_map_atomic_wc(mapping, page_base);
    unwritten = __copy_from_user_inatomic_nocache(vaddr_atomic + page_offset,
                                                  user_data, length);
    io_mapping_unmap_atomic(vaddr_atomic);
    if (unwritten)
        return -EFAULT;
    return 0;
}
/* Here's the write path which can sleep for
 * page faults
 */

static inline int
slow_user_write(struct io_mapping *mapping,
                loff_t page_base, int page_offset,
                char __user *user_data,
                int length)
{
    char __iomem *vaddr;
    unsigned long unwritten;

    vaddr = io_mapping_map_wc(mapping, page_base);
    if (vaddr == NULL)
        return -EFAULT;
    unwritten = __copy_from_user(vaddr + page_offset,
                                 user_data, length);
    io_mapping_unmap(vaddr);
    if (unwritten)
        return -EFAULT;
    return 0;
}
static int
i915_gem_gtt_pwrite(struct drm_device *dev, struct drm_gem_object *obj,
                    struct drm_i915_gem_pwrite *args,
                    struct drm_file *file_priv)
{
    struct drm_i915_gem_object *obj_priv = obj->driver_private;
    drm_i915_private_t *dev_priv = dev->dev_private;
    ssize_t remain;
    loff_t offset, page_base;
    char __user *user_data;
    int page_offset, page_length;
    int ret;

    user_data = (char __user *) (uintptr_t) args->data_ptr;
    remain = args->size;
    if (!access_ok(VERIFY_READ, user_data, remain))
        return -EFAULT;

    mutex_lock(&dev->struct_mutex);
    ret = i915_gem_object_pin(obj, 0);
    if (ret) {
        mutex_unlock(&dev->struct_mutex);
        return ret;
    }
    ret = i915_gem_object_set_to_gtt_domain(obj, 1);
    if (ret)
        goto fail;

    obj_priv = obj->driver_private;
    offset = obj_priv->gtt_offset + args->offset;

    while (remain > 0) {
        /* Operation in this page
         *
         * page_base = page offset within aperture
         * page_offset = offset within page
         * page_length = bytes to copy for this page
         */
        page_base = (offset & ~(PAGE_SIZE-1));
        page_offset = offset & (PAGE_SIZE-1);
        page_length = remain;
        if ((page_offset + remain) > PAGE_SIZE)
            page_length = PAGE_SIZE - page_offset;
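        /*
         * Illustrative example of the split above (assuming 4 KiB pages,
         * not taken from the original source): for offset = 0x11234 and
         * remain = 0x1000,
         *   page_base   = 0x11000 (offset rounded down to a page),
         *   page_offset = 0x234   (564 bytes into that page),
         *   page_length = 0xdcc   (4096 - 564, so the copy never crosses
         *                          the page boundary);
         * the next loop iteration continues at page_base 0x12000.
         */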
        ret = fast_user_write (dev_priv->mm.gtt_mapping, page_base,
                               page_offset, user_data, page_length);

        /* If we get a fault while copying data, then (presumably) our
         * source page isn't available.  In this case, use the
         * non-atomic function
         */
        if (ret) {
            ret = slow_user_write (dev_priv->mm.gtt_mapping,
                                   page_base, page_offset,
                                   user_data, page_length);
            if (ret)
                goto fail;
        }

        remain -= page_length;
        user_data += page_length;
        offset += page_length;
    }

fail:
    i915_gem_object_unpin(obj);
    mutex_unlock(&dev->struct_mutex);

    return ret;
}
static int
i915_gem_shmem_pwrite(struct drm_device *dev, struct drm_gem_object *obj,
                      struct drm_i915_gem_pwrite *args,
                      struct drm_file *file_priv)
{
    loff_t offset;
    ssize_t written;
    int ret;

    mutex_lock(&dev->struct_mutex);

    ret = i915_gem_object_set_to_cpu_domain(obj, 1);
    if (ret) {
        mutex_unlock(&dev->struct_mutex);
        return ret;
    }

    offset = args->offset;

    written = vfs_write(obj->filp,
                        (char __user *)(uintptr_t) args->data_ptr,
                        args->size, &offset);
    if (written != args->size) {
        mutex_unlock(&dev->struct_mutex);
        if (written < 0)
            return written;
        else
            return -EINVAL;
    }

    mutex_unlock(&dev->struct_mutex);

    return 0;
}
/**
 * Writes data to the object referenced by handle.
 *
 * On error, the contents of the buffer that were to be modified are undefined.
 */
int
i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
                      struct drm_file *file_priv)
{
    struct drm_i915_gem_pwrite *args = data;
    struct drm_gem_object *obj;
    struct drm_i915_gem_object *obj_priv;
    int ret = 0;

    obj = drm_gem_object_lookup(dev, file_priv, args->handle);
    if (obj == NULL)
        return -EBADF;
    obj_priv = obj->driver_private;

    /* Bounds check destination.
     *
     * XXX: This could use review for overflow issues...
     */
    if (args->offset > obj->size || args->size > obj->size ||
        args->offset + args->size > obj->size) {
        drm_gem_object_unreference(obj);
        return -EINVAL;
    }

    /* We can only do the GTT pwrite on untiled buffers, as otherwise
     * it would end up going through the fenced access, and we'll get
     * different detiling behavior between reading and writing.
     * pread/pwrite currently are reading and writing from the CPU
     * perspective, requiring manual detiling by the client.
     */
    if (obj_priv->phys_obj)
        ret = i915_gem_phys_pwrite(dev, obj, args, file_priv);
    else if (obj_priv->tiling_mode == I915_TILING_NONE &&
             dev->gtt_total != 0)
        ret = i915_gem_gtt_pwrite(dev, obj, args, file_priv);
    else
        ret = i915_gem_shmem_pwrite(dev, obj, args, file_priv);

    if (ret)
        DRM_INFO("pwrite failed %d\n", ret);

    drm_gem_object_unreference(obj);

    return ret;
}
/**
 * Called when user space prepares to use an object with the CPU, either
 * through the mmap ioctl's mapping or a GTT mapping.
 */
int
i915_gem_set_domain_ioctl(struct drm_device *dev, void *data,
                          struct drm_file *file_priv)
{
    struct drm_i915_gem_set_domain *args = data;
    struct drm_gem_object *obj;
    uint32_t read_domains = args->read_domains;
    uint32_t write_domain = args->write_domain;
    int ret;

    if (!(dev->driver->driver_features & DRIVER_GEM))
        return -ENODEV;

    /* Only handle setting domains to types used by the CPU. */
    if (write_domain & ~(I915_GEM_DOMAIN_CPU | I915_GEM_DOMAIN_GTT))
        return -EINVAL;

    if (read_domains & ~(I915_GEM_DOMAIN_CPU | I915_GEM_DOMAIN_GTT))
        return -EINVAL;

    /* Having something in the write domain implies it's in the read
     * domain, and only that read domain.  Enforce that in the request.
     */
    if (write_domain != 0 && read_domains != write_domain)
        return -EINVAL;

    obj = drm_gem_object_lookup(dev, file_priv, args->handle);
    if (obj == NULL)
        return -EBADF;

    mutex_lock(&dev->struct_mutex);

    DRM_INFO("set_domain_ioctl %p(%d), %08x %08x\n",
             obj, obj->size, read_domains, write_domain);

    if (read_domains & I915_GEM_DOMAIN_GTT) {
        ret = i915_gem_object_set_to_gtt_domain(obj, write_domain != 0);

        /* Silently promote "you're not bound, there was nothing to do"
         * to success, since the client was just asking us to
         * make sure everything was done.
         */
        if (ret == -EINVAL)
            ret = 0;
    } else {
        ret = i915_gem_object_set_to_cpu_domain(obj, write_domain != 0);
    }

    drm_gem_object_unreference(obj);
    mutex_unlock(&dev->struct_mutex);
    return ret;
}
/**
 * Called when user space has done writes to this buffer
 */
int
i915_gem_sw_finish_ioctl(struct drm_device *dev, void *data,
                         struct drm_file *file_priv)
{
    struct drm_i915_gem_sw_finish *args = data;
    struct drm_gem_object *obj;
    struct drm_i915_gem_object *obj_priv;
    int ret = 0;

    if (!(dev->driver->driver_features & DRIVER_GEM))
        return -ENODEV;

    mutex_lock(&dev->struct_mutex);
    obj = drm_gem_object_lookup(dev, file_priv, args->handle);
    if (obj == NULL) {
        mutex_unlock(&dev->struct_mutex);
        return -EBADF;
    }

    DRM_INFO("%s: sw_finish %d (%p %d)\n",
             __func__, args->handle, obj, obj->size);

    obj_priv = obj->driver_private;

    /* Pinned buffers may be scanout, so flush the cache */
    if (obj_priv->pin_count)
        i915_gem_object_flush_cpu_write_domain(obj);

    drm_gem_object_unreference(obj);
    mutex_unlock(&dev->struct_mutex);
    return ret;
}
/**
 * Maps the contents of an object, returning the address it is mapped
 * into.
 *
 * While the mapping holds a reference on the contents of the object, it doesn't
 * imply a ref on the object itself.
 */
int
i915_gem_mmap_ioctl(struct drm_device *dev, void *data,
                    struct drm_file *file_priv)
{
    struct drm_i915_gem_mmap *args = data;
    struct drm_gem_object *obj;
    loff_t offset;
    unsigned long addr;

    if (!(dev->driver->driver_features & DRIVER_GEM))
        return -ENODEV;

    obj = drm_gem_object_lookup(dev, file_priv, args->handle);
    if (obj == NULL)
        return -EBADF;

    offset = args->offset;

    down_write(&current->mm->mmap_sem);
    addr = do_mmap(obj->filp, 0, args->size,
                   PROT_READ | PROT_WRITE, MAP_SHARED,
                   args->offset);
    up_write(&current->mm->mmap_sem);
    mutex_lock(&dev->struct_mutex);
    drm_gem_object_unreference(obj);
    mutex_unlock(&dev->struct_mutex);
    if (IS_ERR((void *)addr))
        return addr;

    args->addr_ptr = (uint64_t) addr;

    return 0;
}
/**
 * i915_gem_fault - fault a page into the GTT
 * vma: VMA in question
 * vmf: fault info
 *
 * The fault handler is set up by drm_gem_mmap() when a object is GTT mapped
 * from userspace.  The fault handler takes care of binding the object to
 * the GTT (if needed), allocating and programming a fence register (again,
 * only if needed based on whether the old reg is still valid or the object
 * is tiled) and inserting a new PTE into the faulting process.
 *
 * Note that the faulting process may involve evicting existing objects
 * from the GTT and/or fence registers to make room.  So performance may
 * suffer if the GTT working set is large or there are few fence registers
 * left.
 */
int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
    struct drm_gem_object *obj = vma->vm_private_data;
    struct drm_device *dev = obj->dev;
    struct drm_i915_private *dev_priv = dev->dev_private;
    struct drm_i915_gem_object *obj_priv = obj->driver_private;
    pgoff_t page_offset;
    unsigned long pfn;
    int ret = 0;
    bool write = !!(vmf->flags & FAULT_FLAG_WRITE);

    /* We don't use vmf->pgoff since that has the fake offset */
    page_offset = ((unsigned long)vmf->virtual_address - vma->vm_start) >>
        PAGE_SHIFT;

    /* Now bind it into the GTT if needed */
    mutex_lock(&dev->struct_mutex);
    if (!obj_priv->gtt_space) {
        ret = i915_gem_object_bind_to_gtt(obj, obj_priv->gtt_alignment);
        if (ret) {
            mutex_unlock(&dev->struct_mutex);
            return VM_FAULT_SIGBUS;
        }
        list_add(&obj_priv->list, &dev_priv->mm.inactive_list);
    }

    /* Need a new fence register? */
    if (obj_priv->fence_reg == I915_FENCE_REG_NONE &&
        obj_priv->tiling_mode != I915_TILING_NONE) {
        ret = i915_gem_object_get_fence_reg(obj, write);
        if (ret) {
            mutex_unlock(&dev->struct_mutex);
            return VM_FAULT_SIGBUS;
        }
    }

    pfn = ((dev->agp->base + obj_priv->gtt_offset) >> PAGE_SHIFT) +
        page_offset;

    /* Finally, remap it using the new GTT offset */
    ret = vm_insert_pfn(vma, (unsigned long)vmf->virtual_address, pfn);

    mutex_unlock(&dev->struct_mutex);

    if (ret)
        return VM_FAULT_SIGBUS;

    return VM_FAULT_NOPAGE;
}
/**
 * i915_gem_create_mmap_offset - create a fake mmap offset for an object
 * @obj: obj in question
 *
 * GEM memory mapping works by handing back to userspace a fake mmap offset
 * it can use in a subsequent mmap(2) call.  The DRM core code then looks
 * up the object based on the offset and sets up the various memory mapping
 * structures.
 *
 * This routine allocates and attaches a fake offset for @obj.
 */
static int
i915_gem_create_mmap_offset(struct drm_gem_object *obj)
{
    struct drm_device *dev = obj->dev;
    struct drm_gem_mm *mm = dev->mm_private;
    struct drm_i915_gem_object *obj_priv = obj->driver_private;
    struct drm_map_list *list;
    struct drm_map *map;
    int ret = 0;

    /* Set the object up for mmap'ing */
    list = &obj->map_list;
    list->map = drm_calloc(1, sizeof(struct drm_map_list),
                           DRM_MEM_DRIVER);
    if (!list->map)
        return -ENOMEM;

    map = list->map;
    map->type = _DRM_GEM;
    map->size = obj->size;
    map->handle = obj;

    /* Get a DRM GEM mmap offset allocated... */
    list->file_offset_node = drm_mm_search_free(&mm->offset_manager,
                                                obj->size / PAGE_SIZE, 0, 0);
    if (!list->file_offset_node) {
        DRM_ERROR("failed to allocate offset for bo %d\n", obj->name);
        ret = -ENOMEM;
        goto out_free_list;
    }

    list->file_offset_node = drm_mm_get_block(list->file_offset_node,
                                              obj->size / PAGE_SIZE, 0);
    if (!list->file_offset_node) {
        ret = -ENOMEM;
        goto out_free_list;
    }

    list->hash.key = list->file_offset_node->start;
    if (drm_ht_insert_item(&mm->offset_hash, &list->hash)) {
        DRM_ERROR("failed to add to map hash\n");
        goto out_free_mm;
    }

    /* By now we should be all set, any drm_mmap request on the offset
     * below will get to our mmap & fault handler */
    obj_priv->mmap_offset = ((uint64_t) list->hash.key) << PAGE_SHIFT;

    return 0;

out_free_mm:
    drm_mm_put_block(list->file_offset_node);
out_free_list:
    drm_free(list->map, sizeof(struct drm_map_list), DRM_MEM_DRIVER);

    return ret;
}
static void
i915_gem_free_mmap_offset(struct drm_gem_object *obj)
{
    struct drm_device *dev = obj->dev;
    struct drm_i915_gem_object *obj_priv = obj->driver_private;
    struct drm_gem_mm *mm = dev->mm_private;
    struct drm_map_list *list;

    list = &obj->map_list;
    drm_ht_remove_item(&mm->offset_hash, &list->hash);

    if (list->file_offset_node) {
        drm_mm_put_block(list->file_offset_node);
        list->file_offset_node = NULL;
    }

    if (list->map) {
        drm_free(list->map, sizeof(struct drm_map), DRM_MEM_DRIVER);
        list->map = NULL;
    }

    obj_priv->mmap_offset = 0;
}
/**
 * i915_gem_get_gtt_alignment - return required GTT alignment for an object
 * @obj: object to check
 *
 * Return the required GTT alignment for an object, taking into account
 * potential fence register mapping if needed.
 */
static uint32_t
i915_gem_get_gtt_alignment(struct drm_gem_object *obj)
{
    struct drm_device *dev = obj->dev;
    struct drm_i915_gem_object *obj_priv = obj->driver_private;
    int start, i;

    /*
     * Minimum alignment is 4k (GTT page size), but might be greater
     * if a fence register is needed for the object.
     */
    if (IS_I965G(dev) || obj_priv->tiling_mode == I915_TILING_NONE)
        return 4096;

    /*
     * Previous chips need to be aligned to the size of the smallest
     * fence register that can contain the object.
     */
    if (IS_I9XX(dev))
        start = 1024*1024;
    else
        start = 512*1024;

    for (i = start; i < obj->size; i <<= 1)
        ;

    return i;
}
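/*
 * Note on the loop in i915_gem_get_gtt_alignment() above (illustrative,
 * assuming the usual pre-965 minimum fence sizes): i doubles from the
 * chip's minimum fence size until it covers the whole object, so the
 * result is the smallest power-of-two fence size that is >= obj->size.
 * For example, a 300 KiB tiled object with a 512 KiB minimum stays at a
 * 512 KiB alignment, while a 3 MiB object would be rounded up to 4 MiB.
 */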
/**
 * i915_gem_mmap_gtt_ioctl - prepare an object for GTT mmap'ing
 * @dev: DRM device
 * @data: GTT mapping ioctl data
 * @file_priv: GEM object info
 *
 * Simply returns the fake offset to userspace so it can mmap it.
 * The mmap call will end up in drm_gem_mmap(), which will set things
 * up so we can get faults in the handler above.
 *
 * The fault handler will take care of binding the object into the GTT
 * (since it may have been evicted to make room for something), allocating
 * a fence register, and mapping the appropriate aperture address into
 * userspace.
 */
int
i915_gem_mmap_gtt_ioctl(struct drm_device *dev, void *data,
                        struct drm_file *file_priv)
{
    struct drm_i915_gem_mmap_gtt *args = data;
    struct drm_i915_private *dev_priv = dev->dev_private;
    struct drm_gem_object *obj;
    struct drm_i915_gem_object *obj_priv;
    int ret;

    if (!(dev->driver->driver_features & DRIVER_GEM))
        return -ENODEV;

    obj = drm_gem_object_lookup(dev, file_priv, args->handle);
    if (obj == NULL)
        return -EBADF;

    mutex_lock(&dev->struct_mutex);

    obj_priv = obj->driver_private;

    if (!obj_priv->mmap_offset) {
        ret = i915_gem_create_mmap_offset(obj);
        if (ret) {
            drm_gem_object_unreference(obj);
            mutex_unlock(&dev->struct_mutex);
            return ret;
        }
    }

    args->offset = obj_priv->mmap_offset;

    obj_priv->gtt_alignment = i915_gem_get_gtt_alignment(obj);

    /* Make sure the alignment is correct for fence regs etc */
    if (obj_priv->agp_mem &&
        (obj_priv->gtt_offset & (obj_priv->gtt_alignment - 1))) {
        drm_gem_object_unreference(obj);
        mutex_unlock(&dev->struct_mutex);
        return -EINVAL;
    }

    /*
     * Pull it into the GTT so that we have a page list (makes the
     * initial fault faster and any subsequent flushing possible).
     */
    if (!obj_priv->agp_mem) {
        ret = i915_gem_object_bind_to_gtt(obj, obj_priv->gtt_alignment);
        if (ret) {
            drm_gem_object_unreference(obj);
            mutex_unlock(&dev->struct_mutex);
            return ret;
        }
        list_add(&obj_priv->list, &dev_priv->mm.inactive_list);
    }

    drm_gem_object_unreference(obj);
    mutex_unlock(&dev->struct_mutex);

    return 0;
}
static void
i915_gem_object_free_page_list(struct drm_gem_object *obj)
{
    struct drm_i915_gem_object *obj_priv = obj->driver_private;
    int page_count = obj->size / PAGE_SIZE;
    int i;

    if (obj_priv->page_list == NULL)
        return;

    for (i = 0; i < page_count; i++)
        if (obj_priv->page_list[i] != NULL) {
            if (obj_priv->dirty)
                set_page_dirty(obj_priv->page_list[i]);
            mark_page_accessed(obj_priv->page_list[i]);
            page_cache_release(obj_priv->page_list[i]);
        }
    obj_priv->dirty = 0;

    drm_free(obj_priv->page_list,
             page_count * sizeof(struct page *),
             DRM_MEM_DRIVER);
    obj_priv->page_list = NULL;
}
static void
i915_gem_object_move_to_active(struct drm_gem_object *obj, uint32_t seqno)
{
    struct drm_device *dev = obj->dev;
    drm_i915_private_t *dev_priv = dev->dev_private;
    struct drm_i915_gem_object *obj_priv = obj->driver_private;

    /* Add a reference if we're newly entering the active list. */
    if (!obj_priv->active) {
        drm_gem_object_reference(obj);
        obj_priv->active = 1;
    }
    /* Move from whatever list we were on to the tail of execution. */
    list_move_tail(&obj_priv->list,
                   &dev_priv->mm.active_list);
    obj_priv->last_rendering_seqno = seqno;
}
static void
i915_gem_object_move_to_flushing(struct drm_gem_object *obj)
{
    struct drm_device *dev = obj->dev;
    drm_i915_private_t *dev_priv = dev->dev_private;
    struct drm_i915_gem_object *obj_priv = obj->driver_private;

    BUG_ON(!obj_priv->active);
    list_move_tail(&obj_priv->list, &dev_priv->mm.flushing_list);
    obj_priv->last_rendering_seqno = 0;
}
static void
i915_gem_object_move_to_inactive(struct drm_gem_object *obj)
{
    struct drm_device *dev = obj->dev;
    drm_i915_private_t *dev_priv = dev->dev_private;
    struct drm_i915_gem_object *obj_priv = obj->driver_private;

    i915_verify_inactive(dev, __FILE__, __LINE__);
    if (obj_priv->pin_count != 0)
        list_del_init(&obj_priv->list);
    else
        list_move_tail(&obj_priv->list, &dev_priv->mm.inactive_list);

    obj_priv->last_rendering_seqno = 0;
    if (obj_priv->active) {
        obj_priv->active = 0;
        drm_gem_object_unreference(obj);
    }
    i915_verify_inactive(dev, __FILE__, __LINE__);
}
/**
 * Creates a new sequence number, emitting a write of it to the status page
 * plus an interrupt, which will trigger i915_user_interrupt_handler.
 *
 * Must be called with struct_lock held.
 *
 * Returned sequence numbers are nonzero on success.
 */
static uint32_t
i915_add_request(struct drm_device *dev, uint32_t flush_domains)
{
    drm_i915_private_t *dev_priv = dev->dev_private;
    struct drm_i915_gem_request *request;
    uint32_t seqno;
    int was_empty;
    RING_LOCALS;

    request = drm_calloc(1, sizeof(*request), DRM_MEM_DRIVER);
    if (request == NULL)
        return 0;

    /* Grab the seqno we're going to make this request be, and bump the
     * next (skipping 0 so it can be the reserved no-seqno value).
     */
    seqno = dev_priv->mm.next_gem_seqno;
    dev_priv->mm.next_gem_seqno++;
    if (dev_priv->mm.next_gem_seqno == 0)
        dev_priv->mm.next_gem_seqno++;

    BEGIN_LP_RING(4);
    OUT_RING(MI_STORE_DWORD_INDEX);
    OUT_RING(I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
    OUT_RING(seqno);

    OUT_RING(MI_USER_INTERRUPT);
    ADVANCE_LP_RING();

    DRM_DEBUG("%d\n", seqno);

    request->seqno = seqno;
    request->emitted_jiffies = jiffies;
    was_empty = list_empty(&dev_priv->mm.request_list);
    list_add_tail(&request->list, &dev_priv->mm.request_list);

    /* Associate any objects on the flushing list matching the write
     * domain we're flushing with our flush.
     */
    if (flush_domains != 0) {
        struct drm_i915_gem_object *obj_priv, *next;

        list_for_each_entry_safe(obj_priv, next,
                                 &dev_priv->mm.flushing_list, list) {
            struct drm_gem_object *obj = obj_priv->obj;

            if ((obj->write_domain & flush_domains) ==
                obj->write_domain) {
                obj->write_domain = 0;
                i915_gem_object_move_to_active(obj, seqno);
            }
        }
    }

    if (was_empty && !dev_priv->mm.suspended)
        schedule_delayed_work(&dev_priv->mm.retire_work, HZ);
    return seqno;
}
/**
 * Command execution barrier
 *
 * Ensures that all commands in the ring are finished
 * before signalling the CPU
 */
static uint32_t
i915_retire_commands(struct drm_device *dev)
{
    drm_i915_private_t *dev_priv = dev->dev_private;
    uint32_t cmd = MI_FLUSH | MI_NO_WRITE_FLUSH;
    uint32_t flush_domains = 0;
    RING_LOCALS;

    /* The sampler always gets flushed on i965 (sigh) */
    if (IS_I965G(dev))
        flush_domains |= I915_GEM_DOMAIN_SAMPLER;
    BEGIN_LP_RING(2);
    OUT_RING(cmd);
    OUT_RING(0); /* noop */
    ADVANCE_LP_RING();
    return flush_domains;
}
/**
 * Moves buffers associated only with the given active seqno from the active
 * to inactive list, potentially freeing them.
 */
static void
i915_gem_retire_request(struct drm_device *dev,
                        struct drm_i915_gem_request *request)
{
    drm_i915_private_t *dev_priv = dev->dev_private;

    /* Move any buffers on the active list that are no longer referenced
     * by the ringbuffer to the flushing/inactive lists as appropriate.
     */
    while (!list_empty(&dev_priv->mm.active_list)) {
        struct drm_gem_object *obj;
        struct drm_i915_gem_object *obj_priv;

        obj_priv = list_first_entry(&dev_priv->mm.active_list,
                                    struct drm_i915_gem_object,
                                    list);
        obj = obj_priv->obj;

        /* If the seqno being retired doesn't match the oldest in the
         * list, then the oldest in the list must still be newer than
         * this request.
         */
        if (obj_priv->last_rendering_seqno != request->seqno)
            return;

        DRM_INFO("%s: retire %d moves to inactive list %p\n",
                 __func__, request->seqno, obj);

        if (obj->write_domain != 0)
            i915_gem_object_move_to_flushing(obj);
        else
            i915_gem_object_move_to_inactive(obj);
    }
}
/**
 * Returns true if seq1 is later than seq2.
 */
static int
i915_seqno_passed(uint32_t seq1, uint32_t seq2)
{
    return (int32_t)(seq1 - seq2) >= 0;
}
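/*
 * The signed subtraction above keeps the comparison correct across 32-bit
 * seqno wraparound.  For example (illustrative values), with
 * seq1 = 0x00000002 and seq2 = 0xfffffffe, seq1 - seq2 == 4, so the newer
 * post-wrap seqno is still reported as having passed the older one, as
 * long as the two values are less than 2^31 apart.
 */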
uint32_t
i915_get_gem_seqno(struct drm_device *dev)
{
    drm_i915_private_t *dev_priv = dev->dev_private;

    return READ_HWSP(dev_priv, I915_GEM_HWS_INDEX);
}
/**
 * This function clears the request list as sequence numbers are passed.
 */
void
i915_gem_retire_requests(struct drm_device *dev)
{
    drm_i915_private_t *dev_priv = dev->dev_private;
    uint32_t seqno;

    seqno = i915_get_gem_seqno(dev);

    while (!list_empty(&dev_priv->mm.request_list)) {
        struct drm_i915_gem_request *request;
        uint32_t retiring_seqno;

        request = list_first_entry(&dev_priv->mm.request_list,
                                   struct drm_i915_gem_request,
                                   list);
        retiring_seqno = request->seqno;

        if (i915_seqno_passed(seqno, retiring_seqno) ||
            dev_priv->mm.wedged) {
            i915_gem_retire_request(dev, request);

            list_del(&request->list);
            drm_free(request, sizeof(*request), DRM_MEM_DRIVER);
        } else
            break;
    }
}
void
i915_gem_retire_work_handler(struct work_struct *work)
{
    drm_i915_private_t *dev_priv;
    struct drm_device *dev;

    dev_priv = container_of(work, drm_i915_private_t,
                            mm.retire_work.work);
    dev = dev_priv->dev;

    mutex_lock(&dev->struct_mutex);
    i915_gem_retire_requests(dev);
    if (!dev_priv->mm.suspended &&
        !list_empty(&dev_priv->mm.request_list))
        schedule_delayed_work(&dev_priv->mm.retire_work, HZ);
    mutex_unlock(&dev->struct_mutex);
}
/**
 * Waits for a sequence number to be signaled, and cleans up the
 * request and object lists appropriately for that event.
 */
static int
i915_wait_request(struct drm_device *dev, uint32_t seqno)
{
    drm_i915_private_t *dev_priv = dev->dev_private;
    int ret = 0;

    if (!i915_seqno_passed(i915_get_gem_seqno(dev), seqno)) {
        dev_priv->mm.waiting_gem_seqno = seqno;
        i915_user_irq_get(dev);
        ret = wait_event_interruptible(dev_priv->irq_queue,
                                       i915_seqno_passed(i915_get_gem_seqno(dev),
                                                         seqno) ||
                                       dev_priv->mm.wedged);
        i915_user_irq_put(dev);
        dev_priv->mm.waiting_gem_seqno = 0;
    }
    if (dev_priv->mm.wedged)
        ret = -EIO;

    if (ret && ret != -ERESTARTSYS)
        DRM_ERROR("%s returns %d (awaiting %d at %d)\n",
                  __func__, ret, seqno, i915_get_gem_seqno(dev));

    /* Directly dispatch request retiring.  While we have the work queue
     * to handle this, the waiter on a request often wants an associated
     * buffer to have made it to the inactive list, and we would need
     * a separate wait queue to handle that.
     */
    if (ret == 0)
        i915_gem_retire_requests(dev);

    return ret;
}
static void
i915_gem_flush(struct drm_device *dev,
               uint32_t invalidate_domains,
               uint32_t flush_domains)
{
    drm_i915_private_t *dev_priv = dev->dev_private;
    uint32_t cmd;
    RING_LOCALS;

    DRM_INFO("%s: invalidate %08x flush %08x\n", __func__,
             invalidate_domains, flush_domains);

    if (flush_domains & I915_GEM_DOMAIN_CPU)
        drm_agp_chipset_flush(dev);

    if ((invalidate_domains | flush_domains) & ~(I915_GEM_DOMAIN_CPU |
                                                 I915_GEM_DOMAIN_GTT)) {
        /*
         * read/write caches:
         *
         * I915_GEM_DOMAIN_RENDER is always invalidated, but is
         * only flushed if MI_NO_WRITE_FLUSH is unset.  On 965, it is
         * also flushed at 2d versus 3d pipeline switches.
         *
         * read-only caches:
         *
         * I915_GEM_DOMAIN_SAMPLER is flushed on pre-965 if
         * MI_READ_FLUSH is set, and is always flushed on 965.
         *
         * I915_GEM_DOMAIN_COMMAND may not exist?
         *
         * I915_GEM_DOMAIN_INSTRUCTION, which exists on 965, is
         * invalidated when MI_EXE_FLUSH is set.
         *
         * I915_GEM_DOMAIN_VERTEX, which exists on 965, is
         * invalidated with every MI_FLUSH.
         *
         * TLBs:
         *
         * On 965, TLBs associated with I915_GEM_DOMAIN_COMMAND
         * and I915_GEM_DOMAIN_CPU are invalidated at PTE write and
         * I915_GEM_DOMAIN_RENDER and I915_GEM_DOMAIN_SAMPLER
         * are flushed at any MI_FLUSH.
         */

        cmd = MI_FLUSH | MI_NO_WRITE_FLUSH;
        if ((invalidate_domains|flush_domains) &
            I915_GEM_DOMAIN_RENDER)
            cmd &= ~MI_NO_WRITE_FLUSH;
        if (!IS_I965G(dev)) {
            /*
             * On the 965, the sampler cache always gets flushed
             * and this bit is reserved.
             */
            if (invalidate_domains & I915_GEM_DOMAIN_SAMPLER)
                cmd |= MI_READ_FLUSH;
        }
        if (invalidate_domains & I915_GEM_DOMAIN_INSTRUCTION)
            cmd |= MI_EXE_FLUSH;

        DRM_INFO("%s: queue flush %08x to ring\n", __func__, cmd);

        BEGIN_LP_RING(2);
        OUT_RING(cmd);
        OUT_RING(0); /* noop */
        ADVANCE_LP_RING();
    }
}
/**
 * Ensures that all rendering to the object has completed and the object is
 * safe to unbind from the GTT or access from the CPU.
 */
static int
i915_gem_object_wait_rendering(struct drm_gem_object *obj)
{
    struct drm_device *dev = obj->dev;
    struct drm_i915_gem_object *obj_priv = obj->driver_private;
    int ret;

    /* This function only exists to support waiting for existing rendering,
     * not for emitting required flushes.
     */
    BUG_ON((obj->write_domain & I915_GEM_GPU_DOMAINS) != 0);

    /* If there is rendering queued on the buffer being evicted, wait for
     * it.
     */
    if (obj_priv->active) {
        DRM_INFO("%s: object %p wait for seqno %08x\n",
                 __func__, obj, obj_priv->last_rendering_seqno);
        ret = i915_wait_request(dev, obj_priv->last_rendering_seqno);
        if (ret != 0)
            return ret;
    }

    return 0;
}
/**
 * Unbinds an object from the GTT aperture.
 */
int
i915_gem_object_unbind(struct drm_gem_object *obj)
{
    struct drm_device *dev = obj->dev;
    struct drm_i915_gem_object *obj_priv = obj->driver_private;
    loff_t offset;
    int ret = 0;

    DRM_INFO("%s:%d %p\n", __func__, __LINE__, obj);
    DRM_INFO("gtt_space %p\n", obj_priv->gtt_space);

    if (obj_priv->gtt_space == NULL)
        return 0;

    if (obj_priv->pin_count != 0) {
        DRM_ERROR("Attempting to unbind pinned buffer\n");
        return -EINVAL;
    }

    /* Move the object to the CPU domain to ensure that
     * any possible CPU writes while it's not in the GTT
     * are flushed when we go to remap it.  This will
     * also ensure that all pending GPU writes are finished
     * before we unbind.
     */
    ret = i915_gem_object_set_to_cpu_domain(obj, 1);
    if (ret) {
        if (ret != -ERESTARTSYS)
            DRM_ERROR("set_domain failed: %d\n", ret);
        return ret;
    }

    if (obj_priv->agp_mem != NULL) {
        drm_unbind_agp(obj_priv->agp_mem);
        drm_free_agp(obj_priv->agp_mem, obj->size / PAGE_SIZE);
        obj_priv->agp_mem = NULL;
    }

    BUG_ON(obj_priv->active);

    /* blow away mappings if mapped through GTT */
    offset = ((loff_t) obj->map_list.hash.key) << PAGE_SHIFT;
    if (dev->dev_mapping)
        unmap_mapping_range(dev->dev_mapping, offset, obj->size, 1);

    if (obj_priv->fence_reg != I915_FENCE_REG_NONE)
        i915_gem_clear_fence_reg(obj);

    i915_gem_object_free_page_list(obj);

    if (obj_priv->gtt_space) {
        atomic_dec(&dev->gtt_count);
        atomic_sub(obj->size, &dev->gtt_memory);

        drm_mm_put_block(obj_priv->gtt_space);
        obj_priv->gtt_space = NULL;
    }

    /* Remove ourselves from the LRU list if present. */
    if (!list_empty(&obj_priv->list))
        list_del_init(&obj_priv->list);

    return 0;
}
static int
i915_gem_evict_something(struct drm_device *dev)
{
    drm_i915_private_t *dev_priv = dev->dev_private;
    struct drm_gem_object *obj;
    struct drm_i915_gem_object *obj_priv;
    int ret = 0;

    for (;;) {
        /* If there's an inactive buffer available now, grab it
         * and be done.
         */
        if (!list_empty(&dev_priv->mm.inactive_list)) {
            obj_priv = list_first_entry(&dev_priv->mm.inactive_list,
                                        struct drm_i915_gem_object,
                                        list);
            obj = obj_priv->obj;
            BUG_ON(obj_priv->pin_count != 0);
            DRM_INFO("%s: evicting %p\n", __func__, obj);
            BUG_ON(obj_priv->active);

            /* Wait on the rendering and unbind the buffer. */
            ret = i915_gem_object_unbind(obj);
            break;
        }

        /* If we didn't get anything, but the ring is still processing
         * things, wait for one of those things to finish and hopefully
         * leave us a buffer to evict.
         */
        if (!list_empty(&dev_priv->mm.request_list)) {
            struct drm_i915_gem_request *request;

            request = list_first_entry(&dev_priv->mm.request_list,
                                       struct drm_i915_gem_request,
                                       list);

            ret = i915_wait_request(dev, request->seqno);
            if (ret)
                break;

            /* if waiting caused an object to become inactive,
             * then loop around and wait for it. Otherwise, we
             * assume that waiting freed and unbound something,
             * so there should now be some space in the GTT
             */
            if (!list_empty(&dev_priv->mm.inactive_list))
                continue;
            break;
        }

        /* If we didn't have anything on the request list but there
         * are buffers awaiting a flush, emit one and try again.
         * When we wait on it, those buffers waiting for that flush
         * will get moved to inactive.
         */
        if (!list_empty(&dev_priv->mm.flushing_list)) {
            obj_priv = list_first_entry(&dev_priv->mm.flushing_list,
                                        struct drm_i915_gem_object,
                                        list);
            obj = obj_priv->obj;

            i915_gem_flush(dev,
                           obj->write_domain,
                           obj->write_domain);
            i915_add_request(dev, obj->write_domain);

            obj = NULL;
            continue;
        }

        DRM_ERROR("inactive empty %d request empty %d "
                  "flushing empty %d\n",
                  list_empty(&dev_priv->mm.inactive_list),
                  list_empty(&dev_priv->mm.request_list),
                  list_empty(&dev_priv->mm.flushing_list));
        /* If we didn't do any of the above, there's nothing to be done
         * and we just can't fit it in.
         */
        return -ENOMEM;
    }
    return ret;
}
static int
i915_gem_evict_everything(struct drm_device *dev)
{
    int ret;

    for (;;) {
        ret = i915_gem_evict_something(dev);
        if (ret != 0)
            break;
    }
    return ret;
}
static int
i915_gem_object_get_page_list(struct drm_gem_object *obj)
{
    struct drm_i915_gem_object *obj_priv = obj->driver_private;
    int page_count, i;
    struct address_space *mapping;
    struct inode *inode;
    struct page *page;
    int ret;

    if (obj_priv->page_list)
        return 0;

    /* Get the list of pages out of our struct file.  They'll be pinned
     * at this point until we release them.
     */
    page_count = obj->size / PAGE_SIZE;
    BUG_ON(obj_priv->page_list != NULL);
    obj_priv->page_list = drm_calloc(page_count, sizeof(struct page *),
                                     DRM_MEM_DRIVER);
    if (obj_priv->page_list == NULL) {
        DRM_ERROR("Failed to allocate page list\n");
        return -ENOMEM;
    }

    inode = obj->filp->f_path.dentry->d_inode;
    mapping = inode->i_mapping;
    for (i = 0; i < page_count; i++) {
        page = read_mapping_page(mapping, i, NULL);
        if (IS_ERR(page)) {
            ret = PTR_ERR(page);
            DRM_ERROR("read_mapping_page failed: %d\n", ret);
            i915_gem_object_free_page_list(obj);
            return ret;
        }
        obj_priv->page_list[i] = page;
    }
    return 0;
}
static void i965_write_fence_reg(struct drm_i915_fence_reg *reg)
{
    struct drm_gem_object *obj = reg->obj;
    struct drm_device *dev = obj->dev;
    drm_i915_private_t *dev_priv = dev->dev_private;
    struct drm_i915_gem_object *obj_priv = obj->driver_private;
    int regnum = obj_priv->fence_reg;
    uint64_t val;

    val = (uint64_t)((obj_priv->gtt_offset + obj->size - 4096) &
                     0xfffff000) << 32;
    val |= obj_priv->gtt_offset & 0xfffff000;
    val |= ((obj_priv->stride / 128) - 1) << I965_FENCE_PITCH_SHIFT;
    if (obj_priv->tiling_mode == I915_TILING_Y)
        val |= 1 << I965_FENCE_TILING_Y_SHIFT;
    val |= I965_FENCE_REG_VALID;

    I915_WRITE64(FENCE_REG_965_0 + (regnum * 8), val);
}
static void i915_write_fence_reg(struct drm_i915_fence_reg *reg)
{
    struct drm_gem_object *obj = reg->obj;
    struct drm_device *dev = obj->dev;
    drm_i915_private_t *dev_priv = dev->dev_private;
    struct drm_i915_gem_object *obj_priv = obj->driver_private;
    int regnum = obj_priv->fence_reg;
    int tile_width;
    uint32_t val;
    uint32_t pitch_val;

    if ((obj_priv->gtt_offset & ~I915_FENCE_START_MASK) ||
        (obj_priv->gtt_offset & (obj->size - 1))) {
        WARN(1, "%s: object 0x%08x not 1M or size (0x%zx) aligned\n",
             __func__, obj_priv->gtt_offset, obj->size);
        return;
    }

    if (obj_priv->tiling_mode == I915_TILING_Y &&
        HAS_128_BYTE_Y_TILING(dev))
        tile_width = 128;
    else
        tile_width = 512;

    /* Note: pitch better be a power of two tile widths */
    pitch_val = obj_priv->stride / tile_width;
    pitch_val = ffs(pitch_val) - 1;

    val = obj_priv->gtt_offset;
    if (obj_priv->tiling_mode == I915_TILING_Y)
        val |= 1 << I830_FENCE_TILING_Y_SHIFT;
    val |= I915_FENCE_SIZE_BITS(obj->size);
    val |= pitch_val << I830_FENCE_PITCH_SHIFT;
    val |= I830_FENCE_REG_VALID;

    I915_WRITE(FENCE_REG_830_0 + (regnum * 4), val);
}
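/*
 * Illustrative example for the pitch encoding in i915_write_fence_reg()
 * above: for an X-tiled object with a 2048-byte stride and 512-byte tile
 * width, pitch_val = 2048 / 512 = 4, and ffs(4) - 1 = 2, i.e. the register
 * stores log2 of the pitch measured in tile widths.  This is why the
 * stride is expected to be a power-of-two number of tiles.
 */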
static void i830_write_fence_reg(struct drm_i915_fence_reg *reg)
{
    struct drm_gem_object *obj = reg->obj;
    struct drm_device *dev = obj->dev;
    drm_i915_private_t *dev_priv = dev->dev_private;
    struct drm_i915_gem_object *obj_priv = obj->driver_private;
    int regnum = obj_priv->fence_reg;
    uint32_t val;
    uint32_t pitch_val;

    if ((obj_priv->gtt_offset & ~I915_FENCE_START_MASK) ||
        (obj_priv->gtt_offset & (obj->size - 1))) {
        WARN(1, "%s: object 0x%08x not 1M or size aligned\n",
             __func__, obj_priv->gtt_offset);
        return;
    }

    pitch_val = (obj_priv->stride / 128) - 1;

    val = obj_priv->gtt_offset;
    if (obj_priv->tiling_mode == I915_TILING_Y)
        val |= 1 << I830_FENCE_TILING_Y_SHIFT;
    val |= I830_FENCE_SIZE_BITS(obj->size);
    val |= pitch_val << I830_FENCE_PITCH_SHIFT;
    val |= I830_FENCE_REG_VALID;

    I915_WRITE(FENCE_REG_830_0 + (regnum * 4), val);
}
/**
 * i915_gem_object_get_fence_reg - set up a fence reg for an object
 * @obj: object to map through a fence reg
 * @write: object is about to be written
 *
 * When mapping objects through the GTT, userspace wants to be able to write
 * to them without having to worry about swizzling if the object is tiled.
 *
 * This function walks the fence regs looking for a free one for @obj,
 * stealing one if it can't find any.
 *
 * It then sets up the reg based on the object's properties: address, pitch
 * and tiling format.
 */
static int
i915_gem_object_get_fence_reg(struct drm_gem_object *obj, bool write)
{
    struct drm_device *dev = obj->dev;
    struct drm_i915_private *dev_priv = dev->dev_private;
    struct drm_i915_gem_object *obj_priv = obj->driver_private;
    struct drm_i915_fence_reg *reg = NULL;
    int i, ret;

    switch (obj_priv->tiling_mode) {
    case I915_TILING_NONE:
        WARN(1, "allocating a fence for non-tiled object?\n");
        break;
    case I915_TILING_X:
        if (!obj_priv->stride)
            return -EINVAL;
        WARN((obj_priv->stride & (512 - 1)),
             "object 0x%08x is X tiled but has non-512B pitch\n",
             obj_priv->gtt_offset);
        break;
    case I915_TILING_Y:
        if (!obj_priv->stride)
            return -EINVAL;
        WARN((obj_priv->stride & (128 - 1)),
             "object 0x%08x is Y tiled but has non-128B pitch\n",
             obj_priv->gtt_offset);
        break;
    }

    /* First try to find a free reg */
    for (i = dev_priv->fence_reg_start; i < dev_priv->num_fence_regs; i++) {
        reg = &dev_priv->fence_regs[i];
        if (!reg->obj)
            break;
    }

    /* None available, try to steal one or wait for a user to finish */
    if (i == dev_priv->num_fence_regs) {
        struct drm_i915_gem_object *old_obj_priv = NULL;
        loff_t offset;

try_again:
        /* Could try to use LRU here instead... */
        for (i = dev_priv->fence_reg_start;
             i < dev_priv->num_fence_regs; i++) {
            reg = &dev_priv->fence_regs[i];
            old_obj_priv = reg->obj->driver_private;
            if (!old_obj_priv->pin_count)
                break;
        }

        /*
         * Now things get ugly... we have to wait for one of the
         * objects to finish before trying again.
         */
        if (i == dev_priv->num_fence_regs) {
            ret = i915_gem_object_set_to_gtt_domain(reg->obj, 0);
            if (ret) {
                WARN(ret != -ERESTARTSYS,
                     "switch to GTT domain failed: %d\n", ret);
                return ret;
            }
            goto try_again;
        }

        /*
         * Zap this virtual mapping so we can set up a fence again
         * for this object next time we need it.
         */
        offset = ((loff_t) reg->obj->map_list.hash.key) << PAGE_SHIFT;
        if (dev->dev_mapping)
            unmap_mapping_range(dev->dev_mapping, offset,
                                reg->obj->size, 1);
        old_obj_priv->fence_reg = I915_FENCE_REG_NONE;
    }

    obj_priv->fence_reg = i;
    reg->obj = obj;

    if (IS_I965G(dev))
        i965_write_fence_reg(reg);
    else if (IS_I9XX(dev))
        i915_write_fence_reg(reg);
    else
        i830_write_fence_reg(reg);

    return 0;
}
/**
 * i915_gem_clear_fence_reg - clear out fence register info
 * @obj: object to clear
 *
 * Zeroes out the fence register itself and clears out the associated
 * data structures in dev_priv and obj_priv.
 */
static void
i915_gem_clear_fence_reg(struct drm_gem_object *obj)
{
    struct drm_device *dev = obj->dev;
    drm_i915_private_t *dev_priv = dev->dev_private;
    struct drm_i915_gem_object *obj_priv = obj->driver_private;

    if (IS_I965G(dev))
        I915_WRITE64(FENCE_REG_965_0 + (obj_priv->fence_reg * 8), 0);
    else
        I915_WRITE(FENCE_REG_830_0 + (obj_priv->fence_reg * 4), 0);

    dev_priv->fence_regs[obj_priv->fence_reg].obj = NULL;
    obj_priv->fence_reg = I915_FENCE_REG_NONE;
}
/**
 * Finds free space in the GTT aperture and binds the object there.
 */
static int
i915_gem_object_bind_to_gtt(struct drm_gem_object *obj, unsigned alignment)
{
    struct drm_device *dev = obj->dev;
    drm_i915_private_t *dev_priv = dev->dev_private;
    struct drm_i915_gem_object *obj_priv = obj->driver_private;
    struct drm_mm_node *free_space;
    int page_count, ret;

    if (dev_priv->mm.suspended)
        return -EBUSY;
    if (alignment == 0)
        alignment = i915_gem_get_gtt_alignment(obj);
    if (alignment & (PAGE_SIZE - 1)) {
        DRM_ERROR("Invalid object alignment requested %u\n", alignment);
        return -EINVAL;
    }

search_free:
    free_space = drm_mm_search_free(&dev_priv->mm.gtt_space,
                                    obj->size, alignment, 0);
    if (free_space != NULL) {
        obj_priv->gtt_space = drm_mm_get_block(free_space, obj->size,
                                               alignment);
        if (obj_priv->gtt_space != NULL) {
            obj_priv->gtt_space->private = obj;
            obj_priv->gtt_offset = obj_priv->gtt_space->start;
        }
    }
    if (obj_priv->gtt_space == NULL) {
        /* If the gtt is empty and we're still having trouble
         * fitting our object in, we're out of memory.
         */
        DRM_INFO("%s: GTT full, evicting something\n", __func__);

        if (list_empty(&dev_priv->mm.inactive_list) &&
            list_empty(&dev_priv->mm.flushing_list) &&
            list_empty(&dev_priv->mm.active_list)) {
            DRM_ERROR("GTT full, but LRU list empty\n");
            return -ENOMEM;
        }

        ret = i915_gem_evict_something(dev);
        if (ret != 0) {
            if (ret != -ERESTARTSYS)
                DRM_ERROR("Failed to evict a buffer %d\n", ret);
            return ret;
        }
        goto search_free;
    }

    DRM_INFO("Binding object of size %d at 0x%08x\n",
             obj->size, obj_priv->gtt_offset);

    ret = i915_gem_object_get_page_list(obj);
    if (ret) {
        drm_mm_put_block(obj_priv->gtt_space);
        obj_priv->gtt_space = NULL;
        return ret;
    }

    page_count = obj->size / PAGE_SIZE;
    /* Create an AGP memory structure pointing at our pages, and bind it
     * into the GTT.
     */
    obj_priv->agp_mem = drm_agp_bind_pages(dev,
                                           obj_priv->page_list,
                                           page_count,
                                           obj_priv->gtt_offset,
                                           obj_priv->agp_type);
    if (obj_priv->agp_mem == NULL) {
        i915_gem_object_free_page_list(obj);
        drm_mm_put_block(obj_priv->gtt_space);
        obj_priv->gtt_space = NULL;
        return -ENOMEM;
    }
    atomic_inc(&dev->gtt_count);
    atomic_add(obj->size, &dev->gtt_memory);

    /* Assert that the object is not currently in any GPU domain. As it
     * wasn't in the GTT, there shouldn't be any way it could have been in
     * a GPU cache
     */
    BUG_ON(obj->read_domains & ~(I915_GEM_DOMAIN_CPU|I915_GEM_DOMAIN_GTT));
    BUG_ON(obj->write_domain & ~(I915_GEM_DOMAIN_CPU|I915_GEM_DOMAIN_GTT));

    return 0;
}
void
i915_gem_clflush_object(struct drm_gem_object *obj)
{
    struct drm_i915_gem_object *obj_priv = obj->driver_private;

    /* If we don't have a page list set up, then we're not pinned
     * to GPU, and we can ignore the cache flush because it'll happen
     * again at bind time.
     */
    if (obj_priv->page_list == NULL)
        return;

    drm_clflush_pages(obj_priv->page_list, obj->size / PAGE_SIZE);
}
/** Flushes any GPU write domain for the object if it's dirty. */
static void
i915_gem_object_flush_gpu_write_domain(struct drm_gem_object *obj)
{
    struct drm_device *dev = obj->dev;
    uint32_t seqno;

    if ((obj->write_domain & I915_GEM_GPU_DOMAINS) == 0)
        return;

    /* Queue the GPU write cache flushing we need. */
    i915_gem_flush(dev, 0, obj->write_domain);
    seqno = i915_add_request(dev, obj->write_domain);
    obj->write_domain = 0;
    i915_gem_object_move_to_active(obj, seqno);
}
/** Flushes the GTT write domain for the object if it's dirty. */
static void
i915_gem_object_flush_gtt_write_domain(struct drm_gem_object *obj)
{
    if (obj->write_domain != I915_GEM_DOMAIN_GTT)
        return;

    /* No actual flushing is required for the GTT write domain.  Writes
     * to it immediately go to main memory as far as we know, so there's
     * no chipset flush.  It also doesn't land in render cache.
     */
    obj->write_domain = 0;
}
/** Flushes the CPU write domain for the object if it's dirty. */
static void
i915_gem_object_flush_cpu_write_domain(struct drm_gem_object *obj)
{
    struct drm_device *dev = obj->dev;

    if (obj->write_domain != I915_GEM_DOMAIN_CPU)
        return;

    i915_gem_clflush_object(obj);
    drm_agp_chipset_flush(dev);
    obj->write_domain = 0;
}
/**
 * Moves a single object to the GTT read, and possibly write domain.
 *
 * This function returns when the move is complete, including waiting on
 * flushes to occur.
 */
int
i915_gem_object_set_to_gtt_domain(struct drm_gem_object *obj, int write)
{
    struct drm_i915_gem_object *obj_priv = obj->driver_private;
    int ret;

    /* Not valid to be called on unbound objects. */
    if (obj_priv->gtt_space == NULL)
        return -EINVAL;

    i915_gem_object_flush_gpu_write_domain(obj);
    /* Wait on any GPU rendering and flushing to occur. */
    ret = i915_gem_object_wait_rendering(obj);
    if (ret != 0)
        return ret;

    /* If we're writing through the GTT domain, then CPU and GPU caches
     * will need to be invalidated at next use.
     */
    if (write)
        obj->read_domains &= I915_GEM_DOMAIN_GTT;

    i915_gem_object_flush_cpu_write_domain(obj);

    /* It should now be out of any other write domains, and we can update
     * the domain values for our changes.
     */
    BUG_ON((obj->write_domain & ~I915_GEM_DOMAIN_GTT) != 0);
    obj->read_domains |= I915_GEM_DOMAIN_GTT;
    if (write) {
        obj->write_domain = I915_GEM_DOMAIN_GTT;
        obj_priv->dirty = 1;
    }

    return 0;
}
/**
 * Moves a single object to the CPU read, and possibly write domain.
 *
 * This function returns when the move is complete, including waiting on
 * flushes to occur.
 */
static int
i915_gem_object_set_to_cpu_domain(struct drm_gem_object *obj, int write)
{
    struct drm_device *dev = obj->dev;
    int ret;

    i915_gem_object_flush_gpu_write_domain(obj);
    /* Wait on any GPU rendering and flushing to occur. */
    ret = i915_gem_object_wait_rendering(obj);
    if (ret != 0)
        return ret;

    i915_gem_object_flush_gtt_write_domain(obj);

    /* If we have a partially-valid cache of the object in the CPU,
     * finish invalidating it and free the per-page flags.
     */
    i915_gem_object_set_to_full_cpu_read_domain(obj);

    /* Flush the CPU cache if it's still invalid. */
    if ((obj->read_domains & I915_GEM_DOMAIN_CPU) == 0) {
        i915_gem_clflush_object(obj);
        drm_agp_chipset_flush(dev);

        obj->read_domains |= I915_GEM_DOMAIN_CPU;
    }

    /* It should now be out of any other write domains, and we can update
     * the domain values for our changes.
     */
    BUG_ON((obj->write_domain & ~I915_GEM_DOMAIN_CPU) != 0);

    /* If we're writing through the CPU, then the GPU read domains will
     * need to be invalidated at next use.
     */
    if (write) {
        obj->read_domains &= I915_GEM_DOMAIN_CPU;
        obj->write_domain = I915_GEM_DOMAIN_CPU;
    }

    return 0;
}
/**
 * Set the next domain for the specified object. This
 * may not actually perform the necessary flushing/invaliding though,
 * as that may want to be batched with other set_domain operations
 *
 * This is (we hope) the only really tricky part of gem. The goal
 * is fairly simple -- track which caches hold bits of the object
 * and make sure they remain coherent. A few concrete examples may
 * help to explain how it works. For shorthand, we use the notation
 * (read_domains, write_domain), e.g. (CPU, CPU) to indicate
 * a pair of read and write domain masks.
 *
 * Case 1: the batch buffer
 *
 *	1. Allocated
 *	2. Written by CPU
 *	3. Mapped to GTT
 *	4. Read by GPU
 *	5. Unmapped from GTT
 *	6. Freed
 *
 *	Let's take these a step at a time
 *
 *	1. Allocated
 *		Pages allocated from the kernel may still have
 *		cache contents, so we set them to (CPU, CPU) always.
 *	2. Written by CPU (using pwrite)
 *		The pwrite function calls set_domain (CPU, CPU) and
 *		this function does nothing (as nothing changes)
 *	3. Mapped to GTT
 *		This function asserts that the object is not
 *		currently in any GPU-based read or write domains
 *	4. Read by GPU
 *		i915_gem_execbuffer calls set_domain (COMMAND, 0).
 *		As write_domain is zero, this function adds in the
 *		current read domains (CPU+COMMAND, 0).
 *		flush_domains is set to CPU.
 *		invalidate_domains is set to COMMAND
 *		clflush is run to get data out of the CPU caches
 *		then i915_dev_set_domain calls i915_gem_flush to
 *		emit an MI_FLUSH and drm_agp_chipset_flush
 *	5. Unmapped from GTT
 *		i915_gem_object_unbind calls set_domain (CPU, CPU)
 *		flush_domains and invalidate_domains end up both zero
 *		so no flushing/invalidating happens
 *	6. Freed
 *
 * Case 2: The shared render buffer
 *
 *	1. Allocated
 *	2. Mapped to GTT
 *	3. Read/written by GPU
 *	4. set_domain to (CPU,CPU)
 *	5. Read/written by CPU
 *	6. Read/written by GPU
 *
 *	1. Allocated
 *		Same as last example, (CPU, CPU)
 *	2. Mapped to GTT
 *		Nothing changes (assertions find that it is not in the GPU)
 *	3. Read/written by GPU
 *		execbuffer calls set_domain (RENDER, RENDER)
 *		flush_domains gets CPU
 *		invalidate_domains gets GPU
 *		clflush (obj)
 *		MI_FLUSH and drm_agp_chipset_flush
 *	4. set_domain (CPU, CPU)
 *		flush_domains gets GPU
 *		invalidate_domains gets CPU
 *		wait_rendering (obj) to make sure all drawing is complete.
 *		This will include an MI_FLUSH to get the data from GPU
 *		to memory
 *		clflush (obj) to invalidate the CPU cache
 *		Another MI_FLUSH in i915_gem_flush (eliminate this somehow?)
 *	5. Read/written by CPU
 *		cache lines are loaded and dirtied
 *	6. Read written by GPU
 *		Same as last GPU access
 *
 * Case 3: The constant buffer
 *
 *	1. Allocated
 *	2. Written by CPU
 *	3. Read by GPU
 *	4. Updated (written) by CPU again
 *	5. Read by GPU
 *
 *	3. Read by GPU
 *		flush_domains = CPU
 *		invalidate_domains = RENDER
 *		clflush (obj)
 *		MI_FLUSH
 *		drm_agp_chipset_flush
 *	4. Updated (written) by CPU again
 *		flush_domains = 0 (no previous write domain)
 *		invalidate_domains = 0 (no new read domains)
 *	5. Read by GPU
 *		flush_domains = CPU
 *		invalidate_domains = RENDER
 *		clflush (obj)
 *		MI_FLUSH
 *		drm_agp_chipset_flush
 */
static void
i915_gem_object_set_to_gpu_domain(struct drm_gem_object *obj,
                                  uint32_t read_domains,
                                  uint32_t write_domain)
{
    struct drm_device *dev = obj->dev;
    struct drm_i915_gem_object *obj_priv = obj->driver_private;
    uint32_t invalidate_domains = 0;
    uint32_t flush_domains = 0;

    BUG_ON(read_domains & I915_GEM_DOMAIN_CPU);
    BUG_ON(write_domain == I915_GEM_DOMAIN_CPU);

    DRM_INFO("%s: object %p read %08x -> %08x write %08x -> %08x\n",
             __func__, obj,
             obj->read_domains, read_domains,
             obj->write_domain, write_domain);

    /*
     * If the object isn't moving to a new write domain,
     * let the object stay in multiple read domains
     */
    if (write_domain == 0)
        read_domains |= obj->read_domains;
    else
        obj_priv->dirty = 1;

    /*
     * Flush the current write domain if
     * the new read domains don't match. Invalidate
     * any read domains which differ from the old
     * write domain
     */
    if (obj->write_domain && obj->write_domain != read_domains) {
        flush_domains |= obj->write_domain;
        invalidate_domains |= read_domains & ~obj->write_domain;
    }
    /*
     * Invalidate any read caches which may have
     * stale data. That is, any new read domains.
     */
    invalidate_domains |= read_domains & ~obj->read_domains;
    if ((flush_domains | invalidate_domains) & I915_GEM_DOMAIN_CPU) {
        DRM_INFO("%s: CPU domain flush %08x invalidate %08x\n",
                 __func__, flush_domains, invalidate_domains);
        i915_gem_clflush_object(obj);
    }

    if ((write_domain | flush_domains) != 0)
        obj->write_domain = write_domain;
    obj->read_domains = read_domains;

    dev->invalidate_domains |= invalidate_domains;
    dev->flush_domains |= flush_domains;

    DRM_INFO("%s: read %08x write %08x invalidate %08x flush %08x\n",
             __func__,
             obj->read_domains, obj->write_domain,
             dev->invalidate_domains, dev->flush_domains);
}
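/*
 * Illustrative walk-through of i915_gem_object_set_to_gpu_domain() above,
 * matching "Case 3" in the comment preceding it: an object currently in
 * (CPU, CPU) requested as (RENDER, 0).  write_domain is 0, so read_domains
 * becomes CPU | RENDER; the old CPU write domain differs from the new read
 * set, so flush_domains gains CPU and invalidate_domains gains RENDER; the
 * CPU bit triggers a clflush here, and the accumulated
 * dev->flush_domains / dev->invalidate_domains are later emitted as a
 * single MI_FLUSH plus chipset flush by i915_gem_flush().
 */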
/**
 * Moves the object from a partially CPU read to a full one.
 *
 * Note that this only resolves i915_gem_object_set_cpu_read_domain_range(),
 * and doesn't handle transitioning from !(read_domains & I915_GEM_DOMAIN_CPU).
 */
static void
i915_gem_object_set_to_full_cpu_read_domain(struct drm_gem_object *obj)
{
    struct drm_device *dev = obj->dev;
    struct drm_i915_gem_object *obj_priv = obj->driver_private;

    if (!obj_priv->page_cpu_valid)
        return;

    /* If we're partially in the CPU read domain, finish moving it in.
     */
    if (obj->read_domains & I915_GEM_DOMAIN_CPU) {
        int i;

        for (i = 0; i <= (obj->size - 1) / PAGE_SIZE; i++) {
            if (obj_priv->page_cpu_valid[i])
                continue;
            drm_clflush_pages(obj_priv->page_list + i, 1);
        }
        drm_agp_chipset_flush(dev);
    }

    /* Free the page_cpu_valid mappings which are now stale, whether
     * or not we've got I915_GEM_DOMAIN_CPU.
     */
    drm_free(obj_priv->page_cpu_valid, obj->size / PAGE_SIZE,
             DRM_MEM_DRIVER);
    obj_priv->page_cpu_valid = NULL;
}
/**
 * Set the CPU read domain on a range of the object.
 *
 * The object ends up with I915_GEM_DOMAIN_CPU in its read flags although it's
 * not entirely valid.  The page_cpu_valid member of the object flags which
 * pages have been flushed, and will be respected by
 * i915_gem_object_set_to_cpu_domain() if it's called on to get a valid mapping
 * of the whole object.
 *
 * This function returns when the move is complete, including waiting on
 * flushes to occur.
 */
static int
i915_gem_object_set_cpu_read_domain_range(struct drm_gem_object *obj,
                                          uint64_t offset, uint64_t size)
{
    struct drm_i915_gem_object *obj_priv = obj->driver_private;
    int i, ret;

    if (offset == 0 && size == obj->size)
        return i915_gem_object_set_to_cpu_domain(obj, 0);

    i915_gem_object_flush_gpu_write_domain(obj);
    /* Wait on any GPU rendering and flushing to occur. */
    ret = i915_gem_object_wait_rendering(obj);
    if (ret != 0)
        return ret;
    i915_gem_object_flush_gtt_write_domain(obj);

    /* If we're already fully in the CPU read domain, we're done. */
    if (obj_priv->page_cpu_valid == NULL &&
        (obj->read_domains & I915_GEM_DOMAIN_CPU) != 0)
        return 0;

    /* Otherwise, create/clear the per-page CPU read domain flag if we're
     * newly adding I915_GEM_DOMAIN_CPU
     */
    if (obj_priv->page_cpu_valid == NULL) {
        obj_priv->page_cpu_valid = drm_calloc(1, obj->size / PAGE_SIZE,
                                              DRM_MEM_DRIVER);
        if (obj_priv->page_cpu_valid == NULL)
            return -ENOMEM;
    } else if ((obj->read_domains & I915_GEM_DOMAIN_CPU) == 0)
        memset(obj_priv->page_cpu_valid, 0, obj->size / PAGE_SIZE);

    /* Flush the cache on any pages that are still invalid from the CPU's
     * perspective.
     */
    for (i = offset / PAGE_SIZE; i <= (offset + size - 1) / PAGE_SIZE;
         i++) {
        if (obj_priv->page_cpu_valid[i])
            continue;

        drm_clflush_pages(obj_priv->page_list + i, 1);

        obj_priv->page_cpu_valid[i] = 1;
    }
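    /*
     * Example of the page range computed by the loop above (assuming
     * 4 KiB pages): a request for offset = 5000 and size = 3000 touches
     * bytes 5000 through 7999, so only page index 1 is clflushed and
     * marked valid; page 0 and any later pages keep their previous
     * page_cpu_valid state.
     */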
    /* It should now be out of any other write domains, and we can update
     * the domain values for our changes.
     */
    BUG_ON((obj->write_domain & ~I915_GEM_DOMAIN_CPU) != 0);

    obj->read_domains |= I915_GEM_DOMAIN_CPU;

    return 0;
}
/**
 * Pin an object to the GTT and evaluate the relocations landing in it.
 */
static int
i915_gem_object_pin_and_relocate(struct drm_gem_object *obj,
				 struct drm_file *file_priv,
				 struct drm_i915_gem_exec_object *entry)
{
	struct drm_device *dev = obj->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_i915_gem_relocation_entry reloc;
	struct drm_i915_gem_relocation_entry __user *relocs;
	struct drm_i915_gem_object *obj_priv = obj->driver_private;
	int i, ret;
	void __iomem *reloc_page;

	/* Choose the GTT offset for our buffer and put it there. */
	ret = i915_gem_object_pin(obj, (uint32_t) entry->alignment);
	if (ret)
		return ret;

	entry->offset = obj_priv->gtt_offset;

	relocs = (struct drm_i915_gem_relocation_entry __user *)
		 (uintptr_t) entry->relocs_ptr;
	/* Apply the relocations, using the GTT aperture to avoid cache
	 * flushing requirements.
	 */
	for (i = 0; i < entry->relocation_count; i++) {
		struct drm_gem_object *target_obj;
		struct drm_i915_gem_object *target_obj_priv;
		uint32_t reloc_val, reloc_offset;
		uint32_t __iomem *reloc_entry;

		ret = copy_from_user(&reloc, relocs + i, sizeof(reloc));
		if (ret != 0) {
			i915_gem_object_unpin(obj);
			return -EFAULT;
		}

		target_obj = drm_gem_object_lookup(obj->dev, file_priv,
						   reloc.target_handle);
		if (target_obj == NULL) {
			i915_gem_object_unpin(obj);
			return -EBADF;
		}
		target_obj_priv = target_obj->driver_private;

		/* The target buffer should have appeared before us in the
		 * exec_object list, so it should have a GTT space bound by now.
		 */
		if (target_obj_priv->gtt_space == NULL) {
			DRM_ERROR("No GTT space found for object %d\n",
				  reloc.target_handle);
			drm_gem_object_unreference(target_obj);
			i915_gem_object_unpin(obj);
			return -EINVAL;
		}

		if (reloc.offset > obj->size - 4) {
			DRM_ERROR("Relocation beyond object bounds: "
				  "obj %p target %d offset %d size %d.\n",
				  obj, reloc.target_handle,
				  (int) reloc.offset, (int) obj->size);
			drm_gem_object_unreference(target_obj);
			i915_gem_object_unpin(obj);
			return -EINVAL;
		}
		if (reloc.offset & 3) {
			DRM_ERROR("Relocation not 4-byte aligned: "
				  "obj %p target %d offset %d.\n",
				  obj, reloc.target_handle,
				  (int) reloc.offset);
			drm_gem_object_unreference(target_obj);
			i915_gem_object_unpin(obj);
			return -EINVAL;
		}

		if (reloc.write_domain & I915_GEM_DOMAIN_CPU ||
		    reloc.read_domains & I915_GEM_DOMAIN_CPU) {
			DRM_ERROR("reloc with read/write CPU domains: "
				  "obj %p target %d offset %d "
				  "read %08x write %08x",
				  obj, reloc.target_handle,
				  (int) reloc.offset,
				  reloc.read_domains,
				  reloc.write_domain);
			drm_gem_object_unreference(target_obj);
			i915_gem_object_unpin(obj);
			return -EINVAL;
		}

		if (reloc.write_domain && target_obj->pending_write_domain &&
		    reloc.write_domain != target_obj->pending_write_domain) {
			DRM_ERROR("Write domain conflict: "
				  "obj %p target %d offset %d "
				  "new %08x old %08x\n",
				  obj, reloc.target_handle,
				  (int) reloc.offset,
				  reloc.write_domain,
				  target_obj->pending_write_domain);
			drm_gem_object_unreference(target_obj);
			i915_gem_object_unpin(obj);
			return -EINVAL;
		}

#if WATCH_RELOC
		DRM_INFO("%s: obj %p offset %08x target %d "
			 "read %08x write %08x gtt %08x "
			 "presumed %08x delta %08x\n",
			 __func__,
			 obj,
			 (int) reloc.offset,
			 (int) reloc.target_handle,
			 (int) reloc.read_domains,
			 (int) reloc.write_domain,
			 (int) target_obj_priv->gtt_offset,
			 (int) reloc.presumed_offset,
			 reloc.delta);
#endif

		target_obj->pending_read_domains |= reloc.read_domains;
		target_obj->pending_write_domain |= reloc.write_domain;

		/* If the relocation already has the right value in it, no
		 * more work needs to be done.
		 */
		if (target_obj_priv->gtt_offset == reloc.presumed_offset) {
			drm_gem_object_unreference(target_obj);
			continue;
		}

		ret = i915_gem_object_set_to_gtt_domain(obj, 1);
		if (ret != 0) {
			drm_gem_object_unreference(target_obj);
			i915_gem_object_unpin(obj);
			return ret;
		}

		/* Map the page containing the relocation we're going to
		 * perform.
		 */
		reloc_offset = obj_priv->gtt_offset + reloc.offset;
		reloc_page = io_mapping_map_atomic_wc(dev_priv->mm.gtt_mapping,
						      (reloc_offset &
						       ~(PAGE_SIZE - 1)));
		reloc_entry = (uint32_t __iomem *)(reloc_page +
						   (reloc_offset & (PAGE_SIZE - 1)));
		reloc_val = target_obj_priv->gtt_offset + reloc.delta;

#if WATCH_RELOC
		DRM_INFO("Applied relocation: %p@0x%08x %08x -> %08x\n",
			 obj, (unsigned int) reloc.offset,
			 readl(reloc_entry), reloc_val);
#endif
		writel(reloc_val, reloc_entry);
		io_mapping_unmap_atomic(reloc_page);

		/* Write the updated presumed offset for this entry back out
		 * to the user.
		 */
		reloc.presumed_offset = target_obj_priv->gtt_offset;
		ret = copy_to_user(relocs + i, &reloc, sizeof(reloc));
		if (ret != 0) {
			drm_gem_object_unreference(target_obj);
			i915_gem_object_unpin(obj);
			return -EFAULT;
		}

		drm_gem_object_unreference(target_obj);
	}

#if WATCH_BUF
	if (0)
		i915_gem_dump_object(obj, 128, __func__, ~0);
#endif
	return 0;
}
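
/*
 * Illustrative sketch (not from the driver): each relocation patches a 32-bit
 * slot at reloc.offset inside this object so that it points at the target
 * buffer's final GTT address plus a delta.  presumed_offset is written back so
 * userspace can skip the patch next time, provided the target has not moved.
 */
#if 0
static uint32_t example_reloc_value(uint32_t target_gtt_offset, uint32_t delta)
{
	/* The value written through the GTT aperture by writel() above. */
	return target_gtt_offset + delta;
}
#endif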
/** Dispatch a batchbuffer to the ring
 */
static int
i915_dispatch_gem_execbuffer(struct drm_device *dev,
			     struct drm_i915_gem_execbuffer *exec,
			     uint64_t exec_offset)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_clip_rect __user *boxes = (struct drm_clip_rect __user *)
					     (uintptr_t) exec->cliprects_ptr;
	int nbox = exec->num_cliprects;
	int i = 0, count;
	uint32_t exec_start, exec_len;
	RING_LOCALS;

	exec_start = (uint32_t) exec_offset + exec->batch_start_offset;
	exec_len = (uint32_t) exec->batch_len;

	if ((exec_start | exec_len) & 0x7) {
		DRM_ERROR("alignment\n");
		return -EINVAL;
	}

	count = nbox ? nbox : 1;

	for (i = 0; i < count; i++) {
		if (i < nbox) {
			int ret = i915_emit_box(dev, boxes, i,
						exec->DR1, exec->DR4);
			if (ret)
				return ret;
		}

		if (IS_I830(dev) || IS_845G(dev)) {
			BEGIN_LP_RING(4);
			OUT_RING(MI_BATCH_BUFFER);
			OUT_RING(exec_start | MI_BATCH_NON_SECURE);
			OUT_RING(exec_start + exec_len - 4);
			OUT_RING(0);
			ADVANCE_LP_RING();
		} else {
			BEGIN_LP_RING(2);
			if (IS_I965G(dev)) {
				OUT_RING(MI_BATCH_BUFFER_START |
					 (2 << 6) |
					 MI_BATCH_NON_SECURE_I965);
				OUT_RING(exec_start);
			} else {
				OUT_RING(MI_BATCH_BUFFER_START |
					 (2 << 6));
				OUT_RING(exec_start | MI_BATCH_NON_SECURE);
			}
			ADVANCE_LP_RING();
		}
	}

	/* XXX breadcrumb */

	return 0;
}
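
/*
 * Illustrative sketch (not from the driver): the batch start address handed
 * to MI_BATCH_BUFFER_START is the buffer's GTT offset plus the user-supplied
 * batch_start_offset, and the single test above rejects either value when it
 * is not 8-byte aligned, since OR-ing them preserves any low set bit.
 */
#if 0
static int example_batch_align_check(uint32_t exec_offset,
				     uint32_t batch_start_offset,
				     uint32_t batch_len)
{
	uint32_t start = exec_offset + batch_start_offset;

	return ((start | batch_len) & 0x7) ? -EINVAL : 0;
}
#endif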
/* Throttle our rendering by waiting until the ring has completed our requests
 * emitted over 20 msec ago.
 *
 * This should get us reasonable parallelism between CPU and GPU but also
 * relatively low latency when blocking on a particular request to finish.
 */
static int
i915_gem_ring_throttle(struct drm_device *dev, struct drm_file *file_priv)
{
	struct drm_i915_file_private *i915_file_priv = file_priv->driver_priv;
	int ret = 0;
	uint32_t seqno;

	mutex_lock(&dev->struct_mutex);
	seqno = i915_file_priv->mm.last_gem_throttle_seqno;
	i915_file_priv->mm.last_gem_throttle_seqno =
		i915_file_priv->mm.last_gem_seqno;
	if (seqno)
		ret = i915_wait_request(dev, seqno);
	mutex_unlock(&dev->struct_mutex);

	return ret;
}
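
/*
 * Note: the wait above uses the seqno recorded at the previous throttle call
 * while storing the newest one for next time, so a client that throttles
 * regularly stays roughly one throttle interval ahead of the hardware rather
 * than blocking on its most recent work.
 */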
int
i915_gem_execbuffer(struct drm_device *dev, void *data,
		    struct drm_file *file_priv)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_i915_file_private *i915_file_priv = file_priv->driver_priv;
	struct drm_i915_gem_execbuffer *args = data;
	struct drm_i915_gem_exec_object *exec_list = NULL;
	struct drm_gem_object **object_list = NULL;
	struct drm_gem_object *batch_obj;
	int ret, i, pinned = 0;
	uint64_t exec_offset;
	uint32_t seqno, flush_domains;
	int pin_tries;

#if WATCH_EXEC
	DRM_INFO("buffers_ptr %d buffer_count %d len %08x\n",
		 (int) args->buffers_ptr, args->buffer_count, args->batch_len);
#endif

	if (args->buffer_count < 1) {
		DRM_ERROR("execbuf with %d buffers\n", args->buffer_count);
		return -EINVAL;
	}
	/* Copy in the exec list from userland */
	exec_list = drm_calloc(sizeof(*exec_list), args->buffer_count,
			       DRM_MEM_DRIVER);
	object_list = drm_calloc(sizeof(*object_list), args->buffer_count,
				 DRM_MEM_DRIVER);
	if (exec_list == NULL || object_list == NULL) {
		DRM_ERROR("Failed to allocate exec or object list "
			  "for %d buffers\n",
			  args->buffer_count);
		ret = -ENOMEM;
		goto pre_mutex_err;
	}
	ret = copy_from_user(exec_list,
			     (struct drm_i915_relocation_entry __user *)
			     (uintptr_t) args->buffers_ptr,
			     sizeof(*exec_list) * args->buffer_count);
	if (ret != 0) {
		DRM_ERROR("copy %d exec entries failed %d\n",
			  args->buffer_count, ret);
		goto pre_mutex_err;
	}

	mutex_lock(&dev->struct_mutex);

	i915_verify_inactive(dev, __FILE__, __LINE__);

	if (dev_priv->mm.wedged) {
		DRM_ERROR("Execbuf while wedged\n");
		mutex_unlock(&dev->struct_mutex);
		ret = -EIO;
		goto pre_mutex_err;
	}

	if (dev_priv->mm.suspended) {
		DRM_ERROR("Execbuf while VT-switched.\n");
		mutex_unlock(&dev->struct_mutex);
		ret = -EBUSY;
		goto pre_mutex_err;
	}

	/* Look up object handles */
	for (i = 0; i < args->buffer_count; i++) {
		object_list[i] = drm_gem_object_lookup(dev, file_priv,
						       exec_list[i].handle);
		if (object_list[i] == NULL) {
			DRM_ERROR("Invalid object handle %d at index %d\n",
				  exec_list[i].handle, i);
			ret = -EBADF;
			goto err;
		}
	}

	/* Pin and relocate */
	for (pin_tries = 0; ; pin_tries++) {
		ret = 0;
		for (i = 0; i < args->buffer_count; i++) {
			object_list[i]->pending_read_domains = 0;
			object_list[i]->pending_write_domain = 0;
			ret = i915_gem_object_pin_and_relocate(object_list[i],
							       file_priv,
							       &exec_list[i]);
			if (ret)
				break;
			pinned = i + 1;
		}
		/* success */
		if (ret == 0)
			break;

		/* error other than GTT full, or we've already tried again */
		if (ret != -ENOMEM || pin_tries >= 1) {
			if (ret != -ERESTARTSYS)
				DRM_ERROR("Failed to pin buffers %d\n", ret);
			goto err;
		}

		/* unpin all of our buffers */
		for (i = 0; i < pinned; i++)
			i915_gem_object_unpin(object_list[i]);
		pinned = 0;

		/* evict everyone we can from the aperture */
		ret = i915_gem_evict_everything(dev);
		if (ret)
			goto err;
	}

	/* Set the pending read domains for the batch buffer to COMMAND */
	batch_obj = object_list[args->buffer_count-1];
	batch_obj->pending_read_domains = I915_GEM_DOMAIN_COMMAND;
	batch_obj->pending_write_domain = 0;

	i915_verify_inactive(dev, __FILE__, __LINE__);

	/* Zero the global flush/invalidate flags. These
	 * will be modified as new domains are computed
	 * for each object
	 */
	dev->invalidate_domains = 0;
	dev->flush_domains = 0;

	for (i = 0; i < args->buffer_count; i++) {
		struct drm_gem_object *obj = object_list[i];

		/* Compute new gpu domains and update invalidate/flush */
		i915_gem_object_set_to_gpu_domain(obj,
						  obj->pending_read_domains,
						  obj->pending_write_domain);
	}

	i915_verify_inactive(dev, __FILE__, __LINE__);

	if (dev->invalidate_domains | dev->flush_domains) {
#if WATCH_EXEC
		DRM_INFO("%s: invalidate_domains %08x flush_domains %08x\n",
			 __func__,
			 dev->invalidate_domains,
			 dev->flush_domains);
#endif
		i915_gem_flush(dev,
			       dev->invalidate_domains,
			       dev->flush_domains);
		if (dev->flush_domains)
			(void)i915_add_request(dev, dev->flush_domains);
	}

	i915_verify_inactive(dev, __FILE__, __LINE__);

#if WATCH_COHERENCY
	for (i = 0; i < args->buffer_count; i++) {
		i915_gem_object_check_coherency(object_list[i],
						exec_list[i].handle);
	}
#endif

	exec_offset = exec_list[args->buffer_count - 1].offset;

#if WATCH_EXEC
	i915_gem_dump_object(object_list[args->buffer_count - 1],
			     args->batch_len,
			     __func__,
			     ~0);
#endif

	/* Exec the batchbuffer */
	ret = i915_dispatch_gem_execbuffer(dev, args, exec_offset);
	if (ret) {
		DRM_ERROR("dispatch failed %d\n", ret);
		goto err;
	}

	/*
	 * Ensure that the commands in the batch buffer are
	 * finished before the interrupt fires
	 */
	flush_domains = i915_retire_commands(dev);

	i915_verify_inactive(dev, __FILE__, __LINE__);

	/*
	 * Get a seqno representing the execution of the current buffer,
	 * which we can wait on.  We would like to mitigate these interrupts,
	 * likely by only creating seqnos occasionally (so that we have
	 * *some* interrupts representing completion of buffers that we can
	 * wait on when trying to clear up gtt space).
	 */
	seqno = i915_add_request(dev, flush_domains);
	i915_file_priv->mm.last_gem_seqno = seqno;
	for (i = 0; i < args->buffer_count; i++) {
		struct drm_gem_object *obj = object_list[i];

		i915_gem_object_move_to_active(obj, seqno);
#if WATCH_LRU
		DRM_INFO("%s: move to exec list %p\n", __func__, obj);
#endif
	}
#if WATCH_LRU
	i915_dump_lru(dev, __func__);
#endif

	i915_verify_inactive(dev, __FILE__, __LINE__);

err:
	for (i = 0; i < pinned; i++)
		i915_gem_object_unpin(object_list[i]);

	for (i = 0; i < args->buffer_count; i++)
		drm_gem_object_unreference(object_list[i]);

	mutex_unlock(&dev->struct_mutex);

	if (!ret) {
		/* Copy the new buffer offsets back to the user's exec list. */
		ret = copy_to_user((struct drm_i915_relocation_entry __user *)
				   (uintptr_t) args->buffers_ptr,
				   exec_list,
				   sizeof(*exec_list) * args->buffer_count);
		if (ret)
			DRM_ERROR("failed to copy %d exec entries "
				  "back to user (%d)\n",
				  args->buffer_count, ret);
	}

pre_mutex_err:
	drm_free(object_list, sizeof(*object_list) * args->buffer_count,
		 DRM_MEM_DRIVER);
	drm_free(exec_list, sizeof(*exec_list) * args->buffer_count,
		 DRM_MEM_DRIVER);

	return ret;
}
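
/*
 * Illustrative userspace-side sketch (not part of the driver), assuming the
 * structures and ioctl number from i915_drm.h.  It shows the contract the
 * ioctl above expects: the exec list is an array of drm_i915_gem_exec_object
 * entries with the batch buffer last, and each entry points at its own
 * relocation array.  Error handling is omitted.
 */
#if 0
#include <stdint.h>
#include <string.h>
#include <sys/ioctl.h>
#include <drm/i915_drm.h>

static void example_submit_batch(int fd, uint32_t batch_handle,
				 uint32_t batch_len)
{
	struct drm_i915_gem_exec_object exec_obj;
	struct drm_i915_gem_execbuffer execbuf;

	memset(&exec_obj, 0, sizeof(exec_obj));
	exec_obj.handle = batch_handle;		/* batch goes last in the list */
	exec_obj.relocation_count = 0;		/* no relocations in this sketch */
	exec_obj.relocs_ptr = 0;
	exec_obj.alignment = 0;

	memset(&execbuf, 0, sizeof(execbuf));
	execbuf.buffers_ptr = (uintptr_t)&exec_obj;
	execbuf.buffer_count = 1;
	execbuf.batch_start_offset = 0;
	execbuf.batch_len = batch_len;		/* must be 8-byte aligned */
	execbuf.num_cliprects = 0;
	execbuf.cliprects_ptr = 0;
	execbuf.DR1 = 0;
	execbuf.DR4 = 0;

	ioctl(fd, DRM_IOCTL_I915_GEM_EXECBUFFER, &execbuf);
}
#endif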
int
i915_gem_object_pin(struct drm_gem_object *obj, uint32_t alignment)
{
	struct drm_device *dev = obj->dev;
	struct drm_i915_gem_object *obj_priv = obj->driver_private;
	int ret;

	i915_verify_inactive(dev, __FILE__, __LINE__);
	if (obj_priv->gtt_space == NULL) {
		ret = i915_gem_object_bind_to_gtt(obj, alignment);
		if (ret != 0) {
			if (ret != -EBUSY && ret != -ERESTARTSYS)
				DRM_ERROR("Failure to bind: %d", ret);
			return ret;
		}
	}
	/*
	 * Pre-965 chips need a fence register set up in order to
	 * properly handle tiled surfaces.
	 */
	if (!IS_I965G(dev) &&
	    obj_priv->fence_reg == I915_FENCE_REG_NONE &&
	    obj_priv->tiling_mode != I915_TILING_NONE)
		i915_gem_object_get_fence_reg(obj, true);

	obj_priv->pin_count++;

	/* If the object is not active and not pending a flush,
	 * remove it from the inactive list
	 */
	if (obj_priv->pin_count == 1) {
		atomic_inc(&dev->pin_count);
		atomic_add(obj->size, &dev->pin_memory);
		if (!obj_priv->active &&
		    (obj->write_domain & ~(I915_GEM_DOMAIN_CPU |
					   I915_GEM_DOMAIN_GTT)) == 0 &&
		    !list_empty(&obj_priv->list))
			list_del_init(&obj_priv->list);
	}
	i915_verify_inactive(dev, __FILE__, __LINE__);

	return 0;
}
void
i915_gem_object_unpin(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_i915_gem_object *obj_priv = obj->driver_private;

	i915_verify_inactive(dev, __FILE__, __LINE__);
	obj_priv->pin_count--;
	BUG_ON(obj_priv->pin_count < 0);
	BUG_ON(obj_priv->gtt_space == NULL);

	/* If the object is no longer pinned, and is
	 * neither active nor being flushed, then stick it on
	 * the inactive list
	 */
	if (obj_priv->pin_count == 0) {
		if (!obj_priv->active &&
		    (obj->write_domain & ~(I915_GEM_DOMAIN_CPU |
					   I915_GEM_DOMAIN_GTT)) == 0)
			list_move_tail(&obj_priv->list,
				       &dev_priv->mm.inactive_list);
		atomic_dec(&dev->pin_count);
		atomic_sub(obj->size, &dev->pin_memory);
	}
	i915_verify_inactive(dev, __FILE__, __LINE__);
}
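
/*
 * Illustrative sketch (not from the driver): pin_count is a simple nesting
 * counter, so every successful i915_gem_object_pin() must be balanced by one
 * i915_gem_object_unpin() before the object can be moved or evicted again.
 * The helper name is hypothetical; struct_mutex is assumed to be held, as in
 * the callers above.
 */
#if 0
static int example_with_pinned_object(struct drm_gem_object *obj)
{
	int ret;

	ret = i915_gem_object_pin(obj, 4096);	/* e.g. page-aligned binding */
	if (ret)
		return ret;

	/* ... use the object's gtt_offset while it cannot move ... */

	i915_gem_object_unpin(obj);		/* balance the pin above */
	return 0;
}
#endif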
int
i915_gem_pin_ioctl(struct drm_device *dev, void *data,
		   struct drm_file *file_priv)
{
	struct drm_i915_gem_pin *args = data;
	struct drm_gem_object *obj;
	struct drm_i915_gem_object *obj_priv;
	int ret;

	mutex_lock(&dev->struct_mutex);

	obj = drm_gem_object_lookup(dev, file_priv, args->handle);
	if (obj == NULL) {
		DRM_ERROR("Bad handle in i915_gem_pin_ioctl(): %d\n",
			  args->handle);
		mutex_unlock(&dev->struct_mutex);
		return -EBADF;
	}
	obj_priv = obj->driver_private;

	if (obj_priv->pin_filp != NULL && obj_priv->pin_filp != file_priv) {
		DRM_ERROR("Already pinned in i915_gem_pin_ioctl(): %d\n",
			  args->handle);
		drm_gem_object_unreference(obj);
		mutex_unlock(&dev->struct_mutex);
		return -EINVAL;
	}

	obj_priv->user_pin_count++;
	obj_priv->pin_filp = file_priv;
	if (obj_priv->user_pin_count == 1) {
		ret = i915_gem_object_pin(obj, args->alignment);
		if (ret != 0) {
			drm_gem_object_unreference(obj);
			mutex_unlock(&dev->struct_mutex);
			return ret;
		}
	}

	/* XXX - flush the CPU caches for pinned objects
	 * as the X server doesn't manage domains yet
	 */
	i915_gem_object_flush_cpu_write_domain(obj);
	args->offset = obj_priv->gtt_offset;
	drm_gem_object_unreference(obj);
	mutex_unlock(&dev->struct_mutex);

	return 0;
}
int
i915_gem_unpin_ioctl(struct drm_device *dev, void *data,
		     struct drm_file *file_priv)
{
	struct drm_i915_gem_pin *args = data;
	struct drm_gem_object *obj;
	struct drm_i915_gem_object *obj_priv;

	mutex_lock(&dev->struct_mutex);

	obj = drm_gem_object_lookup(dev, file_priv, args->handle);
	if (obj == NULL) {
		DRM_ERROR("Bad handle in i915_gem_unpin_ioctl(): %d\n",
			  args->handle);
		mutex_unlock(&dev->struct_mutex);
		return -EBADF;
	}

	obj_priv = obj->driver_private;
	if (obj_priv->pin_filp != file_priv) {
		DRM_ERROR("Not pinned by caller in i915_gem_unpin_ioctl(): %d\n",
			  args->handle);
		drm_gem_object_unreference(obj);
		mutex_unlock(&dev->struct_mutex);
		return -EINVAL;
	}
	obj_priv->user_pin_count--;
	if (obj_priv->user_pin_count == 0) {
		obj_priv->pin_filp = NULL;
		i915_gem_object_unpin(obj);
	}

	drm_gem_object_unreference(obj);
	mutex_unlock(&dev->struct_mutex);
	return 0;
}
int
i915_gem_busy_ioctl(struct drm_device *dev, void *data,
		    struct drm_file *file_priv)
{
	struct drm_i915_gem_busy *args = data;
	struct drm_gem_object *obj;
	struct drm_i915_gem_object *obj_priv;

	mutex_lock(&dev->struct_mutex);
	obj = drm_gem_object_lookup(dev, file_priv, args->handle);
	if (obj == NULL) {
		DRM_ERROR("Bad handle in i915_gem_busy_ioctl(): %d\n",
			  args->handle);
		mutex_unlock(&dev->struct_mutex);
		return -EBADF;
	}

	obj_priv = obj->driver_private;
	/* Don't count being on the flushing list against the object being
	 * done.  Otherwise, a buffer left on the flushing list but not getting
	 * flushed (because nobody's flushing that domain) won't ever return
	 * unbusy and get reused by libdrm's bo cache.  The other expected
	 * consumer of this interface, OpenGL's occlusion queries, also specs
	 * that the objects get unbusy "eventually" without any interference.
	 */
	args->busy = obj_priv->active && obj_priv->last_rendering_seqno != 0;

	drm_gem_object_unreference(obj);
	mutex_unlock(&dev->struct_mutex);
	return 0;
}
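
/*
 * Illustrative userspace-side sketch (not part of the driver), assuming the
 * drm_i915_gem_busy structure and ioctl number from i915_drm.h.  libdrm's
 * buffer cache polls this to decide when a buffer can be reused; busy stays
 * nonzero only while the object is on the active list above.
 */
#if 0
#include <stdint.h>
#include <sys/ioctl.h>
#include <drm/i915_drm.h>

static int example_bo_is_busy(int fd, uint32_t handle)
{
	struct drm_i915_gem_busy busy;

	busy.handle = handle;
	busy.busy = 0;
	if (ioctl(fd, DRM_IOCTL_I915_GEM_BUSY, &busy) != 0)
		return -1;	/* treat errors as "unknown" in this sketch */

	return busy.busy != 0;
}
#endif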
int
i915_gem_throttle_ioctl(struct drm_device *dev, void *data,
			struct drm_file *file_priv)
{
	return i915_gem_ring_throttle(dev, file_priv);
}
int i915_gem_init_object(struct drm_gem_object *obj)
{
	struct drm_i915_gem_object *obj_priv;

	obj_priv = drm_calloc(1, sizeof(*obj_priv), DRM_MEM_DRIVER);
	if (obj_priv == NULL)
		return -ENOMEM;

	/*
	 * We've just allocated pages from the kernel,
	 * so they've just been written by the CPU with
	 * zeros. They'll need to be clflushed before we
	 * use them with the GPU.
	 */
	obj->write_domain = I915_GEM_DOMAIN_CPU;
	obj->read_domains = I915_GEM_DOMAIN_CPU;

	obj_priv->agp_type = AGP_USER_MEMORY;

	obj->driver_private = obj_priv;
	obj_priv->obj = obj;
	obj_priv->fence_reg = I915_FENCE_REG_NONE;
	INIT_LIST_HEAD(&obj_priv->list);

	return 0;
}
void i915_gem_free_object(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	struct drm_i915_gem_object *obj_priv = obj->driver_private;

	while (obj_priv->pin_count > 0)
		i915_gem_object_unpin(obj);

	if (obj_priv->phys_obj)
		i915_gem_detach_phys_object(dev, obj);

	i915_gem_object_unbind(obj);

	i915_gem_free_mmap_offset(obj);

	drm_free(obj_priv->page_cpu_valid, 1, DRM_MEM_DRIVER);
	drm_free(obj->driver_private, 1, DRM_MEM_DRIVER);
}
/** Unbinds all objects that are on the given buffer list. */
static int
i915_gem_evict_from_list(struct drm_device *dev, struct list_head *head)
{
	struct drm_gem_object *obj;
	struct drm_i915_gem_object *obj_priv;
	int ret;

	while (!list_empty(head)) {
		obj_priv = list_first_entry(head,
					    struct drm_i915_gem_object,
					    list);
		obj = obj_priv->obj;

		if (obj_priv->pin_count != 0) {
			DRM_ERROR("Pinned object in unbind list\n");
			mutex_unlock(&dev->struct_mutex);
			return -EINVAL;
		}

		ret = i915_gem_object_unbind(obj);
		if (ret != 0) {
			DRM_ERROR("Error unbinding object in LeaveVT: %d\n",
				  ret);
			mutex_unlock(&dev->struct_mutex);
			return ret;
		}
	}

	return 0;
}
static int
i915_gem_idle(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	uint32_t seqno, cur_seqno, last_seqno;
	int stuck, ret;

	mutex_lock(&dev->struct_mutex);

	if (dev_priv->mm.suspended || dev_priv->ring.ring_obj == NULL) {
		mutex_unlock(&dev->struct_mutex);
		return 0;
	}

	/* Hack!  Don't let anybody do execbuf while we don't control the chip.
	 * We need to replace this with a semaphore, or something.
	 */
	dev_priv->mm.suspended = 1;

	/* Cancel the retire work handler, wait for it to finish if running
	 */
	mutex_unlock(&dev->struct_mutex);
	cancel_delayed_work_sync(&dev_priv->mm.retire_work);
	mutex_lock(&dev->struct_mutex);

	i915_kernel_lost_context(dev);

	/* Flush the GPU along with all non-CPU write domains
	 */
	i915_gem_flush(dev, ~(I915_GEM_DOMAIN_CPU|I915_GEM_DOMAIN_GTT),
		       ~(I915_GEM_DOMAIN_CPU|I915_GEM_DOMAIN_GTT));
	seqno = i915_add_request(dev, ~I915_GEM_DOMAIN_CPU);

	if (seqno == 0) {
		mutex_unlock(&dev->struct_mutex);
		return -ENOMEM;
	}

	dev_priv->mm.waiting_gem_seqno = seqno;
	last_seqno = 0;
	stuck = 0;
	for (;;) {
		cur_seqno = i915_get_gem_seqno(dev);
		if (i915_seqno_passed(cur_seqno, seqno))
			break;
		if (last_seqno == cur_seqno) {
			if (stuck++ > 100) {
				DRM_ERROR("hardware wedged\n");
				dev_priv->mm.wedged = 1;
				DRM_WAKEUP(&dev_priv->irq_queue);
				break;
			}
		}
		msleep(10);
		last_seqno = cur_seqno;
	}
	dev_priv->mm.waiting_gem_seqno = 0;

	i915_gem_retire_requests(dev);

	if (!dev_priv->mm.wedged) {
		/* Active and flushing should now be empty as we've
		 * waited for a sequence higher than any pending execbuffer
		 */
		WARN_ON(!list_empty(&dev_priv->mm.active_list));
		WARN_ON(!list_empty(&dev_priv->mm.flushing_list));
		/* Request should now be empty as we've also waited
		 * for the last request in the list
		 */
		WARN_ON(!list_empty(&dev_priv->mm.request_list));
	}

	/* Empty the active and flushing lists to inactive.  If there's
	 * anything left at this point, it means that we're wedged and
	 * nothing good's going to happen by leaving them there.  So strip
	 * the GPU domains and just stuff them onto inactive.
	 */
	while (!list_empty(&dev_priv->mm.active_list)) {
		struct drm_i915_gem_object *obj_priv;

		obj_priv = list_first_entry(&dev_priv->mm.active_list,
					    struct drm_i915_gem_object,
					    list);
		obj_priv->obj->write_domain &= ~I915_GEM_GPU_DOMAINS;
		i915_gem_object_move_to_inactive(obj_priv->obj);
	}

	while (!list_empty(&dev_priv->mm.flushing_list)) {
		struct drm_i915_gem_object *obj_priv;

		obj_priv = list_first_entry(&dev_priv->mm.flushing_list,
					    struct drm_i915_gem_object,
					    list);
		obj_priv->obj->write_domain &= ~I915_GEM_GPU_DOMAINS;
		i915_gem_object_move_to_inactive(obj_priv->obj);
	}

	/* Move all inactive buffers out of the GTT. */
	ret = i915_gem_evict_from_list(dev, &dev_priv->mm.inactive_list);
	WARN_ON(!list_empty(&dev_priv->mm.inactive_list));
	if (ret) {
		mutex_unlock(&dev->struct_mutex);
		return ret;
	}

	i915_gem_cleanup_ringbuffer(dev);
	mutex_unlock(&dev->struct_mutex);

	return 0;
}
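
/*
 * Note on i915_gem_idle() above: the seqno is polled roughly every 10 ms and
 * only iterations where it has not advanced count as "stuck", so the hardware
 * is declared wedged after about one second without forward progress.
 */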
static int
i915_gem_init_hws(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_gem_object *obj;
	struct drm_i915_gem_object *obj_priv;
	int ret;

	/* If we need a physical address for the status page, it's already
	 * initialized at driver load time.
	 */
	if (!I915_NEED_GFX_HWS(dev))
		return 0;

	obj = drm_gem_object_alloc(dev, 4096);
	if (obj == NULL) {
		DRM_ERROR("Failed to allocate status page\n");
		return -ENOMEM;
	}
	obj_priv = obj->driver_private;
	obj_priv->agp_type = AGP_USER_CACHED_MEMORY;

	ret = i915_gem_object_pin(obj, 4096);
	if (ret != 0) {
		drm_gem_object_unreference(obj);
		return ret;
	}

	dev_priv->status_gfx_addr = obj_priv->gtt_offset;

	dev_priv->hw_status_page = kmap(obj_priv->page_list[0]);
	if (dev_priv->hw_status_page == NULL) {
		DRM_ERROR("Failed to map status page.\n");
		memset(&dev_priv->hws_map, 0, sizeof(dev_priv->hws_map));
		i915_gem_object_unpin(obj);
		drm_gem_object_unreference(obj);
		return -EINVAL;
	}
	dev_priv->hws_obj = obj;
	memset(dev_priv->hw_status_page, 0, PAGE_SIZE);
	I915_WRITE(HWS_PGA, dev_priv->status_gfx_addr);
	I915_READ(HWS_PGA); /* posting read */
	DRM_DEBUG("hws offset: 0x%08x\n", dev_priv->status_gfx_addr);

	return 0;
}
static void
i915_gem_cleanup_hws(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_gem_object *obj;
	struct drm_i915_gem_object *obj_priv;

	if (dev_priv->hws_obj == NULL)
		return;

	obj = dev_priv->hws_obj;
	obj_priv = obj->driver_private;

	kunmap(obj_priv->page_list[0]);
	i915_gem_object_unpin(obj);
	drm_gem_object_unreference(obj);
	dev_priv->hws_obj = NULL;
	memset(&dev_priv->hws_map, 0, sizeof(dev_priv->hws_map));
	dev_priv->hw_status_page = NULL;

	/* Write high address into HWS_PGA when disabling. */
	I915_WRITE(HWS_PGA, 0x1ffff000);
}
static int
i915_gem_init_ringbuffer(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_gem_object *obj;
	struct drm_i915_gem_object *obj_priv;
	drm_i915_ring_buffer_t *ring = &dev_priv->ring;
	int ret;
	u32 head;

	ret = i915_gem_init_hws(dev);
	if (ret != 0)
		return ret;

	obj = drm_gem_object_alloc(dev, 128 * 1024);
	if (obj == NULL) {
		DRM_ERROR("Failed to allocate ringbuffer\n");
		i915_gem_cleanup_hws(dev);
		return -ENOMEM;
	}
	obj_priv = obj->driver_private;

	ret = i915_gem_object_pin(obj, 4096);
	if (ret != 0) {
		drm_gem_object_unreference(obj);
		i915_gem_cleanup_hws(dev);
		return ret;
	}

	/* Set up the kernel mapping for the ring. */
	ring->Size = obj->size;
	ring->tail_mask = obj->size - 1;

	ring->map.offset = dev->agp->base + obj_priv->gtt_offset;
	ring->map.size = obj->size;
	ring->map.type = 0;
	ring->map.flags = 0;
	ring->map.mtrr = 0;

	drm_core_ioremap_wc(&ring->map, dev);
	if (ring->map.handle == NULL) {
		DRM_ERROR("Failed to map ringbuffer.\n");
		memset(&dev_priv->ring, 0, sizeof(dev_priv->ring));
		i915_gem_object_unpin(obj);
		drm_gem_object_unreference(obj);
		i915_gem_cleanup_hws(dev);
		return -EINVAL;
	}
	ring->ring_obj = obj;
	ring->virtual_start = ring->map.handle;

	/* Stop the ring if it's running. */
	I915_WRITE(PRB0_CTL, 0);
	I915_WRITE(PRB0_TAIL, 0);
	I915_WRITE(PRB0_HEAD, 0);

	/* Initialize the ring. */
	I915_WRITE(PRB0_START, obj_priv->gtt_offset);
	head = I915_READ(PRB0_HEAD) & HEAD_ADDR;

	/* G45 ring initialization fails to reset head to zero */
	if (head != 0) {
		DRM_ERROR("Ring head not reset to zero "
			  "ctl %08x head %08x tail %08x start %08x\n",
			  I915_READ(PRB0_CTL),
			  I915_READ(PRB0_HEAD),
			  I915_READ(PRB0_TAIL),
			  I915_READ(PRB0_START));
		I915_WRITE(PRB0_HEAD, 0);

		if (I915_READ(PRB0_HEAD) & HEAD_ADDR) {
			DRM_ERROR("Ring head forced to zero "
				  "ctl %08x head %08x tail %08x start %08x\n",
				  I915_READ(PRB0_CTL),
				  I915_READ(PRB0_HEAD),
				  I915_READ(PRB0_TAIL),
				  I915_READ(PRB0_START));
		}
	}

	I915_WRITE(PRB0_CTL,
		   ((obj->size - 4096) & RING_NR_PAGES) |
		   RING_NO_REPORT |
		   RING_VALID);

	head = I915_READ(PRB0_HEAD) & HEAD_ADDR;

	/* If the head is still not zero, the ring is dead */
	if (head != 0) {
		DRM_ERROR("Ring initialization failed "
			  "ctl %08x head %08x tail %08x start %08x\n",
			  I915_READ(PRB0_CTL),
			  I915_READ(PRB0_HEAD),
			  I915_READ(PRB0_TAIL),
			  I915_READ(PRB0_START));
		return -EIO;
	}

	/* Update our cache of the ring state */
	if (!drm_core_check_feature(dev, DRIVER_MODESET))
		i915_kernel_lost_context(dev);
	else {
		ring->head = I915_READ(PRB0_HEAD) & HEAD_ADDR;
		ring->tail = I915_READ(PRB0_TAIL) & TAIL_ADDR;
		ring->space = ring->head - (ring->tail + 8);
		if (ring->space < 0)
			ring->space += ring->Size;
	}

	return 0;
}
void
i915_gem_cleanup_ringbuffer(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;

	if (dev_priv->ring.ring_obj == NULL)
		return;

	drm_core_ioremapfree(&dev_priv->ring.map, dev);

	i915_gem_object_unpin(dev_priv->ring.ring_obj);
	drm_gem_object_unreference(dev_priv->ring.ring_obj);
	dev_priv->ring.ring_obj = NULL;
	memset(&dev_priv->ring, 0, sizeof(dev_priv->ring));

	i915_gem_cleanup_hws(dev);
}
int
i915_gem_entervt_ioctl(struct drm_device *dev, void *data,
		       struct drm_file *file_priv)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	int ret;

	if (drm_core_check_feature(dev, DRIVER_MODESET))
		return 0;

	if (dev_priv->mm.wedged) {
		DRM_ERROR("Reenabling wedged hardware, good luck\n");
		dev_priv->mm.wedged = 0;
	}

	mutex_lock(&dev->struct_mutex);
	dev_priv->mm.suspended = 0;

	ret = i915_gem_init_ringbuffer(dev);
	if (ret != 0) {
		mutex_unlock(&dev->struct_mutex);
		return ret;
	}

	BUG_ON(!list_empty(&dev_priv->mm.active_list));
	BUG_ON(!list_empty(&dev_priv->mm.flushing_list));
	BUG_ON(!list_empty(&dev_priv->mm.inactive_list));
	BUG_ON(!list_empty(&dev_priv->mm.request_list));
	mutex_unlock(&dev->struct_mutex);

	drm_irq_install(dev);

	return 0;
}
int
i915_gem_leavevt_ioctl(struct drm_device *dev, void *data,
		       struct drm_file *file_priv)
{
	int ret;

	if (drm_core_check_feature(dev, DRIVER_MODESET))
		return 0;

	ret = i915_gem_idle(dev);
	drm_irq_uninstall(dev);

	return ret;
}

void
i915_gem_lastclose(struct drm_device *dev)
{
	int ret;

	if (drm_core_check_feature(dev, DRIVER_MODESET))
		return;

	ret = i915_gem_idle(dev);
	if (ret)
		DRM_ERROR("failed to idle hardware: %d\n", ret);
}
void
i915_gem_load(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;

	INIT_LIST_HEAD(&dev_priv->mm.active_list);
	INIT_LIST_HEAD(&dev_priv->mm.flushing_list);
	INIT_LIST_HEAD(&dev_priv->mm.inactive_list);
	INIT_LIST_HEAD(&dev_priv->mm.request_list);
	INIT_DELAYED_WORK(&dev_priv->mm.retire_work,
			  i915_gem_retire_work_handler);
	dev_priv->mm.next_gem_seqno = 1;

	/* Old X drivers will take 0-2 for front, back, depth buffers */
	dev_priv->fence_reg_start = 3;

	if (IS_I965G(dev) || IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev))
		dev_priv->num_fence_regs = 16;
	else
		dev_priv->num_fence_regs = 8;

	i915_gem_detect_bit_6_swizzle(dev);
}
/*
 * Create a physically contiguous memory object for this object
 * e.g. for cursor + overlay regs
 */
int i915_gem_init_phys_object(struct drm_device *dev,
			      int id, int size)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_i915_gem_phys_object *phys_obj;
	int ret;

	if (dev_priv->mm.phys_objs[id - 1] || !size)
		return 0;

	phys_obj = drm_calloc(1, sizeof(struct drm_i915_gem_phys_object), DRM_MEM_DRIVER);
	if (!phys_obj)
		return -ENOMEM;

	phys_obj->id = id;

	phys_obj->handle = drm_pci_alloc(dev, size, 0, 0xffffffff);
	if (!phys_obj->handle) {
		ret = -ENOMEM;
		goto kfree_obj;
	}
#ifdef CONFIG_X86
	set_memory_wc((unsigned long)phys_obj->handle->vaddr, phys_obj->handle->size / PAGE_SIZE);
#endif

	dev_priv->mm.phys_objs[id - 1] = phys_obj;

	return 0;
kfree_obj:
	drm_free(phys_obj, sizeof(struct drm_i915_gem_phys_object), DRM_MEM_DRIVER);
	return ret;
}
void i915_gem_free_phys_object(struct drm_device *dev, int id)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_i915_gem_phys_object *phys_obj;

	if (!dev_priv->mm.phys_objs[id - 1])
		return;

	phys_obj = dev_priv->mm.phys_objs[id - 1];
	if (phys_obj->cur_obj) {
		i915_gem_detach_phys_object(dev, phys_obj->cur_obj);
	}

#ifdef CONFIG_X86
	set_memory_wb((unsigned long)phys_obj->handle->vaddr, phys_obj->handle->size / PAGE_SIZE);
#endif
	drm_pci_free(dev, phys_obj->handle);
	drm_free(phys_obj, sizeof(struct drm_i915_gem_phys_object), DRM_MEM_DRIVER);
	dev_priv->mm.phys_objs[id - 1] = NULL;
}
void i915_gem_free_all_phys_object(struct drm_device *dev)
{
	int i;

	for (i = I915_GEM_PHYS_CURSOR_0; i <= I915_MAX_PHYS_OBJECT; i++)
		i915_gem_free_phys_object(dev, i);
}
void i915_gem_detach_phys_object(struct drm_device *dev,
				 struct drm_gem_object *obj)
{
	struct drm_i915_gem_object *obj_priv;
	int i;
	int ret;
	int page_count;

	obj_priv = obj->driver_private;
	if (!obj_priv->phys_obj)
		return;

	ret = i915_gem_object_get_page_list(obj);
	if (ret)
		goto out;

	page_count = obj->size / PAGE_SIZE;

	for (i = 0; i < page_count; i++) {
		char *dst = kmap_atomic(obj_priv->page_list[i], KM_USER0);
		char *src = obj_priv->phys_obj->handle->vaddr + (i * PAGE_SIZE);

		memcpy(dst, src, PAGE_SIZE);
		kunmap_atomic(dst, KM_USER0);
	}
	drm_clflush_pages(obj_priv->page_list, page_count);
	drm_agp_chipset_flush(dev);
out:
	obj_priv->phys_obj->cur_obj = NULL;
	obj_priv->phys_obj = NULL;
}
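
/*
 * Note: detaching copies the contents back from the contiguous PCI buffer
 * into the object's regular GEM pages and clflushes them, while attaching
 * (below) copies in the opposite direction so the hardware can keep reading
 * the object from a single physical address.
 */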
int
i915_gem_attach_phys_object(struct drm_device *dev,
			    struct drm_gem_object *obj, int id)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_i915_gem_object *obj_priv;
	int ret = 0;
	int page_count;
	int i;

	if (id > I915_MAX_PHYS_OBJECT)
		return -EINVAL;

	obj_priv = obj->driver_private;

	if (obj_priv->phys_obj) {
		if (obj_priv->phys_obj->id == id)
			return 0;
		i915_gem_detach_phys_object(dev, obj);
	}

	/* create a new object */
	if (!dev_priv->mm.phys_objs[id - 1]) {
		ret = i915_gem_init_phys_object(dev, id,
						obj->size);
		if (ret) {
			DRM_ERROR("failed to init phys object %d size: %zu\n", id, obj->size);
			goto out;
		}
	}

	/* bind to the object */
	obj_priv->phys_obj = dev_priv->mm.phys_objs[id - 1];
	obj_priv->phys_obj->cur_obj = obj;

	ret = i915_gem_object_get_page_list(obj);
	if (ret) {
		DRM_ERROR("failed to get page list\n");
		goto out;
	}

	page_count = obj->size / PAGE_SIZE;

	for (i = 0; i < page_count; i++) {
		char *src = kmap_atomic(obj_priv->page_list[i], KM_USER0);
		char *dst = obj_priv->phys_obj->handle->vaddr + (i * PAGE_SIZE);

		memcpy(dst, src, PAGE_SIZE);
		kunmap_atomic(src, KM_USER0);
	}

	return 0;
out:
	return ret;
}
static int
i915_gem_phys_pwrite(struct drm_device *dev, struct drm_gem_object *obj,
		     struct drm_i915_gem_pwrite *args,
		     struct drm_file *file_priv)
{
	struct drm_i915_gem_object *obj_priv = obj->driver_private;
	void *obj_addr;
	int ret;
	char __user *user_data;

	user_data = (char __user *) (uintptr_t) args->data_ptr;
	obj_addr = obj_priv->phys_obj->handle->vaddr + args->offset;

	DRM_DEBUG("obj_addr %p, %lld\n", obj_addr, args->size);
	ret = copy_from_user(obj_addr, user_data, args->size);
	if (ret)
		return -EFAULT;

	drm_agp_chipset_flush(dev);
	return 0;
}