/*
 * Copyright © 2008 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Eric Anholt <eric@anholt.net>
 */

#include <linux/swap.h>
#include <linux/pci.h>
#define I915_GEM_GPU_DOMAINS	(~(I915_GEM_DOMAIN_CPU | I915_GEM_DOMAIN_GTT))

static void i915_gem_object_set_to_gpu_domain(struct drm_gem_object *obj,
					      uint32_t read_domains,
					      uint32_t write_domain);
static void i915_gem_object_flush_gpu_write_domain(struct drm_gem_object *obj);
static void i915_gem_object_flush_gtt_write_domain(struct drm_gem_object *obj);
static void i915_gem_object_flush_cpu_write_domain(struct drm_gem_object *obj);
static int i915_gem_object_set_to_cpu_domain(struct drm_gem_object *obj,
					     int write);
static int i915_gem_object_set_cpu_read_domain_range(struct drm_gem_object *obj,
						      uint64_t offset,
						      uint64_t size);
static void i915_gem_object_set_to_full_cpu_read_domain(struct drm_gem_object *obj);
static int i915_gem_object_get_page_list(struct drm_gem_object *obj);
static void i915_gem_object_free_page_list(struct drm_gem_object *obj);
static int i915_gem_object_wait_rendering(struct drm_gem_object *obj);
static int i915_gem_object_bind_to_gtt(struct drm_gem_object *obj,
				       unsigned alignment);
static int i915_gem_object_get_fence_reg(struct drm_gem_object *obj, bool write);
static void i915_gem_clear_fence_reg(struct drm_gem_object *obj);
static int i915_gem_evict_something(struct drm_device *dev);
static int i915_gem_phys_pwrite(struct drm_device *dev, struct drm_gem_object *obj,
				struct drm_i915_gem_pwrite *args,
				struct drm_file *file_priv);
int i915_gem_do_init(struct drm_device *dev, unsigned long start,
		     unsigned long end)
{
	drm_i915_private_t *dev_priv = dev->dev_private;

	if (start >= end ||
	    (start & (PAGE_SIZE - 1)) != 0 ||
	    (end & (PAGE_SIZE - 1)) != 0) {
		return -EINVAL;
	}

	drm_mm_init(&dev_priv->mm.gtt_space, start,
		    end - start);

	dev->gtt_total = (uint32_t) (end - start);

	return 0;
}
int
i915_gem_init_ioctl(struct drm_device *dev, void *data,
		    struct drm_file *file_priv)
{
	struct drm_i915_gem_init *args = data;
	int ret;

	mutex_lock(&dev->struct_mutex);
	ret = i915_gem_do_init(dev, args->gtt_start, args->gtt_end);
	mutex_unlock(&dev->struct_mutex);

	return ret;
}
int
i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data,
			    struct drm_file *file_priv)
{
	struct drm_i915_gem_get_aperture *args = data;

	if (!(dev->driver->driver_features & DRIVER_GEM))
		return -ENODEV;

	args->aper_size = dev->gtt_total;
	args->aper_available_size = (args->aper_size -
				     atomic_read(&dev->pin_memory));

	return 0;
}
/**
 * Creates a new mm object and returns a handle to it.
 */
int
i915_gem_create_ioctl(struct drm_device *dev, void *data,
		      struct drm_file *file_priv)
{
	struct drm_i915_gem_create *args = data;
	struct drm_gem_object *obj;
	int ret;
	u32 handle;

	args->size = roundup(args->size, PAGE_SIZE);

	/* Allocate the new object */
	obj = drm_gem_object_alloc(dev, args->size);
	if (obj == NULL)
		return -ENOMEM;

	ret = drm_gem_handle_create(file_priv, obj, &handle);
	mutex_lock(&dev->struct_mutex);
	drm_gem_object_handle_unreference(obj);
	mutex_unlock(&dev->struct_mutex);
	if (ret)
		return ret;

	args->handle = handle;

	return 0;
}
/**
 * Reads data from the object referenced by handle.
 *
 * On error, the contents of *data are undefined.
 */
int
i915_gem_pread_ioctl(struct drm_device *dev, void *data,
		     struct drm_file *file_priv)
{
	struct drm_i915_gem_pread *args = data;
	struct drm_gem_object *obj;
	struct drm_i915_gem_object *obj_priv;
	ssize_t read;
	loff_t offset;
	int ret;

	obj = drm_gem_object_lookup(dev, file_priv, args->handle);
	if (obj == NULL)
		return -EBADF;
	obj_priv = obj->driver_private;

	/* Bounds check source.
	 *
	 * XXX: This could use review for overflow issues...
	 */
	if (args->offset > obj->size || args->size > obj->size ||
	    args->offset + args->size > obj->size) {
		drm_gem_object_unreference(obj);
		return -EINVAL;
	}

	mutex_lock(&dev->struct_mutex);

	ret = i915_gem_object_set_cpu_read_domain_range(obj, args->offset,
							args->size);
	if (ret != 0) {
		drm_gem_object_unreference(obj);
		mutex_unlock(&dev->struct_mutex);
		return ret;
	}

	offset = args->offset;

	read = vfs_read(obj->filp, (char __user *)(uintptr_t)args->data_ptr,
			args->size, &offset);
	if (read != args->size) {
		drm_gem_object_unreference(obj);
		mutex_unlock(&dev->struct_mutex);
		if (read < 0)
			return read;
		else
			return -EINVAL;
	}

	drm_gem_object_unreference(obj);
	mutex_unlock(&dev->struct_mutex);

	return 0;
}
/* This is the fast write path which cannot handle
 * page faults in the source data
 */

static inline int
fast_user_write(struct io_mapping *mapping,
		loff_t page_base, int page_offset,
		char __user *user_data,
		int length)
{
	char *vaddr_atomic;
	unsigned long unwritten;

	vaddr_atomic = io_mapping_map_atomic_wc(mapping, page_base);
	unwritten = __copy_from_user_inatomic_nocache(vaddr_atomic + page_offset,
						      user_data, length);
	io_mapping_unmap_atomic(vaddr_atomic);
	if (unwritten)
		return -EFAULT;
	return 0;
}

/* Here's the write path which can sleep for
 * page faults
 */

static inline int
slow_user_write(struct io_mapping *mapping,
		loff_t page_base, int page_offset,
		char __user *user_data,
		int length)
{
	char __iomem *vaddr;
	unsigned long unwritten;

	vaddr = io_mapping_map_wc(mapping, page_base);
	if (vaddr == NULL)
		return -EFAULT;
	unwritten = __copy_from_user(vaddr + page_offset,
				     user_data, length);
	io_mapping_unmap(vaddr);
	if (unwritten)
		return -EFAULT;
	return 0;
}
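/* Note: the GTT pwrite path below first attempts the copy with
 * fast_user_write() under the atomic mapping; if the source user page is
 * not resident the copy cannot fault there, so it fails and the caller
 * retries the same page with slow_user_write(), which may sleep.
 */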
static int
i915_gem_gtt_pwrite(struct drm_device *dev, struct drm_gem_object *obj,
		    struct drm_i915_gem_pwrite *args,
		    struct drm_file *file_priv)
{
	struct drm_i915_gem_object *obj_priv = obj->driver_private;
	drm_i915_private_t *dev_priv = dev->dev_private;
	ssize_t remain;
	loff_t offset, page_base;
	char __user *user_data;
	int page_offset, page_length;
	int ret;

	user_data = (char __user *) (uintptr_t) args->data_ptr;
	remain = args->size;
	if (!access_ok(VERIFY_READ, user_data, remain))
		return -EFAULT;

	mutex_lock(&dev->struct_mutex);
	ret = i915_gem_object_pin(obj, 0);
	if (ret) {
		mutex_unlock(&dev->struct_mutex);
		return ret;
	}
	ret = i915_gem_object_set_to_gtt_domain(obj, 1);
	if (ret)
		goto fail;

	obj_priv = obj->driver_private;
	offset = obj_priv->gtt_offset + args->offset;

	while (remain > 0) {
		/* Operation in this page
		 *
		 * page_base = page offset within aperture
		 * page_offset = offset within page
		 * page_length = bytes to copy for this page
		 */
		page_base = (offset & ~(PAGE_SIZE-1));
		page_offset = offset & (PAGE_SIZE-1);
		page_length = remain;
		if ((page_offset + remain) > PAGE_SIZE)
			page_length = PAGE_SIZE - page_offset;

		ret = fast_user_write(dev_priv->mm.gtt_mapping, page_base,
				      page_offset, user_data, page_length);

		/* If we get a fault while copying data, then (presumably) our
		 * source page isn't available.  In this case, use the
		 * non-atomic function
		 */
		if (ret) {
			ret = slow_user_write(dev_priv->mm.gtt_mapping,
					      page_base, page_offset,
					      user_data, page_length);
			if (ret)
				goto fail;
		}

		remain -= page_length;
		user_data += page_length;
		offset += page_length;
	}

fail:
	i915_gem_object_unpin(obj);
	mutex_unlock(&dev->struct_mutex);

	return ret;
}
static int
i915_gem_shmem_pwrite(struct drm_device *dev, struct drm_gem_object *obj,
		      struct drm_i915_gem_pwrite *args,
		      struct drm_file *file_priv)
{
	loff_t offset;
	ssize_t written;
	int ret;

	mutex_lock(&dev->struct_mutex);

	ret = i915_gem_object_set_to_cpu_domain(obj, 1);
	if (ret) {
		mutex_unlock(&dev->struct_mutex);
		return ret;
	}

	offset = args->offset;

	written = vfs_write(obj->filp,
			    (char __user *)(uintptr_t) args->data_ptr,
			    args->size, &offset);
	if (written != args->size) {
		mutex_unlock(&dev->struct_mutex);
		if (written < 0)
			return written;
		else
			return -EINVAL;
	}

	mutex_unlock(&dev->struct_mutex);

	return 0;
}
/**
 * Writes data to the object referenced by handle.
 *
 * On error, the contents of the buffer that were to be modified are undefined.
 */
int
i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
		      struct drm_file *file_priv)
{
	struct drm_i915_gem_pwrite *args = data;
	struct drm_gem_object *obj;
	struct drm_i915_gem_object *obj_priv;
	int ret = 0;

	obj = drm_gem_object_lookup(dev, file_priv, args->handle);
	if (obj == NULL)
		return -EBADF;
	obj_priv = obj->driver_private;

	/* Bounds check destination.
	 *
	 * XXX: This could use review for overflow issues...
	 */
	if (args->offset > obj->size || args->size > obj->size ||
	    args->offset + args->size > obj->size) {
		drm_gem_object_unreference(obj);
		return -EINVAL;
	}

	/* We can only do the GTT pwrite on untiled buffers, as otherwise
	 * it would end up going through the fenced access, and we'll get
	 * different detiling behavior between reading and writing.
	 * pread/pwrite currently are reading and writing from the CPU
	 * perspective, requiring manual detiling by the client.
	 */
	if (obj_priv->phys_obj)
		ret = i915_gem_phys_pwrite(dev, obj, args, file_priv);
	else if (obj_priv->tiling_mode == I915_TILING_NONE &&
		 dev->gtt_total != 0)
		ret = i915_gem_gtt_pwrite(dev, obj, args, file_priv);
	else
		ret = i915_gem_shmem_pwrite(dev, obj, args, file_priv);

	if (ret)
		DRM_INFO("pwrite failed %d\n", ret);

	drm_gem_object_unreference(obj);

	return ret;
}
/**
 * Called when user space prepares to use an object with the CPU, either
 * through the mmap ioctl's mapping or a GTT mapping.
 */
int
i915_gem_set_domain_ioctl(struct drm_device *dev, void *data,
			  struct drm_file *file_priv)
{
	struct drm_i915_gem_set_domain *args = data;
	struct drm_gem_object *obj;
	uint32_t read_domains = args->read_domains;
	uint32_t write_domain = args->write_domain;
	int ret;

	if (!(dev->driver->driver_features & DRIVER_GEM))
		return -ENODEV;

	/* Only handle setting domains to types used by the CPU. */
	if (write_domain & ~(I915_GEM_DOMAIN_CPU | I915_GEM_DOMAIN_GTT))
		return -EINVAL;

	if (read_domains & ~(I915_GEM_DOMAIN_CPU | I915_GEM_DOMAIN_GTT))
		return -EINVAL;

	/* Having something in the write domain implies it's in the read
	 * domain, and only that read domain.  Enforce that in the request.
	 */
	if (write_domain != 0 && read_domains != write_domain)
		return -EINVAL;

	obj = drm_gem_object_lookup(dev, file_priv, args->handle);
	if (obj == NULL)
		return -EBADF;

	mutex_lock(&dev->struct_mutex);

	DRM_INFO("set_domain_ioctl %p(%d), %08x %08x\n",
		 obj, obj->size, read_domains, write_domain);

	if (read_domains & I915_GEM_DOMAIN_GTT) {
		ret = i915_gem_object_set_to_gtt_domain(obj, write_domain != 0);

		/* Silently promote "you're not bound, there was nothing to do"
		 * to success, since the client was just asking us to
		 * make sure everything was done.
		 */
		if (ret == -EINVAL)
			ret = 0;
	} else {
		ret = i915_gem_object_set_to_cpu_domain(obj, write_domain != 0);
	}

	drm_gem_object_unreference(obj);
	mutex_unlock(&dev->struct_mutex);
	return ret;
}
/**
 * Called when user space has done writes to this buffer
 */
int
i915_gem_sw_finish_ioctl(struct drm_device *dev, void *data,
			 struct drm_file *file_priv)
{
	struct drm_i915_gem_sw_finish *args = data;
	struct drm_gem_object *obj;
	struct drm_i915_gem_object *obj_priv;
	int ret = 0;

	if (!(dev->driver->driver_features & DRIVER_GEM))
		return -ENODEV;

	mutex_lock(&dev->struct_mutex);
	obj = drm_gem_object_lookup(dev, file_priv, args->handle);
	if (obj == NULL) {
		mutex_unlock(&dev->struct_mutex);
		return -EBADF;
	}

	DRM_INFO("%s: sw_finish %d (%p %d)\n",
		 __func__, args->handle, obj, obj->size);

	obj_priv = obj->driver_private;

	/* Pinned buffers may be scanout, so flush the cache */
	if (obj_priv->pin_count)
		i915_gem_object_flush_cpu_write_domain(obj);

	drm_gem_object_unreference(obj);
	mutex_unlock(&dev->struct_mutex);
	return ret;
}
/**
 * Maps the contents of an object, returning the address it is mapped
 * into.
 *
 * While the mapping holds a reference on the contents of the object, it doesn't
 * imply a ref on the object itself.
 */
int
i915_gem_mmap_ioctl(struct drm_device *dev, void *data,
		    struct drm_file *file_priv)
{
	struct drm_i915_gem_mmap *args = data;
	struct drm_gem_object *obj;
	loff_t offset;
	unsigned long addr;

	if (!(dev->driver->driver_features & DRIVER_GEM))
		return -ENODEV;

	obj = drm_gem_object_lookup(dev, file_priv, args->handle);
	if (obj == NULL)
		return -EBADF;

	offset = args->offset;

	down_write(&current->mm->mmap_sem);
	addr = do_mmap(obj->filp, 0, args->size,
		       PROT_READ | PROT_WRITE, MAP_SHARED,
		       args->offset);
	up_write(&current->mm->mmap_sem);
	mutex_lock(&dev->struct_mutex);
	drm_gem_object_unreference(obj);
	mutex_unlock(&dev->struct_mutex);
	if (IS_ERR((void *)addr))
		return addr;

	args->addr_ptr = (uint64_t) addr;

	return 0;
}
/**
 * i915_gem_fault - fault a page into the GTT
 * vma: VMA in question
 *
 * The fault handler is set up by drm_gem_mmap() when a object is GTT mapped
 * from userspace.  The fault handler takes care of binding the object to
 * the GTT (if needed), allocating and programming a fence register (again,
 * only if needed based on whether the old reg is still valid or the object
 * is tiled) and inserting a new PTE into the faulting process.
 *
 * Note that the faulting process may involve evicting existing objects
 * from the GTT and/or fence registers to make room.  So performance may
 * suffer if the GTT working set is large or there are few fence registers
 * left.
 */
int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct drm_gem_object *obj = vma->vm_private_data;
	struct drm_device *dev = obj->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_i915_gem_object *obj_priv = obj->driver_private;
	pgoff_t page_offset;
	unsigned long pfn;
	int ret = 0;
	bool write = !!(vmf->flags & FAULT_FLAG_WRITE);

	/* We don't use vmf->pgoff since that has the fake offset */
	page_offset = ((unsigned long)vmf->virtual_address - vma->vm_start) >>
		PAGE_SHIFT;

	/* Now bind it into the GTT if needed */
	mutex_lock(&dev->struct_mutex);
	if (!obj_priv->gtt_space) {
		ret = i915_gem_object_bind_to_gtt(obj, obj_priv->gtt_alignment);
		if (ret) {
			mutex_unlock(&dev->struct_mutex);
			return VM_FAULT_SIGBUS;
		}
		list_add(&obj_priv->list, &dev_priv->mm.inactive_list);
	}

	/* Need a new fence register? */
	if (obj_priv->fence_reg == I915_FENCE_REG_NONE &&
	    obj_priv->tiling_mode != I915_TILING_NONE) {
		ret = i915_gem_object_get_fence_reg(obj, write);
		if (ret) {
			mutex_unlock(&dev->struct_mutex);
			return VM_FAULT_SIGBUS;
		}
	}

	pfn = ((dev->agp->base + obj_priv->gtt_offset) >> PAGE_SHIFT) +
		page_offset;

	/* Finally, remap it using the new GTT offset */
	ret = vm_insert_pfn(vma, (unsigned long)vmf->virtual_address, pfn);

	mutex_unlock(&dev->struct_mutex);

	if (ret) {
		DRM_ERROR("can't insert pfn?? fault or busy...\n");
		return VM_FAULT_SIGBUS;
	}
	return VM_FAULT_NOPAGE;
}
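/* Note: VM_FAULT_NOPAGE tells the core fault handler that no struct page is
 * returned because vm_insert_pfn() already installed the PTE pointing into
 * the GTT aperture, so the faulting access can simply be retried.
 */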
/**
 * i915_gem_create_mmap_offset - create a fake mmap offset for an object
 * @obj: obj in question
 *
 * GEM memory mapping works by handing back to userspace a fake mmap offset
 * it can use in a subsequent mmap(2) call.  The DRM core code then looks
 * up the object based on the offset and sets up the various memory mapping
 * structures.
 *
 * This routine allocates and attaches a fake offset for @obj.
 */
static int
i915_gem_create_mmap_offset(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	struct drm_gem_mm *mm = dev->mm_private;
	struct drm_i915_gem_object *obj_priv = obj->driver_private;
	struct drm_map_list *list;
	struct drm_map *map;
	int ret = 0;

	/* Set the object up for mmap'ing */
	list = &obj->map_list;
	list->map = drm_calloc(1, sizeof(struct drm_map_list),
			       DRM_MEM_DRIVER);
	if (!list->map)
		return -ENOMEM;

	map = list->map;
	map->type = _DRM_GEM;
	map->size = obj->size;

	/* Get a DRM GEM mmap offset allocated... */
	list->file_offset_node = drm_mm_search_free(&mm->offset_manager,
						    obj->size / PAGE_SIZE, 0, 0);
	if (!list->file_offset_node) {
		DRM_ERROR("failed to allocate offset for bo %d\n", obj->name);
		ret = -ENOMEM;
		goto out_free_list;
	}

	list->file_offset_node = drm_mm_get_block(list->file_offset_node,
						  obj->size / PAGE_SIZE, 0);
	if (!list->file_offset_node) {
		ret = -ENOMEM;
		goto out_free_list;
	}

	list->hash.key = list->file_offset_node->start;
	if (drm_ht_insert_item(&mm->offset_hash, &list->hash)) {
		DRM_ERROR("failed to add to map hash\n");
		goto out_free_mm;
	}

	/* By now we should be all set, any drm_mmap request on the offset
	 * below will get to our mmap & fault handler */
	obj_priv->mmap_offset = ((uint64_t) list->hash.key) << PAGE_SHIFT;

	return 0;

out_free_mm:
	drm_mm_put_block(list->file_offset_node);
out_free_list:
	drm_free(list->map, sizeof(struct drm_map_list), DRM_MEM_DRIVER);

	return ret;
}
/**
 * i915_gem_get_gtt_alignment - return required GTT alignment for an object
 * @obj: object to check
 *
 * Return the required GTT alignment for an object, taking into account
 * potential fence register mapping if needed.
 */
static uint32_t
i915_gem_get_gtt_alignment(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	struct drm_i915_gem_object *obj_priv = obj->driver_private;
	int start, i;

	/*
	 * Minimum alignment is 4k (GTT page size), but might be greater
	 * if a fence register is needed for the object.
	 */
	if (IS_I965G(dev) || obj_priv->tiling_mode == I915_TILING_NONE)
		return 4096;

	/*
	 * Previous chips need to be aligned to the size of the smallest
	 * fence register that can contain the object.
	 */
	if (IS_I9XX(dev))
		start = 1024*1024;
	else
		start = 512*1024;

	for (i = start; i < obj->size; i <<= 1)
		;

	return i;
}
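/* Note on the loop above: it rounds the object size up to the next power of
 * two that is no smaller than the minimum fence size for this generation,
 * since pre-965 fence registers can only cover power-of-two sized, naturally
 * aligned regions.
 */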
/**
 * i915_gem_mmap_gtt_ioctl - prepare an object for GTT mmap'ing
 * @dev: DRM device
 * @data: GTT mapping ioctl data
 * @file_priv: GEM object info
 *
 * Simply returns the fake offset to userspace so it can mmap it.
 * The mmap call will end up in drm_gem_mmap(), which will set things
 * up so we can get faults in the handler above.
 *
 * The fault handler will take care of binding the object into the GTT
 * (since it may have been evicted to make room for something), allocating
 * a fence register, and mapping the appropriate aperture address into
 * userspace.
 */
int
i915_gem_mmap_gtt_ioctl(struct drm_device *dev, void *data,
			struct drm_file *file_priv)
{
	struct drm_i915_gem_mmap_gtt *args = data;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_gem_object *obj;
	struct drm_i915_gem_object *obj_priv;
	int ret;

	if (!(dev->driver->driver_features & DRIVER_GEM))
		return -ENODEV;

	obj = drm_gem_object_lookup(dev, file_priv, args->handle);
	if (obj == NULL)
		return -EBADF;

	mutex_lock(&dev->struct_mutex);

	obj_priv = obj->driver_private;

	if (!obj_priv->mmap_offset) {
		ret = i915_gem_create_mmap_offset(obj);
		if (ret)
			return ret;
	}

	args->offset = obj_priv->mmap_offset;

	obj_priv->gtt_alignment = i915_gem_get_gtt_alignment(obj);

	/* Make sure the alignment is correct for fence regs etc */
	if (obj_priv->agp_mem &&
	    (obj_priv->gtt_offset & (obj_priv->gtt_alignment - 1))) {
		drm_gem_object_unreference(obj);
		mutex_unlock(&dev->struct_mutex);
		return -EINVAL;
	}

	/*
	 * Pull it into the GTT so that we have a page list (makes the
	 * initial fault faster and any subsequent flushing possible).
	 */
	if (!obj_priv->agp_mem) {
		ret = i915_gem_object_bind_to_gtt(obj, obj_priv->gtt_alignment);
		if (ret) {
			drm_gem_object_unreference(obj);
			mutex_unlock(&dev->struct_mutex);
			return ret;
		}
		list_add(&obj_priv->list, &dev_priv->mm.inactive_list);
	}

	drm_gem_object_unreference(obj);
	mutex_unlock(&dev->struct_mutex);

	return 0;
}
static void
i915_gem_object_free_page_list(struct drm_gem_object *obj)
{
	struct drm_i915_gem_object *obj_priv = obj->driver_private;
	int page_count = obj->size / PAGE_SIZE;
	int i;

	if (obj_priv->page_list == NULL)
		return;

	for (i = 0; i < page_count; i++)
		if (obj_priv->page_list[i] != NULL) {
			set_page_dirty(obj_priv->page_list[i]);
			mark_page_accessed(obj_priv->page_list[i]);
			page_cache_release(obj_priv->page_list[i]);
		}

	drm_free(obj_priv->page_list,
		 page_count * sizeof(struct page *),
		 DRM_MEM_DRIVER);
	obj_priv->page_list = NULL;
}
static void
i915_gem_object_move_to_active(struct drm_gem_object *obj, uint32_t seqno)
{
	struct drm_device *dev = obj->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_i915_gem_object *obj_priv = obj->driver_private;

	/* Add a reference if we're newly entering the active list. */
	if (!obj_priv->active) {
		drm_gem_object_reference(obj);
		obj_priv->active = 1;
	}
	/* Move from whatever list we were on to the tail of execution. */
	list_move_tail(&obj_priv->list,
		       &dev_priv->mm.active_list);
	obj_priv->last_rendering_seqno = seqno;
}

static void
i915_gem_object_move_to_flushing(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_i915_gem_object *obj_priv = obj->driver_private;

	BUG_ON(!obj_priv->active);
	list_move_tail(&obj_priv->list, &dev_priv->mm.flushing_list);
	obj_priv->last_rendering_seqno = 0;
}

static void
i915_gem_object_move_to_inactive(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_i915_gem_object *obj_priv = obj->driver_private;

	i915_verify_inactive(dev, __FILE__, __LINE__);
	if (obj_priv->pin_count != 0)
		list_del_init(&obj_priv->list);
	else
		list_move_tail(&obj_priv->list, &dev_priv->mm.inactive_list);

	obj_priv->last_rendering_seqno = 0;
	if (obj_priv->active) {
		obj_priv->active = 0;
		drm_gem_object_unreference(obj);
	}
	i915_verify_inactive(dev, __FILE__, __LINE__);
}
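/* Note: the 'active' flag and the extra reference taken in move_to_active()
 * keep an object from being freed while the GPU may still be reading from
 * it; the reference is only dropped once the object returns to the inactive
 * list above.
 */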
/**
 * Creates a new sequence number, emitting a write of it to the status page
 * plus an interrupt, which will trigger i915_user_interrupt_handler.
 *
 * Must be called with struct_lock held.
 *
 * Returned sequence numbers are nonzero on success.
 */
static uint32_t
i915_add_request(struct drm_device *dev, uint32_t flush_domains)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_i915_gem_request *request;
	uint32_t seqno;
	int was_empty;
	RING_LOCALS;

	request = drm_calloc(1, sizeof(*request), DRM_MEM_DRIVER);
	if (request == NULL)
		return 0;

	/* Grab the seqno we're going to make this request be, and bump the
	 * next (skipping 0 so it can be the reserved no-seqno value).
	 */
	seqno = dev_priv->mm.next_gem_seqno;
	dev_priv->mm.next_gem_seqno++;
	if (dev_priv->mm.next_gem_seqno == 0)
		dev_priv->mm.next_gem_seqno++;

	BEGIN_LP_RING(4);
	OUT_RING(MI_STORE_DWORD_INDEX);
	OUT_RING(I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
	OUT_RING(seqno);

	OUT_RING(MI_USER_INTERRUPT);
	ADVANCE_LP_RING();

	DRM_DEBUG("%d\n", seqno);

	request->seqno = seqno;
	request->emitted_jiffies = jiffies;
	was_empty = list_empty(&dev_priv->mm.request_list);
	list_add_tail(&request->list, &dev_priv->mm.request_list);

	/* Associate any objects on the flushing list matching the write
	 * domain we're flushing with our flush.
	 */
	if (flush_domains != 0) {
		struct drm_i915_gem_object *obj_priv, *next;

		list_for_each_entry_safe(obj_priv, next,
					 &dev_priv->mm.flushing_list, list) {
			struct drm_gem_object *obj = obj_priv->obj;

			if ((obj->write_domain & flush_domains) ==
			    obj->write_domain) {
				obj->write_domain = 0;
				i915_gem_object_move_to_active(obj, seqno);
			}
		}
	}

	if (was_empty && !dev_priv->mm.suspended)
		schedule_delayed_work(&dev_priv->mm.retire_work, HZ);
	return seqno;
}
/**
 * Command execution barrier
 *
 * Ensures that all commands in the ring are finished
 * before signalling the CPU
 */
static uint32_t
i915_retire_commands(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	uint32_t cmd = MI_FLUSH | MI_NO_WRITE_FLUSH;
	uint32_t flush_domains = 0;
	RING_LOCALS;

	/* The sampler always gets flushed on i965 (sigh) */
	if (IS_I965G(dev))
		flush_domains |= I915_GEM_DOMAIN_SAMPLER;
	BEGIN_LP_RING(2);
	OUT_RING(cmd);
	OUT_RING(0); /* noop */
	ADVANCE_LP_RING();
	return flush_domains;
}
/**
 * Moves buffers associated only with the given active seqno from the active
 * to inactive list, potentially freeing them.
 */
static void
i915_gem_retire_request(struct drm_device *dev,
			struct drm_i915_gem_request *request)
{
	drm_i915_private_t *dev_priv = dev->dev_private;

	/* Move any buffers on the active list that are no longer referenced
	 * by the ringbuffer to the flushing/inactive lists as appropriate.
	 */
	while (!list_empty(&dev_priv->mm.active_list)) {
		struct drm_gem_object *obj;
		struct drm_i915_gem_object *obj_priv;

		obj_priv = list_first_entry(&dev_priv->mm.active_list,
					    struct drm_i915_gem_object,
					    list);
		obj = obj_priv->obj;

		/* If the seqno being retired doesn't match the oldest in the
		 * list, then the oldest in the list must still be newer than
		 * this one.
		 */
		if (obj_priv->last_rendering_seqno != request->seqno)
			return;

		DRM_INFO("%s: retire %d moves to inactive list %p\n",
			 __func__, request->seqno, obj);

		if (obj->write_domain != 0)
			i915_gem_object_move_to_flushing(obj);
		else
			i915_gem_object_move_to_inactive(obj);
	}
}
/**
 * Returns true if seq1 is later than seq2.
 */
static int
i915_seqno_passed(uint32_t seq1, uint32_t seq2)
{
	return (int32_t)(seq1 - seq2) >= 0;
}
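/* Note: the subtraction above is done in unsigned arithmetic and the result
 * reinterpreted as signed, so the comparison keeps working across 32-bit
 * sequence number wraparound as long as the two values are less than 2^31
 * apart.
 */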
uint32_t
i915_get_gem_seqno(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;

	return READ_HWSP(dev_priv, I915_GEM_HWS_INDEX);
}
/**
 * This function clears the request list as sequence numbers are passed.
 */
void
i915_gem_retire_requests(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	uint32_t seqno;

	seqno = i915_get_gem_seqno(dev);

	while (!list_empty(&dev_priv->mm.request_list)) {
		struct drm_i915_gem_request *request;
		uint32_t retiring_seqno;

		request = list_first_entry(&dev_priv->mm.request_list,
					   struct drm_i915_gem_request,
					   list);
		retiring_seqno = request->seqno;

		if (i915_seqno_passed(seqno, retiring_seqno) ||
		    dev_priv->mm.wedged) {
			i915_gem_retire_request(dev, request);

			list_del(&request->list);
			drm_free(request, sizeof(*request), DRM_MEM_DRIVER);
		} else
			break;
	}
}
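/* Note: requests are kept in submission order, so retiring stops at the
 * first request whose seqno has not yet passed (unless the GPU is wedged,
 * in which case everything is retired to let waiters make progress).
 */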
void
i915_gem_retire_work_handler(struct work_struct *work)
{
	drm_i915_private_t *dev_priv;
	struct drm_device *dev;

	dev_priv = container_of(work, drm_i915_private_t,
				mm.retire_work.work);
	dev = dev_priv->dev;

	mutex_lock(&dev->struct_mutex);
	i915_gem_retire_requests(dev);
	if (!dev_priv->mm.suspended &&
	    !list_empty(&dev_priv->mm.request_list))
		schedule_delayed_work(&dev_priv->mm.retire_work, HZ);
	mutex_unlock(&dev->struct_mutex);
}
/**
 * Waits for a sequence number to be signaled, and cleans up the
 * request and object lists appropriately for that event.
 */
static int
i915_wait_request(struct drm_device *dev, uint32_t seqno)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	int ret = 0;

	if (!i915_seqno_passed(i915_get_gem_seqno(dev), seqno)) {
		dev_priv->mm.waiting_gem_seqno = seqno;
		i915_user_irq_get(dev);
		ret = wait_event_interruptible(dev_priv->irq_queue,
					       i915_seqno_passed(i915_get_gem_seqno(dev),
								 seqno) ||
					       dev_priv->mm.wedged);
		i915_user_irq_put(dev);
		dev_priv->mm.waiting_gem_seqno = 0;
	}
	if (dev_priv->mm.wedged)
		ret = -EIO;

	if (ret && ret != -ERESTARTSYS)
		DRM_ERROR("%s returns %d (awaiting %d at %d)\n",
			  __func__, ret, seqno, i915_get_gem_seqno(dev));

	/* Directly dispatch request retiring.  While we have the work queue
	 * to handle this, the waiter on a request often wants an associated
	 * buffer to have made it to the inactive list, and we would need
	 * a separate wait queue to handle that.
	 */
	if (ret == 0)
		i915_gem_retire_requests(dev);

	return ret;
}
static void
i915_gem_flush(struct drm_device *dev,
	       uint32_t invalidate_domains,
	       uint32_t flush_domains)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	uint32_t cmd;
	RING_LOCALS;

	DRM_INFO("%s: invalidate %08x flush %08x\n", __func__,
		 invalidate_domains, flush_domains);

	if (flush_domains & I915_GEM_DOMAIN_CPU)
		drm_agp_chipset_flush(dev);

	if ((invalidate_domains | flush_domains) & ~(I915_GEM_DOMAIN_CPU |
						     I915_GEM_DOMAIN_GTT)) {
		/*
		 * read/write caches:
		 *
		 * I915_GEM_DOMAIN_RENDER is always invalidated, but is
		 * only flushed if MI_NO_WRITE_FLUSH is unset.  On 965, it is
		 * also flushed at 2d versus 3d pipeline switches.
		 *
		 * I915_GEM_DOMAIN_SAMPLER is flushed on pre-965 if
		 * MI_READ_FLUSH is set, and is always flushed on 965.
		 *
		 * I915_GEM_DOMAIN_COMMAND may not exist?
		 *
		 * I915_GEM_DOMAIN_INSTRUCTION, which exists on 965, is
		 * invalidated when MI_EXE_FLUSH is set.
		 *
		 * I915_GEM_DOMAIN_VERTEX, which exists on 965, is
		 * invalidated with every MI_FLUSH.
		 *
		 * On 965, TLBs associated with I915_GEM_DOMAIN_COMMAND
		 * and I915_GEM_DOMAIN_CPU in are invalidated at PTE write and
		 * I915_GEM_DOMAIN_RENDER and I915_GEM_DOMAIN_SAMPLER
		 * are flushed at any MI_FLUSH.
		 */

		cmd = MI_FLUSH | MI_NO_WRITE_FLUSH;
		if ((invalidate_domains|flush_domains) &
		    I915_GEM_DOMAIN_RENDER)
			cmd &= ~MI_NO_WRITE_FLUSH;
		if (!IS_I965G(dev)) {
			/*
			 * On the 965, the sampler cache always gets flushed
			 * and this bit is reserved.
			 */
			if (invalidate_domains & I915_GEM_DOMAIN_SAMPLER)
				cmd |= MI_READ_FLUSH;
		}
		if (invalidate_domains & I915_GEM_DOMAIN_INSTRUCTION)
			cmd |= MI_EXE_FLUSH;

		DRM_INFO("%s: queue flush %08x to ring\n", __func__, cmd);

		BEGIN_LP_RING(2);
		OUT_RING(cmd);
		OUT_RING(0); /* noop */
		ADVANCE_LP_RING();
	}
}
/**
 * Ensures that all rendering to the object has completed and the object is
 * safe to unbind from the GTT or access from the CPU.
 */
static int
i915_gem_object_wait_rendering(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	struct drm_i915_gem_object *obj_priv = obj->driver_private;
	int ret;

	/* This function only exists to support waiting for existing rendering,
	 * not for emitting required flushes.
	 */
	BUG_ON((obj->write_domain & I915_GEM_GPU_DOMAINS) != 0);

	/* If there is rendering queued on the buffer being evicted, wait for
	 * it.
	 */
	if (obj_priv->active) {
		DRM_INFO("%s: object %p wait for seqno %08x\n",
			 __func__, obj, obj_priv->last_rendering_seqno);
		ret = i915_wait_request(dev, obj_priv->last_rendering_seqno);
		if (ret != 0)
			return ret;
	}

	return 0;
}
/**
 * Unbinds an object from the GTT aperture.
 */
int
i915_gem_object_unbind(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	struct drm_i915_gem_object *obj_priv = obj->driver_private;
	loff_t offset;
	int ret = 0;

	DRM_INFO("%s:%d %p\n", __func__, __LINE__, obj);
	DRM_INFO("gtt_space %p\n", obj_priv->gtt_space);

	if (obj_priv->gtt_space == NULL)
		return 0;

	if (obj_priv->pin_count != 0) {
		DRM_ERROR("Attempting to unbind pinned buffer\n");
		return -EINVAL;
	}

	/* Move the object to the CPU domain to ensure that
	 * any possible CPU writes while it's not in the GTT
	 * are flushed when we go to remap it. This will
	 * also ensure that all pending GPU writes are finished
	 * before we unbind.
	 */
	ret = i915_gem_object_set_to_cpu_domain(obj, 1);
	if (ret) {
		if (ret != -ERESTARTSYS)
			DRM_ERROR("set_domain failed: %d\n", ret);
		return ret;
	}

	if (obj_priv->agp_mem != NULL) {
		drm_unbind_agp(obj_priv->agp_mem);
		drm_free_agp(obj_priv->agp_mem, obj->size / PAGE_SIZE);
		obj_priv->agp_mem = NULL;
	}

	BUG_ON(obj_priv->active);

	/* blow away mappings if mapped through GTT */
	offset = ((loff_t) obj->map_list.hash.key) << PAGE_SHIFT;
	if (dev->dev_mapping)
		unmap_mapping_range(dev->dev_mapping, offset, obj->size, 1);

	if (obj_priv->fence_reg != I915_FENCE_REG_NONE)
		i915_gem_clear_fence_reg(obj);

	i915_gem_object_free_page_list(obj);

	if (obj_priv->gtt_space) {
		atomic_dec(&dev->gtt_count);
		atomic_sub(obj->size, &dev->gtt_memory);

		drm_mm_put_block(obj_priv->gtt_space);
		obj_priv->gtt_space = NULL;
	}

	/* Remove ourselves from the LRU list if present. */
	if (!list_empty(&obj_priv->list))
		list_del_init(&obj_priv->list);

	return 0;
}
static int
i915_gem_evict_something(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_gem_object *obj;
	struct drm_i915_gem_object *obj_priv;
	int ret = 0;

	for (;;) {
		/* If there's an inactive buffer available now, grab it
		 * and be done.
		 */
		if (!list_empty(&dev_priv->mm.inactive_list)) {
			obj_priv = list_first_entry(&dev_priv->mm.inactive_list,
						    struct drm_i915_gem_object,
						    list);
			obj = obj_priv->obj;
			BUG_ON(obj_priv->pin_count != 0);

			DRM_INFO("%s: evicting %p\n", __func__, obj);

			BUG_ON(obj_priv->active);

			/* Wait on the rendering and unbind the buffer. */
			ret = i915_gem_object_unbind(obj);
			break;
		}

		/* If we didn't get anything, but the ring is still processing
		 * things, wait for one of those things to finish and hopefully
		 * leave us a buffer to evict.
		 */
		if (!list_empty(&dev_priv->mm.request_list)) {
			struct drm_i915_gem_request *request;

			request = list_first_entry(&dev_priv->mm.request_list,
						   struct drm_i915_gem_request,
						   list);

			ret = i915_wait_request(dev, request->seqno);
			if (ret)
				break;

			/* if waiting caused an object to become inactive,
			 * then loop around and wait for it. Otherwise, we
			 * assume that waiting freed and unbound something,
			 * so there should now be some space in the GTT
			 */
			if (!list_empty(&dev_priv->mm.inactive_list))
				continue;
			break;
		}

		/* If we didn't have anything on the request list but there
		 * are buffers awaiting a flush, emit one and try again.
		 * When we wait on it, those buffers waiting for that flush
		 * will get moved to inactive.
		 */
		if (!list_empty(&dev_priv->mm.flushing_list)) {
			obj_priv = list_first_entry(&dev_priv->mm.flushing_list,
						    struct drm_i915_gem_object,
						    list);
			obj = obj_priv->obj;

			i915_gem_flush(dev,
				       obj->write_domain,
				       obj->write_domain);
			i915_add_request(dev, obj->write_domain);

			obj = NULL;
			continue;
		}

		DRM_ERROR("inactive empty %d request empty %d "
			  "flushing empty %d\n",
			  list_empty(&dev_priv->mm.inactive_list),
			  list_empty(&dev_priv->mm.request_list),
			  list_empty(&dev_priv->mm.flushing_list));
		/* If we didn't do any of the above, there's nothing to be done
		 * and we just can't fit it in.
		 */
		return -ENOMEM;
	}
	return ret;
}
static int
i915_gem_evict_everything(struct drm_device *dev)
{
	int ret;

	for (;;) {
		ret = i915_gem_evict_something(dev);
		if (ret != 0)
			break;
	}
	if (ret == -ENOMEM)
		return 0;
	return ret;
}
static int
i915_gem_object_get_page_list(struct drm_gem_object *obj)
{
	struct drm_i915_gem_object *obj_priv = obj->driver_private;
	int page_count, i;
	struct address_space *mapping;
	struct inode *inode;
	struct page *page;
	int ret;

	if (obj_priv->page_list)
		return 0;

	/* Get the list of pages out of our struct file.  They'll be pinned
	 * at this point until we release them.
	 */
	page_count = obj->size / PAGE_SIZE;
	BUG_ON(obj_priv->page_list != NULL);
	obj_priv->page_list = drm_calloc(page_count, sizeof(struct page *),
					 DRM_MEM_DRIVER);
	if (obj_priv->page_list == NULL) {
		DRM_ERROR("Failed to allocate page list\n");
		return -ENOMEM;
	}

	inode = obj->filp->f_path.dentry->d_inode;
	mapping = inode->i_mapping;
	for (i = 0; i < page_count; i++) {
		page = read_mapping_page(mapping, i, NULL);
		if (IS_ERR(page)) {
			ret = PTR_ERR(page);
			DRM_ERROR("read_mapping_page failed: %d\n", ret);
			i915_gem_object_free_page_list(obj);
			return ret;
		}
		obj_priv->page_list[i] = page;
	}
	return 0;
}
static void i965_write_fence_reg(struct drm_i915_fence_reg *reg)
{
	struct drm_gem_object *obj = reg->obj;
	struct drm_device *dev = obj->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_i915_gem_object *obj_priv = obj->driver_private;
	int regnum = obj_priv->fence_reg;
	uint64_t val;

	val = (uint64_t)((obj_priv->gtt_offset + obj->size - 4096) &
			 0xfffff000) << 32;
	val |= obj_priv->gtt_offset & 0xfffff000;
	val |= ((obj_priv->stride / 128) - 1) << I965_FENCE_PITCH_SHIFT;
	if (obj_priv->tiling_mode == I915_TILING_Y)
		val |= 1 << I965_FENCE_TILING_Y_SHIFT;
	val |= I965_FENCE_REG_VALID;

	I915_WRITE64(FENCE_REG_965_0 + (regnum * 8), val);
}
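/* Note on the layout above: each 965 fence register is 64 bits wide; the
 * upper dword holds the end address of the fenced range, the lower dword the
 * start address, along with the pitch (in 128-byte units), the Y-tiling bit
 * and the valid bit.
 */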
static void i915_write_fence_reg(struct drm_i915_fence_reg *reg)
{
	struct drm_gem_object *obj = reg->obj;
	struct drm_device *dev = obj->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_i915_gem_object *obj_priv = obj->driver_private;
	int regnum = obj_priv->fence_reg;
	int tile_width;
	uint32_t val;
	uint32_t pitch_val;

	if ((obj_priv->gtt_offset & ~I915_FENCE_START_MASK) ||
	    (obj_priv->gtt_offset & (obj->size - 1))) {
		WARN(1, "%s: object 0x%08x not 1M or size (0x%zx) aligned\n",
		     __func__, obj_priv->gtt_offset, obj->size);
		return;
	}

	if (obj_priv->tiling_mode == I915_TILING_Y &&
	    HAS_128_BYTE_Y_TILING(dev))
		tile_width = 128;
	else
		tile_width = 512;

	/* Note: pitch better be a power of two tile widths */
	pitch_val = obj_priv->stride / tile_width;
	pitch_val = ffs(pitch_val) - 1;

	val = obj_priv->gtt_offset;
	if (obj_priv->tiling_mode == I915_TILING_Y)
		val |= 1 << I830_FENCE_TILING_Y_SHIFT;
	val |= I915_FENCE_SIZE_BITS(obj->size);
	val |= pitch_val << I830_FENCE_PITCH_SHIFT;
	val |= I830_FENCE_REG_VALID;

	I915_WRITE(FENCE_REG_830_0 + (regnum * 4), val);
}
static void i830_write_fence_reg(struct drm_i915_fence_reg *reg)
{
	struct drm_gem_object *obj = reg->obj;
	struct drm_device *dev = obj->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_i915_gem_object *obj_priv = obj->driver_private;
	int regnum = obj_priv->fence_reg;
	uint32_t val;
	uint32_t pitch_val;

	if ((obj_priv->gtt_offset & ~I915_FENCE_START_MASK) ||
	    (obj_priv->gtt_offset & (obj->size - 1))) {
		WARN(1, "%s: object 0x%08x not 1M or size aligned\n",
		     __func__, obj_priv->gtt_offset);
		return;
	}

	pitch_val = (obj_priv->stride / 128) - 1;

	val = obj_priv->gtt_offset;
	if (obj_priv->tiling_mode == I915_TILING_Y)
		val |= 1 << I830_FENCE_TILING_Y_SHIFT;
	val |= I830_FENCE_SIZE_BITS(obj->size);
	val |= pitch_val << I830_FENCE_PITCH_SHIFT;
	val |= I830_FENCE_REG_VALID;

	I915_WRITE(FENCE_REG_830_0 + (regnum * 4), val);
}
/**
 * i915_gem_object_get_fence_reg - set up a fence reg for an object
 * @obj: object to map through a fence reg
 * @write: object is about to be written
 *
 * When mapping objects through the GTT, userspace wants to be able to write
 * to them without having to worry about swizzling if the object is tiled.
 *
 * This function walks the fence regs looking for a free one for @obj,
 * stealing one if it can't find any.
 *
 * It then sets up the reg based on the object's properties: address, pitch
 * and tiling format.
 */
static int
i915_gem_object_get_fence_reg(struct drm_gem_object *obj, bool write)
{
	struct drm_device *dev = obj->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_i915_gem_object *obj_priv = obj->driver_private;
	struct drm_i915_fence_reg *reg = NULL;
	int i, ret;

	switch (obj_priv->tiling_mode) {
	case I915_TILING_NONE:
		WARN(1, "allocating a fence for non-tiled object?\n");
		break;
	case I915_TILING_X:
		if (!obj_priv->stride)
			return -EINVAL;
		WARN((obj_priv->stride & (512 - 1)),
		     "object 0x%08x is X tiled but has non-512B pitch\n",
		     obj_priv->gtt_offset);
		break;
	case I915_TILING_Y:
		if (!obj_priv->stride)
			return -EINVAL;
		WARN((obj_priv->stride & (128 - 1)),
		     "object 0x%08x is Y tiled but has non-128B pitch\n",
		     obj_priv->gtt_offset);
		break;
	}

	/* First try to find a free reg */
	for (i = dev_priv->fence_reg_start; i < dev_priv->num_fence_regs; i++) {
		reg = &dev_priv->fence_regs[i];
		if (!reg->obj)
			break;
	}

	/* None available, try to steal one or wait for a user to finish */
	if (i == dev_priv->num_fence_regs) {
		struct drm_i915_gem_object *old_obj_priv = NULL;
		loff_t offset;

try_again:
		/* Could try to use LRU here instead... */
		for (i = dev_priv->fence_reg_start;
		     i < dev_priv->num_fence_regs; i++) {
			reg = &dev_priv->fence_regs[i];
			old_obj_priv = reg->obj->driver_private;
			if (!old_obj_priv->pin_count)
				break;
		}

		/*
		 * Now things get ugly... we have to wait for one of the
		 * objects to finish before trying again.
		 */
		if (i == dev_priv->num_fence_regs) {
			ret = i915_gem_object_set_to_gtt_domain(reg->obj, 0);
			if (ret) {
				WARN(ret != -ERESTARTSYS,
				     "switch to GTT domain failed: %d\n", ret);
				return ret;
			}
			goto try_again;
		}

		/*
		 * Zap this virtual mapping so we can set up a fence again
		 * for this object next time we need it.
		 */
		offset = ((loff_t) reg->obj->map_list.hash.key) << PAGE_SHIFT;
		if (dev->dev_mapping)
			unmap_mapping_range(dev->dev_mapping, offset,
					    reg->obj->size, 1);
		old_obj_priv->fence_reg = I915_FENCE_REG_NONE;
	}

	obj_priv->fence_reg = i;
	reg->obj = obj;

	if (IS_I965G(dev))
		i965_write_fence_reg(reg);
	else if (IS_I9XX(dev))
		i915_write_fence_reg(reg);
	else
		i830_write_fence_reg(reg);

	return 0;
}
/**
 * i915_gem_clear_fence_reg - clear out fence register info
 * @obj: object to clear
 *
 * Zeroes out the fence register itself and clears out the associated
 * data structures in dev_priv and obj_priv.
 */
static void
i915_gem_clear_fence_reg(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_i915_gem_object *obj_priv = obj->driver_private;

	if (IS_I965G(dev))
		I915_WRITE64(FENCE_REG_965_0 + (obj_priv->fence_reg * 8), 0);
	else
		I915_WRITE(FENCE_REG_830_0 + (obj_priv->fence_reg * 4), 0);

	dev_priv->fence_regs[obj_priv->fence_reg].obj = NULL;
	obj_priv->fence_reg = I915_FENCE_REG_NONE;
}
/**
 * Finds free space in the GTT aperture and binds the object there.
 */
static int
i915_gem_object_bind_to_gtt(struct drm_gem_object *obj, unsigned alignment)
{
	struct drm_device *dev = obj->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_i915_gem_object *obj_priv = obj->driver_private;
	struct drm_mm_node *free_space;
	int page_count, ret;

	if (dev_priv->mm.suspended)
		return -EBUSY;
	if (alignment == 0)
		alignment = i915_gem_get_gtt_alignment(obj);
	if (alignment & (PAGE_SIZE - 1)) {
		DRM_ERROR("Invalid object alignment requested %u\n", alignment);
		return -EINVAL;
	}

 search_free:
	free_space = drm_mm_search_free(&dev_priv->mm.gtt_space,
					obj->size, alignment, 0);
	if (free_space != NULL) {
		obj_priv->gtt_space = drm_mm_get_block(free_space, obj->size,
						       alignment);
		if (obj_priv->gtt_space != NULL) {
			obj_priv->gtt_space->private = obj;
			obj_priv->gtt_offset = obj_priv->gtt_space->start;
		}
	}
	if (obj_priv->gtt_space == NULL) {
		/* If the gtt is empty and we're still having trouble
		 * fitting our object in, we're out of memory.
		 */
		DRM_INFO("%s: GTT full, evicting something\n", __func__);

		if (list_empty(&dev_priv->mm.inactive_list) &&
		    list_empty(&dev_priv->mm.flushing_list) &&
		    list_empty(&dev_priv->mm.active_list)) {
			DRM_ERROR("GTT full, but LRU list empty\n");
			return -ENOMEM;
		}

		ret = i915_gem_evict_something(dev);
		if (ret != 0) {
			if (ret != -ERESTARTSYS)
				DRM_ERROR("Failed to evict a buffer %d\n", ret);
			return ret;
		}
		goto search_free;
	}

	DRM_INFO("Binding object of size %d at 0x%08x\n",
		 obj->size, obj_priv->gtt_offset);

	ret = i915_gem_object_get_page_list(obj);
	if (ret) {
		drm_mm_put_block(obj_priv->gtt_space);
		obj_priv->gtt_space = NULL;
		return ret;
	}

	page_count = obj->size / PAGE_SIZE;
	/* Create an AGP memory structure pointing at our pages, and bind it
	 * into the GTT.
	 */
	obj_priv->agp_mem = drm_agp_bind_pages(dev,
					       obj_priv->page_list,
					       page_count,
					       obj_priv->gtt_offset,
					       obj_priv->agp_type);
	if (obj_priv->agp_mem == NULL) {
		i915_gem_object_free_page_list(obj);
		drm_mm_put_block(obj_priv->gtt_space);
		obj_priv->gtt_space = NULL;
		return -ENOMEM;
	}
	atomic_inc(&dev->gtt_count);
	atomic_add(obj->size, &dev->gtt_memory);

	/* Assert that the object is not currently in any GPU domain. As it
	 * wasn't in the GTT, there shouldn't be any way it could have been in
	 * a GPU cache
	 */
	BUG_ON(obj->read_domains & ~(I915_GEM_DOMAIN_CPU|I915_GEM_DOMAIN_GTT));
	BUG_ON(obj->write_domain & ~(I915_GEM_DOMAIN_CPU|I915_GEM_DOMAIN_GTT));

	return 0;
}
static void
i915_gem_clflush_object(struct drm_gem_object *obj)
{
	struct drm_i915_gem_object *obj_priv = obj->driver_private;

	/* If we don't have a page list set up, then we're not pinned
	 * to GPU, and we can ignore the cache flush because it'll happen
	 * again at bind time.
	 */
	if (obj_priv->page_list == NULL)
		return;

	drm_clflush_pages(obj_priv->page_list, obj->size / PAGE_SIZE);
}
/** Flushes any GPU write domain for the object if it's dirty. */
static void
i915_gem_object_flush_gpu_write_domain(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	uint32_t seqno;

	if ((obj->write_domain & I915_GEM_GPU_DOMAINS) == 0)
		return;

	/* Queue the GPU write cache flushing we need. */
	i915_gem_flush(dev, 0, obj->write_domain);
	seqno = i915_add_request(dev, obj->write_domain);
	obj->write_domain = 0;
	i915_gem_object_move_to_active(obj, seqno);
}
/** Flushes the GTT write domain for the object if it's dirty. */
static void
i915_gem_object_flush_gtt_write_domain(struct drm_gem_object *obj)
{
	if (obj->write_domain != I915_GEM_DOMAIN_GTT)
		return;

	/* No actual flushing is required for the GTT write domain.  Writes
	 * to it immediately go to main memory as far as we know, so there's
	 * no chipset flush.  It also doesn't land in render cache.
	 */
	obj->write_domain = 0;
}
/** Flushes the CPU write domain for the object if it's dirty. */
static void
i915_gem_object_flush_cpu_write_domain(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;

	if (obj->write_domain != I915_GEM_DOMAIN_CPU)
		return;

	i915_gem_clflush_object(obj);
	drm_agp_chipset_flush(dev);
	obj->write_domain = 0;
}
/**
 * Moves a single object to the GTT read, and possibly write domain.
 *
 * This function returns when the move is complete, including waiting on
 * flushes to occur.
 */
int
i915_gem_object_set_to_gtt_domain(struct drm_gem_object *obj, int write)
{
	struct drm_i915_gem_object *obj_priv = obj->driver_private;
	int ret;

	/* Not valid to be called on unbound objects. */
	if (obj_priv->gtt_space == NULL)
		return -EINVAL;

	i915_gem_object_flush_gpu_write_domain(obj);
	/* Wait on any GPU rendering and flushing to occur. */
	ret = i915_gem_object_wait_rendering(obj);
	if (ret != 0)
		return ret;

	/* If we're writing through the GTT domain, then CPU and GPU caches
	 * will need to be invalidated at next use.
	 */
	if (write)
		obj->read_domains &= I915_GEM_DOMAIN_GTT;

	i915_gem_object_flush_cpu_write_domain(obj);

	/* It should now be out of any other write domains, and we can update
	 * the domain values for our changes.
	 */
	BUG_ON((obj->write_domain & ~I915_GEM_DOMAIN_GTT) != 0);
	obj->read_domains |= I915_GEM_DOMAIN_GTT;
	if (write) {
		obj->write_domain = I915_GEM_DOMAIN_GTT;
		obj_priv->dirty = 1;
	}

	return 0;
}
/**
 * Moves a single object to the CPU read, and possibly write domain.
 *
 * This function returns when the move is complete, including waiting on
 * flushes to occur.
 */
static int
i915_gem_object_set_to_cpu_domain(struct drm_gem_object *obj, int write)
{
	struct drm_device *dev = obj->dev;
	int ret;

	i915_gem_object_flush_gpu_write_domain(obj);
	/* Wait on any GPU rendering and flushing to occur. */
	ret = i915_gem_object_wait_rendering(obj);
	if (ret != 0)
		return ret;

	i915_gem_object_flush_gtt_write_domain(obj);

	/* If we have a partially-valid cache of the object in the CPU,
	 * finish invalidating it and free the per-page flags.
	 */
	i915_gem_object_set_to_full_cpu_read_domain(obj);

	/* Flush the CPU cache if it's still invalid. */
	if ((obj->read_domains & I915_GEM_DOMAIN_CPU) == 0) {
		i915_gem_clflush_object(obj);
		drm_agp_chipset_flush(dev);

		obj->read_domains |= I915_GEM_DOMAIN_CPU;
	}

	/* It should now be out of any other write domains, and we can update
	 * the domain values for our changes.
	 */
	BUG_ON((obj->write_domain & ~I915_GEM_DOMAIN_CPU) != 0);

	/* If we're writing through the CPU, then the GPU read domains will
	 * need to be invalidated at next use.
	 */
	if (write) {
		obj->read_domains &= I915_GEM_DOMAIN_CPU;
		obj->write_domain = I915_GEM_DOMAIN_CPU;
	}

	return 0;
}
/*
 * Set the next domain for the specified object. This
 * may not actually perform the necessary flushing/invaliding though,
 * as that may want to be batched with other set_domain operations
 *
 * This is (we hope) the only really tricky part of gem. The goal
 * is fairly simple -- track which caches hold bits of the object
 * and make sure they remain coherent. A few concrete examples may
 * help to explain how it works. For shorthand, we use the notation
 * (read_domains, write_domain), e.g. (CPU, CPU) to indicate
 * a pair of read and write domain masks.
 *
 * Case 1: the batch buffer
 *
 *	1. Allocated
 *	2. Written by CPU
 *	3. Mapped to GTT
 *	4. Read by GPU
 *	5. Unmapped from GTT
 *	6. Freed
 *
 *	Let's take these a step at a time
 *
 *	1. Allocated
 *		Pages allocated from the kernel may still have
 *		cache contents, so we set them to (CPU, CPU) always.
 *	2. Written by CPU (using pwrite)
 *		The pwrite function calls set_domain (CPU, CPU) and
 *		this function does nothing (as nothing changes)
 *	3. Mapped by GTT
 *		This function asserts that the object is not
 *		currently in any GPU-based read or write domains
 *	4. Read by GPU
 *		i915_gem_execbuffer calls set_domain (COMMAND, 0).
 *		As write_domain is zero, this function adds in the
 *		current read domains (CPU+COMMAND, 0).
 *		flush_domains is set to CPU.
 *		invalidate_domains is set to COMMAND
 *		clflush is run to get data out of the CPU caches
 *		then i915_dev_set_domain calls i915_gem_flush to
 *		emit an MI_FLUSH and drm_agp_chipset_flush
 *	5. Unmapped from GTT
 *		i915_gem_object_unbind calls set_domain (CPU, CPU)
 *		flush_domains and invalidate_domains end up both zero
 *		so no flushing/invalidating happens
 *	6. Freed
 *		yay, done
 *
 * Case 2: The shared render buffer
 *
 *	1. Allocated
 *	2. Mapped to GTT
 *	3. Read/written by GPU
 *	4. set_domain to (CPU,CPU)
 *	5. Read/written by CPU
 *	6. Read/written by GPU
 *
 *	1. Allocated
 *		Same as last example, (CPU, CPU)
 *	2. Mapped to GTT
 *		Nothing changes (assertions find that it is not in the GPU)
 *	3. Read/written by GPU
 *		execbuffer calls set_domain (RENDER, RENDER)
 *		flush_domains gets CPU
 *		invalidate_domains gets GPU
 *		clflush (obj)
 *		MI_FLUSH and drm_agp_chipset_flush
 *	4. set_domain (CPU, CPU)
 *		flush_domains gets GPU
 *		invalidate_domains gets CPU
 *		wait_rendering (obj) to make sure all drawing is complete.
 *		This will include an MI_FLUSH to get the data from GPU
 *		to memory
 *		clflush (obj) to invalidate the CPU cache
 *		Another MI_FLUSH in i915_gem_flush (eliminate this somehow?)
 *	5. Read/written by CPU
 *		cache lines are loaded and dirtied
 *	6. Read written by GPU
 *		Same as last GPU access
 *
 * Case 3: The constant buffer
 *
 *	1. Allocated
 *	2. Written by CPU
 *	3. Read by GPU
 *	4. Updated (written) by CPU again
 *	5. Read by GPU
 *
 *	1. Allocated
 *		(CPU, CPU)
 *	2. Written by CPU
 *		(CPU, CPU)
 *	3. Read by GPU
 *		(CPU+RENDER, 0)
 *		flush_domains = CPU
 *		invalidate_domains = RENDER
 *		clflush (obj)
 *		MI_FLUSH
 *		drm_agp_chipset_flush
 *	4. Updated (written) by CPU again
 *		(CPU, CPU)
 *		flush_domains = 0 (no previous write domain)
 *		invalidate_domains = 0 (no new read domains)
 *	5. Read by GPU
 *		(CPU+RENDER, 0)
 *		flush_domains = CPU
 *		invalidate_domains = RENDER
 *		clflush (obj)
 *		MI_FLUSH
 *		drm_agp_chipset_flush
 */
static void
i915_gem_object_set_to_gpu_domain(struct drm_gem_object *obj,
				  uint32_t read_domains,
				  uint32_t write_domain)
{
	struct drm_device *dev = obj->dev;
	struct drm_i915_gem_object *obj_priv = obj->driver_private;
	uint32_t invalidate_domains = 0;
	uint32_t flush_domains = 0;

	BUG_ON(read_domains & I915_GEM_DOMAIN_CPU);
	BUG_ON(write_domain == I915_GEM_DOMAIN_CPU);

	DRM_INFO("%s: object %p read %08x -> %08x write %08x -> %08x\n",
		 __func__, obj,
		 obj->read_domains, read_domains,
		 obj->write_domain, write_domain);

	/*
	 * If the object isn't moving to a new write domain,
	 * let the object stay in multiple read domains
	 */
	if (write_domain == 0)
		read_domains |= obj->read_domains;
	else
		obj_priv->dirty = 1;

	/*
	 * Flush the current write domain if
	 * the new read domains don't match. Invalidate
	 * any read domains which differ from the old
	 * write domain
	 */
	if (obj->write_domain && obj->write_domain != read_domains) {
		flush_domains |= obj->write_domain;
		invalidate_domains |= read_domains & ~obj->write_domain;
	}
	/*
	 * Invalidate any read caches which may have
	 * stale data. That is, any new read domains.
	 */
	invalidate_domains |= read_domains & ~obj->read_domains;
	if ((flush_domains | invalidate_domains) & I915_GEM_DOMAIN_CPU) {
		DRM_INFO("%s: CPU domain flush %08x invalidate %08x\n",
			 __func__, flush_domains, invalidate_domains);
		i915_gem_clflush_object(obj);
	}

	if ((write_domain | flush_domains) != 0)
		obj->write_domain = write_domain;
	obj->read_domains = read_domains;

	dev->invalidate_domains |= invalidate_domains;
	dev->flush_domains |= flush_domains;

	DRM_INFO("%s: read %08x write %08x invalidate %08x flush %08x\n",
		 __func__,
		 obj->read_domains, obj->write_domain,
		 dev->invalidate_domains, dev->flush_domains);
}
/**
 * Moves the object from a partially CPU read to a full one.
 *
 * Note that this only resolves i915_gem_object_set_cpu_read_domain_range(),
 * and doesn't handle transitioning from !(read_domains & I915_GEM_DOMAIN_CPU).
 */
static void
i915_gem_object_set_to_full_cpu_read_domain(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	struct drm_i915_gem_object *obj_priv = obj->driver_private;

	if (!obj_priv->page_cpu_valid)
		return;

	/* If we're partially in the CPU read domain, finish moving it in.
	 */
	if (obj->read_domains & I915_GEM_DOMAIN_CPU) {
		int i;

		for (i = 0; i <= (obj->size - 1) / PAGE_SIZE; i++) {
			if (obj_priv->page_cpu_valid[i])
				continue;
			drm_clflush_pages(obj_priv->page_list + i, 1);
		}
		drm_agp_chipset_flush(dev);
	}

	/* Free the page_cpu_valid mappings which are now stale, whether
	 * or not we've got I915_GEM_DOMAIN_CPU.
	 */
	drm_free(obj_priv->page_cpu_valid, obj->size / PAGE_SIZE,
		 DRM_MEM_DRIVER);
	obj_priv->page_cpu_valid = NULL;
}
/**
 * Set the CPU read domain on a range of the object.
 *
 * The object ends up with I915_GEM_DOMAIN_CPU in its read flags although it's
 * not entirely valid.  The page_cpu_valid member of the object flags which
 * pages have been flushed, and will be respected by
 * i915_gem_object_set_to_cpu_domain() if it's called on to get a valid mapping
 * of the whole object.
 *
 * This function returns when the move is complete, including waiting on
 * flushes to occur.
 */
static int
i915_gem_object_set_cpu_read_domain_range(struct drm_gem_object *obj,
					  uint64_t offset, uint64_t size)
{
	struct drm_i915_gem_object *obj_priv = obj->driver_private;
	int i, ret;

	if (offset == 0 && size == obj->size)
		return i915_gem_object_set_to_cpu_domain(obj, 0);

	i915_gem_object_flush_gpu_write_domain(obj);
	/* Wait on any GPU rendering and flushing to occur. */
	ret = i915_gem_object_wait_rendering(obj);
	if (ret != 0)
		return ret;
	i915_gem_object_flush_gtt_write_domain(obj);

	/* If we're already fully in the CPU read domain, we're done. */
	if (obj_priv->page_cpu_valid == NULL &&
	    (obj->read_domains & I915_GEM_DOMAIN_CPU) != 0)
		return 0;

	/* Otherwise, create/clear the per-page CPU read domain flag if we're
	 * newly adding I915_GEM_DOMAIN_CPU
	 */
	if (obj_priv->page_cpu_valid == NULL) {
		obj_priv->page_cpu_valid = drm_calloc(1, obj->size / PAGE_SIZE,
						      DRM_MEM_DRIVER);
		if (obj_priv->page_cpu_valid == NULL)
			return -ENOMEM;
	} else if ((obj->read_domains & I915_GEM_DOMAIN_CPU) == 0)
		memset(obj_priv->page_cpu_valid, 0, obj->size / PAGE_SIZE);

	/* Flush the cache on any pages that are still invalid from the CPU's
	 * perspective.
	 */
	for (i = offset / PAGE_SIZE; i <= (offset + size - 1) / PAGE_SIZE;
	     i++) {
		if (obj_priv->page_cpu_valid[i])
			continue;

		drm_clflush_pages(obj_priv->page_list + i, 1);

		obj_priv->page_cpu_valid[i] = 1;
	}

	/* It should now be out of any other write domains, and we can update
	 * the domain values for our changes.
	 */
	BUG_ON((obj->write_domain & ~I915_GEM_DOMAIN_CPU) != 0);

	obj->read_domains |= I915_GEM_DOMAIN_CPU;

	return 0;
}
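/* Note: page_cpu_valid is a simple array with one byte per page of the
 * object; a page's entry is set once that page has been clflushed, so
 * repeated partial reads of the same range avoid redundant cache flushes.
 */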
/**
 * Pin an object to the GTT and evaluate the relocations landing in it.
 */
static int
i915_gem_object_pin_and_relocate(struct drm_gem_object *obj,
				 struct drm_file *file_priv,
				 struct drm_i915_gem_exec_object *entry)
{
	struct drm_device *dev = obj->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_i915_gem_relocation_entry reloc;
	struct drm_i915_gem_relocation_entry __user *relocs;
	struct drm_i915_gem_object *obj_priv = obj->driver_private;
	int i, ret;
	void __iomem *reloc_page;

	/* Choose the GTT offset for our buffer and put it there. */
	ret = i915_gem_object_pin(obj, (uint32_t) entry->alignment);
	if (ret)
		return ret;

	entry->offset = obj_priv->gtt_offset;

	relocs = (struct drm_i915_gem_relocation_entry __user *)
		 (uintptr_t) entry->relocs_ptr;
	/* Apply the relocations, using the GTT aperture to avoid cache
	 * flushing requirements.
	 */
	for (i = 0; i < entry->relocation_count; i++) {
		struct drm_gem_object *target_obj;
		struct drm_i915_gem_object *target_obj_priv;
		uint32_t reloc_val, reloc_offset;
		uint32_t __iomem *reloc_entry;

		ret = copy_from_user(&reloc, relocs + i, sizeof(reloc));
		if (ret != 0) {
			i915_gem_object_unpin(obj);
			return ret;
		}

		target_obj = drm_gem_object_lookup(obj->dev, file_priv,
						   reloc.target_handle);
		if (target_obj == NULL) {
			i915_gem_object_unpin(obj);
			return -EBADF;
		}
		target_obj_priv = target_obj->driver_private;

		/* The target buffer should have appeared before us in the
		 * exec_object list, so it should have a GTT space bound by now.
		 */
		if (target_obj_priv->gtt_space == NULL) {
			DRM_ERROR("No GTT space found for object %d\n",
				  reloc.target_handle);
			drm_gem_object_unreference(target_obj);
			i915_gem_object_unpin(obj);
			return -EINVAL;
		}

		if (reloc.offset > obj->size - 4) {
			DRM_ERROR("Relocation beyond object bounds: "
				  "obj %p target %d offset %d size %d.\n",
				  obj, reloc.target_handle,
				  (int) reloc.offset, (int) obj->size);
			drm_gem_object_unreference(target_obj);
			i915_gem_object_unpin(obj);
			return -EINVAL;
		}
		if (reloc.offset & 3) {
			DRM_ERROR("Relocation not 4-byte aligned: "
				  "obj %p target %d offset %d.\n",
				  obj, reloc.target_handle,
				  (int) reloc.offset);
			drm_gem_object_unreference(target_obj);
			i915_gem_object_unpin(obj);
			return -EINVAL;
		}

		if (reloc.write_domain & I915_GEM_DOMAIN_CPU ||
		    reloc.read_domains & I915_GEM_DOMAIN_CPU) {
			DRM_ERROR("reloc with read/write CPU domains: "
				  "obj %p target %d offset %d "
				  "read %08x write %08x",
				  obj, reloc.target_handle,
				  (int) reloc.offset,
				  reloc.read_domains,
				  reloc.write_domain);
			drm_gem_object_unreference(target_obj);
			i915_gem_object_unpin(obj);
			return -EINVAL;
		}

		if (reloc.write_domain && target_obj->pending_write_domain &&
		    reloc.write_domain != target_obj->pending_write_domain) {
			DRM_ERROR("Write domain conflict: "
				  "obj %p target %d offset %d "
				  "new %08x old %08x\n",
				  obj, reloc.target_handle,
				  (int) reloc.offset,
				  reloc.write_domain,
				  target_obj->pending_write_domain);
			drm_gem_object_unreference(target_obj);
			i915_gem_object_unpin(obj);
			return -EINVAL;
		}

#if WATCH_RELOC
		DRM_INFO("%s: obj %p offset %08x target %d "
			 "read %08x write %08x gtt %08x "
			 "presumed %08x delta %08x\n",
			 __func__,
			 obj,
			 (int) reloc.offset,
			 (int) reloc.target_handle,
			 (int) reloc.read_domains,
			 (int) reloc.write_domain,
			 (int) target_obj_priv->gtt_offset,
			 (int) reloc.presumed_offset,
			 reloc.delta);
#endif

		target_obj->pending_read_domains |= reloc.read_domains;
		target_obj->pending_write_domain |= reloc.write_domain;

		/* If the relocation already has the right value in it, no
		 * more work needs to be done.
		 */
		if (target_obj_priv->gtt_offset == reloc.presumed_offset) {
			drm_gem_object_unreference(target_obj);
			continue;
		}

		ret = i915_gem_object_set_to_gtt_domain(obj, 1);
		if (ret != 0) {
			drm_gem_object_unreference(target_obj);
			i915_gem_object_unpin(obj);
			return -EINVAL;
		}

		/* Map the page containing the relocation we're going to
		 * perform.
		 */
		reloc_offset = obj_priv->gtt_offset + reloc.offset;
		reloc_page = io_mapping_map_atomic_wc(dev_priv->mm.gtt_mapping,
						      (reloc_offset &
						       ~(PAGE_SIZE - 1)));
		reloc_entry = (uint32_t __iomem *)(reloc_page +
						   (reloc_offset & (PAGE_SIZE - 1)));
		reloc_val = target_obj_priv->gtt_offset + reloc.delta;

#if WATCH_BUF
		DRM_INFO("Applied relocation: %p@0x%08x %08x -> %08x\n",
			 obj, (unsigned int) reloc.offset,
			 readl(reloc_entry), reloc_val);
#endif
		writel(reloc_val, reloc_entry);
		io_mapping_unmap_atomic(reloc_page);

		/* Write the updated presumed offset for this entry back out
		 * to the user.
		 */
		reloc.presumed_offset = target_obj_priv->gtt_offset;
		ret = copy_to_user(relocs + i, &reloc, sizeof(reloc));
		if (ret != 0) {
			drm_gem_object_unreference(target_obj);
			i915_gem_object_unpin(obj);
			return ret;
		}

		drm_gem_object_unreference(target_obj);
	}

#if WATCH_BUF
	if (0)
		i915_gem_dump_object(obj, 128, __func__, ~0);
#endif
	return 0;
}
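/*
 * Worked example of the relocation math above (values are hypothetical):
 * if the target buffer ended up bound at gtt_offset 0x00a00000 and the
 * relocation entry carries delta 0x100, the dword written back through the
 * GTT mapping is
 *
 *	reloc_val = 0x00a00000 + 0x100 = 0x00a00100
 *
 * and presumed_offset is updated to 0x00a00000, so userspace can skip this
 * fixup entirely the next time the buffer lands at the same address.
 */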
/** Dispatch a batchbuffer to the ring
 */
static int
i915_dispatch_gem_execbuffer(struct drm_device *dev,
			     struct drm_i915_gem_execbuffer *exec,
			     uint64_t exec_offset)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_clip_rect __user *boxes = (struct drm_clip_rect __user *)
					     (uintptr_t) exec->cliprects_ptr;
	int nbox = exec->num_cliprects;
	int i = 0, count;
	uint32_t exec_start, exec_len;
	RING_LOCALS;

	exec_start = (uint32_t) exec_offset + exec->batch_start_offset;
	exec_len = (uint32_t) exec->batch_len;

	if ((exec_start | exec_len) & 0x7) {
		DRM_ERROR("alignment\n");
		return -EINVAL;
	}

	count = nbox ? nbox : 1;

	for (i = 0; i < count; i++) {
		if (i < nbox) {
			int ret = i915_emit_box(dev, boxes, i,
						exec->DR1, exec->DR4);
			if (ret)
				return ret;
		}

		if (IS_I830(dev) || IS_845G(dev)) {
			BEGIN_LP_RING(4);
			OUT_RING(MI_BATCH_BUFFER);
			OUT_RING(exec_start | MI_BATCH_NON_SECURE);
			OUT_RING(exec_start + exec_len - 4);
			OUT_RING(0);
			ADVANCE_LP_RING();
		} else {
			BEGIN_LP_RING(2);
			if (IS_I965G(dev)) {
				OUT_RING(MI_BATCH_BUFFER_START |
					 (2 << 6) |
					 MI_BATCH_NON_SECURE_I965);
				OUT_RING(exec_start);
			} else {
				OUT_RING(MI_BATCH_BUFFER_START |
					 (2 << 6));
				OUT_RING(exec_start | MI_BATCH_NON_SECURE);
			}
			ADVANCE_LP_RING();
		}
	}

	/* XXX breadcrumb */
	return 0;
}
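/*
 * Note: the dispatch above only emits the MI_BATCH_BUFFER /
 * MI_BATCH_BUFFER_START commands.  The breadcrumb that lets us tell when the
 * batch has completed is added separately by the execbuffer path below, via
 * i915_retire_commands() and i915_add_request(), which is what the XXX
 * comment is referring to.
 */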
/* Throttle our rendering by waiting until the ring has completed our requests
 * emitted over 20 msec ago.
 *
 * This should get us reasonable parallelism between CPU and GPU but also
 * relatively low latency when blocking on a particular request to finish.
 */
static int
i915_gem_ring_throttle(struct drm_device *dev, struct drm_file *file_priv)
{
	struct drm_i915_file_private *i915_file_priv = file_priv->driver_priv;
	int ret = 0;
	uint32_t seqno;

	mutex_lock(&dev->struct_mutex);
	seqno = i915_file_priv->mm.last_gem_throttle_seqno;
	i915_file_priv->mm.last_gem_throttle_seqno =
		i915_file_priv->mm.last_gem_seqno;
	if (seqno)
		ret = i915_wait_request(dev, seqno);
	mutex_unlock(&dev->struct_mutex);
	return ret;
}
int
i915_gem_execbuffer(struct drm_device *dev, void *data,
		    struct drm_file *file_priv)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_i915_file_private *i915_file_priv = file_priv->driver_priv;
	struct drm_i915_gem_execbuffer *args = data;
	struct drm_i915_gem_exec_object *exec_list = NULL;
	struct drm_gem_object **object_list = NULL;
	struct drm_gem_object *batch_obj;
	int ret, i, pinned = 0;
	uint64_t exec_offset;
	uint32_t seqno, flush_domains;
	int pin_tries;

#if WATCH_EXEC
	DRM_INFO("buffers_ptr %d buffer_count %d len %08x\n",
		 (int) args->buffers_ptr, args->buffer_count, args->batch_len);
#endif

	if (args->buffer_count < 1) {
		DRM_ERROR("execbuf with %d buffers\n", args->buffer_count);
		return -EINVAL;
	}
	/* Copy in the exec list from userland */
	exec_list = drm_calloc(sizeof(*exec_list), args->buffer_count,
			       DRM_MEM_DRIVER);
	object_list = drm_calloc(sizeof(*object_list), args->buffer_count,
				 DRM_MEM_DRIVER);
	if (exec_list == NULL || object_list == NULL) {
		DRM_ERROR("Failed to allocate exec or object list "
			  "for %d buffers\n",
			  args->buffer_count);
		ret = -ENOMEM;
		goto pre_mutex_err;
	}
	ret = copy_from_user(exec_list,
			     (struct drm_i915_relocation_entry __user *)
			     (uintptr_t) args->buffers_ptr,
			     sizeof(*exec_list) * args->buffer_count);
	if (ret != 0) {
		DRM_ERROR("copy %d exec entries failed %d\n",
			  args->buffer_count, ret);
		goto pre_mutex_err;
	}

	mutex_lock(&dev->struct_mutex);

	i915_verify_inactive(dev, __FILE__, __LINE__);

	if (dev_priv->mm.wedged) {
		DRM_ERROR("Execbuf while wedged\n");
		mutex_unlock(&dev->struct_mutex);
		ret = -EIO;
		goto pre_mutex_err;
	}

	if (dev_priv->mm.suspended) {
		DRM_ERROR("Execbuf while VT-switched.\n");
		mutex_unlock(&dev->struct_mutex);
		ret = -EBUSY;
		goto pre_mutex_err;
	}

	/* Look up object handles */
	for (i = 0; i < args->buffer_count; i++) {
		object_list[i] = drm_gem_object_lookup(dev, file_priv,
						       exec_list[i].handle);
		if (object_list[i] == NULL) {
			DRM_ERROR("Invalid object handle %d at index %d\n",
				  exec_list[i].handle, i);
			ret = -EBADF;
			goto err;
		}
	}

	/* Pin and relocate */
	for (pin_tries = 0; ; pin_tries++) {
		ret = 0;
		for (i = 0; i < args->buffer_count; i++) {
			object_list[i]->pending_read_domains = 0;
			object_list[i]->pending_write_domain = 0;
			ret = i915_gem_object_pin_and_relocate(object_list[i],
							       file_priv,
							       &exec_list[i]);
			if (ret)
				break;
			pinned = i + 1;
		}
		/* success */
		if (ret == 0)
			break;

		/* error other than GTT full, or we've already tried again */
		if (ret != -ENOMEM || pin_tries >= 1) {
			if (ret != -ERESTARTSYS)
				DRM_ERROR("Failed to pin buffers %d\n", ret);
			goto err;
		}

		/* unpin all of our buffers */
		for (i = 0; i < pinned; i++)
			i915_gem_object_unpin(object_list[i]);
		pinned = 0;

		/* evict everyone we can from the aperture */
		ret = i915_gem_evict_everything(dev);
		if (ret)
			goto err;
	}

	/* Set the pending read domains for the batch buffer to COMMAND */
	batch_obj = object_list[args->buffer_count-1];
	batch_obj->pending_read_domains = I915_GEM_DOMAIN_COMMAND;
	batch_obj->pending_write_domain = 0;

	i915_verify_inactive(dev, __FILE__, __LINE__);

	/* Zero the global flush/invalidate flags. These
	 * will be modified as new domains are computed
	 * for each object
	 */
	dev->invalidate_domains = 0;
	dev->flush_domains = 0;

	for (i = 0; i < args->buffer_count; i++) {
		struct drm_gem_object *obj = object_list[i];

		/* Compute new gpu domains and update invalidate/flush */
		i915_gem_object_set_to_gpu_domain(obj,
						  obj->pending_read_domains,
						  obj->pending_write_domain);
	}

	i915_verify_inactive(dev, __FILE__, __LINE__);

	if (dev->invalidate_domains | dev->flush_domains) {
#if WATCH_EXEC
		DRM_INFO("%s: invalidate_domains %08x flush_domains %08x\n",
			 __func__,
			 dev->invalidate_domains,
			 dev->flush_domains);
#endif
		i915_gem_flush(dev,
			       dev->invalidate_domains,
			       dev->flush_domains);
		if (dev->flush_domains)
			(void)i915_add_request(dev, dev->flush_domains);
	}

	i915_verify_inactive(dev, __FILE__, __LINE__);

#if WATCH_COHERENCY
	for (i = 0; i < args->buffer_count; i++) {
		i915_gem_object_check_coherency(object_list[i],
						exec_list[i].handle);
	}
#endif

	exec_offset = exec_list[args->buffer_count - 1].offset;

#if WATCH_EXEC
	i915_gem_dump_object(object_list[args->buffer_count - 1],
			     args->batch_len,
			     __func__,
			     ~0);
#endif

	/* Exec the batchbuffer */
	ret = i915_dispatch_gem_execbuffer(dev, args, exec_offset);
	if (ret) {
		DRM_ERROR("dispatch failed %d\n", ret);
		goto err;
	}

	/*
	 * Ensure that the commands in the batch buffer are
	 * finished before the interrupt fires
	 */
	flush_domains = i915_retire_commands(dev);

	i915_verify_inactive(dev, __FILE__, __LINE__);

	/*
	 * Get a seqno representing the execution of the current buffer,
	 * which we can wait on.  We would like to mitigate these interrupts,
	 * likely by only creating seqnos occasionally (so that we have
	 * *some* interrupts representing completion of buffers that we can
	 * wait on when trying to clear up gtt space).
	 */
	seqno = i915_add_request(dev, flush_domains);
	BUG_ON(seqno == 0);
	i915_file_priv->mm.last_gem_seqno = seqno;
	for (i = 0; i < args->buffer_count; i++) {
		struct drm_gem_object *obj = object_list[i];

		i915_gem_object_move_to_active(obj, seqno);
#if WATCH_LRU
		DRM_INFO("%s: move to exec list %p\n", __func__, obj);
#endif
	}
#if WATCH_LRU
	i915_dump_lru(dev, __func__);
#endif

	i915_verify_inactive(dev, __FILE__, __LINE__);

	/* Copy the new buffer offsets back to the user's exec list. */
	ret = copy_to_user((struct drm_i915_relocation_entry __user *)
			   (uintptr_t) args->buffers_ptr,
			   exec_list,
			   sizeof(*exec_list) * args->buffer_count);
	if (ret)
		DRM_ERROR("failed to copy %d exec entries "
			  "back to user (%d)\n",
			  args->buffer_count, ret);
err:
	for (i = 0; i < pinned; i++)
		i915_gem_object_unpin(object_list[i]);

	for (i = 0; i < args->buffer_count; i++)
		drm_gem_object_unreference(object_list[i]);

	mutex_unlock(&dev->struct_mutex);

pre_mutex_err:
	drm_free(object_list, sizeof(*object_list) * args->buffer_count,
		 DRM_MEM_DRIVER);
	drm_free(exec_list, sizeof(*exec_list) * args->buffer_count,
		 DRM_MEM_DRIVER);

	return ret;
}
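/**
 * i915_gem_object_pin - bind an object into the GTT and hold it there.
 *
 * Binds the object if it has no GTT space yet, grabs a fence register for
 * tiled surfaces on pre-965 parts, and removes it from the inactive list
 * while the pin count is non-zero.  Must be called with struct_mutex held.
 */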
int
i915_gem_object_pin(struct drm_gem_object *obj, uint32_t alignment)
{
	struct drm_device *dev = obj->dev;
	struct drm_i915_gem_object *obj_priv = obj->driver_private;
	int ret;

	i915_verify_inactive(dev, __FILE__, __LINE__);
	if (obj_priv->gtt_space == NULL) {
		ret = i915_gem_object_bind_to_gtt(obj, alignment);
		if (ret != 0) {
			if (ret != -EBUSY && ret != -ERESTARTSYS)
				DRM_ERROR("Failure to bind: %d", ret);
			return ret;
		}
	}

	/*
	 * Pre-965 chips need a fence register set up in order to
	 * properly handle tiled surfaces.
	 */
	if (!IS_I965G(dev) &&
	    obj_priv->fence_reg == I915_FENCE_REG_NONE &&
	    obj_priv->tiling_mode != I915_TILING_NONE)
		i915_gem_object_get_fence_reg(obj, true);

	obj_priv->pin_count++;

	/* If the object is not active and not pending a flush,
	 * remove it from the inactive list
	 */
	if (obj_priv->pin_count == 1) {
		atomic_inc(&dev->pin_count);
		atomic_add(obj->size, &dev->pin_memory);
		if (!obj_priv->active &&
		    (obj->write_domain & ~(I915_GEM_DOMAIN_CPU |
					   I915_GEM_DOMAIN_GTT)) == 0 &&
		    !list_empty(&obj_priv->list))
			list_del_init(&obj_priv->list);
	}
	i915_verify_inactive(dev, __FILE__, __LINE__);

	return 0;
}
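/**
 * i915_gem_object_unpin - drop a pin reference taken by i915_gem_object_pin.
 *
 * When the last pin goes away and the object is neither active nor waiting
 * on a GPU flush, it is put back on the inactive list so the eviction code
 * can consider it again.  Must be called with struct_mutex held.
 */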
void
i915_gem_object_unpin(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_i915_gem_object *obj_priv = obj->driver_private;

	i915_verify_inactive(dev, __FILE__, __LINE__);
	obj_priv->pin_count--;
	BUG_ON(obj_priv->pin_count < 0);
	BUG_ON(obj_priv->gtt_space == NULL);

	/* If the object is no longer pinned, and is
	 * neither active nor being flushed, then stick it on
	 * the inactive list
	 */
	if (obj_priv->pin_count == 0) {
		if (!obj_priv->active &&
		    (obj->write_domain & ~(I915_GEM_DOMAIN_CPU |
					   I915_GEM_DOMAIN_GTT)) == 0)
			list_move_tail(&obj_priv->list,
				       &dev_priv->mm.inactive_list);
		atomic_dec(&dev->pin_count);
		atomic_sub(obj->size, &dev->pin_memory);
	}
	i915_verify_inactive(dev, __FILE__, __LINE__);
}
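/*
 * The pin/unpin ioctls layer a per-file user_pin_count on top of the kernel
 * pin_count: only the first DRM_IOCTL_I915_GEM_PIN from a given file actually
 * pins the object, and only the matching final unpin releases it.  A rough
 * userspace sketch of the pairing (error handling omitted, values
 * illustrative):
 *
 *	struct drm_i915_gem_pin pin = { .handle = handle, .alignment = 4096 };
 *	ioctl(fd, DRM_IOCTL_I915_GEM_PIN, &pin);   // pin.offset = GTT address
 *	...
 *	// later, a DRM_IOCTL_I915_GEM_UNPIN with the same handle drops the pin
 */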
int
i915_gem_pin_ioctl(struct drm_device *dev, void *data,
		   struct drm_file *file_priv)
{
	struct drm_i915_gem_pin *args = data;
	struct drm_gem_object *obj;
	struct drm_i915_gem_object *obj_priv;
	int ret;

	mutex_lock(&dev->struct_mutex);

	obj = drm_gem_object_lookup(dev, file_priv, args->handle);
	if (obj == NULL) {
		DRM_ERROR("Bad handle in i915_gem_pin_ioctl(): %d\n",
			  args->handle);
		mutex_unlock(&dev->struct_mutex);
		return -EBADF;
	}
	obj_priv = obj->driver_private;

	if (obj_priv->pin_filp != NULL && obj_priv->pin_filp != file_priv) {
		DRM_ERROR("Already pinned in i915_gem_pin_ioctl(): %d\n",
			  args->handle);
		drm_gem_object_unreference(obj);
		mutex_unlock(&dev->struct_mutex);
		return -EINVAL;
	}

	obj_priv->user_pin_count++;
	obj_priv->pin_filp = file_priv;
	if (obj_priv->user_pin_count == 1) {
		ret = i915_gem_object_pin(obj, args->alignment);
		if (ret != 0) {
			drm_gem_object_unreference(obj);
			mutex_unlock(&dev->struct_mutex);
			return ret;
		}
	}

	/* XXX - flush the CPU caches for pinned objects
	 * as the X server doesn't manage domains yet
	 */
	i915_gem_object_flush_cpu_write_domain(obj);
	args->offset = obj_priv->gtt_offset;
	drm_gem_object_unreference(obj);
	mutex_unlock(&dev->struct_mutex);

	return 0;
}
int
i915_gem_unpin_ioctl(struct drm_device *dev, void *data,
		     struct drm_file *file_priv)
{
	struct drm_i915_gem_pin *args = data;
	struct drm_gem_object *obj;
	struct drm_i915_gem_object *obj_priv;

	mutex_lock(&dev->struct_mutex);

	obj = drm_gem_object_lookup(dev, file_priv, args->handle);
	if (obj == NULL) {
		DRM_ERROR("Bad handle in i915_gem_unpin_ioctl(): %d\n",
			  args->handle);
		mutex_unlock(&dev->struct_mutex);
		return -EBADF;
	}

	obj_priv = obj->driver_private;
	if (obj_priv->pin_filp != file_priv) {
		DRM_ERROR("Not pinned by caller in i915_gem_unpin_ioctl(): %d\n",
			  args->handle);
		drm_gem_object_unreference(obj);
		mutex_unlock(&dev->struct_mutex);
		return -EINVAL;
	}
	obj_priv->user_pin_count--;
	if (obj_priv->user_pin_count == 0) {
		obj_priv->pin_filp = NULL;
		i915_gem_object_unpin(obj);
	}

	drm_gem_object_unreference(obj);
	mutex_unlock(&dev->struct_mutex);
	return 0;
}
int
i915_gem_busy_ioctl(struct drm_device *dev, void *data,
		    struct drm_file *file_priv)
{
	struct drm_i915_gem_busy *args = data;
	struct drm_gem_object *obj;
	struct drm_i915_gem_object *obj_priv;

	mutex_lock(&dev->struct_mutex);
	obj = drm_gem_object_lookup(dev, file_priv, args->handle);
	if (obj == NULL) {
		DRM_ERROR("Bad handle in i915_gem_busy_ioctl(): %d\n",
			  args->handle);
		mutex_unlock(&dev->struct_mutex);
		return -EBADF;
	}

	obj_priv = obj->driver_private;
	/* Don't count being on the flushing list against the object being
	 * done.  Otherwise, a buffer left on the flushing list but not getting
	 * flushed (because nobody's flushing that domain) won't ever return
	 * unbusy and get reused by libdrm's bo cache.  The other expected
	 * consumer of this interface, OpenGL's occlusion queries, also specs
	 * that the objects get unbusy "eventually" without any interference.
	 */
	args->busy = obj_priv->active && obj_priv->last_rendering_seqno != 0;

	drm_gem_object_unreference(obj);
	mutex_unlock(&dev->struct_mutex);
	return 0;
}
int
i915_gem_throttle_ioctl(struct drm_device *dev, void *data,
			struct drm_file *file_priv)
{
	return i915_gem_ring_throttle(dev, file_priv);
}
int i915_gem_init_object(struct drm_gem_object *obj)
{
	struct drm_i915_gem_object *obj_priv;

	obj_priv = drm_calloc(1, sizeof(*obj_priv), DRM_MEM_DRIVER);
	if (obj_priv == NULL)
		return -ENOMEM;

	/*
	 * We've just allocated pages from the kernel,
	 * so they've just been written by the CPU with
	 * zeros. They'll need to be clflushed before we
	 * use them with the GPU.
	 */
	obj->write_domain = I915_GEM_DOMAIN_CPU;
	obj->read_domains = I915_GEM_DOMAIN_CPU;

	obj_priv->agp_type = AGP_USER_MEMORY;

	obj->driver_private = obj_priv;
	obj_priv->obj = obj;
	obj_priv->fence_reg = I915_FENCE_REG_NONE;
	INIT_LIST_HEAD(&obj_priv->list);

	return 0;
}
void i915_gem_free_object(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	struct drm_gem_mm *mm = dev->mm_private;
	struct drm_map_list *list;
	struct drm_map *map;
	struct drm_i915_gem_object *obj_priv = obj->driver_private;

	while (obj_priv->pin_count > 0)
		i915_gem_object_unpin(obj);

	if (obj_priv->phys_obj)
		i915_gem_detach_phys_object(dev, obj);

	i915_gem_object_unbind(obj);

	list = &obj->map_list;
	drm_ht_remove_item(&mm->offset_hash, &list->hash);

	if (list->file_offset_node) {
		drm_mm_put_block(list->file_offset_node);
		list->file_offset_node = NULL;
	}

	map = list->map;
	if (map) {
		drm_free(map, sizeof(*map), DRM_MEM_DRIVER);
		list->map = NULL;
	}

	drm_free(obj_priv->page_cpu_valid, 1, DRM_MEM_DRIVER);
	drm_free(obj->driver_private, 1, DRM_MEM_DRIVER);
}
/** Unbinds all objects that are on the given buffer list. */
static int
i915_gem_evict_from_list(struct drm_device *dev, struct list_head *head)
{
	struct drm_gem_object *obj;
	struct drm_i915_gem_object *obj_priv;
	int ret;

	while (!list_empty(head)) {
		obj_priv = list_first_entry(head,
					    struct drm_i915_gem_object,
					    list);
		obj = obj_priv->obj;

		if (obj_priv->pin_count != 0) {
			DRM_ERROR("Pinned object in unbind list\n");
			mutex_unlock(&dev->struct_mutex);
			return -EINVAL;
		}

		ret = i915_gem_object_unbind(obj);
		if (ret != 0) {
			DRM_ERROR("Error unbinding object in LeaveVT: %d\n",
				  ret);
			mutex_unlock(&dev->struct_mutex);
			return ret;
		}
	}

	return 0;
}
int
i915_gem_idle(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	uint32_t seqno, cur_seqno, last_seqno;
	int stuck, ret;

	mutex_lock(&dev->struct_mutex);

	if (dev_priv->mm.suspended || dev_priv->ring.ring_obj == NULL) {
		mutex_unlock(&dev->struct_mutex);
		return 0;
	}

	/* Hack!  Don't let anybody do execbuf while we don't control the chip.
	 * We need to replace this with a semaphore, or something.
	 */
	dev_priv->mm.suspended = 1;

	/* Cancel the retire work handler, wait for it to finish if running
	 */
	mutex_unlock(&dev->struct_mutex);
	cancel_delayed_work_sync(&dev_priv->mm.retire_work);
	mutex_lock(&dev->struct_mutex);

	i915_kernel_lost_context(dev);

	/* Flush the GPU along with all non-CPU write domains
	 */
	i915_gem_flush(dev, ~(I915_GEM_DOMAIN_CPU|I915_GEM_DOMAIN_GTT),
		       ~(I915_GEM_DOMAIN_CPU|I915_GEM_DOMAIN_GTT));
	seqno = i915_add_request(dev, ~I915_GEM_DOMAIN_CPU);

	if (seqno == 0) {
		mutex_unlock(&dev->struct_mutex);
		return -ENOMEM;
	}

	dev_priv->mm.waiting_gem_seqno = seqno;
	last_seqno = 0;
	stuck = 0;
	for (;;) {
		cur_seqno = i915_get_gem_seqno(dev);
		if (i915_seqno_passed(cur_seqno, seqno))
			break;
		if (last_seqno == cur_seqno) {
			if (stuck++ > 100) {
				DRM_ERROR("hardware wedged\n");
				dev_priv->mm.wedged = 1;
				DRM_WAKEUP(&dev_priv->irq_queue);
				break;
			}
		}
		msleep(10);
		last_seqno = cur_seqno;
	}
	dev_priv->mm.waiting_gem_seqno = 0;

	i915_gem_retire_requests(dev);

	if (!dev_priv->mm.wedged) {
		/* Active and flushing should now be empty as we've
		 * waited for a sequence higher than any pending execbuffer
		 */
		WARN_ON(!list_empty(&dev_priv->mm.active_list));
		WARN_ON(!list_empty(&dev_priv->mm.flushing_list));
		/* Request should now be empty as we've also waited
		 * for the last request in the list
		 */
		WARN_ON(!list_empty(&dev_priv->mm.request_list));
	}

	/* Empty the active and flushing lists to inactive.  If there's
	 * anything left at this point, it means that we're wedged and
	 * nothing good's going to happen by leaving them there.  So strip
	 * the GPU domains and just stuff them onto inactive.
	 */
	while (!list_empty(&dev_priv->mm.active_list)) {
		struct drm_i915_gem_object *obj_priv;

		obj_priv = list_first_entry(&dev_priv->mm.active_list,
					    struct drm_i915_gem_object,
					    list);
		obj_priv->obj->write_domain &= ~I915_GEM_GPU_DOMAINS;
		i915_gem_object_move_to_inactive(obj_priv->obj);
	}

	while (!list_empty(&dev_priv->mm.flushing_list)) {
		struct drm_i915_gem_object *obj_priv;

		obj_priv = list_first_entry(&dev_priv->mm.flushing_list,
					    struct drm_i915_gem_object,
					    list);
		obj_priv->obj->write_domain &= ~I915_GEM_GPU_DOMAINS;
		i915_gem_object_move_to_inactive(obj_priv->obj);
	}

	/* Move all inactive buffers out of the GTT. */
	ret = i915_gem_evict_from_list(dev, &dev_priv->mm.inactive_list);
	WARN_ON(!list_empty(&dev_priv->mm.inactive_list));
	if (ret) {
		mutex_unlock(&dev->struct_mutex);
		return ret;
	}

	i915_gem_cleanup_ringbuffer(dev);
	mutex_unlock(&dev->struct_mutex);

	return 0;
}
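/*
 * Set up the hardware status page for chips that read it from graphics
 * memory (I915_NEED_GFX_HWS): a single page is allocated as a GEM object,
 * pinned, kmapped for CPU access, and its GTT offset is programmed into
 * HWS_PGA so the GPU can write breadcrumbs there.
 */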
static int
i915_gem_init_hws(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_gem_object *obj;
	struct drm_i915_gem_object *obj_priv;
	int ret;

	/* If we need a physical address for the status page, it's already
	 * initialized at driver load time.
	 */
	if (!I915_NEED_GFX_HWS(dev))
		return 0;

	obj = drm_gem_object_alloc(dev, 4096);
	if (obj == NULL) {
		DRM_ERROR("Failed to allocate status page\n");
		return -ENOMEM;
	}
	obj_priv = obj->driver_private;
	obj_priv->agp_type = AGP_USER_CACHED_MEMORY;

	ret = i915_gem_object_pin(obj, 4096);
	if (ret != 0) {
		drm_gem_object_unreference(obj);
		return ret;
	}

	dev_priv->status_gfx_addr = obj_priv->gtt_offset;

	dev_priv->hw_status_page = kmap(obj_priv->page_list[0]);
	if (dev_priv->hw_status_page == NULL) {
		DRM_ERROR("Failed to map status page.\n");
		memset(&dev_priv->hws_map, 0, sizeof(dev_priv->hws_map));
		drm_gem_object_unreference(obj);
		return -EINVAL;
	}
	dev_priv->hws_obj = obj;
	memset(dev_priv->hw_status_page, 0, PAGE_SIZE);
	I915_WRITE(HWS_PGA, dev_priv->status_gfx_addr);
	I915_READ(HWS_PGA); /* posting read */
	DRM_DEBUG("hws offset: 0x%08x\n", dev_priv->status_gfx_addr);

	return 0;
}
int
i915_gem_init_ringbuffer(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_gem_object *obj;
	struct drm_i915_gem_object *obj_priv;
	drm_i915_ring_buffer_t *ring = &dev_priv->ring;
	int ret;
	u32 head;

	ret = i915_gem_init_hws(dev);
	if (ret != 0)
		return ret;

	obj = drm_gem_object_alloc(dev, 128 * 1024);
	if (obj == NULL) {
		DRM_ERROR("Failed to allocate ringbuffer\n");
		return -ENOMEM;
	}
	obj_priv = obj->driver_private;

	ret = i915_gem_object_pin(obj, 4096);
	if (ret != 0) {
		drm_gem_object_unreference(obj);
		return ret;
	}

	/* Set up the kernel mapping for the ring. */
	ring->Size = obj->size;
	ring->tail_mask = obj->size - 1;

	ring->map.offset = dev->agp->base + obj_priv->gtt_offset;
	ring->map.size = obj->size;
	ring->map.type = 0;
	ring->map.flags = 0;
	ring->map.mtrr = 0;

	drm_core_ioremap_wc(&ring->map, dev);
	if (ring->map.handle == NULL) {
		DRM_ERROR("Failed to map ringbuffer.\n");
		memset(&dev_priv->ring, 0, sizeof(dev_priv->ring));
		drm_gem_object_unreference(obj);
		return -EINVAL;
	}
	ring->ring_obj = obj;
	ring->virtual_start = ring->map.handle;

	/* Stop the ring if it's running. */
	I915_WRITE(PRB0_CTL, 0);
	I915_WRITE(PRB0_TAIL, 0);
	I915_WRITE(PRB0_HEAD, 0);

	/* Initialize the ring. */
	I915_WRITE(PRB0_START, obj_priv->gtt_offset);
	head = I915_READ(PRB0_HEAD) & HEAD_ADDR;

	/* G45 ring initialization fails to reset head to zero */
	if (head != 0) {
		DRM_ERROR("Ring head not reset to zero "
			  "ctl %08x head %08x tail %08x start %08x\n",
			  I915_READ(PRB0_CTL),
			  I915_READ(PRB0_HEAD),
			  I915_READ(PRB0_TAIL),
			  I915_READ(PRB0_START));
		I915_WRITE(PRB0_HEAD, 0);

		DRM_ERROR("Ring head forced to zero "
			  "ctl %08x head %08x tail %08x start %08x\n",
			  I915_READ(PRB0_CTL),
			  I915_READ(PRB0_HEAD),
			  I915_READ(PRB0_TAIL),
			  I915_READ(PRB0_START));
	}

	I915_WRITE(PRB0_CTL,
		   ((obj->size - 4096) & RING_NR_PAGES) |
		   RING_NO_REPORT |
		   RING_VALID);

	head = I915_READ(PRB0_HEAD) & HEAD_ADDR;

	/* If the head is still not zero, the ring is dead */
	if (head != 0) {
		DRM_ERROR("Ring initialization failed "
			  "ctl %08x head %08x tail %08x start %08x\n",
			  I915_READ(PRB0_CTL),
			  I915_READ(PRB0_HEAD),
			  I915_READ(PRB0_TAIL),
			  I915_READ(PRB0_START));
		return -EIO;
	}

	/* Update our cache of the ring state */
	if (!drm_core_check_feature(dev, DRIVER_MODESET))
		i915_kernel_lost_context(dev);
	else {
		ring->head = I915_READ(PRB0_HEAD) & HEAD_ADDR;
		ring->tail = I915_READ(PRB0_TAIL) & TAIL_ADDR;
		ring->space = ring->head - (ring->tail + 8);
		if (ring->space < 0)
			ring->space += ring->Size;
	}

	return 0;
}
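/*
 * The free-space computation above treats the ring as a circular buffer:
 * space = head - (tail + 8), wrapping by Size when the result is negative.
 * For example (hypothetical register values), with the 128 KiB ring, head =
 * 0x100 and tail = 0x1f000 give 0x100 - 0x1f008 = -0x1ef08, which wraps to
 * 0x20000 - 0x1ef08 = 0x10f8 bytes available before the tail would catch up
 * with the head.
 */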
void
i915_gem_cleanup_ringbuffer(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;

	if (dev_priv->ring.ring_obj == NULL)
		return;

	drm_core_ioremapfree(&dev_priv->ring.map, dev);

	i915_gem_object_unpin(dev_priv->ring.ring_obj);
	drm_gem_object_unreference(dev_priv->ring.ring_obj);
	dev_priv->ring.ring_obj = NULL;
	memset(&dev_priv->ring, 0, sizeof(dev_priv->ring));

	if (dev_priv->hws_obj != NULL) {
		struct drm_gem_object *obj = dev_priv->hws_obj;
		struct drm_i915_gem_object *obj_priv = obj->driver_private;

		kunmap(obj_priv->page_list[0]);
		i915_gem_object_unpin(obj);
		drm_gem_object_unreference(obj);
		dev_priv->hws_obj = NULL;
		memset(&dev_priv->hws_map, 0, sizeof(dev_priv->hws_map));
		dev_priv->hw_status_page = NULL;

		/* Write high address into HWS_PGA when disabling. */
		I915_WRITE(HWS_PGA, 0x1ffff000);
	}
}
int
i915_gem_entervt_ioctl(struct drm_device *dev, void *data,
		       struct drm_file *file_priv)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	int ret;

	if (drm_core_check_feature(dev, DRIVER_MODESET))
		return 0;

	if (dev_priv->mm.wedged) {
		DRM_ERROR("Reenabling wedged hardware, good luck\n");
		dev_priv->mm.wedged = 0;
	}

	mutex_lock(&dev->struct_mutex);
	dev_priv->mm.suspended = 0;

	ret = i915_gem_init_ringbuffer(dev);
	if (ret != 0) {
		mutex_unlock(&dev->struct_mutex);
		return ret;
	}

	BUG_ON(!list_empty(&dev_priv->mm.active_list));
	BUG_ON(!list_empty(&dev_priv->mm.flushing_list));
	BUG_ON(!list_empty(&dev_priv->mm.inactive_list));
	BUG_ON(!list_empty(&dev_priv->mm.request_list));
	mutex_unlock(&dev->struct_mutex);

	drm_irq_install(dev);

	return 0;
}
int
i915_gem_leavevt_ioctl(struct drm_device *dev, void *data,
		       struct drm_file *file_priv)
{
	int ret;

	if (drm_core_check_feature(dev, DRIVER_MODESET))
		return 0;

	ret = i915_gem_idle(dev);
	drm_irq_uninstall(dev);

	return ret;
}
void
i915_gem_lastclose(struct drm_device *dev)
{
	int ret;

	if (drm_core_check_feature(dev, DRIVER_MODESET))
		return;

	ret = i915_gem_idle(dev);
	if (ret)
		DRM_ERROR("failed to idle hardware: %d\n", ret);
}
void
i915_gem_load(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;

	INIT_LIST_HEAD(&dev_priv->mm.active_list);
	INIT_LIST_HEAD(&dev_priv->mm.flushing_list);
	INIT_LIST_HEAD(&dev_priv->mm.inactive_list);
	INIT_LIST_HEAD(&dev_priv->mm.request_list);
	INIT_DELAYED_WORK(&dev_priv->mm.retire_work,
			  i915_gem_retire_work_handler);
	dev_priv->mm.next_gem_seqno = 1;

	/* Old X drivers will take 0-2 for front, back, depth buffers */
	dev_priv->fence_reg_start = 3;

	if (IS_I965G(dev) || IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev))
		dev_priv->num_fence_regs = 16;
	else
		dev_priv->num_fence_regs = 8;

	i915_gem_detect_bit_6_swizzle(dev);
}
/*
 * Create a physically contiguous memory object for this object,
 * e.g. for cursor + overlay regs
 */
int i915_gem_init_phys_object(struct drm_device *dev,
			      int id, int size)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_i915_gem_phys_object *phys_obj;
	int ret;

	if (dev_priv->mm.phys_objs[id - 1] || !size)
		return 0;

	phys_obj = drm_calloc(1, sizeof(struct drm_i915_gem_phys_object), DRM_MEM_DRIVER);
	if (!phys_obj)
		return -ENOMEM;

	phys_obj->id = id;

	phys_obj->handle = drm_pci_alloc(dev, size, 0, 0xffffffff);
	if (!phys_obj->handle) {
		ret = -ENOMEM;
		goto kfree_obj;
	}
#ifdef CONFIG_X86
	set_memory_wc((unsigned long)phys_obj->handle->vaddr, phys_obj->handle->size / PAGE_SIZE);
#endif

	dev_priv->mm.phys_objs[id - 1] = phys_obj;

	return 0;
kfree_obj:
	drm_free(phys_obj, sizeof(struct drm_i915_gem_phys_object), DRM_MEM_DRIVER);
	return ret;
}
void i915_gem_free_phys_object(struct drm_device *dev, int id)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_i915_gem_phys_object *phys_obj;

	if (!dev_priv->mm.phys_objs[id - 1])
		return;

	phys_obj = dev_priv->mm.phys_objs[id - 1];
	if (phys_obj->cur_obj)
		i915_gem_detach_phys_object(dev, phys_obj->cur_obj);

#ifdef CONFIG_X86
	set_memory_wb((unsigned long)phys_obj->handle->vaddr, phys_obj->handle->size / PAGE_SIZE);
#endif
	drm_pci_free(dev, phys_obj->handle);
	kfree(phys_obj);
	dev_priv->mm.phys_objs[id - 1] = NULL;
}
void i915_gem_free_all_phys_object(struct drm_device *dev)
{
	int i;

	for (i = I915_GEM_PHYS_CURSOR_0; i <= I915_MAX_PHYS_OBJECT; i++)
		i915_gem_free_phys_object(dev, i);
}
void i915_gem_detach_phys_object(struct drm_device *dev,
				 struct drm_gem_object *obj)
{
	struct drm_i915_gem_object *obj_priv;
	int i;
	int ret;
	int page_count;

	obj_priv = obj->driver_private;
	if (!obj_priv->phys_obj)
		return;

	ret = i915_gem_object_get_page_list(obj);
	if (ret)
		goto out;

	page_count = obj->size / PAGE_SIZE;

	for (i = 0; i < page_count; i++) {
		char *dst = kmap_atomic(obj_priv->page_list[i], KM_USER0);
		char *src = obj_priv->phys_obj->handle->vaddr + (i * PAGE_SIZE);

		memcpy(dst, src, PAGE_SIZE);
		kunmap_atomic(dst, KM_USER0);
	}
	drm_clflush_pages(obj_priv->page_list, page_count);
	drm_agp_chipset_flush(dev);
out:
	obj_priv->phys_obj->cur_obj = NULL;
	obj_priv->phys_obj = NULL;
}
int
i915_gem_attach_phys_object(struct drm_device *dev,
			    struct drm_gem_object *obj, int id)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_i915_gem_object *obj_priv;
	int ret = 0;
	int page_count;
	int i;

	if (id > I915_MAX_PHYS_OBJECT)
		return -EINVAL;

	obj_priv = obj->driver_private;

	if (obj_priv->phys_obj) {
		if (obj_priv->phys_obj->id == id)
			return 0;
		i915_gem_detach_phys_object(dev, obj);
	}

	/* create a new object */
	if (!dev_priv->mm.phys_objs[id - 1]) {
		ret = i915_gem_init_phys_object(dev, id,
						obj->size);
		if (ret) {
			DRM_ERROR("failed to init phys object %d size: %zu\n", id, obj->size);
			goto out;
		}
	}

	/* bind to the object */
	obj_priv->phys_obj = dev_priv->mm.phys_objs[id - 1];
	obj_priv->phys_obj->cur_obj = obj;

	ret = i915_gem_object_get_page_list(obj);
	if (ret) {
		DRM_ERROR("failed to get page list\n");
		goto out;
	}

	page_count = obj->size / PAGE_SIZE;

	for (i = 0; i < page_count; i++) {
		char *src = kmap_atomic(obj_priv->page_list[i], KM_USER0);
		char *dst = obj_priv->phys_obj->handle->vaddr + (i * PAGE_SIZE);

		memcpy(dst, src, PAGE_SIZE);
		kunmap_atomic(src, KM_USER0);
	}

	return 0;
out:
	return ret;
}
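/*
 * Once attached, the object's contents live in the contiguous phys_obj
 * allocation, so i915_gem_phys_pwrite() below can service pwrite ioctls with
 * a plain copy_from_user() into phys_obj->handle->vaddr followed by a
 * chipset flush, with no GTT mapping or per-page clflush loop required.
 */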
static int
i915_gem_phys_pwrite(struct drm_device *dev, struct drm_gem_object *obj,
		     struct drm_i915_gem_pwrite *args,
		     struct drm_file *file_priv)
{
	struct drm_i915_gem_object *obj_priv = obj->driver_private;
	void *obj_addr;
	int ret;
	char __user *user_data;

	user_data = (char __user *) (uintptr_t) args->data_ptr;
	obj_addr = obj_priv->phys_obj->handle->vaddr + args->offset;

	DRM_ERROR("obj_addr %p, %lld\n", obj_addr, args->size);
	ret = copy_from_user(obj_addr, user_data, args->size);
	if (ret)
		return -EFAULT;

	drm_agp_chipset_flush(dev);