/*
 * Copyright © 2008 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Eric Anholt <eric@anholt.net>
 *
 */
#include "drmP.h"
#include "drm.h"
#include "i915_drm.h"
#include "i915_drv.h"
#include <linux/swap.h>
#include <linux/pci.h>

#define I915_GEM_GPU_DOMAINS	(~(I915_GEM_DOMAIN_CPU | I915_GEM_DOMAIN_GTT))
static void
i915_gem_object_set_to_gpu_domain(struct drm_gem_object *obj,
				  uint32_t read_domains,
				  uint32_t write_domain);
static void i915_gem_object_flush_gpu_write_domain(struct drm_gem_object *obj);
static void i915_gem_object_flush_gtt_write_domain(struct drm_gem_object *obj);
static void i915_gem_object_flush_cpu_write_domain(struct drm_gem_object *obj);
static int i915_gem_object_set_to_cpu_domain(struct drm_gem_object *obj,
					     int write);
static int i915_gem_object_set_cpu_read_domain_range(struct drm_gem_object *obj,
						      uint64_t offset,
						      uint64_t size);
static void i915_gem_object_set_to_full_cpu_read_domain(struct drm_gem_object *obj);
static int i915_gem_object_get_page_list(struct drm_gem_object *obj);
static void i915_gem_object_free_page_list(struct drm_gem_object *obj);
static int i915_gem_object_wait_rendering(struct drm_gem_object *obj);
static int i915_gem_object_bind_to_gtt(struct drm_gem_object *obj,
				       unsigned alignment);
static void i915_gem_object_get_fence_reg(struct drm_gem_object *obj);
static void i915_gem_clear_fence_reg(struct drm_gem_object *obj);
static int i915_gem_evict_something(struct drm_device *dev);
int i915_gem_do_init(struct drm_device *dev, unsigned long start,
		     unsigned long end)
{
	drm_i915_private_t *dev_priv = dev->dev_private;

	if (start >= end ||
	    (start & (PAGE_SIZE - 1)) != 0 ||
	    (end & (PAGE_SIZE - 1)) != 0) {
		return -EINVAL;
	}

	drm_mm_init(&dev_priv->mm.gtt_space, start,
		    end - start);

	dev->gtt_total = (uint32_t) (end - start);

	return 0;
}
int
i915_gem_init_ioctl(struct drm_device *dev, void *data,
		    struct drm_file *file_priv)
{
	struct drm_i915_gem_init *args = data;
	int ret;

	mutex_lock(&dev->struct_mutex);
	ret = i915_gem_do_init(dev, args->gtt_start, args->gtt_end);
	mutex_unlock(&dev->struct_mutex);

	return ret;
}
int
i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data,
			    struct drm_file *file_priv)
{
	struct drm_i915_gem_get_aperture *args = data;

	if (!(dev->driver->driver_features & DRIVER_GEM))
		return -ENODEV;

	args->aper_size = dev->gtt_total;
	args->aper_available_size = (args->aper_size -
				     atomic_read(&dev->pin_memory));

	return 0;
}
/**
 * Creates a new mm object and returns a handle to it.
 */
int
i915_gem_create_ioctl(struct drm_device *dev, void *data,
		      struct drm_file *file_priv)
{
	struct drm_i915_gem_create *args = data;
	struct drm_gem_object *obj;
	int ret;
	u32 handle;

	args->size = roundup(args->size, PAGE_SIZE);

	/* Allocate the new object */
	obj = drm_gem_object_alloc(dev, args->size);
	if (obj == NULL)
		return -ENOMEM;

	ret = drm_gem_handle_create(file_priv, obj, &handle);
	mutex_lock(&dev->struct_mutex);
	drm_gem_object_handle_unreference(obj);
	mutex_unlock(&dev->struct_mutex);

	if (ret)
		return ret;

	args->handle = handle;

	return 0;
}
/**
 * Reads data from the object referenced by handle.
 *
 * On error, the contents of *data are undefined.
 */
int
i915_gem_pread_ioctl(struct drm_device *dev, void *data,
		     struct drm_file *file_priv)
{
	struct drm_i915_gem_pread *args = data;
	struct drm_gem_object *obj;
	struct drm_i915_gem_object *obj_priv;
	ssize_t read;
	loff_t offset;
	int ret;

	obj = drm_gem_object_lookup(dev, file_priv, args->handle);
	if (obj == NULL)
		return -EBADF;
	obj_priv = obj->driver_private;

	/* Bounds check source.
	 *
	 * XXX: This could use review for overflow issues...
	 */
	if (args->offset > obj->size || args->size > obj->size ||
	    args->offset + args->size > obj->size) {
		drm_gem_object_unreference(obj);
		return -EINVAL;
	}

	mutex_lock(&dev->struct_mutex);

	ret = i915_gem_object_set_cpu_read_domain_range(obj, args->offset,
							args->size);
	if (ret != 0) {
		drm_gem_object_unreference(obj);
		mutex_unlock(&dev->struct_mutex);
		return ret;
	}

	offset = args->offset;

	read = vfs_read(obj->filp, (char __user *)(uintptr_t)args->data_ptr,
			args->size, &offset);
	if (read != args->size) {
		drm_gem_object_unreference(obj);
		mutex_unlock(&dev->struct_mutex);
		if (read < 0)
			return read;
		else
			return -EINVAL;
	}

	drm_gem_object_unreference(obj);
	mutex_unlock(&dev->struct_mutex);

	return 0;
}
/* This is the fast write path which cannot handle
 * page faults in the source data
 */

static inline int
fast_user_write(struct io_mapping *mapping,
		loff_t page_base, int page_offset,
		char __user *user_data,
		int length)
{
	char *vaddr_atomic;
	unsigned long unwritten;

	vaddr_atomic = io_mapping_map_atomic_wc(mapping, page_base);
	unwritten = __copy_from_user_inatomic_nocache(vaddr_atomic + page_offset,
						      user_data, length);
	io_mapping_unmap_atomic(vaddr_atomic);
	if (unwritten)
		return -EFAULT;
	return 0;
}

/* Here's the write path which can sleep for
 * page faults
 */

static inline int
slow_user_write(struct io_mapping *mapping,
		loff_t page_base, int page_offset,
		char __user *user_data,
		int length)
{
	char __iomem *vaddr;
	unsigned long unwritten;

	vaddr = io_mapping_map_wc(mapping, page_base);
	if (vaddr == NULL)
		return -EFAULT;
	unwritten = __copy_from_user(vaddr + page_offset,
				     user_data, length);
	io_mapping_unmap(vaddr);
	if (unwritten)
		return -EFAULT;
	return 0;
}
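/*
 * Note on the two write paths above (descriptive only, restating the code):
 * the fast path copies inside an atomic mapping of the aperture page, so it
 * must use __copy_from_user_inatomic_nocache() and cannot take a page fault
 * on the user buffer.  If the source page isn't resident, that copy comes
 * back short and the caller falls back to slow_user_write(), which uses a
 * sleeping mapping and a faulting __copy_from_user().
 */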
static int
i915_gem_gtt_pwrite(struct drm_device *dev, struct drm_gem_object *obj,
		    struct drm_i915_gem_pwrite *args,
		    struct drm_file *file_priv)
{
	struct drm_i915_gem_object *obj_priv = obj->driver_private;
	drm_i915_private_t *dev_priv = dev->dev_private;
	ssize_t remain;
	loff_t offset, page_base;
	char __user *user_data;
	int page_offset, page_length;
	int ret;

	user_data = (char __user *) (uintptr_t) args->data_ptr;
	remain = args->size;
	if (!access_ok(VERIFY_READ, user_data, remain))
		return -EFAULT;

	mutex_lock(&dev->struct_mutex);
	ret = i915_gem_object_pin(obj, 0);
	if (ret) {
		mutex_unlock(&dev->struct_mutex);
		return ret;
	}
	ret = i915_gem_object_set_to_gtt_domain(obj, 1);
	if (ret)
		goto fail;

	obj_priv = obj->driver_private;
	offset = obj_priv->gtt_offset + args->offset;

	while (remain > 0) {
		/* Operation in this page
		 *
		 * page_base = page offset within aperture
		 * page_offset = offset within page
		 * page_length = bytes to copy for this page
		 */
		page_base = (offset & ~(PAGE_SIZE-1));
		page_offset = offset & (PAGE_SIZE-1);
		page_length = remain;
		if ((page_offset + remain) > PAGE_SIZE)
			page_length = PAGE_SIZE - page_offset;

		ret = fast_user_write(dev_priv->mm.gtt_mapping, page_base,
				      page_offset, user_data, page_length);

		/* If we get a fault while copying data, then (presumably) our
		 * source page isn't available.  In this case, use the
		 * non-atomic function
		 */
		if (ret) {
			ret = slow_user_write(dev_priv->mm.gtt_mapping,
					      page_base, page_offset,
					      user_data, page_length);
			if (ret)
				goto fail;
		}

		remain -= page_length;
		user_data += page_length;
		offset += page_length;
	}

fail:
	i915_gem_object_unpin(obj);
	mutex_unlock(&dev->struct_mutex);

	return ret;
}
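/*
 * Worked example for the per-page split above (illustrative values only):
 * a 16-byte pwrite landing at GTT offset 0x1ffc gives
 *   page_base   = 0x1000  (offset & ~(PAGE_SIZE-1))
 *   page_offset = 0xffc   (offset & (PAGE_SIZE-1))
 *   page_length = 4       (clamped to PAGE_SIZE - page_offset)
 * so the copy is split into a 4-byte write to the first page and a 12-byte
 * write handled by the next loop iteration on the following page.
 */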
static int
i915_gem_shmem_pwrite(struct drm_device *dev, struct drm_gem_object *obj,
		      struct drm_i915_gem_pwrite *args,
		      struct drm_file *file_priv)
{
	loff_t offset;
	ssize_t written;
	int ret;

	mutex_lock(&dev->struct_mutex);

	ret = i915_gem_object_set_to_cpu_domain(obj, 1);
	if (ret) {
		mutex_unlock(&dev->struct_mutex);
		return ret;
	}

	offset = args->offset;

	written = vfs_write(obj->filp,
			    (char __user *)(uintptr_t) args->data_ptr,
			    args->size, &offset);
	if (written != args->size) {
		mutex_unlock(&dev->struct_mutex);
		if (written < 0)
			return written;
		else
			return -EINVAL;
	}

	mutex_unlock(&dev->struct_mutex);

	return 0;
}
/**
 * Writes data to the object referenced by handle.
 *
 * On error, the contents of the buffer that were to be modified are undefined.
 */
int
i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
		      struct drm_file *file_priv)
{
	struct drm_i915_gem_pwrite *args = data;
	struct drm_gem_object *obj;
	struct drm_i915_gem_object *obj_priv;
	int ret = 0;

	obj = drm_gem_object_lookup(dev, file_priv, args->handle);
	if (obj == NULL)
		return -EBADF;
	obj_priv = obj->driver_private;

	/* Bounds check destination.
	 *
	 * XXX: This could use review for overflow issues...
	 */
	if (args->offset > obj->size || args->size > obj->size ||
	    args->offset + args->size > obj->size) {
		drm_gem_object_unreference(obj);
		return -EINVAL;
	}

	/* We can only do the GTT pwrite on untiled buffers, as otherwise
	 * it would end up going through the fenced access, and we'll get
	 * different detiling behavior between reading and writing.
	 * pread/pwrite currently are reading and writing from the CPU
	 * perspective, requiring manual detiling by the client.
	 */
	if (obj_priv->tiling_mode == I915_TILING_NONE &&
	    dev->gtt_total != 0)
		ret = i915_gem_gtt_pwrite(dev, obj, args, file_priv);
	else
		ret = i915_gem_shmem_pwrite(dev, obj, args, file_priv);

	if (ret)
		DRM_INFO("pwrite failed %d\n", ret);

	drm_gem_object_unreference(obj);

	return ret;
}
/**
 * Called when user space prepares to use an object with the CPU, either
 * through the mmap ioctl's mapping or a GTT mapping.
 */
int
i915_gem_set_domain_ioctl(struct drm_device *dev, void *data,
			  struct drm_file *file_priv)
{
	struct drm_i915_gem_set_domain *args = data;
	struct drm_gem_object *obj;
	uint32_t read_domains = args->read_domains;
	uint32_t write_domain = args->write_domain;
	int ret;

	if (!(dev->driver->driver_features & DRIVER_GEM))
		return -ENODEV;

	/* Only handle setting domains to types used by the CPU. */
	if (write_domain & ~(I915_GEM_DOMAIN_CPU | I915_GEM_DOMAIN_GTT))
		return -EINVAL;

	if (read_domains & ~(I915_GEM_DOMAIN_CPU | I915_GEM_DOMAIN_GTT))
		return -EINVAL;

	/* Having something in the write domain implies it's in the read
	 * domain, and only that read domain.  Enforce that in the request.
	 */
	if (write_domain != 0 && read_domains != write_domain)
		return -EINVAL;

	obj = drm_gem_object_lookup(dev, file_priv, args->handle);
	if (obj == NULL)
		return -EBADF;

	mutex_lock(&dev->struct_mutex);
	DRM_INFO("set_domain_ioctl %p(%d), %08x %08x\n",
		 obj, obj->size, read_domains, write_domain);

	if (read_domains & I915_GEM_DOMAIN_GTT) {
		ret = i915_gem_object_set_to_gtt_domain(obj, write_domain != 0);

		/* Silently promote "you're not bound, there was nothing to do"
		 * to success, since the client was just asking us to
		 * make sure everything was done.
		 */
		if (ret == -EINVAL)
			ret = 0;
	} else {
		ret = i915_gem_object_set_to_cpu_domain(obj, write_domain != 0);
	}

	drm_gem_object_unreference(obj);
	mutex_unlock(&dev->struct_mutex);
	return ret;
}
/**
 * Called when user space has done writes to this buffer
 */
int
i915_gem_sw_finish_ioctl(struct drm_device *dev, void *data,
			 struct drm_file *file_priv)
{
	struct drm_i915_gem_sw_finish *args = data;
	struct drm_gem_object *obj;
	struct drm_i915_gem_object *obj_priv;
	int ret = 0;

	if (!(dev->driver->driver_features & DRIVER_GEM))
		return -ENODEV;

	mutex_lock(&dev->struct_mutex);
	obj = drm_gem_object_lookup(dev, file_priv, args->handle);
	if (obj == NULL) {
		mutex_unlock(&dev->struct_mutex);
		return -EBADF;
	}

	DRM_INFO("%s: sw_finish %d (%p %d)\n",
		 __func__, args->handle, obj, obj->size);

	obj_priv = obj->driver_private;

	/* Pinned buffers may be scanout, so flush the cache */
	if (obj_priv->pin_count)
		i915_gem_object_flush_cpu_write_domain(obj);

	drm_gem_object_unreference(obj);
	mutex_unlock(&dev->struct_mutex);
	return ret;
}
/**
 * Maps the contents of an object, returning the address it is mapped
 * into.
 *
 * While the mapping holds a reference on the contents of the object, it doesn't
 * imply a ref on the object itself.
 */
int
i915_gem_mmap_ioctl(struct drm_device *dev, void *data,
		    struct drm_file *file_priv)
{
	struct drm_i915_gem_mmap *args = data;
	struct drm_gem_object *obj;
	loff_t offset;
	unsigned long addr;

	if (!(dev->driver->driver_features & DRIVER_GEM))
		return -ENODEV;

	obj = drm_gem_object_lookup(dev, file_priv, args->handle);
	if (obj == NULL)
		return -EBADF;

	offset = args->offset;

	down_write(&current->mm->mmap_sem);
	addr = do_mmap(obj->filp, 0, args->size,
		       PROT_READ | PROT_WRITE, MAP_SHARED,
		       args->offset);
	up_write(&current->mm->mmap_sem);
	mutex_lock(&dev->struct_mutex);
	drm_gem_object_unreference(obj);
	mutex_unlock(&dev->struct_mutex);
	if (IS_ERR((void *)addr))
		return addr;

	args->addr_ptr = (uint64_t) addr;

	return 0;
}
/**
 * i915_gem_fault - fault a page into the GTT
 * vma: VMA in question
 * vmf: fault info
 *
 * The fault handler is set up by drm_gem_mmap() when a object is GTT mapped
 * from userspace.  The fault handler takes care of binding the object to
 * the GTT (if needed), allocating and programming a fence register (again,
 * only if needed based on whether the old reg is still valid or the object
 * is tiled) and inserting a new PTE into the faulting process.
 *
 * Note that the faulting process may involve evicting existing objects
 * from the GTT and/or fence registers to make room.  So performance may
 * suffer if the GTT working set is large or there are few fence registers
 * left.
 */
int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct drm_gem_object *obj = vma->vm_private_data;
	struct drm_device *dev = obj->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_i915_gem_object *obj_priv = obj->driver_private;
	pgoff_t page_offset;
	unsigned long pfn;
	int ret = 0;

	/* We don't use vmf->pgoff since that has the fake offset */
	page_offset = ((unsigned long)vmf->virtual_address - vma->vm_start) >>
		PAGE_SHIFT;

	/* Now bind it into the GTT if needed */
	mutex_lock(&dev->struct_mutex);
	if (!obj_priv->gtt_space) {
		ret = i915_gem_object_bind_to_gtt(obj, obj_priv->gtt_alignment);
		if (ret) {
			mutex_unlock(&dev->struct_mutex);
			return VM_FAULT_SIGBUS;
		}
		list_add(&obj_priv->list, &dev_priv->mm.inactive_list);
	}

	/* Need a new fence register? */
	if (obj_priv->fence_reg == I915_FENCE_REG_NONE &&
	    obj_priv->tiling_mode != I915_TILING_NONE)
		i915_gem_object_get_fence_reg(obj);

	pfn = ((dev->agp->base + obj_priv->gtt_offset) >> PAGE_SHIFT) +
		page_offset;

	/* Finally, remap it using the new GTT offset */
	ret = vm_insert_pfn(vma, (unsigned long)vmf->virtual_address, pfn);

	mutex_unlock(&dev->struct_mutex);

	if (ret) {
		DRM_ERROR("can't insert pfn?? fault or busy...\n");
		return VM_FAULT_SIGBUS;
	}

	return VM_FAULT_NOPAGE;
}
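/*
 * Rough sketch of how userspace reaches the fault handler above (this
 * assumes the usual libdrm-style flow, nothing in this file enforces it):
 * the client calls the mmap_gtt ioctl below to obtain a fake offset,
 * passes that offset to mmap(2) on the DRM fd, and the first CPU access
 * to the resulting mapping faults into i915_gem_fault(), which binds the
 * object and inserts the aperture PFN.
 */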
/**
 * i915_gem_create_mmap_offset - create a fake mmap offset for an object
 * @obj: obj in question
 *
 * GEM memory mapping works by handing back to userspace a fake mmap offset
 * it can use in a subsequent mmap(2) call.  The DRM core code then looks
 * up the object based on the offset and sets up the various memory mapping
 * structures.
 *
 * This routine allocates and attaches a fake offset for @obj.
 */
static int
i915_gem_create_mmap_offset(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	struct drm_gem_mm *mm = dev->mm_private;
	struct drm_i915_gem_object *obj_priv = obj->driver_private;
	struct drm_map_list *list;
	struct drm_map *map;
	int ret = 0;

	/* Set the object up for mmap'ing */
	list = &obj->map_list;
	list->map = drm_calloc(1, sizeof(struct drm_map_list),
			       DRM_MEM_DRIVER);
	if (!list->map)
		return -ENOMEM;

	map = list->map;
	map->type = _DRM_GEM;
	map->size = obj->size;
	map->handle = obj;

	/* Get a DRM GEM mmap offset allocated... */
	list->file_offset_node = drm_mm_search_free(&mm->offset_manager,
						    obj->size / PAGE_SIZE, 0, 0);
	if (!list->file_offset_node) {
		DRM_ERROR("failed to allocate offset for bo %d\n", obj->name);
		ret = -ENOMEM;
		goto out_free_list;
	}

	list->file_offset_node = drm_mm_get_block(list->file_offset_node,
						  obj->size / PAGE_SIZE, 0);
	if (!list->file_offset_node) {
		ret = -ENOMEM;
		goto out_free_list;
	}

	list->hash.key = list->file_offset_node->start;
	if (drm_ht_insert_item(&mm->offset_hash, &list->hash)) {
		DRM_ERROR("failed to add to map hash\n");
		goto out_free_mm;
	}

	/* By now we should be all set, any drm_mmap request on the offset
	 * below will get to our mmap & fault handler */
	obj_priv->mmap_offset = ((uint64_t) list->hash.key) << PAGE_SHIFT;

	return 0;

out_free_mm:
	drm_mm_put_block(list->file_offset_node);
out_free_list:
	drm_free(list->map, sizeof(struct drm_map_list), DRM_MEM_DRIVER);

	return ret;
}
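/*
 * The "offset" handed back by the function above is fake in the sense that
 * it doesn't name bytes in any file: it is a key into mm->offset_hash that
 * the DRM mmap path uses to find the object again.  Shifting list->hash.key
 * by PAGE_SHIFT keeps the value page aligned so it is a legal mmap offset.
 */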
/**
 * i915_gem_get_gtt_alignment - return required GTT alignment for an object
 * @obj: object to check
 *
 * Return the required GTT alignment for an object, taking into account
 * potential fence register mapping if needed.
 */
static uint32_t
i915_gem_get_gtt_alignment(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	struct drm_i915_gem_object *obj_priv = obj->driver_private;
	int start, i;

	/*
	 * Minimum alignment is 4k (GTT page size), but might be greater
	 * if a fence register is needed for the object.
	 */
	if (IS_I965G(dev) || obj_priv->tiling_mode == I915_TILING_NONE)
		return 4096;

	/*
	 * Previous chips need to be aligned to the size of the smallest
	 * fence register that can contain the object.
	 */
	if (IS_I9XX(dev))
		start = 1024 * 1024;
	else
		start = 512 * 1024;

	for (i = start; i < obj->size; i <<= 1)
		;

	return i;
}
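/*
 * Illustrative example of the fence-size rounding above (assuming the 1MB
 * minimum fence size used for I9XX parts): a 3MB tiled object comes out
 * with 4MB alignment, since the loop doubles 1MB -> 2MB -> 4MB until the
 * power of two covers the object, whereas any untiled or 965 object only
 * needs the 4KB GTT page alignment.
 */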
/**
 * i915_gem_mmap_gtt_ioctl - prepare an object for GTT mmap'ing
 * @dev: DRM device
 * @data: GTT mapping ioctl data
 * @file_priv: GEM object info
 *
 * Simply returns the fake offset to userspace so it can mmap it.
 * The mmap call will end up in drm_gem_mmap(), which will set things
 * up so we can get faults in the handler above.
 *
 * The fault handler will take care of binding the object into the GTT
 * (since it may have been evicted to make room for something), allocating
 * a fence register, and mapping the appropriate aperture address into
 * userspace.
 */
int
i915_gem_mmap_gtt_ioctl(struct drm_device *dev, void *data,
			struct drm_file *file_priv)
{
	struct drm_i915_gem_mmap_gtt *args = data;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_gem_object *obj;
	struct drm_i915_gem_object *obj_priv;
	int ret;

	if (!(dev->driver->driver_features & DRIVER_GEM))
		return -ENODEV;

	obj = drm_gem_object_lookup(dev, file_priv, args->handle);
	if (obj == NULL)
		return -EBADF;

	mutex_lock(&dev->struct_mutex);

	obj_priv = obj->driver_private;

	if (!obj_priv->mmap_offset) {
		ret = i915_gem_create_mmap_offset(obj);
		if (ret)
			return ret;
	}

	args->offset = obj_priv->mmap_offset;

	obj_priv->gtt_alignment = i915_gem_get_gtt_alignment(obj);

	/* Make sure the alignment is correct for fence regs etc */
	if (obj_priv->agp_mem &&
	    (obj_priv->gtt_offset & (obj_priv->gtt_alignment - 1))) {
		drm_gem_object_unreference(obj);
		mutex_unlock(&dev->struct_mutex);
		return -EINVAL;
	}

	/*
	 * Pull it into the GTT so that we have a page list (makes the
	 * initial fault faster and any subsequent flushing possible).
	 */
	if (!obj_priv->agp_mem) {
		ret = i915_gem_object_bind_to_gtt(obj, obj_priv->gtt_alignment);
		if (ret) {
			drm_gem_object_unreference(obj);
			mutex_unlock(&dev->struct_mutex);
			return ret;
		}
		list_add(&obj_priv->list, &dev_priv->mm.inactive_list);
	}

	drm_gem_object_unreference(obj);
	mutex_unlock(&dev->struct_mutex);

	return 0;
}
static void
i915_gem_object_free_page_list(struct drm_gem_object *obj)
{
	struct drm_i915_gem_object *obj_priv = obj->driver_private;
	int page_count = obj->size / PAGE_SIZE;
	int i;

	if (obj_priv->page_list == NULL)
		return;

	for (i = 0; i < page_count; i++)
		if (obj_priv->page_list[i] != NULL) {
			if (obj_priv->dirty)
				set_page_dirty(obj_priv->page_list[i]);
			mark_page_accessed(obj_priv->page_list[i]);
			page_cache_release(obj_priv->page_list[i]);
		}

	drm_free(obj_priv->page_list,
		 page_count * sizeof(struct page *),
		 DRM_MEM_DRIVER);
	obj_priv->page_list = NULL;
}
static void
i915_gem_object_move_to_active(struct drm_gem_object *obj, uint32_t seqno)
{
	struct drm_device *dev = obj->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_i915_gem_object *obj_priv = obj->driver_private;

	/* Add a reference if we're newly entering the active list. */
	if (!obj_priv->active) {
		drm_gem_object_reference(obj);
		obj_priv->active = 1;
	}
	/* Move from whatever list we were on to the tail of execution. */
	list_move_tail(&obj_priv->list,
		       &dev_priv->mm.active_list);
	obj_priv->last_rendering_seqno = seqno;
}

static void
i915_gem_object_move_to_flushing(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_i915_gem_object *obj_priv = obj->driver_private;

	BUG_ON(!obj_priv->active);
	list_move_tail(&obj_priv->list, &dev_priv->mm.flushing_list);
	obj_priv->last_rendering_seqno = 0;
}

static void
i915_gem_object_move_to_inactive(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_i915_gem_object *obj_priv = obj->driver_private;

	i915_verify_inactive(dev, __FILE__, __LINE__);
	if (obj_priv->pin_count != 0)
		list_del_init(&obj_priv->list);
	else
		list_move_tail(&obj_priv->list, &dev_priv->mm.inactive_list);

	obj_priv->last_rendering_seqno = 0;
	if (obj_priv->active) {
		obj_priv->active = 0;
		drm_gem_object_unreference(obj);
	}
	i915_verify_inactive(dev, __FILE__, __LINE__);
}
/**
 * Creates a new sequence number, emitting a write of it to the status page
 * plus an interrupt, which will trigger i915_user_interrupt_handler.
 *
 * Must be called with struct_lock held.
 *
 * Returned sequence numbers are nonzero on success.
 */
static uint32_t
i915_add_request(struct drm_device *dev, uint32_t flush_domains)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_i915_gem_request *request;
	uint32_t seqno;
	int was_empty;
	RING_LOCALS;

	request = drm_calloc(1, sizeof(*request), DRM_MEM_DRIVER);
	if (request == NULL)
		return 0;

	/* Grab the seqno we're going to make this request be, and bump the
	 * next (skipping 0 so it can be the reserved no-seqno value).
	 */
	seqno = dev_priv->mm.next_gem_seqno;
	dev_priv->mm.next_gem_seqno++;
	if (dev_priv->mm.next_gem_seqno == 0)
		dev_priv->mm.next_gem_seqno++;

	BEGIN_LP_RING(4);
	OUT_RING(MI_STORE_DWORD_INDEX);
	OUT_RING(I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
	OUT_RING(seqno);

	OUT_RING(MI_USER_INTERRUPT);
	ADVANCE_LP_RING();

	DRM_DEBUG("%d\n", seqno);

	request->seqno = seqno;
	request->emitted_jiffies = jiffies;
	was_empty = list_empty(&dev_priv->mm.request_list);
	list_add_tail(&request->list, &dev_priv->mm.request_list);

	/* Associate any objects on the flushing list matching the write
	 * domain we're flushing with our flush.
	 */
	if (flush_domains != 0) {
		struct drm_i915_gem_object *obj_priv, *next;

		list_for_each_entry_safe(obj_priv, next,
					 &dev_priv->mm.flushing_list, list) {
			struct drm_gem_object *obj = obj_priv->obj;

			if ((obj->write_domain & flush_domains) ==
			    obj->write_domain) {
				obj->write_domain = 0;
				i915_gem_object_move_to_active(obj, seqno);
			}
		}
	}

	if (was_empty && !dev_priv->mm.suspended)
		schedule_delayed_work(&dev_priv->mm.retire_work, HZ);
	return seqno;
}
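/*
 * The request emitted above is what makes seqnos observable: the
 * MI_STORE_DWORD_INDEX writes the new seqno into the hardware status page
 * at I915_GEM_HWS_INDEX (which i915_get_gem_seqno() reads back through
 * READ_HWSP), and the MI_USER_INTERRUPT wakes anyone sleeping in
 * i915_wait_request().
 */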
/**
 * Command execution barrier
 *
 * Ensures that all commands in the ring are finished
 * before signalling the CPU
 */
static uint32_t
i915_retire_commands(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	uint32_t cmd = MI_FLUSH | MI_NO_WRITE_FLUSH;
	uint32_t flush_domains = 0;
	RING_LOCALS;

	/* The sampler always gets flushed on i965 (sigh) */
	if (IS_I965G(dev))
		flush_domains |= I915_GEM_DOMAIN_SAMPLER;
	BEGIN_LP_RING(2);
	OUT_RING(cmd);
	OUT_RING(0); /* noop */
	ADVANCE_LP_RING();
	return flush_domains;
}
/**
 * Moves buffers associated only with the given active seqno from the active
 * to inactive list, potentially freeing them.
 */
static void
i915_gem_retire_request(struct drm_device *dev,
			struct drm_i915_gem_request *request)
{
	drm_i915_private_t *dev_priv = dev->dev_private;

	/* Move any buffers on the active list that are no longer referenced
	 * by the ringbuffer to the flushing/inactive lists as appropriate.
	 */
	while (!list_empty(&dev_priv->mm.active_list)) {
		struct drm_gem_object *obj;
		struct drm_i915_gem_object *obj_priv;

		obj_priv = list_first_entry(&dev_priv->mm.active_list,
					    struct drm_i915_gem_object,
					    list);
		obj = obj_priv->obj;

		/* If the seqno being retired doesn't match the oldest in the
		 * list, then the oldest in the list must still be newer than
		 * this one.
		 */
		if (obj_priv->last_rendering_seqno != request->seqno)
			return;

		DRM_INFO("%s: retire %d moves to inactive list %p\n",
			 __func__, request->seqno, obj);

		if (obj->write_domain != 0)
			i915_gem_object_move_to_flushing(obj);
		else
			i915_gem_object_move_to_inactive(obj);
	}
}
/**
 * Returns true if seq1 is later than seq2.
 */
static int
i915_seqno_passed(uint32_t seq1, uint32_t seq2)
{
	return (int32_t)(seq1 - seq2) >= 0;
}

uint32_t
i915_get_gem_seqno(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;

	return READ_HWSP(dev_priv, I915_GEM_HWS_INDEX);
}
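/*
 * The signed subtraction in i915_seqno_passed() is what makes the comparison
 * safe across 32-bit wraparound.  For example, with seq1 = 0x00000002 and
 * seq2 = 0xfffffffe, (int32_t)(seq1 - seq2) == 4, which is >= 0, so seqno 2
 * is correctly treated as "after" 0xfffffffe even though it is numerically
 * smaller.
 */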
/**
 * This function clears the request list as sequence numbers are passed.
 */
void
i915_gem_retire_requests(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	uint32_t seqno;

	seqno = i915_get_gem_seqno(dev);

	while (!list_empty(&dev_priv->mm.request_list)) {
		struct drm_i915_gem_request *request;
		uint32_t retiring_seqno;

		request = list_first_entry(&dev_priv->mm.request_list,
					   struct drm_i915_gem_request,
					   list);
		retiring_seqno = request->seqno;

		if (i915_seqno_passed(seqno, retiring_seqno) ||
		    dev_priv->mm.wedged) {
			i915_gem_retire_request(dev, request);

			list_del(&request->list);
			drm_free(request, sizeof(*request), DRM_MEM_DRIVER);
		} else
			break;
	}
}
void
i915_gem_retire_work_handler(struct work_struct *work)
{
	drm_i915_private_t *dev_priv;
	struct drm_device *dev;

	dev_priv = container_of(work, drm_i915_private_t,
				mm.retire_work.work);
	dev = dev_priv->dev;

	mutex_lock(&dev->struct_mutex);
	i915_gem_retire_requests(dev);
	if (!dev_priv->mm.suspended &&
	    !list_empty(&dev_priv->mm.request_list))
		schedule_delayed_work(&dev_priv->mm.retire_work, HZ);
	mutex_unlock(&dev->struct_mutex);
}
/**
 * Waits for a sequence number to be signaled, and cleans up the
 * request and object lists appropriately for that event.
 */
static int
i915_wait_request(struct drm_device *dev, uint32_t seqno)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	int ret = 0;

	BUG_ON(seqno == 0);

	if (!i915_seqno_passed(i915_get_gem_seqno(dev), seqno)) {
		dev_priv->mm.waiting_gem_seqno = seqno;
		i915_user_irq_get(dev);
		ret = wait_event_interruptible(dev_priv->irq_queue,
					       i915_seqno_passed(i915_get_gem_seqno(dev),
								 seqno) ||
					       dev_priv->mm.wedged);
		i915_user_irq_put(dev);
		dev_priv->mm.waiting_gem_seqno = 0;
	}
	if (dev_priv->mm.wedged)
		ret = -EIO;

	if (ret && ret != -ERESTARTSYS)
		DRM_ERROR("%s returns %d (awaiting %d at %d)\n",
			  __func__, ret, seqno, i915_get_gem_seqno(dev));

	/* Directly dispatch request retiring.  While we have the work queue
	 * to handle this, the waiter on a request often wants an associated
	 * buffer to have made it to the inactive list, and we would need
	 * a separate wait queue to handle that.
	 */
	if (ret == 0)
		i915_gem_retire_requests(dev);

	return ret;
}
static void
i915_gem_flush(struct drm_device *dev,
	       uint32_t invalidate_domains,
	       uint32_t flush_domains)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	uint32_t cmd;
	RING_LOCALS;

	DRM_INFO("%s: invalidate %08x flush %08x\n", __func__,
		 invalidate_domains, flush_domains);

	if (flush_domains & I915_GEM_DOMAIN_CPU)
		drm_agp_chipset_flush(dev);

	if ((invalidate_domains | flush_domains) & ~(I915_GEM_DOMAIN_CPU |
						     I915_GEM_DOMAIN_GTT)) {
		/*
		 * read/write caches:
		 *
		 * I915_GEM_DOMAIN_RENDER is always invalidated, but is
		 * only flushed if MI_NO_WRITE_FLUSH is unset.  On 965, it is
		 * also flushed at 2d versus 3d pipeline switches.
		 *
		 * read-only caches:
		 *
		 * I915_GEM_DOMAIN_SAMPLER is flushed on pre-965 if
		 * MI_READ_FLUSH is set, and is always flushed on 965.
		 *
		 * I915_GEM_DOMAIN_COMMAND may not exist?
		 *
		 * I915_GEM_DOMAIN_INSTRUCTION, which exists on 965, is
		 * invalidated when MI_EXE_FLUSH is set.
		 *
		 * I915_GEM_DOMAIN_VERTEX, which exists on 965, is
		 * invalidated with every MI_FLUSH.
		 *
		 * TLBs:
		 *
		 * On 965, TLBs associated with I915_GEM_DOMAIN_COMMAND
		 * and I915_GEM_DOMAIN_CPU in are invalidated at PTE write and
		 * I915_GEM_DOMAIN_RENDER and I915_GEM_DOMAIN_SAMPLER
		 * are flushed at any MI_FLUSH.
		 */

		cmd = MI_FLUSH | MI_NO_WRITE_FLUSH;
		if ((invalidate_domains|flush_domains) &
		    I915_GEM_DOMAIN_RENDER)
			cmd &= ~MI_NO_WRITE_FLUSH;
		if (!IS_I965G(dev)) {
			/*
			 * On the 965, the sampler cache always gets flushed
			 * and this bit is reserved.
			 */
			if (invalidate_domains & I915_GEM_DOMAIN_SAMPLER)
				cmd |= MI_READ_FLUSH;
		}
		if (invalidate_domains & I915_GEM_DOMAIN_INSTRUCTION)
			cmd |= MI_EXE_FLUSH;

		DRM_INFO("%s: queue flush %08x to ring\n", __func__, cmd);

		BEGIN_LP_RING(2);
		OUT_RING(cmd);
		OUT_RING(0); /* noop */
		ADVANCE_LP_RING();
	}
}
/**
 * Ensures that all rendering to the object has completed and the object is
 * safe to unbind from the GTT or access from the CPU.
 */
static int
i915_gem_object_wait_rendering(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	struct drm_i915_gem_object *obj_priv = obj->driver_private;
	int ret;

	/* This function only exists to support waiting for existing rendering,
	 * not for emitting required flushes.
	 */
	BUG_ON((obj->write_domain & I915_GEM_GPU_DOMAINS) != 0);

	/* If there is rendering queued on the buffer being evicted, wait for
	 * it.
	 */
	if (obj_priv->active) {
		DRM_INFO("%s: object %p wait for seqno %08x\n",
			 __func__, obj, obj_priv->last_rendering_seqno);
		ret = i915_wait_request(dev, obj_priv->last_rendering_seqno);
		if (ret != 0)
			return ret;
	}

	return 0;
}
/**
 * Unbinds an object from the GTT aperture.
 */
static int
i915_gem_object_unbind(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	struct drm_i915_gem_object *obj_priv = obj->driver_private;
	loff_t offset;
	int ret = 0;

	DRM_INFO("%s:%d %p\n", __func__, __LINE__, obj);
	DRM_INFO("gtt_space %p\n", obj_priv->gtt_space);

	if (obj_priv->gtt_space == NULL)
		return 0;

	if (obj_priv->pin_count != 0) {
		DRM_ERROR("Attempting to unbind pinned buffer\n");
		return -EINVAL;
	}

	/* Move the object to the CPU domain to ensure that
	 * any possible CPU writes while it's not in the GTT
	 * are flushed when we go to remap it. This will
	 * also ensure that all pending GPU writes are finished
	 * before we unbind.
	 */
	ret = i915_gem_object_set_to_cpu_domain(obj, 1);
	if (ret) {
		if (ret != -ERESTARTSYS)
			DRM_ERROR("set_domain failed: %d\n", ret);
		return ret;
	}

	if (obj_priv->agp_mem != NULL) {
		drm_unbind_agp(obj_priv->agp_mem);
		drm_free_agp(obj_priv->agp_mem, obj->size / PAGE_SIZE);
		obj_priv->agp_mem = NULL;
	}

	BUG_ON(obj_priv->active);

	/* blow away mappings if mapped through GTT */
	offset = ((loff_t) obj->map_list.hash.key) << PAGE_SHIFT;
	if (dev->dev_mapping)
		unmap_mapping_range(dev->dev_mapping, offset, obj->size, 1);

	if (obj_priv->fence_reg != I915_FENCE_REG_NONE)
		i915_gem_clear_fence_reg(obj);

	i915_gem_object_free_page_list(obj);

	if (obj_priv->gtt_space) {
		atomic_dec(&dev->gtt_count);
		atomic_sub(obj->size, &dev->gtt_memory);

		drm_mm_put_block(obj_priv->gtt_space);
		obj_priv->gtt_space = NULL;
	}

	/* Remove ourselves from the LRU list if present. */
	if (!list_empty(&obj_priv->list))
		list_del_init(&obj_priv->list);

	return 0;
}
static int
i915_gem_evict_something(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_gem_object *obj;
	struct drm_i915_gem_object *obj_priv;
	int ret = 0;

	for (;;) {
		/* If there's an inactive buffer available now, grab it
		 * and be done.
		 */
		if (!list_empty(&dev_priv->mm.inactive_list)) {
			obj_priv = list_first_entry(&dev_priv->mm.inactive_list,
						    struct drm_i915_gem_object,
						    list);
			obj = obj_priv->obj;
			BUG_ON(obj_priv->pin_count != 0);
			DRM_INFO("%s: evicting %p\n", __func__, obj);
			BUG_ON(obj_priv->active);

			/* Wait on the rendering and unbind the buffer. */
			ret = i915_gem_object_unbind(obj);
			break;
		}

		/* If we didn't get anything, but the ring is still processing
		 * things, wait for one of those things to finish and hopefully
		 * leave us a buffer to evict.
		 */
		if (!list_empty(&dev_priv->mm.request_list)) {
			struct drm_i915_gem_request *request;

			request = list_first_entry(&dev_priv->mm.request_list,
						   struct drm_i915_gem_request,
						   list);

			ret = i915_wait_request(dev, request->seqno);
			if (ret)
				break;

			/* if waiting caused an object to become inactive,
			 * then loop around and wait for it. Otherwise, we
			 * assume that waiting freed and unbound something,
			 * so there should now be some space in the GTT
			 */
			if (!list_empty(&dev_priv->mm.inactive_list))
				continue;
			break;
		}

		/* If we didn't have anything on the request list but there
		 * are buffers awaiting a flush, emit one and try again.
		 * When we wait on it, those buffers waiting for that flush
		 * will get moved to inactive.
		 */
		if (!list_empty(&dev_priv->mm.flushing_list)) {
			obj_priv = list_first_entry(&dev_priv->mm.flushing_list,
						    struct drm_i915_gem_object,
						    list);
			obj = obj_priv->obj;

			i915_gem_flush(dev,
				       obj->write_domain,
				       obj->write_domain);
			i915_add_request(dev, obj->write_domain);

			obj = NULL;
			continue;
		}

		DRM_ERROR("inactive empty %d request empty %d "
			  "flushing empty %d\n",
			  list_empty(&dev_priv->mm.inactive_list),
			  list_empty(&dev_priv->mm.request_list),
			  list_empty(&dev_priv->mm.flushing_list));
		/* If we didn't do any of the above, there's nothing to be done
		 * and we just can't fit it in.
		 */
		return -ENOMEM;
	}
	return ret;
}

static int
i915_gem_evict_everything(struct drm_device *dev)
{
	int ret;

	for (;;) {
		ret = i915_gem_evict_something(dev);
		if (ret != 0)
			break;
	}
	if (ret == -ENOMEM)
		return 0;
	return ret;
}
static int
i915_gem_object_get_page_list(struct drm_gem_object *obj)
{
	struct drm_i915_gem_object *obj_priv = obj->driver_private;
	int page_count, i;
	struct address_space *mapping;
	struct inode *inode;
	struct page *page;
	int ret;

	if (obj_priv->page_list)
		return 0;

	/* Get the list of pages out of our struct file.  They'll be pinned
	 * at this point until we release them.
	 */
	page_count = obj->size / PAGE_SIZE;
	BUG_ON(obj_priv->page_list != NULL);
	obj_priv->page_list = drm_calloc(page_count, sizeof(struct page *),
					 DRM_MEM_DRIVER);
	if (obj_priv->page_list == NULL) {
		DRM_ERROR("Failed to allocate page list\n");
		return -ENOMEM;
	}

	inode = obj->filp->f_path.dentry->d_inode;
	mapping = inode->i_mapping;
	for (i = 0; i < page_count; i++) {
		page = read_mapping_page(mapping, i, NULL);
		if (IS_ERR(page)) {
			ret = PTR_ERR(page);
			DRM_ERROR("read_mapping_page failed: %d\n", ret);
			i915_gem_object_free_page_list(obj);
			return ret;
		}
		obj_priv->page_list[i] = page;
	}
	return 0;
}
static void i965_write_fence_reg(struct drm_i915_fence_reg *reg)
{
	struct drm_gem_object *obj = reg->obj;
	struct drm_device *dev = obj->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_i915_gem_object *obj_priv = obj->driver_private;
	int regnum = obj_priv->fence_reg;
	uint64_t val;

	val = (uint64_t)((obj_priv->gtt_offset + obj->size - 4096) &
			 0xfffff000) << 32;
	val |= obj_priv->gtt_offset & 0xfffff000;
	val |= ((obj_priv->stride / 128) - 1) << I965_FENCE_PITCH_SHIFT;
	if (obj_priv->tiling_mode == I915_TILING_Y)
		val |= 1 << I965_FENCE_TILING_Y_SHIFT;
	val |= I965_FENCE_REG_VALID;

	I915_WRITE64(FENCE_REG_965_0 + (regnum * 8), val);
}

static void i915_write_fence_reg(struct drm_i915_fence_reg *reg)
{
	struct drm_gem_object *obj = reg->obj;
	struct drm_device *dev = obj->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_i915_gem_object *obj_priv = obj->driver_private;
	int regnum = obj_priv->fence_reg;
	uint32_t val;
	uint32_t pitch_val;

	if ((obj_priv->gtt_offset & ~I915_FENCE_START_MASK) ||
	    (obj_priv->gtt_offset & (obj->size - 1))) {
		WARN(1, "%s: object not 1M or size aligned\n", __func__);
		return;
	}

	if (obj_priv->tiling_mode == I915_TILING_Y && (IS_I945G(dev) ||
						       IS_I945GM(dev) ||
						       IS_G33(dev)))
		pitch_val = (obj_priv->stride / 128) - 1;
	else
		pitch_val = (obj_priv->stride / 512) - 1;

	val = obj_priv->gtt_offset;
	if (obj_priv->tiling_mode == I915_TILING_Y)
		val |= 1 << I830_FENCE_TILING_Y_SHIFT;
	val |= I915_FENCE_SIZE_BITS(obj->size);
	val |= pitch_val << I830_FENCE_PITCH_SHIFT;
	val |= I830_FENCE_REG_VALID;

	I915_WRITE(FENCE_REG_830_0 + (regnum * 4), val);
}

static void i830_write_fence_reg(struct drm_i915_fence_reg *reg)
{
	struct drm_gem_object *obj = reg->obj;
	struct drm_device *dev = obj->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_i915_gem_object *obj_priv = obj->driver_private;
	int regnum = obj_priv->fence_reg;
	uint32_t val;
	uint32_t pitch_val;

	if ((obj_priv->gtt_offset & ~I915_FENCE_START_MASK) ||
	    (obj_priv->gtt_offset & (obj->size - 1))) {
		WARN(1, "%s: object not 1M or size aligned\n", __func__);
		return;
	}

	pitch_val = (obj_priv->stride / 128) - 1;

	val = obj_priv->gtt_offset;
	if (obj_priv->tiling_mode == I915_TILING_Y)
		val |= 1 << I830_FENCE_TILING_Y_SHIFT;
	val |= I830_FENCE_SIZE_BITS(obj->size);
	val |= pitch_val << I830_FENCE_PITCH_SHIFT;
	val |= I830_FENCE_REG_VALID;

	I915_WRITE(FENCE_REG_830_0 + (regnum * 4), val);
}
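/*
 * Pitch encodings used by the register writers above, for reference: the
 * 965 field is stride/128 - 1, the 915-class field is stride/512 - 1
 * (or stride/128 - 1 for Y tiling on the 945-class parts checked above),
 * and the 830 field is stride/128 - 1.  For example, a 4096-byte stride
 * encodes as 31 on 965 and 7 on 915.
 */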
/**
 * i915_gem_object_get_fence_reg - set up a fence reg for an object
 * @obj: object to map through a fence reg
 *
 * When mapping objects through the GTT, userspace wants to be able to write
 * to them without having to worry about swizzling if the object is tiled.
 *
 * This function walks the fence regs looking for a free one for @obj,
 * stealing one if it can't find any.
 *
 * It then sets up the reg based on the object's properties: address, pitch
 * and tiling format.
 */
static void
i915_gem_object_get_fence_reg(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_i915_gem_object *obj_priv = obj->driver_private;
	struct drm_i915_fence_reg *reg = NULL;
	int i, ret;

	switch (obj_priv->tiling_mode) {
	case I915_TILING_NONE:
		WARN(1, "allocating a fence for non-tiled object?\n");
		break;
	case I915_TILING_X:
		WARN(obj_priv->stride & (512 - 1),
		     "object is X tiled but has non-512B pitch\n");
		break;
	case I915_TILING_Y:
		WARN(obj_priv->stride & (128 - 1),
		     "object is Y tiled but has non-128B pitch\n");
		break;
	}

	/* First try to find a free reg */
	for (i = dev_priv->fence_reg_start; i < dev_priv->num_fence_regs; i++) {
		reg = &dev_priv->fence_regs[i];
		if (!reg->obj)
			break;
	}

	/* None available, try to steal one or wait for a user to finish */
	if (i == dev_priv->num_fence_regs) {
		struct drm_i915_gem_object *old_obj_priv = NULL;
		loff_t offset;

try_again:
		/* Could try to use LRU here instead... */
		for (i = dev_priv->fence_reg_start;
		     i < dev_priv->num_fence_regs; i++) {
			reg = &dev_priv->fence_regs[i];
			old_obj_priv = reg->obj->driver_private;
			if (!old_obj_priv->pin_count)
				break;
		}

		/*
		 * Now things get ugly... we have to wait for one of the
		 * objects to finish before trying again.
		 */
		if (i == dev_priv->num_fence_regs) {
			ret = i915_gem_object_wait_rendering(reg->obj);
			if (ret) {
				WARN(ret, "wait_rendering failed: %d\n", ret);
				return;
			}
			goto try_again;
		}

		/*
		 * Zap this virtual mapping so we can set up a fence again
		 * for this object next time we need it.
		 */
		offset = ((loff_t) reg->obj->map_list.hash.key) << PAGE_SHIFT;
		if (dev->dev_mapping)
			unmap_mapping_range(dev->dev_mapping, offset,
					    reg->obj->size, 1);
		old_obj_priv->fence_reg = I915_FENCE_REG_NONE;
	}

	obj_priv->fence_reg = i;
	reg->obj = obj;

	if (IS_I965G(dev))
		i965_write_fence_reg(reg);
	else if (IS_I9XX(dev))
		i915_write_fence_reg(reg);
	else
		i830_write_fence_reg(reg);
}
/**
 * i915_gem_clear_fence_reg - clear out fence register info
 * @obj: object to clear
 *
 * Zeroes out the fence register itself and clears out the associated
 * data structures in dev_priv and obj_priv.
 */
static void
i915_gem_clear_fence_reg(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_i915_gem_object *obj_priv = obj->driver_private;

	if (IS_I965G(dev))
		I915_WRITE64(FENCE_REG_965_0 + (obj_priv->fence_reg * 8), 0);
	else
		I915_WRITE(FENCE_REG_830_0 + (obj_priv->fence_reg * 4), 0);

	dev_priv->fence_regs[obj_priv->fence_reg].obj = NULL;
	obj_priv->fence_reg = I915_FENCE_REG_NONE;
}
/**
 * Finds free space in the GTT aperture and binds the object there.
 */
static int
i915_gem_object_bind_to_gtt(struct drm_gem_object *obj, unsigned alignment)
{
	struct drm_device *dev = obj->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_i915_gem_object *obj_priv = obj->driver_private;
	struct drm_mm_node *free_space;
	int page_count, ret;

	if (alignment == 0)
		alignment = PAGE_SIZE;
	if (alignment & (PAGE_SIZE - 1)) {
		DRM_ERROR("Invalid object alignment requested %u\n", alignment);
		return -EINVAL;
	}

search_free:
	free_space = drm_mm_search_free(&dev_priv->mm.gtt_space,
					obj->size, alignment, 0);
	if (free_space != NULL) {
		obj_priv->gtt_space = drm_mm_get_block(free_space, obj->size,
						       alignment);
		if (obj_priv->gtt_space != NULL) {
			obj_priv->gtt_space->private = obj;
			obj_priv->gtt_offset = obj_priv->gtt_space->start;
		}
	}
	if (obj_priv->gtt_space == NULL) {
		/* If the gtt is empty and we're still having trouble
		 * fitting our object in, we're out of memory.
		 */
		DRM_INFO("%s: GTT full, evicting something\n", __func__);
		if (list_empty(&dev_priv->mm.inactive_list) &&
		    list_empty(&dev_priv->mm.flushing_list) &&
		    list_empty(&dev_priv->mm.active_list)) {
			DRM_ERROR("GTT full, but LRU list empty\n");
			return -ENOMEM;
		}

		ret = i915_gem_evict_something(dev);
		if (ret != 0) {
			if (ret != -ERESTARTSYS)
				DRM_ERROR("Failed to evict a buffer %d\n", ret);
			return ret;
		}
		goto search_free;
	}

	DRM_INFO("Binding object of size %d at 0x%08x\n",
		 obj->size, obj_priv->gtt_offset);

	ret = i915_gem_object_get_page_list(obj);
	if (ret) {
		drm_mm_put_block(obj_priv->gtt_space);
		obj_priv->gtt_space = NULL;
		return ret;
	}

	page_count = obj->size / PAGE_SIZE;
	/* Create an AGP memory structure pointing at our pages, and bind it
	 * into the GTT.
	 */
	obj_priv->agp_mem = drm_agp_bind_pages(dev,
					       obj_priv->page_list,
					       page_count,
					       obj_priv->gtt_offset,
					       obj_priv->agp_type);
	if (obj_priv->agp_mem == NULL) {
		i915_gem_object_free_page_list(obj);
		drm_mm_put_block(obj_priv->gtt_space);
		obj_priv->gtt_space = NULL;
		return -ENOMEM;
	}
	atomic_inc(&dev->gtt_count);
	atomic_add(obj->size, &dev->gtt_memory);

	/* Assert that the object is not currently in any GPU domain. As it
	 * wasn't in the GTT, there shouldn't be any way it could have been in
	 * a GPU cache
	 */
	BUG_ON(obj->read_domains & ~(I915_GEM_DOMAIN_CPU|I915_GEM_DOMAIN_GTT));
	BUG_ON(obj->write_domain & ~(I915_GEM_DOMAIN_CPU|I915_GEM_DOMAIN_GTT));

	return 0;
}
void
i915_gem_clflush_object(struct drm_gem_object *obj)
{
	struct drm_i915_gem_object *obj_priv = obj->driver_private;

	/* If we don't have a page list set up, then we're not pinned
	 * to GPU, and we can ignore the cache flush because it'll happen
	 * again at bind time.
	 */
	if (obj_priv->page_list == NULL)
		return;

	drm_clflush_pages(obj_priv->page_list, obj->size / PAGE_SIZE);
}
/** Flushes any GPU write domain for the object if it's dirty. */
static void
i915_gem_object_flush_gpu_write_domain(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	uint32_t seqno;

	if ((obj->write_domain & I915_GEM_GPU_DOMAINS) == 0)
		return;

	/* Queue the GPU write cache flushing we need. */
	i915_gem_flush(dev, 0, obj->write_domain);
	seqno = i915_add_request(dev, obj->write_domain);
	obj->write_domain = 0;
	i915_gem_object_move_to_active(obj, seqno);
}

/** Flushes the GTT write domain for the object if it's dirty. */
static void
i915_gem_object_flush_gtt_write_domain(struct drm_gem_object *obj)
{
	if (obj->write_domain != I915_GEM_DOMAIN_GTT)
		return;

	/* No actual flushing is required for the GTT write domain.  Writes
	 * to it immediately go to main memory as far as we know, so there's
	 * no chipset flush.  It also doesn't land in render cache.
	 */
	obj->write_domain = 0;
}

/** Flushes the CPU write domain for the object if it's dirty. */
static void
i915_gem_object_flush_cpu_write_domain(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;

	if (obj->write_domain != I915_GEM_DOMAIN_CPU)
		return;

	i915_gem_clflush_object(obj);
	drm_agp_chipset_flush(dev);
	obj->write_domain = 0;
}
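/*
 * Summary of the three flush helpers above: a GPU write domain needs an
 * MI_FLUSH plus a request so the object can sit on the active list until
 * that flush retires; a GTT write domain needs nothing, since writes
 * through the aperture go straight to memory; a CPU write domain needs a
 * clflush of the backing pages followed by a chipset flush.
 */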
/**
 * Moves a single object to the GTT read, and possibly write domain.
 *
 * This function returns when the move is complete, including waiting on
 * flushes to occur.
 */
int
i915_gem_object_set_to_gtt_domain(struct drm_gem_object *obj, int write)
{
	struct drm_i915_gem_object *obj_priv = obj->driver_private;
	int ret;

	/* Not valid to be called on unbound objects. */
	if (obj_priv->gtt_space == NULL)
		return -EINVAL;

	i915_gem_object_flush_gpu_write_domain(obj);
	/* Wait on any GPU rendering and flushing to occur. */
	ret = i915_gem_object_wait_rendering(obj);
	if (ret != 0)
		return ret;

	/* If we're writing through the GTT domain, then CPU and GPU caches
	 * will need to be invalidated at next use.
	 */
	if (write)
		obj->read_domains &= I915_GEM_DOMAIN_GTT;

	i915_gem_object_flush_cpu_write_domain(obj);

	/* It should now be out of any other write domains, and we can update
	 * the domain values for our changes.
	 */
	BUG_ON((obj->write_domain & ~I915_GEM_DOMAIN_GTT) != 0);
	obj->read_domains |= I915_GEM_DOMAIN_GTT;
	if (write) {
		obj->write_domain = I915_GEM_DOMAIN_GTT;
		obj_priv->dirty = 1;
	}

	return 0;
}
/**
 * Moves a single object to the CPU read, and possibly write domain.
 *
 * This function returns when the move is complete, including waiting on
 * flushes to occur.
 */
static int
i915_gem_object_set_to_cpu_domain(struct drm_gem_object *obj, int write)
{
	struct drm_device *dev = obj->dev;
	int ret;

	i915_gem_object_flush_gpu_write_domain(obj);
	/* Wait on any GPU rendering and flushing to occur. */
	ret = i915_gem_object_wait_rendering(obj);
	if (ret != 0)
		return ret;

	i915_gem_object_flush_gtt_write_domain(obj);

	/* If we have a partially-valid cache of the object in the CPU,
	 * finish invalidating it and free the per-page flags.
	 */
	i915_gem_object_set_to_full_cpu_read_domain(obj);

	/* Flush the CPU cache if it's still invalid. */
	if ((obj->read_domains & I915_GEM_DOMAIN_CPU) == 0) {
		i915_gem_clflush_object(obj);
		drm_agp_chipset_flush(dev);

		obj->read_domains |= I915_GEM_DOMAIN_CPU;
	}

	/* It should now be out of any other write domains, and we can update
	 * the domain values for our changes.
	 */
	BUG_ON((obj->write_domain & ~I915_GEM_DOMAIN_CPU) != 0);

	/* If we're writing through the CPU, then the GPU read domains will
	 * need to be invalidated at next use.
	 */
	if (write) {
		obj->read_domains &= I915_GEM_DOMAIN_CPU;
		obj->write_domain = I915_GEM_DOMAIN_CPU;
	}

	return 0;
}
/*
 * Set the next domain for the specified object. This
 * may not actually perform the necessary flushing/invaliding though,
 * as that may want to be batched with other set_domain operations
 *
 * This is (we hope) the only really tricky part of gem. The goal
 * is fairly simple -- track which caches hold bits of the object
 * and make sure they remain coherent. A few concrete examples may
 * help to explain how it works. For shorthand, we use the notation
 * (read_domains, write_domain), e.g. (CPU, CPU) to indicate the
 * a pair of read and write domain masks.
 *
 * Case 1: the batch buffer
 *
 *	1. Allocated
 *	2. Written by CPU
 *	3. Mapped to GTT
 *	4. Read by GPU
 *	5. Unmapped from GTT
 *	6. Freed
 *
 *	Let's take these a step at a time
 *
 *	1. Allocated
 *		Pages allocated from the kernel may still have
 *		cache contents, so we set them to (CPU, CPU) always.
 *	2. Written by CPU (using pwrite)
 *		The pwrite function calls set_domain (CPU, CPU) and
 *		this function does nothing (as nothing changes)
 *	3. Mapped by GTT
 *		This function asserts that the object is not
 *		currently in any GPU-based read or write domains
 *	4. Read by GPU
 *		i915_gem_execbuffer calls set_domain (COMMAND, 0).
 *		As write_domain is zero, this function adds in the
 *		current read domains (CPU+COMMAND, 0).
 *		flush_domains is set to CPU.
 *		invalidate_domains is set to COMMAND
 *		clflush is run to get data out of the CPU caches
 *		then i915_dev_set_domain calls i915_gem_flush to
 *		emit an MI_FLUSH and drm_agp_chipset_flush
 *	5. Unmapped from GTT
 *		i915_gem_object_unbind calls set_domain (CPU, CPU)
 *		flush_domains and invalidate_domains end up both zero
 *		so no flushing/invalidating happens
 *	6. Freed
 *		yay, done
 *
 * Case 2: The shared render buffer
 *
 *	1. Allocated
 *	2. Mapped to GTT
 *	3. Read/written by GPU
 *	4. set_domain to (CPU,CPU)
 *	5. Read/written by CPU
 *	6. Read/written by GPU
 *
 *	1. Allocated
 *		Same as last example, (CPU, CPU)
 *	2. Mapped to GTT
 *		Nothing changes (assertions find that it is not in the GPU)
 *	3. Read/written by GPU
 *		execbuffer calls set_domain (RENDER, RENDER)
 *		flush_domains gets CPU
 *		invalidate_domains gets GPU
 *		clflush (obj)
 *		MI_FLUSH and drm_agp_chipset_flush
 *	4. set_domain (CPU, CPU)
 *		flush_domains gets GPU
 *		invalidate_domains gets CPU
 *		wait_rendering (obj) to make sure all drawing is complete.
 *		This will include an MI_FLUSH to get the data from GPU
 *		to memory
 *		clflush (obj) to invalidate the CPU cache
 *		Another MI_FLUSH in i915_gem_flush (eliminate this somehow?)
 *	5. Read/written by CPU
 *		cache lines are loaded and dirtied
 *	6. Read written by GPU
 *		Same as last GPU access
 *
 * Case 3: The constant buffer
 *
 *	1. Allocated
 *	2. Written by CPU
 *	3. Read by GPU
 *	4. Updated (written) by CPU again
 *	5. Read by GPU
 *
 *	1. Allocated
 *		(CPU, CPU)
 *	2. Written by CPU
 *		(CPU, CPU)
 *	3. Read by GPU
 *		(CPU+RENDER, 0)
 *		flush_domains = CPU
 *		invalidate_domains = RENDER
 *		clflush (obj)
 *		MI_FLUSH
 *		drm_agp_chipset_flush
 *	4. Updated (written) by CPU again
 *		(CPU, CPU)
 *		flush_domains = 0 (no previous write domain)
 *		invalidate_domains = 0 (no new read domains)
 *	5. Read by GPU
 *		(CPU+RENDER, 0)
 *		flush_domains = CPU
 *		invalidate_domains = RENDER
 *		clflush (obj)
 *		MI_FLUSH
 *		drm_agp_chipset_flush
 */
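/*
 * In short (restating the cases above): flush_domains picks up the object's
 * old write domain when it is being dropped, and invalidate_domains picks
 * up any newly requested read domains the object wasn't already valid in.
 */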
static void
i915_gem_object_set_to_gpu_domain(struct drm_gem_object *obj,
				  uint32_t read_domains,
				  uint32_t write_domain)
{
	struct drm_device *dev = obj->dev;
	struct drm_i915_gem_object *obj_priv = obj->driver_private;
	uint32_t invalidate_domains = 0;
	uint32_t flush_domains = 0;

	BUG_ON(read_domains & I915_GEM_DOMAIN_CPU);
	BUG_ON(write_domain == I915_GEM_DOMAIN_CPU);

	DRM_INFO("%s: object %p read %08x -> %08x write %08x -> %08x\n",
		 __func__, obj,
		 obj->read_domains, read_domains,
		 obj->write_domain, write_domain);

	/*
	 * If the object isn't moving to a new write domain,
	 * let the object stay in multiple read domains
	 */
	if (write_domain == 0)
		read_domains |= obj->read_domains;
	else
		obj_priv->dirty = 1;

	/*
	 * Flush the current write domain if
	 * the new read domains don't match. Invalidate
	 * any read domains which differ from the old
	 * write domain
	 */
	if (obj->write_domain && obj->write_domain != read_domains) {
		flush_domains |= obj->write_domain;
		invalidate_domains |= read_domains & ~obj->write_domain;
	}
	/*
	 * Invalidate any read caches which may have
	 * stale data. That is, any new read domains.
	 */
	invalidate_domains |= read_domains & ~obj->read_domains;
	if ((flush_domains | invalidate_domains) & I915_GEM_DOMAIN_CPU) {
		DRM_INFO("%s: CPU domain flush %08x invalidate %08x\n",
			 __func__, flush_domains, invalidate_domains);
		i915_gem_clflush_object(obj);
	}

	if ((write_domain | flush_domains) != 0)
		obj->write_domain = write_domain;
	obj->read_domains = read_domains;

	dev->invalidate_domains |= invalidate_domains;
	dev->flush_domains |= flush_domains;

	DRM_INFO("%s: read %08x write %08x invalidate %08x flush %08x\n",
		 __func__,
		 obj->read_domains, obj->write_domain,
		 dev->invalidate_domains, dev->flush_domains);
}
/**
 * Moves the object from a partially CPU read to a full one.
 *
 * Note that this only resolves i915_gem_object_set_cpu_read_domain_range(),
 * and doesn't handle transitioning from !(read_domains & I915_GEM_DOMAIN_CPU).
 */
static void
i915_gem_object_set_to_full_cpu_read_domain(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	struct drm_i915_gem_object *obj_priv = obj->driver_private;

	if (!obj_priv->page_cpu_valid)
		return;

	/* If we're partially in the CPU read domain, finish moving it in.
	 */
	if (obj->read_domains & I915_GEM_DOMAIN_CPU) {
		int i;

		for (i = 0; i <= (obj->size - 1) / PAGE_SIZE; i++) {
			if (obj_priv->page_cpu_valid[i])
				continue;
			drm_clflush_pages(obj_priv->page_list + i, 1);
		}
		drm_agp_chipset_flush(dev);
	}

	/* Free the page_cpu_valid mappings which are now stale, whether
	 * or not we've got I915_GEM_DOMAIN_CPU.
	 */
	drm_free(obj_priv->page_cpu_valid, obj->size / PAGE_SIZE,
		 DRM_MEM_DRIVER);
	obj_priv->page_cpu_valid = NULL;
}
/**
 * Set the CPU read domain on a range of the object.
 *
 * The object ends up with I915_GEM_DOMAIN_CPU in its read flags although it's
 * not entirely valid.  The page_cpu_valid member of the object flags which
 * pages have been flushed, and will be respected by
 * i915_gem_object_set_to_cpu_domain() if it's called on to get a valid mapping
 * of the whole object.
 *
 * This function returns when the move is complete, including waiting on
 * flushes to occur.
 */
static int
i915_gem_object_set_cpu_read_domain_range(struct drm_gem_object *obj,
					  uint64_t offset, uint64_t size)
{
	struct drm_i915_gem_object *obj_priv = obj->driver_private;
	int i, ret;

	if (offset == 0 && size == obj->size)
		return i915_gem_object_set_to_cpu_domain(obj, 0);

	i915_gem_object_flush_gpu_write_domain(obj);
	/* Wait on any GPU rendering and flushing to occur. */
	ret = i915_gem_object_wait_rendering(obj);
	if (ret != 0)
		return ret;
	i915_gem_object_flush_gtt_write_domain(obj);

	/* If we're already fully in the CPU read domain, we're done. */
	if (obj_priv->page_cpu_valid == NULL &&
	    (obj->read_domains & I915_GEM_DOMAIN_CPU) != 0)
		return 0;

	/* Otherwise, create/clear the per-page CPU read domain flag if we're
	 * newly adding I915_GEM_DOMAIN_CPU
	 */
	if (obj_priv->page_cpu_valid == NULL) {
		obj_priv->page_cpu_valid = drm_calloc(1, obj->size / PAGE_SIZE,
						      DRM_MEM_DRIVER);
		if (obj_priv->page_cpu_valid == NULL)
			return -ENOMEM;
	} else if ((obj->read_domains & I915_GEM_DOMAIN_CPU) == 0)
		memset(obj_priv->page_cpu_valid, 0, obj->size / PAGE_SIZE);

	/* Flush the cache on any pages that are still invalid from the CPU's
	 * perspective.
	 */
	for (i = offset / PAGE_SIZE; i <= (offset + size - 1) / PAGE_SIZE;
	     i++) {
		if (obj_priv->page_cpu_valid[i])
			continue;

		drm_clflush_pages(obj_priv->page_list + i, 1);

		obj_priv->page_cpu_valid[i] = 1;
	}

	/* It should now be out of any other write domains, and we can update
	 * the domain values for our changes.
	 */
	BUG_ON((obj->write_domain & ~I915_GEM_DOMAIN_CPU) != 0);

	obj->read_domains |= I915_GEM_DOMAIN_CPU;

	return 0;
}
/**
 * Pin an object to the GTT and evaluate the relocations landing in it.
 */
static int
i915_gem_object_pin_and_relocate(struct drm_gem_object *obj,
				 struct drm_file *file_priv,
				 struct drm_i915_gem_exec_object *entry)
{
	struct drm_device *dev = obj->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_i915_gem_relocation_entry reloc;
	struct drm_i915_gem_relocation_entry __user *relocs;
	struct drm_i915_gem_object *obj_priv = obj->driver_private;
	int i, ret;
	void __iomem *reloc_page;

	/* Choose the GTT offset for our buffer and put it there. */
	ret = i915_gem_object_pin(obj, (uint32_t) entry->alignment);
	if (ret)
		return ret;

	entry->offset = obj_priv->gtt_offset;

	relocs = (struct drm_i915_gem_relocation_entry __user *)
		 (uintptr_t) entry->relocs_ptr;
	/* Apply the relocations, using the GTT aperture to avoid cache
	 * flushing requirements.
	 */
	for (i = 0; i < entry->relocation_count; i++) {
		struct drm_gem_object *target_obj;
		struct drm_i915_gem_object *target_obj_priv;
		uint32_t reloc_val, reloc_offset;
		uint32_t __iomem *reloc_entry;

		ret = copy_from_user(&reloc, relocs + i, sizeof(reloc));
		if (ret != 0) {
			i915_gem_object_unpin(obj);
			return ret;
		}

		target_obj = drm_gem_object_lookup(obj->dev, file_priv,
						   reloc.target_handle);
		if (target_obj == NULL) {
			i915_gem_object_unpin(obj);
			return -EBADF;
		}
		target_obj_priv = target_obj->driver_private;

		/* The target buffer should have appeared before us in the
		 * exec_object list, so it should have a GTT space bound by now.
		 */
		if (target_obj_priv->gtt_space == NULL) {
			DRM_ERROR("No GTT space found for object %d\n",
				  reloc.target_handle);
			drm_gem_object_unreference(target_obj);
			i915_gem_object_unpin(obj);
			return -EINVAL;
		}

		if (reloc.offset > obj->size - 4) {
			DRM_ERROR("Relocation beyond object bounds: "
				  "obj %p target %d offset %d size %d.\n",
				  obj, reloc.target_handle,
				  (int) reloc.offset, (int) obj->size);
			drm_gem_object_unreference(target_obj);
			i915_gem_object_unpin(obj);
			return -EINVAL;
		}
		if (reloc.offset & 3) {
			DRM_ERROR("Relocation not 4-byte aligned: "
				  "obj %p target %d offset %d.\n",
				  obj, reloc.target_handle,
				  (int) reloc.offset);
			drm_gem_object_unreference(target_obj);
			i915_gem_object_unpin(obj);
			return -EINVAL;
		}

		if (reloc.write_domain & I915_GEM_DOMAIN_CPU ||
		    reloc.read_domains & I915_GEM_DOMAIN_CPU) {
			DRM_ERROR("reloc with read/write CPU domains: "
				  "obj %p target %d offset %d "
				  "read %08x write %08x",
				  obj, reloc.target_handle,
				  (int) reloc.offset,
				  reloc.read_domains,
				  reloc.write_domain);
			drm_gem_object_unreference(target_obj);
			i915_gem_object_unpin(obj);
			return -EINVAL;
		}

		if (reloc.write_domain && target_obj->pending_write_domain &&
		    reloc.write_domain != target_obj->pending_write_domain) {
			DRM_ERROR("Write domain conflict: "
				  "obj %p target %d offset %d "
				  "new %08x old %08x\n",
				  obj, reloc.target_handle,
				  (int) reloc.offset,
				  reloc.write_domain,
				  target_obj->pending_write_domain);
			drm_gem_object_unreference(target_obj);
			i915_gem_object_unpin(obj);
			return -EINVAL;
		}

#if WATCH_RELOC
		DRM_INFO("%s: obj %p offset %08x target %d "
			 "read %08x write %08x gtt %08x "
			 "presumed %08x delta %08x\n",
			 __func__,
			 obj,
			 (int) reloc.offset,
			 (int) reloc.target_handle,
			 (int) reloc.read_domains,
			 (int) reloc.write_domain,
			 (int) target_obj_priv->gtt_offset,
			 (int) reloc.presumed_offset,
			 reloc.delta);
#endif

		target_obj->pending_read_domains |= reloc.read_domains;
		target_obj->pending_write_domain |= reloc.write_domain;

		/* If the relocation already has the right value in it, no
		 * more work needs to be done.
		 */
		if (target_obj_priv->gtt_offset == reloc.presumed_offset) {
			drm_gem_object_unreference(target_obj);
			continue;
		}

		ret = i915_gem_object_set_to_gtt_domain(obj, 1);
		if (ret != 0) {
			drm_gem_object_unreference(target_obj);
			i915_gem_object_unpin(obj);
			return -EINVAL;
		}

		/* Map the page containing the relocation we're going to
		 * perform in this operation.
		 */
		reloc_offset = obj_priv->gtt_offset + reloc.offset;
		reloc_page = io_mapping_map_atomic_wc(dev_priv->mm.gtt_mapping,
						      (reloc_offset &
						       ~(PAGE_SIZE - 1)));
		reloc_entry = (uint32_t __iomem *)(reloc_page +
						   (reloc_offset & (PAGE_SIZE - 1)));
		reloc_val = target_obj_priv->gtt_offset + reloc.delta;

#if WATCH_BUF
		DRM_INFO("Applied relocation: %p@0x%08x %08x -> %08x\n",
			 obj, (unsigned int) reloc.offset,
			 readl(reloc_entry), reloc_val);
#endif
		writel(reloc_val, reloc_entry);
		io_mapping_unmap_atomic(reloc_page);

		/* Write the updated presumed offset for this entry back out
		 * to the user.
		 */
		reloc.presumed_offset = target_obj_priv->gtt_offset;
		ret = copy_to_user(relocs + i, &reloc, sizeof(reloc));
		if (ret != 0) {
			drm_gem_object_unreference(target_obj);
			i915_gem_object_unpin(obj);
			return ret;
		}

		drm_gem_object_unreference(target_obj);
	}

#if WATCH_BUF
	if (0)
		i915_gem_dump_object(obj, 128, __func__, ~0);
#endif
	return 0;
}
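/*
 * The relocation writes above go through an atomic write-combined mapping of
 * the GTT aperture (io_mapping_map_atomic_wc) rather than through a CPU
 * mapping of the object being fixed up, which is why that object only needs
 * moving to the GTT domain first and no clflush of the patched page is
 * required.
 */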
/** Dispatch a batchbuffer to the ring
 */
static int
i915_dispatch_gem_execbuffer(struct drm_device *dev,
			     struct drm_i915_gem_execbuffer *exec,
			     uint64_t exec_offset)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_clip_rect __user *boxes = (struct drm_clip_rect __user *)
					     (uintptr_t) exec->cliprects_ptr;
	int nbox = exec->num_cliprects;
	int i = 0, count;
	uint32_t exec_start, exec_len;
	RING_LOCALS;

	exec_start = (uint32_t) exec_offset + exec->batch_start_offset;
	exec_len = (uint32_t) exec->batch_len;

	if ((exec_start | exec_len) & 0x7) {
		DRM_ERROR("alignment\n");
		return -EINVAL;
	}

	if (!exec_start)
		return -EINVAL;

	count = nbox ? nbox : 1;

	for (i = 0; i < count; i++) {
		if (i < nbox) {
			int ret = i915_emit_box(dev, boxes, i,
						exec->DR1, exec->DR4);
			if (ret)
				return ret;
		}

		if (IS_I830(dev) || IS_845G(dev)) {
			BEGIN_LP_RING(4);
			OUT_RING(MI_BATCH_BUFFER);
			OUT_RING(exec_start | MI_BATCH_NON_SECURE);
			OUT_RING(exec_start + exec_len - 4);
			OUT_RING(0);
			ADVANCE_LP_RING();
		} else {
			BEGIN_LP_RING(2);
			if (IS_I965G(dev)) {
				OUT_RING(MI_BATCH_BUFFER_START |
					 (2 << 6) |
					 MI_BATCH_NON_SECURE_I965);
				OUT_RING(exec_start);
			} else {
				OUT_RING(MI_BATCH_BUFFER_START |
					 (2 << 6));
				OUT_RING(exec_start | MI_BATCH_NON_SECURE);
			}
			ADVANCE_LP_RING();
		}
	}

	/* XXX breadcrumb */
	return 0;
}
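/*
 * Note: 830/845-class chips take an inline MI_BATCH_BUFFER command carrying
 * both the batch start address and its end, while later parts use
 * MI_BATCH_BUFFER_START with just the start address; 965 additionally wants
 * its own non-secure flag (MI_BATCH_NON_SECURE_I965) in the command dword
 * instead of OR-ing the bit into the address word.
 */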
/* Throttle our rendering by waiting until the ring has completed our requests
 * emitted over 20 msec ago.
 *
 * This should get us reasonable parallelism between CPU and GPU but also
 * relatively low latency when blocking on a particular request to finish.
 */
static int
i915_gem_ring_throttle(struct drm_device *dev, struct drm_file *file_priv)
{
	struct drm_i915_file_private *i915_file_priv = file_priv->driver_priv;
	int ret = 0;
	uint32_t seqno;

	mutex_lock(&dev->struct_mutex);
	seqno = i915_file_priv->mm.last_gem_throttle_seqno;
	i915_file_priv->mm.last_gem_throttle_seqno =
		i915_file_priv->mm.last_gem_seqno;
	if (seqno)
		ret = i915_wait_request(dev, seqno);
	mutex_unlock(&dev->struct_mutex);

	return ret;
}
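/*
 * In effect each throttle call waits for whatever seqno was current at the
 * previous throttle call (stashed in last_gem_throttle_seqno), so a client
 * that throttles once per frame ends up at most roughly one frame of
 * requests ahead of the GPU.
 */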
int
i915_gem_execbuffer(struct drm_device *dev, void *data,
		    struct drm_file *file_priv)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_i915_file_private *i915_file_priv = file_priv->driver_priv;
	struct drm_i915_gem_execbuffer *args = data;
	struct drm_i915_gem_exec_object *exec_list = NULL;
	struct drm_gem_object **object_list = NULL;
	struct drm_gem_object *batch_obj;
	int ret, i, pinned = 0;
	uint64_t exec_offset;
	uint32_t seqno, flush_domains;
	int pin_tries;

#if WATCH_EXEC
	DRM_INFO("buffers_ptr %d buffer_count %d len %08x\n",
		 (int) args->buffers_ptr, args->buffer_count, args->batch_len);
#endif

	if (args->buffer_count < 1) {
		DRM_ERROR("execbuf with %d buffers\n", args->buffer_count);
		return -EINVAL;
	}
	/* Copy in the exec list from userland */
	exec_list = drm_calloc(sizeof(*exec_list), args->buffer_count,
			       DRM_MEM_DRIVER);
	object_list = drm_calloc(sizeof(*object_list), args->buffer_count,
				 DRM_MEM_DRIVER);
	if (exec_list == NULL || object_list == NULL) {
		DRM_ERROR("Failed to allocate exec or object list "
			  "for %d buffers\n",
			  args->buffer_count);
		ret = -ENOMEM;
		goto pre_mutex_err;
	}
	ret = copy_from_user(exec_list,
			     (struct drm_i915_relocation_entry __user *)
			     (uintptr_t) args->buffers_ptr,
			     sizeof(*exec_list) * args->buffer_count);
	if (ret != 0) {
		DRM_ERROR("copy %d exec entries failed %d\n",
			  args->buffer_count, ret);
		goto pre_mutex_err;
	}

	mutex_lock(&dev->struct_mutex);

	i915_verify_inactive(dev, __FILE__, __LINE__);

	if (dev_priv->mm.wedged) {
		DRM_ERROR("Execbuf while wedged\n");
		mutex_unlock(&dev->struct_mutex);
		ret = -EIO;
		goto pre_mutex_err;
	}

	if (dev_priv->mm.suspended) {
		DRM_ERROR("Execbuf while VT-switched.\n");
		mutex_unlock(&dev->struct_mutex);
		ret = -EBUSY;
		goto pre_mutex_err;
	}

	/* Look up object handles */
	for (i = 0; i < args->buffer_count; i++) {
		object_list[i] = drm_gem_object_lookup(dev, file_priv,
						       exec_list[i].handle);
		if (object_list[i] == NULL) {
			DRM_ERROR("Invalid object handle %d at index %d\n",
				  exec_list[i].handle, i);
			ret = -EBADF;
			goto err;
		}
	}

	/* Pin and relocate */
	for (pin_tries = 0; ; pin_tries++) {
		ret = 0;
		for (i = 0; i < args->buffer_count; i++) {
			object_list[i]->pending_read_domains = 0;
			object_list[i]->pending_write_domain = 0;
			ret = i915_gem_object_pin_and_relocate(object_list[i],
							       file_priv,
							       &exec_list[i]);
			if (ret)
				break;
			pinned = i + 1;
		}
		/* success */
		if (ret == 0)
			break;

		/* error other than GTT full, or we've already tried again */
		if (ret != -ENOMEM || pin_tries >= 1) {
			DRM_ERROR("Failed to pin buffers %d\n", ret);
			goto err;
		}

		/* unpin all of our buffers */
		for (i = 0; i < pinned; i++)
			i915_gem_object_unpin(object_list[i]);
		pinned = 0;

		/* evict everyone we can from the aperture */
		ret = i915_gem_evict_everything(dev);
		if (ret)
			goto err;
	}

	/* Set the pending read domains for the batch buffer to COMMAND */
	batch_obj = object_list[args->buffer_count-1];
	batch_obj->pending_read_domains = I915_GEM_DOMAIN_COMMAND;
	batch_obj->pending_write_domain = 0;

	i915_verify_inactive(dev, __FILE__, __LINE__);

	/* Zero the global flush/invalidate flags. These
	 * will be modified as new domains are computed
	 * for each object
	 */
	dev->invalidate_domains = 0;
	dev->flush_domains = 0;

	for (i = 0; i < args->buffer_count; i++) {
		struct drm_gem_object *obj = object_list[i];

		/* Compute new gpu domains and update invalidate/flush */
		i915_gem_object_set_to_gpu_domain(obj,
						  obj->pending_read_domains,
						  obj->pending_write_domain);
	}

	i915_verify_inactive(dev, __FILE__, __LINE__);

	if (dev->invalidate_domains | dev->flush_domains) {
#if WATCH_EXEC
		DRM_INFO("%s: invalidate_domains %08x flush_domains %08x\n",
			 __func__,
			 dev->invalidate_domains,
			 dev->flush_domains);
#endif
		i915_gem_flush(dev,
			       dev->invalidate_domains,
			       dev->flush_domains);
		if (dev->flush_domains)
			(void)i915_add_request(dev, dev->flush_domains);
	}

	i915_verify_inactive(dev, __FILE__, __LINE__);

#if WATCH_COHERENCY
	for (i = 0; i < args->buffer_count; i++) {
		i915_gem_object_check_coherency(object_list[i],
						exec_list[i].handle);
	}
#endif

	exec_offset = exec_list[args->buffer_count - 1].offset;

#if WATCH_EXEC
	i915_gem_dump_object(object_list[args->buffer_count - 1],
			     args->batch_len,
			     __func__,
			     ~0);
#endif

	/* Exec the batchbuffer */
	ret = i915_dispatch_gem_execbuffer(dev, args, exec_offset);
	if (ret) {
		DRM_ERROR("dispatch failed %d\n", ret);
		goto err;
	}

	/*
	 * Ensure that the commands in the batch buffer are
	 * finished before the interrupt fires
	 */
	flush_domains = i915_retire_commands(dev);

	i915_verify_inactive(dev, __FILE__, __LINE__);

	/*
	 * Get a seqno representing the execution of the current buffer,
	 * which we can wait on.  We would like to mitigate these interrupts,
	 * likely by only creating seqnos occasionally (so that we have
	 * *some* interrupts representing completion of buffers that we can
	 * wait on when trying to clear up gtt space).
	 */
	seqno = i915_add_request(dev, flush_domains);
	i915_file_priv->mm.last_gem_seqno = seqno;
	for (i = 0; i < args->buffer_count; i++) {
		struct drm_gem_object *obj = object_list[i];

		i915_gem_object_move_to_active(obj, seqno);
#if WATCH_LRU
		DRM_INFO("%s: move to exec list %p\n", __func__, obj);
#endif
	}
#if WATCH_LRU
	i915_dump_lru(dev, __func__);
#endif

	i915_verify_inactive(dev, __FILE__, __LINE__);

	/* Copy the new buffer offsets back to the user's exec list. */
	ret = copy_to_user((struct drm_i915_relocation_entry __user *)
			   (uintptr_t) args->buffers_ptr,
			   exec_list,
			   sizeof(*exec_list) * args->buffer_count);
	if (ret)
		DRM_ERROR("failed to copy %d exec entries "
			  "back to user (%d)\n",
			  args->buffer_count, ret);

err:
	if (object_list != NULL) {
		for (i = 0; i < pinned; i++)
			i915_gem_object_unpin(object_list[i]);

		for (i = 0; i < args->buffer_count; i++)
			drm_gem_object_unreference(object_list[i]);
	}
	mutex_unlock(&dev->struct_mutex);

pre_mutex_err:
	drm_free(object_list, sizeof(*object_list) * args->buffer_count,
		 DRM_MEM_DRIVER);
	drm_free(exec_list, sizeof(*exec_list) * args->buffer_count,
		 DRM_MEM_DRIVER);

	return ret;
}
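/*
 * Pinning strategy in execbuffer above: the first pass tries to pin and
 * relocate every buffer as-is; if that fails with -ENOMEM, everything is
 * unpinned, the aperture is emptied via i915_gem_evict_everything(), and
 * exactly one more attempt is made before giving up.
 */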
int
i915_gem_object_pin(struct drm_gem_object *obj, uint32_t alignment)
{
	struct drm_device *dev = obj->dev;
	struct drm_i915_gem_object *obj_priv = obj->driver_private;
	int ret;

	i915_verify_inactive(dev, __FILE__, __LINE__);
	if (obj_priv->gtt_space == NULL) {
		ret = i915_gem_object_bind_to_gtt(obj, alignment);
		if (ret != 0) {
			DRM_ERROR("Failure to bind: %d", ret);
			return ret;
		}
	}

	obj_priv->pin_count++;

	/* If the object is not active and not pending a flush,
	 * remove it from the inactive list
	 */
	if (obj_priv->pin_count == 1) {
		atomic_inc(&dev->pin_count);
		atomic_add(obj->size, &dev->pin_memory);
		if (!obj_priv->active &&
		    (obj->write_domain & ~(I915_GEM_DOMAIN_CPU |
					   I915_GEM_DOMAIN_GTT)) == 0 &&
		    !list_empty(&obj_priv->list))
			list_del_init(&obj_priv->list);
	}
	i915_verify_inactive(dev, __FILE__, __LINE__);

	return 0;
}
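/*
 * While pinned (pin_count >= 1) the object is taken off the inactive list
 * above, so the eviction code never sees it; unpinning (below) moves it back
 * to the inactive list once it is neither active nor dirty in a GPU domain.
 */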
void
i915_gem_object_unpin(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_i915_gem_object *obj_priv = obj->driver_private;

	i915_verify_inactive(dev, __FILE__, __LINE__);
	obj_priv->pin_count--;
	BUG_ON(obj_priv->pin_count < 0);
	BUG_ON(obj_priv->gtt_space == NULL);

	/* If the object is no longer pinned, and is
	 * neither active nor being flushed, then stick it on
	 * the inactive list
	 */
	if (obj_priv->pin_count == 0) {
		if (!obj_priv->active &&
		    (obj->write_domain & ~(I915_GEM_DOMAIN_CPU |
					   I915_GEM_DOMAIN_GTT)) == 0)
			list_move_tail(&obj_priv->list,
				       &dev_priv->mm.inactive_list);
		atomic_dec(&dev->pin_count);
		atomic_sub(obj->size, &dev->pin_memory);
	}
	i915_verify_inactive(dev, __FILE__, __LINE__);
}
int
i915_gem_pin_ioctl(struct drm_device *dev, void *data,
		   struct drm_file *file_priv)
{
	struct drm_i915_gem_pin *args = data;
	struct drm_gem_object *obj;
	struct drm_i915_gem_object *obj_priv;
	int ret;

	mutex_lock(&dev->struct_mutex);

	obj = drm_gem_object_lookup(dev, file_priv, args->handle);
	if (obj == NULL) {
		DRM_ERROR("Bad handle in i915_gem_pin_ioctl(): %d\n",
			  args->handle);
		mutex_unlock(&dev->struct_mutex);
		return -EBADF;
	}
	obj_priv = obj->driver_private;

	if (obj_priv->pin_filp != NULL && obj_priv->pin_filp != file_priv) {
		DRM_ERROR("Already pinned in i915_gem_pin_ioctl(): %d\n",
			  args->handle);
		mutex_unlock(&dev->struct_mutex);
		return -EINVAL;
	}

	obj_priv->user_pin_count++;
	obj_priv->pin_filp = file_priv;
	if (obj_priv->user_pin_count == 1) {
		ret = i915_gem_object_pin(obj, args->alignment);
		if (ret != 0) {
			drm_gem_object_unreference(obj);
			mutex_unlock(&dev->struct_mutex);
			return ret;
		}
	}

	/* XXX - flush the CPU caches for pinned objects
	 * as the X server doesn't manage domains yet
	 */
	i915_gem_object_flush_cpu_write_domain(obj);
	args->offset = obj_priv->gtt_offset;
	drm_gem_object_unreference(obj);
	mutex_unlock(&dev->struct_mutex);

	return 0;
}
int
i915_gem_unpin_ioctl(struct drm_device *dev, void *data,
		     struct drm_file *file_priv)
{
	struct drm_i915_gem_pin *args = data;
	struct drm_gem_object *obj;
	struct drm_i915_gem_object *obj_priv;

	mutex_lock(&dev->struct_mutex);

	obj = drm_gem_object_lookup(dev, file_priv, args->handle);
	if (obj == NULL) {
		DRM_ERROR("Bad handle in i915_gem_unpin_ioctl(): %d\n",
			  args->handle);
		mutex_unlock(&dev->struct_mutex);
		return -EBADF;
	}

	obj_priv = obj->driver_private;
	if (obj_priv->pin_filp != file_priv) {
		DRM_ERROR("Not pinned by caller in i915_gem_pin_ioctl(): %d\n",
			  args->handle);
		drm_gem_object_unreference(obj);
		mutex_unlock(&dev->struct_mutex);
		return -EINVAL;
	}
	obj_priv->user_pin_count--;
	if (obj_priv->user_pin_count == 0) {
		obj_priv->pin_filp = NULL;
		i915_gem_object_unpin(obj);
	}

	drm_gem_object_unreference(obj);
	mutex_unlock(&dev->struct_mutex);
	return 0;
}
int
i915_gem_busy_ioctl(struct drm_device *dev, void *data,
		    struct drm_file *file_priv)
{
	struct drm_i915_gem_busy *args = data;
	struct drm_gem_object *obj;
	struct drm_i915_gem_object *obj_priv;

	mutex_lock(&dev->struct_mutex);
	obj = drm_gem_object_lookup(dev, file_priv, args->handle);
	if (obj == NULL) {
		DRM_ERROR("Bad handle in i915_gem_busy_ioctl(): %d\n",
			  args->handle);
		mutex_unlock(&dev->struct_mutex);
		return -EBADF;
	}

	obj_priv = obj->driver_private;
	/* Don't count being on the flushing list against the object being
	 * done.  Otherwise, a buffer left on the flushing list but not getting
	 * flushed (because nobody's flushing that domain) won't ever return
	 * unbusy and get reused by libdrm's bo cache.  The other expected
	 * consumer of this interface, OpenGL's occlusion queries, also specs
	 * that the objects get unbusy "eventually" without any interference.
	 */
	args->busy = obj_priv->active && obj_priv->last_rendering_seqno != 0;

	drm_gem_object_unreference(obj);
	mutex_unlock(&dev->struct_mutex);
	return 0;
}
int
i915_gem_throttle_ioctl(struct drm_device *dev, void *data,
			struct drm_file *file_priv)
{
	return i915_gem_ring_throttle(dev, file_priv);
}
int i915_gem_init_object(struct drm_gem_object *obj)
{
	struct drm_i915_gem_object *obj_priv;

	obj_priv = drm_calloc(1, sizeof(*obj_priv), DRM_MEM_DRIVER);
	if (obj_priv == NULL)
		return -ENOMEM;

	/*
	 * We've just allocated pages from the kernel,
	 * so they've just been written by the CPU with
	 * zeros. They'll need to be clflushed before we
	 * use them with the GPU.
	 */
	obj->write_domain = I915_GEM_DOMAIN_CPU;
	obj->read_domains = I915_GEM_DOMAIN_CPU;

	obj_priv->agp_type = AGP_USER_MEMORY;

	obj->driver_private = obj_priv;
	obj_priv->obj = obj;
	obj_priv->fence_reg = I915_FENCE_REG_NONE;
	INIT_LIST_HEAD(&obj_priv->list);

	return 0;
}
void i915_gem_free_object(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	struct drm_gem_mm *mm = dev->mm_private;
	struct drm_map_list *list;
	struct drm_map *map;
	struct drm_i915_gem_object *obj_priv = obj->driver_private;

	while (obj_priv->pin_count > 0)
		i915_gem_object_unpin(obj);

	i915_gem_object_unbind(obj);

	list = &obj->map_list;
	drm_ht_remove_item(&mm->offset_hash, &list->hash);

	if (list->file_offset_node) {
		drm_mm_put_block(list->file_offset_node);
		list->file_offset_node = NULL;
	}

	map = list->map;
	if (map) {
		drm_free(map, sizeof(*map), DRM_MEM_DRIVER);
		list->map = NULL;
	}

	drm_free(obj_priv->page_cpu_valid, 1, DRM_MEM_DRIVER);
	drm_free(obj->driver_private, 1, DRM_MEM_DRIVER);
}
/** Unbinds all objects that are on the given buffer list. */
static int
i915_gem_evict_from_list(struct drm_device *dev, struct list_head *head)
{
	struct drm_gem_object *obj;
	struct drm_i915_gem_object *obj_priv;
	int ret;

	while (!list_empty(head)) {
		obj_priv = list_first_entry(head,
					    struct drm_i915_gem_object,
					    list);
		obj = obj_priv->obj;

		if (obj_priv->pin_count != 0) {
			DRM_ERROR("Pinned object in unbind list\n");
			mutex_unlock(&dev->struct_mutex);
			return -EINVAL;
		}

		ret = i915_gem_object_unbind(obj);
		if (ret != 0) {
			DRM_ERROR("Error unbinding object in LeaveVT: %d\n",
				  ret);
			mutex_unlock(&dev->struct_mutex);
			return ret;
		}
	}

	return 0;
}
static int
i915_gem_idle(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	uint32_t seqno, cur_seqno, last_seqno;
	int stuck, ret;

	mutex_lock(&dev->struct_mutex);

	if (dev_priv->mm.suspended || dev_priv->ring.ring_obj == NULL) {
		mutex_unlock(&dev->struct_mutex);
		return 0;
	}

	/* Hack!  Don't let anybody do execbuf while we don't control the chip.
	 * We need to replace this with a semaphore, or something.
	 */
	dev_priv->mm.suspended = 1;

	/* Cancel the retire work handler, wait for it to finish if running
	 */
	mutex_unlock(&dev->struct_mutex);
	cancel_delayed_work_sync(&dev_priv->mm.retire_work);
	mutex_lock(&dev->struct_mutex);

	i915_kernel_lost_context(dev);

	/* Flush the GPU along with all non-CPU write domains
	 */
	i915_gem_flush(dev, ~(I915_GEM_DOMAIN_CPU|I915_GEM_DOMAIN_GTT),
		       ~(I915_GEM_DOMAIN_CPU|I915_GEM_DOMAIN_GTT));
	seqno = i915_add_request(dev, ~I915_GEM_DOMAIN_CPU);

	if (seqno == 0) {
		mutex_unlock(&dev->struct_mutex);
		return -ENOMEM;
	}

	dev_priv->mm.waiting_gem_seqno = seqno;
	last_seqno = 0;
	stuck = 0;
	for (;;) {
		cur_seqno = i915_get_gem_seqno(dev);
		if (i915_seqno_passed(cur_seqno, seqno))
			break;
		if (last_seqno == cur_seqno) {
			if (stuck++ > 100) {
				DRM_ERROR("hardware wedged\n");
				dev_priv->mm.wedged = 1;
				DRM_WAKEUP(&dev_priv->irq_queue);
				break;
			}
		}
		msleep(10);
		last_seqno = cur_seqno;
	}
	dev_priv->mm.waiting_gem_seqno = 0;

	i915_gem_retire_requests(dev);

	if (!dev_priv->mm.wedged) {
		/* Active and flushing should now be empty as we've
		 * waited for a sequence higher than any pending execbuffer
		 */
		WARN_ON(!list_empty(&dev_priv->mm.active_list));
		WARN_ON(!list_empty(&dev_priv->mm.flushing_list));
		/* Request should now be empty as we've also waited
		 * for the last request in the list
		 */
		WARN_ON(!list_empty(&dev_priv->mm.request_list));
	}

	/* Empty the active and flushing lists to inactive.  If there's
	 * anything left at this point, it means that we're wedged and
	 * nothing good's going to happen by leaving them there.  So strip
	 * the GPU domains and just stuff them onto inactive.
	 */
	while (!list_empty(&dev_priv->mm.active_list)) {
		struct drm_i915_gem_object *obj_priv;

		obj_priv = list_first_entry(&dev_priv->mm.active_list,
					    struct drm_i915_gem_object,
					    list);
		obj_priv->obj->write_domain &= ~I915_GEM_GPU_DOMAINS;
		i915_gem_object_move_to_inactive(obj_priv->obj);
	}

	while (!list_empty(&dev_priv->mm.flushing_list)) {
		struct drm_i915_gem_object *obj_priv;

		obj_priv = list_first_entry(&dev_priv->mm.flushing_list,
					    struct drm_i915_gem_object,
					    list);
		obj_priv->obj->write_domain &= ~I915_GEM_GPU_DOMAINS;
		i915_gem_object_move_to_inactive(obj_priv->obj);
	}

	/* Move all inactive buffers out of the GTT. */
	ret = i915_gem_evict_from_list(dev, &dev_priv->mm.inactive_list);
	WARN_ON(!list_empty(&dev_priv->mm.inactive_list));
	if (ret) {
		mutex_unlock(&dev->struct_mutex);
		return ret;
	}

	i915_gem_cleanup_ringbuffer(dev);
	mutex_unlock(&dev->struct_mutex);

	return 0;
}
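/*
 * Quiescing sequence above: block new execbuffers via mm.suspended, flush
 * every non-CPU write domain, emit one final request and poll its seqno;
 * if the seqno stops advancing for on the order of a hundred polls the
 * hardware is declared wedged and the active/flushing lists are force-moved
 * to inactive so teardown can still proceed.
 */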
static int
i915_gem_init_hws(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_gem_object *obj;
	struct drm_i915_gem_object *obj_priv;
	int ret;

	/* If we need a physical address for the status page, it's already
	 * initialized at driver load time.
	 */
	if (!I915_NEED_GFX_HWS(dev))
		return 0;

	obj = drm_gem_object_alloc(dev, 4096);
	if (obj == NULL) {
		DRM_ERROR("Failed to allocate status page\n");
		return -ENOMEM;
	}
	obj_priv = obj->driver_private;
	obj_priv->agp_type = AGP_USER_CACHED_MEMORY;

	ret = i915_gem_object_pin(obj, 4096);
	if (ret != 0) {
		drm_gem_object_unreference(obj);
		return ret;
	}

	dev_priv->status_gfx_addr = obj_priv->gtt_offset;

	dev_priv->hw_status_page = kmap(obj_priv->page_list[0]);
	if (dev_priv->hw_status_page == NULL) {
		DRM_ERROR("Failed to map status page.\n");
		memset(&dev_priv->hws_map, 0, sizeof(dev_priv->hws_map));
		drm_gem_object_unreference(obj);
		return -EINVAL;
	}
	dev_priv->hws_obj = obj;
	memset(dev_priv->hw_status_page, 0, PAGE_SIZE);
	I915_WRITE(HWS_PGA, dev_priv->status_gfx_addr);
	I915_READ(HWS_PGA); /* posting read */
	DRM_DEBUG("hws offset: 0x%08x\n", dev_priv->status_gfx_addr);

	return 0;
}
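/*
 * The hardware status page set up above is a single GEM-backed page the GPU
 * writes and the CPU only reads through the kmap'ed hw_status_page pointer
 * (e.g. the request breadcrumbs that i915_get_gem_seqno() looks at); chips
 * without a GTT-addressed status page keep the physically addressed one
 * allocated at driver load, hence the early return for !I915_NEED_GFX_HWS.
 */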
static int
i915_gem_init_ringbuffer(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_gem_object *obj;
	struct drm_i915_gem_object *obj_priv;
	drm_i915_ring_buffer_t *ring = &dev_priv->ring;
	int ret;
	u32 head;

	ret = i915_gem_init_hws(dev);
	if (ret != 0)
		return ret;

	obj = drm_gem_object_alloc(dev, 128 * 1024);
	if (obj == NULL) {
		DRM_ERROR("Failed to allocate ringbuffer\n");
		return -ENOMEM;
	}
	obj_priv = obj->driver_private;

	ret = i915_gem_object_pin(obj, 4096);
	if (ret != 0) {
		drm_gem_object_unreference(obj);
		return ret;
	}

	/* Set up the kernel mapping for the ring. */
	ring->Size = obj->size;
	ring->tail_mask = obj->size - 1;

	ring->map.offset = dev->agp->base + obj_priv->gtt_offset;
	ring->map.size = obj->size;
	ring->map.type = 0;
	ring->map.flags = 0;
	ring->map.mtrr = 0;

	drm_core_ioremap_wc(&ring->map, dev);
	if (ring->map.handle == NULL) {
		DRM_ERROR("Failed to map ringbuffer.\n");
		memset(&dev_priv->ring, 0, sizeof(dev_priv->ring));
		drm_gem_object_unreference(obj);
		return -EINVAL;
	}
	ring->ring_obj = obj;
	ring->virtual_start = ring->map.handle;

	/* Stop the ring if it's running. */
	I915_WRITE(PRB0_CTL, 0);
	I915_WRITE(PRB0_TAIL, 0);
	I915_WRITE(PRB0_HEAD, 0);

	/* Initialize the ring. */
	I915_WRITE(PRB0_START, obj_priv->gtt_offset);
	head = I915_READ(PRB0_HEAD) & HEAD_ADDR;

	/* G45 ring initialization fails to reset head to zero */
	if (head != 0) {
		DRM_ERROR("Ring head not reset to zero "
			  "ctl %08x head %08x tail %08x start %08x\n",
			  I915_READ(PRB0_CTL),
			  I915_READ(PRB0_HEAD),
			  I915_READ(PRB0_TAIL),
			  I915_READ(PRB0_START));
		I915_WRITE(PRB0_HEAD, 0);

		DRM_ERROR("Ring head forced to zero "
			  "ctl %08x head %08x tail %08x start %08x\n",
			  I915_READ(PRB0_CTL),
			  I915_READ(PRB0_HEAD),
			  I915_READ(PRB0_TAIL),
			  I915_READ(PRB0_START));
	}

	I915_WRITE(PRB0_CTL,
		   ((obj->size - 4096) & RING_NR_PAGES) |
		   RING_NO_REPORT |
		   RING_VALID);

	head = I915_READ(PRB0_HEAD) & HEAD_ADDR;

	/* If the head is still not zero, the ring is dead */
	if (head != 0) {
		DRM_ERROR("Ring initialization failed "
			  "ctl %08x head %08x tail %08x start %08x\n",
			  I915_READ(PRB0_CTL),
			  I915_READ(PRB0_HEAD),
			  I915_READ(PRB0_TAIL),
			  I915_READ(PRB0_START));
		return -EIO;
	}

	/* Update our cache of the ring state */
	if (!drm_core_check_feature(dev, DRIVER_MODESET))
		i915_kernel_lost_context(dev);
	else {
		ring->head = I915_READ(PRB0_HEAD) & HEAD_ADDR;
		ring->tail = I915_READ(PRB0_TAIL) & TAIL_ADDR;
		ring->space = ring->head - (ring->tail + 8);
		if (ring->space < 0)
			ring->space += ring->Size;
	}

	return 0;
}
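/*
 * Ring space bookkeeping above: head and tail are byte offsets into the
 * ring, and free space is head - (tail + 8), wrapping by adding Size when
 * the result goes negative.  For example, a 128 KiB ring with head = 0x100
 * and tail = 0x2000 has 0x100 - 0x2008 + 0x20000 = 0x1e0f8 bytes free; the
 * 8-byte slack apparently keeps tail from running right up against head.
 */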
void
i915_gem_cleanup_ringbuffer(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;

	if (dev_priv->ring.ring_obj == NULL)
		return;

	drm_core_ioremapfree(&dev_priv->ring.map, dev);

	i915_gem_object_unpin(dev_priv->ring.ring_obj);
	drm_gem_object_unreference(dev_priv->ring.ring_obj);
	dev_priv->ring.ring_obj = NULL;
	memset(&dev_priv->ring, 0, sizeof(dev_priv->ring));

	if (dev_priv->hws_obj != NULL) {
		struct drm_gem_object *obj = dev_priv->hws_obj;
		struct drm_i915_gem_object *obj_priv = obj->driver_private;

		kunmap(obj_priv->page_list[0]);
		i915_gem_object_unpin(obj);
		drm_gem_object_unreference(obj);
		dev_priv->hws_obj = NULL;
		memset(&dev_priv->hws_map, 0, sizeof(dev_priv->hws_map));
		dev_priv->hw_status_page = NULL;

		/* Write high address into HWS_PGA when disabling. */
		I915_WRITE(HWS_PGA, 0x1ffff000);
	}
}
int
i915_gem_entervt_ioctl(struct drm_device *dev, void *data,
		       struct drm_file *file_priv)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	int ret;

	if (drm_core_check_feature(dev, DRIVER_MODESET))
		return 0;

	if (dev_priv->mm.wedged) {
		DRM_ERROR("Reenabling wedged hardware, good luck\n");
		dev_priv->mm.wedged = 0;
	}

	ret = i915_gem_init_ringbuffer(dev);
	if (ret != 0)
		return ret;

	dev_priv->mm.gtt_mapping = io_mapping_create_wc(dev->agp->base,
							dev->agp->agp_info.aper_size
							* 1024 * 1024);

	mutex_lock(&dev->struct_mutex);
	BUG_ON(!list_empty(&dev_priv->mm.active_list));
	BUG_ON(!list_empty(&dev_priv->mm.flushing_list));
	BUG_ON(!list_empty(&dev_priv->mm.inactive_list));
	BUG_ON(!list_empty(&dev_priv->mm.request_list));
	dev_priv->mm.suspended = 0;
	mutex_unlock(&dev->struct_mutex);

	drm_irq_install(dev);

	return 0;
}
int
i915_gem_leavevt_ioctl(struct drm_device *dev, void *data,
		       struct drm_file *file_priv)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	int ret;

	if (drm_core_check_feature(dev, DRIVER_MODESET))
		return 0;

	ret = i915_gem_idle(dev);
	drm_irq_uninstall(dev);

	io_mapping_free(dev_priv->mm.gtt_mapping);
	return ret;
}
void
i915_gem_lastclose(struct drm_device *dev)
{
	int ret;

	ret = i915_gem_idle(dev);
	if (ret)
		DRM_ERROR("failed to idle hardware: %d\n", ret);
}
void
i915_gem_load(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;

	INIT_LIST_HEAD(&dev_priv->mm.active_list);
	INIT_LIST_HEAD(&dev_priv->mm.flushing_list);
	INIT_LIST_HEAD(&dev_priv->mm.inactive_list);
	INIT_LIST_HEAD(&dev_priv->mm.request_list);
	INIT_DELAYED_WORK(&dev_priv->mm.retire_work,
			  i915_gem_retire_work_handler);
	dev_priv->mm.next_gem_seqno = 1;

	/* Old X drivers will take 0-2 for front, back, depth buffers */
	dev_priv->fence_reg_start = 3;

	if (IS_I965G(dev))
		dev_priv->num_fence_regs = 16;
	else
		dev_priv->num_fence_regs = 8;

	i915_gem_detect_bit_6_swizzle(dev);
}