/*
 * Copyright © 2008 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Eric Anholt <eric@anholt.net>
 *
 */
#include "drmP.h"
#include "drm.h"
#include "i915_drm.h"
#include "i915_drv.h"
#include <linux/swap.h>
static int
i915_gem_object_set_domain(struct drm_gem_object *obj,
                           uint32_t read_domains,
                           uint32_t write_domain);
static int
i915_gem_object_set_domain_range(struct drm_gem_object *obj,
                                 uint64_t offset,
                                 uint64_t size,
                                 uint32_t read_domains,
                                 uint32_t write_domain);
static int
i915_gem_set_domain(struct drm_gem_object *obj,
                    struct drm_file *file_priv,
                    uint32_t read_domains,
                    uint32_t write_domain);
static int i915_gem_object_get_page_list(struct drm_gem_object *obj);
static void i915_gem_object_free_page_list(struct drm_gem_object *obj);
static int i915_gem_object_wait_rendering(struct drm_gem_object *obj);
int
i915_gem_init_ioctl(struct drm_device *dev, void *data,
                    struct drm_file *file_priv)
{
        drm_i915_private_t *dev_priv = dev->dev_private;
        struct drm_i915_gem_init *args = data;

        mutex_lock(&dev->struct_mutex);

        if (args->gtt_start >= args->gtt_end ||
            (args->gtt_start & (PAGE_SIZE - 1)) != 0 ||
            (args->gtt_end & (PAGE_SIZE - 1)) != 0) {
                mutex_unlock(&dev->struct_mutex);
                return -EINVAL;
        }

        drm_mm_init(&dev_priv->mm.gtt_space, args->gtt_start,
                    args->gtt_end - args->gtt_start);

        dev->gtt_total = (uint32_t) (args->gtt_end - args->gtt_start);

        mutex_unlock(&dev->struct_mutex);

        return 0;
}
/**
 * Creates a new mm object and returns a handle to it.
 */
int
i915_gem_create_ioctl(struct drm_device *dev, void *data,
                      struct drm_file *file_priv)
{
        struct drm_i915_gem_create *args = data;
        struct drm_gem_object *obj;
        int handle, ret;

        args->size = roundup(args->size, PAGE_SIZE);

        /* Allocate the new object */
        obj = drm_gem_object_alloc(dev, args->size);
        if (obj == NULL)
                return -ENOMEM;

        ret = drm_gem_handle_create(file_priv, obj, &handle);
        mutex_lock(&dev->struct_mutex);
        drm_gem_object_handle_unreference(obj);
        mutex_unlock(&dev->struct_mutex);

        if (ret)
                return ret;

        args->handle = handle;

        return 0;
}
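/*
 * Illustrative userspace sketch (not part of this file; error handling
 * omitted).  Assuming an open DRM fd and the ioctl definitions from
 * i915_drm.h, a client creates an object and then refers to it only by
 * the returned handle:
 *
 *      struct drm_i915_gem_create create = { .size = 4096 };
 *      ioctl(fd, DRM_IOCTL_I915_GEM_CREATE, &create);
 *      // create.handle now names the object for pread/pwrite/execbuffer
 */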
/**
 * Reads data from the object referenced by handle.
 *
 * On error, the contents of *data are undefined.
 */
int
i915_gem_pread_ioctl(struct drm_device *dev, void *data,
                     struct drm_file *file_priv)
{
        struct drm_i915_gem_pread *args = data;
        struct drm_gem_object *obj;
        struct drm_i915_gem_object *obj_priv;
        ssize_t read;
        loff_t offset;
        int ret;

        obj = drm_gem_object_lookup(dev, file_priv, args->handle);
        if (obj == NULL)
                return -EBADF;
        obj_priv = obj->driver_private;

        /* Bounds check source.
         *
         * XXX: This could use review for overflow issues...
         */
        if (args->offset > obj->size || args->size > obj->size ||
            args->offset + args->size > obj->size) {
                drm_gem_object_unreference(obj);
                return -EINVAL;
        }

        mutex_lock(&dev->struct_mutex);

        ret = i915_gem_object_set_domain_range(obj, args->offset, args->size,
                                               I915_GEM_DOMAIN_CPU, 0);
        if (ret != 0) {
                drm_gem_object_unreference(obj);
                mutex_unlock(&dev->struct_mutex);
                return ret;
        }

        offset = args->offset;

        read = vfs_read(obj->filp, (char __user *)(uintptr_t)args->data_ptr,
                        args->size, &offset);
        if (read != args->size) {
                drm_gem_object_unreference(obj);
                mutex_unlock(&dev->struct_mutex);
                if (read < 0)
                        return read;
                else
                        return -EINVAL;
        }

        drm_gem_object_unreference(obj);
        mutex_unlock(&dev->struct_mutex);

        return 0;
}
171 i915_gem_gtt_pwrite(struct drm_device
*dev
, struct drm_gem_object
*obj
,
172 struct drm_i915_gem_pwrite
*args
,
173 struct drm_file
*file_priv
)
175 struct drm_i915_gem_object
*obj_priv
= obj
->driver_private
;
178 char __user
*user_data
;
183 unsigned long unwritten
;
185 user_data
= (char __user
*) (uintptr_t) args
->data_ptr
;
187 if (!access_ok(VERIFY_READ
, user_data
, remain
))
191 mutex_lock(&dev
->struct_mutex
);
192 ret
= i915_gem_object_pin(obj
, 0);
194 mutex_unlock(&dev
->struct_mutex
);
197 ret
= i915_gem_set_domain(obj
, file_priv
,
198 I915_GEM_DOMAIN_GTT
, I915_GEM_DOMAIN_GTT
);
202 obj_priv
= obj
->driver_private
;
203 offset
= obj_priv
->gtt_offset
+ args
->offset
;
207 /* Operation in this page
210 * o = offset within page
213 i
= offset
>> PAGE_SHIFT
;
214 o
= offset
& (PAGE_SIZE
-1);
216 if ((o
+ l
) > PAGE_SIZE
)
219 pfn
= (dev
->agp
->base
>> PAGE_SHIFT
) + i
;
221 #ifdef CONFIG_HIGHMEM
222 /* kmap_atomic can't map IO pages on non-HIGHMEM kernels
224 vaddr
= kmap_atomic_pfn(pfn
, KM_USER0
);
226 DRM_INFO("pwrite i %d o %d l %d pfn %ld vaddr %p\n",
227 i
, o
, l
, pfn
, vaddr
);
229 unwritten
= __copy_from_user_inatomic_nocache(vaddr
+ o
,
231 kunmap_atomic(vaddr
, KM_USER0
);
234 #endif /* CONFIG_HIGHMEM */
236 vaddr
= ioremap(pfn
<< PAGE_SHIFT
, PAGE_SIZE
);
238 DRM_INFO("pwrite slow i %d o %d l %d "
239 "pfn %ld vaddr %p\n",
240 i
, o
, l
, pfn
, vaddr
);
246 unwritten
= __copy_from_user(vaddr
+ o
, user_data
, l
);
248 DRM_INFO("unwritten %ld\n", unwritten
);
261 #if WATCH_PWRITE && 1
262 i915_gem_clflush_object(obj
);
263 i915_gem_dump_object(obj
, args
->offset
+ args
->size
, __func__
, ~0);
264 i915_gem_clflush_object(obj
);
268 i915_gem_object_unpin(obj
);
269 mutex_unlock(&dev
->struct_mutex
);
static int
i915_gem_shmem_pwrite(struct drm_device *dev, struct drm_gem_object *obj,
                      struct drm_i915_gem_pwrite *args,
                      struct drm_file *file_priv)
{
        loff_t offset;
        ssize_t written;
        int ret;

        mutex_lock(&dev->struct_mutex);

        ret = i915_gem_set_domain(obj, file_priv,
                                  I915_GEM_DOMAIN_CPU, I915_GEM_DOMAIN_CPU);
        if (ret) {
                mutex_unlock(&dev->struct_mutex);
                return ret;
        }

        offset = args->offset;

        written = vfs_write(obj->filp,
                            (char __user *)(uintptr_t) args->data_ptr,
                            args->size, &offset);
        if (written != args->size) {
                mutex_unlock(&dev->struct_mutex);
                if (written < 0)
                        return written;
                else
                        return -EINVAL;
        }

        mutex_unlock(&dev->struct_mutex);

        return 0;
}
/**
 * Writes data to the object referenced by handle.
 *
 * On error, the contents of the buffer that were to be modified are undefined.
 */
int
i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
                      struct drm_file *file_priv)
{
        struct drm_i915_gem_pwrite *args = data;
        struct drm_gem_object *obj;
        struct drm_i915_gem_object *obj_priv;
        int ret = 0;

        obj = drm_gem_object_lookup(dev, file_priv, args->handle);
        if (obj == NULL)
                return -EBADF;
        obj_priv = obj->driver_private;

        /* Bounds check destination.
         *
         * XXX: This could use review for overflow issues...
         */
        if (args->offset > obj->size || args->size > obj->size ||
            args->offset + args->size > obj->size) {
                drm_gem_object_unreference(obj);
                return -EINVAL;
        }

        /* We can only do the GTT pwrite on untiled buffers, as otherwise
         * it would end up going through the fenced access, and we'll get
         * different detiling behavior between reading and writing.
         * pread/pwrite currently are reading and writing from the CPU
         * perspective, requiring manual detiling by the client.
         */
        if (obj_priv->tiling_mode == I915_TILING_NONE &&
            dev->gtt_total != 0)
                ret = i915_gem_gtt_pwrite(dev, obj, args, file_priv);
        else
                ret = i915_gem_shmem_pwrite(dev, obj, args, file_priv);

        if (ret)
                DRM_INFO("pwrite failed %d\n", ret);

        drm_gem_object_unreference(obj);

        return ret;
}
362 * Called when user space prepares to use an object
365 i915_gem_set_domain_ioctl(struct drm_device
*dev
, void *data
,
366 struct drm_file
*file_priv
)
368 struct drm_i915_gem_set_domain
*args
= data
;
369 struct drm_gem_object
*obj
;
372 if (!(dev
->driver
->driver_features
& DRIVER_GEM
))
375 obj
= drm_gem_object_lookup(dev
, file_priv
, args
->handle
);
379 mutex_lock(&dev
->struct_mutex
);
381 DRM_INFO("set_domain_ioctl %p(%d), %08x %08x\n",
382 obj
, obj
->size
, args
->read_domains
, args
->write_domain
);
384 ret
= i915_gem_set_domain(obj
, file_priv
,
385 args
->read_domains
, args
->write_domain
);
386 drm_gem_object_unreference(obj
);
387 mutex_unlock(&dev
->struct_mutex
);
392 * Called when user space has done writes to this buffer
395 i915_gem_sw_finish_ioctl(struct drm_device
*dev
, void *data
,
396 struct drm_file
*file_priv
)
398 struct drm_i915_gem_sw_finish
*args
= data
;
399 struct drm_gem_object
*obj
;
400 struct drm_i915_gem_object
*obj_priv
;
403 if (!(dev
->driver
->driver_features
& DRIVER_GEM
))
406 mutex_lock(&dev
->struct_mutex
);
407 obj
= drm_gem_object_lookup(dev
, file_priv
, args
->handle
);
409 mutex_unlock(&dev
->struct_mutex
);
414 DRM_INFO("%s: sw_finish %d (%p %d)\n",
415 __func__
, args
->handle
, obj
, obj
->size
);
417 obj_priv
= obj
->driver_private
;
419 /* Pinned buffers may be scanout, so flush the cache */
420 if ((obj
->write_domain
& I915_GEM_DOMAIN_CPU
) && obj_priv
->pin_count
) {
421 i915_gem_clflush_object(obj
);
422 drm_agp_chipset_flush(dev
);
424 drm_gem_object_unreference(obj
);
425 mutex_unlock(&dev
->struct_mutex
);
/**
 * Maps the contents of an object, returning the address it is mapped
 * into.
 *
 * While the mapping holds a reference on the contents of the object, it doesn't
 * imply a ref on the object itself.
 */
int
i915_gem_mmap_ioctl(struct drm_device *dev, void *data,
                    struct drm_file *file_priv)
{
        struct drm_i915_gem_mmap *args = data;
        struct drm_gem_object *obj;
        loff_t offset;
        unsigned long addr;

        if (!(dev->driver->driver_features & DRIVER_GEM))
                return -ENODEV;

        obj = drm_gem_object_lookup(dev, file_priv, args->handle);
        if (obj == NULL)
                return -EBADF;

        offset = args->offset;

        down_write(&current->mm->mmap_sem);
        addr = do_mmap(obj->filp, 0, args->size,
                       PROT_READ | PROT_WRITE, MAP_SHARED,
                       args->offset);
        up_write(&current->mm->mmap_sem);
        mutex_lock(&dev->struct_mutex);
        drm_gem_object_unreference(obj);
        mutex_unlock(&dev->struct_mutex);
        if (IS_ERR((void *)addr))
                return addr;

        args->addr_ptr = (uint64_t) addr;

        return 0;
}
static void
i915_gem_object_free_page_list(struct drm_gem_object *obj)
{
        struct drm_i915_gem_object *obj_priv = obj->driver_private;
        int page_count = obj->size / PAGE_SIZE;
        int i;

        if (obj_priv->page_list == NULL)
                return;

        for (i = 0; i < page_count; i++)
                if (obj_priv->page_list[i] != NULL) {
                        set_page_dirty(obj_priv->page_list[i]);
                        mark_page_accessed(obj_priv->page_list[i]);
                        page_cache_release(obj_priv->page_list[i]);
                }

        drm_free(obj_priv->page_list,
                 page_count * sizeof(struct page *),
                 DRM_MEM_DRIVER);
        obj_priv->page_list = NULL;
}
static void
i915_gem_object_move_to_active(struct drm_gem_object *obj)
{
        struct drm_device *dev = obj->dev;
        drm_i915_private_t *dev_priv = dev->dev_private;
        struct drm_i915_gem_object *obj_priv = obj->driver_private;

        /* Add a reference if we're newly entering the active list. */
        if (!obj_priv->active) {
                drm_gem_object_reference(obj);
                obj_priv->active = 1;
        }
        /* Move from whatever list we were on to the tail of execution. */
        list_move_tail(&obj_priv->list,
                       &dev_priv->mm.active_list);
}
static void
i915_gem_object_move_to_inactive(struct drm_gem_object *obj)
{
        struct drm_device *dev = obj->dev;
        drm_i915_private_t *dev_priv = dev->dev_private;
        struct drm_i915_gem_object *obj_priv = obj->driver_private;

        i915_verify_inactive(dev, __FILE__, __LINE__);
        if (obj_priv->pin_count != 0)
                list_del_init(&obj_priv->list);
        else
                list_move_tail(&obj_priv->list, &dev_priv->mm.inactive_list);

        if (obj_priv->active) {
                obj_priv->active = 0;
                drm_gem_object_unreference(obj);
        }
        i915_verify_inactive(dev, __FILE__, __LINE__);
}
/**
 * Creates a new sequence number, emitting a write of it to the status page
 * plus an interrupt, which will trigger i915_user_interrupt_handler.
 *
 * Must be called with struct_lock held.
 *
 * Returned sequence numbers are nonzero on success.
 */
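/*
 * Callers record the returned seqno in obj_priv->last_rendering_seqno so
 * that i915_wait_request() and i915_gem_retire_requests() can later tell
 * when rendering to the object has completed.
 */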
543 i915_add_request(struct drm_device
*dev
, uint32_t flush_domains
)
545 drm_i915_private_t
*dev_priv
= dev
->dev_private
;
546 struct drm_i915_gem_request
*request
;
551 request
= drm_calloc(1, sizeof(*request
), DRM_MEM_DRIVER
);
555 /* Grab the seqno we're going to make this request be, and bump the
556 * next (skipping 0 so it can be the reserved no-seqno value).
558 seqno
= dev_priv
->mm
.next_gem_seqno
;
559 dev_priv
->mm
.next_gem_seqno
++;
560 if (dev_priv
->mm
.next_gem_seqno
== 0)
561 dev_priv
->mm
.next_gem_seqno
++;
564 OUT_RING(MI_STORE_DWORD_INDEX
);
565 OUT_RING(I915_GEM_HWS_INDEX
<< MI_STORE_DWORD_INDEX_SHIFT
);
568 OUT_RING(MI_USER_INTERRUPT
);
571 DRM_DEBUG("%d\n", seqno
);
573 request
->seqno
= seqno
;
574 request
->emitted_jiffies
= jiffies
;
575 request
->flush_domains
= flush_domains
;
576 was_empty
= list_empty(&dev_priv
->mm
.request_list
);
577 list_add_tail(&request
->list
, &dev_priv
->mm
.request_list
);
580 schedule_delayed_work(&dev_priv
->mm
.retire_work
, HZ
);
/**
 * Command execution barrier
 *
 * Ensures that all commands in the ring are finished
 * before signalling the CPU
 */
static uint32_t
i915_retire_commands(struct drm_device *dev)
{
        drm_i915_private_t *dev_priv = dev->dev_private;
        uint32_t cmd = MI_FLUSH | MI_NO_WRITE_FLUSH;
        uint32_t flush_domains = 0;
        RING_LOCALS;

        /* The sampler always gets flushed on i965 (sigh) */
        if (IS_I965G(dev))
                flush_domains |= I915_GEM_DOMAIN_SAMPLER;
        BEGIN_LP_RING(2);
        OUT_RING(cmd);
        OUT_RING(0); /* noop */
        ADVANCE_LP_RING();
        return flush_domains;
}
609 * Moves buffers associated only with the given active seqno from the active
610 * to inactive list, potentially freeing them.
613 i915_gem_retire_request(struct drm_device
*dev
,
614 struct drm_i915_gem_request
*request
)
616 drm_i915_private_t
*dev_priv
= dev
->dev_private
;
618 /* Move any buffers on the active list that are no longer referenced
619 * by the ringbuffer to the flushing/inactive lists as appropriate.
621 while (!list_empty(&dev_priv
->mm
.active_list
)) {
622 struct drm_gem_object
*obj
;
623 struct drm_i915_gem_object
*obj_priv
;
625 obj_priv
= list_first_entry(&dev_priv
->mm
.active_list
,
626 struct drm_i915_gem_object
,
630 /* If the seqno being retired doesn't match the oldest in the
631 * list, then the oldest in the list must still be newer than
634 if (obj_priv
->last_rendering_seqno
!= request
->seqno
)
637 DRM_INFO("%s: retire %d moves to inactive list %p\n",
638 __func__
, request
->seqno
, obj
);
641 if (obj
->write_domain
!= 0) {
642 list_move_tail(&obj_priv
->list
,
643 &dev_priv
->mm
.flushing_list
);
645 i915_gem_object_move_to_inactive(obj
);
649 if (request
->flush_domains
!= 0) {
650 struct drm_i915_gem_object
*obj_priv
, *next
;
652 /* Clear the write domain and activity from any buffers
653 * that are just waiting for a flush matching the one retired.
655 list_for_each_entry_safe(obj_priv
, next
,
656 &dev_priv
->mm
.flushing_list
, list
) {
657 struct drm_gem_object
*obj
= obj_priv
->obj
;
659 if (obj
->write_domain
& request
->flush_domains
) {
660 obj
->write_domain
= 0;
661 i915_gem_object_move_to_inactive(obj
);
/**
 * Returns true if seq1 is later than seq2.
 */
static int
i915_seqno_passed(uint32_t seq1, uint32_t seq2)
{
        return (int32_t)(seq1 - seq2) >= 0;
}
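/*
 * The signed subtraction keeps the comparison correct across 32-bit
 * wrap-around of the sequence counter.  For example (illustrative values,
 * not from the original source):
 *   i915_seqno_passed(0x00000002, 0xfffffffe) -> (int32_t)4  >= 0 -> true
 *   i915_seqno_passed(0xfffffffe, 0x00000002) -> (int32_t)-4 >= 0 -> false
 */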
uint32_t
i915_get_gem_seqno(struct drm_device *dev)
{
        drm_i915_private_t *dev_priv = dev->dev_private;

        return READ_HWSP(dev_priv, I915_GEM_HWS_INDEX);
}
/**
 * This function clears the request list as sequence numbers are passed.
 */
void
i915_gem_retire_requests(struct drm_device *dev)
{
        drm_i915_private_t *dev_priv = dev->dev_private;
        uint32_t seqno;

        seqno = i915_get_gem_seqno(dev);

        while (!list_empty(&dev_priv->mm.request_list)) {
                struct drm_i915_gem_request *request;
                uint32_t retiring_seqno;

                request = list_first_entry(&dev_priv->mm.request_list,
                                           struct drm_i915_gem_request,
                                           list);
                retiring_seqno = request->seqno;

                if (i915_seqno_passed(seqno, retiring_seqno) ||
                    dev_priv->mm.wedged) {
                        i915_gem_retire_request(dev, request);

                        list_del(&request->list);
                        drm_free(request, sizeof(*request), DRM_MEM_DRIVER);
                } else
                        break;
        }
}
void
i915_gem_retire_work_handler(struct work_struct *work)
{
        drm_i915_private_t *dev_priv;
        struct drm_device *dev;

        dev_priv = container_of(work, drm_i915_private_t,
                                mm.retire_work.work);
        dev = dev_priv->dev;

        mutex_lock(&dev->struct_mutex);
        i915_gem_retire_requests(dev);
        if (!list_empty(&dev_priv->mm.request_list))
                schedule_delayed_work(&dev_priv->mm.retire_work, HZ);
        mutex_unlock(&dev->struct_mutex);
}
734 * Waits for a sequence number to be signaled, and cleans up the
735 * request and object lists appropriately for that event.
738 i915_wait_request(struct drm_device
*dev
, uint32_t seqno
)
740 drm_i915_private_t
*dev_priv
= dev
->dev_private
;
745 if (!i915_seqno_passed(i915_get_gem_seqno(dev
), seqno
)) {
746 dev_priv
->mm
.waiting_gem_seqno
= seqno
;
747 i915_user_irq_get(dev
);
748 ret
= wait_event_interruptible(dev_priv
->irq_queue
,
749 i915_seqno_passed(i915_get_gem_seqno(dev
),
751 dev_priv
->mm
.wedged
);
752 i915_user_irq_put(dev
);
753 dev_priv
->mm
.waiting_gem_seqno
= 0;
755 if (dev_priv
->mm
.wedged
)
758 if (ret
&& ret
!= -ERESTARTSYS
)
759 DRM_ERROR("%s returns %d (awaiting %d at %d)\n",
760 __func__
, ret
, seqno
, i915_get_gem_seqno(dev
));
762 /* Directly dispatch request retiring. While we have the work queue
763 * to handle this, the waiter on a request often wants an associated
764 * buffer to have made it to the inactive list, and we would need
765 * a separate wait queue to handle that.
768 i915_gem_retire_requests(dev
);
774 i915_gem_flush(struct drm_device
*dev
,
775 uint32_t invalidate_domains
,
776 uint32_t flush_domains
)
778 drm_i915_private_t
*dev_priv
= dev
->dev_private
;
783 DRM_INFO("%s: invalidate %08x flush %08x\n", __func__
,
784 invalidate_domains
, flush_domains
);
787 if (flush_domains
& I915_GEM_DOMAIN_CPU
)
788 drm_agp_chipset_flush(dev
);
790 if ((invalidate_domains
| flush_domains
) & ~(I915_GEM_DOMAIN_CPU
|
791 I915_GEM_DOMAIN_GTT
)) {
                /*
                 * read/write caches:
                 *
                 * I915_GEM_DOMAIN_RENDER is always invalidated, but is
                 * only flushed if MI_NO_WRITE_FLUSH is unset.  On 965, it is
                 * also flushed at 2d versus 3d pipeline switches.
                 *
                 * read-only caches:
                 *
                 * I915_GEM_DOMAIN_SAMPLER is flushed on pre-965 if
                 * MI_READ_FLUSH is set, and is always flushed on 965.
                 *
                 * I915_GEM_DOMAIN_COMMAND may not exist?
                 *
                 * I915_GEM_DOMAIN_INSTRUCTION, which exists on 965, is
                 * invalidated when MI_EXE_FLUSH is set.
                 *
                 * I915_GEM_DOMAIN_VERTEX, which exists on 965, is
                 * invalidated with every MI_FLUSH.
                 *
                 * On 965, TLBs associated with I915_GEM_DOMAIN_COMMAND
                 * and I915_GEM_DOMAIN_CPU are invalidated at PTE write and
                 * I915_GEM_DOMAIN_RENDER and I915_GEM_DOMAIN_SAMPLER
                 * are flushed at any MI_FLUSH.
                 */
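                /*
                 * Illustrative example (worked from the code below, not from
                 * the original comments): on pre-965, invalidating SAMPLER
                 * while flushing RENDER yields
                 *   cmd = MI_FLUSH | MI_READ_FLUSH  (MI_NO_WRITE_FLUSH cleared)
                 */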
820 cmd
= MI_FLUSH
| MI_NO_WRITE_FLUSH
;
821 if ((invalidate_domains
|flush_domains
) &
822 I915_GEM_DOMAIN_RENDER
)
823 cmd
&= ~MI_NO_WRITE_FLUSH
;
824 if (!IS_I965G(dev
)) {
826 * On the 965, the sampler cache always gets flushed
827 * and this bit is reserved.
829 if (invalidate_domains
& I915_GEM_DOMAIN_SAMPLER
)
830 cmd
|= MI_READ_FLUSH
;
832 if (invalidate_domains
& I915_GEM_DOMAIN_INSTRUCTION
)
836 DRM_INFO("%s: queue flush %08x to ring\n", __func__
, cmd
);
840 OUT_RING(0); /* noop */
846 * Ensures that all rendering to the object has completed and the object is
847 * safe to unbind from the GTT or access from the CPU.
850 i915_gem_object_wait_rendering(struct drm_gem_object
*obj
)
852 struct drm_device
*dev
= obj
->dev
;
853 struct drm_i915_gem_object
*obj_priv
= obj
->driver_private
;
856 /* If there are writes queued to the buffer, flush and
857 * create a new seqno to wait for.
859 if (obj
->write_domain
& ~(I915_GEM_DOMAIN_CPU
|I915_GEM_DOMAIN_GTT
)) {
860 uint32_t write_domain
= obj
->write_domain
;
862 DRM_INFO("%s: flushing object %p from write domain %08x\n",
863 __func__
, obj
, write_domain
);
865 i915_gem_flush(dev
, 0, write_domain
);
867 i915_gem_object_move_to_active(obj
);
868 obj_priv
->last_rendering_seqno
= i915_add_request(dev
,
870 BUG_ON(obj_priv
->last_rendering_seqno
== 0);
872 DRM_INFO("%s: flush moves to exec list %p\n", __func__
, obj
);
876 /* If there is rendering queued on the buffer being evicted, wait for
879 if (obj_priv
->active
) {
881 DRM_INFO("%s: object %p wait for seqno %08x\n",
882 __func__
, obj
, obj_priv
->last_rendering_seqno
);
884 ret
= i915_wait_request(dev
, obj_priv
->last_rendering_seqno
);
/**
 * Unbinds an object from the GTT aperture.
 */
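/*
 * Unbinding (below) waits for any rendering, moves the object back to the
 * CPU domain so pending writes are flushed, releases the AGP binding and
 * the shmem page list, frees the drm_mm node, and drops the object off
 * whatever LRU list it was on.
 */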
896 i915_gem_object_unbind(struct drm_gem_object
*obj
)
898 struct drm_device
*dev
= obj
->dev
;
899 struct drm_i915_gem_object
*obj_priv
= obj
->driver_private
;
903 DRM_INFO("%s:%d %p\n", __func__
, __LINE__
, obj
);
904 DRM_INFO("gtt_space %p\n", obj_priv
->gtt_space
);
906 if (obj_priv
->gtt_space
== NULL
)
909 if (obj_priv
->pin_count
!= 0) {
910 DRM_ERROR("Attempting to unbind pinned buffer\n");
914 /* Wait for any rendering to complete
916 ret
= i915_gem_object_wait_rendering(obj
);
918 DRM_ERROR("wait_rendering failed: %d\n", ret
);
922 /* Move the object to the CPU domain to ensure that
923 * any possible CPU writes while it's not in the GTT
924 * are flushed when we go to remap it. This will
925 * also ensure that all pending GPU writes are finished
928 ret
= i915_gem_object_set_domain(obj
, I915_GEM_DOMAIN_CPU
,
929 I915_GEM_DOMAIN_CPU
);
931 DRM_ERROR("set_domain failed: %d\n", ret
);
935 if (obj_priv
->agp_mem
!= NULL
) {
936 drm_unbind_agp(obj_priv
->agp_mem
);
937 drm_free_agp(obj_priv
->agp_mem
, obj
->size
/ PAGE_SIZE
);
938 obj_priv
->agp_mem
= NULL
;
941 BUG_ON(obj_priv
->active
);
943 i915_gem_object_free_page_list(obj
);
945 if (obj_priv
->gtt_space
) {
946 atomic_dec(&dev
->gtt_count
);
947 atomic_sub(obj
->size
, &dev
->gtt_memory
);
949 drm_mm_put_block(obj_priv
->gtt_space
);
950 obj_priv
->gtt_space
= NULL
;
953 /* Remove ourselves from the LRU list if present. */
954 if (!list_empty(&obj_priv
->list
))
955 list_del_init(&obj_priv
->list
);
961 i915_gem_evict_something(struct drm_device
*dev
)
963 drm_i915_private_t
*dev_priv
= dev
->dev_private
;
964 struct drm_gem_object
*obj
;
965 struct drm_i915_gem_object
*obj_priv
;
969 /* If there's an inactive buffer available now, grab it
972 if (!list_empty(&dev_priv
->mm
.inactive_list
)) {
973 obj_priv
= list_first_entry(&dev_priv
->mm
.inactive_list
,
974 struct drm_i915_gem_object
,
977 BUG_ON(obj_priv
->pin_count
!= 0);
979 DRM_INFO("%s: evicting %p\n", __func__
, obj
);
981 BUG_ON(obj_priv
->active
);
983 /* Wait on the rendering and unbind the buffer. */
984 ret
= i915_gem_object_unbind(obj
);
988 /* If we didn't get anything, but the ring is still processing
989 * things, wait for one of those things to finish and hopefully
990 * leave us a buffer to evict.
992 if (!list_empty(&dev_priv
->mm
.request_list
)) {
993 struct drm_i915_gem_request
*request
;
995 request
= list_first_entry(&dev_priv
->mm
.request_list
,
996 struct drm_i915_gem_request
,
999 ret
= i915_wait_request(dev
, request
->seqno
);
1003 /* if waiting caused an object to become inactive,
1004 * then loop around and wait for it. Otherwise, we
1005 * assume that waiting freed and unbound something,
1006 * so there should now be some space in the GTT
1008 if (!list_empty(&dev_priv
->mm
.inactive_list
))
1013 /* If we didn't have anything on the request list but there
1014 * are buffers awaiting a flush, emit one and try again.
1015 * When we wait on it, those buffers waiting for that flush
1016 * will get moved to inactive.
1018 if (!list_empty(&dev_priv
->mm
.flushing_list
)) {
1019 obj_priv
= list_first_entry(&dev_priv
->mm
.flushing_list
,
1020 struct drm_i915_gem_object
,
1022 obj
= obj_priv
->obj
;
1027 i915_add_request(dev
, obj
->write_domain
);
1033 DRM_ERROR("inactive empty %d request empty %d "
1034 "flushing empty %d\n",
1035 list_empty(&dev_priv
->mm
.inactive_list
),
1036 list_empty(&dev_priv
->mm
.request_list
),
1037 list_empty(&dev_priv
->mm
.flushing_list
));
1038 /* If we didn't do any of the above, there's nothing to be done
1039 * and we just can't fit it in.
1047 i915_gem_object_get_page_list(struct drm_gem_object
*obj
)
1049 struct drm_i915_gem_object
*obj_priv
= obj
->driver_private
;
1051 struct address_space
*mapping
;
1052 struct inode
*inode
;
1056 if (obj_priv
->page_list
)
1059 /* Get the list of pages out of our struct file. They'll be pinned
1060 * at this point until we release them.
1062 page_count
= obj
->size
/ PAGE_SIZE
;
1063 BUG_ON(obj_priv
->page_list
!= NULL
);
1064 obj_priv
->page_list
= drm_calloc(page_count
, sizeof(struct page
*),
1066 if (obj_priv
->page_list
== NULL
) {
1067 DRM_ERROR("Faled to allocate page list\n");
1071 inode
= obj
->filp
->f_path
.dentry
->d_inode
;
1072 mapping
= inode
->i_mapping
;
1073 for (i
= 0; i
< page_count
; i
++) {
1074 page
= read_mapping_page(mapping
, i
, NULL
);
1076 ret
= PTR_ERR(page
);
1077 DRM_ERROR("read_mapping_page failed: %d\n", ret
);
1078 i915_gem_object_free_page_list(obj
);
1081 obj_priv
->page_list
[i
] = page
;
/**
 * Finds free space in the GTT aperture and binds the object there.
 */
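/*
 * The flow below: search dev_priv->mm.gtt_space for a large-enough hole,
 * pin the object's shmem pages via i915_gem_object_get_page_list(), then
 * bind them with drm_agp_bind_pages() at the chosen gtt_offset.  If no
 * hole is found, i915_gem_evict_something() is used and the search is
 * tried again.
 */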
1090 i915_gem_object_bind_to_gtt(struct drm_gem_object
*obj
, unsigned alignment
)
1092 struct drm_device
*dev
= obj
->dev
;
1093 drm_i915_private_t
*dev_priv
= dev
->dev_private
;
1094 struct drm_i915_gem_object
*obj_priv
= obj
->driver_private
;
1095 struct drm_mm_node
*free_space
;
1096 int page_count
, ret
;
1099 alignment
= PAGE_SIZE
;
1100 if (alignment
& (PAGE_SIZE
- 1)) {
1101 DRM_ERROR("Invalid object alignment requested %u\n", alignment
);
1106 free_space
= drm_mm_search_free(&dev_priv
->mm
.gtt_space
,
1107 obj
->size
, alignment
, 0);
1108 if (free_space
!= NULL
) {
1109 obj_priv
->gtt_space
= drm_mm_get_block(free_space
, obj
->size
,
1111 if (obj_priv
->gtt_space
!= NULL
) {
1112 obj_priv
->gtt_space
->private = obj
;
1113 obj_priv
->gtt_offset
= obj_priv
->gtt_space
->start
;
1116 if (obj_priv
->gtt_space
== NULL
) {
1117 /* If the gtt is empty and we're still having trouble
1118 * fitting our object in, we're out of memory.
1121 DRM_INFO("%s: GTT full, evicting something\n", __func__
);
1123 if (list_empty(&dev_priv
->mm
.inactive_list
) &&
1124 list_empty(&dev_priv
->mm
.flushing_list
) &&
1125 list_empty(&dev_priv
->mm
.active_list
)) {
1126 DRM_ERROR("GTT full, but LRU list empty\n");
1130 ret
= i915_gem_evict_something(dev
);
1132 DRM_ERROR("Failed to evict a buffer %d\n", ret
);
1139 DRM_INFO("Binding object of size %d at 0x%08x\n",
1140 obj
->size
, obj_priv
->gtt_offset
);
1142 ret
= i915_gem_object_get_page_list(obj
);
1144 drm_mm_put_block(obj_priv
->gtt_space
);
1145 obj_priv
->gtt_space
= NULL
;
1149 page_count
= obj
->size
/ PAGE_SIZE
;
1150 /* Create an AGP memory structure pointing at our pages, and bind it
1153 obj_priv
->agp_mem
= drm_agp_bind_pages(dev
,
1154 obj_priv
->page_list
,
1156 obj_priv
->gtt_offset
);
1157 if (obj_priv
->agp_mem
== NULL
) {
1158 i915_gem_object_free_page_list(obj
);
1159 drm_mm_put_block(obj_priv
->gtt_space
);
1160 obj_priv
->gtt_space
= NULL
;
1163 atomic_inc(&dev
->gtt_count
);
1164 atomic_add(obj
->size
, &dev
->gtt_memory
);
1166 /* Assert that the object is not currently in any GPU domain. As it
1167 * wasn't in the GTT, there shouldn't be any way it could have been in
1170 BUG_ON(obj
->read_domains
& ~(I915_GEM_DOMAIN_CPU
|I915_GEM_DOMAIN_GTT
));
1171 BUG_ON(obj
->write_domain
& ~(I915_GEM_DOMAIN_CPU
|I915_GEM_DOMAIN_GTT
));
1177 i915_gem_clflush_object(struct drm_gem_object
*obj
)
1179 struct drm_i915_gem_object
*obj_priv
= obj
->driver_private
;
1181 /* If we don't have a page list set up, then we're not pinned
1182 * to GPU, and we can ignore the cache flush because it'll happen
1183 * again at bind time.
1185 if (obj_priv
->page_list
== NULL
)
1188 drm_clflush_pages(obj_priv
->page_list
, obj
->size
/ PAGE_SIZE
);
1192 * Set the next domain for the specified object. This
1193 * may not actually perform the necessary flushing/invaliding though,
1194 * as that may want to be batched with other set_domain operations
1196 * This is (we hope) the only really tricky part of gem. The goal
1197 * is fairly simple -- track which caches hold bits of the object
1198 * and make sure they remain coherent. A few concrete examples may
1199 * help to explain how it works. For shorthand, we use the notation
1200 * (read_domains, write_domain), e.g. (CPU, CPU) to indicate the
1201 * a pair of read and write domain masks.
1203 * Case 1: the batch buffer
1209 * 5. Unmapped from GTT
1212 * Let's take these a step at a time
1215 * Pages allocated from the kernel may still have
1216 * cache contents, so we set them to (CPU, CPU) always.
1217 * 2. Written by CPU (using pwrite)
1218 * The pwrite function calls set_domain (CPU, CPU) and
1219 * this function does nothing (as nothing changes)
1221 * This function asserts that the object is not
1222 * currently in any GPU-based read or write domains
1224 * i915_gem_execbuffer calls set_domain (COMMAND, 0).
1225 * As write_domain is zero, this function adds in the
1226 * current read domains (CPU+COMMAND, 0).
1227 * flush_domains is set to CPU.
1228 * invalidate_domains is set to COMMAND
1229 * clflush is run to get data out of the CPU caches
1230 * then i915_dev_set_domain calls i915_gem_flush to
1231 * emit an MI_FLUSH and drm_agp_chipset_flush
1232 * 5. Unmapped from GTT
1233 * i915_gem_object_unbind calls set_domain (CPU, CPU)
1234 * flush_domains and invalidate_domains end up both zero
1235 * so no flushing/invalidating happens
1239 * Case 2: The shared render buffer
1243 * 3. Read/written by GPU
1244 * 4. set_domain to (CPU,CPU)
1245 * 5. Read/written by CPU
1246 * 6. Read/written by GPU
1249 * Same as last example, (CPU, CPU)
1251 * Nothing changes (assertions find that it is not in the GPU)
1252 * 3. Read/written by GPU
1253 * execbuffer calls set_domain (RENDER, RENDER)
1254 * flush_domains gets CPU
1255 * invalidate_domains gets GPU
1257 * MI_FLUSH and drm_agp_chipset_flush
1258 * 4. set_domain (CPU, CPU)
1259 * flush_domains gets GPU
1260 * invalidate_domains gets CPU
1261 * wait_rendering (obj) to make sure all drawing is complete.
1262 * This will include an MI_FLUSH to get the data from GPU
1264 * clflush (obj) to invalidate the CPU cache
1265 * Another MI_FLUSH in i915_gem_flush (eliminate this somehow?)
1266 * 5. Read/written by CPU
1267 * cache lines are loaded and dirtied
1268 * 6. Read written by GPU
1269 * Same as last GPU access
1271 * Case 3: The constant buffer
1276 * 4. Updated (written) by CPU again
1285 * flush_domains = CPU
1286 * invalidate_domains = RENDER
1289 * drm_agp_chipset_flush
1290 * 4. Updated (written) by CPU again
1292 * flush_domains = 0 (no previous write domain)
1293 * invalidate_domains = 0 (no new read domains)
1296 * flush_domains = CPU
1297 * invalidate_domains = RENDER
1300 * drm_agp_chipset_flush
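/*
 * In short (restating the cases above): flush_domains accumulates the
 * object's current write domain whenever it is about to change, and
 * invalidate_domains accumulates any newly requested read domains the
 * object is not already coherent with.  E.g. an object last written by the
 * CPU that is about to be read by the render engine contributes
 * flush_domains |= I915_GEM_DOMAIN_CPU and
 * invalidate_domains |= I915_GEM_DOMAIN_RENDER.
 */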
1303 i915_gem_object_set_domain(struct drm_gem_object
*obj
,
1304 uint32_t read_domains
,
1305 uint32_t write_domain
)
1307 struct drm_device
*dev
= obj
->dev
;
1308 struct drm_i915_gem_object
*obj_priv
= obj
->driver_private
;
1309 uint32_t invalidate_domains
= 0;
1310 uint32_t flush_domains
= 0;
1314 DRM_INFO("%s: object %p read %08x -> %08x write %08x -> %08x\n",
1316 obj
->read_domains
, read_domains
,
1317 obj
->write_domain
, write_domain
);
1320 * If the object isn't moving to a new write domain,
1321 * let the object stay in multiple read domains
1323 if (write_domain
== 0)
1324 read_domains
|= obj
->read_domains
;
1326 obj_priv
->dirty
= 1;
1329 * Flush the current write domain if
1330 * the new read domains don't match. Invalidate
1331 * any read domains which differ from the old
1334 if (obj
->write_domain
&& obj
->write_domain
!= read_domains
) {
1335 flush_domains
|= obj
->write_domain
;
1336 invalidate_domains
|= read_domains
& ~obj
->write_domain
;
1339 * Invalidate any read caches which may have
1340 * stale data. That is, any new read domains.
1342 invalidate_domains
|= read_domains
& ~obj
->read_domains
;
1343 if ((flush_domains
| invalidate_domains
) & I915_GEM_DOMAIN_CPU
) {
1345 DRM_INFO("%s: CPU domain flush %08x invalidate %08x\n",
1346 __func__
, flush_domains
, invalidate_domains
);
1349 * If we're invaliding the CPU cache and flushing a GPU cache,
1350 * then pause for rendering so that the GPU caches will be
1351 * flushed before the cpu cache is invalidated
1353 if ((invalidate_domains
& I915_GEM_DOMAIN_CPU
) &&
1354 (flush_domains
& ~(I915_GEM_DOMAIN_CPU
|
1355 I915_GEM_DOMAIN_GTT
))) {
1356 ret
= i915_gem_object_wait_rendering(obj
);
1360 i915_gem_clflush_object(obj
);
1363 if ((write_domain
| flush_domains
) != 0)
1364 obj
->write_domain
= write_domain
;
1366 /* If we're invalidating the CPU domain, clear the per-page CPU
1367 * domain list as well.
1369 if (obj_priv
->page_cpu_valid
!= NULL
&&
1370 (write_domain
!= 0 ||
1371 read_domains
& I915_GEM_DOMAIN_CPU
)) {
1372 drm_free(obj_priv
->page_cpu_valid
, obj
->size
/ PAGE_SIZE
,
1374 obj_priv
->page_cpu_valid
= NULL
;
1376 obj
->read_domains
= read_domains
;
1378 dev
->invalidate_domains
|= invalidate_domains
;
1379 dev
->flush_domains
|= flush_domains
;
1381 DRM_INFO("%s: read %08x write %08x invalidate %08x flush %08x\n",
1383 obj
->read_domains
, obj
->write_domain
,
1384 dev
->invalidate_domains
, dev
->flush_domains
);
1390 * Set the read/write domain on a range of the object.
1392 * Currently only implemented for CPU reads, otherwise drops to normal
1393 * i915_gem_object_set_domain().
1396 i915_gem_object_set_domain_range(struct drm_gem_object
*obj
,
1399 uint32_t read_domains
,
1400 uint32_t write_domain
)
1402 struct drm_i915_gem_object
*obj_priv
= obj
->driver_private
;
1405 if (obj
->read_domains
& I915_GEM_DOMAIN_CPU
)
1408 if (read_domains
!= I915_GEM_DOMAIN_CPU
||
1410 return i915_gem_object_set_domain(obj
,
1411 read_domains
, write_domain
);
1413 /* Wait on any GPU rendering to the object to be flushed. */
1414 if (obj
->write_domain
& ~(I915_GEM_DOMAIN_CPU
| I915_GEM_DOMAIN_GTT
)) {
1415 ret
= i915_gem_object_wait_rendering(obj
);
1420 if (obj_priv
->page_cpu_valid
== NULL
) {
1421 obj_priv
->page_cpu_valid
= drm_calloc(1, obj
->size
/ PAGE_SIZE
,
1425 /* Flush the cache on any pages that are still invalid from the CPU's
1428 for (i
= offset
/ PAGE_SIZE
; i
<= (offset
+ size
- 1) / PAGE_SIZE
; i
++) {
1429 if (obj_priv
->page_cpu_valid
[i
])
1432 drm_clflush_pages(obj_priv
->page_list
+ i
, 1);
1434 obj_priv
->page_cpu_valid
[i
] = 1;
1441 * Once all of the objects have been set in the proper domain,
1442 * perform the necessary flush and invalidate operations.
1444 * Returns the write domains flushed, for use in flush tracking.
1447 i915_gem_dev_set_domain(struct drm_device
*dev
)
1449 uint32_t flush_domains
= dev
->flush_domains
;
1452 * Now that all the buffers are synced to the proper domains,
1453 * flush and invalidate the collected domains
1455 if (dev
->invalidate_domains
| dev
->flush_domains
) {
1457 DRM_INFO("%s: invalidate_domains %08x flush_domains %08x\n",
1459 dev
->invalidate_domains
,
1460 dev
->flush_domains
);
1463 dev
->invalidate_domains
,
1464 dev
->flush_domains
);
1465 dev
->invalidate_domains
= 0;
1466 dev
->flush_domains
= 0;
1469 return flush_domains
;
1473 * Pin an object to the GTT and evaluate the relocations landing in it.
1476 i915_gem_object_pin_and_relocate(struct drm_gem_object
*obj
,
1477 struct drm_file
*file_priv
,
1478 struct drm_i915_gem_exec_object
*entry
)
1480 struct drm_device
*dev
= obj
->dev
;
1481 struct drm_i915_gem_relocation_entry reloc
;
1482 struct drm_i915_gem_relocation_entry __user
*relocs
;
1483 struct drm_i915_gem_object
*obj_priv
= obj
->driver_private
;
1485 uint32_t last_reloc_offset
= -1;
1486 void *reloc_page
= NULL
;
1488 /* Choose the GTT offset for our buffer and put it there. */
1489 ret
= i915_gem_object_pin(obj
, (uint32_t) entry
->alignment
);
1493 entry
->offset
= obj_priv
->gtt_offset
;
1495 relocs
= (struct drm_i915_gem_relocation_entry __user
*)
1496 (uintptr_t) entry
->relocs_ptr
;
1497 /* Apply the relocations, using the GTT aperture to avoid cache
1498 * flushing requirements.
1500 for (i
= 0; i
< entry
->relocation_count
; i
++) {
1501 struct drm_gem_object
*target_obj
;
1502 struct drm_i915_gem_object
*target_obj_priv
;
1503 uint32_t reloc_val
, reloc_offset
, *reloc_entry
;
1506 ret
= copy_from_user(&reloc
, relocs
+ i
, sizeof(reloc
));
1508 i915_gem_object_unpin(obj
);
1512 target_obj
= drm_gem_object_lookup(obj
->dev
, file_priv
,
1513 reloc
.target_handle
);
1514 if (target_obj
== NULL
) {
1515 i915_gem_object_unpin(obj
);
1518 target_obj_priv
= target_obj
->driver_private
;
1520 /* The target buffer should have appeared before us in the
1521 * exec_object list, so it should have a GTT space bound by now.
1523 if (target_obj_priv
->gtt_space
== NULL
) {
1524 DRM_ERROR("No GTT space found for object %d\n",
1525 reloc
.target_handle
);
1526 drm_gem_object_unreference(target_obj
);
1527 i915_gem_object_unpin(obj
);
1531 if (reloc
.offset
> obj
->size
- 4) {
1532 DRM_ERROR("Relocation beyond object bounds: "
1533 "obj %p target %d offset %d size %d.\n",
1534 obj
, reloc
.target_handle
,
1535 (int) reloc
.offset
, (int) obj
->size
);
1536 drm_gem_object_unreference(target_obj
);
1537 i915_gem_object_unpin(obj
);
1540 if (reloc
.offset
& 3) {
1541 DRM_ERROR("Relocation not 4-byte aligned: "
1542 "obj %p target %d offset %d.\n",
1543 obj
, reloc
.target_handle
,
1544 (int) reloc
.offset
);
1545 drm_gem_object_unreference(target_obj
);
1546 i915_gem_object_unpin(obj
);
1550 if (reloc
.write_domain
&& target_obj
->pending_write_domain
&&
1551 reloc
.write_domain
!= target_obj
->pending_write_domain
) {
1552 DRM_ERROR("Write domain conflict: "
1553 "obj %p target %d offset %d "
1554 "new %08x old %08x\n",
1555 obj
, reloc
.target_handle
,
1558 target_obj
->pending_write_domain
);
1559 drm_gem_object_unreference(target_obj
);
1560 i915_gem_object_unpin(obj
);
1565 DRM_INFO("%s: obj %p offset %08x target %d "
1566 "read %08x write %08x gtt %08x "
1567 "presumed %08x delta %08x\n",
1571 (int) reloc
.target_handle
,
1572 (int) reloc
.read_domains
,
1573 (int) reloc
.write_domain
,
1574 (int) target_obj_priv
->gtt_offset
,
1575 (int) reloc
.presumed_offset
,
1579 target_obj
->pending_read_domains
|= reloc
.read_domains
;
1580 target_obj
->pending_write_domain
|= reloc
.write_domain
;
1582 /* If the relocation already has the right value in it, no
1583 * more work needs to be done.
1585 if (target_obj_priv
->gtt_offset
== reloc
.presumed_offset
) {
1586 drm_gem_object_unreference(target_obj
);
1590 /* Now that we're going to actually write some data in,
1591 * make sure that any rendering using this buffer's contents
1594 i915_gem_object_wait_rendering(obj
);
1596 /* As we're writing through the gtt, flush
1597 * any CPU writes before we write the relocations
1599 if (obj
->write_domain
& I915_GEM_DOMAIN_CPU
) {
1600 i915_gem_clflush_object(obj
);
1601 drm_agp_chipset_flush(dev
);
1602 obj
->write_domain
= 0;
1605 /* Map the page containing the relocation we're going to
1608 reloc_offset
= obj_priv
->gtt_offset
+ reloc
.offset
;
1609 if (reloc_page
== NULL
||
1610 (last_reloc_offset
& ~(PAGE_SIZE
- 1)) !=
1611 (reloc_offset
& ~(PAGE_SIZE
- 1))) {
1612 if (reloc_page
!= NULL
)
1613 iounmap(reloc_page
);
1615 reloc_page
= ioremap(dev
->agp
->base
+
1616 (reloc_offset
& ~(PAGE_SIZE
- 1)),
1618 last_reloc_offset
= reloc_offset
;
1619 if (reloc_page
== NULL
) {
1620 drm_gem_object_unreference(target_obj
);
1621 i915_gem_object_unpin(obj
);
1626 reloc_entry
= (uint32_t *)((char *)reloc_page
+
1627 (reloc_offset
& (PAGE_SIZE
- 1)));
1628 reloc_val
= target_obj_priv
->gtt_offset
+ reloc
.delta
;
1631 DRM_INFO("Applied relocation: %p@0x%08x %08x -> %08x\n",
1632 obj
, (unsigned int) reloc
.offset
,
1633 readl(reloc_entry
), reloc_val
);
1635 writel(reloc_val
, reloc_entry
);
1637 /* Write the updated presumed offset for this entry back out
1640 reloc
.presumed_offset
= target_obj_priv
->gtt_offset
;
1641 ret
= copy_to_user(relocs
+ i
, &reloc
, sizeof(reloc
));
1643 drm_gem_object_unreference(target_obj
);
1644 i915_gem_object_unpin(obj
);
1648 drm_gem_object_unreference(target_obj
);
1651 if (reloc_page
!= NULL
)
1652 iounmap(reloc_page
);
1656 i915_gem_dump_object(obj
, 128, __func__
, ~0);
1661 /** Dispatch a batchbuffer to the ring
1664 i915_dispatch_gem_execbuffer(struct drm_device
*dev
,
1665 struct drm_i915_gem_execbuffer
*exec
,
1666 uint64_t exec_offset
)
1668 drm_i915_private_t
*dev_priv
= dev
->dev_private
;
1669 struct drm_clip_rect __user
*boxes
= (struct drm_clip_rect __user
*)
1670 (uintptr_t) exec
->cliprects_ptr
;
1671 int nbox
= exec
->num_cliprects
;
1673 uint32_t exec_start
, exec_len
;
1676 exec_start
= (uint32_t) exec_offset
+ exec
->batch_start_offset
;
1677 exec_len
= (uint32_t) exec
->batch_len
;
1679 if ((exec_start
| exec_len
) & 0x7) {
1680 DRM_ERROR("alignment\n");
1687 count
= nbox
? nbox
: 1;
1689 for (i
= 0; i
< count
; i
++) {
1691 int ret
= i915_emit_box(dev
, boxes
, i
,
1692 exec
->DR1
, exec
->DR4
);
1697 if (IS_I830(dev
) || IS_845G(dev
)) {
1699 OUT_RING(MI_BATCH_BUFFER
);
1700 OUT_RING(exec_start
| MI_BATCH_NON_SECURE
);
1701 OUT_RING(exec_start
+ exec_len
- 4);
1706 if (IS_I965G(dev
)) {
1707 OUT_RING(MI_BATCH_BUFFER_START
|
1709 MI_BATCH_NON_SECURE_I965
);
1710 OUT_RING(exec_start
);
1712 OUT_RING(MI_BATCH_BUFFER_START
|
1714 OUT_RING(exec_start
| MI_BATCH_NON_SECURE
);
1720 /* XXX breadcrumb */
/* Throttle our rendering by waiting until the ring has completed our requests
 * emitted over 20 msec ago.
 *
 * This should get us reasonable parallelism between CPU and GPU but also
 * relatively low latency when blocking on a particular request to finish.
 */
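/*
 * Mechanically (see the function body below): each call waits on the seqno
 * recorded by the previous throttle call and then records the most recent
 * seqno for the next one, so the CPU never runs more than one throttle
 * interval ahead of the GPU.
 */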
1731 i915_gem_ring_throttle(struct drm_device
*dev
, struct drm_file
*file_priv
)
1733 struct drm_i915_file_private
*i915_file_priv
= file_priv
->driver_priv
;
1737 mutex_lock(&dev
->struct_mutex
);
1738 seqno
= i915_file_priv
->mm
.last_gem_throttle_seqno
;
1739 i915_file_priv
->mm
.last_gem_throttle_seqno
=
1740 i915_file_priv
->mm
.last_gem_seqno
;
1742 ret
= i915_wait_request(dev
, seqno
);
1743 mutex_unlock(&dev
->struct_mutex
);
1748 i915_gem_execbuffer(struct drm_device
*dev
, void *data
,
1749 struct drm_file
*file_priv
)
1751 drm_i915_private_t
*dev_priv
= dev
->dev_private
;
1752 struct drm_i915_file_private
*i915_file_priv
= file_priv
->driver_priv
;
1753 struct drm_i915_gem_execbuffer
*args
= data
;
1754 struct drm_i915_gem_exec_object
*exec_list
= NULL
;
1755 struct drm_gem_object
**object_list
= NULL
;
1756 struct drm_gem_object
*batch_obj
;
1757 int ret
, i
, pinned
= 0;
1758 uint64_t exec_offset
;
1759 uint32_t seqno
, flush_domains
;
1762 DRM_INFO("buffers_ptr %d buffer_count %d len %08x\n",
1763 (int) args
->buffers_ptr
, args
->buffer_count
, args
->batch_len
);
1766 /* Copy in the exec list from userland */
1767 exec_list
= drm_calloc(sizeof(*exec_list
), args
->buffer_count
,
1769 object_list
= drm_calloc(sizeof(*object_list
), args
->buffer_count
,
1771 if (exec_list
== NULL
|| object_list
== NULL
) {
1772 DRM_ERROR("Failed to allocate exec or object list "
1774 args
->buffer_count
);
1778 ret
= copy_from_user(exec_list
,
1779 (struct drm_i915_relocation_entry __user
*)
1780 (uintptr_t) args
->buffers_ptr
,
1781 sizeof(*exec_list
) * args
->buffer_count
);
1783 DRM_ERROR("copy %d exec entries failed %d\n",
1784 args
->buffer_count
, ret
);
1788 mutex_lock(&dev
->struct_mutex
);
1790 i915_verify_inactive(dev
, __FILE__
, __LINE__
);
1792 if (dev_priv
->mm
.wedged
) {
1793 DRM_ERROR("Execbuf while wedged\n");
1794 mutex_unlock(&dev
->struct_mutex
);
1798 if (dev_priv
->mm
.suspended
) {
1799 DRM_ERROR("Execbuf while VT-switched.\n");
1800 mutex_unlock(&dev
->struct_mutex
);
1804 /* Zero the gloabl flush/invalidate flags. These
1805 * will be modified as each object is bound to the
1808 dev
->invalidate_domains
= 0;
1809 dev
->flush_domains
= 0;
1811 /* Look up object handles and perform the relocations */
1812 for (i
= 0; i
< args
->buffer_count
; i
++) {
1813 object_list
[i
] = drm_gem_object_lookup(dev
, file_priv
,
1814 exec_list
[i
].handle
);
1815 if (object_list
[i
] == NULL
) {
1816 DRM_ERROR("Invalid object handle %d at index %d\n",
1817 exec_list
[i
].handle
, i
);
1822 object_list
[i
]->pending_read_domains
= 0;
1823 object_list
[i
]->pending_write_domain
= 0;
1824 ret
= i915_gem_object_pin_and_relocate(object_list
[i
],
1828 DRM_ERROR("object bind and relocate failed %d\n", ret
);
1834 /* Set the pending read domains for the batch buffer to COMMAND */
1835 batch_obj
= object_list
[args
->buffer_count
-1];
1836 batch_obj
->pending_read_domains
= I915_GEM_DOMAIN_COMMAND
;
1837 batch_obj
->pending_write_domain
= 0;
1839 i915_verify_inactive(dev
, __FILE__
, __LINE__
);
1841 for (i
= 0; i
< args
->buffer_count
; i
++) {
1842 struct drm_gem_object
*obj
= object_list
[i
];
1843 struct drm_i915_gem_object
*obj_priv
= obj
->driver_private
;
1845 if (obj_priv
->gtt_space
== NULL
) {
1846 /* We evicted the buffer in the process of validating
1847 * our set of buffers in. We could try to recover by
1848 * kicking them everything out and trying again from
1855 /* make sure all previous memory operations have passed */
1856 ret
= i915_gem_object_set_domain(obj
,
1857 obj
->pending_read_domains
,
1858 obj
->pending_write_domain
);
1863 i915_verify_inactive(dev
, __FILE__
, __LINE__
);
1865 /* Flush/invalidate caches and chipset buffer */
1866 flush_domains
= i915_gem_dev_set_domain(dev
);
1868 i915_verify_inactive(dev
, __FILE__
, __LINE__
);
1871 for (i
= 0; i
< args
->buffer_count
; i
++) {
1872 i915_gem_object_check_coherency(object_list
[i
],
1873 exec_list
[i
].handle
);
1877 exec_offset
= exec_list
[args
->buffer_count
- 1].offset
;
1880 i915_gem_dump_object(object_list
[args
->buffer_count
- 1],
1886 (void)i915_add_request(dev
, flush_domains
);
1888 /* Exec the batchbuffer */
1889 ret
= i915_dispatch_gem_execbuffer(dev
, args
, exec_offset
);
1891 DRM_ERROR("dispatch failed %d\n", ret
);
1896 * Ensure that the commands in the batch buffer are
1897 * finished before the interrupt fires
1899 flush_domains
= i915_retire_commands(dev
);
1901 i915_verify_inactive(dev
, __FILE__
, __LINE__
);
1904 * Get a seqno representing the execution of the current buffer,
1905 * which we can wait on. We would like to mitigate these interrupts,
1906 * likely by only creating seqnos occasionally (so that we have
1907 * *some* interrupts representing completion of buffers that we can
1908 * wait on when trying to clear up gtt space).
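        /*
         * The seqno obtained here is written into each buffer's
         * last_rendering_seqno below; the eviction and wait_rendering paths
         * key off that value when they need a specific buffer to be idle.
         */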
1910 seqno
= i915_add_request(dev
, flush_domains
);
1912 i915_file_priv
->mm
.last_gem_seqno
= seqno
;
1913 for (i
= 0; i
< args
->buffer_count
; i
++) {
1914 struct drm_gem_object
*obj
= object_list
[i
];
1915 struct drm_i915_gem_object
*obj_priv
= obj
->driver_private
;
1917 i915_gem_object_move_to_active(obj
);
1918 obj_priv
->last_rendering_seqno
= seqno
;
1920 DRM_INFO("%s: move to exec list %p\n", __func__
, obj
);
1924 i915_dump_lru(dev
, __func__
);
1927 i915_verify_inactive(dev
, __FILE__
, __LINE__
);
1929 /* Copy the new buffer offsets back to the user's exec list. */
1930 ret
= copy_to_user((struct drm_i915_relocation_entry __user
*)
1931 (uintptr_t) args
->buffers_ptr
,
1933 sizeof(*exec_list
) * args
->buffer_count
);
1935 DRM_ERROR("failed to copy %d exec entries "
1936 "back to user (%d)\n",
1937 args
->buffer_count
, ret
);
1939 if (object_list
!= NULL
) {
1940 for (i
= 0; i
< pinned
; i
++)
1941 i915_gem_object_unpin(object_list
[i
]);
1943 for (i
= 0; i
< args
->buffer_count
; i
++)
1944 drm_gem_object_unreference(object_list
[i
]);
1946 mutex_unlock(&dev
->struct_mutex
);
1949 drm_free(object_list
, sizeof(*object_list
) * args
->buffer_count
,
1951 drm_free(exec_list
, sizeof(*exec_list
) * args
->buffer_count
,
1958 i915_gem_object_pin(struct drm_gem_object
*obj
, uint32_t alignment
)
1960 struct drm_device
*dev
= obj
->dev
;
1961 struct drm_i915_gem_object
*obj_priv
= obj
->driver_private
;
1964 i915_verify_inactive(dev
, __FILE__
, __LINE__
);
1965 if (obj_priv
->gtt_space
== NULL
) {
1966 ret
= i915_gem_object_bind_to_gtt(obj
, alignment
);
1968 DRM_ERROR("Failure to bind: %d", ret
);
1972 obj_priv
->pin_count
++;
1974 /* If the object is not active and not pending a flush,
1975 * remove it from the inactive list
1977 if (obj_priv
->pin_count
== 1) {
1978 atomic_inc(&dev
->pin_count
);
1979 atomic_add(obj
->size
, &dev
->pin_memory
);
1980 if (!obj_priv
->active
&&
1981 (obj
->write_domain
& ~(I915_GEM_DOMAIN_CPU
|
1982 I915_GEM_DOMAIN_GTT
)) == 0 &&
1983 !list_empty(&obj_priv
->list
))
1984 list_del_init(&obj_priv
->list
);
1986 i915_verify_inactive(dev
, __FILE__
, __LINE__
);
1992 i915_gem_object_unpin(struct drm_gem_object
*obj
)
1994 struct drm_device
*dev
= obj
->dev
;
1995 drm_i915_private_t
*dev_priv
= dev
->dev_private
;
1996 struct drm_i915_gem_object
*obj_priv
= obj
->driver_private
;
1998 i915_verify_inactive(dev
, __FILE__
, __LINE__
);
1999 obj_priv
->pin_count
--;
2000 BUG_ON(obj_priv
->pin_count
< 0);
2001 BUG_ON(obj_priv
->gtt_space
== NULL
);
2003 /* If the object is no longer pinned, and is
2004 * neither active nor being flushed, then stick it on
2007 if (obj_priv
->pin_count
== 0) {
2008 if (!obj_priv
->active
&&
2009 (obj
->write_domain
& ~(I915_GEM_DOMAIN_CPU
|
2010 I915_GEM_DOMAIN_GTT
)) == 0)
2011 list_move_tail(&obj_priv
->list
,
2012 &dev_priv
->mm
.inactive_list
);
2013 atomic_dec(&dev
->pin_count
);
2014 atomic_sub(obj
->size
, &dev
->pin_memory
);
2016 i915_verify_inactive(dev
, __FILE__
, __LINE__
);
2020 i915_gem_pin_ioctl(struct drm_device
*dev
, void *data
,
2021 struct drm_file
*file_priv
)
2023 struct drm_i915_gem_pin
*args
= data
;
2024 struct drm_gem_object
*obj
;
2025 struct drm_i915_gem_object
*obj_priv
;
2028 mutex_lock(&dev
->struct_mutex
);
2030 obj
= drm_gem_object_lookup(dev
, file_priv
, args
->handle
);
2032 DRM_ERROR("Bad handle in i915_gem_pin_ioctl(): %d\n",
2034 mutex_unlock(&dev
->struct_mutex
);
2037 obj_priv
= obj
->driver_private
;
2039 ret
= i915_gem_object_pin(obj
, args
->alignment
);
2041 drm_gem_object_unreference(obj
);
2042 mutex_unlock(&dev
->struct_mutex
);
2046 /* XXX - flush the CPU caches for pinned objects
2047 * as the X server doesn't manage domains yet
2049 if (obj
->write_domain
& I915_GEM_DOMAIN_CPU
) {
2050 i915_gem_clflush_object(obj
);
2051 drm_agp_chipset_flush(dev
);
2052 obj
->write_domain
= 0;
2054 args
->offset
= obj_priv
->gtt_offset
;
2055 drm_gem_object_unreference(obj
);
2056 mutex_unlock(&dev
->struct_mutex
);
2062 i915_gem_unpin_ioctl(struct drm_device
*dev
, void *data
,
2063 struct drm_file
*file_priv
)
2065 struct drm_i915_gem_pin
*args
= data
;
2066 struct drm_gem_object
*obj
;
2068 mutex_lock(&dev
->struct_mutex
);
2070 obj
= drm_gem_object_lookup(dev
, file_priv
, args
->handle
);
2072 DRM_ERROR("Bad handle in i915_gem_unpin_ioctl(): %d\n",
2074 mutex_unlock(&dev
->struct_mutex
);
2078 i915_gem_object_unpin(obj
);
2080 drm_gem_object_unreference(obj
);
2081 mutex_unlock(&dev
->struct_mutex
);
2086 i915_gem_busy_ioctl(struct drm_device
*dev
, void *data
,
2087 struct drm_file
*file_priv
)
2089 struct drm_i915_gem_busy
*args
= data
;
2090 struct drm_gem_object
*obj
;
2091 struct drm_i915_gem_object
*obj_priv
;
2093 mutex_lock(&dev
->struct_mutex
);
2094 obj
= drm_gem_object_lookup(dev
, file_priv
, args
->handle
);
2096 DRM_ERROR("Bad handle in i915_gem_busy_ioctl(): %d\n",
2098 mutex_unlock(&dev
->struct_mutex
);
2102 obj_priv
= obj
->driver_private
;
2103 args
->busy
= obj_priv
->active
;
2105 drm_gem_object_unreference(obj
);
2106 mutex_unlock(&dev
->struct_mutex
);
2111 i915_gem_throttle_ioctl(struct drm_device
*dev
, void *data
,
2112 struct drm_file
*file_priv
)
2114 return i915_gem_ring_throttle(dev
, file_priv
);
2117 int i915_gem_init_object(struct drm_gem_object
*obj
)
2119 struct drm_i915_gem_object
*obj_priv
;
2121 obj_priv
= drm_calloc(1, sizeof(*obj_priv
), DRM_MEM_DRIVER
);
2122 if (obj_priv
== NULL
)
2126 * We've just allocated pages from the kernel,
2127 * so they've just been written by the CPU with
2128 * zeros. They'll need to be clflushed before we
2129 * use them with the GPU.
2131 obj
->write_domain
= I915_GEM_DOMAIN_CPU
;
2132 obj
->read_domains
= I915_GEM_DOMAIN_CPU
;
2134 obj
->driver_private
= obj_priv
;
2135 obj_priv
->obj
= obj
;
2136 INIT_LIST_HEAD(&obj_priv
->list
);
2140 void i915_gem_free_object(struct drm_gem_object
*obj
)
2142 struct drm_i915_gem_object
*obj_priv
= obj
->driver_private
;
2144 while (obj_priv
->pin_count
> 0)
2145 i915_gem_object_unpin(obj
);
2147 i915_gem_object_unbind(obj
);
2149 drm_free(obj_priv
->page_cpu_valid
, 1, DRM_MEM_DRIVER
);
2150 drm_free(obj
->driver_private
, 1, DRM_MEM_DRIVER
);
2154 i915_gem_set_domain(struct drm_gem_object
*obj
,
2155 struct drm_file
*file_priv
,
2156 uint32_t read_domains
,
2157 uint32_t write_domain
)
2159 struct drm_device
*dev
= obj
->dev
;
2161 uint32_t flush_domains
;
2163 BUG_ON(!mutex_is_locked(&dev
->struct_mutex
));
2165 ret
= i915_gem_object_set_domain(obj
, read_domains
, write_domain
);
2168 flush_domains
= i915_gem_dev_set_domain(obj
->dev
);
2170 if (flush_domains
& ~(I915_GEM_DOMAIN_CPU
|I915_GEM_DOMAIN_GTT
))
2171 (void) i915_add_request(dev
, flush_domains
);
2176 /** Unbinds all objects that are on the given buffer list. */
2178 i915_gem_evict_from_list(struct drm_device
*dev
, struct list_head
*head
)
2180 struct drm_gem_object
*obj
;
2181 struct drm_i915_gem_object
*obj_priv
;
2184 while (!list_empty(head
)) {
2185 obj_priv
= list_first_entry(head
,
2186 struct drm_i915_gem_object
,
2188 obj
= obj_priv
->obj
;
2190 if (obj_priv
->pin_count
!= 0) {
2191 DRM_ERROR("Pinned object in unbind list\n");
2192 mutex_unlock(&dev
->struct_mutex
);
2196 ret
= i915_gem_object_unbind(obj
);
2198 DRM_ERROR("Error unbinding object in LeaveVT: %d\n",
2200 mutex_unlock(&dev
->struct_mutex
);
2210 i915_gem_idle(struct drm_device
*dev
)
2212 drm_i915_private_t
*dev_priv
= dev
->dev_private
;
2213 uint32_t seqno
, cur_seqno
, last_seqno
;
2216 if (dev_priv
->mm
.suspended
)
2219 /* Hack! Don't let anybody do execbuf while we don't control the chip.
2220 * We need to replace this with a semaphore, or something.
2222 dev_priv
->mm
.suspended
= 1;
2224 i915_kernel_lost_context(dev
);
2226 /* Flush the GPU along with all non-CPU write domains
2228 i915_gem_flush(dev
, ~(I915_GEM_DOMAIN_CPU
|I915_GEM_DOMAIN_GTT
),
2229 ~(I915_GEM_DOMAIN_CPU
|I915_GEM_DOMAIN_GTT
));
2230 seqno
= i915_add_request(dev
, ~(I915_GEM_DOMAIN_CPU
|
2231 I915_GEM_DOMAIN_GTT
));
2234 mutex_unlock(&dev
->struct_mutex
);
2238 dev_priv
->mm
.waiting_gem_seqno
= seqno
;
2242 cur_seqno
= i915_get_gem_seqno(dev
);
2243 if (i915_seqno_passed(cur_seqno
, seqno
))
2245 if (last_seqno
== cur_seqno
) {
2246 if (stuck
++ > 100) {
2247 DRM_ERROR("hardware wedged\n");
2248 dev_priv
->mm
.wedged
= 1;
2249 DRM_WAKEUP(&dev_priv
->irq_queue
);
2254 last_seqno
= cur_seqno
;
2256 dev_priv
->mm
.waiting_gem_seqno
= 0;
2258 i915_gem_retire_requests(dev
);
2260 /* Active and flushing should now be empty as we've
2261 * waited for a sequence higher than any pending execbuffer
2263 BUG_ON(!list_empty(&dev_priv
->mm
.active_list
));
2264 BUG_ON(!list_empty(&dev_priv
->mm
.flushing_list
));
2266 /* Request should now be empty as we've also waited
2267 * for the last request in the list
2269 BUG_ON(!list_empty(&dev_priv
->mm
.request_list
));
2271 /* Move all buffers out of the GTT. */
2272 ret
= i915_gem_evict_from_list(dev
, &dev_priv
->mm
.inactive_list
);
2276 BUG_ON(!list_empty(&dev_priv
->mm
.active_list
));
2277 BUG_ON(!list_empty(&dev_priv
->mm
.flushing_list
));
2278 BUG_ON(!list_empty(&dev_priv
->mm
.inactive_list
));
2279 BUG_ON(!list_empty(&dev_priv
->mm
.request_list
));
2284 i915_gem_init_hws(struct drm_device
*dev
)
2286 drm_i915_private_t
*dev_priv
= dev
->dev_private
;
2287 struct drm_gem_object
*obj
;
2288 struct drm_i915_gem_object
*obj_priv
;
2291 /* If we need a physical address for the status page, it's already
2292 * initialized at driver load time.
2294 if (!I915_NEED_GFX_HWS(dev
))
2297 obj
= drm_gem_object_alloc(dev
, 4096);
2299 DRM_ERROR("Failed to allocate status page\n");
2302 obj_priv
= obj
->driver_private
;
2304 ret
= i915_gem_object_pin(obj
, 4096);
2306 drm_gem_object_unreference(obj
);
2310 dev_priv
->status_gfx_addr
= obj_priv
->gtt_offset
;
2311 dev_priv
->hws_map
.offset
= dev
->agp
->base
+ obj_priv
->gtt_offset
;
2312 dev_priv
->hws_map
.size
= 4096;
2313 dev_priv
->hws_map
.type
= 0;
2314 dev_priv
->hws_map
.flags
= 0;
2315 dev_priv
->hws_map
.mtrr
= 0;
2317 drm_core_ioremap(&dev_priv
->hws_map
, dev
);
2318 if (dev_priv
->hws_map
.handle
== NULL
) {
2319 DRM_ERROR("Failed to map status page.\n");
2320 memset(&dev_priv
->hws_map
, 0, sizeof(dev_priv
->hws_map
));
2321 drm_gem_object_unreference(obj
);
2324 dev_priv
->hws_obj
= obj
;
2325 dev_priv
->hw_status_page
= dev_priv
->hws_map
.handle
;
2326 memset(dev_priv
->hw_status_page
, 0, PAGE_SIZE
);
2327 I915_WRITE(HWS_PGA
, dev_priv
->status_gfx_addr
);
2328 DRM_DEBUG("hws offset: 0x%08x\n", dev_priv
->status_gfx_addr
);
2334 i915_gem_init_ringbuffer(struct drm_device
*dev
)
2336 drm_i915_private_t
*dev_priv
= dev
->dev_private
;
2337 struct drm_gem_object
*obj
;
2338 struct drm_i915_gem_object
*obj_priv
;
2341 ret
= i915_gem_init_hws(dev
);
2345 obj
= drm_gem_object_alloc(dev
, 128 * 1024);
2347 DRM_ERROR("Failed to allocate ringbuffer\n");
2350 obj_priv
= obj
->driver_private
;
2352 ret
= i915_gem_object_pin(obj
, 4096);
2354 drm_gem_object_unreference(obj
);
2358 /* Set up the kernel mapping for the ring. */
2359 dev_priv
->ring
.Size
= obj
->size
;
2360 dev_priv
->ring
.tail_mask
= obj
->size
- 1;
2362 dev_priv
->ring
.map
.offset
= dev
->agp
->base
+ obj_priv
->gtt_offset
;
2363 dev_priv
->ring
.map
.size
= obj
->size
;
2364 dev_priv
->ring
.map
.type
= 0;
2365 dev_priv
->ring
.map
.flags
= 0;
2366 dev_priv
->ring
.map
.mtrr
= 0;
2368 drm_core_ioremap(&dev_priv
->ring
.map
, dev
);
2369 if (dev_priv
->ring
.map
.handle
== NULL
) {
2370 DRM_ERROR("Failed to map ringbuffer.\n");
2371 memset(&dev_priv
->ring
, 0, sizeof(dev_priv
->ring
));
2372 drm_gem_object_unreference(obj
);
2375 dev_priv
->ring
.ring_obj
= obj
;
2376 dev_priv
->ring
.virtual_start
= dev_priv
->ring
.map
.handle
;
2378 /* Stop the ring if it's running. */
2379 I915_WRITE(PRB0_CTL
, 0);
2380 I915_WRITE(PRB0_HEAD
, 0);
2381 I915_WRITE(PRB0_TAIL
, 0);
2382 I915_WRITE(PRB0_START
, 0);
2384 /* Initialize the ring. */
2385 I915_WRITE(PRB0_START
, obj_priv
->gtt_offset
);
2386 I915_WRITE(PRB0_CTL
,
2387 ((obj
->size
- 4096) & RING_NR_PAGES
) |
2391 /* Update our cache of the ring state */
2392 i915_kernel_lost_context(dev
);
2398 i915_gem_cleanup_ringbuffer(struct drm_device
*dev
)
2400 drm_i915_private_t
*dev_priv
= dev
->dev_private
;
2402 if (dev_priv
->ring
.ring_obj
== NULL
)
2405 drm_core_ioremapfree(&dev_priv
->ring
.map
, dev
);
2407 i915_gem_object_unpin(dev_priv
->ring
.ring_obj
);
2408 drm_gem_object_unreference(dev_priv
->ring
.ring_obj
);
2409 dev_priv
->ring
.ring_obj
= NULL
;
2410 memset(&dev_priv
->ring
, 0, sizeof(dev_priv
->ring
));
2412 if (dev_priv
->hws_obj
!= NULL
) {
2413 i915_gem_object_unpin(dev_priv
->hws_obj
);
2414 drm_gem_object_unreference(dev_priv
->hws_obj
);
2415 dev_priv
->hws_obj
= NULL
;
2416 memset(&dev_priv
->hws_map
, 0, sizeof(dev_priv
->hws_map
));
2418 /* Write high address into HWS_PGA when disabling. */
2419 I915_WRITE(HWS_PGA
, 0x1ffff000);
2424 i915_gem_entervt_ioctl(struct drm_device
*dev
, void *data
,
2425 struct drm_file
*file_priv
)
2427 drm_i915_private_t
*dev_priv
= dev
->dev_private
;
2430 if (dev_priv
->mm
.wedged
) {
2431 DRM_ERROR("Reenabling wedged hardware, good luck\n");
2432 dev_priv
->mm
.wedged
= 0;
2435 ret
= i915_gem_init_ringbuffer(dev
);
2439 mutex_lock(&dev
->struct_mutex
);
2440 BUG_ON(!list_empty(&dev_priv
->mm
.active_list
));
2441 BUG_ON(!list_empty(&dev_priv
->mm
.flushing_list
));
2442 BUG_ON(!list_empty(&dev_priv
->mm
.inactive_list
));
2443 BUG_ON(!list_empty(&dev_priv
->mm
.request_list
));
2444 dev_priv
->mm
.suspended
= 0;
2445 mutex_unlock(&dev
->struct_mutex
);
2450 i915_gem_leavevt_ioctl(struct drm_device
*dev
, void *data
,
2451 struct drm_file
*file_priv
)
2455 mutex_lock(&dev
->struct_mutex
);
2456 ret
= i915_gem_idle(dev
);
2458 i915_gem_cleanup_ringbuffer(dev
);
2459 mutex_unlock(&dev
->struct_mutex
);
2465 i915_gem_lastclose(struct drm_device
*dev
)
2468 drm_i915_private_t
*dev_priv
= dev
->dev_private
;
2470 mutex_lock(&dev
->struct_mutex
);
2472 if (dev_priv
->ring
.ring_obj
!= NULL
) {
2473 ret
= i915_gem_idle(dev
);
2475 DRM_ERROR("failed to idle hardware: %d\n", ret
);
2477 i915_gem_cleanup_ringbuffer(dev
);
2480 mutex_unlock(&dev
->struct_mutex
);
2484 i915_gem_load(struct drm_device
*dev
)
2486 drm_i915_private_t
*dev_priv
= dev
->dev_private
;
2488 INIT_LIST_HEAD(&dev_priv
->mm
.active_list
);
2489 INIT_LIST_HEAD(&dev_priv
->mm
.flushing_list
);
2490 INIT_LIST_HEAD(&dev_priv
->mm
.inactive_list
);
2491 INIT_LIST_HEAD(&dev_priv
->mm
.request_list
);
2492 INIT_DELAYED_WORK(&dev_priv
->mm
.retire_work
,
2493 i915_gem_retire_work_handler
);
2494 dev_priv
->mm
.next_gem_seqno
= 1;
2496 i915_gem_detect_bit_6_swizzle(dev
);