/*
 * Copyright © 2008 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Eric Anholt <eric@anholt.net>
 *
 */
#include "drmP.h"
#include "drm.h"
#include "i915_drm.h"
#include "i915_drv.h"
#include <linux/swap.h>
static int
i915_gem_object_set_domain(struct drm_gem_object *obj,
			   uint32_t read_domains,
			   uint32_t write_domain);
static int
i915_gem_object_set_domain_range(struct drm_gem_object *obj,
				 uint64_t offset,
				 uint64_t size,
				 uint32_t read_domains,
				 uint32_t write_domain);
static int
i915_gem_set_domain(struct drm_gem_object *obj,
		    struct drm_file *file_priv,
		    uint32_t read_domains,
		    uint32_t write_domain);
static int i915_gem_object_get_page_list(struct drm_gem_object *obj);
static void i915_gem_object_free_page_list(struct drm_gem_object *obj);
static int i915_gem_object_wait_rendering(struct drm_gem_object *obj);
static void i915_gem_cleanup_ringbuffer(struct drm_device *dev);
int
i915_gem_init_ioctl(struct drm_device *dev, void *data,
		    struct drm_file *file_priv)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_i915_gem_init *args = data;

	mutex_lock(&dev->struct_mutex);

	if (args->gtt_start >= args->gtt_end ||
	    (args->gtt_start & (PAGE_SIZE - 1)) != 0 ||
	    (args->gtt_end & (PAGE_SIZE - 1)) != 0) {
		mutex_unlock(&dev->struct_mutex);
		return -EINVAL;
	}

	drm_mm_init(&dev_priv->mm.gtt_space, args->gtt_start,
		    args->gtt_end - args->gtt_start);

	dev->gtt_total = (uint32_t) (args->gtt_end - args->gtt_start);

	mutex_unlock(&dev->struct_mutex);

	return 0;
}
int
i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data,
			    struct drm_file *file_priv)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_i915_gem_get_aperture *args = data;
	struct drm_i915_gem_object *obj_priv;

	if (!(dev->driver->driver_features & DRIVER_GEM))
		return -ENODEV;

	args->aper_size = dev->gtt_total;
	args->aper_available_size = args->aper_size;

	list_for_each_entry(obj_priv, &dev_priv->mm.active_list, list) {
		if (obj_priv->pin_count > 0)
			args->aper_available_size -= obj_priv->obj->size;
	}

	return 0;
}
/**
 * Creates a new mm object and returns a handle to it.
 */
int
i915_gem_create_ioctl(struct drm_device *dev, void *data,
		      struct drm_file *file_priv)
{
	struct drm_i915_gem_create *args = data;
	struct drm_gem_object *obj;
	int handle, ret;

	args->size = roundup(args->size, PAGE_SIZE);

	/* Allocate the new object */
	obj = drm_gem_object_alloc(dev, args->size);
	if (obj == NULL)
		return -ENOMEM;

	ret = drm_gem_handle_create(file_priv, obj, &handle);
	mutex_lock(&dev->struct_mutex);
	drm_gem_object_handle_unreference(obj);
	mutex_unlock(&dev->struct_mutex);

	if (ret)
		return ret;

	args->handle = handle;

	return 0;
}
/**
 * Reads data from the object referenced by handle.
 *
 * On error, the contents of *data are undefined.
 */
int
i915_gem_pread_ioctl(struct drm_device *dev, void *data,
		     struct drm_file *file_priv)
{
	struct drm_i915_gem_pread *args = data;
	struct drm_gem_object *obj;
	struct drm_i915_gem_object *obj_priv;
	ssize_t read;
	loff_t offset;
	int ret;

	obj = drm_gem_object_lookup(dev, file_priv, args->handle);
	if (obj == NULL)
		return -EBADF;
	obj_priv = obj->driver_private;

	/* Bounds check source.
	 *
	 * XXX: This could use review for overflow issues...
	 */
	if (args->offset > obj->size || args->size > obj->size ||
	    args->offset + args->size > obj->size) {
		drm_gem_object_unreference(obj);
		return -EINVAL;
	}

	mutex_lock(&dev->struct_mutex);

	ret = i915_gem_object_set_domain_range(obj, args->offset, args->size,
					       I915_GEM_DOMAIN_CPU, 0);
	if (ret != 0) {
		drm_gem_object_unreference(obj);
		mutex_unlock(&dev->struct_mutex);
		return ret;
	}

	offset = args->offset;

	read = vfs_read(obj->filp, (char __user *)(uintptr_t)args->data_ptr,
			args->size, &offset);
	if (read != args->size) {
		drm_gem_object_unreference(obj);
		mutex_unlock(&dev->struct_mutex);
		return -EINVAL;
	}

	drm_gem_object_unreference(obj);
	mutex_unlock(&dev->struct_mutex);

	return 0;
}
/* This is the fast write path which cannot handle
 * page faults in the source data
 */

static inline int
fast_user_write(struct io_mapping *mapping,
		loff_t page_base, int page_offset,
		char __user *user_data,
		int length)
{
	char *vaddr_atomic;
	unsigned long unwritten;

	vaddr_atomic = io_mapping_map_atomic_wc(mapping, page_base);
	unwritten = __copy_from_user_inatomic_nocache(vaddr_atomic + page_offset,
						      user_data, length);
	io_mapping_unmap_atomic(vaddr_atomic);
	if (unwritten)
		return -EFAULT;
	return 0;
}

/* Here's the write path which can sleep for
 * page faults
 */

static inline int
slow_user_write(struct io_mapping *mapping,
		loff_t page_base, int page_offset,
		char __user *user_data,
		int length)
{
	char __iomem *vaddr;
	unsigned long unwritten;

	vaddr = io_mapping_map_wc(mapping, page_base);
	if (vaddr == NULL)
		return -EFAULT;
	unwritten = __copy_from_user(vaddr + page_offset,
				     user_data, length);
	io_mapping_unmap(vaddr);
	if (unwritten)
		return -EFAULT;
	return 0;
}
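/*
 * NOTE (added commentary, not in the original source): the fast/slow split
 * above exists because the fast path runs in atomic context
 * (io_mapping_map_atomic_wc() plus __copy_from_user_inatomic_nocache()) and
 * therefore must not sleep; if the user source page is not resident the copy
 * simply reports unwritten bytes and fails.  The caller then retries through
 * slow_user_write(), whose non-atomic mapping and __copy_from_user() are
 * allowed to sleep and fault the source page in.
 */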
static int
i915_gem_gtt_pwrite(struct drm_device *dev, struct drm_gem_object *obj,
		    struct drm_i915_gem_pwrite *args,
		    struct drm_file *file_priv)
{
	struct drm_i915_gem_object *obj_priv = obj->driver_private;
	drm_i915_private_t *dev_priv = dev->dev_private;
	ssize_t remain;
	loff_t offset, page_base;
	char __user *user_data;
	int page_offset, page_length;
	int ret;

	user_data = (char __user *) (uintptr_t) args->data_ptr;
	remain = args->size;
	if (!access_ok(VERIFY_READ, user_data, remain))
		return -EFAULT;

	mutex_lock(&dev->struct_mutex);
	ret = i915_gem_object_pin(obj, 0);
	if (ret) {
		mutex_unlock(&dev->struct_mutex);
		return ret;
	}
	ret = i915_gem_set_domain(obj, file_priv,
				  I915_GEM_DOMAIN_GTT, I915_GEM_DOMAIN_GTT);
	if (ret)
		goto fail;

	obj_priv = obj->driver_private;
	offset = obj_priv->gtt_offset + args->offset;

	while (remain > 0) {
		/* Operation in this page
		 *
		 * page_base = page offset within aperture
		 * page_offset = offset within page
		 * page_length = bytes to copy for this page
		 */
		page_base = (offset & ~(PAGE_SIZE-1));
		page_offset = offset & (PAGE_SIZE-1);
		page_length = remain;
		if ((page_offset + remain) > PAGE_SIZE)
			page_length = PAGE_SIZE - page_offset;

		ret = fast_user_write(dev_priv->mm.gtt_mapping, page_base,
				      page_offset, user_data, page_length);

		/* If we get a fault while copying data, then (presumably) our
		 * source page isn't available.  In this case, use the
		 * non-atomic function
		 */
		if (ret) {
			ret = slow_user_write(dev_priv->mm.gtt_mapping,
					      page_base, page_offset,
					      user_data, page_length);
			if (ret)
				goto fail;
		}

		remain -= page_length;
		user_data += page_length;
		offset += page_length;
	}

fail:
	i915_gem_object_unpin(obj);
	mutex_unlock(&dev->struct_mutex);

	return ret;
}
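/*
 * NOTE (illustrative, not in the original source): the per-page split in the
 * loop above works like this for a hypothetical write of remain = 0x30 bytes
 * starting at GTT offset 0x1ff0 with 4 KiB pages:
 *
 *   pass 1: page_base = 0x1000, page_offset = 0xff0, page_length = 0x10
 *   pass 2: page_base = 0x2000, page_offset = 0x000, page_length = 0x20
 *
 * so each fast_user_write()/slow_user_write() call stays within a single page
 * of the aperture mapping.
 */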
static int
i915_gem_shmem_pwrite(struct drm_device *dev, struct drm_gem_object *obj,
		      struct drm_i915_gem_pwrite *args,
		      struct drm_file *file_priv)
{
	loff_t offset;
	ssize_t written;
	int ret;

	mutex_lock(&dev->struct_mutex);

	ret = i915_gem_set_domain(obj, file_priv,
				  I915_GEM_DOMAIN_CPU, I915_GEM_DOMAIN_CPU);
	if (ret) {
		mutex_unlock(&dev->struct_mutex);
		return ret;
	}

	offset = args->offset;

	written = vfs_write(obj->filp,
			    (char __user *)(uintptr_t) args->data_ptr,
			    args->size, &offset);
	if (written != args->size) {
		mutex_unlock(&dev->struct_mutex);
		return -EINVAL;
	}

	mutex_unlock(&dev->struct_mutex);

	return 0;
}
/**
 * Writes data to the object referenced by handle.
 *
 * On error, the contents of the buffer that were to be modified are undefined.
 */
int
i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
		      struct drm_file *file_priv)
{
	struct drm_i915_gem_pwrite *args = data;
	struct drm_gem_object *obj;
	struct drm_i915_gem_object *obj_priv;
	int ret = 0;

	obj = drm_gem_object_lookup(dev, file_priv, args->handle);
	if (obj == NULL)
		return -EBADF;
	obj_priv = obj->driver_private;

	/* Bounds check destination.
	 *
	 * XXX: This could use review for overflow issues...
	 */
	if (args->offset > obj->size || args->size > obj->size ||
	    args->offset + args->size > obj->size) {
		drm_gem_object_unreference(obj);
		return -EINVAL;
	}

	/* We can only do the GTT pwrite on untiled buffers, as otherwise
	 * it would end up going through the fenced access, and we'll get
	 * different detiling behavior between reading and writing.
	 * pread/pwrite currently are reading and writing from the CPU
	 * perspective, requiring manual detiling by the client.
	 */
	if (obj_priv->tiling_mode == I915_TILING_NONE &&
	    dev->gtt_total != 0)
		ret = i915_gem_gtt_pwrite(dev, obj, args, file_priv);
	else
		ret = i915_gem_shmem_pwrite(dev, obj, args, file_priv);

	if (ret)
		DRM_INFO("pwrite failed %d\n", ret);

	drm_gem_object_unreference(obj);

	return ret;
}
/**
 * Called when user space prepares to use an object
 */
int
i915_gem_set_domain_ioctl(struct drm_device *dev, void *data,
			  struct drm_file *file_priv)
{
	struct drm_i915_gem_set_domain *args = data;
	struct drm_gem_object *obj;
	int ret;

	if (!(dev->driver->driver_features & DRIVER_GEM))
		return -ENODEV;

	obj = drm_gem_object_lookup(dev, file_priv, args->handle);
	if (obj == NULL)
		return -EBADF;

	mutex_lock(&dev->struct_mutex);
	DRM_INFO("set_domain_ioctl %p(%d), %08x %08x\n",
		 obj, obj->size, args->read_domains, args->write_domain);
	ret = i915_gem_set_domain(obj, file_priv,
				  args->read_domains, args->write_domain);
	drm_gem_object_unreference(obj);
	mutex_unlock(&dev->struct_mutex);
	return ret;
}
/**
 * Called when user space has done writes to this buffer
 */
int
i915_gem_sw_finish_ioctl(struct drm_device *dev, void *data,
			 struct drm_file *file_priv)
{
	struct drm_i915_gem_sw_finish *args = data;
	struct drm_gem_object *obj;
	struct drm_i915_gem_object *obj_priv;
	int ret = 0;

	if (!(dev->driver->driver_features & DRIVER_GEM))
		return -ENODEV;

	mutex_lock(&dev->struct_mutex);
	obj = drm_gem_object_lookup(dev, file_priv, args->handle);
	if (obj == NULL) {
		mutex_unlock(&dev->struct_mutex);
		return -EBADF;
	}

	DRM_INFO("%s: sw_finish %d (%p %d)\n",
		 __func__, args->handle, obj, obj->size);

	obj_priv = obj->driver_private;

	/* Pinned buffers may be scanout, so flush the cache */
	if ((obj->write_domain & I915_GEM_DOMAIN_CPU) && obj_priv->pin_count) {
		i915_gem_clflush_object(obj);
		drm_agp_chipset_flush(dev);
	}
	drm_gem_object_unreference(obj);
	mutex_unlock(&dev->struct_mutex);
	return ret;
}
/**
 * Maps the contents of an object, returning the address it is mapped
 * into.
 *
 * While the mapping holds a reference on the contents of the object, it doesn't
 * imply a ref on the object itself.
 */
int
i915_gem_mmap_ioctl(struct drm_device *dev, void *data,
		    struct drm_file *file_priv)
{
	struct drm_i915_gem_mmap *args = data;
	struct drm_gem_object *obj;
	loff_t offset;
	unsigned long addr;

	if (!(dev->driver->driver_features & DRIVER_GEM))
		return -ENODEV;

	obj = drm_gem_object_lookup(dev, file_priv, args->handle);
	if (obj == NULL)
		return -EBADF;

	offset = args->offset;

	down_write(&current->mm->mmap_sem);
	addr = do_mmap(obj->filp, 0, args->size,
		       PROT_READ | PROT_WRITE, MAP_SHARED,
		       args->offset);
	up_write(&current->mm->mmap_sem);
	mutex_lock(&dev->struct_mutex);
	drm_gem_object_unreference(obj);
	mutex_unlock(&dev->struct_mutex);
	if (IS_ERR((void *)addr))
		return addr;

	args->addr_ptr = (uint64_t) addr;

	return 0;
}
static void
i915_gem_object_free_page_list(struct drm_gem_object *obj)
{
	struct drm_i915_gem_object *obj_priv = obj->driver_private;
	int page_count = obj->size / PAGE_SIZE;
	int i;

	if (obj_priv->page_list == NULL)
		return;

	for (i = 0; i < page_count; i++)
		if (obj_priv->page_list[i] != NULL) {
			if (obj_priv->dirty)
				set_page_dirty(obj_priv->page_list[i]);
			mark_page_accessed(obj_priv->page_list[i]);
			page_cache_release(obj_priv->page_list[i]);
		}
	obj_priv->dirty = 0;

	drm_free(obj_priv->page_list,
		 page_count * sizeof(struct page *),
		 DRM_MEM_DRIVER);
	obj_priv->page_list = NULL;
}
static void
i915_gem_object_move_to_active(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_i915_gem_object *obj_priv = obj->driver_private;

	/* Add a reference if we're newly entering the active list. */
	if (!obj_priv->active) {
		drm_gem_object_reference(obj);
		obj_priv->active = 1;
	}
	/* Move from whatever list we were on to the tail of execution. */
	list_move_tail(&obj_priv->list,
		       &dev_priv->mm.active_list);
}
static void
i915_gem_object_move_to_inactive(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_i915_gem_object *obj_priv = obj->driver_private;

	i915_verify_inactive(dev, __FILE__, __LINE__);
	if (obj_priv->pin_count != 0)
		list_del_init(&obj_priv->list);
	else
		list_move_tail(&obj_priv->list, &dev_priv->mm.inactive_list);

	if (obj_priv->active) {
		obj_priv->active = 0;
		drm_gem_object_unreference(obj);
	}
	i915_verify_inactive(dev, __FILE__, __LINE__);
}
/**
 * Creates a new sequence number, emitting a write of it to the status page
 * plus an interrupt, which will trigger i915_user_interrupt_handler.
 *
 * Must be called with struct_lock held.
 *
 * Returned sequence numbers are nonzero on success.
 */
static uint32_t
i915_add_request(struct drm_device *dev, uint32_t flush_domains)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_i915_gem_request *request;
	uint32_t seqno;
	int was_empty;
	RING_LOCALS;

	request = drm_calloc(1, sizeof(*request), DRM_MEM_DRIVER);
	if (request == NULL)
		return 0;

	/* Grab the seqno we're going to make this request be, and bump the
	 * next (skipping 0 so it can be the reserved no-seqno value).
	 */
	seqno = dev_priv->mm.next_gem_seqno;
	dev_priv->mm.next_gem_seqno++;
	if (dev_priv->mm.next_gem_seqno == 0)
		dev_priv->mm.next_gem_seqno++;

	BEGIN_LP_RING(4);
	OUT_RING(MI_STORE_DWORD_INDEX);
	OUT_RING(I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
	OUT_RING(seqno);

	OUT_RING(MI_USER_INTERRUPT);
	ADVANCE_LP_RING();

	DRM_DEBUG("%d\n", seqno);

	request->seqno = seqno;
	request->emitted_jiffies = jiffies;
	request->flush_domains = flush_domains;
	was_empty = list_empty(&dev_priv->mm.request_list);
	list_add_tail(&request->list, &dev_priv->mm.request_list);

	if (was_empty && !dev_priv->mm.suspended)
		schedule_delayed_work(&dev_priv->mm.retire_work, HZ);
	return seqno;
}
/**
 * Command execution barrier
 *
 * Ensures that all commands in the ring are finished
 * before signalling the CPU
 */
static uint32_t
i915_retire_commands(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	uint32_t cmd = MI_FLUSH | MI_NO_WRITE_FLUSH;
	uint32_t flush_domains = 0;
	RING_LOCALS;

	/* The sampler always gets flushed on i965 (sigh) */
	if (IS_I965G(dev))
		flush_domains |= I915_GEM_DOMAIN_SAMPLER;
	BEGIN_LP_RING(2);
	OUT_RING(cmd);
	OUT_RING(0); /* noop */
	ADVANCE_LP_RING();
	return flush_domains;
}
/**
 * Moves buffers associated only with the given active seqno from the active
 * to inactive list, potentially freeing them.
 */
static void
i915_gem_retire_request(struct drm_device *dev,
			struct drm_i915_gem_request *request)
{
	drm_i915_private_t *dev_priv = dev->dev_private;

	/* Move any buffers on the active list that are no longer referenced
	 * by the ringbuffer to the flushing/inactive lists as appropriate.
	 */
	while (!list_empty(&dev_priv->mm.active_list)) {
		struct drm_gem_object *obj;
		struct drm_i915_gem_object *obj_priv;

		obj_priv = list_first_entry(&dev_priv->mm.active_list,
					    struct drm_i915_gem_object,
					    list);
		obj = obj_priv->obj;

		/* If the seqno being retired doesn't match the oldest in the
		 * list, then the oldest in the list must still be newer than
		 * this one.
		 */
		if (obj_priv->last_rendering_seqno != request->seqno)
			return;
		DRM_INFO("%s: retire %d moves to inactive list %p\n",
			 __func__, request->seqno, obj);

		if (obj->write_domain != 0) {
			list_move_tail(&obj_priv->list,
				       &dev_priv->mm.flushing_list);
		} else {
			i915_gem_object_move_to_inactive(obj);
		}
	}

	if (request->flush_domains != 0) {
		struct drm_i915_gem_object *obj_priv, *next;

		/* Clear the write domain and activity from any buffers
		 * that are just waiting for a flush matching the one retired.
		 */
		list_for_each_entry_safe(obj_priv, next,
					 &dev_priv->mm.flushing_list, list) {
			struct drm_gem_object *obj = obj_priv->obj;

			if (obj->write_domain & request->flush_domains) {
				obj->write_domain = 0;
				i915_gem_object_move_to_inactive(obj);
			}
		}
	}
}
/**
 * Returns true if seq1 is later than seq2.
 */
static int
i915_seqno_passed(uint32_t seq1, uint32_t seq2)
{
	return (int32_t)(seq1 - seq2) >= 0;
}
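/*
 * NOTE (added commentary, not in the original source): the signed subtraction
 * above is the usual wrap-safe sequence-number comparison.  For example, with
 * seq1 = 0x00000002 and seq2 = 0xfffffffe, the unsigned difference is 4,
 * which as int32_t is +4 >= 0, so seq1 is correctly treated as "later" even
 * though it is numerically smaller after the counter has wrapped.
 */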
static uint32_t
i915_get_gem_seqno(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;

	return READ_HWSP(dev_priv, I915_GEM_HWS_INDEX);
}
/**
 * This function clears the request list as sequence numbers are passed.
 */
void
i915_gem_retire_requests(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	uint32_t seqno;

	seqno = i915_get_gem_seqno(dev);

	while (!list_empty(&dev_priv->mm.request_list)) {
		struct drm_i915_gem_request *request;
		uint32_t retiring_seqno;

		request = list_first_entry(&dev_priv->mm.request_list,
					   struct drm_i915_gem_request,
					   list);
		retiring_seqno = request->seqno;

		if (i915_seqno_passed(seqno, retiring_seqno) ||
		    dev_priv->mm.wedged) {
			i915_gem_retire_request(dev, request);

			list_del(&request->list);
			drm_free(request, sizeof(*request), DRM_MEM_DRIVER);
		} else
			break;
	}
}
void
i915_gem_retire_work_handler(struct work_struct *work)
{
	drm_i915_private_t *dev_priv;
	struct drm_device *dev;

	dev_priv = container_of(work, drm_i915_private_t,
				mm.retire_work.work);
	dev = dev_priv->dev;

	mutex_lock(&dev->struct_mutex);
	i915_gem_retire_requests(dev);
	if (!dev_priv->mm.suspended &&
	    !list_empty(&dev_priv->mm.request_list))
		schedule_delayed_work(&dev_priv->mm.retire_work, HZ);
	mutex_unlock(&dev->struct_mutex);
}
/**
 * Waits for a sequence number to be signaled, and cleans up the
 * request and object lists appropriately for that event.
 */
static int
i915_wait_request(struct drm_device *dev, uint32_t seqno)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	int ret = 0;

	BUG_ON(seqno == 0);

	if (!i915_seqno_passed(i915_get_gem_seqno(dev), seqno)) {
		dev_priv->mm.waiting_gem_seqno = seqno;
		i915_user_irq_get(dev);
		ret = wait_event_interruptible(dev_priv->irq_queue,
					       i915_seqno_passed(i915_get_gem_seqno(dev),
								 seqno) ||
					       dev_priv->mm.wedged);
		i915_user_irq_put(dev);
		dev_priv->mm.waiting_gem_seqno = 0;
	}
	if (dev_priv->mm.wedged)
		ret = -EIO;

	if (ret && ret != -ERESTARTSYS)
		DRM_ERROR("%s returns %d (awaiting %d at %d)\n",
			  __func__, ret, seqno, i915_get_gem_seqno(dev));

	/* Directly dispatch request retiring.  While we have the work queue
	 * to handle this, the waiter on a request often wants an associated
	 * buffer to have made it to the inactive list, and we would need
	 * a separate wait queue to handle that.
	 */
	if (ret == 0)
		i915_gem_retire_requests(dev);

	return ret;
}
static void
i915_gem_flush(struct drm_device *dev,
	       uint32_t invalidate_domains,
	       uint32_t flush_domains)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	uint32_t cmd;
	RING_LOCALS;

	DRM_INFO("%s: invalidate %08x flush %08x\n", __func__,
		 invalidate_domains, flush_domains);

	if (flush_domains & I915_GEM_DOMAIN_CPU)
		drm_agp_chipset_flush(dev);

	if ((invalidate_domains | flush_domains) & ~(I915_GEM_DOMAIN_CPU |
						     I915_GEM_DOMAIN_GTT)) {
		/*
		 * read/write caches:
		 *
		 * I915_GEM_DOMAIN_RENDER is always invalidated, but is
		 * only flushed if MI_NO_WRITE_FLUSH is unset.  On 965, it is
		 * also flushed at 2d versus 3d pipeline switches.
		 *
		 * read-only caches:
		 *
		 * I915_GEM_DOMAIN_SAMPLER is flushed on pre-965 if
		 * MI_READ_FLUSH is set, and is always flushed on 965.
		 *
		 * I915_GEM_DOMAIN_COMMAND may not exist?
		 *
		 * I915_GEM_DOMAIN_INSTRUCTION, which exists on 965, is
		 * invalidated when MI_EXE_FLUSH is set.
		 *
		 * I915_GEM_DOMAIN_VERTEX, which exists on 965, is
		 * invalidated with every MI_FLUSH.
		 *
		 * TLBs:
		 *
		 * On 965, TLBs associated with I915_GEM_DOMAIN_COMMAND
		 * and I915_GEM_DOMAIN_CPU in are invalidated at PTE write and
		 * I915_GEM_DOMAIN_RENDER and I915_GEM_DOMAIN_SAMPLER
		 * are flushed at any MI_FLUSH.
		 */

		cmd = MI_FLUSH | MI_NO_WRITE_FLUSH;
		if ((invalidate_domains|flush_domains) &
		    I915_GEM_DOMAIN_RENDER)
			cmd &= ~MI_NO_WRITE_FLUSH;
		if (!IS_I965G(dev)) {
			/*
			 * On the 965, the sampler cache always gets flushed
			 * and this bit is reserved.
			 */
			if (invalidate_domains & I915_GEM_DOMAIN_SAMPLER)
				cmd |= MI_READ_FLUSH;
		}
		if (invalidate_domains & I915_GEM_DOMAIN_INSTRUCTION)
			cmd |= MI_EXE_FLUSH;

		DRM_INFO("%s: queue flush %08x to ring\n", __func__, cmd);

		BEGIN_LP_RING(2);
		OUT_RING(cmd);
		OUT_RING(0); /* noop */
		ADVANCE_LP_RING();
	}
}
/**
 * Ensures that all rendering to the object has completed and the object is
 * safe to unbind from the GTT or access from the CPU.
 */
static int
i915_gem_object_wait_rendering(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	struct drm_i915_gem_object *obj_priv = obj->driver_private;
	int ret;

	/* If there are writes queued to the buffer, flush and
	 * create a new seqno to wait for.
	 */
	if (obj->write_domain & ~(I915_GEM_DOMAIN_CPU|I915_GEM_DOMAIN_GTT)) {
		uint32_t write_domain = obj->write_domain;

		DRM_INFO("%s: flushing object %p from write domain %08x\n",
			 __func__, obj, write_domain);

		i915_gem_flush(dev, 0, write_domain);

		i915_gem_object_move_to_active(obj);
		obj_priv->last_rendering_seqno = i915_add_request(dev,
								  write_domain);
		BUG_ON(obj_priv->last_rendering_seqno == 0);

		DRM_INFO("%s: flush moves to exec list %p\n", __func__, obj);
	}
	/* If there is rendering queued on the buffer being evicted, wait for
	 * it.
	 */
	if (obj_priv->active) {
		DRM_INFO("%s: object %p wait for seqno %08x\n",
			 __func__, obj, obj_priv->last_rendering_seqno);

		ret = i915_wait_request(dev, obj_priv->last_rendering_seqno);
		if (ret != 0)
			return ret;
	}

	return 0;
}
/**
 * Unbinds an object from the GTT aperture.
 */
static int
i915_gem_object_unbind(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	struct drm_i915_gem_object *obj_priv = obj->driver_private;
	int ret = 0;

	DRM_INFO("%s:%d %p\n", __func__, __LINE__, obj);
	DRM_INFO("gtt_space %p\n", obj_priv->gtt_space);

	if (obj_priv->gtt_space == NULL)
		return 0;

	if (obj_priv->pin_count != 0) {
		DRM_ERROR("Attempting to unbind pinned buffer\n");
		return -EINVAL;
	}

	/* Wait for any rendering to complete
	 */
	ret = i915_gem_object_wait_rendering(obj);
	if (ret) {
		DRM_ERROR("wait_rendering failed: %d\n", ret);
		return ret;
	}

	/* Move the object to the CPU domain to ensure that
	 * any possible CPU writes while it's not in the GTT
	 * are flushed when we go to remap it. This will
	 * also ensure that all pending GPU writes are finished
	 * before we unbind.
	 */
	ret = i915_gem_object_set_domain(obj, I915_GEM_DOMAIN_CPU,
					 I915_GEM_DOMAIN_CPU);
	if (ret) {
		DRM_ERROR("set_domain failed: %d\n", ret);
		return ret;
	}

	if (obj_priv->agp_mem != NULL) {
		drm_unbind_agp(obj_priv->agp_mem);
		drm_free_agp(obj_priv->agp_mem, obj->size / PAGE_SIZE);
		obj_priv->agp_mem = NULL;
	}

	BUG_ON(obj_priv->active);

	i915_gem_object_free_page_list(obj);

	if (obj_priv->gtt_space) {
		atomic_dec(&dev->gtt_count);
		atomic_sub(obj->size, &dev->gtt_memory);

		drm_mm_put_block(obj_priv->gtt_space);
		obj_priv->gtt_space = NULL;
	}

	/* Remove ourselves from the LRU list if present. */
	if (!list_empty(&obj_priv->list))
		list_del_init(&obj_priv->list);

	return 0;
}
static int
i915_gem_evict_something(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_gem_object *obj;
	struct drm_i915_gem_object *obj_priv;
	int ret = 0;

	for (;;) {
		/* If there's an inactive buffer available now, grab it
		 * and be done.
		 */
		if (!list_empty(&dev_priv->mm.inactive_list)) {
			obj_priv = list_first_entry(&dev_priv->mm.inactive_list,
						    struct drm_i915_gem_object,
						    list);
			obj = obj_priv->obj;
			BUG_ON(obj_priv->pin_count != 0);

			DRM_INFO("%s: evicting %p\n", __func__, obj);

			BUG_ON(obj_priv->active);

			/* Wait on the rendering and unbind the buffer. */
			ret = i915_gem_object_unbind(obj);
			break;
		}

		/* If we didn't get anything, but the ring is still processing
		 * things, wait for one of those things to finish and hopefully
		 * leave us a buffer to evict.
		 */
		if (!list_empty(&dev_priv->mm.request_list)) {
			struct drm_i915_gem_request *request;

			request = list_first_entry(&dev_priv->mm.request_list,
						   struct drm_i915_gem_request,
						   list);

			ret = i915_wait_request(dev, request->seqno);
			if (ret)
				break;

			/* if waiting caused an object to become inactive,
			 * then loop around and wait for it. Otherwise, we
			 * assume that waiting freed and unbound something,
			 * so there should now be some space in the GTT
			 */
			if (!list_empty(&dev_priv->mm.inactive_list))
				continue;
			break;
		}

		/* If we didn't have anything on the request list but there
		 * are buffers awaiting a flush, emit one and try again.
		 * When we wait on it, those buffers waiting for that flush
		 * will get moved to inactive.
		 */
		if (!list_empty(&dev_priv->mm.flushing_list)) {
			obj_priv = list_first_entry(&dev_priv->mm.flushing_list,
						    struct drm_i915_gem_object,
						    list);
			obj = obj_priv->obj;

			i915_gem_flush(dev,
				       obj->write_domain,
				       obj->write_domain);
			i915_add_request(dev, obj->write_domain);

			obj = NULL;
			continue;
		}

		DRM_ERROR("inactive empty %d request empty %d "
			  "flushing empty %d\n",
			  list_empty(&dev_priv->mm.inactive_list),
			  list_empty(&dev_priv->mm.request_list),
			  list_empty(&dev_priv->mm.flushing_list));
		/* If we didn't do any of the above, there's nothing to be done
		 * and we just can't fit it in.
		 */
		return -ENOMEM;
	}
	return ret;
}
static int
i915_gem_object_get_page_list(struct drm_gem_object *obj)
{
	struct drm_i915_gem_object *obj_priv = obj->driver_private;
	int page_count, i;
	struct address_space *mapping;
	struct inode *inode;
	struct page *page;
	int ret;

	if (obj_priv->page_list)
		return 0;

	/* Get the list of pages out of our struct file.  They'll be pinned
	 * at this point until we release them.
	 */
	page_count = obj->size / PAGE_SIZE;
	BUG_ON(obj_priv->page_list != NULL);
	obj_priv->page_list = drm_calloc(page_count, sizeof(struct page *),
					 DRM_MEM_DRIVER);
	if (obj_priv->page_list == NULL) {
		DRM_ERROR("Failed to allocate page list\n");
		return -ENOMEM;
	}

	inode = obj->filp->f_path.dentry->d_inode;
	mapping = inode->i_mapping;
	for (i = 0; i < page_count; i++) {
		page = read_mapping_page(mapping, i, NULL);
		if (IS_ERR(page)) {
			ret = PTR_ERR(page);
			DRM_ERROR("read_mapping_page failed: %d\n", ret);
			i915_gem_object_free_page_list(obj);
			return ret;
		}
		obj_priv->page_list[i] = page;
	}
	return 0;
}
/**
 * Finds free space in the GTT aperture and binds the object there.
 */
static int
i915_gem_object_bind_to_gtt(struct drm_gem_object *obj, unsigned alignment)
{
	struct drm_device *dev = obj->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_i915_gem_object *obj_priv = obj->driver_private;
	struct drm_mm_node *free_space;
	int page_count, ret;

	if (alignment == 0)
		alignment = PAGE_SIZE;
	if (alignment & (PAGE_SIZE - 1)) {
		DRM_ERROR("Invalid object alignment requested %u\n", alignment);
		return -EINVAL;
	}

 search_free:
	free_space = drm_mm_search_free(&dev_priv->mm.gtt_space,
					obj->size, alignment, 0);
	if (free_space != NULL) {
		obj_priv->gtt_space = drm_mm_get_block(free_space, obj->size,
						       alignment);
		if (obj_priv->gtt_space != NULL) {
			obj_priv->gtt_space->private = obj;
			obj_priv->gtt_offset = obj_priv->gtt_space->start;
		}
	}
	if (obj_priv->gtt_space == NULL) {
		/* If the gtt is empty and we're still having trouble
		 * fitting our object in, we're out of memory.
		 */
		DRM_INFO("%s: GTT full, evicting something\n", __func__);

		if (list_empty(&dev_priv->mm.inactive_list) &&
		    list_empty(&dev_priv->mm.flushing_list) &&
		    list_empty(&dev_priv->mm.active_list)) {
			DRM_ERROR("GTT full, but LRU list empty\n");
			return -ENOMEM;
		}

		ret = i915_gem_evict_something(dev);
		if (ret != 0) {
			DRM_ERROR("Failed to evict a buffer %d\n", ret);
			return ret;
		}
		goto search_free;
	}

	DRM_INFO("Binding object of size %d at 0x%08x\n",
		 obj->size, obj_priv->gtt_offset);

	ret = i915_gem_object_get_page_list(obj);
	if (ret) {
		drm_mm_put_block(obj_priv->gtt_space);
		obj_priv->gtt_space = NULL;
		return ret;
	}

	page_count = obj->size / PAGE_SIZE;
	/* Create an AGP memory structure pointing at our pages, and bind it
	 * into the GTT.
	 */
	obj_priv->agp_mem = drm_agp_bind_pages(dev,
					       obj_priv->page_list,
					       page_count,
					       obj_priv->gtt_offset,
					       obj_priv->agp_type);
	if (obj_priv->agp_mem == NULL) {
		i915_gem_object_free_page_list(obj);
		drm_mm_put_block(obj_priv->gtt_space);
		obj_priv->gtt_space = NULL;
		return -ENOMEM;
	}
	atomic_inc(&dev->gtt_count);
	atomic_add(obj->size, &dev->gtt_memory);

	/* Assert that the object is not currently in any GPU domain. As it
	 * wasn't in the GTT, there shouldn't be any way it could have been in
	 * a GPU cache
	 */
	BUG_ON(obj->read_domains & ~(I915_GEM_DOMAIN_CPU|I915_GEM_DOMAIN_GTT));
	BUG_ON(obj->write_domain & ~(I915_GEM_DOMAIN_CPU|I915_GEM_DOMAIN_GTT));

	return 0;
}
void
i915_gem_clflush_object(struct drm_gem_object *obj)
{
	struct drm_i915_gem_object *obj_priv = obj->driver_private;

	/* If we don't have a page list set up, then we're not pinned
	 * to GPU, and we can ignore the cache flush because it'll happen
	 * again at bind time.
	 */
	if (obj_priv->page_list == NULL)
		return;

	drm_clflush_pages(obj_priv->page_list, obj->size / PAGE_SIZE);
}
/*
 * Set the next domain for the specified object. This
 * may not actually perform the necessary flushing/invaliding though,
 * as that may want to be batched with other set_domain operations
 *
 * This is (we hope) the only really tricky part of gem. The goal
 * is fairly simple -- track which caches hold bits of the object
 * and make sure they remain coherent. A few concrete examples may
 * help to explain how it works. For shorthand, we use the notation
 * (read_domains, write_domain), e.g. (CPU, CPU) to indicate
 * a pair of read and write domain masks.
 *
 * Case 1: the batch buffer
 *
 *	1. Allocated
 *	2. Written by CPU
 *	3. Mapped to GTT
 *	4. Read by GPU
 *	5. Unmapped from GTT
 *	6. Freed
 *
 *	Let's take these a step at a time
 *
 *	1. Allocated
 *		Pages allocated from the kernel may still have
 *		cache contents, so we set them to (CPU, CPU) always.
 *	2. Written by CPU (using pwrite)
 *		The pwrite function calls set_domain (CPU, CPU) and
 *		this function does nothing (as nothing changes)
 *	3. Mapped to GTT
 *		This function asserts that the object is not
 *		currently in any GPU-based read or write domains
 *	4. Read by GPU
 *		i915_gem_execbuffer calls set_domain (COMMAND, 0).
 *		As write_domain is zero, this function adds in the
 *		current read domains (CPU+COMMAND, 0).
 *		flush_domains is set to CPU.
 *		invalidate_domains is set to COMMAND
 *		clflush is run to get data out of the CPU caches
 *		then i915_dev_set_domain calls i915_gem_flush to
 *		emit an MI_FLUSH and drm_agp_chipset_flush
 *	5. Unmapped from GTT
 *		i915_gem_object_unbind calls set_domain (CPU, CPU)
 *		flush_domains and invalidate_domains end up both zero
 *		so no flushing/invalidating happens
 *	6. Freed
 *
 * Case 2: The shared render buffer
 *
 *	1. Allocated
 *	2. Mapped to GTT
 *	3. Read/written by GPU
 *	4. set_domain to (CPU,CPU)
 *	5. Read/written by CPU
 *	6. Read/written by GPU
 *
 *	1. Allocated
 *		Same as last example, (CPU, CPU)
 *	2. Mapped to GTT
 *		Nothing changes (assertions find that it is not in the GPU)
 *	3. Read/written by GPU
 *		execbuffer calls set_domain (RENDER, RENDER)
 *		flush_domains gets CPU
 *		invalidate_domains gets GPU
 *		clflush (obj)
 *		MI_FLUSH and drm_agp_chipset_flush
 *	4. set_domain (CPU, CPU)
 *		flush_domains gets GPU
 *		invalidate_domains gets CPU
 *		wait_rendering (obj) to make sure all drawing is complete.
 *		This will include an MI_FLUSH to get the data from GPU
 *		to memory
 *		clflush (obj) to invalidate the CPU cache
 *		Another MI_FLUSH in i915_gem_flush (eliminate this somehow?)
 *	5. Read/written by CPU
 *		cache lines are loaded and dirtied
 *	6. Read written by GPU
 *		Same as last GPU access
 *
 * Case 3: The constant buffer
 *
 *	1. Allocated
 *	2. Written by CPU
 *	3. Read by GPU
 *	4. Updated (written) by CPU again
 *	5. Read by GPU
 *
 *	1. Allocated
 *		(CPU, CPU)
 *	2. Written by CPU
 *		(CPU, CPU)
 *	3. Read by GPU
 *		(CPU+RENDER, 0)
 *		flush_domains = CPU
 *		invalidate_domains = RENDER
 *		clflush (obj)
 *		MI_FLUSH
 *		drm_agp_chipset_flush
 *	4. Updated (written) by CPU again
 *		(CPU, CPU)
 *		flush_domains = 0 (no previous write domain)
 *		invalidate_domains = 0 (no new read domains)
 *	5. Read by GPU
 *		(CPU+RENDER, 0)
 *		flush_domains = CPU
 *		invalidate_domains = RENDER
 *		clflush (obj)
 *		MI_FLUSH
 *		drm_agp_chipset_flush
 */
static int
i915_gem_object_set_domain(struct drm_gem_object *obj,
			   uint32_t read_domains,
			   uint32_t write_domain)
{
	struct drm_device *dev = obj->dev;
	struct drm_i915_gem_object *obj_priv = obj->driver_private;
	uint32_t invalidate_domains = 0;
	uint32_t flush_domains = 0;
	int ret;

	DRM_INFO("%s: object %p read %08x -> %08x write %08x -> %08x\n",
		 __func__, obj,
		 obj->read_domains, read_domains,
		 obj->write_domain, write_domain);

	/*
	 * If the object isn't moving to a new write domain,
	 * let the object stay in multiple read domains
	 */
	if (write_domain == 0)
		read_domains |= obj->read_domains;
	else
		obj_priv->dirty = 1;

	/*
	 * Flush the current write domain if
	 * the new read domains don't match. Invalidate
	 * any read domains which differ from the old
	 * write domain
	 */
	if (obj->write_domain && obj->write_domain != read_domains) {
		flush_domains |= obj->write_domain;
		invalidate_domains |= read_domains & ~obj->write_domain;
	}
	/*
	 * Invalidate any read caches which may have
	 * stale data. That is, any new read domains.
	 */
	invalidate_domains |= read_domains & ~obj->read_domains;
	if ((flush_domains | invalidate_domains) & I915_GEM_DOMAIN_CPU) {
		DRM_INFO("%s: CPU domain flush %08x invalidate %08x\n",
			 __func__, flush_domains, invalidate_domains);
		/*
		 * If we're invaliding the CPU cache and flushing a GPU cache,
		 * then pause for rendering so that the GPU caches will be
		 * flushed before the cpu cache is invalidated
		 */
		if ((invalidate_domains & I915_GEM_DOMAIN_CPU) &&
		    (flush_domains & ~(I915_GEM_DOMAIN_CPU |
				       I915_GEM_DOMAIN_GTT))) {
			ret = i915_gem_object_wait_rendering(obj);
			if (ret)
				return ret;
		}
		i915_gem_clflush_object(obj);
	}

	if ((write_domain | flush_domains) != 0)
		obj->write_domain = write_domain;

	/* If we're invalidating the CPU domain, clear the per-page CPU
	 * domain list as well.
	 */
	if (obj_priv->page_cpu_valid != NULL &&
	    (write_domain != 0 ||
	     read_domains & I915_GEM_DOMAIN_CPU)) {
		drm_free(obj_priv->page_cpu_valid, obj->size / PAGE_SIZE,
			 DRM_MEM_DRIVER);
		obj_priv->page_cpu_valid = NULL;
	}
	obj->read_domains = read_domains;

	dev->invalidate_domains |= invalidate_domains;
	dev->flush_domains |= flush_domains;

	DRM_INFO("%s: read %08x write %08x invalidate %08x flush %08x\n",
		 __func__,
		 obj->read_domains, obj->write_domain,
		 dev->invalidate_domains, dev->flush_domains);

	return 0;
}
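/*
 * NOTE (worked example, not in the original source): tracing the "constant
 * buffer" case from the big comment above through this function.  A buffer
 * currently in (CPU, CPU) handed to set_domain(RENDER, 0): write_domain is 0,
 * so read_domains becomes CPU|RENDER; the old CPU write domain differs from
 * the new readers, so flush_domains |= CPU and invalidate_domains |= RENDER;
 * the CPU bit triggers the clflush; and the accumulated per-device
 * flush/invalidate masks are emitted later by i915_gem_dev_set_domain().
 */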
/**
 * Set the read/write domain on a range of the object.
 *
 * Currently only implemented for CPU reads, otherwise drops to normal
 * i915_gem_object_set_domain().
 */
static int
i915_gem_object_set_domain_range(struct drm_gem_object *obj,
				 uint64_t offset,
				 uint64_t size,
				 uint32_t read_domains,
				 uint32_t write_domain)
{
	struct drm_i915_gem_object *obj_priv = obj->driver_private;
	int ret, i;

	if (obj->read_domains & I915_GEM_DOMAIN_CPU)
		return 0;

	if (read_domains != I915_GEM_DOMAIN_CPU ||
	    write_domain != 0)
		return i915_gem_object_set_domain(obj,
						  read_domains, write_domain);

	/* Wait on any GPU rendering to the object to be flushed. */
	ret = i915_gem_object_wait_rendering(obj);
	if (ret)
		return ret;

	if (obj_priv->page_cpu_valid == NULL) {
		obj_priv->page_cpu_valid = drm_calloc(1, obj->size / PAGE_SIZE,
						      DRM_MEM_DRIVER);
	}

	/* Flush the cache on any pages that are still invalid from the CPU's
	 * perspective.
	 */
	for (i = offset / PAGE_SIZE; i <= (offset + size - 1) / PAGE_SIZE; i++) {
		if (obj_priv->page_cpu_valid[i])
			continue;

		drm_clflush_pages(obj_priv->page_list + i, 1);

		obj_priv->page_cpu_valid[i] = 1;
	}

	return 0;
}
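/*
 * NOTE (illustrative, not in the original source): the loop above walks every
 * page touched by [offset, offset + size).  For example, with 4 KiB pages,
 * offset = 0x0ff8 and size = 0x10 covers pages 0 and 1, since
 * offset / PAGE_SIZE = 0 and (offset + size - 1) / PAGE_SIZE = 1, and only
 * pages whose page_cpu_valid entry is still clear get clflushed and marked.
 */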
/**
 * Once all of the objects have been set in the proper domain,
 * perform the necessary flush and invalidate operations.
 *
 * Returns the write domains flushed, for use in flush tracking.
 */
static uint32_t
i915_gem_dev_set_domain(struct drm_device *dev)
{
	uint32_t flush_domains = dev->flush_domains;

	/*
	 * Now that all the buffers are synced to the proper domains,
	 * flush and invalidate the collected domains
	 */
	if (dev->invalidate_domains | dev->flush_domains) {
		DRM_INFO("%s: invalidate_domains %08x flush_domains %08x\n",
			 __func__,
			 dev->invalidate_domains,
			 dev->flush_domains);
		i915_gem_flush(dev,
			       dev->invalidate_domains,
			       dev->flush_domains);
		dev->invalidate_domains = 0;
		dev->flush_domains = 0;
	}

	return flush_domains;
}
/**
 * Pin an object to the GTT and evaluate the relocations landing in it.
 */
static int
i915_gem_object_pin_and_relocate(struct drm_gem_object *obj,
				 struct drm_file *file_priv,
				 struct drm_i915_gem_exec_object *entry)
{
	struct drm_device *dev = obj->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_i915_gem_relocation_entry reloc;
	struct drm_i915_gem_relocation_entry __user *relocs;
	struct drm_i915_gem_object *obj_priv = obj->driver_private;
	int i, ret;
	void __iomem *reloc_page;

	/* Choose the GTT offset for our buffer and put it there. */
	ret = i915_gem_object_pin(obj, (uint32_t) entry->alignment);
	if (ret)
		return ret;

	entry->offset = obj_priv->gtt_offset;

	relocs = (struct drm_i915_gem_relocation_entry __user *)
		 (uintptr_t) entry->relocs_ptr;
	/* Apply the relocations, using the GTT aperture to avoid cache
	 * flushing requirements.
	 */
	for (i = 0; i < entry->relocation_count; i++) {
		struct drm_gem_object *target_obj;
		struct drm_i915_gem_object *target_obj_priv;
		uint32_t reloc_val, reloc_offset;
		uint32_t __iomem *reloc_entry;

		ret = copy_from_user(&reloc, relocs + i, sizeof(reloc));
		if (ret != 0) {
			i915_gem_object_unpin(obj);
			return ret;
		}

		target_obj = drm_gem_object_lookup(obj->dev, file_priv,
						   reloc.target_handle);
		if (target_obj == NULL) {
			i915_gem_object_unpin(obj);
			return -EBADF;
		}
		target_obj_priv = target_obj->driver_private;

		/* The target buffer should have appeared before us in the
		 * exec_object list, so it should have a GTT space bound by now.
		 */
		if (target_obj_priv->gtt_space == NULL) {
			DRM_ERROR("No GTT space found for object %d\n",
				  reloc.target_handle);
			drm_gem_object_unreference(target_obj);
			i915_gem_object_unpin(obj);
			return -EINVAL;
		}

		if (reloc.offset > obj->size - 4) {
			DRM_ERROR("Relocation beyond object bounds: "
				  "obj %p target %d offset %d size %d.\n",
				  obj, reloc.target_handle,
				  (int) reloc.offset, (int) obj->size);
			drm_gem_object_unreference(target_obj);
			i915_gem_object_unpin(obj);
			return -EINVAL;
		}
		if (reloc.offset & 3) {
			DRM_ERROR("Relocation not 4-byte aligned: "
				  "obj %p target %d offset %d.\n",
				  obj, reloc.target_handle,
				  (int) reloc.offset);
			drm_gem_object_unreference(target_obj);
			i915_gem_object_unpin(obj);
			return -EINVAL;
		}

		if (reloc.write_domain && target_obj->pending_write_domain &&
		    reloc.write_domain != target_obj->pending_write_domain) {
			DRM_ERROR("Write domain conflict: "
				  "obj %p target %d offset %d "
				  "new %08x old %08x\n",
				  obj, reloc.target_handle,
				  (int) reloc.offset,
				  reloc.write_domain,
				  target_obj->pending_write_domain);
			drm_gem_object_unreference(target_obj);
			i915_gem_object_unpin(obj);
			return -EINVAL;
		}

		DRM_INFO("%s: obj %p offset %08x target %d "
			 "read %08x write %08x gtt %08x "
			 "presumed %08x delta %08x\n",
			 __func__,
			 obj,
			 (int) reloc.offset,
			 (int) reloc.target_handle,
			 (int) reloc.read_domains,
			 (int) reloc.write_domain,
			 (int) target_obj_priv->gtt_offset,
			 (int) reloc.presumed_offset,
			 reloc.delta);

		target_obj->pending_read_domains |= reloc.read_domains;
		target_obj->pending_write_domain |= reloc.write_domain;

		/* If the relocation already has the right value in it, no
		 * more work needs to be done.
		 */
		if (target_obj_priv->gtt_offset == reloc.presumed_offset) {
			drm_gem_object_unreference(target_obj);
			continue;
		}

		/* Now that we're going to actually write some data in,
		 * make sure that any rendering using this buffer's contents
		 * is completed.
		 */
		i915_gem_object_wait_rendering(obj);

		/* As we're writing through the gtt, flush
		 * any CPU writes before we write the relocations
		 */
		if (obj->write_domain & I915_GEM_DOMAIN_CPU) {
			i915_gem_clflush_object(obj);
			drm_agp_chipset_flush(dev);
			obj->write_domain = 0;
		}

		/* Map the page containing the relocation we're going to
		 * perform.
		 */
		reloc_offset = obj_priv->gtt_offset + reloc.offset;
		reloc_page = io_mapping_map_atomic_wc(dev_priv->mm.gtt_mapping,
						      (reloc_offset &
						       ~(PAGE_SIZE - 1)));
		reloc_entry = (uint32_t __iomem *)(reloc_page +
						   (reloc_offset & (PAGE_SIZE - 1)));
		reloc_val = target_obj_priv->gtt_offset + reloc.delta;

		DRM_INFO("Applied relocation: %p@0x%08x %08x -> %08x\n",
			 obj, (unsigned int) reloc.offset,
			 readl(reloc_entry), reloc_val);
		writel(reloc_val, reloc_entry);
		io_mapping_unmap_atomic(reloc_page);

		/* Write the updated presumed offset for this entry back out
		 * to the user.
		 */
		reloc.presumed_offset = target_obj_priv->gtt_offset;
		ret = copy_to_user(relocs + i, &reloc, sizeof(reloc));
		if (ret != 0) {
			drm_gem_object_unreference(target_obj);
			i915_gem_object_unpin(obj);
			return ret;
		}

		drm_gem_object_unreference(target_obj);
	}

	i915_gem_dump_object(obj, 128, __func__, ~0);

	return 0;
}
/** Dispatch a batchbuffer to the ring
 */
static int
i915_dispatch_gem_execbuffer(struct drm_device *dev,
			     struct drm_i915_gem_execbuffer *exec,
			     uint64_t exec_offset)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_clip_rect __user *boxes = (struct drm_clip_rect __user *)
					     (uintptr_t) exec->cliprects_ptr;
	int nbox = exec->num_cliprects;
	int i = 0, count;
	uint32_t exec_start, exec_len;
	RING_LOCALS;

	exec_start = (uint32_t) exec_offset + exec->batch_start_offset;
	exec_len = (uint32_t) exec->batch_len;

	if ((exec_start | exec_len) & 0x7) {
		DRM_ERROR("alignment\n");
		return -EINVAL;
	}

	count = nbox ? nbox : 1;

	for (i = 0; i < count; i++) {
		if (i < nbox) {
			int ret = i915_emit_box(dev, boxes, i,
						exec->DR1, exec->DR4);
			if (ret)
				return ret;
		}

		if (IS_I830(dev) || IS_845G(dev)) {
			BEGIN_LP_RING(4);
			OUT_RING(MI_BATCH_BUFFER);
			OUT_RING(exec_start | MI_BATCH_NON_SECURE);
			OUT_RING(exec_start + exec_len - 4);
			OUT_RING(0);
			ADVANCE_LP_RING();
		} else {
			BEGIN_LP_RING(2);
			if (IS_I965G(dev)) {
				OUT_RING(MI_BATCH_BUFFER_START |
					 (2 << 6) |
					 MI_BATCH_NON_SECURE_I965);
				OUT_RING(exec_start);
			} else {
				OUT_RING(MI_BATCH_BUFFER_START |
					 (2 << 6));
				OUT_RING(exec_start | MI_BATCH_NON_SECURE);
			}
			ADVANCE_LP_RING();
		}
	}

	/* XXX breadcrumb */
	return 0;
}
/* Throttle our rendering by waiting until the ring has completed our requests
 * emitted over 20 msec ago.
 *
 * This should get us reasonable parallelism between CPU and GPU but also
 * relatively low latency when blocking on a particular request to finish.
 */
static int
i915_gem_ring_throttle(struct drm_device *dev, struct drm_file *file_priv)
{
	struct drm_i915_file_private *i915_file_priv = file_priv->driver_priv;
	int ret = 0;
	uint32_t seqno;

	mutex_lock(&dev->struct_mutex);
	seqno = i915_file_priv->mm.last_gem_throttle_seqno;
	i915_file_priv->mm.last_gem_throttle_seqno =
		i915_file_priv->mm.last_gem_seqno;
	if (seqno)
		ret = i915_wait_request(dev, seqno);
	mutex_unlock(&dev->struct_mutex);
	return ret;
}
int
i915_gem_execbuffer(struct drm_device *dev, void *data,
		    struct drm_file *file_priv)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_i915_file_private *i915_file_priv = file_priv->driver_priv;
	struct drm_i915_gem_execbuffer *args = data;
	struct drm_i915_gem_exec_object *exec_list = NULL;
	struct drm_gem_object **object_list = NULL;
	struct drm_gem_object *batch_obj;
	int ret, i, pinned = 0;
	uint64_t exec_offset;
	uint32_t seqno, flush_domains;

	DRM_INFO("buffers_ptr %d buffer_count %d len %08x\n",
		 (int) args->buffers_ptr, args->buffer_count, args->batch_len);

	if (args->buffer_count < 1) {
		DRM_ERROR("execbuf with %d buffers\n", args->buffer_count);
		return -EINVAL;
	}
	/* Copy in the exec list from userland */
	exec_list = drm_calloc(sizeof(*exec_list), args->buffer_count,
			       DRM_MEM_DRIVER);
	object_list = drm_calloc(sizeof(*object_list), args->buffer_count,
				 DRM_MEM_DRIVER);
	if (exec_list == NULL || object_list == NULL) {
		DRM_ERROR("Failed to allocate exec or object list "
			  "for %d buffers\n",
			  args->buffer_count);
		ret = -ENOMEM;
		goto pre_mutex_err;
	}
	ret = copy_from_user(exec_list,
			     (struct drm_i915_relocation_entry __user *)
			     (uintptr_t) args->buffers_ptr,
			     sizeof(*exec_list) * args->buffer_count);
	if (ret != 0) {
		DRM_ERROR("copy %d exec entries failed %d\n",
			  args->buffer_count, ret);
		goto pre_mutex_err;
	}

	mutex_lock(&dev->struct_mutex);

	i915_verify_inactive(dev, __FILE__, __LINE__);

	if (dev_priv->mm.wedged) {
		DRM_ERROR("Execbuf while wedged\n");
		mutex_unlock(&dev->struct_mutex);
		return -EIO;
	}

	if (dev_priv->mm.suspended) {
		DRM_ERROR("Execbuf while VT-switched.\n");
		mutex_unlock(&dev->struct_mutex);
		return -EBUSY;
	}

	/* Zero the global flush/invalidate flags. These
	 * will be modified as each object is bound to the
	 * gtt
	 */
	dev->invalidate_domains = 0;
	dev->flush_domains = 0;

	/* Look up object handles and perform the relocations */
	for (i = 0; i < args->buffer_count; i++) {
		object_list[i] = drm_gem_object_lookup(dev, file_priv,
						       exec_list[i].handle);
		if (object_list[i] == NULL) {
			DRM_ERROR("Invalid object handle %d at index %d\n",
				  exec_list[i].handle, i);
			ret = -EBADF;
			goto err;
		}

		object_list[i]->pending_read_domains = 0;
		object_list[i]->pending_write_domain = 0;
		ret = i915_gem_object_pin_and_relocate(object_list[i],
						       file_priv,
						       &exec_list[i]);
		if (ret) {
			DRM_ERROR("object bind and relocate failed %d\n", ret);
			goto err;
		}
		pinned = i + 1;
	}

	/* Set the pending read domains for the batch buffer to COMMAND */
	batch_obj = object_list[args->buffer_count-1];
	batch_obj->pending_read_domains = I915_GEM_DOMAIN_COMMAND;
	batch_obj->pending_write_domain = 0;

	i915_verify_inactive(dev, __FILE__, __LINE__);

	for (i = 0; i < args->buffer_count; i++) {
		struct drm_gem_object *obj = object_list[i];
		struct drm_i915_gem_object *obj_priv = obj->driver_private;

		if (obj_priv->gtt_space == NULL) {
			/* We evicted the buffer in the process of validating
			 * our set of buffers in.  We could try to recover by
			 * kicking them everything out and trying again from
			 * the start.
			 */
			ret = -ENOMEM;
			goto err;
		}

		/* make sure all previous memory operations have passed */
		ret = i915_gem_object_set_domain(obj,
						 obj->pending_read_domains,
						 obj->pending_write_domain);
		if (ret)
			goto err;
	}

	i915_verify_inactive(dev, __FILE__, __LINE__);

	/* Flush/invalidate caches and chipset buffer */
	flush_domains = i915_gem_dev_set_domain(dev);

	i915_verify_inactive(dev, __FILE__, __LINE__);

	for (i = 0; i < args->buffer_count; i++) {
		i915_gem_object_check_coherency(object_list[i],
						exec_list[i].handle);
	}

	exec_offset = exec_list[args->buffer_count - 1].offset;

	i915_gem_dump_object(object_list[args->buffer_count - 1],
			     args->batch_len,
			     __func__,
			     ~0);

	(void)i915_add_request(dev, flush_domains);

	/* Exec the batchbuffer */
	ret = i915_dispatch_gem_execbuffer(dev, args, exec_offset);
	if (ret) {
		DRM_ERROR("dispatch failed %d\n", ret);
		goto err;
	}

	/*
	 * Ensure that the commands in the batch buffer are
	 * finished before the interrupt fires
	 */
	flush_domains = i915_retire_commands(dev);

	i915_verify_inactive(dev, __FILE__, __LINE__);

	/*
	 * Get a seqno representing the execution of the current buffer,
	 * which we can wait on.  We would like to mitigate these interrupts,
	 * likely by only creating seqnos occasionally (so that we have
	 * *some* interrupts representing completion of buffers that we can
	 * wait on when trying to clear up gtt space).
	 */
	seqno = i915_add_request(dev, flush_domains);
	i915_file_priv->mm.last_gem_seqno = seqno;
	for (i = 0; i < args->buffer_count; i++) {
		struct drm_gem_object *obj = object_list[i];
		struct drm_i915_gem_object *obj_priv = obj->driver_private;

		i915_gem_object_move_to_active(obj);
		obj_priv->last_rendering_seqno = seqno;
		DRM_INFO("%s: move to exec list %p\n", __func__, obj);
	}

	i915_dump_lru(dev, __func__);

	i915_verify_inactive(dev, __FILE__, __LINE__);

	/* Copy the new buffer offsets back to the user's exec list. */
	ret = copy_to_user((struct drm_i915_relocation_entry __user *)
			   (uintptr_t) args->buffers_ptr,
			   exec_list,
			   sizeof(*exec_list) * args->buffer_count);
	if (ret)
		DRM_ERROR("failed to copy %d exec entries "
			  "back to user (%d)\n",
			  args->buffer_count, ret);
err:
	if (object_list != NULL) {
		for (i = 0; i < pinned; i++)
			i915_gem_object_unpin(object_list[i]);

		for (i = 0; i < args->buffer_count; i++)
			drm_gem_object_unreference(object_list[i]);
	}
	mutex_unlock(&dev->struct_mutex);

pre_mutex_err:
	drm_free(object_list, sizeof(*object_list) * args->buffer_count,
		 DRM_MEM_DRIVER);
	drm_free(exec_list, sizeof(*exec_list) * args->buffer_count,
		 DRM_MEM_DRIVER);

	return ret;
}
int
i915_gem_object_pin(struct drm_gem_object *obj, uint32_t alignment)
{
	struct drm_device *dev = obj->dev;
	struct drm_i915_gem_object *obj_priv = obj->driver_private;
	int ret;

	i915_verify_inactive(dev, __FILE__, __LINE__);
	if (obj_priv->gtt_space == NULL) {
		ret = i915_gem_object_bind_to_gtt(obj, alignment);
		if (ret != 0) {
			DRM_ERROR("Failure to bind: %d", ret);
			return ret;
		}
	}

	obj_priv->pin_count++;

	/* If the object is not active and not pending a flush,
	 * remove it from the inactive list
	 */
	if (obj_priv->pin_count == 1) {
		atomic_inc(&dev->pin_count);
		atomic_add(obj->size, &dev->pin_memory);
		if (!obj_priv->active &&
		    (obj->write_domain & ~(I915_GEM_DOMAIN_CPU |
					   I915_GEM_DOMAIN_GTT)) == 0 &&
		    !list_empty(&obj_priv->list))
			list_del_init(&obj_priv->list);
	}
	i915_verify_inactive(dev, __FILE__, __LINE__);

	return 0;
}
void
i915_gem_object_unpin(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_i915_gem_object *obj_priv = obj->driver_private;

	i915_verify_inactive(dev, __FILE__, __LINE__);
	obj_priv->pin_count--;
	BUG_ON(obj_priv->pin_count < 0);
	BUG_ON(obj_priv->gtt_space == NULL);

	/* If the object is no longer pinned, and is
	 * neither active nor being flushed, then stick it on
	 * the inactive list
	 */
	if (obj_priv->pin_count == 0) {
		if (!obj_priv->active &&
		    (obj->write_domain & ~(I915_GEM_DOMAIN_CPU |
					   I915_GEM_DOMAIN_GTT)) == 0)
			list_move_tail(&obj_priv->list,
				       &dev_priv->mm.inactive_list);
		atomic_dec(&dev->pin_count);
		atomic_sub(obj->size, &dev->pin_memory);
	}
	i915_verify_inactive(dev, __FILE__, __LINE__);
}
int
i915_gem_pin_ioctl(struct drm_device *dev, void *data,
		   struct drm_file *file_priv)
{
	struct drm_i915_gem_pin *args = data;
	struct drm_gem_object *obj;
	struct drm_i915_gem_object *obj_priv;
	int ret;

	mutex_lock(&dev->struct_mutex);

	obj = drm_gem_object_lookup(dev, file_priv, args->handle);
	if (obj == NULL) {
		DRM_ERROR("Bad handle in i915_gem_pin_ioctl(): %d\n",
			  args->handle);
		mutex_unlock(&dev->struct_mutex);
		return -EBADF;
	}
	obj_priv = obj->driver_private;

	ret = i915_gem_object_pin(obj, args->alignment);
	if (ret) {
		drm_gem_object_unreference(obj);
		mutex_unlock(&dev->struct_mutex);
		return ret;
	}

	/* XXX - flush the CPU caches for pinned objects
	 * as the X server doesn't manage domains yet
	 */
	if (obj->write_domain & I915_GEM_DOMAIN_CPU) {
		i915_gem_clflush_object(obj);
		drm_agp_chipset_flush(dev);
		obj->write_domain = 0;
	}
	args->offset = obj_priv->gtt_offset;
	drm_gem_object_unreference(obj);
	mutex_unlock(&dev->struct_mutex);

	return 0;
}
int
i915_gem_unpin_ioctl(struct drm_device *dev, void *data,
		     struct drm_file *file_priv)
{
	struct drm_i915_gem_pin *args = data;
	struct drm_gem_object *obj;

	mutex_lock(&dev->struct_mutex);

	obj = drm_gem_object_lookup(dev, file_priv, args->handle);
	if (obj == NULL) {
		DRM_ERROR("Bad handle in i915_gem_unpin_ioctl(): %d\n",
			  args->handle);
		mutex_unlock(&dev->struct_mutex);
		return -EBADF;
	}

	i915_gem_object_unpin(obj);

	drm_gem_object_unreference(obj);
	mutex_unlock(&dev->struct_mutex);
	return 0;
}
int
i915_gem_busy_ioctl(struct drm_device *dev, void *data,
		    struct drm_file *file_priv)
{
	struct drm_i915_gem_busy *args = data;
	struct drm_gem_object *obj;
	struct drm_i915_gem_object *obj_priv;

	mutex_lock(&dev->struct_mutex);
	obj = drm_gem_object_lookup(dev, file_priv, args->handle);
	if (obj == NULL) {
		DRM_ERROR("Bad handle in i915_gem_busy_ioctl(): %d\n",
			  args->handle);
		mutex_unlock(&dev->struct_mutex);
		return -EBADF;
	}

	obj_priv = obj->driver_private;
	args->busy = obj_priv->active;

	drm_gem_object_unreference(obj);
	mutex_unlock(&dev->struct_mutex);
	return 0;
}
int
i915_gem_throttle_ioctl(struct drm_device *dev, void *data,
			struct drm_file *file_priv)
{
	return i915_gem_ring_throttle(dev, file_priv);
}
int i915_gem_init_object(struct drm_gem_object *obj)
{
	struct drm_i915_gem_object *obj_priv;

	obj_priv = drm_calloc(1, sizeof(*obj_priv), DRM_MEM_DRIVER);
	if (obj_priv == NULL)
		return -ENOMEM;

	/*
	 * We've just allocated pages from the kernel,
	 * so they've just been written by the CPU with
	 * zeros. They'll need to be clflushed before we
	 * use them with the GPU.
	 */
	obj->write_domain = I915_GEM_DOMAIN_CPU;
	obj->read_domains = I915_GEM_DOMAIN_CPU;

	obj_priv->agp_type = AGP_USER_MEMORY;

	obj->driver_private = obj_priv;
	obj_priv->obj = obj;
	INIT_LIST_HEAD(&obj_priv->list);
	return 0;
}
void i915_gem_free_object(struct drm_gem_object *obj)
{
	struct drm_i915_gem_object *obj_priv = obj->driver_private;

	while (obj_priv->pin_count > 0)
		i915_gem_object_unpin(obj);

	i915_gem_object_unbind(obj);

	drm_free(obj_priv->page_cpu_valid, 1, DRM_MEM_DRIVER);
	drm_free(obj->driver_private, 1, DRM_MEM_DRIVER);
}
static int
i915_gem_set_domain(struct drm_gem_object *obj,
		    struct drm_file *file_priv,
		    uint32_t read_domains,
		    uint32_t write_domain)
{
	struct drm_device *dev = obj->dev;
	int ret;
	uint32_t flush_domains;

	BUG_ON(!mutex_is_locked(&dev->struct_mutex));

	ret = i915_gem_object_set_domain(obj, read_domains, write_domain);
	if (ret)
		return ret;
	flush_domains = i915_gem_dev_set_domain(obj->dev);

	if (flush_domains & ~(I915_GEM_DOMAIN_CPU|I915_GEM_DOMAIN_GTT))
		(void) i915_add_request(dev, flush_domains);

	return 0;
}
/** Unbinds all objects that are on the given buffer list. */
static int
i915_gem_evict_from_list(struct drm_device *dev, struct list_head *head)
{
	struct drm_gem_object *obj;
	struct drm_i915_gem_object *obj_priv;
	int ret;

	while (!list_empty(head)) {
		obj_priv = list_first_entry(head,
					    struct drm_i915_gem_object,
					    list);
		obj = obj_priv->obj;

		if (obj_priv->pin_count != 0) {
			DRM_ERROR("Pinned object in unbind list\n");
			mutex_unlock(&dev->struct_mutex);
			return -EINVAL;
		}

		ret = i915_gem_object_unbind(obj);
		if (ret != 0) {
			DRM_ERROR("Error unbinding object in LeaveVT: %d\n",
				  ret);
			mutex_unlock(&dev->struct_mutex);
			return ret;
		}
	}

	return 0;
}
static int
i915_gem_idle(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	uint32_t seqno, cur_seqno, last_seqno;
	int stuck, ret;

	mutex_lock(&dev->struct_mutex);

	if (dev_priv->mm.suspended || dev_priv->ring.ring_obj == NULL) {
		mutex_unlock(&dev->struct_mutex);
		return 0;
	}

	/* Hack!  Don't let anybody do execbuf while we don't control the chip.
	 * We need to replace this with a semaphore, or something.
	 */
	dev_priv->mm.suspended = 1;

	/* Cancel the retire work handler, wait for it to finish if running
	 */
	mutex_unlock(&dev->struct_mutex);
	cancel_delayed_work_sync(&dev_priv->mm.retire_work);
	mutex_lock(&dev->struct_mutex);

	i915_kernel_lost_context(dev);

	/* Flush the GPU along with all non-CPU write domains
	 */
	i915_gem_flush(dev, ~(I915_GEM_DOMAIN_CPU|I915_GEM_DOMAIN_GTT),
		       ~(I915_GEM_DOMAIN_CPU|I915_GEM_DOMAIN_GTT));
	seqno = i915_add_request(dev, ~(I915_GEM_DOMAIN_CPU |
					I915_GEM_DOMAIN_GTT));

	if (seqno == 0) {
		mutex_unlock(&dev->struct_mutex);
		return -ENOMEM;
	}

	dev_priv->mm.waiting_gem_seqno = seqno;
	last_seqno = 0;
	stuck = 0;
	for (;;) {
		cur_seqno = i915_get_gem_seqno(dev);
		if (i915_seqno_passed(cur_seqno, seqno))
			break;
		if (last_seqno == cur_seqno) {
			if (stuck++ > 100) {
				DRM_ERROR("hardware wedged\n");
				dev_priv->mm.wedged = 1;
				DRM_WAKEUP(&dev_priv->irq_queue);
				break;
			}
		}
		msleep(10);
		last_seqno = cur_seqno;
	}
	dev_priv->mm.waiting_gem_seqno = 0;

	i915_gem_retire_requests(dev);

	/* Active and flushing should now be empty as we've
	 * waited for a sequence higher than any pending execbuffer
	 */
	BUG_ON(!list_empty(&dev_priv->mm.active_list));
	BUG_ON(!list_empty(&dev_priv->mm.flushing_list));

	/* Request should now be empty as we've also waited
	 * for the last request in the list
	 */
	BUG_ON(!list_empty(&dev_priv->mm.request_list));

	/* Move all buffers out of the GTT. */
	ret = i915_gem_evict_from_list(dev, &dev_priv->mm.inactive_list);
	if (ret) {
		mutex_unlock(&dev->struct_mutex);
		return ret;
	}

	BUG_ON(!list_empty(&dev_priv->mm.active_list));
	BUG_ON(!list_empty(&dev_priv->mm.flushing_list));
	BUG_ON(!list_empty(&dev_priv->mm.inactive_list));
	BUG_ON(!list_empty(&dev_priv->mm.request_list));

	i915_gem_cleanup_ringbuffer(dev);
	mutex_unlock(&dev->struct_mutex);

	return 0;
}
static int
i915_gem_init_hws(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_gem_object *obj;
	struct drm_i915_gem_object *obj_priv;
	int ret;

	/* If we need a physical address for the status page, it's already
	 * initialized at driver load time.
	 */
	if (!I915_NEED_GFX_HWS(dev))
		return 0;

	obj = drm_gem_object_alloc(dev, 4096);
	if (obj == NULL) {
		DRM_ERROR("Failed to allocate status page\n");
		return -ENOMEM;
	}
	obj_priv = obj->driver_private;
	obj_priv->agp_type = AGP_USER_CACHED_MEMORY;

	ret = i915_gem_object_pin(obj, 4096);
	if (ret != 0) {
		drm_gem_object_unreference(obj);
		return ret;
	}

	dev_priv->status_gfx_addr = obj_priv->gtt_offset;

	dev_priv->hw_status_page = kmap(obj_priv->page_list[0]);
	if (dev_priv->hw_status_page == NULL) {
		DRM_ERROR("Failed to map status page.\n");
		memset(&dev_priv->hws_map, 0, sizeof(dev_priv->hws_map));
		drm_gem_object_unreference(obj);
		return -EINVAL;
	}
	dev_priv->hws_obj = obj;
	memset(dev_priv->hw_status_page, 0, PAGE_SIZE);
	I915_WRITE(HWS_PGA, dev_priv->status_gfx_addr);
	I915_READ(HWS_PGA); /* posting read */
	DRM_DEBUG("hws offset: 0x%08x\n", dev_priv->status_gfx_addr);

	return 0;
}
static int
i915_gem_init_ringbuffer(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_gem_object *obj;
	struct drm_i915_gem_object *obj_priv;
	int ret;
	u32 head;

	ret = i915_gem_init_hws(dev);
	if (ret != 0)
		return ret;

	obj = drm_gem_object_alloc(dev, 128 * 1024);
	if (obj == NULL) {
		DRM_ERROR("Failed to allocate ringbuffer\n");
		return -ENOMEM;
	}
	obj_priv = obj->driver_private;

	ret = i915_gem_object_pin(obj, 4096);
	if (ret != 0) {
		drm_gem_object_unreference(obj);
		return ret;
	}

	/* Set up the kernel mapping for the ring. */
	dev_priv->ring.Size = obj->size;
	dev_priv->ring.tail_mask = obj->size - 1;

	dev_priv->ring.map.offset = dev->agp->base + obj_priv->gtt_offset;
	dev_priv->ring.map.size = obj->size;
	dev_priv->ring.map.type = 0;
	dev_priv->ring.map.flags = 0;
	dev_priv->ring.map.mtrr = 0;

	drm_core_ioremap_wc(&dev_priv->ring.map, dev);
	if (dev_priv->ring.map.handle == NULL) {
		DRM_ERROR("Failed to map ringbuffer.\n");
		memset(&dev_priv->ring, 0, sizeof(dev_priv->ring));
		drm_gem_object_unreference(obj);
		return -EINVAL;
	}
	dev_priv->ring.ring_obj = obj;
	dev_priv->ring.virtual_start = dev_priv->ring.map.handle;

	/* Stop the ring if it's running. */
	I915_WRITE(PRB0_CTL, 0);
	I915_WRITE(PRB0_TAIL, 0);
	I915_WRITE(PRB0_HEAD, 0);

	/* Initialize the ring. */
	I915_WRITE(PRB0_START, obj_priv->gtt_offset);
	head = I915_READ(PRB0_HEAD) & HEAD_ADDR;

	/* G45 ring initialization fails to reset head to zero */
	if (head != 0) {
		DRM_ERROR("Ring head not reset to zero "
			  "ctl %08x head %08x tail %08x start %08x\n",
			  I915_READ(PRB0_CTL),
			  I915_READ(PRB0_HEAD),
			  I915_READ(PRB0_TAIL),
			  I915_READ(PRB0_START));
		I915_WRITE(PRB0_HEAD, 0);

		DRM_ERROR("Ring head forced to zero "
			  "ctl %08x head %08x tail %08x start %08x\n",
			  I915_READ(PRB0_CTL),
			  I915_READ(PRB0_HEAD),
			  I915_READ(PRB0_TAIL),
			  I915_READ(PRB0_START));
	}

	I915_WRITE(PRB0_CTL,
		   ((obj->size - 4096) & RING_NR_PAGES) |
		   RING_NO_REPORT |
		   RING_VALID);

	head = I915_READ(PRB0_HEAD) & HEAD_ADDR;

	/* If the head is still not zero, the ring is dead */
	if (head != 0) {
		DRM_ERROR("Ring initialization failed "
			  "ctl %08x head %08x tail %08x start %08x\n",
			  I915_READ(PRB0_CTL),
			  I915_READ(PRB0_HEAD),
			  I915_READ(PRB0_TAIL),
			  I915_READ(PRB0_START));
		return -EIO;
	}

	/* Update our cache of the ring state */
	i915_kernel_lost_context(dev);

	return 0;
}
static void
i915_gem_cleanup_ringbuffer(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;

	if (dev_priv->ring.ring_obj == NULL)
		return;

	drm_core_ioremapfree(&dev_priv->ring.map, dev);

	i915_gem_object_unpin(dev_priv->ring.ring_obj);
	drm_gem_object_unreference(dev_priv->ring.ring_obj);
	dev_priv->ring.ring_obj = NULL;
	memset(&dev_priv->ring, 0, sizeof(dev_priv->ring));

	if (dev_priv->hws_obj != NULL) {
		struct drm_gem_object *obj = dev_priv->hws_obj;
		struct drm_i915_gem_object *obj_priv = obj->driver_private;

		kunmap(obj_priv->page_list[0]);
		i915_gem_object_unpin(obj);
		drm_gem_object_unreference(obj);
		dev_priv->hws_obj = NULL;
		memset(&dev_priv->hws_map, 0, sizeof(dev_priv->hws_map));
		dev_priv->hw_status_page = NULL;

		/* Write high address into HWS_PGA when disabling. */
		I915_WRITE(HWS_PGA, 0x1ffff000);
	}
}
int
i915_gem_entervt_ioctl(struct drm_device *dev, void *data,
		       struct drm_file *file_priv)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	int ret;

	if (dev_priv->mm.wedged) {
		DRM_ERROR("Reenabling wedged hardware, good luck\n");
		dev_priv->mm.wedged = 0;
	}

	ret = i915_gem_init_ringbuffer(dev);
	if (ret != 0)
		return ret;

	dev_priv->mm.gtt_mapping = io_mapping_create_wc(dev->agp->base,
							dev->agp->agp_info.aper_size
							* 1024 * 1024);

	mutex_lock(&dev->struct_mutex);
	BUG_ON(!list_empty(&dev_priv->mm.active_list));
	BUG_ON(!list_empty(&dev_priv->mm.flushing_list));
	BUG_ON(!list_empty(&dev_priv->mm.inactive_list));
	BUG_ON(!list_empty(&dev_priv->mm.request_list));
	dev_priv->mm.suspended = 0;
	mutex_unlock(&dev->struct_mutex);

	drm_irq_install(dev);

	return 0;
}
int
i915_gem_leavevt_ioctl(struct drm_device *dev, void *data,
		       struct drm_file *file_priv)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	int ret;

	ret = i915_gem_idle(dev);
	drm_irq_uninstall(dev);

	io_mapping_free(dev_priv->mm.gtt_mapping);
	return ret;
}
void
i915_gem_lastclose(struct drm_device *dev)
{
	int ret;

	ret = i915_gem_idle(dev);
	if (ret)
		DRM_ERROR("failed to idle hardware: %d\n", ret);
}
void
i915_gem_load(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;

	INIT_LIST_HEAD(&dev_priv->mm.active_list);
	INIT_LIST_HEAD(&dev_priv->mm.flushing_list);
	INIT_LIST_HEAD(&dev_priv->mm.inactive_list);
	INIT_LIST_HEAD(&dev_priv->mm.request_list);
	INIT_DELAYED_WORK(&dev_priv->mm.retire_work,
			  i915_gem_retire_work_handler);
	dev_priv->mm.next_gem_seqno = 1;

	i915_gem_detect_bit_6_swizzle(dev);
}