/*
 * Copyright © 2008-2010 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Eric Anholt <eric@anholt.net>
 *    Chris Wilson <chris@chris-wilson.co.uk>
 *
 */
#include <drm/drmP.h>
#include "i915_drv.h"
#include <drm/i915_drm.h>
#include "i915_trace.h"
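/*
 * mark_free - hand a vma to the drm_mm eviction scanner
 *
 * Pinned objects cannot be evicted, and a vma whose exec_list is already
 * in use is being tracked elsewhere, so both are skipped. Every other vma
 * is added to the caller's unwind list and fed to the scan; the return
 * value of drm_mm_scan_add_block() tells the caller when enough contiguous
 * space has been accumulated to satisfy the allocation.
 */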
static bool
mark_free(struct i915_vma *vma, struct list_head *unwind)
{
	if (vma->obj->pin_count)
		return false;

	if (WARN_ON(!list_empty(&vma->exec_list)))
		return false;

	list_add(&vma->exec_list, unwind);
	return drm_mm_scan_add_block(&vma->node);
}
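
/*
 * i915_gem_evict_something - scan the given address space, in LRU order,
 * for bound objects that can be evicted to create a hole of at least
 * @min_size with the requested @alignment and @cache_level. With @mappable
 * set, only the CPU-mappable portion of the global GTT is considered; with
 * @nonblocking set, only already idle (inactive) objects are scanned.
 * Returns 0 on success, -ENOSPC if no suitable hole could be assembled,
 * or the first unbind error.
 *
 * A typical caller pattern (a sketch; the actual binding code lives in
 * i915_gem.c) is to attempt drm_mm_insert_node_in_range_generic() first
 * and, on -ENOSPC, call this function and then retry the insertion.
 */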
int
i915_gem_evict_something(struct drm_device *dev, struct i915_address_space *vm,
			 int min_size, unsigned alignment, unsigned cache_level,
			 bool mappable, bool nonblocking)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct list_head eviction_list, unwind_list;
	struct i915_vma *vma;
	int ret = 0;

	trace_i915_gem_evict(dev, min_size, alignment, mappable);
	/*
	 * The goal is to evict objects and amalgamate space in LRU order.
	 * The oldest idle objects reside on the inactive list, which is in
	 * retirement order. The next objects to retire are those on the (per
	 * ring) active list that do not have an outstanding flush. Once the
	 * hardware reports completion (the seqno is updated after the
	 * batchbuffer has been finished) the clean buffer objects would
	 * be retired to the inactive list. Any dirty objects would be added
	 * to the tail of the flushing list. So after processing the clean
	 * active objects we need to emit a MI_FLUSH to retire the flushing
	 * list, hence the retirement order of the flushing list is in
	 * advance of the dirty objects on the active lists.
	 *
	 * The retirement sequence is thus:
	 *   1. Inactive objects (already retired)
	 *   2. Clean active objects
	 *   3. Flushing list
	 *   4. Dirty active objects.
	 *
	 * On each list, the oldest objects lie at the HEAD with the freshest
	 * object on the TAIL.
	 */
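
	/*
	 * The drm_mm scanner operates in three steps: initialise the scan
	 * (optionally restricted to a range), feed it candidate nodes with
	 * drm_mm_scan_add_block() until it reports that a large enough hole
	 * can be formed, then walk the candidates in reverse with
	 * drm_mm_scan_remove_block(), which returns true for exactly those
	 * nodes that must be evicted to create the hole.
	 */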
	INIT_LIST_HEAD(&unwind_list);
	if (mappable) {
		BUG_ON(!i915_is_ggtt(vm));
		drm_mm_init_scan_with_range(&vm->mm, min_size,
					    alignment, cache_level, 0,
					    dev_priv->gtt.mappable_end);
	} else
		drm_mm_init_scan(&vm->mm, min_size, alignment, cache_level);
	/* First see if there is a large enough contiguous idle region... */
	list_for_each_entry(vma, &vm->inactive_list, mm_list) {
		if (mark_free(vma, &unwind_list))
			goto found;
	}

	if (nonblocking)
		goto none;

	/* Now merge in the soon-to-be-expired objects... */
	list_for_each_entry(vma, &vm->active_list, mm_list) {
		if (mark_free(vma, &unwind_list))
			goto found;
	}
none:
	/* Nothing found, clean up and bail out! */
	while (!list_empty(&unwind_list)) {
		vma = list_first_entry(&unwind_list,
				       struct i915_vma,
				       exec_list);
		ret = drm_mm_scan_remove_block(&vma->node);
		BUG_ON(ret);

		list_del_init(&vma->exec_list);
	}

	/* We expect the caller to unpin, evict all and try again, or give up.
	 * So calling i915_gem_evict_everything() is unnecessary.
	 */
	return -ENOSPC;
found:
	/* drm_mm doesn't allow any other operations while
	 * scanning, therefore store to be evicted objects on a
	 * temporary list. */
	INIT_LIST_HEAD(&eviction_list);
	while (!list_empty(&unwind_list)) {
		vma = list_first_entry(&unwind_list,
				       struct i915_vma,
				       exec_list);
		if (drm_mm_scan_remove_block(&vma->node)) {
			list_move(&vma->exec_list, &eviction_list);
			drm_gem_object_reference(&vma->obj->base);
			continue;
		}
		list_del_init(&vma->exec_list);
	}
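
	/*
	 * Each object chosen for eviction was referenced above so that it
	 * cannot vanish while it sits on the eviction_list; the reference
	 * is dropped again once its vma has been unbound. The first unbind
	 * error, if any, is preserved and reported to the caller.
	 */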
	/* Unbinding will emit any required flushes */
	while (!list_empty(&eviction_list)) {
		struct drm_gem_object *obj;

		vma = list_first_entry(&eviction_list,
				       struct i915_vma,
				       exec_list);

		obj = &vma->obj->base;
		list_del_init(&vma->exec_list);
		if (ret == 0)
			ret = i915_vma_unbind(vma);

		drm_gem_object_unreference(obj);
	}

	return ret;
}
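
/*
 * i915_gem_evict_vm - unbind every unpinned object from the given address
 * space. With @do_idle set, the GPU is idled and outstanding requests are
 * retired first, so that as many objects as possible end up on the
 * inactive list before it is walked.
 */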
static int i915_gem_evict_vm(struct i915_address_space *vm, bool do_idle)
{
	struct i915_vma *vma, *next;
	int ret;

	if (do_idle) {
		ret = i915_gpu_idle(vm->dev);
		if (ret)
			return ret;

		i915_gem_retire_requests(vm->dev);
	}

	list_for_each_entry_safe(vma, next, &vm->inactive_list, mm_list)
		if (vma->obj->pin_count == 0)
			WARN_ON(i915_vma_unbind(vma));

	return 0;
}
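
/*
 * i915_gem_evict_everything - evict every evictable object from every
 * address space. Used as a last resort under memory pressure: idle the
 * GPU, retire all requests and then unbind whatever is left unpinned.
 * Returns -ENOSPC when there is nothing to evict.
 */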
int
i915_gem_evict_everything(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct i915_address_space *vm;
	bool lists_empty = true;
	int ret;

	list_for_each_entry(vm, &dev_priv->vm_list, global_link) {
		lists_empty = (list_empty(&vm->inactive_list) &&
			       list_empty(&vm->active_list));
		if (!lists_empty)
			break;
	}

	if (lists_empty)
		return -ENOSPC;

	trace_i915_gem_evict_everything(dev);

	/* The gpu_idle will flush everything in the write domain to the
	 * active list. Then we must move everything off the active list
	 * with retire requests.
	 */
	ret = i915_gpu_idle(dev);
	if (ret)
		return ret;

	i915_gem_retire_requests(dev);

	/* Having flushed everything, unbind() should never raise an error */
	list_for_each_entry(vm, &dev_priv->vm_list, global_link)
		WARN_ON(i915_gem_evict_vm(vm, false));

	return 0;
}