/*
 * Copyright © 2008-2010 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Eric Anholt <eric@anholt.net>
 *    Chris Wilson <chris@chris-wilson.co.uk>
 *
 */

#include <drm/drmP.h>
#include "i915_drv.h"
#include <drm/i915_drm.h>
#include "i915_trace.h"

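/*
 * Add an unpinned vma to the current eviction scan.  The object is queued
 * on the caller's unwind list so the scan can later be rolled back, and
 * drm_mm_scan_add_block() reports whether enough contiguous space has now
 * been found.
 */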
static bool
mark_free(struct i915_vma *vma, struct list_head *unwind)
{
	if (vma->obj->pin_count)
		return false;

	list_add(&vma->obj->exec_list, unwind);
	return drm_mm_scan_add_block(&vma->node);
}

int
i915_gem_evict_something(struct drm_device *dev, struct i915_address_space *vm,
			 int min_size, unsigned alignment, unsigned cache_level,
			 bool mappable, bool nonblocking)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct list_head eviction_list, unwind_list;
	struct i915_vma *vma;
	struct drm_i915_gem_object *obj;
	int ret = 0;

	trace_i915_gem_evict(dev, min_size, alignment, mappable);

	/*
	 * The goal is to evict objects and amalgamate space in LRU order.
	 * The oldest idle objects reside on the inactive list, which is in
	 * retirement order. The next objects to retire are those on the (per
	 * ring) active list that do not have an outstanding flush. Once the
	 * hardware reports completion (the seqno is updated after the
	 * batchbuffer has been finished) the clean buffer objects would
	 * be retired to the inactive list. Any dirty objects would be added
	 * to the tail of the flushing list. So after processing the clean
	 * active objects we need to emit a MI_FLUSH to retire the flushing
	 * list, hence the retirement order of the flushing list is in
	 * advance of the dirty objects on the active lists.
	 *
	 * The retirement sequence is thus:
	 * 1. Inactive objects (already retired)
	 * 2. Clean active objects
	 * 3. Flushing list
	 * 4. Dirty active objects.
	 *
	 * On each list, the oldest objects lie at the HEAD with the freshest
	 * object on the TAIL.
	 */

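	/*
	 * When a mappable placement is requested, restrict the scan to the
	 * CPU-mappable portion of the global GTT (up to gtt.mappable_end).
	 */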
	INIT_LIST_HEAD(&unwind_list);
	if (mappable) {
		BUG_ON(!i915_is_ggtt(vm));
		drm_mm_init_scan_with_range(&vm->mm, min_size,
					    alignment, cache_level, 0,
					    dev_priv->gtt.mappable_end);
	} else
		drm_mm_init_scan(&vm->mm, min_size, alignment, cache_level);

	/* First see if there is a large enough contiguous idle region... */
	list_for_each_entry(vma, &vm->inactive_list, mm_list) {
		if (mark_free(vma, &unwind_list))
			goto found;
	}

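	/*
	 * If the caller cannot stall for the GPU, only idle (inactive)
	 * buffers may be considered; do not touch the active list.
	 */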
	if (nonblocking)
		goto none;

	/* Now merge in the soon-to-be-expired objects... */
	list_for_each_entry(vma, &vm->active_list, mm_list) {
		if (mark_free(vma, &unwind_list))
			goto found;
	}

none:
	/* Nothing found, clean up and bail out! */
	while (!list_empty(&unwind_list)) {
		obj = list_first_entry(&unwind_list,
				       struct drm_i915_gem_object,
				       exec_list);
		vma = i915_gem_obj_to_vma(obj, vm);
		ret = drm_mm_scan_remove_block(&vma->node);
		BUG_ON(ret);

		list_del_init(&obj->exec_list);
	}

	/* We expect the caller to unpin, evict all and try again, or give up.
	 * So calling i915_gem_evict_everything() is unnecessary.
	 */
	return -ENOSPC;

found:
	/* drm_mm doesn't allow any other operations while scanning,
	 * therefore store the objects to be evicted on a temporary list. */
	INIT_LIST_HEAD(&eviction_list);
	while (!list_empty(&unwind_list)) {
		obj = list_first_entry(&unwind_list,
				       struct drm_i915_gem_object,
				       exec_list);
		vma = i915_gem_obj_to_vma(obj, vm);
		if (drm_mm_scan_remove_block(&vma->node)) {
			list_move(&obj->exec_list, &eviction_list);
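			/* Hold a reference so the object cannot be freed
			 * while it sits on the local eviction_list. */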
			drm_gem_object_reference(&obj->base);
			continue;
		}
		list_del_init(&obj->exec_list);
	}

	/* Unbinding will emit any required flushes */
	while (!list_empty(&eviction_list)) {
		obj = list_first_entry(&eviction_list,
				       struct drm_i915_gem_object,
				       exec_list);
		if (ret == 0)
			ret = i915_vma_unbind(i915_gem_obj_to_vma(obj, vm));

		list_del_init(&obj->exec_list);
		drm_gem_object_unreference(&obj->base);
	}

	return ret;
}

int
i915_gem_evict_everything(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct i915_address_space *vm;
	struct i915_vma *vma, *next;
	bool lists_empty = true;
	int ret;

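	/*
	 * Check whether any VM still has bound objects; if every list is
	 * already empty there is nothing to evict.
	 */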
	list_for_each_entry(vm, &dev_priv->vm_list, global_link) {
		lists_empty = (list_empty(&vm->inactive_list) &&
			       list_empty(&vm->active_list));
		if (!lists_empty)
			break;
	}

	if (lists_empty)
		return -ENOSPC;

	trace_i915_gem_evict_everything(dev);

	/* The gpu_idle will flush everything in the write domain to the
	 * active list. Then we must move everything off the active list
	 * with retire requests.
	 */
	ret = i915_gpu_idle(dev);
	if (ret)
		return ret;

	i915_gem_retire_requests(dev);

	/* Having flushed everything, unbind() should never raise an error */
	list_for_each_entry(vm, &dev_priv->vm_list, global_link) {
		list_for_each_entry_safe(vma, next, &vm->inactive_list, mm_list)
			if (vma->obj->pin_count == 0)
				WARN_ON(i915_vma_unbind(vma));
	}

	return 0;
}