Commit | Line | Data |
---|---|---|
c078aa2f TH |
1 | /************************************************************************** |
2 | * | |
3 | * Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA | |
4 | * All Rights Reserved. | |
5 | * | |
6 | * Permission is hereby granted, free of charge, to any person obtaining a | |
7 | * copy of this software and associated documentation files (the | |
8 | * "Software"), to deal in the Software without restriction, including | |
9 | * without limitation the rights to use, copy, modify, merge, publish, | |
10 | * distribute, sub license, and/or sell copies of the Software, and to | |
11 | * permit persons to whom the Software is furnished to do so, subject to | |
12 | * the following conditions: | |
13 | * | |
14 | * The above copyright notice and this permission notice (including the | |
15 | * next paragraph) shall be included in all copies or substantial portions | |
16 | * of the Software. | |
17 | * | |
18 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | |
19 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | |
20 | * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL | |
21 | * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, | |
22 | * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR | |
23 | * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE | |
24 | * USE OR OTHER DEALINGS IN THE SOFTWARE. | |
25 | * | |
26 | **************************************************************************/ | |
27 | ||
760285e7 DH |
28 | #include <drm/ttm/ttm_execbuf_util.h> |
29 | #include <drm/ttm/ttm_bo_driver.h> | |
30 | #include <drm/ttm/ttm_placement.h> | |
c078aa2f TH |
31 | #include <linux/wait.h> |
32 | #include <linux/sched.h> | |
33 | #include <linux/module.h> | |
34 | ||
d6ea8886 DA |
35 | static void ttm_eu_backoff_reservation_locked(struct list_head *list) |
36 | { | |
37 | struct ttm_validate_buffer *entry; | |
38 | ||
39 | list_for_each_entry(entry, list, head) { | |
40 | struct ttm_buffer_object *bo = entry->bo; | |
41 | if (!entry->reserved) | |
42 | continue; | |
43 | ||
44 | if (entry->removed) { | |
45 | ttm_bo_add_to_lru(bo); | |
46 | entry->removed = false; | |
47 | ||
48 | } | |
49 | entry->reserved = false; | |
50 | atomic_set(&bo->reserved, 0); | |
51 | wake_up_all(&bo->event_queue); | |
52 | } | |
53 | } | |
54 | ||
55 | static void ttm_eu_del_from_lru_locked(struct list_head *list) | |
56 | { | |
57 | struct ttm_validate_buffer *entry; | |
58 | ||
59 | list_for_each_entry(entry, list, head) { | |
60 | struct ttm_buffer_object *bo = entry->bo; | |
61 | if (!entry->reserved) | |
62 | continue; | |
63 | ||
64 | if (!entry->removed) { | |
65 | entry->put_count = ttm_bo_del_from_lru(bo); | |
66 | entry->removed = true; | |
67 | } | |
68 | } | |
69 | } | |
70 | ||
71 | static void ttm_eu_list_ref_sub(struct list_head *list) | |
72 | { | |
73 | struct ttm_validate_buffer *entry; | |
74 | ||
75 | list_for_each_entry(entry, list, head) { | |
76 | struct ttm_buffer_object *bo = entry->bo; | |
77 | ||
78 | if (entry->put_count) { | |
79 | ttm_bo_list_ref_sub(bo, entry->put_count, true); | |
80 | entry->put_count = 0; | |
81 | } | |
82 | } | |
83 | } | |
84 | ||
c078aa2f TH |
85 | void ttm_eu_backoff_reservation(struct list_head *list) |
86 | { | |
87 | struct ttm_validate_buffer *entry; | |
68c4fa31 | 88 | struct ttm_bo_global *glob; |
c078aa2f | 89 | |
68c4fa31 TH |
90 | if (list_empty(list)) |
91 | return; | |
c078aa2f | 92 | |
68c4fa31 TH |
93 | entry = list_first_entry(list, struct ttm_validate_buffer, head); |
94 | glob = entry->bo->glob; | |
95 | spin_lock(&glob->lru_lock); | |
96 | ttm_eu_backoff_reservation_locked(list); | |
97 | spin_unlock(&glob->lru_lock); | |
c078aa2f TH |
98 | } |
99 | EXPORT_SYMBOL(ttm_eu_backoff_reservation); | |
100 | ||
101 | /* | |
102 | * Reserve buffers for validation. | |
103 | * | |
104 | * If a buffer in the list is marked for CPU access, we back off and | |
105 | * wait for that buffer to become free for GPU access. | |
106 | * | |
107 | * If a buffer is reserved for another validation, the validator with | |
108 | * the highest validation sequence backs off and waits for that buffer | |
109 | * to become unreserved. This prevents deadlocks when validating multiple | |
110 | * buffers in different orders. | |
111 | */ | |
112 | ||
int ttm_eu_reserve_buffers(struct list_head *list)
{
	struct ttm_bo_global *glob;
	struct ttm_validate_buffer *entry;
	int ret;
	uint32_t val_seq;

	/* Nothing to reserve: trivially successful. */
	if (list_empty(list))
		return 0;

	/* Reset per-entry bookkeeping before the reservation pass. */
	list_for_each_entry(entry, list, head) {
		entry->reserved = false;
		entry->put_count = 0;
		entry->removed = false;
	}

	/* The global state (LRU lock, val_seq) is reached via any buffer. */
	entry = list_first_entry(list, struct ttm_validate_buffer, head);
	glob = entry->bo->glob;

	spin_lock(&glob->lru_lock);
	/* Validation sequence used for deadlock avoidance between validators. */
	val_seq = entry->bo->bdev->val_seq++;

retry:
	list_for_each_entry(entry, list, head) {
		struct ttm_buffer_object *bo = entry->bo;

		/* already slowpath reserved? */
		if (entry->reserved)
			continue;

		/* Try a non-blocking reservation first (no_wait = true). */
		ret = ttm_bo_reserve_nolru(bo, true, true, true, val_seq);
		switch (ret) {
		case 0:
			break;
		case -EBUSY:
			/*
			 * Buffer is busy: pull what we have off the LRU,
			 * drop the LRU lock and retry this buffer in
			 * blocking mode (no_wait = false).
			 */
			ttm_eu_del_from_lru_locked(list);
			spin_unlock(&glob->lru_lock);
			ret = ttm_bo_reserve_nolru(bo, true, false,
						   true, val_seq);
			spin_lock(&glob->lru_lock);
			if (!ret)
				break;

			if (unlikely(ret != -EAGAIN))
				goto err;

			/* fallthrough */
		case -EAGAIN:
			/*
			 * A validator with a lower sequence holds this
			 * buffer: back off everything we reserved so far,
			 * then wait on the contended buffer via the
			 * slowpath before restarting the whole pass.
			 */
			ttm_eu_backoff_reservation_locked(list);

			/*
			 * temporarily increase sequence number every retry,
			 * to prevent us from seeing our old reservation
			 * sequence when someone else reserved the buffer,
			 * but hasn't updated the seq_valid/seqno members yet.
			 */
			val_seq = entry->bo->bdev->val_seq++;

			spin_unlock(&glob->lru_lock);
			ttm_eu_list_ref_sub(list);
			ret = ttm_bo_reserve_slowpath_nolru(bo, true, val_seq);
			if (unlikely(ret != 0))
				return ret;
			spin_lock(&glob->lru_lock);
			entry->reserved = true;
			/* Buffer marked for CPU access: give up with -EBUSY. */
			if (unlikely(atomic_read(&bo->cpu_writers) > 0)) {
				ret = -EBUSY;
				goto err;
			}
			goto retry;
		default:
			goto err;
		}

		entry->reserved = true;
		/* Buffer marked for CPU access: give up with -EBUSY. */
		if (unlikely(atomic_read(&bo->cpu_writers) > 0)) {
			ret = -EBUSY;
			goto err;
		}
	}

	/* All reserved: remove from LRU, then drop the deferred refs. */
	ttm_eu_del_from_lru_locked(list);
	spin_unlock(&glob->lru_lock);
	ttm_eu_list_ref_sub(list);

	return 0;

err:
	/* Unwind every reservation taken so far and report the error. */
	ttm_eu_backoff_reservation_locked(list);
	spin_unlock(&glob->lru_lock);
	ttm_eu_list_ref_sub(list);
	return ret;
}
EXPORT_SYMBOL(ttm_eu_reserve_buffers);
207 | ||
/*
 * Attach @sync_obj as the new sync object of every buffer in @list and
 * release the reservations taken by ttm_eu_reserve_buffers(). The old
 * sync objects are unreferenced after all locks are dropped.
 */
void ttm_eu_fence_buffer_objects(struct list_head *list, void *sync_obj)
{
	struct ttm_validate_buffer *entry;
	struct ttm_buffer_object *bo;
	struct ttm_bo_global *glob;
	struct ttm_bo_device *bdev;
	struct ttm_bo_driver *driver;

	if (list_empty(list))
		return;

	/* All buffers share the same device/global; use the first one. */
	bo = list_first_entry(list, struct ttm_validate_buffer, head)->bo;
	bdev = bo->bdev;
	driver = bdev->driver;
	glob = bo->glob;

	/* Lock order: LRU lock first, then the device fence lock. */
	spin_lock(&glob->lru_lock);
	spin_lock(&bdev->fence_lock);

	list_for_each_entry(entry, list, head) {
		bo = entry->bo;
		/* Stash the old sync object; unref it after unlocking. */
		entry->old_sync_obj = bo->sync_obj;
		bo->sync_obj = driver->sync_obj_ref(sync_obj);
		ttm_bo_unreserve_locked(bo);
		entry->reserved = false;
	}
	spin_unlock(&bdev->fence_lock);
	spin_unlock(&glob->lru_lock);

	/* Drop old sync object references outside the spinlocks. */
	list_for_each_entry(entry, list, head) {
		if (entry->old_sync_obj)
			driver->sync_obj_unref(&entry->old_sync_obj);
	}
}
EXPORT_SYMBOL(ttm_eu_fence_buffer_objects);