/*
 * Copyright 2009 Jerome Glisse.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 */
/*
 * Authors:
 *    Jerome Glisse <glisse@freedesktop.org>
 *    Thomas Hellstrom <thomas-at-tungstengraphics-dot-com>
 *    Dave Airlie
 */
#include <linux/list.h>
#include <drm/drmP.h>
#include "radeon_drm.h"
#include "radeon.h"

struct radeon_object {
	struct ttm_buffer_object	tobj;
	struct list_head		list;
	struct radeon_device		*rdev;
	struct drm_gem_object		*gobj;
	struct ttm_bo_kmap_obj		kmap;
	unsigned			pin_count;
	uint64_t			gpu_addr;
	void				*kptr;
	bool				is_iomem;
	uint32_t			tiling_flags;
	uint32_t			pitch;
	int				surface_reg;
};

int radeon_ttm_init(struct radeon_device *rdev);
void radeon_ttm_fini(struct radeon_device *rdev);

/*
 * To exclude mutual BO access we rely on bo_reserve exclusion, as all
 * functions below call it before touching a buffer object.
 */
59 | ||
60 | static int radeon_object_reserve(struct radeon_object *robj, bool interruptible) | |
61 | { | |
62 | return ttm_bo_reserve(&robj->tobj, interruptible, false, false, 0); | |
63 | } | |
64 | ||
65 | static void radeon_object_unreserve(struct radeon_object *robj) | |
66 | { | |
67 | ttm_bo_unreserve(&robj->tobj); | |
68 | } | |
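
/*
 * A minimal usage sketch of the reserve/unreserve pattern (illustrative
 * only, error handling shortened):
 *
 *	r = radeon_object_reserve(robj, true);
 *	if (unlikely(r != 0))
 *		return r;
 *	... safely touch robj->tobj state ...
 *	radeon_object_unreserve(robj);
 */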
69 | ||
70 | static void radeon_ttm_object_object_destroy(struct ttm_buffer_object *tobj) | |
71 | { | |
72 | struct radeon_object *robj; | |
73 | ||
74 | robj = container_of(tobj, struct radeon_object, tobj); | |
75 | list_del_init(&robj->list); | |
e024e110 | 76 | radeon_object_clear_surface_reg(robj); |
771fe6b9 JG |
77 | kfree(robj); |
78 | } | |
79 | ||
static inline void radeon_object_gpu_addr(struct radeon_object *robj)
{
	/* Default gpu address */
	robj->gpu_addr = 0xFFFFFFFFFFFFFFFFULL;
	if (robj->tobj.mem.mm_node == NULL) {
		return;
	}
	robj->gpu_addr = ((u64)robj->tobj.mem.mm_node->start) << PAGE_SHIFT;
	switch (robj->tobj.mem.mem_type) {
	case TTM_PL_VRAM:
		robj->gpu_addr += (u64)robj->rdev->mc.vram_location;
		break;
	case TTM_PL_TT:
		robj->gpu_addr += (u64)robj->rdev->mc.gtt_location;
		break;
	default:
		DRM_ERROR("Unknown placement %d\n", robj->tobj.mem.mem_type);
		robj->gpu_addr = 0xFFFFFFFFFFFFFFFFULL;
		return;
	}
}
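
/*
 * Example of the address arithmetic above (illustrative numbers): a BO
 * whose mm_node starts at page 16 of VRAM, with 4 KiB pages and
 * mc.vram_location == 0, ends up with
 * gpu_addr = (16 << 12) + 0 = 0x10000.
 */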
101 | ||
102 | static inline uint32_t radeon_object_flags_from_domain(uint32_t domain) | |
103 | { | |
104 | uint32_t flags = 0; | |
105 | if (domain & RADEON_GEM_DOMAIN_VRAM) { | |
106 | flags |= TTM_PL_FLAG_VRAM; | |
107 | } | |
108 | if (domain & RADEON_GEM_DOMAIN_GTT) { | |
109 | flags |= TTM_PL_FLAG_TT; | |
110 | } | |
111 | if (domain & RADEON_GEM_DOMAIN_CPU) { | |
112 | flags |= TTM_PL_FLAG_SYSTEM; | |
113 | } | |
114 | if (!flags) { | |
115 | flags |= TTM_PL_FLAG_SYSTEM; | |
116 | } | |
117 | return flags; | |
118 | } | |
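
/*
 * For example, RADEON_GEM_DOMAIN_VRAM | RADEON_GEM_DOMAIN_GTT maps to
 * TTM_PL_FLAG_VRAM | TTM_PL_FLAG_TT, and a domain of 0 falls back to
 * TTM_PL_FLAG_SYSTEM so TTM always has at least one valid placement.
 */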
119 | ||
120 | int radeon_object_create(struct radeon_device *rdev, | |
121 | struct drm_gem_object *gobj, | |
122 | unsigned long size, | |
123 | bool kernel, | |
124 | uint32_t domain, | |
125 | bool interruptible, | |
126 | struct radeon_object **robj_ptr) | |
127 | { | |
128 | struct radeon_object *robj; | |
129 | enum ttm_bo_type type; | |
130 | uint32_t flags; | |
131 | int r; | |
132 | ||
133 | if (unlikely(rdev->mman.bdev.dev_mapping == NULL)) { | |
134 | rdev->mman.bdev.dev_mapping = rdev->ddev->dev_mapping; | |
135 | } | |
136 | if (kernel) { | |
137 | type = ttm_bo_type_kernel; | |
138 | } else { | |
139 | type = ttm_bo_type_device; | |
140 | } | |
141 | *robj_ptr = NULL; | |
142 | robj = kzalloc(sizeof(struct radeon_object), GFP_KERNEL); | |
143 | if (robj == NULL) { | |
144 | return -ENOMEM; | |
145 | } | |
146 | robj->rdev = rdev; | |
147 | robj->gobj = gobj; | |
e024e110 | 148 | robj->surface_reg = -1; |
771fe6b9 JG |
149 | INIT_LIST_HEAD(&robj->list); |
150 | ||
151 | flags = radeon_object_flags_from_domain(domain); | |
152 | r = ttm_buffer_object_init(&rdev->mman.bdev, &robj->tobj, size, type, flags, | |
153 | 0, 0, false, NULL, size, | |
154 | &radeon_ttm_object_object_destroy); | |
	if (unlikely(r != 0)) {
		/* TTM calls radeon_ttm_object_object_destroy() if an error happens */
		DRM_ERROR("Failed to allocate TTM object (%lu, 0x%08X, %u)\n",
			  size, flags, 0);
		return r;
	}
	*robj_ptr = robj;
	if (gobj) {
		list_add_tail(&robj->list, &rdev->gem.objects);
	}
	return 0;
}
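
/*
 * A minimal allocation sketch (illustrative only; a 4 KiB
 * kernel-internal BO with no GEM object attached):
 *
 *	struct radeon_object *robj;
 *	int r;
 *
 *	r = radeon_object_create(rdev, NULL, 4096, true,
 *				 RADEON_GEM_DOMAIN_GTT, false, &robj);
 *	if (r)
 *		return r;
 */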
167 | ||
168 | int radeon_object_kmap(struct radeon_object *robj, void **ptr) | |
169 | { | |
170 | int r; | |
171 | ||
172 | spin_lock(&robj->tobj.lock); | |
173 | if (robj->kptr) { | |
174 | if (ptr) { | |
175 | *ptr = robj->kptr; | |
176 | } | |
177 | spin_unlock(&robj->tobj.lock); | |
178 | return 0; | |
179 | } | |
180 | spin_unlock(&robj->tobj.lock); | |
181 | r = ttm_bo_kmap(&robj->tobj, 0, robj->tobj.num_pages, &robj->kmap); | |
182 | if (r) { | |
183 | return r; | |
184 | } | |
185 | spin_lock(&robj->tobj.lock); | |
186 | robj->kptr = ttm_kmap_obj_virtual(&robj->kmap, &robj->is_iomem); | |
187 | spin_unlock(&robj->tobj.lock); | |
188 | if (ptr) { | |
189 | *ptr = robj->kptr; | |
190 | } | |
191 | return 0; | |
192 | } | |
193 | ||
194 | void radeon_object_kunmap(struct radeon_object *robj) | |
195 | { | |
196 | spin_lock(&robj->tobj.lock); | |
197 | if (robj->kptr == NULL) { | |
198 | spin_unlock(&robj->tobj.lock); | |
199 | return; | |
200 | } | |
201 | robj->kptr = NULL; | |
202 | spin_unlock(&robj->tobj.lock); | |
203 | ttm_bo_kunmap(&robj->kmap); | |
204 | } | |
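
/*
 * Typical CPU-access pattern (illustrative only; data and len are
 * hypothetical): map, write, unmap.
 *
 *	void *ptr;
 *	int r;
 *
 *	r = radeon_object_kmap(robj, &ptr);
 *	if (r)
 *		return r;
 *	memcpy(ptr, data, len);
 *	radeon_object_kunmap(robj);
 */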
205 | ||
206 | void radeon_object_unref(struct radeon_object **robj) | |
207 | { | |
208 | struct ttm_buffer_object *tobj; | |
209 | ||
210 | if ((*robj) == NULL) { | |
211 | return; | |
212 | } | |
213 | tobj = &((*robj)->tobj); | |
214 | ttm_bo_unref(&tobj); | |
215 | if (tobj == NULL) { | |
216 | *robj = NULL; | |
217 | } | |
218 | } | |
219 | ||
220 | int radeon_object_mmap(struct radeon_object *robj, uint64_t *offset) | |
221 | { | |
222 | *offset = robj->tobj.addr_space_offset; | |
223 | return 0; | |
224 | } | |
225 | ||
226 | int radeon_object_pin(struct radeon_object *robj, uint32_t domain, | |
227 | uint64_t *gpu_addr) | |
228 | { | |
229 | uint32_t flags; | |
230 | uint32_t tmp; | |
771fe6b9 JG |
231 | int r; |
232 | ||
233 | flags = radeon_object_flags_from_domain(domain); | |
234 | spin_lock(&robj->tobj.lock); | |
235 | if (robj->pin_count) { | |
236 | robj->pin_count++; | |
237 | if (gpu_addr != NULL) { | |
238 | *gpu_addr = robj->gpu_addr; | |
239 | } | |
240 | spin_unlock(&robj->tobj.lock); | |
241 | return 0; | |
242 | } | |
243 | spin_unlock(&robj->tobj.lock); | |
244 | r = radeon_object_reserve(robj, false); | |
245 | if (unlikely(r != 0)) { | |
246 | DRM_ERROR("radeon: failed to reserve object for pinning it.\n"); | |
247 | return r; | |
248 | } | |
771fe6b9 JG |
249 | tmp = robj->tobj.mem.placement; |
250 | ttm_flag_masked(&tmp, flags, TTM_PL_MASK_MEM); | |
251 | robj->tobj.proposed_placement = tmp | TTM_PL_FLAG_NO_EVICT | TTM_PL_MASK_CACHING; | |
252 | r = ttm_buffer_object_validate(&robj->tobj, | |
253 | robj->tobj.proposed_placement, | |
254 | false, false); | |
255 | radeon_object_gpu_addr(robj); | |
256 | if (gpu_addr != NULL) { | |
257 | *gpu_addr = robj->gpu_addr; | |
258 | } | |
259 | robj->pin_count = 1; | |
260 | if (unlikely(r != 0)) { | |
261 | DRM_ERROR("radeon: failed to pin object.\n"); | |
262 | } | |
263 | radeon_object_unreserve(robj); | |
771fe6b9 JG |
264 | return r; |
265 | } | |
266 | ||
267 | void radeon_object_unpin(struct radeon_object *robj) | |
268 | { | |
269 | uint32_t flags; | |
771fe6b9 JG |
270 | int r; |
271 | ||
272 | spin_lock(&robj->tobj.lock); | |
273 | if (!robj->pin_count) { | |
274 | spin_unlock(&robj->tobj.lock); | |
275 | printk(KERN_WARNING "Unpin not necessary for %p !\n", robj); | |
276 | return; | |
277 | } | |
278 | robj->pin_count--; | |
279 | if (robj->pin_count) { | |
280 | spin_unlock(&robj->tobj.lock); | |
281 | return; | |
282 | } | |
283 | spin_unlock(&robj->tobj.lock); | |
284 | r = radeon_object_reserve(robj, false); | |
285 | if (unlikely(r != 0)) { | |
286 | DRM_ERROR("radeon: failed to reserve object for unpinning it.\n"); | |
287 | return; | |
288 | } | |
771fe6b9 JG |
289 | flags = robj->tobj.mem.placement; |
290 | robj->tobj.proposed_placement = flags & ~TTM_PL_FLAG_NO_EVICT; | |
291 | r = ttm_buffer_object_validate(&robj->tobj, | |
292 | robj->tobj.proposed_placement, | |
293 | false, false); | |
294 | if (unlikely(r != 0)) { | |
295 | DRM_ERROR("radeon: failed to unpin buffer.\n"); | |
296 | } | |
297 | radeon_object_unreserve(robj); | |
771fe6b9 JG |
298 | } |
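
/*
 * Pin/unpin sketch (illustrative only), e.g. for a scanout buffer that
 * must stay at a fixed VRAM address while displayed:
 *
 *	uint64_t gpu_addr;
 *
 *	r = radeon_object_pin(robj, RADEON_GEM_DOMAIN_VRAM, &gpu_addr);
 *	if (r)
 *		return r;
 *	... program the CRTC base with gpu_addr ...
 *	radeon_object_unpin(robj);
 */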
299 | ||
300 | int radeon_object_wait(struct radeon_object *robj) | |
301 | { | |
302 | int r = 0; | |
303 | ||
304 | /* FIXME: should use block reservation instead */ | |
305 | r = radeon_object_reserve(robj, true); | |
306 | if (unlikely(r != 0)) { | |
307 | DRM_ERROR("radeon: failed to reserve object for waiting.\n"); | |
308 | return r; | |
309 | } | |
310 | spin_lock(&robj->tobj.lock); | |
311 | if (robj->tobj.sync_obj) { | |
312 | r = ttm_bo_wait(&robj->tobj, true, false, false); | |
313 | } | |
314 | spin_unlock(&robj->tobj.lock); | |
315 | radeon_object_unreserve(robj); | |
316 | return r; | |
317 | } | |
318 | ||
319 | int radeon_object_evict_vram(struct radeon_device *rdev) | |
320 | { | |
321 | if (rdev->flags & RADEON_IS_IGP) { | |
322 | /* Useless to evict on IGP chips */ | |
323 | return 0; | |
324 | } | |
325 | return ttm_bo_evict_mm(&rdev->mman.bdev, TTM_PL_VRAM); | |
326 | } | |
327 | ||
328 | void radeon_object_force_delete(struct radeon_device *rdev) | |
329 | { | |
330 | struct radeon_object *robj, *n; | |
331 | struct drm_gem_object *gobj; | |
332 | ||
333 | if (list_empty(&rdev->gem.objects)) { | |
334 | return; | |
335 | } | |
336 | DRM_ERROR("Userspace still has active objects !\n"); | |
337 | list_for_each_entry_safe(robj, n, &rdev->gem.objects, list) { | |
338 | mutex_lock(&rdev->ddev->struct_mutex); | |
339 | gobj = robj->gobj; | |
340 | DRM_ERROR("Force free for (%p,%p,%lu,%lu)\n", | |
341 | gobj, robj, (unsigned long)gobj->size, | |
342 | *((unsigned long *)&gobj->refcount)); | |
343 | list_del_init(&robj->list); | |
344 | radeon_object_unref(&robj); | |
345 | gobj->driver_private = NULL; | |
346 | drm_gem_object_unreference(gobj); | |
347 | mutex_unlock(&rdev->ddev->struct_mutex); | |
348 | } | |
349 | } | |
350 | ||
351 | int radeon_object_init(struct radeon_device *rdev) | |
352 | { | |
353 | return radeon_ttm_init(rdev); | |
354 | } | |
355 | ||
356 | void radeon_object_fini(struct radeon_device *rdev) | |
357 | { | |
358 | radeon_ttm_fini(rdev); | |
359 | } | |
360 | ||
361 | void radeon_object_list_add_object(struct radeon_object_list *lobj, | |
362 | struct list_head *head) | |
363 | { | |
364 | if (lobj->wdomain) { | |
365 | list_add(&lobj->list, head); | |
366 | } else { | |
367 | list_add_tail(&lobj->list, head); | |
368 | } | |
369 | } | |
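
/*
 * Note: radeon_object_list_add_object() places buffers with a write
 * domain at the head of the list and read-only buffers at the tail,
 * presumably so that writers are reserved and validated first when the
 * list is walked in order below.
 */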
370 | ||
371 | int radeon_object_list_reserve(struct list_head *head) | |
372 | { | |
373 | struct radeon_object_list *lobj; | |
374 | struct list_head *i; | |
375 | int r; | |
376 | ||
377 | list_for_each(i, head) { | |
378 | lobj = list_entry(i, struct radeon_object_list, list); | |
379 | if (!lobj->robj->pin_count) { | |
380 | r = radeon_object_reserve(lobj->robj, true); | |
381 | if (unlikely(r != 0)) { | |
382 | DRM_ERROR("radeon: failed to reserve object.\n"); | |
383 | return r; | |
384 | } | |
385 | } else { | |
386 | } | |
387 | } | |
388 | return 0; | |
389 | } | |
390 | ||
391 | void radeon_object_list_unreserve(struct list_head *head) | |
392 | { | |
393 | struct radeon_object_list *lobj; | |
394 | struct list_head *i; | |
395 | ||
396 | list_for_each(i, head) { | |
397 | lobj = list_entry(i, struct radeon_object_list, list); | |
398 | if (!lobj->robj->pin_count) { | |
399 | radeon_object_unreserve(lobj->robj); | |
400 | } else { | |
401 | } | |
402 | } | |
403 | } | |
404 | ||
405 | int radeon_object_list_validate(struct list_head *head, void *fence) | |
406 | { | |
407 | struct radeon_object_list *lobj; | |
408 | struct radeon_object *robj; | |
409 | struct radeon_fence *old_fence = NULL; | |
410 | struct list_head *i; | |
411 | uint32_t flags; | |
412 | int r; | |
413 | ||
414 | r = radeon_object_list_reserve(head); | |
415 | if (unlikely(r != 0)) { | |
416 | radeon_object_list_unreserve(head); | |
417 | return r; | |
418 | } | |
419 | list_for_each(i, head) { | |
420 | lobj = list_entry(i, struct radeon_object_list, list); | |
421 | robj = lobj->robj; | |
422 | if (lobj->wdomain) { | |
423 | flags = radeon_object_flags_from_domain(lobj->wdomain); | |
424 | flags |= TTM_PL_FLAG_TT; | |
425 | } else { | |
426 | flags = radeon_object_flags_from_domain(lobj->rdomain); | |
427 | flags |= TTM_PL_FLAG_TT; | |
428 | flags |= TTM_PL_FLAG_VRAM; | |
429 | } | |
430 | if (!robj->pin_count) { | |
431 | robj->tobj.proposed_placement = flags | TTM_PL_MASK_CACHING; | |
432 | r = ttm_buffer_object_validate(&robj->tobj, | |
433 | robj->tobj.proposed_placement, | |
434 | true, false); | |
435 | if (unlikely(r)) { | |
436 | radeon_object_list_unreserve(head); | |
437 | DRM_ERROR("radeon: failed to validate.\n"); | |
438 | return r; | |
439 | } | |
440 | radeon_object_gpu_addr(robj); | |
441 | } | |
442 | lobj->gpu_offset = robj->gpu_addr; | |
e024e110 | 443 | lobj->tiling_flags = robj->tiling_flags; |
771fe6b9 JG |
444 | if (fence) { |
445 | old_fence = (struct radeon_fence *)robj->tobj.sync_obj; | |
446 | robj->tobj.sync_obj = radeon_fence_ref(fence); | |
447 | robj->tobj.sync_obj_arg = NULL; | |
448 | } | |
449 | if (old_fence) { | |
450 | radeon_fence_unref(&old_fence); | |
451 | } | |
452 | } | |
453 | return 0; | |
454 | } | |
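
/*
 * Fencing note for the validate loop above: each BO's sync_obj is
 * replaced with a reference to the new fence and the reference to the
 * fence it displaced is dropped, so every buffer in the list ends up
 * guarded by the fence of the command submission being built.
 */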
455 | ||
456 | void radeon_object_list_unvalidate(struct list_head *head) | |
457 | { | |
458 | struct radeon_object_list *lobj; | |
459 | struct radeon_fence *old_fence = NULL; | |
460 | struct list_head *i; | |
461 | ||
462 | list_for_each(i, head) { | |
463 | lobj = list_entry(i, struct radeon_object_list, list); | |
464 | old_fence = (struct radeon_fence *)lobj->robj->tobj.sync_obj; | |
465 | lobj->robj->tobj.sync_obj = NULL; | |
466 | if (old_fence) { | |
467 | radeon_fence_unref(&old_fence); | |
468 | } | |
469 | } | |
470 | radeon_object_list_unreserve(head); | |
471 | } | |
472 | ||
473 | void radeon_object_list_clean(struct list_head *head) | |
474 | { | |
475 | radeon_object_list_unreserve(head); | |
476 | } | |
477 | ||
478 | int radeon_object_fbdev_mmap(struct radeon_object *robj, | |
479 | struct vm_area_struct *vma) | |
480 | { | |
481 | return ttm_fbdev_mmap(vma, &robj->tobj); | |
482 | } | |
483 | ||
484 | unsigned long radeon_object_size(struct radeon_object *robj) | |
485 | { | |
486 | return robj->tobj.num_pages << PAGE_SHIFT; | |
487 | } | |

int radeon_object_get_surface_reg(struct radeon_object *robj)
{
	struct radeon_device *rdev = robj->rdev;
	struct radeon_surface_reg *reg;
	struct radeon_object *old_object;
	int steal;
	int i;

	if (!robj->tiling_flags)
		return 0;

	if (robj->surface_reg >= 0) {
		reg = &rdev->surface_regs[robj->surface_reg];
		i = robj->surface_reg;
		goto out;
	}

	steal = -1;
	for (i = 0; i < RADEON_GEM_MAX_SURFACES; i++) {
		reg = &rdev->surface_regs[i];
		if (!reg->robj)
			break;

		old_object = reg->robj;
		if (old_object->pin_count == 0)
			steal = i;
	}

	/* if we are all out */
	if (i == RADEON_GEM_MAX_SURFACES) {
		if (steal == -1)
			return -ENOMEM;
		/* find someone with a surface reg and nuke their BO */
		reg = &rdev->surface_regs[steal];
		old_object = reg->robj;
		/* blow away the mapping */
		DRM_DEBUG("stealing surface reg %d from %p\n", steal, old_object);
		ttm_bo_unmap_virtual(&old_object->tobj);
		old_object->surface_reg = -1;
		i = steal;
	}

	robj->surface_reg = i;
	reg->robj = robj;

out:
	radeon_set_surface_reg(rdev, i, robj->tiling_flags, robj->pitch,
			       robj->tobj.mem.mm_node->start << PAGE_SHIFT,
			       robj->tobj.num_pages << PAGE_SHIFT);
	return 0;
}
541 | ||
542 | void radeon_object_clear_surface_reg(struct radeon_object *robj) | |
543 | { | |
544 | struct radeon_device *rdev = robj->rdev; | |
545 | struct radeon_surface_reg *reg; | |
546 | ||
547 | if (robj->surface_reg == -1) | |
548 | return; | |
549 | ||
550 | reg = &rdev->surface_regs[robj->surface_reg]; | |
551 | radeon_clear_surface_reg(rdev, robj->surface_reg); | |
552 | ||
553 | reg->robj = NULL; | |
554 | robj->surface_reg = -1; | |
555 | } | |
556 | ||
557 | void radeon_object_set_tiling_flags(struct radeon_object *robj, | |
558 | uint32_t tiling_flags, uint32_t pitch) | |
559 | { | |
560 | robj->tiling_flags = tiling_flags; | |
561 | robj->pitch = pitch; | |
562 | } | |
563 | ||
564 | void radeon_object_get_tiling_flags(struct radeon_object *robj, | |
565 | uint32_t *tiling_flags, | |
566 | uint32_t *pitch) | |
567 | { | |
568 | if (tiling_flags) | |
569 | *tiling_flags = robj->tiling_flags; | |
570 | if (pitch) | |
571 | *pitch = robj->pitch; | |
572 | } | |
573 | ||
574 | int radeon_object_check_tiling(struct radeon_object *robj, bool has_moved, | |
575 | bool force_drop) | |
576 | { | |
577 | if (!(robj->tiling_flags & RADEON_TILING_SURFACE)) | |
578 | return 0; | |
579 | ||
580 | if (force_drop) { | |
581 | radeon_object_clear_surface_reg(robj); | |
582 | return 0; | |
583 | } | |
584 | ||
585 | if (robj->tobj.mem.mem_type != TTM_PL_VRAM) { | |
586 | if (!has_moved) | |
587 | return 0; | |
588 | ||
589 | if (robj->surface_reg >= 0) | |
590 | radeon_object_clear_surface_reg(robj); | |
591 | return 0; | |
592 | } | |
593 | ||
594 | if ((robj->surface_reg >= 0) && !has_moved) | |
595 | return 0; | |
596 | ||
597 | return radeon_object_get_surface_reg(robj); | |
598 | } | |
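
/*
 * Summary of the checks above: only RADEON_TILING_SURFACE buffers use a
 * surface register; a forced drop or a move out of VRAM releases it,
 * while a buffer that is (still or newly) in VRAM acquires one via
 * radeon_object_get_surface_reg() unless it already holds one and has
 * not moved.
 */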
599 | ||
600 | void radeon_bo_move_notify(struct ttm_buffer_object *bo, | |
601 | struct ttm_mem_reg *mem) | |
602 | { | |
603 | struct radeon_object *robj = container_of(bo, struct radeon_object, tobj); | |
604 | radeon_object_check_tiling(robj, 0, 1); | |
605 | } | |
606 | ||
607 | void radeon_bo_fault_reserve_notify(struct ttm_buffer_object *bo) | |
608 | { | |
609 | struct radeon_object *robj = container_of(bo, struct radeon_object, tobj); | |
610 | radeon_object_check_tiling(robj, 0, 0); | |
611 | } |