/*
 * Copyright 2009 Jerome Glisse.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 */
/*
 * Authors:
 *    Jerome Glisse <glisse@freedesktop.org>
 *    Thomas Hellstrom <thomas-at-tungstengraphics-dot-com>
 */
#include <linux/list.h>
#include <linux/slab.h>
#include <drm/drmP.h>
#include "radeon_drm.h"
#include "radeon.h"
#include "radeon_trace.h"
int radeon_ttm_init(struct radeon_device *rdev);
void radeon_ttm_fini(struct radeon_device *rdev);
static void radeon_bo_clear_surface_reg(struct radeon_bo *bo);
/*
 * To exclude mutual BO access we rely on bo_reserve exclusion, as all
 * functions call it.
 */
static void radeon_ttm_bo_destroy(struct ttm_buffer_object *tbo)
{
	struct radeon_bo *bo;

	bo = container_of(tbo, struct radeon_bo, tbo);
	mutex_lock(&bo->rdev->gem.mutex);
	list_del_init(&bo->list);
	mutex_unlock(&bo->rdev->gem.mutex);
	radeon_bo_clear_surface_reg(bo);
	drm_gem_object_release(&bo->gem_base);
	kfree(bo);
}
bool radeon_ttm_bo_is_radeon_bo(struct ttm_buffer_object *bo)
{
	if (bo->destroy == &radeon_ttm_bo_destroy)
		return true;
	return false;
}
void radeon_ttm_placement_from_domain(struct radeon_bo *rbo, u32 domain)
{
	u32 c = 0;

	rbo->placement.fpfn = 0;
	rbo->placement.lpfn = 0;
	rbo->placement.placement = rbo->placements;
	rbo->placement.busy_placement = rbo->placements;
	if (domain & RADEON_GEM_DOMAIN_VRAM)
		rbo->placements[c++] = TTM_PL_FLAG_WC | TTM_PL_FLAG_UNCACHED |
					TTM_PL_FLAG_VRAM;
	if (domain & RADEON_GEM_DOMAIN_GTT)
		rbo->placements[c++] = TTM_PL_MASK_CACHING | TTM_PL_FLAG_TT;
	if (domain & RADEON_GEM_DOMAIN_CPU)
		rbo->placements[c++] = TTM_PL_MASK_CACHING | TTM_PL_FLAG_SYSTEM;
	if (!c)
		rbo->placements[c++] = TTM_PL_MASK_CACHING | TTM_PL_FLAG_SYSTEM;
	rbo->placement.num_placement = c;
	rbo->placement.num_busy_placement = c;
}
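
/*
 * Illustrative use (a sketch, not part of the original file): a buffer
 * that prefers VRAM but may fall back to GTT passes both domain bits,
 * yielding a two-entry placement list that TTM tries in order:
 *
 *	radeon_ttm_placement_from_domain(rbo,
 *			RADEON_GEM_DOMAIN_VRAM | RADEON_GEM_DOMAIN_GTT);
 */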
int radeon_bo_create(struct radeon_device *rdev,
		     unsigned long size, int byte_align, bool kernel, u32 domain,
		     struct radeon_bo **bo_ptr)
{
	struct radeon_bo *bo;
	enum ttm_bo_type type;
	unsigned long page_align = roundup(byte_align, PAGE_SIZE) >> PAGE_SHIFT;
	unsigned long max_size = 0;
	size_t acc_size;
	int r;

	size = ALIGN(size, PAGE_SIZE);

	if (unlikely(rdev->mman.bdev.dev_mapping == NULL)) {
		rdev->mman.bdev.dev_mapping = rdev->ddev->dev_mapping;
	}
	if (kernel) {
		type = ttm_bo_type_kernel;
	} else {
		type = ttm_bo_type_device;
	}
	*bo_ptr = NULL;

	/* the maximum bo size is the minimum of the visible vram and gtt sizes */
	max_size = min(rdev->mc.visible_vram_size, rdev->mc.gtt_size);
	if ((page_align << PAGE_SHIFT) >= max_size) {
		printk(KERN_WARNING "%s:%d alloc size %ldM bigger than %ldMb limit\n",
		       __func__, __LINE__, page_align >> (20 - PAGE_SHIFT), max_size >> 20);
		return -ENOMEM;
	}

	acc_size = ttm_bo_dma_acc_size(&rdev->mman.bdev, size,
				       sizeof(struct radeon_bo));

retry:
	bo = kzalloc(sizeof(struct radeon_bo), GFP_KERNEL);
	if (bo == NULL)
		return -ENOMEM;
	r = drm_gem_object_init(rdev->ddev, &bo->gem_base, size);
	if (unlikely(r)) {
		kfree(bo);
		return r;
	}
	bo->rdev = rdev;
	bo->gem_base.driver_private = NULL;
	bo->surface_reg = -1;
	INIT_LIST_HEAD(&bo->list);
	radeon_ttm_placement_from_domain(bo, domain);
	/* Kernel allocations are uninterruptible */
	mutex_lock(&rdev->vram_mutex);
	r = ttm_bo_init(&rdev->mman.bdev, &bo->tbo, size, type,
			&bo->placement, page_align, 0, !kernel, NULL,
			acc_size, &radeon_ttm_bo_destroy);
	mutex_unlock(&rdev->vram_mutex);
	if (unlikely(r != 0)) {
		if (r != -ERESTARTSYS) {
			if (domain == RADEON_GEM_DOMAIN_VRAM) {
				/* retry, this time allowing a GTT fallback */
				domain |= RADEON_GEM_DOMAIN_GTT;
				goto retry;
			}
			dev_err(rdev->dev,
				"object_init failed for (%lu, 0x%08X)\n",
				size, domain);
		}
		return r;
	}
	*bo_ptr = bo;

	trace_radeon_bo_create(bo);

	return 0;
}
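
/*
 * Illustrative use (a sketch, not part of the original file): allocate
 * a page-sized, kernel-owned buffer in GTT:
 *
 *	struct radeon_bo *bo;
 *	int r;
 *
 *	r = radeon_bo_create(rdev, PAGE_SIZE, PAGE_SIZE, true,
 *			     RADEON_GEM_DOMAIN_GTT, &bo);
 *	if (r)
 *		return r;
 */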
int radeon_bo_kmap(struct radeon_bo *bo, void **ptr)
{
	bool is_iomem;
	int r;

	if (bo->kptr) {
		if (ptr) {
			*ptr = bo->kptr;
		}
		return 0;
	}
	r = ttm_bo_kmap(&bo->tbo, 0, bo->tbo.num_pages, &bo->kmap);
	if (r) {
		return r;
	}
	bo->kptr = ttm_kmap_obj_virtual(&bo->kmap, &is_iomem);
	if (ptr) {
		*ptr = bo->kptr;
	}
	radeon_bo_check_tiling(bo, 0, 0);
	return 0;
}
void radeon_bo_kunmap(struct radeon_bo *bo)
{
	if (bo->kptr == NULL)
		return;
	bo->kptr = NULL;
	radeon_bo_check_tiling(bo, 0, 0);
	ttm_bo_kunmap(&bo->kmap);
}
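
/*
 * Typical map/use/unmap pattern (an illustrative sketch only; the bo
 * must already be reserved by the caller):
 *
 *	void *ptr;
 *
 *	r = radeon_bo_kmap(bo, &ptr);
 *	if (r == 0) {
 *		memset(ptr, 0, radeon_bo_size(bo));
 *		radeon_bo_kunmap(bo);
 *	}
 */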
void radeon_bo_unref(struct radeon_bo **bo)
{
	struct ttm_buffer_object *tbo;
	struct radeon_device *rdev;

	if ((*bo) == NULL)
		return;
	rdev = (*bo)->rdev;
	tbo = &((*bo)->tbo);
	mutex_lock(&rdev->vram_mutex);
	ttm_bo_unref(&tbo);
	mutex_unlock(&rdev->vram_mutex);
	if (tbo == NULL)
		*bo = NULL;
}
int radeon_bo_pin(struct radeon_bo *bo, u32 domain, u64 *gpu_addr)
{
	int r, i;

	if (bo->pin_count) {
		bo->pin_count++;
		if (gpu_addr)
			*gpu_addr = radeon_bo_gpu_offset(bo);
		return 0;
	}
	radeon_ttm_placement_from_domain(bo, domain);
	if (domain == RADEON_GEM_DOMAIN_VRAM) {
		/* force to pin into visible video ram */
		bo->placement.lpfn = bo->rdev->mc.visible_vram_size >> PAGE_SHIFT;
	}
	for (i = 0; i < bo->placement.num_placement; i++)
		bo->placements[i] |= TTM_PL_FLAG_NO_EVICT;
	r = ttm_bo_validate(&bo->tbo, &bo->placement, false, false, false);
	if (likely(r == 0)) {
		bo->pin_count = 1;
		if (gpu_addr != NULL)
			*gpu_addr = radeon_bo_gpu_offset(bo);
	}
	if (unlikely(r != 0))
		dev_err(bo->rdev->dev, "%p pin failed\n", bo);
	return r;
}
int radeon_bo_unpin(struct radeon_bo *bo)
{
	int r, i;

	if (!bo->pin_count) {
		dev_warn(bo->rdev->dev, "%p unpin not necessary\n", bo);
		return 0;
	}
	bo->pin_count--;
	if (bo->pin_count)
		return 0;
	for (i = 0; i < bo->placement.num_placement; i++)
		bo->placements[i] &= ~TTM_PL_FLAG_NO_EVICT;
	r = ttm_bo_validate(&bo->tbo, &bo->placement, false, false, false);
	if (unlikely(r != 0))
		dev_err(bo->rdev->dev, "%p validate failed for unpin\n", bo);
	return r;
}
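
/*
 * Pin and unpin are reference counted; every successful pin must be
 * balanced by an unpin. Illustrative sketch only:
 *
 *	u64 gpu_addr;
 *
 *	r = radeon_bo_pin(bo, RADEON_GEM_DOMAIN_VRAM, &gpu_addr);
 *	if (r)
 *		return r;
 *	... program the hardware with gpu_addr ...
 *	radeon_bo_unpin(bo);
 */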
int radeon_bo_evict_vram(struct radeon_device *rdev)
{
	/* late 2.6.33 fix IGP hibernate - we need pm ops to do this correctly */
	if (0 && (rdev->flags & RADEON_IS_IGP)) {
		if (rdev->mc.igp_sideport_enabled == false)
			/* Useless to evict on IGP chips */
			return 0;
	}
	return ttm_bo_evict_mm(&rdev->mman.bdev, TTM_PL_VRAM);
}
void radeon_bo_force_delete(struct radeon_device *rdev)
{
	struct radeon_bo *bo, *n;

	if (list_empty(&rdev->gem.objects)) {
		return;
	}
	dev_err(rdev->dev, "Userspace still has active objects !\n");
	list_for_each_entry_safe(bo, n, &rdev->gem.objects, list) {
		mutex_lock(&rdev->ddev->struct_mutex);
		dev_err(rdev->dev, "%p %p %lu %lu force free\n",
			&bo->gem_base, bo, (unsigned long)bo->gem_base.size,
			*((unsigned long *)&bo->gem_base.refcount));
		mutex_lock(&bo->rdev->gem.mutex);
		list_del_init(&bo->list);
		mutex_unlock(&bo->rdev->gem.mutex);
		/* this should unref the ttm bo */
		drm_gem_object_unreference(&bo->gem_base);
		mutex_unlock(&rdev->ddev->struct_mutex);
	}
}
int radeon_bo_init(struct radeon_device *rdev)
{
	/* Add an MTRR for the VRAM */
	rdev->mc.vram_mtrr = mtrr_add(rdev->mc.aper_base, rdev->mc.aper_size,
			MTRR_TYPE_WRCOMB, 1);
	DRM_INFO("Detected VRAM RAM=%lluM, BAR=%lluM\n",
		rdev->mc.mc_vram_size >> 20,
		(unsigned long long)rdev->mc.aper_size >> 20);
	DRM_INFO("RAM width %dbits %cDR\n",
			rdev->mc.vram_width, rdev->mc.vram_is_ddr ? 'D' : 'S');
	return radeon_ttm_init(rdev);
}
void radeon_bo_fini(struct radeon_device *rdev)
{
	radeon_ttm_fini(rdev);
}
void radeon_bo_list_add_object(struct radeon_bo_list *lobj,
				struct list_head *head)
{
	if (lobj->wdomain) {
		list_add(&lobj->tv.head, head);
	} else {
		list_add_tail(&lobj->tv.head, head);
	}
}
int radeon_bo_list_validate(struct list_head *head)
{
	struct radeon_bo_list *lobj;
	struct radeon_bo *bo;
	u32 domain;
	int r;

	r = ttm_eu_reserve_buffers(head);
	if (unlikely(r != 0)) {
		return r;
	}
	list_for_each_entry(lobj, head, tv.head) {
		bo = lobj->bo;
		if (!bo->pin_count) {
			domain = lobj->wdomain ? lobj->wdomain : lobj->rdomain;

		retry:
			radeon_ttm_placement_from_domain(bo, domain);
			r = ttm_bo_validate(&bo->tbo, &bo->placement,
						true, false, false);
			if (unlikely(r)) {
				if (r != -ERESTARTSYS && domain == RADEON_GEM_DOMAIN_VRAM) {
					/* fall back to GTT and try again */
					domain |= RADEON_GEM_DOMAIN_GTT;
					goto retry;
				}
				return r;
			}
		}
		lobj->gpu_offset = radeon_bo_gpu_offset(bo);
		lobj->tiling_flags = bo->tiling_flags;
	}
	return 0;
}
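
/*
 * Illustrative command-submission flow (a sketch only): each buffer is
 * queued with radeon_bo_list_add_object() and the whole list is then
 * reserved and validated in one pass:
 *
 *	struct list_head head;
 *
 *	INIT_LIST_HEAD(&head);
 *	radeon_bo_list_add_object(lobj, &head);
 *	r = radeon_bo_list_validate(&head);
 */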
int radeon_bo_fbdev_mmap(struct radeon_bo *bo,
			 struct vm_area_struct *vma)
{
	return ttm_fbdev_mmap(vma, &bo->tbo);
}
int radeon_bo_get_surface_reg(struct radeon_bo *bo)
{
	struct radeon_device *rdev = bo->rdev;
	struct radeon_surface_reg *reg;
	struct radeon_bo *old_object;
	int steal;
	int i;

	BUG_ON(!atomic_read(&bo->tbo.reserved));

	if (!bo->tiling_flags)
		return 0;

	if (bo->surface_reg >= 0) {
		reg = &rdev->surface_regs[bo->surface_reg];
		i = bo->surface_reg;
		goto out;
	}

	steal = -1;
	for (i = 0; i < RADEON_GEM_MAX_SURFACES; i++) {
		reg = &rdev->surface_regs[i];
		if (!reg->bo)
			break;

		old_object = reg->bo;
		if (old_object->pin_count == 0)
			steal = i;
	}

	/* if we are all out */
	if (i == RADEON_GEM_MAX_SURFACES) {
		if (steal == -1)
			return -ENOMEM;
		/* find someone with a surface reg and nuke their BO */
		reg = &rdev->surface_regs[steal];
		old_object = reg->bo;
		/* blow away the mapping */
		DRM_DEBUG("stealing surface reg %d from %p\n", steal, old_object);
		ttm_bo_unmap_virtual(&old_object->tbo);
		old_object->surface_reg = -1;
		i = steal;
	}

	bo->surface_reg = i;
	reg->bo = bo;

out:
	radeon_set_surface_reg(rdev, i, bo->tiling_flags, bo->pitch,
			       bo->tbo.mem.start << PAGE_SHIFT,
			       bo->tbo.num_pages << PAGE_SHIFT);
	return 0;
}
static void radeon_bo_clear_surface_reg(struct radeon_bo *bo)
{
	struct radeon_device *rdev = bo->rdev;
	struct radeon_surface_reg *reg;

	if (bo->surface_reg == -1)
		return;

	reg = &rdev->surface_regs[bo->surface_reg];
	radeon_clear_surface_reg(rdev, bo->surface_reg);

	reg->bo = NULL;
	bo->surface_reg = -1;
}
int radeon_bo_set_tiling_flags(struct radeon_bo *bo,
				uint32_t tiling_flags, uint32_t pitch)
{
	int r;

	r = radeon_bo_reserve(bo, false);
	if (unlikely(r != 0))
		return r;
	bo->tiling_flags = tiling_flags;
	bo->pitch = pitch;
	radeon_bo_unreserve(bo);
	return 0;
}
void radeon_bo_get_tiling_flags(struct radeon_bo *bo,
				uint32_t *tiling_flags,
				uint32_t *pitch)
{
	BUG_ON(!atomic_read(&bo->tbo.reserved));
	if (tiling_flags)
		*tiling_flags = bo->tiling_flags;
	if (pitch)
		*pitch = bo->pitch;
}
int radeon_bo_check_tiling(struct radeon_bo *bo, bool has_moved,
				bool force_drop)
{
	BUG_ON(!atomic_read(&bo->tbo.reserved));

	if (!(bo->tiling_flags & RADEON_TILING_SURFACE))
		return 0;

	if (force_drop) {
		radeon_bo_clear_surface_reg(bo);
		return 0;
	}

	if (bo->tbo.mem.mem_type != TTM_PL_VRAM) {
		if (!has_moved)
			return 0;

		if (bo->surface_reg >= 0)
			radeon_bo_clear_surface_reg(bo);
		return 0;
	}

	if ((bo->surface_reg >= 0) && !has_moved)
		return 0;

	return radeon_bo_get_surface_reg(bo);
}
void radeon_bo_move_notify(struct ttm_buffer_object *bo,
			   struct ttm_mem_reg *mem)
{
	struct radeon_bo *rbo;

	if (!radeon_ttm_bo_is_radeon_bo(bo))
		return;
	rbo = container_of(bo, struct radeon_bo, tbo);
	radeon_bo_check_tiling(rbo, 0, 1);
}
int radeon_bo_fault_reserve_notify(struct ttm_buffer_object *bo)
{
	struct radeon_device *rdev;
	struct radeon_bo *rbo;
	unsigned long offset, size;
	int r;

	if (!radeon_ttm_bo_is_radeon_bo(bo))
		return 0;
	rbo = container_of(bo, struct radeon_bo, tbo);
	radeon_bo_check_tiling(rbo, 0, 0);
	rdev = rbo->rdev;
	if (bo->mem.mem_type == TTM_PL_VRAM) {
		size = bo->mem.num_pages << PAGE_SHIFT;
		offset = bo->mem.start << PAGE_SHIFT;
		if ((offset + size) > rdev->mc.visible_vram_size) {
			/* hurrah the memory is not visible ! */
			radeon_ttm_placement_from_domain(rbo, RADEON_GEM_DOMAIN_VRAM);
			rbo->placement.lpfn = rdev->mc.visible_vram_size >> PAGE_SHIFT;
			r = ttm_bo_validate(bo, &rbo->placement, false, true, false);
			if (unlikely(r != 0))
				return r;
			offset = bo->mem.start << PAGE_SHIFT;
			/* this should not happen */
			if ((offset + size) > rdev->mc.visible_vram_size)
				return -EINVAL;
		}
	}
	return 0;
}
int radeon_bo_wait(struct radeon_bo *bo, u32 *mem_type, bool no_wait)
{
	int r;

	r = ttm_bo_reserve(&bo->tbo, true, no_wait, false, 0);
	if (unlikely(r != 0))
		return r;
	spin_lock(&bo->tbo.bdev->fence_lock);
	if (mem_type)
		*mem_type = bo->tbo.mem.mem_type;
	if (bo->tbo.sync_obj)
		r = ttm_bo_wait(&bo->tbo, true, true, no_wait);
	spin_unlock(&bo->tbo.bdev->fence_lock);
	ttm_bo_unreserve(&bo->tbo);
	return r;
}
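
/*
 * Illustrative sketch only: poll whether a bo is idle without
 * blocking; -EBUSY means it is still in use:
 *
 *	u32 mem_type;
 *
 *	r = radeon_bo_wait(bo, &mem_type, true);
 */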
/**
 * radeon_bo_reserve - reserve bo
 * @bo:		bo structure
 * @no_wait:	don't sleep while trying to reserve (return -EBUSY)
 *
 * Returns:
 * -EBUSY: buffer is busy and @no_wait is true
 * -ERESTARTSYS: A wait for the buffer to become unreserved was interrupted by
 * a signal. Release all buffer reservations and return to user-space.
 */
int radeon_bo_reserve(struct radeon_bo *bo, bool no_wait)
{
	int r;

	r = ttm_bo_reserve(&bo->tbo, true, no_wait, false, 0);
	if (unlikely(r != 0)) {
		if (r != -ERESTARTSYS)
			dev_err(bo->rdev->dev, "%p reserve failed\n", bo);
		return r;
	}
	return 0;
}
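
/*
 * Illustrative reserve/modify/unreserve pattern (a sketch only):
 *
 *	r = radeon_bo_reserve(bo, false);
 *	if (unlikely(r != 0))
 *		return r;
 *	radeon_bo_get_tiling_flags(bo, &tiling_flags, &pitch);
 *	radeon_bo_unreserve(bo);
 */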