drivers/gpu/drm/radeon/radeon_gem.c
/*
 * Copyright 2008 Advanced Micro Devices, Inc.
 * Copyright 2008 Red Hat Inc.
 * Copyright 2009 Jerome Glisse.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 *          Alex Deucher
 *          Jerome Glisse
 */
#include "drmP.h"
#include "drm.h"
#include "radeon_drm.h"
#include "radeon.h"

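/*
 * Buffer objects are fully set up when the backing radeon_bo is created,
 * so this GEM init callback is never expected to run; the BUG() flags any
 * unexpected call.
 */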
int radeon_gem_object_init(struct drm_gem_object *obj)
{
        BUG();

        return 0;
}

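/*
 * Release the driver reference on the backing radeon_bo, tearing down any
 * PRIME import attachment first.
 */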
void radeon_gem_object_free(struct drm_gem_object *gobj)
{
        struct radeon_bo *robj = gem_to_radeon_bo(gobj);

        if (robj) {
                if (robj->gem_base.import_attach)
                        drm_prime_gem_destroy(&robj->gem_base, robj->tbo.sg);
                radeon_bo_unref(&robj);
        }
}

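/*
 * Allocate a radeon_bo (alignment is clamped to at least PAGE_SIZE) in the
 * requested domain, add it to the per-device GEM object list, and return
 * the embedded GEM object.
 */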
int radeon_gem_object_create(struct radeon_device *rdev, int size,
                             int alignment, int initial_domain,
                             bool discardable, bool kernel,
                             struct drm_gem_object **obj)
{
        struct radeon_bo *robj;
        int r;

        *obj = NULL;
        /* At least align on page size */
        if (alignment < PAGE_SIZE) {
                alignment = PAGE_SIZE;
        }
        r = radeon_bo_create(rdev, size, alignment, kernel, initial_domain, NULL, &robj);
        if (r) {
                if (r != -ERESTARTSYS)
                        DRM_ERROR("Failed to allocate GEM object (%d, %d, %u, %d)\n",
                                  size, initial_domain, alignment, r);
                return r;
        }
        *obj = &robj->gem_base;

        mutex_lock(&rdev->gem.mutex);
        list_add_tail(&robj->list, &rdev->gem.objects);
        mutex_unlock(&rdev->gem.mutex);

        return 0;
}

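/*
 * Move a buffer toward the requested domain. Currently only the CPU case
 * is handled, by waiting for the buffer to go idle (see the FIXME below).
 */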
int radeon_gem_set_domain(struct drm_gem_object *gobj,
                          uint32_t rdomain, uint32_t wdomain)
{
        struct radeon_bo *robj;
        uint32_t domain;
        int r;

        /* FIXME: reimplement */
        robj = gem_to_radeon_bo(gobj);
        /* work out where to validate the buffer to */
        domain = wdomain;
        if (!domain) {
                domain = rdomain;
        }
        if (!domain) {
                /* Do nothing */
                printk(KERN_WARNING "Set domain without domain!\n");
                return 0;
        }
        if (domain == RADEON_GEM_DOMAIN_CPU) {
                /* Asking for cpu access, wait for the object to be idle */
                r = radeon_bo_wait(robj, NULL, false);
                if (r) {
                        printk(KERN_ERR "Failed to wait for object!\n");
                        return r;
                }
        }
        return 0;
}

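/* Initialize the per-device list used to track GEM objects. */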
int radeon_gem_init(struct radeon_device *rdev)
{
        INIT_LIST_HEAD(&rdev->gem.objects);
        return 0;
}

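/* Force-delete any buffer objects still around at teardown time. */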
void radeon_gem_fini(struct radeon_device *rdev)
{
        radeon_bo_force_delete(rdev);
}

/*
 * Called from drm_gem_handle_create(), which is used by both the GEM
 * create and open ioctl paths.
 */
int radeon_gem_object_open(struct drm_gem_object *obj, struct drm_file *file_priv)
{
        return 0;
}

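/*
 * Drop any virtual-memory mappings this file had for the buffer. Only
 * relevant on chips with VM support (Cayman and newer).
 */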
void radeon_gem_object_close(struct drm_gem_object *obj,
                             struct drm_file *file_priv)
{
        struct radeon_bo *rbo = gem_to_radeon_bo(obj);
        struct radeon_device *rdev = rbo->rdev;
        struct radeon_fpriv *fpriv = file_priv->driver_priv;
        struct radeon_vm *vm = &fpriv->vm;
        struct radeon_bo_va *bo_va, *tmp;

        if (rdev->family < CHIP_CAYMAN) {
                return;
        }

        if (radeon_bo_reserve(rbo, false)) {
                return;
        }
        list_for_each_entry_safe(bo_va, tmp, &rbo->va, bo_list) {
                if (bo_va->vm == vm) {
                        /* remove from this vm address space */
                        mutex_lock(&vm->mutex);
                        list_del(&bo_va->vm_list);
                        mutex_unlock(&vm->mutex);
                        list_del(&bo_va->bo_list);
                        kfree(bo_va);
                }
        }
        radeon_bo_unreserve(rbo);
}

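/*
 * -EDEADLK from the BO layer means a GPU lockup was detected: try a GPU
 * reset and, if it succeeds, ask the caller to retry with -EAGAIN.
 */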
static int radeon_gem_handle_lockup(struct radeon_device *rdev, int r)
{
        if (r == -EDEADLK) {
                radeon_mutex_lock(&rdev->cs_mutex);
                r = radeon_gpu_reset(rdev);
                if (!r)
                        r = -EAGAIN;
                radeon_mutex_unlock(&rdev->cs_mutex);
        }
        return r;
}

/*
 * GEM ioctls.
 */
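/*
 * Report VRAM and GART sizes to userspace; the visible VRAM figure is
 * reduced by the stolen VGA memory and fbdev allocations, and the GART
 * figure by the space reserved for rings and the IB pool.
 */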
int radeon_gem_info_ioctl(struct drm_device *dev, void *data,
                          struct drm_file *filp)
{
        struct radeon_device *rdev = dev->dev_private;
        struct drm_radeon_gem_info *args = data;
        struct ttm_mem_type_manager *man;
        unsigned i;

        man = &rdev->mman.bdev.man[TTM_PL_VRAM];

        args->vram_size = rdev->mc.real_vram_size;
        args->vram_visible = (u64)man->size << PAGE_SHIFT;
        if (rdev->stollen_vga_memory)
                args->vram_visible -= radeon_bo_size(rdev->stollen_vga_memory);
        args->vram_visible -= radeon_fbdev_total_size(rdev);
        args->gart_size = rdev->mc.gtt_size - 4096 - RADEON_IB_POOL_SIZE*64*1024;
        for (i = 0; i < RADEON_NUM_RINGS; ++i)
                args->gart_size -= rdev->ring[i].ring_size;
        return 0;
}

int radeon_gem_pread_ioctl(struct drm_device *dev, void *data,
                           struct drm_file *filp)
{
        /* TODO: implement */
        DRM_ERROR("unimplemented %s\n", __func__);
        return -ENOSYS;
}

int radeon_gem_pwrite_ioctl(struct drm_device *dev, void *data,
                            struct drm_file *filp)
{
        /* TODO: implement */
        DRM_ERROR("unimplemented %s\n", __func__);
        return -ENOSYS;
}

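/*
 * Allocate a buffer object and return a handle for it; the handle holds
 * the only reference once the local one is dropped.
 */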
int radeon_gem_create_ioctl(struct drm_device *dev, void *data,
                            struct drm_file *filp)
{
        struct radeon_device *rdev = dev->dev_private;
        struct drm_radeon_gem_create *args = data;
        struct drm_gem_object *gobj;
        uint32_t handle;
        int r;

        /* create a gem object to contain this object in */
        args->size = roundup(args->size, PAGE_SIZE);
        r = radeon_gem_object_create(rdev, args->size, args->alignment,
                                     args->initial_domain, false,
                                     false, &gobj);
        if (r) {
                r = radeon_gem_handle_lockup(rdev, r);
                return r;
        }
        r = drm_gem_handle_create(filp, gobj, &handle);
        /* drop reference from allocate - handle holds it now */
        drm_gem_object_unreference_unlocked(gobj);
        if (r) {
                r = radeon_gem_handle_lockup(rdev, r);
                return r;
        }
        args->handle = handle;
        return 0;
}

int radeon_gem_set_domain_ioctl(struct drm_device *dev, void *data,
                                struct drm_file *filp)
{
        /* transition the BO to a domain -
         * just validate the BO into a certain domain */
        struct drm_radeon_gem_set_domain *args = data;
        struct drm_gem_object *gobj;
        struct radeon_bo *robj;
        int r;

        /* for now if someone requests domain CPU -
         * just make sure the buffer is finished with */

        /* just do a BO wait for now */
        gobj = drm_gem_object_lookup(dev, filp, args->handle);
        if (gobj == NULL) {
                return -ENOENT;
        }
        robj = gem_to_radeon_bo(gobj);

        r = radeon_gem_set_domain(gobj, args->read_domains, args->write_domain);

        drm_gem_object_unreference_unlocked(gobj);
        r = radeon_gem_handle_lockup(robj->rdev, r);
        return r;
}

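/*
 * Look up the GEM object for a handle and return its mmap offset so
 * userspace can map it through the DRM device node.
 */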
int radeon_mode_dumb_mmap(struct drm_file *filp,
                          struct drm_device *dev,
                          uint32_t handle, uint64_t *offset_p)
{
        struct drm_gem_object *gobj;
        struct radeon_bo *robj;

        gobj = drm_gem_object_lookup(dev, filp, handle);
        if (gobj == NULL) {
                return -ENOENT;
        }
        robj = gem_to_radeon_bo(gobj);
        *offset_p = radeon_bo_mmap_offset(robj);
        drm_gem_object_unreference_unlocked(gobj);
        return 0;
}

int radeon_gem_mmap_ioctl(struct drm_device *dev, void *data,
                          struct drm_file *filp)
{
        struct drm_radeon_gem_mmap *args = data;

        return radeon_mode_dumb_mmap(filp, dev, args->handle, &args->addr_ptr);
}

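/*
 * Non-blocking busy check: poll the BO and report its current placement
 * back to userspace as a GEM domain.
 */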
int radeon_gem_busy_ioctl(struct drm_device *dev, void *data,
                          struct drm_file *filp)
{
        struct drm_radeon_gem_busy *args = data;
        struct drm_gem_object *gobj;
        struct radeon_bo *robj;
        int r;
        uint32_t cur_placement = 0;

        gobj = drm_gem_object_lookup(dev, filp, args->handle);
        if (gobj == NULL) {
                return -ENOENT;
        }
        robj = gem_to_radeon_bo(gobj);
        r = radeon_bo_wait(robj, &cur_placement, true);
        switch (cur_placement) {
        case TTM_PL_VRAM:
                args->domain = RADEON_GEM_DOMAIN_VRAM;
                break;
        case TTM_PL_TT:
                args->domain = RADEON_GEM_DOMAIN_GTT;
                break;
        case TTM_PL_SYSTEM:
                args->domain = RADEON_GEM_DOMAIN_CPU;
        default:
                break;
        }
        drm_gem_object_unreference_unlocked(gobj);
        r = radeon_gem_handle_lockup(robj->rdev, r);
        return r;
}

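/*
 * Block until the buffer is idle, then invoke the ASIC-specific wait-idle
 * hook if one is provided.
 */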
int radeon_gem_wait_idle_ioctl(struct drm_device *dev, void *data,
                               struct drm_file *filp)
{
        struct drm_radeon_gem_wait_idle *args = data;
        struct drm_gem_object *gobj;
        struct radeon_bo *robj;
        int r;

        gobj = drm_gem_object_lookup(dev, filp, args->handle);
        if (gobj == NULL) {
                return -ENOENT;
        }
        robj = gem_to_radeon_bo(gobj);
        r = radeon_bo_wait(robj, NULL, false);
        /* callback hw specific functions if any */
        if (robj->rdev->asic->ioctl_wait_idle)
                robj->rdev->asic->ioctl_wait_idle(robj->rdev, robj);
        drm_gem_object_unreference_unlocked(gobj);
        r = radeon_gem_handle_lockup(robj->rdev, r);
        return r;
}

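/* Apply userspace-supplied tiling flags and pitch to a buffer object. */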
int radeon_gem_set_tiling_ioctl(struct drm_device *dev, void *data,
                                struct drm_file *filp)
{
        struct drm_radeon_gem_set_tiling *args = data;
        struct drm_gem_object *gobj;
        struct radeon_bo *robj;
        int r = 0;

        DRM_DEBUG("%d\n", args->handle);
        gobj = drm_gem_object_lookup(dev, filp, args->handle);
        if (gobj == NULL)
                return -ENOENT;
        robj = gem_to_radeon_bo(gobj);
        r = radeon_bo_set_tiling_flags(robj, args->tiling_flags, args->pitch);
        drm_gem_object_unreference_unlocked(gobj);
        return r;
}

int radeon_gem_get_tiling_ioctl(struct drm_device *dev, void *data,
                                struct drm_file *filp)
{
        struct drm_radeon_gem_get_tiling *args = data;
        struct drm_gem_object *gobj;
        struct radeon_bo *rbo;
        int r = 0;

        DRM_DEBUG("\n");
        gobj = drm_gem_object_lookup(dev, filp, args->handle);
        if (gobj == NULL)
                return -ENOENT;
        rbo = gem_to_radeon_bo(gobj);
        r = radeon_bo_reserve(rbo, false);
        if (unlikely(r != 0))
                goto out;
        radeon_bo_get_tiling_flags(rbo, &args->tiling_flags, &args->pitch);
        radeon_bo_unreserve(rbo);
out:
        drm_gem_object_unreference_unlocked(gobj);
        return r;
}

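/*
 * Map or unmap a buffer object in this file's GPU virtual address space.
 * The arguments are validated strictly (no vm_id, offset outside the
 * reserved area, snooped pages only) so the interface can be extended
 * later without breaking userspace.
 */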
int radeon_gem_va_ioctl(struct drm_device *dev, void *data,
                        struct drm_file *filp)
{
        struct drm_radeon_gem_va *args = data;
        struct drm_gem_object *gobj;
        struct radeon_device *rdev = dev->dev_private;
        struct radeon_fpriv *fpriv = filp->driver_priv;
        struct radeon_bo *rbo;
        struct radeon_bo_va *bo_va;
        u32 invalid_flags;
        int r = 0;

        if (!rdev->vm_manager.enabled) {
                args->operation = RADEON_VA_RESULT_ERROR;
                return -ENOTTY;
        }

        /* !! DON'T REMOVE !!
         * We don't support vm_id yet, so to be sure we don't have broken
         * userspace, reject anyone trying to use a non-zero value. Thus,
         * moving forward we can use those fields without breaking existing
         * userspace.
         */
        if (args->vm_id) {
                args->operation = RADEON_VA_RESULT_ERROR;
                return -EINVAL;
        }

        if (args->offset < RADEON_VA_RESERVED_SIZE) {
                dev_err(&dev->pdev->dev,
                        "offset 0x%lX is in reserved area 0x%X\n",
                        (unsigned long)args->offset,
                        RADEON_VA_RESERVED_SIZE);
                args->operation = RADEON_VA_RESULT_ERROR;
                return -EINVAL;
        }

        /* don't remove, we need to enforce userspace to set the snooped flag,
         * otherwise we will end up with broken userspace and we won't be able
         * to enable this feature without adding a new interface
         */
        invalid_flags = RADEON_VM_PAGE_VALID | RADEON_VM_PAGE_SYSTEM;
        if ((args->flags & invalid_flags)) {
                dev_err(&dev->pdev->dev, "invalid flags 0x%08X vs 0x%08X\n",
                        args->flags, invalid_flags);
                args->operation = RADEON_VA_RESULT_ERROR;
                return -EINVAL;
        }
        if (!(args->flags & RADEON_VM_PAGE_SNOOPED)) {
                dev_err(&dev->pdev->dev, "only snooped mappings are supported for now\n");
                args->operation = RADEON_VA_RESULT_ERROR;
                return -EINVAL;
        }

        switch (args->operation) {
        case RADEON_VA_MAP:
        case RADEON_VA_UNMAP:
                break;
        default:
                dev_err(&dev->pdev->dev, "unsupported operation %d\n",
                        args->operation);
                args->operation = RADEON_VA_RESULT_ERROR;
                return -EINVAL;
        }

        gobj = drm_gem_object_lookup(dev, filp, args->handle);
        if (gobj == NULL) {
                args->operation = RADEON_VA_RESULT_ERROR;
                return -ENOENT;
        }
        rbo = gem_to_radeon_bo(gobj);
        r = radeon_bo_reserve(rbo, false);
        if (r) {
                args->operation = RADEON_VA_RESULT_ERROR;
                drm_gem_object_unreference_unlocked(gobj);
                return r;
        }
        switch (args->operation) {
        case RADEON_VA_MAP:
                bo_va = radeon_bo_va(rbo, &fpriv->vm);
                if (bo_va) {
                        args->operation = RADEON_VA_RESULT_VA_EXIST;
                        args->offset = bo_va->soffset;
                        goto out;
                }
                r = radeon_vm_bo_add(rdev, &fpriv->vm, rbo,
                                     args->offset, args->flags);
                break;
        case RADEON_VA_UNMAP:
                r = radeon_vm_bo_rmv(rdev, &fpriv->vm, rbo);
                break;
        default:
                break;
        }
        args->operation = RADEON_VA_RESULT_OK;
        if (r) {
                args->operation = RADEON_VA_RESULT_ERROR;
        }
out:
        radeon_bo_unreserve(rbo);
        drm_gem_object_unreference_unlocked(gobj);
        return r;
}

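/*
 * Dumb-buffer allocation for KMS: compute pitch and size from the
 * requested geometry, create a VRAM buffer object, and return a handle.
 */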
int radeon_mode_dumb_create(struct drm_file *file_priv,
                            struct drm_device *dev,
                            struct drm_mode_create_dumb *args)
{
        struct radeon_device *rdev = dev->dev_private;
        struct drm_gem_object *gobj;
        uint32_t handle;
        int r;

        args->pitch = radeon_align_pitch(rdev, args->width, args->bpp, 0) * ((args->bpp + 1) / 8);
        args->size = args->pitch * args->height;
        args->size = ALIGN(args->size, PAGE_SIZE);

        r = radeon_gem_object_create(rdev, args->size, 0,
                                     RADEON_GEM_DOMAIN_VRAM,
                                     false, ttm_bo_type_device,
                                     &gobj);
        if (r)
                return -ENOMEM;

        r = drm_gem_handle_create(file_priv, gobj, &handle);
        /* drop reference from allocate - handle holds it now */
        drm_gem_object_unreference_unlocked(gobj);
        if (r) {
                return r;
        }
        args->handle = handle;
        return 0;
}

int radeon_mode_dumb_destroy(struct drm_file *file_priv,
                             struct drm_device *dev,
                             uint32_t handle)
{
        return drm_gem_handle_delete(file_priv, handle);
}