drm: Use ENOENT consistently for the error return for an unmatched handle.
drivers/gpu/drm/nouveau/nouveau_gem.c
/*
 * Copyright (C) 2008 Ben Skeggs.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining
 * a copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sublicense, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial
 * portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
 * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
 * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
 * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
 * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 */
#include "drmP.h"
#include "drm.h"

#include "nouveau_drv.h"
#include "nouveau_drm.h"
#include "nouveau_dma.h"

#define nouveau_gem_pushbuf_sync(chan) 0
int
nouveau_gem_object_new(struct drm_gem_object *gem)
{
	return 0;
}

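/*
 * GEM object destructor: release any outstanding CPU access claim, drop
 * leftover pins, then unreference the backing TTM buffer object and free
 * the GEM wrapper itself.
 */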
void
nouveau_gem_object_del(struct drm_gem_object *gem)
{
	struct nouveau_bo *nvbo = gem->driver_private;
	struct ttm_buffer_object *bo;

	if (!nvbo)
		return;

	bo = &nvbo->bo;
	nvbo->gem = NULL;

	if (unlikely(nvbo->cpu_filp))
		ttm_bo_synccpu_write_release(bo);

	if (unlikely(nvbo->pin_refcnt)) {
		nvbo->pin_refcnt = 1;
		nouveau_bo_unpin(nvbo);
	}

	ttm_bo_unref(&bo);

	drm_gem_object_release(gem);
	kfree(gem);
}

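/*
 * Allocate a new buffer object and wrap it in a GEM object; the GEM
 * object's shmem file doubles as the buffer's swap storage.
 */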
int
nouveau_gem_new(struct drm_device *dev, struct nouveau_channel *chan,
		int size, int align, uint32_t flags, uint32_t tile_mode,
		uint32_t tile_flags, bool no_vm, bool mappable,
		struct nouveau_bo **pnvbo)
{
	struct nouveau_bo *nvbo;
	int ret;

	ret = nouveau_bo_new(dev, chan, size, align, flags, tile_mode,
			     tile_flags, no_vm, mappable, pnvbo);
	if (ret)
		return ret;
	nvbo = *pnvbo;

	nvbo->gem = drm_gem_object_alloc(dev, nvbo->bo.mem.size);
	if (!nvbo->gem) {
		nouveau_bo_ref(NULL, pnvbo);
		return -ENOMEM;
	}

	nvbo->bo.persistant_swap_storage = nvbo->gem->filp;
	nvbo->gem->driver_private = nvbo;
	return 0;
}

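/* Fill in the userspace info struct for a buffer object. */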
static int
nouveau_gem_info(struct drm_gem_object *gem, struct drm_nouveau_gem_info *rep)
{
	struct nouveau_bo *nvbo = nouveau_gem_object(gem);

	if (nvbo->bo.mem.mem_type == TTM_PL_TT)
		rep->domain = NOUVEAU_GEM_DOMAIN_GART;
	else
		rep->domain = NOUVEAU_GEM_DOMAIN_VRAM;

	rep->size = nvbo->bo.mem.num_pages << PAGE_SHIFT;
	rep->offset = nvbo->bo.offset;
	rep->map_handle = nvbo->mappable ? nvbo->bo.addr_space_offset : 0;
	rep->tile_mode = nvbo->tile_mode;
	rep->tile_flags = nvbo->tile_flags;
	return 0;
}

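/* Only a small whitelist of hardware tiling flag values is accepted. */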
static bool
nouveau_gem_tile_flags_valid(struct drm_device *dev, uint32_t tile_flags)
{
	switch (tile_flags) {
	case 0x0000:
	case 0x1800:
	case 0x2800:
	case 0x4800:
	case 0x7000:
	case 0x7400:
	case 0x7a00:
	case 0xe000:
		break;
	default:
		NV_ERROR(dev, "bad page flags: 0x%08x\n", tile_flags);
		return false;
	}

	return true;
}

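/*
 * DRM_NOUVEAU_GEM_NEW: allocate a buffer object, optionally associated
 * with a channel, and return a GEM handle for it to userspace.
 */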
int
nouveau_gem_ioctl_new(struct drm_device *dev, void *data,
		      struct drm_file *file_priv)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct drm_nouveau_gem_new *req = data;
	struct nouveau_bo *nvbo = NULL;
	struct nouveau_channel *chan = NULL;
	uint32_t flags = 0;
	int ret = 0;

	if (unlikely(dev_priv->ttm.bdev.dev_mapping == NULL))
		dev_priv->ttm.bdev.dev_mapping = dev_priv->dev->dev_mapping;

	if (req->channel_hint) {
		NOUVEAU_GET_USER_CHANNEL_WITH_RETURN(req->channel_hint,
						     file_priv, chan);
	}

	if (req->info.domain & NOUVEAU_GEM_DOMAIN_VRAM)
		flags |= TTM_PL_FLAG_VRAM;
	if (req->info.domain & NOUVEAU_GEM_DOMAIN_GART)
		flags |= TTM_PL_FLAG_TT;
	if (!flags || req->info.domain & NOUVEAU_GEM_DOMAIN_CPU)
		flags |= TTM_PL_FLAG_SYSTEM;

	if (!nouveau_gem_tile_flags_valid(dev, req->info.tile_flags))
		return -EINVAL;

	ret = nouveau_gem_new(dev, chan, req->info.size, req->align, flags,
			      req->info.tile_mode, req->info.tile_flags, false,
			      (req->info.domain & NOUVEAU_GEM_DOMAIN_MAPPABLE),
			      &nvbo);
	if (ret)
		return ret;

	ret = nouveau_gem_info(nvbo->gem, &req->info);
	if (ret)
		goto out;

	ret = drm_gem_handle_create(file_priv, nvbo->gem, &req->info.handle);
out:
	drm_gem_object_handle_unreference_unlocked(nvbo->gem);

	if (ret)
		drm_gem_object_unreference_unlocked(nvbo->gem);
	return ret;
}

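/*
 * Work out TTM placement flags from the domains userspace asked for,
 * preferring to leave the buffer in the memory type it already occupies.
 */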
static int
nouveau_gem_set_domain(struct drm_gem_object *gem, uint32_t read_domains,
		       uint32_t write_domains, uint32_t valid_domains)
{
	struct nouveau_bo *nvbo = gem->driver_private;
	struct ttm_buffer_object *bo = &nvbo->bo;
	uint32_t domains = valid_domains &
		(write_domains ? write_domains : read_domains);
	uint32_t pref_flags = 0, valid_flags = 0;

	if (!domains)
		return -EINVAL;

	if (valid_domains & NOUVEAU_GEM_DOMAIN_VRAM)
		valid_flags |= TTM_PL_FLAG_VRAM;

	if (valid_domains & NOUVEAU_GEM_DOMAIN_GART)
		valid_flags |= TTM_PL_FLAG_TT;

	if ((domains & NOUVEAU_GEM_DOMAIN_VRAM) &&
	    bo->mem.mem_type == TTM_PL_VRAM)
		pref_flags |= TTM_PL_FLAG_VRAM;

	else if ((domains & NOUVEAU_GEM_DOMAIN_GART) &&
		 bo->mem.mem_type == TTM_PL_TT)
		pref_flags |= TTM_PL_FLAG_TT;

	else if (domains & NOUVEAU_GEM_DOMAIN_VRAM)
		pref_flags |= TTM_PL_FLAG_VRAM;

	else
		pref_flags |= TTM_PL_FLAG_TT;

	nouveau_bo_placement_set(nvbo, pref_flags, valid_flags);

	return 0;
}

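/*
 * Per-pushbuf bookkeeping: buffers are queued onto one of three lists
 * according to which memory domains they may be placed in.
 */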
struct validate_op {
	struct list_head vram_list;
	struct list_head gart_list;
	struct list_head both_list;
};

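/*
 * Tear down a validation list: fence each buffer (when the pushbuf was
 * actually submitted), drop any kmap set up for relocations, then
 * unreserve and unreference it.
 */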
static void
validate_fini_list(struct list_head *list, struct nouveau_fence *fence)
{
	struct list_head *entry, *tmp;
	struct nouveau_bo *nvbo;

	list_for_each_safe(entry, tmp, list) {
		nvbo = list_entry(entry, struct nouveau_bo, entry);
		if (likely(fence)) {
			struct nouveau_fence *prev_fence;

			spin_lock(&nvbo->bo.lock);
			prev_fence = nvbo->bo.sync_obj;
			nvbo->bo.sync_obj = nouveau_fence_ref(fence);
			spin_unlock(&nvbo->bo.lock);
			nouveau_fence_unref((void *)&prev_fence);
		}

		if (unlikely(nvbo->validate_mapped)) {
			ttm_bo_kunmap(&nvbo->kmap);
			nvbo->validate_mapped = false;
		}

		list_del(&nvbo->entry);
		nvbo->reserved_by = NULL;
		ttm_bo_unreserve(&nvbo->bo);
		drm_gem_object_unreference(nvbo->gem);
	}
}

static void
validate_fini(struct validate_op *op, struct nouveau_fence *fence)
{
	validate_fini_list(&op->vram_list, fence);
	validate_fini_list(&op->gart_list, fence);
	validate_fini_list(&op->both_list, fence);
}

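/*
 * Look up and reserve every buffer on the pushbuf's buffer list, sorting
 * each onto the VRAM, GART or "either" list.  On reservation contention
 * the lists are unwound and the whole sequence retried.
 */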
static int
validate_init(struct nouveau_channel *chan, struct drm_file *file_priv,
	      struct drm_nouveau_gem_pushbuf_bo *pbbo,
	      int nr_buffers, struct validate_op *op)
{
	struct drm_device *dev = chan->dev;
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	uint32_t sequence;
	int trycnt = 0;
	int ret, i;

	sequence = atomic_add_return(1, &dev_priv->ttm.validate_sequence);
retry:
	if (++trycnt > 100000) {
		NV_ERROR(dev, "%s failed and gave up.\n", __func__);
		return -EINVAL;
	}

	for (i = 0; i < nr_buffers; i++) {
		struct drm_nouveau_gem_pushbuf_bo *b = &pbbo[i];
		struct drm_gem_object *gem;
		struct nouveau_bo *nvbo;

		gem = drm_gem_object_lookup(dev, file_priv, b->handle);
		if (!gem) {
			NV_ERROR(dev, "Unknown handle 0x%08x\n", b->handle);
			validate_fini(op, NULL);
			return -ENOENT;
		}
		nvbo = gem->driver_private;

		if (nvbo->reserved_by && nvbo->reserved_by == file_priv) {
			NV_ERROR(dev, "multiple instances of buffer %d on "
				      "validation list\n", b->handle);
			validate_fini(op, NULL);
			return -EINVAL;
		}

		ret = ttm_bo_reserve(&nvbo->bo, false, false, true, sequence);
		if (ret) {
			validate_fini(op, NULL);
			if (ret == -EAGAIN)
				ret = ttm_bo_wait_unreserved(&nvbo->bo, false);
			drm_gem_object_unreference(gem);
			if (ret) {
				NV_ERROR(dev, "fail reserve\n");
				return ret;
			}
			goto retry;
		}

		b->user_priv = (uint64_t)(unsigned long)nvbo;
		nvbo->reserved_by = file_priv;
		nvbo->pbbo_index = i;
		if ((b->valid_domains & NOUVEAU_GEM_DOMAIN_VRAM) &&
		    (b->valid_domains & NOUVEAU_GEM_DOMAIN_GART))
			list_add_tail(&nvbo->entry, &op->both_list);
		else
		if (b->valid_domains & NOUVEAU_GEM_DOMAIN_VRAM)
			list_add_tail(&nvbo->entry, &op->vram_list);
		else
		if (b->valid_domains & NOUVEAU_GEM_DOMAIN_GART)
			list_add_tail(&nvbo->entry, &op->gart_list);
		else {
			NV_ERROR(dev, "invalid valid domains: 0x%08x\n",
				 b->valid_domains);
			list_add_tail(&nvbo->entry, &op->both_list);
			validate_fini(op, NULL);
			return -EINVAL;
		}

		if (unlikely(atomic_read(&nvbo->bo.cpu_writers) > 0)) {
			validate_fini(op, NULL);

			if (nvbo->cpu_filp == file_priv) {
				NV_ERROR(dev, "bo %p mapped by process trying "
					      "to validate it!\n", nvbo);
				return -EINVAL;
			}

			ret = ttm_bo_wait_cpu(&nvbo->bo, false);
			if (ret) {
				NV_ERROR(dev, "fail wait_cpu\n");
				return ret;
			}
			goto retry;
		}
	}

	return 0;
}

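/*
 * Validate each reserved buffer into its target placement, first syncing
 * against work outstanding on other channels.  Returns the number of
 * buffers whose presumed offset went stale (each is copied back to
 * userspace), or a negative error code.
 */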
static int
validate_list(struct nouveau_channel *chan, struct list_head *list,
	      struct drm_nouveau_gem_pushbuf_bo *pbbo, uint64_t user_pbbo_ptr)
{
	struct drm_nouveau_gem_pushbuf_bo __user *upbbo =
				(void __force __user *)(uintptr_t)user_pbbo_ptr;
	struct drm_device *dev = chan->dev;
	struct nouveau_bo *nvbo;
	int ret, relocs = 0;

	list_for_each_entry(nvbo, list, entry) {
		struct drm_nouveau_gem_pushbuf_bo *b = &pbbo[nvbo->pbbo_index];
		struct nouveau_fence *prev_fence = nvbo->bo.sync_obj;

		if (prev_fence && nouveau_fence_channel(prev_fence) != chan) {
			spin_lock(&nvbo->bo.lock);
			ret = ttm_bo_wait(&nvbo->bo, false, false, false);
			spin_unlock(&nvbo->bo.lock);
			if (unlikely(ret)) {
				NV_ERROR(dev, "fail wait other chan\n");
				return ret;
			}
		}

		ret = nouveau_gem_set_domain(nvbo->gem, b->read_domains,
					     b->write_domains,
					     b->valid_domains);
		if (unlikely(ret)) {
			NV_ERROR(dev, "fail set_domain\n");
			return ret;
		}

		nvbo->channel = chan;
		ret = ttm_bo_validate(&nvbo->bo, &nvbo->placement,
				      false, false, false);
		nvbo->channel = NULL;
		if (unlikely(ret)) {
			NV_ERROR(dev, "fail ttm_validate\n");
			return ret;
		}

		if (nvbo->bo.offset == b->presumed.offset &&
		    ((nvbo->bo.mem.mem_type == TTM_PL_VRAM &&
		      b->presumed.domain & NOUVEAU_GEM_DOMAIN_VRAM) ||
		     (nvbo->bo.mem.mem_type == TTM_PL_TT &&
		      b->presumed.domain & NOUVEAU_GEM_DOMAIN_GART)))
			continue;

		if (nvbo->bo.mem.mem_type == TTM_PL_TT)
			b->presumed.domain = NOUVEAU_GEM_DOMAIN_GART;
		else
			b->presumed.domain = NOUVEAU_GEM_DOMAIN_VRAM;
		b->presumed.offset = nvbo->bo.offset;
		b->presumed.valid = 0;
		relocs++;

		if (DRM_COPY_TO_USER(&upbbo[nvbo->pbbo_index].presumed,
				     &b->presumed, sizeof(b->presumed)))
			return -EFAULT;
	}

	return relocs;
}

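/*
 * Reserve the full buffer list via validate_init(), then validate the
 * VRAM, GART and "either" lists in turn, accumulating the reloc count.
 */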
static int
nouveau_gem_pushbuf_validate(struct nouveau_channel *chan,
			     struct drm_file *file_priv,
			     struct drm_nouveau_gem_pushbuf_bo *pbbo,
			     uint64_t user_buffers, int nr_buffers,
			     struct validate_op *op, int *apply_relocs)
{
	struct drm_device *dev = chan->dev;
	int ret, relocs = 0;

	INIT_LIST_HEAD(&op->vram_list);
	INIT_LIST_HEAD(&op->gart_list);
	INIT_LIST_HEAD(&op->both_list);

	if (nr_buffers == 0)
		return 0;

	ret = validate_init(chan, file_priv, pbbo, nr_buffers, op);
	if (unlikely(ret)) {
		NV_ERROR(dev, "validate_init\n");
		return ret;
	}

	ret = validate_list(chan, &op->vram_list, pbbo, user_buffers);
	if (unlikely(ret < 0)) {
		NV_ERROR(dev, "validate vram_list\n");
		validate_fini(op, NULL);
		return ret;
	}
	relocs += ret;

	ret = validate_list(chan, &op->gart_list, pbbo, user_buffers);
	if (unlikely(ret < 0)) {
		NV_ERROR(dev, "validate gart_list\n");
		validate_fini(op, NULL);
		return ret;
	}
	relocs += ret;

	ret = validate_list(chan, &op->both_list, pbbo, user_buffers);
	if (unlikely(ret < 0)) {
		NV_ERROR(dev, "validate both_list\n");
		validate_fini(op, NULL);
		return ret;
	}
	relocs += ret;

	*apply_relocs = relocs;
	return 0;
}

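/*
 * Copy a userspace array into a freshly allocated kernel buffer;
 * returns an ERR_PTR on allocation or copy failure.
 */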
static inline void *
u_memcpya(uint64_t user, unsigned nmemb, unsigned size)
{
	void *mem;
	void __user *userptr = (void __force __user *)(uintptr_t)user;

	mem = kmalloc(nmemb * size, GFP_KERNEL);
	if (!mem)
		return ERR_PTR(-ENOMEM);

	if (DRM_COPY_FROM_USER(mem, userptr, nmemb * size)) {
		kfree(mem);
		return ERR_PTR(-EFAULT);
	}

	return mem;
}

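/*
 * Patch pushbuf relocations: for each reloc whose target buffer moved,
 * compute the new value (low/high word of the offset, optionally OR'd
 * with per-domain bits) and write it into the containing buffer through
 * a kmap, after waiting for the buffer to go idle.
 */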
static int
nouveau_gem_pushbuf_reloc_apply(struct drm_device *dev,
				struct drm_nouveau_gem_pushbuf *req,
				struct drm_nouveau_gem_pushbuf_bo *bo)
{
	struct drm_nouveau_gem_pushbuf_reloc *reloc = NULL;
	int ret = 0;
	unsigned i;

	reloc = u_memcpya(req->relocs, req->nr_relocs, sizeof(*reloc));
	if (IS_ERR(reloc))
		return PTR_ERR(reloc);

	for (i = 0; i < req->nr_relocs; i++) {
		struct drm_nouveau_gem_pushbuf_reloc *r = &reloc[i];
		struct drm_nouveau_gem_pushbuf_bo *b;
		struct nouveau_bo *nvbo;
		uint32_t data;

		if (unlikely(r->bo_index > req->nr_buffers)) {
			NV_ERROR(dev, "reloc bo index invalid\n");
			ret = -EINVAL;
			break;
		}

		b = &bo[r->bo_index];
		if (b->presumed.valid)
			continue;

		if (unlikely(r->reloc_bo_index > req->nr_buffers)) {
			NV_ERROR(dev, "reloc container bo index invalid\n");
			ret = -EINVAL;
			break;
		}
		nvbo = (void *)(unsigned long)bo[r->reloc_bo_index].user_priv;

		if (unlikely(r->reloc_bo_offset + 4 >
			     nvbo->bo.mem.num_pages << PAGE_SHIFT)) {
			NV_ERROR(dev, "reloc outside of bo\n");
			ret = -EINVAL;
			break;
		}

		if (!nvbo->kmap.virtual) {
			ret = ttm_bo_kmap(&nvbo->bo, 0, nvbo->bo.mem.num_pages,
					  &nvbo->kmap);
			if (ret) {
				NV_ERROR(dev, "failed kmap for reloc\n");
				break;
			}
			nvbo->validate_mapped = true;
		}

		if (r->flags & NOUVEAU_GEM_RELOC_LOW)
			data = b->presumed.offset + r->data;
		else
		if (r->flags & NOUVEAU_GEM_RELOC_HIGH)
			data = (b->presumed.offset + r->data) >> 32;
		else
			data = r->data;

		if (r->flags & NOUVEAU_GEM_RELOC_OR) {
			if (b->presumed.domain == NOUVEAU_GEM_DOMAIN_GART)
				data |= r->tor;
			else
				data |= r->vor;
		}

		spin_lock(&nvbo->bo.lock);
		ret = ttm_bo_wait(&nvbo->bo, false, false, false);
		spin_unlock(&nvbo->bo.lock);
		if (ret) {
			NV_ERROR(dev, "reloc wait_idle failed: %d\n", ret);
			break;
		}

		nouveau_bo_wr32(nvbo, r->reloc_bo_offset >> 2, data);
	}

	kfree(reloc);
	return ret;
}

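/*
 * DRM_NOUVEAU_GEM_PUSHBUF: validate the buffer list, apply any required
 * relocations, then submit each push segment to the channel via whichever
 * DMA mechanism the chipset supports, and fence the whole submission.
 */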
int
nouveau_gem_ioctl_pushbuf(struct drm_device *dev, void *data,
			  struct drm_file *file_priv)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct drm_nouveau_gem_pushbuf *req = data;
	struct drm_nouveau_gem_pushbuf_push *push;
	struct drm_nouveau_gem_pushbuf_bo *bo;
	struct nouveau_channel *chan;
	struct validate_op op;
	struct nouveau_fence *fence = NULL;
	int i, j, ret = 0, do_reloc = 0;

	NOUVEAU_GET_USER_CHANNEL_WITH_RETURN(req->channel, file_priv, chan);

	req->vram_available = dev_priv->fb_aper_free;
	req->gart_available = dev_priv->gart_info.aper_free;
	if (unlikely(req->nr_push == 0))
		goto out_next;

	if (unlikely(req->nr_push > NOUVEAU_GEM_MAX_PUSH)) {
		NV_ERROR(dev, "pushbuf push count exceeds limit: %d max %d\n",
			 req->nr_push, NOUVEAU_GEM_MAX_PUSH);
		return -EINVAL;
	}

	if (unlikely(req->nr_buffers > NOUVEAU_GEM_MAX_BUFFERS)) {
		NV_ERROR(dev, "pushbuf bo count exceeds limit: %d max %d\n",
			 req->nr_buffers, NOUVEAU_GEM_MAX_BUFFERS);
		return -EINVAL;
	}

	if (unlikely(req->nr_relocs > NOUVEAU_GEM_MAX_RELOCS)) {
		NV_ERROR(dev, "pushbuf reloc count exceeds limit: %d max %d\n",
			 req->nr_relocs, NOUVEAU_GEM_MAX_RELOCS);
		return -EINVAL;
	}

	push = u_memcpya(req->push, req->nr_push, sizeof(*push));
	if (IS_ERR(push))
		return PTR_ERR(push);

	bo = u_memcpya(req->buffers, req->nr_buffers, sizeof(*bo));
	if (IS_ERR(bo)) {
		kfree(push);
		return PTR_ERR(bo);
	}

	mutex_lock(&dev->struct_mutex);

	/* Validate buffer list */
	ret = nouveau_gem_pushbuf_validate(chan, file_priv, bo, req->buffers,
					   req->nr_buffers, &op, &do_reloc);
	if (ret) {
		NV_ERROR(dev, "validate: %d\n", ret);
		goto out;
	}

	/* Apply any relocations that are required */
	if (do_reloc) {
		ret = nouveau_gem_pushbuf_reloc_apply(dev, req, bo);
		if (ret) {
			NV_ERROR(dev, "reloc apply: %d\n", ret);
			goto out;
		}
	}

	if (chan->dma.ib_max) {
		ret = nouveau_dma_wait(chan, req->nr_push + 1, 6);
		if (ret) {
			NV_INFO(dev, "nv50cal_space: %d\n", ret);
			goto out;
		}

		for (i = 0; i < req->nr_push; i++) {
			struct nouveau_bo *nvbo = (void *)(unsigned long)
				bo[push[i].bo_index].user_priv;

			nv50_dma_push(chan, nvbo, push[i].offset,
				      push[i].length);
		}
	} else
	if (dev_priv->card_type >= NV_20) {
		ret = RING_SPACE(chan, req->nr_push * 2);
		if (ret) {
			NV_ERROR(dev, "cal_space: %d\n", ret);
			goto out;
		}

		for (i = 0; i < req->nr_push; i++) {
			struct nouveau_bo *nvbo = (void *)(unsigned long)
				bo[push[i].bo_index].user_priv;
			struct drm_mm_node *mem = nvbo->bo.mem.mm_node;

			OUT_RING(chan, ((mem->start << PAGE_SHIFT) +
					push[i].offset) | 2);
			OUT_RING(chan, 0);
		}
	} else {
		ret = RING_SPACE(chan, req->nr_push * (2 + NOUVEAU_DMA_SKIPS));
		if (ret) {
			NV_ERROR(dev, "jmp_space: %d\n", ret);
			goto out;
		}

		for (i = 0; i < req->nr_push; i++) {
			struct nouveau_bo *nvbo = (void *)(unsigned long)
				bo[push[i].bo_index].user_priv;
			struct drm_mm_node *mem = nvbo->bo.mem.mm_node;
			uint32_t cmd;

			cmd = chan->pushbuf_base + ((chan->dma.cur + 2) << 2);
			cmd |= 0x20000000;
			if (unlikely(cmd != req->suffix0)) {
				if (!nvbo->kmap.virtual) {
					ret = ttm_bo_kmap(&nvbo->bo, 0,
							  nvbo->bo.mem.num_pages,
							  &nvbo->kmap);
					if (ret) {
						WIND_RING(chan);
						goto out;
					}
					nvbo->validate_mapped = true;
				}

				nouveau_bo_wr32(nvbo, (push[i].offset +
						push[i].length - 8) / 4, cmd);
			}

			OUT_RING(chan, ((mem->start << PAGE_SHIFT) +
					push[i].offset) | 0x20000000);
			OUT_RING(chan, 0);
			for (j = 0; j < NOUVEAU_DMA_SKIPS; j++)
				OUT_RING(chan, 0);
		}
	}

	ret = nouveau_fence_new(chan, &fence, true);
	if (ret) {
		NV_ERROR(dev, "error fencing pushbuf: %d\n", ret);
		WIND_RING(chan);
		goto out;
	}

out:
	validate_fini(&op, fence);
	nouveau_fence_unref((void **)&fence);
	mutex_unlock(&dev->struct_mutex);
	kfree(bo);
	kfree(push);

out_next:
	if (chan->dma.ib_max) {
		req->suffix0 = 0x00000000;
		req->suffix1 = 0x00000000;
	} else
	if (dev_priv->card_type >= NV_20) {
		req->suffix0 = 0x00020000;
		req->suffix1 = 0x00000000;
	} else {
		req->suffix0 = 0x20000000 |
			      (chan->pushbuf_base + ((chan->dma.cur + 2) << 2));
		req->suffix1 = 0x00000000;
	}

	return ret;
}

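/* Translate GEM domain bits into the equivalent TTM placement flags. */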
static inline uint32_t
domain_to_ttm(struct nouveau_bo *nvbo, uint32_t domain)
{
	uint32_t flags = 0;

	if (domain & NOUVEAU_GEM_DOMAIN_VRAM)
		flags |= TTM_PL_FLAG_VRAM;
	if (domain & NOUVEAU_GEM_DOMAIN_GART)
		flags |= TTM_PL_FLAG_TT;

	return flags;
}

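/*
 * DRM_NOUVEAU_GEM_CPU_PREP: wait for a buffer to become idle (or just
 * poll, with NOWAIT) before userspace accesses it with the CPU.
 */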
int
nouveau_gem_ioctl_cpu_prep(struct drm_device *dev, void *data,
			   struct drm_file *file_priv)
{
	struct drm_nouveau_gem_cpu_prep *req = data;
	struct drm_gem_object *gem;
	struct nouveau_bo *nvbo;
	bool no_wait = !!(req->flags & NOUVEAU_GEM_CPU_PREP_NOWAIT);
	int ret = -EINVAL;

	gem = drm_gem_object_lookup(dev, file_priv, req->handle);
	if (!gem)
		return -ENOENT;
	nvbo = nouveau_gem_object(gem);

	if (nvbo->cpu_filp) {
		if (nvbo->cpu_filp == file_priv)
			goto out;

		ret = ttm_bo_wait_cpu(&nvbo->bo, no_wait);
		if (ret)
			goto out;
	}

	if (req->flags & NOUVEAU_GEM_CPU_PREP_NOBLOCK) {
		spin_lock(&nvbo->bo.lock);
		ret = ttm_bo_wait(&nvbo->bo, false, false, no_wait);
		spin_unlock(&nvbo->bo.lock);
	} else {
		ret = ttm_bo_synccpu_write_grab(&nvbo->bo, no_wait);
		if (ret == 0)
			nvbo->cpu_filp = file_priv;
	}

out:
	drm_gem_object_unreference_unlocked(gem);
	return ret;
}

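/* DRM_NOUVEAU_GEM_CPU_FINI: release the CPU access claim taken by cpu_prep. */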
int
nouveau_gem_ioctl_cpu_fini(struct drm_device *dev, void *data,
			   struct drm_file *file_priv)
{
	struct drm_nouveau_gem_cpu_prep *req = data;
	struct drm_gem_object *gem;
	struct nouveau_bo *nvbo;
	int ret = -EINVAL;

	gem = drm_gem_object_lookup(dev, file_priv, req->handle);
	if (!gem)
		return -ENOENT;
	nvbo = nouveau_gem_object(gem);

	if (nvbo->cpu_filp != file_priv)
		goto out;
	nvbo->cpu_filp = NULL;

	ttm_bo_synccpu_write_release(&nvbo->bo);
	ret = 0;

out:
	drm_gem_object_unreference_unlocked(gem);
	return ret;
}

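/* DRM_NOUVEAU_GEM_INFO: look up a handle and report the buffer's details. */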
int
nouveau_gem_ioctl_info(struct drm_device *dev, void *data,
		       struct drm_file *file_priv)
{
	struct drm_nouveau_gem_info *req = data;
	struct drm_gem_object *gem;
	int ret;

	gem = drm_gem_object_lookup(dev, file_priv, req->handle);
	if (!gem)
		return -ENOENT;

	ret = nouveau_gem_info(gem, req);
	drm_gem_object_unreference_unlocked(gem);
	return ret;
}