Revert "drm/radeon/kms: add a new gem_wait ioctl with read/write flags"
[deliverable/linux.git] / drivers / gpu / drm / nouveau / nouveau_bo.c
CommitLineData
6ee73861
BS
/*
 * Copyright 2007 Dave Airlied
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */
/*
 * Authors: Dave Airlied <airlied@linux.ie>
 *	    Ben Skeggs <darktama@iinet.net.au>
 *	    Jeremy Kolb <jkolb@brandeis.edu>
 */

#include "drmP.h"

#include "nouveau_drm.h"
#include "nouveau_drv.h"
#include "nouveau_dma.h"
#include "nouveau_mm.h"
#include "nouveau_vm.h"

#include <linux/log2.h>
#include <linux/slab.h>

static void
nouveau_bo_del_ttm(struct ttm_buffer_object *bo)
{
	struct drm_nouveau_private *dev_priv = nouveau_bdev(bo->bdev);
	struct drm_device *dev = dev_priv->dev;
	struct nouveau_bo *nvbo = nouveau_bo(bo);

	if (unlikely(nvbo->gem))
		DRM_ERROR("bo %p still attached to GEM object\n", bo);

	nv10_mem_put_tile_region(dev, nvbo->tile, NULL);
	kfree(nvbo);
}

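/*
 * Adjust the requested size/alignment to satisfy the tiling constraints
 * of the target chipset.  Pre-NV50 parts need a power-of-two alignment
 * that grows with the chipset generation when a tile_mode is set; NV50+
 * only needs to honour the buffer's page size.  Size is always rounded
 * up to a whole page at the end.
 */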
static void
nouveau_bo_fixup_align(struct nouveau_bo *nvbo, u32 flags,
		       int *align, int *size)
{
	struct drm_nouveau_private *dev_priv = nouveau_bdev(nvbo->bo.bdev);

	if (dev_priv->card_type < NV_50) {
		if (nvbo->tile_mode) {
			if (dev_priv->chipset >= 0x40) {
				*align = 65536;
				*size = roundup(*size, 64 * nvbo->tile_mode);

			} else if (dev_priv->chipset >= 0x30) {
				*align = 32768;
				*size = roundup(*size, 64 * nvbo->tile_mode);

			} else if (dev_priv->chipset >= 0x20) {
				*align = 16384;
				*size = roundup(*size, 64 * nvbo->tile_mode);

			} else if (dev_priv->chipset >= 0x10) {
				*align = 16384;
				*size = roundup(*size, 32 * nvbo->tile_mode);
			}
		}
	} else {
		*size = roundup(*size, (1 << nvbo->page_shift));
		*align = max((1 << nvbo->page_shift), *align);
	}

	*size = roundup(*size, PAGE_SIZE);
}

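/*
 * Allocate and initialise a nouveau_bo, fix up its size/alignment for
 * tiling, and hand it over to TTM.  Large pages are used for VRAM-only
 * buffers bigger than 256KiB when a BAR1 VM exists.  On failure,
 * ttm_bo_init() invokes nouveau_bo_del_ttm() itself, so there is
 * nothing to unwind here.
 */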
int
nouveau_bo_new(struct drm_device *dev, int size, int align,
	       uint32_t flags, uint32_t tile_mode, uint32_t tile_flags,
	       struct nouveau_bo **pnvbo)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_bo *nvbo;
	int ret;

	nvbo = kzalloc(sizeof(struct nouveau_bo), GFP_KERNEL);
	if (!nvbo)
		return -ENOMEM;
	INIT_LIST_HEAD(&nvbo->head);
	INIT_LIST_HEAD(&nvbo->entry);
	INIT_LIST_HEAD(&nvbo->vma_list);
	nvbo->tile_mode = tile_mode;
	nvbo->tile_flags = tile_flags;
	nvbo->bo.bdev = &dev_priv->ttm.bdev;

	nvbo->page_shift = 12;
	if (dev_priv->bar1_vm) {
		if (!(flags & TTM_PL_FLAG_TT) && size > 256 * 1024)
			nvbo->page_shift = dev_priv->bar1_vm->lpg_shift;
	}

	nouveau_bo_fixup_align(nvbo, flags, &align, &size);
	nvbo->bo.mem.num_pages = size >> PAGE_SHIFT;
	nouveau_bo_placement_set(nvbo, flags, 0);

	ret = ttm_bo_init(&dev_priv->ttm.bdev, &nvbo->bo, size,
			  ttm_bo_type_device, &nvbo->placement,
			  align >> PAGE_SHIFT, 0, false, NULL, size,
			  nouveau_bo_del_ttm);
	if (ret) {
		/* ttm will call nouveau_bo_del_ttm if it fails.. */
		return ret;
	}

	*pnvbo = nvbo;
	return 0;
}

static void
set_placement_list(uint32_t *pl, unsigned *n, uint32_t type, uint32_t flags)
{
	*n = 0;

	if (type & TTM_PL_FLAG_VRAM)
		pl[(*n)++] = TTM_PL_FLAG_VRAM | flags;
	if (type & TTM_PL_FLAG_TT)
		pl[(*n)++] = TTM_PL_FLAG_TT | flags;
	if (type & TTM_PL_FLAG_SYSTEM)
		pl[(*n)++] = TTM_PL_FLAG_SYSTEM | flags;
}

static void
set_placement_range(struct nouveau_bo *nvbo, uint32_t type)
{
	struct drm_nouveau_private *dev_priv = nouveau_bdev(nvbo->bo.bdev);
	int vram_pages = dev_priv->vram_size >> PAGE_SHIFT;

	if (dev_priv->card_type == NV_10 &&
	    nvbo->tile_mode && (type & TTM_PL_FLAG_VRAM) &&
	    nvbo->bo.mem.num_pages < vram_pages / 2) {
		/*
		 * Make sure that the color and depth buffers are handled
		 * by independent memory controller units.  Up to a 9x
		 * speed up when alpha-blending and depth-test are enabled
		 * at the same time.
		 */
		if (nvbo->tile_flags & NOUVEAU_GEM_TILE_ZETA) {
			nvbo->placement.fpfn = vram_pages / 2;
			nvbo->placement.lpfn = ~0;
		} else {
			nvbo->placement.fpfn = 0;
			nvbo->placement.lpfn = vram_pages / 2;
		}
	}
}

void
nouveau_bo_placement_set(struct nouveau_bo *nvbo, uint32_t type, uint32_t busy)
{
	struct ttm_placement *pl = &nvbo->placement;
	uint32_t flags = TTM_PL_MASK_CACHING |
		(nvbo->pin_refcnt ? TTM_PL_FLAG_NO_EVICT : 0);

	pl->placement = nvbo->placements;
	set_placement_list(nvbo->placements, &pl->num_placement,
			   type, flags);

	pl->busy_placement = nvbo->busy_placements;
	set_placement_list(nvbo->busy_placements, &pl->num_busy_placement,
			   type | busy, flags);

	set_placement_range(nvbo, type);
}

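/*
 * Pin a buffer into the requested memory type and mark it NO_EVICT.
 * Pinning is refcounted: only the first pin actually validates the
 * buffer, and attempting to pin an already-pinned buffer at a
 * different memory type fails with -EINVAL.
 */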
int
nouveau_bo_pin(struct nouveau_bo *nvbo, uint32_t memtype)
{
	struct drm_nouveau_private *dev_priv = nouveau_bdev(nvbo->bo.bdev);
	struct ttm_buffer_object *bo = &nvbo->bo;
	int ret;

	if (nvbo->pin_refcnt && !(memtype & (1 << bo->mem.mem_type))) {
		NV_ERROR(nouveau_bdev(bo->bdev)->dev,
			 "bo %p pinned elsewhere: 0x%08x vs 0x%08x\n", bo,
			 1 << bo->mem.mem_type, memtype);
		return -EINVAL;
	}

	if (nvbo->pin_refcnt++)
		return 0;

	ret = ttm_bo_reserve(bo, false, false, false, 0);
	if (ret)
		goto out;

	nouveau_bo_placement_set(nvbo, memtype, 0);

	ret = nouveau_bo_validate(nvbo, false, false, false);
	if (ret == 0) {
		switch (bo->mem.mem_type) {
		case TTM_PL_VRAM:
			dev_priv->fb_aper_free -= bo->mem.size;
			break;
		case TTM_PL_TT:
			dev_priv->gart_info.aper_free -= bo->mem.size;
			break;
		default:
			break;
		}
	}
	ttm_bo_unreserve(bo);
out:
	if (unlikely(ret))
		nvbo->pin_refcnt--;
	return ret;
}

int
nouveau_bo_unpin(struct nouveau_bo *nvbo)
{
	struct drm_nouveau_private *dev_priv = nouveau_bdev(nvbo->bo.bdev);
	struct ttm_buffer_object *bo = &nvbo->bo;
	int ret;

	if (--nvbo->pin_refcnt)
		return 0;

	ret = ttm_bo_reserve(bo, false, false, false, 0);
	if (ret)
		return ret;

	nouveau_bo_placement_set(nvbo, bo->mem.placement, 0);

	ret = nouveau_bo_validate(nvbo, false, false, false);
	if (ret == 0) {
		switch (bo->mem.mem_type) {
		case TTM_PL_VRAM:
			dev_priv->fb_aper_free += bo->mem.size;
			break;
		case TTM_PL_TT:
			dev_priv->gart_info.aper_free += bo->mem.size;
			break;
		default:
			break;
		}
	}

	ttm_bo_unreserve(bo);
	return ret;
}

int
nouveau_bo_map(struct nouveau_bo *nvbo)
{
	int ret;

	ret = ttm_bo_reserve(&nvbo->bo, false, false, false, 0);
	if (ret)
		return ret;

	ret = ttm_bo_kmap(&nvbo->bo, 0, nvbo->bo.mem.num_pages, &nvbo->kmap);
	ttm_bo_unreserve(&nvbo->bo);
	return ret;
}

void
nouveau_bo_unmap(struct nouveau_bo *nvbo)
{
	if (nvbo)
		ttm_bo_kunmap(&nvbo->kmap);
}

int
nouveau_bo_validate(struct nouveau_bo *nvbo, bool interruptible,
		    bool no_wait_reserve, bool no_wait_gpu)
{
	int ret;

	ret = ttm_bo_validate(&nvbo->bo, &nvbo->placement, interruptible,
			      no_wait_reserve, no_wait_gpu);
	if (ret)
		return ret;

	return 0;
}

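/*
 * CPU accessors for a kmapped bo.  The bo must have been mapped with
 * nouveau_bo_map() first; ttm_kmap_obj_virtual() tells us whether the
 * mapping is iomem (e.g. VRAM through the aperture) or plain kernel
 * memory, and the right access primitive is picked accordingly.
 */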
u16
nouveau_bo_rd16(struct nouveau_bo *nvbo, unsigned index)
{
	bool is_iomem;
	u16 *mem = ttm_kmap_obj_virtual(&nvbo->kmap, &is_iomem);
	mem = &mem[index];
	if (is_iomem)
		return ioread16_native((void __force __iomem *)mem);
	else
		return *mem;
}

void
nouveau_bo_wr16(struct nouveau_bo *nvbo, unsigned index, u16 val)
{
	bool is_iomem;
	u16 *mem = ttm_kmap_obj_virtual(&nvbo->kmap, &is_iomem);
	mem = &mem[index];
	if (is_iomem)
		iowrite16_native(val, (void __force __iomem *)mem);
	else
		*mem = val;
}

u32
nouveau_bo_rd32(struct nouveau_bo *nvbo, unsigned index)
{
	bool is_iomem;
	u32 *mem = ttm_kmap_obj_virtual(&nvbo->kmap, &is_iomem);
	mem = &mem[index];
	if (is_iomem)
		return ioread32_native((void __force __iomem *)mem);
	else
		return *mem;
}

void
nouveau_bo_wr32(struct nouveau_bo *nvbo, unsigned index, u32 val)
{
	bool is_iomem;
	u32 *mem = ttm_kmap_obj_virtual(&nvbo->kmap, &is_iomem);
	mem = &mem[index];
	if (is_iomem)
		iowrite32_native(val, (void __force __iomem *)mem);
	else
		*mem = val;
}

static struct ttm_backend *
nouveau_bo_create_ttm_backend_entry(struct ttm_bo_device *bdev)
{
	struct drm_nouveau_private *dev_priv = nouveau_bdev(bdev);
	struct drm_device *dev = dev_priv->dev;

	switch (dev_priv->gart_info.type) {
#if __OS_HAS_AGP
	case NOUVEAU_GART_AGP:
		return ttm_agp_backend_init(bdev, dev->agp->bridge);
#endif
	case NOUVEAU_GART_PDMA:
	case NOUVEAU_GART_HW:
		return nouveau_sgdma_init_ttm(dev);
	default:
		NV_ERROR(dev, "Unknown GART type %d\n",
			 dev_priv->gart_info.type);
		break;
	}

	return NULL;
}

static int
nouveau_bo_invalidate_caches(struct ttm_bo_device *bdev, uint32_t flags)
{
	/* We'll do this from user space. */
	return 0;
}

static int
nouveau_bo_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
			 struct ttm_mem_type_manager *man)
{
	struct drm_nouveau_private *dev_priv = nouveau_bdev(bdev);
	struct drm_device *dev = dev_priv->dev;

	switch (type) {
	case TTM_PL_SYSTEM:
		man->flags = TTM_MEMTYPE_FLAG_MAPPABLE;
		man->available_caching = TTM_PL_MASK_CACHING;
		man->default_caching = TTM_PL_FLAG_CACHED;
		break;
	case TTM_PL_VRAM:
		if (dev_priv->card_type >= NV_50) {
			man->func = &nouveau_vram_manager;
			man->io_reserve_fastpath = false;
			man->use_io_reserve_lru = true;
		} else {
			man->func = &ttm_bo_manager_func;
		}
		man->flags = TTM_MEMTYPE_FLAG_FIXED |
			     TTM_MEMTYPE_FLAG_MAPPABLE;
		man->available_caching = TTM_PL_FLAG_UNCACHED |
					 TTM_PL_FLAG_WC;
		man->default_caching = TTM_PL_FLAG_WC;
		break;
	case TTM_PL_TT:
		if (dev_priv->card_type >= NV_50)
			man->func = &nouveau_gart_manager;
		else
			man->func = &ttm_bo_manager_func;
		switch (dev_priv->gart_info.type) {
		case NOUVEAU_GART_AGP:
			man->flags = TTM_MEMTYPE_FLAG_MAPPABLE;
			man->available_caching = TTM_PL_FLAG_UNCACHED |
						 TTM_PL_FLAG_WC;
			man->default_caching = TTM_PL_FLAG_WC;
			break;
		case NOUVEAU_GART_PDMA:
		case NOUVEAU_GART_HW:
			man->flags = TTM_MEMTYPE_FLAG_MAPPABLE |
				     TTM_MEMTYPE_FLAG_CMA;
			man->available_caching = TTM_PL_MASK_CACHING;
			man->default_caching = TTM_PL_FLAG_CACHED;
			break;
		default:
			NV_ERROR(dev, "Unknown GART type: %d\n",
				 dev_priv->gart_info.type);
			return -EINVAL;
		}
		break;
	default:
		NV_ERROR(dev, "Unsupported memory type %u\n", (unsigned)type);
		return -EINVAL;
	}
	return 0;
}

static void
nouveau_bo_evict_flags(struct ttm_buffer_object *bo, struct ttm_placement *pl)
{
	struct nouveau_bo *nvbo = nouveau_bo(bo);

	switch (bo->mem.mem_type) {
	case TTM_PL_VRAM:
		nouveau_bo_placement_set(nvbo, TTM_PL_FLAG_TT,
					 TTM_PL_FLAG_SYSTEM);
		break;
	default:
		nouveau_bo_placement_set(nvbo, TTM_PL_FLAG_SYSTEM, 0);
		break;
	}

	*pl = nvbo->placement;
}

/* GPU-assisted copy using NV_MEMORY_TO_MEMORY_FORMAT, can access
 * TTM_PL_{VRAM,TT} directly.
 */

static int
nouveau_bo_move_accel_cleanup(struct nouveau_channel *chan,
			      struct nouveau_bo *nvbo, bool evict,
			      bool no_wait_reserve, bool no_wait_gpu,
			      struct ttm_mem_reg *new_mem)
{
	struct nouveau_fence *fence = NULL;
	int ret;

	ret = nouveau_fence_new(chan, &fence, true);
	if (ret)
		return ret;

	ret = ttm_bo_move_accel_cleanup(&nvbo->bo, fence, NULL, evict,
					no_wait_reserve, no_wait_gpu, new_mem);
	nouveau_fence_unref(&fence);
	return ret;
}

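/*
 * Fermi (NVC0) M2MF copy.  Both buffers are reached through the
 * temporary VMAs that nouveau_bo_move_m2mf() below attaches to the old
 * nouveau_mem node, and the copy is issued as linear transfers of one
 * page per line, at most 2047 lines per submission.
 */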
static int
nvc0_bo_move_m2mf(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
		  struct ttm_mem_reg *old_mem, struct ttm_mem_reg *new_mem)
{
	struct nouveau_mem *node = old_mem->mm_node;
	u64 src_offset = node->vma[0].offset;
	u64 dst_offset = node->vma[1].offset;
	u32 page_count = new_mem->num_pages;
	int ret;

	while (page_count) {
		int line_count = (page_count > 2047) ? 2047 : page_count;

		ret = RING_SPACE(chan, 12);
		if (ret)
			return ret;

		BEGIN_NVC0(chan, 2, NvSubM2MF, 0x0238, 2);
		OUT_RING  (chan, upper_32_bits(dst_offset));
		OUT_RING  (chan, lower_32_bits(dst_offset));
		BEGIN_NVC0(chan, 2, NvSubM2MF, 0x030c, 6);
		OUT_RING  (chan, upper_32_bits(src_offset));
		OUT_RING  (chan, lower_32_bits(src_offset));
		OUT_RING  (chan, PAGE_SIZE); /* src_pitch */
		OUT_RING  (chan, PAGE_SIZE); /* dst_pitch */
		OUT_RING  (chan, PAGE_SIZE); /* line_length */
		OUT_RING  (chan, line_count);
		BEGIN_NVC0(chan, 2, NvSubM2MF, 0x0300, 1);
		OUT_RING  (chan, 0x00100110);

		page_count -= line_count;
		src_offset += (PAGE_SIZE * line_count);
		dst_offset += (PAGE_SIZE * line_count);
	}

	return 0;
}

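/*
 * NV50 M2MF copy, up to 4MiB per loop iteration.  Judging by how they
 * are gated on new_mem/old_mem, the 0x0200 and 0x021c method groups
 * select tiled vs. linear addressing for the destination and source
 * sides respectively.
 */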
static int
nv50_bo_move_m2mf(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
		  struct ttm_mem_reg *old_mem, struct ttm_mem_reg *new_mem)
{
	struct nouveau_mem *node = old_mem->mm_node;
	struct nouveau_bo *nvbo = nouveau_bo(bo);
	u64 length = (new_mem->num_pages << PAGE_SHIFT);
	u64 src_offset = node->vma[0].offset;
	u64 dst_offset = node->vma[1].offset;
	int ret;

	while (length) {
		u32 amount, stride, height;

		amount  = min(length, (u64)(4 * 1024 * 1024));
		stride  = 16 * 4;
		height  = amount / stride;

		if (new_mem->mem_type == TTM_PL_VRAM &&
		    nouveau_bo_tile_layout(nvbo)) {
			ret = RING_SPACE(chan, 8);
			if (ret)
				return ret;

			BEGIN_RING(chan, NvSubM2MF, 0x0200, 7);
			OUT_RING  (chan, 0);
			OUT_RING  (chan, 0);
			OUT_RING  (chan, stride);
			OUT_RING  (chan, height);
			OUT_RING  (chan, 1);
			OUT_RING  (chan, 0);
			OUT_RING  (chan, 0);
		} else {
			ret = RING_SPACE(chan, 2);
			if (ret)
				return ret;

			BEGIN_RING(chan, NvSubM2MF, 0x0200, 1);
			OUT_RING  (chan, 1);
		}
		if (old_mem->mem_type == TTM_PL_VRAM &&
		    nouveau_bo_tile_layout(nvbo)) {
			ret = RING_SPACE(chan, 8);
			if (ret)
				return ret;

			BEGIN_RING(chan, NvSubM2MF, 0x021c, 7);
			OUT_RING  (chan, 0);
			OUT_RING  (chan, 0);
			OUT_RING  (chan, stride);
			OUT_RING  (chan, height);
			OUT_RING  (chan, 1);
			OUT_RING  (chan, 0);
			OUT_RING  (chan, 0);
		} else {
			ret = RING_SPACE(chan, 2);
			if (ret)
				return ret;

			BEGIN_RING(chan, NvSubM2MF, 0x021c, 1);
			OUT_RING  (chan, 1);
		}

		ret = RING_SPACE(chan, 14);
		if (ret)
			return ret;

		BEGIN_RING(chan, NvSubM2MF, 0x0238, 2);
		OUT_RING  (chan, upper_32_bits(src_offset));
		OUT_RING  (chan, upper_32_bits(dst_offset));
		BEGIN_RING(chan, NvSubM2MF, 0x030c, 8);
		OUT_RING  (chan, lower_32_bits(src_offset));
		OUT_RING  (chan, lower_32_bits(dst_offset));
		OUT_RING  (chan, stride);
		OUT_RING  (chan, stride);
		OUT_RING  (chan, stride);
		OUT_RING  (chan, height);
		OUT_RING  (chan, 0x00000101);
		OUT_RING  (chan, 0x00000000);
		BEGIN_RING(chan, NvSubM2MF, NV_MEMORY_TO_MEMORY_FORMAT_NOP, 1);
		OUT_RING  (chan, 0);

		length -= amount;
		src_offset += amount;
		dst_offset += amount;
	}

	return 0;
}

static inline uint32_t
nouveau_bo_mem_ctxdma(struct ttm_buffer_object *bo,
		      struct nouveau_channel *chan, struct ttm_mem_reg *mem)
{
	if (mem->mem_type == TTM_PL_TT)
		return chan->gart_handle;
	return chan->vram_handle;
}

static int
nv04_bo_move_m2mf(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
		  struct ttm_mem_reg *old_mem, struct ttm_mem_reg *new_mem)
{
	u32 src_offset = old_mem->start << PAGE_SHIFT;
	u32 dst_offset = new_mem->start << PAGE_SHIFT;
	u32 page_count = new_mem->num_pages;
	int ret;

	ret = RING_SPACE(chan, 3);
	if (ret)
		return ret;

	BEGIN_RING(chan, NvSubM2MF, NV_MEMORY_TO_MEMORY_FORMAT_DMA_SOURCE, 2);
	OUT_RING  (chan, nouveau_bo_mem_ctxdma(bo, chan, old_mem));
	OUT_RING  (chan, nouveau_bo_mem_ctxdma(bo, chan, new_mem));

	while (page_count) {
		int line_count = (page_count > 2047) ? 2047 : page_count;

		ret = RING_SPACE(chan, 11);
		if (ret)
			return ret;

		BEGIN_RING(chan, NvSubM2MF,
				 NV_MEMORY_TO_MEMORY_FORMAT_OFFSET_IN, 8);
		OUT_RING  (chan, src_offset);
		OUT_RING  (chan, dst_offset);
		OUT_RING  (chan, PAGE_SIZE); /* src_pitch */
		OUT_RING  (chan, PAGE_SIZE); /* dst_pitch */
		OUT_RING  (chan, PAGE_SIZE); /* line_length */
		OUT_RING  (chan, line_count);
		OUT_RING  (chan, 0x00000101);
		OUT_RING  (chan, 0x00000000);
		BEGIN_RING(chan, NvSubM2MF, NV_MEMORY_TO_MEMORY_FORMAT_NOP, 1);
		OUT_RING  (chan, 0);

		page_count -= line_count;
		src_offset += (PAGE_SIZE * line_count);
		dst_offset += (PAGE_SIZE * line_count);
	}

	return 0;
}

static int
nouveau_vma_getmap(struct nouveau_channel *chan, struct nouveau_bo *nvbo,
		   struct ttm_mem_reg *mem, struct nouveau_vma *vma)
{
	struct nouveau_mem *node = mem->mm_node;
	int ret;

	ret = nouveau_vm_get(chan->vm, mem->num_pages << PAGE_SHIFT,
			     node->page_shift, NV_MEM_ACCESS_RO, vma);
	if (ret)
		return ret;

	if (mem->mem_type == TTM_PL_VRAM)
		nouveau_vm_map(vma, node);
	else
		nouveau_vm_map_sg(vma, 0, mem->num_pages << PAGE_SHIFT,
				  node, node->pages);

	return 0;
}

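/*
 * Dispatch a GPU copy on whichever channel is appropriate: the bo's
 * own channel if it has one, otherwise the kernel channel (taken under
 * NOUVEAU_KCHANNEL_MUTEX).  Routes to the nv04/nv50/nvc0 implementation
 * above based on card generation.
 */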
static int
nouveau_bo_move_m2mf(struct ttm_buffer_object *bo, int evict, bool intr,
		     bool no_wait_reserve, bool no_wait_gpu,
		     struct ttm_mem_reg *new_mem)
{
	struct drm_nouveau_private *dev_priv = nouveau_bdev(bo->bdev);
	struct nouveau_bo *nvbo = nouveau_bo(bo);
	struct ttm_mem_reg *old_mem = &bo->mem;
	struct nouveau_channel *chan;
	int ret;

	chan = nvbo->channel;
	if (!chan) {
		chan = dev_priv->channel;
		mutex_lock_nested(&chan->mutex, NOUVEAU_KCHANNEL_MUTEX);
	}

	/* create temporary vmas for the transfer and attach them to the
	 * old nouveau_mem node, these will get cleaned up after ttm has
	 * destroyed the ttm_mem_reg
	 */
	if (dev_priv->card_type >= NV_50) {
		struct nouveau_mem *node = old_mem->mm_node;

		ret = nouveau_vma_getmap(chan, nvbo, old_mem, &node->vma[0]);
		if (ret)
			goto out;

		ret = nouveau_vma_getmap(chan, nvbo, new_mem, &node->vma[1]);
		if (ret)
			goto out;
	}

	if (dev_priv->card_type < NV_50)
		ret = nv04_bo_move_m2mf(chan, bo, &bo->mem, new_mem);
	else
	if (dev_priv->card_type < NV_C0)
		ret = nv50_bo_move_m2mf(chan, bo, &bo->mem, new_mem);
	else
		ret = nvc0_bo_move_m2mf(chan, bo, &bo->mem, new_mem);
	if (ret == 0) {
		ret = nouveau_bo_move_accel_cleanup(chan, nvbo, evict,
						    no_wait_reserve,
						    no_wait_gpu, new_mem);
	}

out:
	if (chan == dev_priv->channel)
		mutex_unlock(&chan->mutex);
	return ret;
}

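/*
 * VRAM -> SYSTEM moves can't be done by M2MF directly, so bounce the
 * data through a GART placement: bind a ttm, GPU-copy into it, then
 * let TTM finish the move into system memory.
 */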
static int
nouveau_bo_move_flipd(struct ttm_buffer_object *bo, bool evict, bool intr,
		      bool no_wait_reserve, bool no_wait_gpu,
		      struct ttm_mem_reg *new_mem)
{
	u32 placement_memtype = TTM_PL_FLAG_TT | TTM_PL_MASK_CACHING;
	struct ttm_placement placement;
	struct ttm_mem_reg tmp_mem;
	int ret;

	placement.fpfn = placement.lpfn = 0;
	placement.num_placement = placement.num_busy_placement = 1;
	placement.placement = placement.busy_placement = &placement_memtype;

	tmp_mem = *new_mem;
	tmp_mem.mm_node = NULL;
	ret = ttm_bo_mem_space(bo, &placement, &tmp_mem, intr, no_wait_reserve, no_wait_gpu);
	if (ret)
		return ret;

	ret = ttm_tt_bind(bo->ttm, &tmp_mem);
	if (ret)
		goto out;

	ret = nouveau_bo_move_m2mf(bo, true, intr, no_wait_reserve, no_wait_gpu, &tmp_mem);
	if (ret)
		goto out;

	ret = ttm_bo_move_ttm(bo, true, no_wait_reserve, no_wait_gpu, new_mem);
out:
	ttm_bo_mem_put(bo, &tmp_mem);
	return ret;
}

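/* The reverse bounce for SYSTEM -> VRAM: TTM first moves the bo into
 * the GART, then the GPU copies from there into VRAM.
 */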
static int
nouveau_bo_move_flips(struct ttm_buffer_object *bo, bool evict, bool intr,
		      bool no_wait_reserve, bool no_wait_gpu,
		      struct ttm_mem_reg *new_mem)
{
	u32 placement_memtype = TTM_PL_FLAG_TT | TTM_PL_MASK_CACHING;
	struct ttm_placement placement;
	struct ttm_mem_reg tmp_mem;
	int ret;

	placement.fpfn = placement.lpfn = 0;
	placement.num_placement = placement.num_busy_placement = 1;
	placement.placement = placement.busy_placement = &placement_memtype;

	tmp_mem = *new_mem;
	tmp_mem.mm_node = NULL;
	ret = ttm_bo_mem_space(bo, &placement, &tmp_mem, intr, no_wait_reserve, no_wait_gpu);
	if (ret)
		return ret;

	ret = ttm_bo_move_ttm(bo, true, no_wait_reserve, no_wait_gpu, &tmp_mem);
	if (ret)
		goto out;

	ret = nouveau_bo_move_m2mf(bo, true, intr, no_wait_reserve, no_wait_gpu, new_mem);
	if (ret)
		goto out;

out:
	ttm_bo_mem_put(bo, &tmp_mem);
	return ret;
}

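/*
 * TTM move notifier: keep every VM mapping of this bo in sync with its
 * new backing storage.  VRAM is always mapped; TT is only mapped when
 * the bo's page size matches the VMA's small-page size; anything else
 * (e.g. a move to SYSTEM) gets unmapped.
 */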
static void
nouveau_bo_move_ntfy(struct ttm_buffer_object *bo, struct ttm_mem_reg *new_mem)
{
	struct nouveau_mem *node = new_mem->mm_node;
	struct nouveau_bo *nvbo = nouveau_bo(bo);
	struct nouveau_vma *vma;

	list_for_each_entry(vma, &nvbo->vma_list, head) {
		if (new_mem->mem_type == TTM_PL_VRAM) {
			nouveau_vm_map(vma, new_mem->mm_node);
		} else
		if (new_mem->mem_type == TTM_PL_TT &&
		    nvbo->page_shift == vma->vm->spg_shift) {
			nouveau_vm_map_sg(vma, 0, new_mem->num_pages <<
					  PAGE_SHIFT, node, node->pages);
		} else {
			nouveau_vm_unmap(vma);
		}
	}
}

static int
nouveau_bo_vm_bind(struct ttm_buffer_object *bo, struct ttm_mem_reg *new_mem,
		   struct nouveau_tile_reg **new_tile)
{
	struct drm_nouveau_private *dev_priv = nouveau_bdev(bo->bdev);
	struct drm_device *dev = dev_priv->dev;
	struct nouveau_bo *nvbo = nouveau_bo(bo);
	u64 offset = new_mem->start << PAGE_SHIFT;

	*new_tile = NULL;
	if (new_mem->mem_type != TTM_PL_VRAM)
		return 0;

	if (dev_priv->card_type >= NV_10) {
		*new_tile = nv10_mem_set_tiling(dev, offset, new_mem->size,
						nvbo->tile_mode,
						nvbo->tile_flags);
	}

	return 0;
}

static void
nouveau_bo_vm_cleanup(struct ttm_buffer_object *bo,
		      struct nouveau_tile_reg *new_tile,
		      struct nouveau_tile_reg **old_tile)
{
	struct drm_nouveau_private *dev_priv = nouveau_bdev(bo->bdev);
	struct drm_device *dev = dev_priv->dev;

	nv10_mem_put_tile_region(dev, *old_tile, bo->sync_obj);
	*old_tile = new_tile;
}

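/*
 * Main TTM move hook.  Pre-NV50 tiling state is (re)allocated up front
 * and released on completion; the copy itself prefers the GPU paths
 * above and falls back to memcpy when no channel is available or the
 * accelerated move fails.
 */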
static int
nouveau_bo_move(struct ttm_buffer_object *bo, bool evict, bool intr,
		bool no_wait_reserve, bool no_wait_gpu,
		struct ttm_mem_reg *new_mem)
{
	struct drm_nouveau_private *dev_priv = nouveau_bdev(bo->bdev);
	struct nouveau_bo *nvbo = nouveau_bo(bo);
	struct ttm_mem_reg *old_mem = &bo->mem;
	struct nouveau_tile_reg *new_tile = NULL;
	int ret = 0;

	if (dev_priv->card_type < NV_50) {
		ret = nouveau_bo_vm_bind(bo, new_mem, &new_tile);
		if (ret)
			return ret;
	}

	/* Fake bo copy. */
	if (old_mem->mem_type == TTM_PL_SYSTEM && !bo->ttm) {
		BUG_ON(bo->mem.mm_node != NULL);
		bo->mem = *new_mem;
		new_mem->mm_node = NULL;
		goto out;
	}

	/* Software copy if the card isn't up and running yet. */
	if (!dev_priv->channel) {
		ret = ttm_bo_move_memcpy(bo, evict, no_wait_reserve, no_wait_gpu, new_mem);
		goto out;
	}

	/* Hardware assisted copy. */
	if (new_mem->mem_type == TTM_PL_SYSTEM)
		ret = nouveau_bo_move_flipd(bo, evict, intr, no_wait_reserve, no_wait_gpu, new_mem);
	else if (old_mem->mem_type == TTM_PL_SYSTEM)
		ret = nouveau_bo_move_flips(bo, evict, intr, no_wait_reserve, no_wait_gpu, new_mem);
	else
		ret = nouveau_bo_move_m2mf(bo, evict, intr, no_wait_reserve, no_wait_gpu, new_mem);

	if (!ret)
		goto out;

	/* Fallback to software copy. */
	ret = ttm_bo_move_memcpy(bo, evict, no_wait_reserve, no_wait_gpu, new_mem);

out:
	if (dev_priv->card_type < NV_50) {
		if (ret)
			nouveau_bo_vm_cleanup(bo, NULL, &new_tile);
		else
			nouveau_bo_vm_cleanup(bo, new_tile, &nvbo->tile);
	}

	return ret;
}

static int
nouveau_bo_verify_access(struct ttm_buffer_object *bo, struct file *filp)
{
	return 0;
}

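/*
 * Tell TTM where a memory region lives for CPU mapping purposes.  AGP
 * and pre-NV50 VRAM map linearly through their apertures; NV50+ VRAM
 * has to be mapped into BAR1 through a temporary VM allocation first.
 */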
static int
nouveau_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
{
	struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
	struct drm_nouveau_private *dev_priv = nouveau_bdev(bdev);
	struct drm_device *dev = dev_priv->dev;
	int ret;

	mem->bus.addr = NULL;
	mem->bus.offset = 0;
	mem->bus.size = mem->num_pages << PAGE_SHIFT;
	mem->bus.base = 0;
	mem->bus.is_iomem = false;
	if (!(man->flags & TTM_MEMTYPE_FLAG_MAPPABLE))
		return -EINVAL;
	switch (mem->mem_type) {
	case TTM_PL_SYSTEM:
		/* System memory */
		return 0;
	case TTM_PL_TT:
#if __OS_HAS_AGP
		if (dev_priv->gart_info.type == NOUVEAU_GART_AGP) {
			mem->bus.offset = mem->start << PAGE_SHIFT;
			mem->bus.base = dev_priv->gart_info.aper_base;
			mem->bus.is_iomem = true;
		}
#endif
		break;
	case TTM_PL_VRAM:
	{
		struct nouveau_mem *node = mem->mm_node;
		u8 page_shift;

		if (!dev_priv->bar1_vm) {
			mem->bus.offset = mem->start << PAGE_SHIFT;
			mem->bus.base = pci_resource_start(dev->pdev, 1);
			mem->bus.is_iomem = true;
			break;
		}

		if (dev_priv->card_type >= NV_C0)
			page_shift = node->page_shift;
		else
			page_shift = 12;

		ret = nouveau_vm_get(dev_priv->bar1_vm, mem->bus.size,
				     page_shift, NV_MEM_ACCESS_RW,
				     &node->bar_vma);
		if (ret)
			return ret;

		nouveau_vm_map(&node->bar_vma, node);

		mem->bus.offset = node->bar_vma.offset;
		if (dev_priv->card_type == NV_50) /*XXX*/
			mem->bus.offset -= 0x0020000000ULL;
		mem->bus.base = pci_resource_start(dev->pdev, 1);
		mem->bus.is_iomem = true;
	}
		break;
	default:
		return -EINVAL;
	}
	return 0;
}

static void
nouveau_ttm_io_mem_free(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
{
	struct drm_nouveau_private *dev_priv = nouveau_bdev(bdev);
	struct nouveau_mem *node = mem->mm_node;

	if (!dev_priv->bar1_vm || mem->mem_type != TTM_PL_VRAM)
		return;

	if (!node->bar_vma.node)
		return;

	nouveau_vm_unmap(&node->bar_vma);
	nouveau_vm_put(&node->bar_vma);
}

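/*
 * CPU fault notifier: tiled NV50+ buffers and anything sitting beyond
 * the BAR-mappable part of VRAM must be migrated into mappable VRAM
 * before userspace may touch it.
 */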
static int
nouveau_ttm_fault_reserve_notify(struct ttm_buffer_object *bo)
{
	struct drm_nouveau_private *dev_priv = nouveau_bdev(bo->bdev);
	struct nouveau_bo *nvbo = nouveau_bo(bo);

	/* as long as the bo isn't in vram, and isn't tiled, we've got
	 * nothing to do here.
	 */
	if (bo->mem.mem_type != TTM_PL_VRAM) {
		if (dev_priv->card_type < NV_50 ||
		    !nouveau_bo_tile_layout(nvbo))
			return 0;
	}

	/* make sure bo is in mappable vram */
	if (bo->mem.start + bo->mem.num_pages < dev_priv->fb_mappable_pages)
		return 0;

	nvbo->placement.fpfn = 0;
	nvbo->placement.lpfn = dev_priv->fb_mappable_pages;
	nouveau_bo_placement_set(nvbo, TTM_PL_VRAM, 0);
	return nouveau_bo_validate(nvbo, false, true, false);
}

void
nouveau_bo_fence(struct nouveau_bo *nvbo, struct nouveau_fence *fence)
{
	struct nouveau_fence *old_fence;

	if (likely(fence))
		nouveau_fence_ref(fence);

	spin_lock(&nvbo->bo.bdev->fence_lock);
	old_fence = nvbo->bo.sync_obj;
	nvbo->bo.sync_obj = fence;
	spin_unlock(&nvbo->bo.bdev->fence_lock);

	nouveau_fence_unref(&old_fence);
}

struct ttm_bo_driver nouveau_bo_driver = {
	.create_ttm_backend_entry = nouveau_bo_create_ttm_backend_entry,
	.invalidate_caches = nouveau_bo_invalidate_caches,
	.init_mem_type = nouveau_bo_init_mem_type,
	.evict_flags = nouveau_bo_evict_flags,
	.move_notify = nouveau_bo_move_ntfy,
	.move = nouveau_bo_move,
	.verify_access = nouveau_bo_verify_access,
	.sync_obj_signaled = __nouveau_fence_signalled,
	.sync_obj_wait = __nouveau_fence_wait,
	.sync_obj_flush = __nouveau_fence_flush,
	.sync_obj_unref = __nouveau_fence_unref,
	.sync_obj_ref = __nouveau_fence_ref,
	.fault_reserve_notify = &nouveau_ttm_fault_reserve_notify,
	.io_mem_reserve = &nouveau_ttm_io_mem_reserve,
	.io_mem_free = &nouveau_ttm_io_mem_free,
};

struct nouveau_vma *
nouveau_bo_vma_find(struct nouveau_bo *nvbo, struct nouveau_vm *vm)
{
	struct nouveau_vma *vma;
	list_for_each_entry(vma, &nvbo->vma_list, head) {
		if (vma->vm == vm)
			return vma;
	}

	return NULL;
}

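/*
 * Map a bo into a VM's address space.  A fresh VMA is allocated in the
 * VM, mapped immediately if the bo currently has backing in VRAM or
 * TT, and tracked on the bo's vma_list so moves can update it.
 */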
int
nouveau_bo_vma_add(struct nouveau_bo *nvbo, struct nouveau_vm *vm,
		   struct nouveau_vma *vma)
{
	const u32 size = nvbo->bo.mem.num_pages << PAGE_SHIFT;
	struct nouveau_mem *node = nvbo->bo.mem.mm_node;
	int ret;

	ret = nouveau_vm_get(vm, size, nvbo->page_shift,
			     NV_MEM_ACCESS_RW, vma);
	if (ret)
		return ret;

	if (nvbo->bo.mem.mem_type == TTM_PL_VRAM)
		nouveau_vm_map(vma, nvbo->bo.mem.mm_node);
	else
	if (nvbo->bo.mem.mem_type == TTM_PL_TT)
		nouveau_vm_map_sg(vma, 0, size, node, node->pages);

	list_add_tail(&vma->head, &nvbo->vma_list);
	vma->refcount = 1;
	return 0;
}

void
nouveau_bo_vma_del(struct nouveau_bo *nvbo, struct nouveau_vma *vma)
{
	if (vma->node) {
		if (nvbo->bo.mem.mem_type != TTM_PL_SYSTEM) {
			spin_lock(&nvbo->bo.bdev->fence_lock);
			ttm_bo_wait(&nvbo->bo, false, false, false,
				    TTM_USAGE_READWRITE);
			spin_unlock(&nvbo->bo.bdev->fence_lock);
			nouveau_vm_unmap(vma);
		}

		nouveau_vm_put(vma);
		list_del(&vma->head);
	}
}