/*
 * Copyright 2012 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */
#include <core/gpuobj.h>

#include <subdev/timer.h>
#include <subdev/vm.h>

#include "nv04.h"
/* 512MiB of GART address space, managed in 4KiB hardware pages. */
#define NV41_GART_SIZE (512 * 1024 * 1024)
#define NV41_GART_PAGE ( 4 * 1024)

/*******************************************************************************
 * VM map/unmap callbacks
 ******************************************************************************/
40 nv41_vm_map_sg(struct nouveau_vma
*vma
, struct nouveau_gpuobj
*pgt
,
41 struct nouveau_mem
*mem
, u32 pte
, u32 cnt
, dma_addr_t
*list
)
45 u32 page
= PAGE_SIZE
/ NV41_GART_PAGE
;
46 u64 phys
= (u64
)*list
++;
47 while (cnt
&& page
--) {
48 nv_wo32(pgt
, pte
, (phys
>> 7) | 1);
49 phys
+= NV41_GART_PAGE
;
57 nv41_vm_unmap(struct nouveau_gpuobj
*pgt
, u32 pte
, u32 cnt
)
61 nv_wo32(pgt
, pte
, 0x00000000);
67 nv41_vm_flush(struct nouveau_vm
*vm
)
69 struct nv04_vm_priv
*priv
= (void *)vm
->vmm
;
71 mutex_lock(&nv_subdev(priv
)->mutex
);
72 nv_wr32(priv
, 0x100810, 0x00000022);
73 if (!nv_wait(priv
, 0x100810, 0x00000100, 0x00000100)) {
74 nv_warn(priv
, "flush timeout, 0x%08x\n",
75 nv_rd32(priv
, 0x100810));
77 nv_wr32(priv
, 0x100810, 0x00000000);
78 mutex_unlock(&nv_subdev(priv
)->mutex
);
/*******************************************************************************
 * VMMGR engine/subdev functions
 ******************************************************************************/
86 nv41_vmmgr_ctor(struct nouveau_object
*parent
, struct nouveau_object
*engine
,
87 struct nouveau_oclass
*oclass
, void *data
, u32 size
,
88 struct nouveau_object
**pobject
)
90 struct nv04_vmmgr_priv
*priv
;
93 ret
= nouveau_vmmgr_create(parent
, engine
, oclass
, "PCIEGART",
95 *pobject
= nv_object(priv
);
99 priv
->base
.create
= nv04_vm_create
;
100 priv
->base
.limit
= NV41_GART_SIZE
;
101 priv
->base
.pgt_bits
= 32 - 12;
102 priv
->base
.spg_shift
= 12;
103 priv
->base
.lpg_shift
= 12;
104 priv
->base
.map_sg
= nv41_vm_map_sg
;
105 priv
->base
.unmap
= nv41_vm_unmap
;
106 priv
->base
.flush
= nv41_vm_flush
;
108 ret
= nouveau_vm_create(&priv
->base
, 0, NV41_GART_SIZE
, 0, 4096,
113 ret
= nouveau_gpuobj_new(parent
, NULL
,
114 (NV41_GART_SIZE
/ NV41_GART_PAGE
) * 4,
115 16, NVOBJ_FLAG_ZERO_ALLOC
,
116 &priv
->vm
->pgt
[0].obj
[0]);
117 priv
->vm
->pgt
[0].refcount
[0] = 1;
125 nv41_vmmgr_init(struct nouveau_object
*object
)
127 struct nv04_vmmgr_priv
*priv
= (void *)object
;
128 struct nouveau_gpuobj
*dma
= priv
->vm
->pgt
[0].obj
[0];
131 ret
= nouveau_vmmgr_init(&priv
->base
);
135 nv_wr32(priv
, 0x100800, dma
->addr
| 0x00000002);
136 nv_mask(priv
, 0x10008c, 0x00000100, 0x00000100);
137 nv_wr32(priv
, 0x100820, 0x00000000);
141 struct nouveau_oclass
142 nv41_vmmgr_oclass
= {
143 .handle
= NV_SUBDEV(VM
, 0x41),
144 .ofuncs
= &(struct nouveau_ofuncs
) {
145 .ctor
= nv41_vmmgr_ctor
,
146 .dtor
= nv04_vmmgr_dtor
,
147 .init
= nv41_vmmgr_init
,
148 .fini
= _nouveau_vmmgr_fini
,
This page took 0.033968 seconds and 5 git commands to generate.