/*
 * Copyright 2012 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */
#include <core/gpuobj.h>

#include <subdev/timer.h>
#include <subdev/fb.h>
#include <subdev/vm.h>

#include "priv.h"	/* local bar subdev declarations */
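/*
 * Per-instance state for the NV50-family BAR subdev: the backing
 * instance-memory objects (mem/pad/pgd), the channel-like objects
 * describing each aperture (bar1/bar3), and the virtual address spaces
 * reached through BAR1 (user mappings) and BAR3 (kernel mappings).
 */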
struct nv50_bar_priv {
	struct nouveau_bar base;
	spinlock_t lock;		/* serializes the flush sequence */
	struct nouveau_gpuobj *mem;
	struct nouveau_gpuobj *pad;
	struct nouveau_gpuobj *pgd;
	struct nouveau_vm *bar1_vm;
	struct nouveau_gpuobj *bar1;
	struct nouveau_vm *bar3_vm;
	struct nouveau_gpuobj *bar3;
};
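/*
 * Map memory into the BAR3 (kernel) address space: allocate a range in
 * bar3_vm and point its page tables at the given memory.
 */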
static int
nv50_bar_kmap(struct nouveau_bar *bar, struct nouveau_mem *mem,
	      u32 flags, struct nouveau_vma *vma)
{
	struct nv50_bar_priv *priv = (void *)bar;
	int ret;

	ret = nouveau_vm_get(priv->bar3_vm, mem->size << 12, 12, flags, vma);
	if (ret)
		return ret;

	nouveau_vm_map(vma, mem);
	return 0;
}
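/* As above, but mapping into the BAR1 (userspace-visible) address space. */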
static int
nv50_bar_umap(struct nouveau_bar *bar, struct nouveau_mem *mem,
	      u32 flags, struct nouveau_vma *vma)
{
	struct nv50_bar_priv *priv = (void *)bar;
	int ret;

	ret = nouveau_vm_get(priv->bar1_vm, mem->size << 12, 12, flags, vma);
	if (ret)
		return ret;

	nouveau_vm_map(vma, mem);
	return 0;
}
static void
nv50_bar_unmap(struct nouveau_bar *bar, struct nouveau_vma *vma)
{
	nouveau_vm_unmap(vma);
	nouveau_vm_put(vma);	/* release the range allocated by kmap/umap */
}
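/*
 * Flush pending VM writes on original NV50.  0x00330c appears to be the
 * flush trigger/status register on this chipset; the wait polls for the
 * busy bit (0x2) to clear.
 */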
static void
nv50_bar_flush(struct nouveau_bar *bar)
{
	struct nv50_bar_priv *priv = (void *)bar;
	unsigned long flags;

	spin_lock_irqsave(&priv->lock, flags);
	nv_wr32(priv, 0x00330c, 0x00000001);
	if (!nv_wait(priv, 0x00330c, 0x00000002, 0x00000000))
		nv_warn(priv, "flush timeout\n");
	spin_unlock_irqrestore(&priv->lock, flags);
}
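/*
 * NV84 and later moved the flush trigger to 0x070000; the sequence is
 * otherwise identical.  Left non-static as later chipset code presumably
 * reuses it.
 */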
void
nv84_bar_flush(struct nouveau_bar *bar)
{
	struct nv50_bar_priv *priv = (void *)bar;
	unsigned long flags;

	spin_lock_irqsave(&priv->lock, flags);
	nv_wr32(bar, 0x070000, 0x00000001);
	if (!nv_wait(priv, 0x070000, 0x00000002, 0x00000000))
		nv_warn(priv, "flush timeout\n");
	spin_unlock_irqrestore(&priv->lock, flags);
}
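/*
 * Constructor: carve a heap out of instance memory, build a page directory
 * shared by both apertures plus a pre-allocated page table for BAR3, and
 * create the two channel-like objects the hardware uses to translate BAR1
 * and BAR3 accesses.
 */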
static int
nv50_bar_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
	      struct nouveau_oclass *oclass, void *data, u32 size,
	      struct nouveau_object **pobject)
{
	struct nouveau_device *device = nv_device(parent);
	struct nouveau_object *heap;
	struct nouveau_vm *vm;
	struct nv50_bar_priv *priv;
	u64 start, limit;
	int ret;

	ret = nouveau_bar_create(parent, engine, oclass, &priv);
	*pobject = nv_object(priv);
	if (ret)
		return ret;
	ret = nouveau_gpuobj_new(nv_object(priv), NULL, 0x20000, 0,
				 NVOBJ_FLAG_HEAP, &priv->mem);
	heap = nv_object(priv->mem);
	if (ret)
		return ret;

	ret = nouveau_gpuobj_new(nv_object(priv), heap,
				 (device->chipset == 0x50) ? 0x1400 : 0x0200,
				 0, 0, &priv->pad);
	if (ret)
		return ret;

	ret = nouveau_gpuobj_new(nv_object(priv), heap, 0x4000, 0,
				 0, &priv->pgd);
	if (ret)
		return ret;
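	/*
	 * BAR3 (kernel) address space, sized to the PCI BAR3 resource.
	 * It is placed at 0x0100000000, presumably so it does not overlap
	 * the BAR1 range within the shared page directory.
	 */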
	start = 0x0100000000ULL;
	limit = start + nv_device_resource_len(device, 3);

	ret = nouveau_vm_new(device, start, limit, start, &vm);
	if (ret)
		return ret;

	atomic_inc(&vm->engref[NVDEV_SUBDEV_BAR]);

	ret = nouveau_gpuobj_new(nv_object(priv), heap,
				 ((limit-- - start) >> 12) * 8, 0x1000,
				 NVOBJ_FLAG_ZERO_ALLOC, &vm->pgt[0].obj[0]);
	vm->pgt[0].refcount[0] = 1;
	if (ret)
		return ret;

	ret = nouveau_vm_ref(vm, &priv->bar3_vm, priv->pgd);
	nouveau_vm_ref(NULL, &vm, NULL);
	if (ret)
		return ret;
	ret = nouveau_gpuobj_new(nv_object(priv), heap, 24, 16, 0, &priv->bar3);
	if (ret)
		return ret;

	nv_wo32(priv->bar3, 0x00, 0x7fc00000);
	nv_wo32(priv->bar3, 0x04, lower_32_bits(limit));
	nv_wo32(priv->bar3, 0x08, lower_32_bits(start));
	nv_wo32(priv->bar3, 0x0c, upper_32_bits(limit) << 24 |
				  upper_32_bits(start));
	nv_wo32(priv->bar3, 0x10, 0x00000000);
	nv_wo32(priv->bar3, 0x14, 0x00000000);
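	/*
	 * BAR1 (user) address space, starting at offset 0 of the shared
	 * page directory and sized to the PCI BAR1 resource.
	 */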
	start = 0x0000000000ULL;
	limit = start + nv_device_resource_len(device, 1);

	ret = nouveau_vm_new(device, start, limit--, start, &vm);
	if (ret)
		return ret;

	atomic_inc(&vm->engref[NVDEV_SUBDEV_BAR]);

	ret = nouveau_vm_ref(vm, &priv->bar1_vm, priv->pgd);
	nouveau_vm_ref(NULL, &vm, NULL);
	if (ret)
		return ret;
	ret = nouveau_gpuobj_new(nv_object(priv), heap, 24, 16, 0, &priv->bar1);
	if (ret)
		return ret;

	nv_wo32(priv->bar1, 0x00, 0x7fc00000);
	nv_wo32(priv->bar1, 0x04, lower_32_bits(limit));
	nv_wo32(priv->bar1, 0x08, lower_32_bits(start));
	nv_wo32(priv->bar1, 0x0c, upper_32_bits(limit) << 24 |
				  upper_32_bits(start));
	nv_wo32(priv->bar1, 0x10, 0x00000000);
	nv_wo32(priv->bar1, 0x14, 0x00000000);
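	/* Wire up the subdev hooks; only the flush routine differs by chipset. */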
	priv->base.alloc = nouveau_bar_alloc;
	priv->base.kmap = nv50_bar_kmap;
	priv->base.umap = nv50_bar_umap;
	priv->base.unmap = nv50_bar_unmap;
	if (device->chipset == 0x50)
		priv->base.flush = nv50_bar_flush;
	else
		priv->base.flush = nv84_bar_flush;
	spin_lock_init(&priv->lock);
	return 0;
}
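/*
 * Destructor: drop references in roughly the reverse order of creation.
 * bar3_vm may be NULL if the constructor failed early, hence the check
 * before touching its page table.
 */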
static void
nv50_bar_dtor(struct nouveau_object *object)
{
	struct nv50_bar_priv *priv = (void *)object;
	nouveau_gpuobj_ref(NULL, &priv->bar1);
	nouveau_vm_ref(NULL, &priv->bar1_vm, priv->pgd);
	nouveau_gpuobj_ref(NULL, &priv->bar3);
	if (priv->bar3_vm) {
		nouveau_gpuobj_ref(NULL, &priv->bar3_vm->pgt[0].obj[0]);
		nouveau_vm_ref(NULL, &priv->bar3_vm, priv->pgd);
	}
	nouveau_gpuobj_ref(NULL, &priv->pgd);
	nouveau_gpuobj_ref(NULL, &priv->pad);
	nouveau_gpuobj_ref(NULL, &priv->mem);
	nouveau_bar_destroy(&priv->base);
}
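/*
 * Program the hardware at init/resume: toggle the subdev's enable bit in
 * 0x000200, flush the VM via 0x100c80, then point the chip at the instance
 * memory block and the BAR1/BAR3 channel objects (0x001704/0x001708/0x00170c
 * presumably select the BAR translation contexts).
 */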
static int
nv50_bar_init(struct nouveau_object *object)
{
	struct nv50_bar_priv *priv = (void *)object;
	int ret;

	ret = nouveau_bar_init(&priv->base);
	if (ret)
		return ret;

	nv_mask(priv, 0x000200, 0x00000100, 0x00000000);
	nv_mask(priv, 0x000200, 0x00000100, 0x00000100);
	nv_wr32(priv, 0x100c80, 0x00060001);
	if (!nv_wait(priv, 0x100c80, 0x00000001, 0x00000000)) {
		nv_error(priv, "vm flush timeout\n");
		return -EBUSY;
	}

	nv_wr32(priv, 0x001704, 0x00000000 | priv->mem->addr >> 12);
	nv_wr32(priv, 0x001704, 0x40000000 | priv->mem->addr >> 12);
	nv_wr32(priv, 0x001708, 0x80000000 | priv->bar1->node->offset >> 4);
	nv_wr32(priv, 0x00170c, 0x80000000 | priv->bar3->node->offset >> 4);
	return 0;
}
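/* Nothing chipset-specific to tear down on suspend; defer to the common code. */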
static int
nv50_bar_fini(struct nouveau_object *object, bool suspend)
{
	struct nv50_bar_priv *priv = (void *)object;
	return nouveau_bar_fini(&priv->base, suspend);
}
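/* Subdev class definition registered for NV50-generation boards. */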
struct nouveau_oclass
nv50_bar_oclass = {
	.handle = NV_SUBDEV(BAR, 0x50),
	.ofuncs = &(struct nouveau_ofuncs) {
		.ctor = nv50_bar_ctor,
		.dtor = nv50_bar_dtor,
		.init = nv50_bar_init,
		.fini = nv50_bar_fini,
	},
};