drivers/gpu/drm/nouveau/core/subdev/bar/nv50.c
/*
 * Copyright 2012 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Ben Skeggs
 */

#include <core/gpuobj.h>

#include <subdev/timer.h>
#include <subdev/fb.h>
#include <subdev/vm.h>

#include "priv.h"

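/*
 * Private state for the NV50-family BAR subdev.  BAR1 is the aperture
 * userspace mappings of GPU memory go through; BAR3 is the aperture the
 * kernel uses for its own mappings.  Each BAR gets its own address space
 * (a nouveau_vm) plus a DMA object describing its range, all carved out
 * of the "mem" heap by the constructor below.
 */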
struct nv50_bar_priv {
        struct nouveau_bar base;
        spinlock_t lock;
        struct nouveau_gpuobj *mem;
        struct nouveau_gpuobj *pad;
        struct nouveau_gpuobj *pgd;
        struct nouveau_vm *bar1_vm;
        struct nouveau_gpuobj *bar1;
        struct nouveau_vm *bar3_vm;
        struct nouveau_gpuobj *bar3;
};

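/*
 * Map an object into the kernel's BAR3 aperture.  mem->size is in units
 * of 4KiB pages, so "mem->size << 12" converts it to bytes, and 12 is the
 * page shift used for the mapping.  A caller elsewhere in the driver
 * would use it roughly like this (illustrative sketch, not code from this
 * file):
 *
 *      ret = bar->kmap(bar, node, NV_MEM_ACCESS_RW, &vma);
 */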
static int
nv50_bar_kmap(struct nouveau_bar *bar, struct nouveau_mem *mem,
              u32 flags, struct nouveau_vma *vma)
{
        struct nv50_bar_priv *priv = (void *)bar;
        int ret;

        ret = nouveau_vm_get(priv->bar3_vm, mem->size << 12, 12, flags, vma);
        if (ret)
                return ret;

        nouveau_vm_map(vma, mem);
        return 0;
}

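/*
 * Same as nv50_bar_kmap(), but allocates from the BAR1 address space,
 * which backs userspace-visible mappings of GPU memory.
 */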
static int
nv50_bar_umap(struct nouveau_bar *bar, struct nouveau_mem *mem,
              u32 flags, struct nouveau_vma *vma)
{
        struct nv50_bar_priv *priv = (void *)bar;
        int ret;

        ret = nouveau_vm_get(priv->bar1_vm, mem->size << 12, 12, flags, vma);
        if (ret)
                return ret;

        nouveau_vm_map(vma, mem);
        return 0;
}

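/* Undo a kmap/umap: unmap the PTEs, then release the VM allocation. */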
static void
nv50_bar_unmap(struct nouveau_bar *bar, struct nouveau_vma *vma)
{
        nouveau_vm_unmap(vma);
        nouveau_vm_put(vma);
}

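/*
 * Flush BAR writes so that page-table updates performed through the BAR
 * become visible to the GPU.  On the original NV50, writing bit 0 of
 * 0x00330c kicks the flush and the hardware clears bit 1 when it has
 * completed; the lock serialises concurrent flushes.
 */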
static void
nv50_bar_flush(struct nouveau_bar *bar)
{
        struct nv50_bar_priv *priv = (void *)bar;
        unsigned long flags;
        spin_lock_irqsave(&priv->lock, flags);
        nv_wr32(priv, 0x00330c, 0x00000001);
        if (!nv_wait(priv, 0x00330c, 0x00000002, 0x00000000))
                nv_warn(priv, "flush timeout\n");
        spin_unlock_irqrestore(&priv->lock, flags);
}

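/*
 * NV84 and newer moved the flush register to 0x070000.  This variant is
 * left non-static (with its declaration in priv.h), presumably so later
 * chipsets' bar implementations can reuse it rather than duplicating the
 * sequence.
 */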
void
nv84_bar_flush(struct nouveau_bar *bar)
{
        struct nv50_bar_priv *priv = (void *)bar;
        unsigned long flags;
        spin_lock_irqsave(&priv->lock, flags);
        nv_wr32(priv, 0x070000, 0x00000001);
        if (!nv_wait(priv, 0x070000, 0x00000002, 0x00000000))
                nv_warn(priv, "flush timeout\n");
        spin_unlock_irqrestore(&priv->lock, flags);
}

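/*
 * Constructor.  Builds everything both BARs need before the hardware is
 * touched: a heap object to suballocate from, a shared page directory,
 * one virtual address space per BAR, and a 24-byte DMA object describing
 * each BAR's range.
 */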
static int
nv50_bar_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
              struct nouveau_oclass *oclass, void *data, u32 size,
              struct nouveau_object **pobject)
{
        struct nouveau_device *device = nv_device(parent);
        struct nouveau_object *heap;
        struct nouveau_vm *vm;
        struct nv50_bar_priv *priv;
        u64 start, limit;
        int ret;

        ret = nouveau_bar_create(parent, engine, oclass, &priv);
        *pobject = nv_object(priv);
        if (ret)
                return ret;

        ret = nouveau_gpuobj_new(nv_object(priv), NULL, 0x20000, 0,
                                 NVOBJ_FLAG_HEAP, &priv->mem);
        heap = nv_object(priv->mem);
        if (ret)
                return ret;

        ret = nouveau_gpuobj_new(nv_object(priv), heap,
                                 (device->chipset == 0x50) ? 0x1400 : 0x0200,
                                 0, 0, &priv->pad);
        if (ret)
                return ret;

        ret = nouveau_gpuobj_new(nv_object(priv), heap, 0x4000, 0,
                                 0, &priv->pgd);
        if (ret)
                return ret;

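        /*
         * BAR3, the kernel's aperture, is set up first.  Its virtual
         * address space is placed at 4GiB and sized to the underlying
         * resource; nv_device_resource_len() abstracts over PCI and
         * platform devices.
         */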
        /* BAR3 */
        start = 0x0100000000ULL;
        limit = start + nv_device_resource_len(device, 3);

        ret = nouveau_vm_new(device, start, limit, start, &vm);
        if (ret)
                return ret;

        atomic_inc(&vm->engref[NVDEV_SUBDEV_BAR]);

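        /*
         * Pre-allocate one flat page table spanning the whole BAR3
         * range: one 8-byte PTE per 4KiB page, i.e.
         * ((limit - start) >> 12) * 8 bytes.  The post-decrement also
         * turns limit into an inclusive bound for the DMA-object words
         * written below.
         */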
        ret = nouveau_gpuobj_new(nv_object(priv), heap,
                                 ((limit-- - start) >> 12) * 8, 0x1000,
                                 NVOBJ_FLAG_ZERO_ALLOC, &vm->pgt[0].obj[0]);
        vm->pgt[0].refcount[0] = 1;
        if (ret)
                return ret;

        ret = nouveau_vm_ref(vm, &priv->bar3_vm, priv->pgd);
        nouveau_vm_ref(NULL, &vm, NULL);
        if (ret)
                return ret;

        ret = nouveau_gpuobj_new(nv_object(priv), heap, 24, 16, 0, &priv->bar3);
        if (ret)
                return ret;

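        /*
         * Fill in the DMA object describing the BAR3 range: word 0x00
         * holds the hardware class/flags, 0x04/0x08 the low 32 bits of
         * the (inclusive) limit and base, and 0x0c packs the upper bits
         * of both into one word.
         */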
        nv_wo32(priv->bar3, 0x00, 0x7fc00000);
        nv_wo32(priv->bar3, 0x04, lower_32_bits(limit));
        nv_wo32(priv->bar3, 0x08, lower_32_bits(start));
        nv_wo32(priv->bar3, 0x0c, upper_32_bits(limit) << 24 |
                                  upper_32_bits(start));
        nv_wo32(priv->bar3, 0x10, 0x00000000);
        nv_wo32(priv->bar3, 0x14, 0x00000000);

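        /*
         * BAR1 follows the same pattern, but starts at virtual address
         * zero and gets no pre-built page table; its page tables are
         * populated on demand as nv50_bar_umap() creates mappings.
         */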
        /* BAR1 */
        start = 0x0000000000ULL;
        limit = start + nv_device_resource_len(device, 1);

        ret = nouveau_vm_new(device, start, limit--, start, &vm);
        if (ret)
                return ret;

        atomic_inc(&vm->engref[NVDEV_SUBDEV_BAR]);

        ret = nouveau_vm_ref(vm, &priv->bar1_vm, priv->pgd);
        nouveau_vm_ref(NULL, &vm, NULL);
        if (ret)
                return ret;

        ret = nouveau_gpuobj_new(nv_object(priv), heap, 24, 16, 0, &priv->bar1);
        if (ret)
                return ret;

        nv_wo32(priv->bar1, 0x00, 0x7fc00000);
        nv_wo32(priv->bar1, 0x04, lower_32_bits(limit));
        nv_wo32(priv->bar1, 0x08, lower_32_bits(start));
        nv_wo32(priv->bar1, 0x0c, upper_32_bits(limit) << 24 |
                                  upper_32_bits(start));
        nv_wo32(priv->bar1, 0x10, 0x00000000);
        nv_wo32(priv->bar1, 0x14, 0x00000000);

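        /*
         * Hook up the generic bar interface.  Only the flush routine
         * differs between the original NV50 and NV84-and-later chips.
         */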
        priv->base.alloc = nouveau_bar_alloc;
        priv->base.kmap = nv50_bar_kmap;
        priv->base.umap = nv50_bar_umap;
        priv->base.unmap = nv50_bar_unmap;
        if (device->chipset == 0x50)
                priv->base.flush = nv50_bar_flush;
        else
                priv->base.flush = nv84_bar_flush;
        spin_lock_init(&priv->lock);
        return 0;
}

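/*
 * Teardown, in roughly the reverse order of construction.  The BAR3 page
 * table object was attached to its VM by hand in the constructor, so it
 * must also be released by hand before the VM reference is dropped.
 */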
static void
nv50_bar_dtor(struct nouveau_object *object)
{
        struct nv50_bar_priv *priv = (void *)object;
        nouveau_gpuobj_ref(NULL, &priv->bar1);
        nouveau_vm_ref(NULL, &priv->bar1_vm, priv->pgd);
        nouveau_gpuobj_ref(NULL, &priv->bar3);
        if (priv->bar3_vm) {
                nouveau_gpuobj_ref(NULL, &priv->bar3_vm->pgt[0].obj[0]);
                nouveau_vm_ref(NULL, &priv->bar3_vm, priv->pgd);
        }
        nouveau_gpuobj_ref(NULL, &priv->pgd);
        nouveau_gpuobj_ref(NULL, &priv->pad);
        nouveau_gpuobj_ref(NULL, &priv->mem);
        nouveau_bar_destroy(&priv->base);
}

static int
nv50_bar_init(struct nouveau_object *object)
{
        struct nv50_bar_priv *priv = (void *)object;
        int ret;

        ret = nouveau_bar_init(&priv->base);
        if (ret)
                return ret;

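        /*
         * Pulse bit 8 of 0x000200, which appears to be the BAR unit's
         * enable bit, to reset it, then trigger a VM flush via 0x100c80
         * and wait for the busy bit to clear before binding anything.
         */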
        nv_mask(priv, 0x000200, 0x00000100, 0x00000000);
        nv_mask(priv, 0x000200, 0x00000100, 0x00000100);
        nv_wr32(priv, 0x100c80, 0x00060001);
        if (!nv_wait(priv, 0x100c80, 0x00000001, 0x00000000)) {
                nv_error(priv, "vm flush timeout\n");
                return -EBUSY;
        }

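        /*
         * Point the BAR unit at the instance block (address in 4KiB
         * units; written twice, the second time with the 0x40000000
         * flag set), then bind the BAR1 and BAR3 DMA objects through
         * 0x001708 and 0x00170c.  Object offsets are in 16-byte units,
         * hence the >> 4; 0x80000000 is presumably a "valid" bit.
         */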
        nv_wr32(priv, 0x001704, 0x00000000 | priv->mem->addr >> 12);
        nv_wr32(priv, 0x001704, 0x40000000 | priv->mem->addr >> 12);
        nv_wr32(priv, 0x001708, 0x80000000 | priv->bar1->node->offset >> 4);
        nv_wr32(priv, 0x00170c, 0x80000000 | priv->bar3->node->offset >> 4);
        return 0;
}

static int
nv50_bar_fini(struct nouveau_object *object, bool suspend)
{
        struct nv50_bar_priv *priv = (void *)object;
        return nouveau_bar_fini(&priv->base, suspend);
}

struct nouveau_oclass
nv50_bar_oclass = {
        .handle = NV_SUBDEV(BAR, 0x50),
        .ofuncs = &(struct nouveau_ofuncs) {
                .ctor = nv50_bar_ctor,
                .dtor = nv50_bar_dtor,
                .init = nv50_bar_init,
                .fini = nv50_bar_fini,
        },
};