drm/nouveau/fifo: turn all fifo modules into engine modules
drivers/gpu/drm/nouveau/nve0_fifo.c
/*
 * Copyright 2010 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Ben Skeggs
 */

#include "drmP.h"

#include "nouveau_drv.h"
#include "nouveau_mm.h"
#include "nouveau_fifo.h"

#define NVE0_FIFO_ENGINE_NUM 32

static void nve0_fifo_isr(struct drm_device *);

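/* Per-engine playlist (runlist) state.  Two playlist objects are kept and
 * alternated on every update, so a new list can be built while the hardware
 * may still be consuming the previous one.
 */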
struct nve0_fifo_engine {
        struct nouveau_gpuobj *playlist[2];
        int cur_playlist;
};

struct nve0_fifo_priv {
        struct nouveau_fifo_priv base;
        struct nve0_fifo_engine engine[NVE0_FIFO_ENGINE_NUM];
        struct {
                struct nouveau_gpuobj *mem;
                struct nouveau_vma bar;
        } user;
        int spoon_nr;
};

struct nve0_fifo_chan {
        struct nouveau_fifo_chan base;
        u32 engine;
};

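/* Rebuild and submit the playlist for one engine: scan the channel control
 * area (0x800000 + chid * 8) for channels that are enabled and bound to this
 * engine, write their IDs into the (lazily allocated) playlist buffer, then
 * hand the buffer to the hardware via 0x002270/0x002274 and wait for the
 * busy bit in 0x002284 to clear.  The register semantics here are reverse
 * engineered and may be incomplete.
 */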
static void
nve0_fifo_playlist_update(struct drm_device *dev, u32 engine)
{
        struct drm_nouveau_private *dev_priv = dev->dev_private;
        struct nouveau_instmem_engine *pinstmem = &dev_priv->engine.instmem;
        struct nve0_fifo_priv *priv = nv_engine(dev, NVOBJ_ENGINE_FIFO);
        struct nve0_fifo_engine *peng = &priv->engine[engine];
        struct nouveau_gpuobj *cur;
        u32 match = (engine << 16) | 0x00000001;
        int ret, i, p;

        cur = peng->playlist[peng->cur_playlist];
        if (unlikely(cur == NULL)) {
                ret = nouveau_gpuobj_new(dev, NULL, 0x8000, 0x1000, 0, &cur);
                if (ret) {
                        NV_ERROR(dev, "PFIFO: playlist alloc failed\n");
                        return;
                }

                peng->playlist[peng->cur_playlist] = cur;
        }

        peng->cur_playlist = !peng->cur_playlist;

        for (i = 0, p = 0; i < priv->base.channels; i++) {
                u32 ctrl = nv_rd32(dev, 0x800004 + (i * 8)) & 0x001f0001;
                if (ctrl != match)
                        continue;
                nv_wo32(cur, p + 0, i);
                nv_wo32(cur, p + 4, 0x00000000);
                p += 8;
        }
        pinstmem->flush(dev);

        nv_wr32(dev, 0x002270, cur->vinst >> 12);
        nv_wr32(dev, 0x002274, (engine << 20) | (p >> 3));
        if (!nv_wait(dev, 0x002284 + (engine * 4), 0x00100000, 0x00000000))
                NV_ERROR(dev, "PFIFO: playlist %d update timeout\n", engine);
}

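/* Create a channel's PFIFO context: map its 512-byte control page into BAR1
 * for userspace polling, fill in the channel description in instance memory
 * (control page address, indirect buffer address and size, plus a number of
 * constants whose meaning is only partially understood), then activate the
 * channel and add it to the engine playlist.
 */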
static int
nve0_fifo_context_new(struct nouveau_channel *chan, int engine)
{
        struct drm_device *dev = chan->dev;
        struct drm_nouveau_private *dev_priv = dev->dev_private;
        struct nouveau_instmem_engine *pinstmem = &dev_priv->engine.instmem;
        struct nve0_fifo_priv *priv = nv_engine(dev, engine);
        struct nve0_fifo_chan *fctx;
        u64 usermem = priv->user.mem->vinst + chan->id * 512;
        u64 ib_virt = chan->pushbuf_base + chan->dma.ib_base * 4;
        int ret = 0, i;

        fctx = chan->engctx[engine] = kzalloc(sizeof(*fctx), GFP_KERNEL);
        if (!fctx)
                return -ENOMEM;

        fctx->engine = 0; /* PGRAPH */

        /* allocate vram for control regs, map into polling area */
        chan->user = ioremap_wc(pci_resource_start(dev->pdev, 1) +
                                priv->user.bar.offset + (chan->id * 512), 512);
        if (!chan->user) {
                ret = -ENOMEM;
                goto error;
        }

        for (i = 0; i < 0x100; i += 4)
                nv_wo32(chan->ramin, i, 0x00000000);
        nv_wo32(chan->ramin, 0x08, lower_32_bits(usermem));
        nv_wo32(chan->ramin, 0x0c, upper_32_bits(usermem));
        nv_wo32(chan->ramin, 0x10, 0x0000face);
        nv_wo32(chan->ramin, 0x30, 0xfffff902);
        nv_wo32(chan->ramin, 0x48, lower_32_bits(ib_virt));
        nv_wo32(chan->ramin, 0x4c, drm_order(chan->dma.ib_max + 1) << 16 |
                                   upper_32_bits(ib_virt));
        nv_wo32(chan->ramin, 0x84, 0x20400000);
        nv_wo32(chan->ramin, 0x94, 0x30000001);
        nv_wo32(chan->ramin, 0x9c, 0x00000100);
        nv_wo32(chan->ramin, 0xac, 0x0000001f);
        nv_wo32(chan->ramin, 0xe4, 0x00000000);
        nv_wo32(chan->ramin, 0xe8, chan->id);
        nv_wo32(chan->ramin, 0xf8, 0x10003080); /* 0x002310 */
        nv_wo32(chan->ramin, 0xfc, 0x10000010); /* 0x002350 */
        pinstmem->flush(dev);

        nv_wr32(dev, 0x800000 + (chan->id * 8), 0x80000000 |
                                                (chan->ramin->vinst >> 12));
        nv_mask(dev, 0x800004 + (chan->id * 8), 0x00000400, 0x00000400);
        nve0_fifo_playlist_update(dev, fctx->engine);
        nv_mask(dev, 0x800004 + (chan->id * 8), 0x00000400, 0x00000400);

error:
        if (ret)
                priv->base.base.context_del(chan, engine);
        return ret;
}

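/* Tear down a channel's PFIFO context: request that the hardware kick the
 * channel and wait for the acknowledgement in 0x002634, drop it from the
 * playlist, clear its control entry, and unmap the user control page.
 */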
static void
nve0_fifo_context_del(struct nouveau_channel *chan, int engine)
{
        struct nve0_fifo_chan *fctx = chan->engctx[engine];
        struct drm_device *dev = chan->dev;

        nv_mask(dev, 0x800004 + (chan->id * 8), 0x00000800, 0x00000800);
        nv_wr32(dev, 0x002634, chan->id);
        if (!nv_wait(dev, 0x002634, 0xffffffff, chan->id))
                NV_WARN(dev, "0x2634 != chid: 0x%08x\n", nv_rd32(dev, 0x2634));
        nve0_fifo_playlist_update(dev, fctx->engine);
        nv_wr32(dev, 0x800000 + (chan->id * 8), 0x00000000);

        if (chan->user) {
                iounmap(chan->user);
                chan->user = NULL;
        }

        chan->engctx[NVOBJ_ENGINE_FIFO] = NULL;
        kfree(fctx);
}

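/* Bring up (or resume) PFIFO: reset the unit, enable all available
 * PSUBFIFOs, clear and unmask interrupts, point the hardware at the
 * BAR1-mapped control area (0x002254), and re-register any channels that
 * already have a FIFO context, e.g. when returning from suspend.
 */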
static int
nve0_fifo_init(struct drm_device *dev, int engine)
{
        struct drm_nouveau_private *dev_priv = dev->dev_private;
        struct nve0_fifo_priv *priv = nv_engine(dev, engine);
        struct nve0_fifo_chan *fctx;
        int i;

        /* reset PFIFO, enable all available PSUBFIFO areas */
        nv_mask(dev, 0x000200, 0x00000100, 0x00000000);
        nv_mask(dev, 0x000200, 0x00000100, 0x00000100);
        nv_wr32(dev, 0x000204, 0xffffffff);

        priv->spoon_nr = hweight32(nv_rd32(dev, 0x000204));
        NV_DEBUG(dev, "PFIFO: %d subfifo(s)\n", priv->spoon_nr);

        /* PSUBFIFO[n] */
        for (i = 0; i < priv->spoon_nr; i++) {
                nv_mask(dev, 0x04013c + (i * 0x2000), 0x10000100, 0x00000000);
                nv_wr32(dev, 0x040108 + (i * 0x2000), 0xffffffff); /* INTR */
                nv_wr32(dev, 0x04010c + (i * 0x2000), 0xfffffeff); /* INTR_EN */
        }

        nv_wr32(dev, 0x002254, 0x10000000 | priv->user.bar.offset >> 12);

        nv_wr32(dev, 0x002a00, 0xffffffff);
        nv_wr32(dev, 0x002100, 0xffffffff);
        nv_wr32(dev, 0x002140, 0xbfffffff);

        /* restore PFIFO context table */
        for (i = 0; i < priv->base.channels; i++) {
                struct nouveau_channel *chan = dev_priv->channels.ptr[i];
                if (!chan || !(fctx = chan->engctx[engine]))
                        continue;

                nv_wr32(dev, 0x800000 + (i * 8), 0x80000000 |
                                                 (chan->ramin->vinst >> 12));
                nv_mask(dev, 0x800004 + (i * 8), 0x00000400, 0x00000400);
                nve0_fifo_playlist_update(dev, fctx->engine);
                nv_mask(dev, 0x800004 + (i * 8), 0x00000400, 0x00000400);
        }

        return 0;
}

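/* Quiesce PFIFO before suspend: kick every active channel off the hardware
 * and wait for the acknowledgement in 0x002634, then mask all PFIFO
 * interrupts.
 */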
static int
nve0_fifo_fini(struct drm_device *dev, int engine, bool suspend)
{
        struct nve0_fifo_priv *priv = nv_engine(dev, engine);
        int i;

        for (i = 0; i < priv->base.channels; i++) {
                if (!(nv_rd32(dev, 0x800004 + (i * 8)) & 1))
                        continue;

                nv_mask(dev, 0x800004 + (i * 8), 0x00000800, 0x00000800);
                nv_wr32(dev, 0x002634, i);
                if (!nv_wait(dev, 0x002634, 0xffffffff, i)) {
                        NV_INFO(dev, "PFIFO: kick ch %d failed: 0x%08x\n",
                                i, nv_rd32(dev, 0x002634));
                        return -EBUSY;
                }
        }

        nv_wr32(dev, 0x002140, 0x00000000);
        return 0;
}

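/* Decode tables used for fault and interrupt reporting.  The unit and
 * client tables are still empty placeholders for this chipset; only the
 * fault reason codes and a couple of PSUBFIFO status bits are known so far.
 */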
struct nouveau_enum nve0_fifo_fault_unit[] = {
        {}
};

struct nouveau_enum nve0_fifo_fault_reason[] = {
        { 0x00, "PT_NOT_PRESENT" },
        { 0x01, "PT_TOO_SHORT" },
        { 0x02, "PAGE_NOT_PRESENT" },
        { 0x03, "VM_LIMIT_EXCEEDED" },
        { 0x04, "NO_CHANNEL" },
        { 0x05, "PAGE_SYSTEM_ONLY" },
        { 0x06, "PAGE_READ_ONLY" },
        { 0x0a, "COMPRESSED_SYSRAM" },
        { 0x0c, "INVALID_STORAGE_TYPE" },
        {}
};

struct nouveau_enum nve0_fifo_fault_hubclient[] = {
        {}
};

struct nouveau_enum nve0_fifo_fault_gpcclient[] = {
        {}
};

struct nouveau_bitfield nve0_fifo_subfifo_intr[] = {
        { 0x00200000, "ILLEGAL_MTHD" },
        { 0x00800000, "EMPTY_SUBC" },
        {}
};

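/* Report a VM fault signalled through PFIFO: read back the faulting
 * instance, virtual address and status for the given unit, and decode as
 * much of it as the (currently sparse) enum tables allow.
 */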
static void
nve0_fifo_isr_vm_fault(struct drm_device *dev, int unit)
{
        u32 inst = nv_rd32(dev, 0x2800 + (unit * 0x10));
        u32 valo = nv_rd32(dev, 0x2804 + (unit * 0x10));
        u32 vahi = nv_rd32(dev, 0x2808 + (unit * 0x10));
        u32 stat = nv_rd32(dev, 0x280c + (unit * 0x10));
        u32 client = (stat & 0x00001f00) >> 8;

        NV_INFO(dev, "PFIFO: %s fault at 0x%010llx [",
                (stat & 0x00000080) ? "write" : "read", (u64)vahi << 32 | valo);
        nouveau_enum_print(nve0_fifo_fault_reason, stat & 0x0000000f);
        printk("] from ");
        nouveau_enum_print(nve0_fifo_fault_unit, unit);
        if (stat & 0x00000040) {
                printk("/");
                nouveau_enum_print(nve0_fifo_fault_hubclient, client);
        } else {
                printk("/GPC%d/", (stat & 0x1f000000) >> 24);
                nouveau_enum_print(nve0_fifo_fault_gpcclient, client);
        }
        printk(" on channel 0x%010llx\n", (u64)inst << 12);
}

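/* Handle a PSUBFIFO interrupt: log the offending channel, subchannel,
 * method and data, then acknowledge it.  The 0x80600008 write to the
 * method register appears to drop the faulting method; its exact meaning
 * is not documented.
 */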
static void
nve0_fifo_isr_subfifo_intr(struct drm_device *dev, int unit)
{
        u32 stat = nv_rd32(dev, 0x040108 + (unit * 0x2000));
        u32 addr = nv_rd32(dev, 0x0400c0 + (unit * 0x2000));
        u32 data = nv_rd32(dev, 0x0400c4 + (unit * 0x2000));
        u32 chid = nv_rd32(dev, 0x040120 + (unit * 0x2000)) & 0x7f;
        u32 subc = (addr & 0x00070000) >> 16;
        u32 mthd = (addr & 0x00003ffc);

        NV_INFO(dev, "PSUBFIFO %d:", unit);
        nouveau_bitfield_print(nve0_fifo_subfifo_intr, stat);
        NV_INFO(dev, "PSUBFIFO %d: ch %d subc %d mthd 0x%04x data 0x%08x\n",
                unit, chid, subc, mthd, data);

        nv_wr32(dev, 0x0400c0 + (unit * 0x2000), 0x80600008);
        nv_wr32(dev, 0x040108 + (unit * 0x2000), stat);
}

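/* Top-level PFIFO interrupt handler.  The known status bits are VM faults
 * (0x10000000) and PSUBFIFO interrupts (0x20000000); anything else is
 * logged and acknowledged, and completely unhandled status causes all
 * PFIFO interrupts to be masked off to avoid an interrupt storm.
 */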
static void
nve0_fifo_isr(struct drm_device *dev)
{
        u32 stat = nv_rd32(dev, 0x002100);

        if (stat & 0x00000100) {
                NV_INFO(dev, "PFIFO: unknown status 0x00000100\n");
                nv_wr32(dev, 0x002100, 0x00000100);
                stat &= ~0x00000100;
        }

        if (stat & 0x10000000) {
                u32 units = nv_rd32(dev, 0x00259c);
                u32 u = units;

                while (u) {
                        int i = ffs(u) - 1;
                        nve0_fifo_isr_vm_fault(dev, i);
                        u &= ~(1 << i);
                }

                nv_wr32(dev, 0x00259c, units);
                stat &= ~0x10000000;
        }

        if (stat & 0x20000000) {
                u32 units = nv_rd32(dev, 0x0025a0);
                u32 u = units;

                while (u) {
                        int i = ffs(u) - 1;
                        nve0_fifo_isr_subfifo_intr(dev, i);
                        u &= ~(1 << i);
                }

                nv_wr32(dev, 0x0025a0, units);
                stat &= ~0x20000000;
        }

        if (stat & 0x40000000) {
                NV_INFO(dev, "PFIFO: unknown status 0x40000000\n");
                nv_mask(dev, 0x002a00, 0x00000000, 0x00000000);
                stat &= ~0x40000000;
        }

        if (stat) {
                NV_INFO(dev, "PFIFO: unhandled status 0x%08x\n", stat);
                nv_wr32(dev, 0x002100, stat);
                nv_wr32(dev, 0x002140, 0);
        }
}

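/* Free everything nve0_fifo_create() allocated: the BAR1 mapping and the
 * backing memory for the user control pages, plus both playlist buffers of
 * every engine.
 */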
static void
nve0_fifo_destroy(struct drm_device *dev, int engine)
{
        struct drm_nouveau_private *dev_priv = dev->dev_private;
        struct nve0_fifo_priv *priv = nv_engine(dev, engine);
        int i;

        nouveau_vm_put(&priv->user.bar);
        nouveau_gpuobj_ref(NULL, &priv->user.mem);

        for (i = 0; i < NVE0_FIFO_ENGINE_NUM; i++) {
                nouveau_gpuobj_ref(NULL, &priv->engine[i].playlist[0]);
                nouveau_gpuobj_ref(NULL, &priv->engine[i].playlist[1]);
        }

        dev_priv->eng[engine] = NULL;
        kfree(priv);
}

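/* Construct the NVE0 PFIFO engine module: hook up the engine vtable,
 * allocate the per-channel control memory (512 bytes per channel), map it
 * through BAR1, and register the PFIFO interrupt handler on IRQ source 8.
 */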
int
nve0_fifo_create(struct drm_device *dev)
{
        struct drm_nouveau_private *dev_priv = dev->dev_private;
        struct nve0_fifo_priv *priv;
        int ret;

        priv = kzalloc(sizeof(*priv), GFP_KERNEL);
        if (!priv)
                return -ENOMEM;

        priv->base.base.destroy = nve0_fifo_destroy;
        priv->base.base.init = nve0_fifo_init;
        priv->base.base.fini = nve0_fifo_fini;
        priv->base.base.context_new = nve0_fifo_context_new;
        priv->base.base.context_del = nve0_fifo_context_del;
        priv->base.channels = 4096;
        dev_priv->eng[NVOBJ_ENGINE_FIFO] = &priv->base.base;

        ret = nouveau_gpuobj_new(dev, NULL, priv->base.channels * 512, 0x1000,
                                 NVOBJ_FLAG_ZERO_ALLOC, &priv->user.mem);
        if (ret)
                goto error;

        ret = nouveau_vm_get(dev_priv->bar1_vm, priv->user.mem->size,
                             12, NV_MEM_ACCESS_RW, &priv->user.bar);
        if (ret)
                goto error;

        nouveau_vm_map(&priv->user.bar, *(struct nouveau_mem **)priv->user.mem->node);

        nouveau_irq_register(dev, 8, nve0_fifo_isr);
error:
        if (ret)
                priv->base.base.destroy(dev, NVOBJ_ENGINE_FIFO);
        return ret;
}