drm/nouveau: port all engines to new engine module format
drivers/gpu/drm/nouveau/core/engine/fifo/nve0.c
/*
 * Copyright 2012 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Ben Skeggs
 */

#include <core/client.h>
#include <core/handle.h>
#include <core/namedb.h>
#include <core/gpuobj.h>
#include <core/engctx.h>
#include <core/class.h>
#include <core/math.h>
#include <core/enum.h>

#include <subdev/timer.h>
#include <subdev/bar.h>
#include <subdev/vm.h>

#include <engine/dmaobj.h>
#include <engine/fifo.h>

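/* Driver-private state.  Each engine gets a pair of playlist (runlist)
 * buffers that are rebuilt and swapped on every update.  The "user" object
 * is a block of 0x200 bytes per channel, mapped through a BAR aperture so
 * it can be exposed to userspace; the register-level semantics described in
 * the comments below are inferred from the code itself.
 */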
struct nve0_fifo_engn {
        struct nouveau_gpuobj *playlist[2];
        int cur_playlist;
};

struct nve0_fifo_priv {
        struct nouveau_fifo base;
        struct nve0_fifo_engn engine[16];
        struct {
                struct nouveau_gpuobj *mem;
                struct nouveau_vma bar;
        } user;
        int spoon_nr;
};

struct nve0_fifo_base {
        struct nouveau_fifo_base base;
        struct nouveau_gpuobj *pgd;
        struct nouveau_vm *vm;
};

struct nve0_fifo_chan {
        struct nouveau_fifo_chan base;
        u32 engine;
};

/*******************************************************************************
 * FIFO channel objects
 ******************************************************************************/

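/*
 * Rebuild the playlist (runlist) for one engine: scan each channel's control
 * register (0x800004 + chid * 8) for entries that are enabled and bound to
 * this engine, write them into the currently unused playlist buffer, then
 * submit it via 0x002270/0x002274 and wait for the pending bit in
 * 0x002284 + engine * 4 to clear.
 */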
static void
nve0_fifo_playlist_update(struct nve0_fifo_priv *priv, u32 engine)
{
        struct nouveau_bar *bar = nouveau_bar(priv);
        struct nve0_fifo_engn *engn = &priv->engine[engine];
        struct nouveau_gpuobj *cur;
        u32 match = (engine << 16) | 0x00000001;
        int i, p;

        cur = engn->playlist[engn->cur_playlist];
        if (unlikely(cur == NULL)) {
                int ret = nouveau_gpuobj_new(nv_object(priv)->parent, NULL,
                                             0x8000, 0x1000, 0, &cur);
                if (ret) {
                        nv_error(priv, "playlist alloc failed\n");
                        return;
                }

                engn->playlist[engn->cur_playlist] = cur;
        }

        engn->cur_playlist = !engn->cur_playlist;

        for (i = 0, p = 0; i < priv->base.max; i++) {
                u32 ctrl = nv_rd32(priv, 0x800004 + (i * 8)) & 0x001f0001;
                if (ctrl != match)
                        continue;
                nv_wo32(cur, p + 0, i);
                nv_wo32(cur, p + 4, 0x00000000);
                p += 8;
        }
        bar->flush(bar);

        nv_wr32(priv, 0x002270, cur->addr >> 12);
        nv_wr32(priv, 0x002274, (engine << 20) | (p >> 3));
        if (!nv_wait(priv, 0x002284 + (engine * 4), 0x00100000, 0x00000000))
                nv_error(priv, "playlist %d update timeout\n", engine);
}

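/*
 * Attach an engine context to a channel: map the context into the channel's
 * VM if necessary and write its virtual address into the channel's instance
 * block.  Only GR has a slot here (0x210); SW objects need no binding.
 */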
static int
nve0_fifo_context_attach(struct nouveau_object *parent,
                         struct nouveau_object *object)
{
        struct nouveau_bar *bar = nouveau_bar(parent);
        struct nve0_fifo_base *base = (void *)parent->parent;
        struct nouveau_engctx *ectx = (void *)object;
        u32 addr;
        int ret;

        switch (nv_engidx(object->engine)) {
        case NVDEV_ENGINE_SW : return 0;
        case NVDEV_ENGINE_GR : addr = 0x0210; break;
        default:
                return -EINVAL;
        }

        if (!ectx->vma.node) {
                ret = nouveau_gpuobj_map_vm(nv_gpuobj(ectx), base->vm,
                                            NV_MEM_ACCESS_RW, &ectx->vma);
                if (ret)
                        return ret;
        }

        nv_wo32(base, addr + 0x00, lower_32_bits(ectx->vma.offset) | 4);
        nv_wo32(base, addr + 0x04, upper_32_bits(ectx->vma.offset));
        bar->flush(bar);
        return 0;
}

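/*
 * Detach an engine context: clear its pointers in the instance block, then
 * kick the channel off the hardware through 0x002634 and wait for the write
 * to read back.  A kick timeout is only treated as fatal during suspend.
 */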
static int
nve0_fifo_context_detach(struct nouveau_object *parent, bool suspend,
                         struct nouveau_object *object)
{
        struct nouveau_bar *bar = nouveau_bar(parent);
        struct nve0_fifo_priv *priv = (void *)parent->engine;
        struct nve0_fifo_base *base = (void *)parent->parent;
        struct nve0_fifo_chan *chan = (void *)parent;
        u32 addr;

        switch (nv_engidx(object->engine)) {
        case NVDEV_ENGINE_SW : return 0;
        case NVDEV_ENGINE_GR : addr = 0x0210; break;
        default:
                return -EINVAL;
        }

        nv_wo32(base, addr + 0x00, 0x00000000);
        nv_wo32(base, addr + 0x04, 0x00000000);
        bar->flush(bar);

        nv_wr32(priv, 0x002634, chan->base.chid);
        if (!nv_wait(priv, 0x002634, 0xffffffff, chan->base.chid)) {
                nv_error(priv, "channel %d kick timeout\n", chan->base.chid);
                if (suspend)
                        return -EBUSY;
        }

        return 0;
}

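/*
 * Channel constructor: clear this channel's 0x200-byte slice of the user
 * area and fill in the instance block with the user area address, the
 * indirect pushbuffer (IB) offset and log2 of its entry count, and a set
 * of fixed initialisation values.
 */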
static int
nve0_fifo_chan_ctor(struct nouveau_object *parent,
                    struct nouveau_object *engine,
                    struct nouveau_oclass *oclass, void *data, u32 size,
                    struct nouveau_object **pobject)
{
        struct nouveau_bar *bar = nouveau_bar(parent);
        struct nve0_fifo_priv *priv = (void *)engine;
        struct nve0_fifo_base *base = (void *)parent;
        struct nve0_fifo_chan *chan;
        struct nv_channel_ind_class *args = data;
        u64 usermem, ioffset, ilength;
        int ret, i;

        if (size < sizeof(*args))
                return -EINVAL;

        ret = nouveau_fifo_channel_create(parent, engine, oclass, 1,
                                          priv->user.bar.offset, 0x200,
                                          args->pushbuf,
                                          (1 << NVDEV_ENGINE_SW) |
                                          (1 << NVDEV_ENGINE_GR), &chan);
        *pobject = nv_object(chan);
        if (ret)
                return ret;

        nv_parent(chan)->context_attach = nve0_fifo_context_attach;
        nv_parent(chan)->context_detach = nve0_fifo_context_detach;

        usermem = chan->base.chid * 0x200;
        ioffset = args->ioffset;
        ilength = log2i(args->ilength / 8);

        for (i = 0; i < 0x200; i += 4)
                nv_wo32(priv->user.mem, usermem + i, 0x00000000);

        nv_wo32(base, 0x08, lower_32_bits(priv->user.mem->addr + usermem));
        nv_wo32(base, 0x0c, upper_32_bits(priv->user.mem->addr + usermem));
        nv_wo32(base, 0x10, 0x0000face);
        nv_wo32(base, 0x30, 0xfffff902);
        nv_wo32(base, 0x48, lower_32_bits(ioffset));
        nv_wo32(base, 0x4c, upper_32_bits(ioffset) | (ilength << 16));
        nv_wo32(base, 0x84, 0x20400000);
        nv_wo32(base, 0x94, 0x30000001);
        nv_wo32(base, 0x9c, 0x00000100);
        nv_wo32(base, 0xac, 0x0000001f);
        nv_wo32(base, 0xe8, chan->base.chid);
        nv_wo32(base, 0xb8, 0xf8000000);
        nv_wo32(base, 0xf8, 0x10003080); /* 0x002310 */
        nv_wo32(base, 0xfc, 0x10000010); /* 0x002350 */
        bar->flush(bar);
        return 0;
}

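/*
 * Bind the channel's instance block to its channel ID, enable it, and add
 * it to the playlist of the engine it was created for.
 */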
static int
nve0_fifo_chan_init(struct nouveau_object *object)
{
        struct nouveau_gpuobj *base = nv_gpuobj(object->parent);
        struct nve0_fifo_priv *priv = (void *)object->engine;
        struct nve0_fifo_chan *chan = (void *)object;
        u32 chid = chan->base.chid;
        int ret;

        ret = nouveau_fifo_channel_init(&chan->base);
        if (ret)
                return ret;

        nv_wr32(priv, 0x800000 + (chid * 8), 0x80000000 | base->addr >> 12);
        nv_mask(priv, 0x800004 + (chid * 8), 0x00000400, 0x00000400);
        nve0_fifo_playlist_update(priv, chan->engine);
        nv_mask(priv, 0x800004 + (chid * 8), 0x00000400, 0x00000400);
        return 0;
}

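/*
 * Take the channel off the hardware: flag it in its control register,
 * rebuild the engine's playlist without it, then clear the instance binding.
 */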
static int
nve0_fifo_chan_fini(struct nouveau_object *object, bool suspend)
{
        struct nve0_fifo_priv *priv = (void *)object->engine;
        struct nve0_fifo_chan *chan = (void *)object;
        u32 chid = chan->base.chid;

        nv_mask(priv, 0x800004 + (chid * 8), 0x00000800, 0x00000800);
        nve0_fifo_playlist_update(priv, chan->engine);
        nv_wr32(priv, 0x800000 + (chid * 8), 0x00000000);

        return nouveau_fifo_channel_fini(&chan->base, suspend);
}

static struct nouveau_ofuncs
nve0_fifo_ofuncs = {
        .ctor = nve0_fifo_chan_ctor,
        .dtor = _nouveau_fifo_channel_dtor,
        .init = nve0_fifo_chan_init,
        .fini = nve0_fifo_chan_fini,
        .rd32 = _nouveau_fifo_channel_rd32,
        .wr32 = _nouveau_fifo_channel_wr32,
};

static struct nouveau_oclass
nve0_fifo_sclass[] = {
        { 0xa06f, &nve0_fifo_ofuncs },
        {}
};

/*******************************************************************************
 * FIFO context - instmem heap and vm setup
 ******************************************************************************/

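/*
 * Context (instance block) constructor: allocate a page directory, point
 * the instance block at it, and share the client's address space with it.
 */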
static int
nve0_fifo_context_ctor(struct nouveau_object *parent,
                       struct nouveau_object *engine,
                       struct nouveau_oclass *oclass, void *data, u32 size,
                       struct nouveau_object **pobject)
{
        struct nve0_fifo_base *base;
        int ret;

        ret = nouveau_fifo_context_create(parent, engine, oclass, NULL, 0x1000,
                                          0x1000, NVOBJ_FLAG_ZERO_ALLOC, &base);
        *pobject = nv_object(base);
        if (ret)
                return ret;

        ret = nouveau_gpuobj_new(parent, NULL, 0x10000, 0x1000, 0, &base->pgd);
        if (ret)
                return ret;

        nv_wo32(base, 0x0200, lower_32_bits(base->pgd->addr));
        nv_wo32(base, 0x0204, upper_32_bits(base->pgd->addr));
        nv_wo32(base, 0x0208, 0xffffffff);
        nv_wo32(base, 0x020c, 0x000000ff);

        ret = nouveau_vm_ref(nouveau_client(parent)->vm, &base->vm, base->pgd);
        if (ret)
                return ret;

        return 0;
}

static void
nve0_fifo_context_dtor(struct nouveau_object *object)
{
        struct nve0_fifo_base *base = (void *)object;
        nouveau_vm_ref(NULL, &base->vm, base->pgd);
        nouveau_gpuobj_ref(NULL, &base->pgd);
        nouveau_fifo_context_destroy(&base->base);
}

static struct nouveau_oclass
nve0_fifo_cclass = {
        .handle = NV_ENGCTX(FIFO, 0xe0),
        .ofuncs = &(struct nouveau_ofuncs) {
                .ctor = nve0_fifo_context_ctor,
                .dtor = nve0_fifo_context_dtor,
                .init = _nouveau_fifo_context_init,
                .fini = _nouveau_fifo_context_fini,
                .rd32 = _nouveau_fifo_context_rd32,
                .wr32 = _nouveau_fifo_context_wr32,
        },
};

/*******************************************************************************
 * PFIFO engine
 ******************************************************************************/

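/*
 * Fault decode tables.  Only the fault reasons are known here; the unit and
 * hub/GPC client tables are still empty placeholders.
 */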
struct nouveau_enum nve0_fifo_fault_unit[] = {
        {}
};

struct nouveau_enum nve0_fifo_fault_reason[] = {
        { 0x00, "PT_NOT_PRESENT" },
        { 0x01, "PT_TOO_SHORT" },
        { 0x02, "PAGE_NOT_PRESENT" },
        { 0x03, "VM_LIMIT_EXCEEDED" },
        { 0x04, "NO_CHANNEL" },
        { 0x05, "PAGE_SYSTEM_ONLY" },
        { 0x06, "PAGE_READ_ONLY" },
        { 0x0a, "COMPRESSED_SYSRAM" },
        { 0x0c, "INVALID_STORAGE_TYPE" },
        {}
};

struct nouveau_enum nve0_fifo_fault_hubclient[] = {
        {}
};

struct nouveau_enum nve0_fifo_fault_gpcclient[] = {
        {}
};

struct nouveau_bitfield nve0_fifo_subfifo_intr[] = {
        { 0x00200000, "ILLEGAL_MTHD" },
        { 0x00800000, "EMPTY_SUBC" },
        {}
};

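/*
 * Decode and log an MMU fault from the given unit: the faulting instance,
 * virtual address and status are read from 0x2800 + unit * 0x10 and printed
 * using the tables above.
 */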
static void
nve0_fifo_isr_vm_fault(struct nve0_fifo_priv *priv, int unit)
{
        u32 inst = nv_rd32(priv, 0x2800 + (unit * 0x10));
        u32 valo = nv_rd32(priv, 0x2804 + (unit * 0x10));
        u32 vahi = nv_rd32(priv, 0x2808 + (unit * 0x10));
        u32 stat = nv_rd32(priv, 0x280c + (unit * 0x10));
        u32 client = (stat & 0x00001f00) >> 8;

        nv_error(priv, "PFIFO: %s fault at 0x%010llx [", (stat & 0x00000080) ?
                       "write" : "read", (u64)vahi << 32 | valo);
        nouveau_enum_print(nve0_fifo_fault_reason, stat & 0x0000000f);
        printk("] from ");
        nouveau_enum_print(nve0_fifo_fault_unit, unit);
        if (stat & 0x00000040) {
                printk("/");
                nouveau_enum_print(nve0_fifo_fault_hubclient, client);
        } else {
                printk("/GPC%d/", (stat & 0x1f000000) >> 24);
                nouveau_enum_print(nve0_fifo_fault_gpcclient, client);
        }
        printk(" on channel 0x%010llx\n", (u64)inst << 12);
}

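/*
 * Forward a software method to the SW class (0x906e) bound to the channel.
 * Returns 0 if a handler consumed it, -EINVAL otherwise.
 */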
static int
nve0_fifo_swmthd(struct nve0_fifo_priv *priv, u32 chid, u32 mthd, u32 data)
{
        struct nve0_fifo_chan *chan = NULL;
        struct nouveau_handle *bind;
        unsigned long flags;
        int ret = -EINVAL;

        spin_lock_irqsave(&priv->base.lock, flags);
        if (likely(chid >= priv->base.min && chid <= priv->base.max))
                chan = (void *)priv->base.channel[chid];
        if (unlikely(!chan))
                goto out;

        bind = nouveau_namedb_get_class(nv_namedb(chan), 0x906e);
        if (likely(bind)) {
                if (!mthd || !nv_call(bind->object, mthd, data))
                        ret = 0;
                nouveau_namedb_put(bind);
        }

out:
        spin_unlock_irqrestore(&priv->base.lock, flags);
        return ret;
}

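/*
 * Handle an interrupt from one PSUBFIFO unit.  ILLEGAL_MTHD and EMPTY_SUBC
 * may be software methods and are offered to nve0_fifo_swmthd() first;
 * whatever remains is logged before the method and status are cleared.
 */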
static void
nve0_fifo_isr_subfifo_intr(struct nve0_fifo_priv *priv, int unit)
{
        u32 stat = nv_rd32(priv, 0x040108 + (unit * 0x2000));
        u32 addr = nv_rd32(priv, 0x0400c0 + (unit * 0x2000));
        u32 data = nv_rd32(priv, 0x0400c4 + (unit * 0x2000));
        u32 chid = nv_rd32(priv, 0x040120 + (unit * 0x2000)) & 0xfff;
        u32 subc = (addr & 0x00070000) >> 16;
        u32 mthd = (addr & 0x00003ffc);
        u32 show = stat;

        if (stat & 0x00200000) {
                if (mthd == 0x0054) {
                        if (!nve0_fifo_swmthd(priv, chid, 0x0500, 0x00000000))
                                show &= ~0x00200000;
                }
        }

        if (stat & 0x00800000) {
                if (!nve0_fifo_swmthd(priv, chid, mthd, data))
                        show &= ~0x00800000;
        }

        if (show) {
                nv_error(priv, "SUBFIFO%d:", unit);
                nouveau_bitfield_print(nve0_fifo_subfifo_intr, show);
                printk("\n");
                nv_error(priv, "SUBFIFO%d: ch %d subc %d mthd 0x%04x "
                               "data 0x%08x\n",
                         unit, chid, subc, mthd, data);
        }

        nv_wr32(priv, 0x0400c0 + (unit * 0x2000), 0x80600008);
        nv_wr32(priv, 0x040108 + (unit * 0x2000), stat);
}

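/*
 * Top-level PFIFO interrupt handler: dispatch VM faults (bit 28) and
 * PSUBFIFO interrupts (bit 29) to the helpers above, acknowledge the bits
 * it understands, and mask everything off if an unknown bit stays pending.
 */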
static void
nve0_fifo_intr(struct nouveau_subdev *subdev)
{
        struct nve0_fifo_priv *priv = (void *)subdev;
        u32 mask = nv_rd32(priv, 0x002140);
        u32 stat = nv_rd32(priv, 0x002100) & mask;

        if (stat & 0x00000100) {
                nv_warn(priv, "unknown status 0x00000100\n");
                nv_wr32(priv, 0x002100, 0x00000100);
                stat &= ~0x00000100;
        }

        if (stat & 0x10000000) {
                u32 units = nv_rd32(priv, 0x00259c);
                u32 u = units;

                while (u) {
                        int i = ffs(u) - 1;
                        nve0_fifo_isr_vm_fault(priv, i);
                        u &= ~(1 << i);
                }

                nv_wr32(priv, 0x00259c, units);
                stat &= ~0x10000000;
        }

        if (stat & 0x20000000) {
                u32 units = nv_rd32(priv, 0x0025a0);
                u32 u = units;

                while (u) {
                        int i = ffs(u) - 1;
                        nve0_fifo_isr_subfifo_intr(priv, i);
                        u &= ~(1 << i);
                }

                nv_wr32(priv, 0x0025a0, units);
                stat &= ~0x20000000;
        }

        if (stat & 0x40000000) {
                nv_warn(priv, "unknown status 0x40000000\n");
                nv_mask(priv, 0x002a00, 0x00000000, 0x00000000);
                stat &= ~0x40000000;
        }

        if (stat) {
                nv_fatal(priv, "unhandled status 0x%08x\n", stat);
                nv_wr32(priv, 0x002100, stat);
                nv_wr32(priv, 0x002140, 0);
        }
}

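/*
 * Engine constructor: register channels 0-4095, allocate and map the
 * per-channel user area, and hook up the interrupt handler and the engine
 * context/object classes.
 */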
static int
nve0_fifo_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
               struct nouveau_oclass *oclass, void *data, u32 size,
               struct nouveau_object **pobject)
{
        struct nve0_fifo_priv *priv;
        int ret;

        ret = nouveau_fifo_create(parent, engine, oclass, 0, 4095, &priv);
        *pobject = nv_object(priv);
        if (ret)
                return ret;

        ret = nouveau_gpuobj_new(parent, NULL, 4096 * 0x200, 0x1000,
                                 NVOBJ_FLAG_ZERO_ALLOC, &priv->user.mem);
        if (ret)
                return ret;

        ret = nouveau_gpuobj_map(priv->user.mem, NV_MEM_ACCESS_RW,
                                 &priv->user.bar);
        if (ret)
                return ret;

        nv_subdev(priv)->unit = 0x00000100;
        nv_subdev(priv)->intr = nve0_fifo_intr;
        nv_engine(priv)->cclass = &nve0_fifo_cclass;
        nv_engine(priv)->sclass = nve0_fifo_sclass;
        return 0;
}

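/*
 * Destructor: release the user area mapping and memory, drop any playlist
 * buffers that were allocated, then destroy the base object.
 */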
static void
nve0_fifo_dtor(struct nouveau_object *object)
{
        struct nve0_fifo_priv *priv = (void *)object;
        int i;

        nouveau_gpuobj_unmap(&priv->user.bar);
        nouveau_gpuobj_ref(NULL, &priv->user.mem);

        for (i = 0; i < ARRAY_SIZE(priv->engine); i++) {
                nouveau_gpuobj_ref(NULL, &priv->engine[i].playlist[1]);
                nouveau_gpuobj_ref(NULL, &priv->engine[i].playlist[0]);
        }

        nouveau_fifo_destroy(&priv->base);
}

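/*
 * Bring the engine up: enable every available PSUBFIFO, reset and unmask
 * their interrupts, point PFIFO at the user area (0x002254), and finally
 * enable the top-level interrupt sources.
 */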
static int
nve0_fifo_init(struct nouveau_object *object)
{
        struct nve0_fifo_priv *priv = (void *)object;
        int ret, i;

        ret = nouveau_fifo_init(&priv->base);
        if (ret)
                return ret;

        /* enable all available PSUBFIFOs */
        nv_wr32(priv, 0x000204, 0xffffffff);
        priv->spoon_nr = hweight32(nv_rd32(priv, 0x000204));
        nv_debug(priv, "%d subfifo(s)\n", priv->spoon_nr);

        /* PSUBFIFO[n] */
        for (i = 0; i < priv->spoon_nr; i++) {
                nv_mask(priv, 0x04013c + (i * 0x2000), 0x10000100, 0x00000000);
                nv_wr32(priv, 0x040108 + (i * 0x2000), 0xffffffff); /* INTR */
                nv_wr32(priv, 0x04010c + (i * 0x2000), 0xfffffeff); /* INTREN */
        }

        nv_wr32(priv, 0x002254, 0x10000000 | priv->user.bar.offset >> 12);

        nv_wr32(priv, 0x002a00, 0xffffffff);
        nv_wr32(priv, 0x002100, 0xffffffff);
        nv_wr32(priv, 0x002140, 0xbfffffff);
        return 0;
}

struct nouveau_oclass
nve0_fifo_oclass = {
        .handle = NV_ENGINE(FIFO, 0xe0),
        .ofuncs = &(struct nouveau_ofuncs) {
                .ctor = nve0_fifo_ctor,
                .dtor = nve0_fifo_dtor,
                .init = nve0_fifo_init,
                .fini = _nouveau_fifo_fini,
        },
};