drm/nouveau: port all engines to new engine module format
[deliverable/linux.git] / drivers / gpu / drm / nouveau / core / engine / graph / nv40.c
1 /*
2 * Copyright 2012 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Ben Skeggs
23 */
24
25 #include <core/os.h>
26 #include <core/class.h>
27 #include <core/handle.h>
28 #include <core/engctx.h>
29
30 #include <subdev/fb.h>
31 #include <subdev/timer.h>
32
33 #include <engine/graph.h>
34 #include <engine/fifo.h>
35
36 #include "nv40.h"
37 #include "regs.h"
38
/* Engine-private state for the NV40-family PGRAPH engine. */
struct nv40_graph_priv {
	struct nouveau_graph base;	/* common graph engine base object */
	u32 size;			/* context image size, filled in by nv40_grctx_init() */
};
43
/* Per-channel PGRAPH context object (no nv40-specific state beyond the base). */
struct nv40_graph_chan {
	struct nouveau_graph_chan base;
};
47
48 /*******************************************************************************
49 * Graphics object classes
50 ******************************************************************************/
51
52 static int
53 nv40_graph_object_ctor(struct nouveau_object *parent,
54 struct nouveau_object *engine,
55 struct nouveau_oclass *oclass, void *data, u32 size,
56 struct nouveau_object **pobject)
57 {
58 struct nouveau_gpuobj *obj;
59 int ret;
60
61 ret = nouveau_gpuobj_create(parent, engine, oclass, 0, parent,
62 20, 16, 0, &obj);
63 *pobject = nv_object(obj);
64 if (ret)
65 return ret;
66
67 nv_wo32(obj, 0x00, nv_mclass(obj));
68 nv_wo32(obj, 0x04, 0x00000000);
69 nv_wo32(obj, 0x08, 0x00000000);
70 #ifdef __BIG_ENDIAN
71 nv_mo32(obj, 0x08, 0x01000000, 0x01000000);
72 #endif
73 nv_wo32(obj, 0x0c, 0x00000000);
74 nv_wo32(obj, 0x10, 0x00000000);
75 return 0;
76 }
77
/* Object functions shared by every graphics object class on nv40: only the
 * constructor is nv40-specific; everything else is the generic gpuobj code. */
struct nouveau_ofuncs
nv40_graph_ofuncs = {
	.ctor = nv40_graph_object_ctor,
	.dtor = _nouveau_gpuobj_dtor,
	.init = _nouveau_gpuobj_init,
	.fini = _nouveau_gpuobj_fini,
	.rd32 = _nouveau_gpuobj_rd32,
	.wr32 = _nouveau_gpuobj_wr32,
};
87
/* Object classes exposed on NV40-class chipsets (0x4097 CURIE 3D class). */
static struct nouveau_oclass
nv40_graph_sclass[] = {
	{ 0x0012, &nv40_graph_ofuncs, NULL }, /* beta1 */
	{ 0x0019, &nv40_graph_ofuncs, NULL }, /* clip */
	{ 0x0030, &nv40_graph_ofuncs, NULL }, /* null */
	{ 0x0039, &nv40_graph_ofuncs, NULL }, /* m2mf */
	{ 0x0043, &nv40_graph_ofuncs, NULL }, /* rop */
	{ 0x0044, &nv40_graph_ofuncs, NULL }, /* patt */
	{ 0x004a, &nv40_graph_ofuncs, NULL }, /* gdi */
	{ 0x0062, &nv40_graph_ofuncs, NULL }, /* surf2d */
	{ 0x0072, &nv40_graph_ofuncs, NULL }, /* beta4 */
	{ 0x0089, &nv40_graph_ofuncs, NULL }, /* sifm */
	{ 0x008a, &nv40_graph_ofuncs, NULL }, /* ifc */
	{ 0x009f, &nv40_graph_ofuncs, NULL }, /* imageblit */
	{ 0x3062, &nv40_graph_ofuncs, NULL }, /* surf2d (nv40) */
	{ 0x3089, &nv40_graph_ofuncs, NULL }, /* sifm (nv40) */
	{ 0x309e, &nv40_graph_ofuncs, NULL }, /* swzsurf (nv40) */
	{ 0x4097, &nv40_graph_ofuncs, NULL }, /* curie */
	{},
};
108
/* Object classes for NV44-class chipsets; identical to nv40_graph_sclass
 * except for the 3D class id (0x4497 instead of 0x4097). */
static struct nouveau_oclass
nv44_graph_sclass[] = {
	{ 0x0012, &nv40_graph_ofuncs, NULL }, /* beta1 */
	{ 0x0019, &nv40_graph_ofuncs, NULL }, /* clip */
	{ 0x0030, &nv40_graph_ofuncs, NULL }, /* null */
	{ 0x0039, &nv40_graph_ofuncs, NULL }, /* m2mf */
	{ 0x0043, &nv40_graph_ofuncs, NULL }, /* rop */
	{ 0x0044, &nv40_graph_ofuncs, NULL }, /* patt */
	{ 0x004a, &nv40_graph_ofuncs, NULL }, /* gdi */
	{ 0x0062, &nv40_graph_ofuncs, NULL }, /* surf2d */
	{ 0x0072, &nv40_graph_ofuncs, NULL }, /* beta4 */
	{ 0x0089, &nv40_graph_ofuncs, NULL }, /* sifm */
	{ 0x008a, &nv40_graph_ofuncs, NULL }, /* ifc */
	{ 0x009f, &nv40_graph_ofuncs, NULL }, /* imageblit */
	{ 0x3062, &nv40_graph_ofuncs, NULL }, /* surf2d (nv40) */
	{ 0x3089, &nv40_graph_ofuncs, NULL }, /* sifm (nv40) */
	{ 0x309e, &nv40_graph_ofuncs, NULL }, /* swzsurf (nv40) */
	{ 0x4497, &nv40_graph_ofuncs, NULL }, /* curie */
	{},
};
129
130 /*******************************************************************************
131 * PGRAPH context
132 ******************************************************************************/
133
134 static int
135 nv40_graph_context_ctor(struct nouveau_object *parent,
136 struct nouveau_object *engine,
137 struct nouveau_oclass *oclass, void *data, u32 size,
138 struct nouveau_object **pobject)
139 {
140 struct nv40_graph_priv *priv = (void *)engine;
141 struct nv40_graph_chan *chan;
142 int ret;
143
144 ret = nouveau_graph_context_create(parent, engine, oclass, NULL,
145 priv->size, 16,
146 NVOBJ_FLAG_ZERO_ALLOC, &chan);
147 *pobject = nv_object(chan);
148 if (ret)
149 return ret;
150
151 nv40_grctx_fill(nv_device(priv), nv_gpuobj(chan));
152 nv_wo32(chan, 0x00000, nv_gpuobj(chan)->addr >> 4);
153 return 0;
154 }
155
156 static int
157 nv40_graph_context_fini(struct nouveau_object *object, bool suspend)
158 {
159 struct nv04_graph_priv *priv = (void *)object->engine;
160 struct nv04_graph_chan *chan = (void *)object;
161 u32 inst = 0x01000000 | nv_gpuobj(chan)->addr >> 4;
162 int ret = 0;
163
164 nv_mask(priv, 0x400720, 0x00000001, 0x00000000);
165
166 if (nv_rd32(priv, 0x40032c) == inst) {
167 if (suspend) {
168 nv_wr32(priv, 0x400720, 0x00000000);
169 nv_wr32(priv, 0x400784, inst);
170 nv_mask(priv, 0x400310, 0x00000020, 0x00000020);
171 nv_mask(priv, 0x400304, 0x00000001, 0x00000001);
172 if (!nv_wait(priv, 0x400300, 0x00000001, 0x00000000)) {
173 u32 insn = nv_rd32(priv, 0x400308);
174 nv_warn(priv, "ctxprog timeout 0x%08x\n", insn);
175 ret = -EBUSY;
176 }
177 }
178
179 nv_mask(priv, 0x40032c, 0x01000000, 0x00000000);
180 }
181
182 if (nv_rd32(priv, 0x400330) == inst)
183 nv_mask(priv, 0x400330, 0x01000000, 0x00000000);
184
185 nv_mask(priv, 0x400720, 0x00000001, 0x00000001);
186 return ret;
187 }
188
/* Context class: per-channel PGRAPH context objects for NV40. */
static struct nouveau_oclass
nv40_graph_cclass = {
	.handle = NV_ENGCTX(GR, 0x40),
	.ofuncs = &(struct nouveau_ofuncs) {
		.ctor = nv40_graph_context_ctor,
		.dtor = _nouveau_graph_context_dtor,
		.init = _nouveau_graph_context_init,
		.fini = nv40_graph_context_fini,
		.rd32 = _nouveau_graph_context_rd32,
		.wr32 = _nouveau_graph_context_wr32,
	},
};
201
202 /*******************************************************************************
203 * PGRAPH engine/subdev functions
204 ******************************************************************************/
205
/* Program PGRAPH's copy of tiling region 'i' from the FB subdev's current
 * settings.  PFIFO is paused and PGRAPH idled around the register writes,
 * since the tile registers must not change while the engine is active.
 * The register layout varies by chipset family, hence the switch.
 */
static void
nv40_graph_tile_prog(struct nouveau_engine *engine, int i)
{
	struct nouveau_fb_tile *tile = &nouveau_fb(engine)->tile.region[i];
	struct nouveau_fifo *pfifo = nouveau_fifo(engine);
	struct nv40_graph_priv *priv = (void *)engine;
	unsigned long flags;

	pfifo->pause(pfifo, &flags);
	nv04_graph_idle(priv);

	switch (nv_device(priv)->chipset) {
	case 0x40:
	case 0x41: /* guess */
	case 0x42:
	case 0x43:
	case 0x45: /* guess */
	case 0x4e:
		/* nv20-style bank + a second nv40 bank */
		nv_wr32(priv, NV20_PGRAPH_TSIZE(i), tile->pitch);
		nv_wr32(priv, NV20_PGRAPH_TLIMIT(i), tile->limit);
		nv_wr32(priv, NV20_PGRAPH_TILE(i), tile->addr);
		nv_wr32(priv, NV40_PGRAPH_TSIZE1(i), tile->pitch);
		nv_wr32(priv, NV40_PGRAPH_TLIMIT1(i), tile->limit);
		nv_wr32(priv, NV40_PGRAPH_TILE1(i), tile->addr);
		break;
	case 0x44:
	case 0x4a:
		/* only the nv20-style bank exists on these */
		nv_wr32(priv, NV20_PGRAPH_TSIZE(i), tile->pitch);
		nv_wr32(priv, NV20_PGRAPH_TLIMIT(i), tile->limit);
		nv_wr32(priv, NV20_PGRAPH_TILE(i), tile->addr);
		break;
	case 0x46:
	case 0x47:
	case 0x49:
	case 0x4b:
	case 0x4c:
	case 0x67:
	default:
		/* nv47-style bank + the nv40 second bank */
		nv_wr32(priv, NV47_PGRAPH_TSIZE(i), tile->pitch);
		nv_wr32(priv, NV47_PGRAPH_TLIMIT(i), tile->limit);
		nv_wr32(priv, NV47_PGRAPH_TILE(i), tile->addr);
		nv_wr32(priv, NV40_PGRAPH_TSIZE1(i), tile->pitch);
		nv_wr32(priv, NV40_PGRAPH_TLIMIT1(i), tile->limit);
		nv_wr32(priv, NV40_PGRAPH_TILE1(i), tile->addr);
		break;
	}

	pfifo->start(pfifo, &flags);
}
255
/* PGRAPH interrupt handler: decode the trapped method, try to handle
 * ILLEGAL_MTHD in software via the object's handler, ack the interrupt,
 * and log anything that remains unhandled.
 */
static void
nv40_graph_intr(struct nouveau_subdev *subdev)
{
	struct nv40_graph_priv *priv = (void *)subdev;
	struct nouveau_engine *engine = nv_engine(subdev);
	struct nouveau_handle *handle = NULL;
	u32 stat = nv_rd32(priv, NV03_PGRAPH_INTR);
	u32 nsource = nv_rd32(priv, NV03_PGRAPH_NSOURCE);
	u32 nstatus = nv_rd32(priv, NV03_PGRAPH_NSTATUS);
	u32 inst = (nv_rd32(priv, 0x40032c) & 0x000fffff) << 4;	/* current context instance */
	u32 addr = nv_rd32(priv, NV04_PGRAPH_TRAPPED_ADDR);
	u32 subc = (addr & 0x00070000) >> 16;			/* subchannel */
	u32 mthd = (addr & 0x00001ffc);				/* method offset */
	u32 data = nv_rd32(priv, NV04_PGRAPH_TRAPPED_DATA);
	u32 class = nv_rd32(priv, 0x400160 + subc * 4) & 0xffff;
	u32 show = stat;					/* bits still to report */

	if (stat & NV_PGRAPH_INTR_ERROR) {
		if (nsource & NV03_PGRAPH_NSOURCE_ILLEGAL_MTHD) {
			/* give the software object a chance to implement
			 * the method; suppress the report if it did */
			handle = nouveau_engctx_lookup_class(engine, inst, class);
			if (handle && !nv_call(handle->object, mthd, data))
				show &= ~NV_PGRAPH_INTR_ERROR;
			nouveau_engctx_handle_put(handle);
		}

		if (nsource & NV03_PGRAPH_NSOURCE_DMA_VTX_PROTECTION) {
			/* zero-bit rmw of 0x402000; presumably the access
			 * itself acks the fault -- NOTE(review): confirm */
			nv_mask(priv, 0x402000, 0, 0);
		}
	}

	/* ack the interrupt and re-enable PGRAPH fifo access */
	nv_wr32(priv, NV03_PGRAPH_INTR, stat);
	nv_wr32(priv, NV04_PGRAPH_FIFO, 0x00000001);

	if (show) {
		nv_info(priv, "");
		nouveau_bitfield_print(nv10_graph_intr_name, show);
		printk(" nsource:");
		nouveau_bitfield_print(nv04_graph_nsource, nsource);
		printk(" nstatus:");
		nouveau_bitfield_print(nv10_graph_nstatus, nstatus);
		printk("\n");
		nv_error(priv, "ch 0x%08x subc %d class 0x%04x "
			       "mthd 0x%04x data 0x%08x\n",
			 inst, subc, class, mthd, data);
	}
}
302
303 static int
304 nv40_graph_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
305 struct nouveau_oclass *oclass, void *data, u32 size,
306 struct nouveau_object **pobject)
307 {
308 struct nv40_graph_priv *priv;
309 int ret;
310
311 ret = nouveau_graph_create(parent, engine, oclass, true, &priv);
312 *pobject = nv_object(priv);
313 if (ret)
314 return ret;
315
316 nv_subdev(priv)->unit = 0x00001000;
317 nv_subdev(priv)->intr = nv40_graph_intr;
318 nv_engine(priv)->cclass = &nv40_graph_cclass;
319 if (nv44_graph_class(priv))
320 nv_engine(priv)->sclass = nv44_graph_sclass;
321 else
322 nv_engine(priv)->sclass = nv40_graph_sclass;
323 nv_engine(priv)->tile_prog = nv40_graph_tile_prog;
324 return 0;
325 }
326
/* One-time PGRAPH bring-up: generate the context program, reset interrupt
 * and debug state, apply per-chipset magic register values, program the
 * tiling regions, and mirror the framebuffer configuration into PGRAPH.
 * Most register values here are hardware magic taken from the original
 * driver; their individual meanings are not documented.
 */
static int
nv40_graph_init(struct nouveau_object *object)
{
	struct nouveau_engine *engine = nv_engine(object);
	struct nouveau_fb *pfb = nouveau_fb(object);
	struct nv40_graph_priv *priv = (void *)engine;
	int ret, i, j;
	u32 vramsz;

	ret = nouveau_graph_init(&priv->base);
	if (ret)
		return ret;

	/* generate and upload context program */
	nv40_grctx_init(nv_device(priv), &priv->size);

	/* No context present currently */
	nv_wr32(priv, NV40_PGRAPH_CTXCTL_CUR, 0x00000000);

	/* ack any pending interrupts, enable them all */
	nv_wr32(priv, NV03_PGRAPH_INTR   , 0xFFFFFFFF);
	nv_wr32(priv, NV40_PGRAPH_INTR_EN, 0xFFFFFFFF);

	nv_wr32(priv, NV04_PGRAPH_DEBUG_0, 0xFFFFFFFF);
	nv_wr32(priv, NV04_PGRAPH_DEBUG_0, 0x00000000);
	nv_wr32(priv, NV04_PGRAPH_DEBUG_1, 0x401287c0);
	nv_wr32(priv, NV04_PGRAPH_DEBUG_3, 0xe0de8055);
	nv_wr32(priv, NV10_PGRAPH_DEBUG_4, 0x00008000);
	nv_wr32(priv, NV04_PGRAPH_LIMIT_VIOL_PIX, 0x00be3c5f);

	nv_wr32(priv, NV10_PGRAPH_CTX_CONTROL, 0x10010100);
	nv_wr32(priv, NV10_PGRAPH_STATE      , 0xFFFFFFFF);

	/* 0x1540 reads as a unit mask; write the index of its lowest set
	 * bit to 0x405000 -- NOTE(review): exact semantics inferred from
	 * the register use only */
	j = nv_rd32(priv, 0x1540) & 0xff;
	if (j) {
		for (i = 0; !(j & 1); j >>= 1, i++)
			;
		nv_wr32(priv, 0x405000, i);
	}

	/* same magic pair lives at different offsets on nv40 vs the rest */
	if (nv_device(priv)->chipset == 0x40) {
		nv_wr32(priv, 0x4009b0, 0x83280fff);
		nv_wr32(priv, 0x4009b4, 0x000000a0);
	} else {
		nv_wr32(priv, 0x400820, 0x83280eff);
		nv_wr32(priv, 0x400824, 0x000000a0);
	}

	/* per-chipset magic values */
	switch (nv_device(priv)->chipset) {
	case 0x40:
	case 0x45:
		nv_wr32(priv, 0x4009b8, 0x0078e366);
		nv_wr32(priv, 0x4009bc, 0x0000014c);
		break;
	case 0x41:
	case 0x42: /* pciid also 0x00Cx */
	/* case 0x0120: XXX (pciid) */
		nv_wr32(priv, 0x400828, 0x007596ff);
		nv_wr32(priv, 0x40082c, 0x00000108);
		break;
	case 0x43:
		nv_wr32(priv, 0x400828, 0x0072cb77);
		nv_wr32(priv, 0x40082c, 0x00000108);
		break;
	case 0x44:
	case 0x46: /* G72 */
	case 0x4a:
	case 0x4c: /* G7x-based C51 */
	case 0x4e:
		nv_wr32(priv, 0x400860, 0);
		nv_wr32(priv, 0x400864, 0);
		break;
	case 0x47: /* G70 */
	case 0x49: /* G71 */
	case 0x4b: /* G73 */
		nv_wr32(priv, 0x400828, 0x07830610);
		nv_wr32(priv, 0x40082c, 0x0000016A);
		break;
	default:
		break;
	}

	nv_wr32(priv, 0x400b38, 0x2ffff800);
	nv_wr32(priv, 0x400b3c, 0x00006000);

	/* Tiling related stuff. */
	switch (nv_device(priv)->chipset) {
	case 0x44:
	case 0x4a:
		nv_wr32(priv, 0x400bc4, 0x1003d888);
		nv_wr32(priv, 0x400bbc, 0xb7a7b500);
		break;
	case 0x46:
		nv_wr32(priv, 0x400bc4, 0x0000e024);
		nv_wr32(priv, 0x400bbc, 0xb7a7b520);
		break;
	case 0x4c:
	case 0x4e:
	case 0x67:
		nv_wr32(priv, 0x400bc4, 0x1003d888);
		nv_wr32(priv, 0x400bbc, 0xb7a7b540);
		break;
	default:
		break;
	}

	/* Turn all the tiling regions off. */
	for (i = 0; i < pfb->tile.regions; i++)
		engine->tile_prog(engine, i);

	/* begin RAM config: mirror PFB config (0x100200/0x100204) into
	 * PGRAPH, with register offsets varying by chipset */
	vramsz = pci_resource_len(nv_device(priv)->pdev, 0) - 1;
	switch (nv_device(priv)->chipset) {
	case 0x40:
		nv_wr32(priv, 0x4009A4, nv_rd32(priv, 0x100200));
		nv_wr32(priv, 0x4009A8, nv_rd32(priv, 0x100204));
		nv_wr32(priv, 0x4069A4, nv_rd32(priv, 0x100200));
		nv_wr32(priv, 0x4069A8, nv_rd32(priv, 0x100204));
		nv_wr32(priv, 0x400820, 0);
		nv_wr32(priv, 0x400824, 0);
		nv_wr32(priv, 0x400864, vramsz);
		nv_wr32(priv, 0x400868, vramsz);
		break;
	default:
		switch (nv_device(priv)->chipset) {
		case 0x41:
		case 0x42:
		case 0x43:
		case 0x45:
		case 0x4e:
		case 0x44:
		case 0x4a:
			nv_wr32(priv, 0x4009F0, nv_rd32(priv, 0x100200));
			nv_wr32(priv, 0x4009F4, nv_rd32(priv, 0x100204));
			break;
		default:
			nv_wr32(priv, 0x400DF0, nv_rd32(priv, 0x100200));
			nv_wr32(priv, 0x400DF4, nv_rd32(priv, 0x100204));
			break;
		}
		nv_wr32(priv, 0x4069F0, nv_rd32(priv, 0x100200));
		nv_wr32(priv, 0x4069F4, nv_rd32(priv, 0x100204));
		nv_wr32(priv, 0x400840, 0);
		nv_wr32(priv, 0x400844, 0);
		nv_wr32(priv, 0x4008A0, vramsz);
		nv_wr32(priv, 0x4008A4, vramsz);
		break;
	}

	return 0;
}
477
/* Engine class registered with the device for NV40-family PGRAPH. */
struct nouveau_oclass
nv40_graph_oclass = {
	.handle = NV_ENGINE(GR, 0x40),
	.ofuncs = &(struct nouveau_ofuncs) {
		.ctor = nv40_graph_ctor,
		.dtor = _nouveau_graph_dtor,
		.init = nv40_graph_init,
		.fini = _nouveau_graph_fini,
	},
};
This page took 0.053815 seconds and 5 git commands to generate.