drm/nouveau: port all engines to new engine module format
[deliverable/linux.git] / drivers / gpu / drm / nouveau / core / engine / fifo / nv04.c
1 /*
2 * Copyright 2012 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Ben Skeggs
23 */
24
25 #include <core/os.h>
26 #include <core/class.h>
27 #include <core/engctx.h>
28 #include <core/namedb.h>
29 #include <core/handle.h>
30 #include <core/ramht.h>
31
32 #include <subdev/instmem.h>
33 #include <subdev/instmem/nv04.h>
34 #include <subdev/timer.h>
35 #include <subdev/fb.h>
36
37 #include <engine/fifo.h>
38
39 #include "nv04.h"
40
/* RAMFC layout description for NV04.  Each entry maps one field of the
 * per-channel RAMFC context to its live PFIFO register, so that context
 * save/restore (see nv04_fifo_chan_fini()) can be table-driven:
 *   { bits, ctxs, ctxp, regs, regp }
 *     bits - field width in bits
 *     ctxs - bit shift of the field within the RAMFC dword
 *     ctxp - byte offset of the dword within the channel's RAMFC entry
 *     regs - bit shift of the field within the hardware register
 *     regp - the PFIFO register holding the live value
 * The empty terminator (bits == 0) ends iteration. */
static struct ramfc_desc
nv04_ramfc[] = {
	{ 32,  0, 0x00, 0, NV04_PFIFO_CACHE1_DMA_PUT },
	{ 32,  0, 0x04, 0, NV04_PFIFO_CACHE1_DMA_GET },
	{ 16,  0, 0x08, 0, NV04_PFIFO_CACHE1_DMA_INSTANCE },
	{ 16, 16, 0x08, 0, NV04_PFIFO_CACHE1_DMA_DCOUNT },
	{ 32,  0, 0x0c, 0, NV04_PFIFO_CACHE1_DMA_STATE },
	{ 32,  0, 0x10, 0, NV04_PFIFO_CACHE1_DMA_FETCH },
	{ 32,  0, 0x14, 0, NV04_PFIFO_CACHE1_ENGINE },
	{ 32,  0, 0x18, 0, NV04_PFIFO_CACHE1_PULL1 },
	{}
};
53
54 /*******************************************************************************
55 * FIFO channel objects
56 ******************************************************************************/
57
58 int
59 nv04_fifo_object_attach(struct nouveau_object *parent,
60 struct nouveau_object *object, u32 handle)
61 {
62 struct nv04_fifo_priv *priv = (void *)parent->engine;
63 struct nv04_fifo_chan *chan = (void *)parent;
64 u32 context, chid = chan->base.chid;
65 int ret;
66
67 if (nv_iclass(object, NV_GPUOBJ_CLASS))
68 context = nv_gpuobj(object)->addr >> 4;
69 else
70 context = 0x00000004; /* just non-zero */
71
72 switch (nv_engidx(object->engine)) {
73 case NVDEV_ENGINE_DMAOBJ:
74 case NVDEV_ENGINE_SW:
75 context |= 0x00000000;
76 break;
77 case NVDEV_ENGINE_GR:
78 context |= 0x00010000;
79 break;
80 case NVDEV_ENGINE_MPEG:
81 context |= 0x00020000;
82 break;
83 default:
84 return -EINVAL;
85 }
86
87 context |= 0x80000000; /* valid */
88 context |= chid << 24;
89
90 mutex_lock(&nv_subdev(priv)->mutex);
91 ret = nouveau_ramht_insert(priv->ramht, chid, handle, context);
92 mutex_unlock(&nv_subdev(priv)->mutex);
93 return ret;
94 }
95
/* Detach an engine object from a fifo channel.  'cookie' is the value
 * returned by nouveau_ramht_insert() at attach time, identifying the
 * RAMHT entry to remove. */
void
nv04_fifo_object_detach(struct nouveau_object *parent, int cookie)
{
	struct nv04_fifo_priv *priv = (void *)parent->engine;
	/* serialise RAMHT updates against concurrent attach/detach */
	mutex_lock(&nv_subdev(priv)->mutex);
	nouveau_ramht_remove(priv->ramht, cookie);
	mutex_unlock(&nv_subdev(priv)->mutex);
}
104
/* Constructor for an NV04 DMA fifo channel.  Validates the user-supplied
 * nv_channel_dma_class arguments, creates the base channel object, and
 * seeds the channel's 32-byte RAMFC entry so PFIFO can context-switch
 * to it.  Returns 0 on success or a negative errno. */
static int
nv04_fifo_chan_ctor(struct nouveau_object *parent,
		    struct nouveau_object *engine,
		    struct nouveau_oclass *oclass, void *data, u32 size,
		    struct nouveau_object **pobject)
{
	struct nv04_fifo_priv *priv = (void *)engine;
	struct nv04_fifo_chan *chan;
	struct nv_channel_dma_class *args = data;
	int ret;

	if (size < sizeof(*args))
		return -EINVAL;

	/* channels on this design may target the DMAOBJ, SW and GR
	 * engines (the bitmask below) */
	ret = nouveau_fifo_channel_create(parent, engine, oclass, 0, 0x800000,
					  0x10000, args->pushbuf,
					  (1 << NVDEV_ENGINE_DMAOBJ) |
					  (1 << NVDEV_ENGINE_SW) |
					  (1 << NVDEV_ENGINE_GR), &chan);
	*pobject = nv_object(chan);
	if (ret)
		return ret;

	nv_parent(chan)->object_attach = nv04_fifo_object_attach;
	nv_parent(chan)->object_detach = nv04_fifo_object_detach;
	chan->ramfc = chan->base.chid * 32; /* byte offset into RAMFC */

	/* initial DMA_PUT/DMA_GET both at the requested pushbuf offset,
	 * pushbuf instance pointer, and default fetch parameters */
	nv_wo32(priv->ramfc, chan->ramfc + 0x00, args->offset);
	nv_wo32(priv->ramfc, chan->ramfc + 0x04, args->offset);
	nv_wo32(priv->ramfc, chan->ramfc + 0x08, chan->base.pushgpu->addr >> 4);
	nv_wo32(priv->ramfc, chan->ramfc + 0x10,
			     NV_PFIFO_CACHE1_DMA_FETCH_TRIG_128_BYTES |
			     NV_PFIFO_CACHE1_DMA_FETCH_SIZE_128_BYTES |
#ifdef __BIG_ENDIAN
			     NV_PFIFO_CACHE1_BIG_ENDIAN |
#endif
			     NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_8);
	return 0;
}
144
145 void
146 nv04_fifo_chan_dtor(struct nouveau_object *object)
147 {
148 struct nv04_fifo_priv *priv = (void *)object->engine;
149 struct nv04_fifo_chan *chan = (void *)object;
150 struct ramfc_desc *c = priv->ramfc_desc;
151
152 do {
153 nv_wo32(priv->ramfc, chan->ramfc + c->ctxp, 0x00000000);
154 } while ((++c)->bits);
155
156 nouveau_fifo_channel_destroy(&chan->base);
157 }
158
159 int
160 nv04_fifo_chan_init(struct nouveau_object *object)
161 {
162 struct nv04_fifo_priv *priv = (void *)object->engine;
163 struct nv04_fifo_chan *chan = (void *)object;
164 u32 mask = 1 << chan->base.chid;
165 unsigned long flags;
166 int ret;
167
168 ret = nouveau_fifo_channel_init(&chan->base);
169 if (ret)
170 return ret;
171
172 spin_lock_irqsave(&priv->base.lock, flags);
173 nv_mask(priv, NV04_PFIFO_MODE, mask, mask);
174 spin_unlock_irqrestore(&priv->base.lock, flags);
175 return 0;
176 }
177
/* Take a fifo channel offline.  If the channel is currently resident in
 * CACHE1, its live register state is first saved back into its RAMFC
 * entry (field-by-field via the ramfc_desc table) and CACHE1 is parked
 * on the highest channel id.  Finally the channel's DMA-mode bit is
 * cleared and PFIFO is allowed to run again for other channels. */
int
nv04_fifo_chan_fini(struct nouveau_object *object, bool suspend)
{
	struct nv04_fifo_priv *priv = (void *)object->engine;
	struct nv04_fifo_chan *chan = (void *)object;
	struct nouveau_gpuobj *fctx = priv->ramfc;
	struct ramfc_desc *c;
	unsigned long flags;
	u32 data = chan->ramfc;	/* byte offset of this channel's entry */
	u32 chid;

	/* prevent fifo context switches */
	spin_lock_irqsave(&priv->base.lock, flags);
	nv_wr32(priv, NV03_PFIFO_CACHES, 0);

	/* if this channel is active, replace it with a null context */
	chid = nv_rd32(priv, NV03_PFIFO_CACHE1_PUSH1) & priv->base.max;
	if (chid == chan->base.chid) {
		/* stop the pusher and puller before touching state */
		nv_mask(priv, NV04_PFIFO_CACHE1_DMA_PUSH, 0x00000001, 0);
		nv_wr32(priv, NV03_PFIFO_CACHE1_PUSH0, 0);
		nv_mask(priv, NV04_PFIFO_CACHE1_PULL0, 0x00000001, 0);

		/* save each live register field into its RAMFC slot,
		 * merging with other fields sharing the same dword */
		c = priv->ramfc_desc;
		do {
			u32 rm = ((1ULL << c->bits) - 1) << c->regs;
			u32 cm = ((1ULL << c->bits) - 1) << c->ctxs;
			u32 rv = (nv_rd32(priv, c->regp) & rm) >> c->regs;
			u32 cv = (nv_ro32(fctx, c->ctxp + data) & ~cm);
			nv_wo32(fctx, c->ctxp + data, cv | (rv << c->ctxs));
		} while ((++c)->bits);

		/* then zero the live registers themselves */
		c = priv->ramfc_desc;
		do {
			nv_wr32(priv, c->regp, 0x00000000);
		} while ((++c)->bits);

		/* park CACHE1 on channel id 'max' and restart the
		 * pusher/puller */
		nv_wr32(priv, NV03_PFIFO_CACHE1_GET, 0);
		nv_wr32(priv, NV03_PFIFO_CACHE1_PUT, 0);
		nv_wr32(priv, NV03_PFIFO_CACHE1_PUSH1, priv->base.max);
		nv_wr32(priv, NV03_PFIFO_CACHE1_PUSH0, 1);
		nv_wr32(priv, NV04_PFIFO_CACHE1_PULL0, 1);
	}

	/* restore normal operation, after disabling dma mode */
	nv_mask(priv, NV04_PFIFO_MODE, 1 << chan->base.chid, 0);
	nv_wr32(priv, NV03_PFIFO_CACHES, 1);
	spin_unlock_irqrestore(&priv->base.lock, flags);

	return nouveau_fifo_channel_fini(&chan->base, suspend);
}
228
/* Object functions for the NV04 DMA fifo channel class; rd32/wr32 fall
 * through to the generic channel register accessors. */
static struct nouveau_ofuncs
nv04_fifo_ofuncs = {
	.ctor = nv04_fifo_chan_ctor,
	.dtor = nv04_fifo_chan_dtor,
	.init = nv04_fifo_chan_init,
	.fini = nv04_fifo_chan_fini,
	.rd32 = _nouveau_fifo_channel_rd32,
	.wr32 = _nouveau_fifo_channel_wr32,
};

/* channel classes exposed by this engine; 0x006e is the DMA channel
 * class (presumably NV03_CHANNEL_DMA -- confirm against core/class.h) */
static struct nouveau_oclass
nv04_fifo_sclass[] = {
	{ 0x006e, &nv04_fifo_ofuncs },
	{}
};
244
245 /*******************************************************************************
246 * FIFO context - basically just the instmem reserved for the channel
247 ******************************************************************************/
248
249 int
250 nv04_fifo_context_ctor(struct nouveau_object *parent,
251 struct nouveau_object *engine,
252 struct nouveau_oclass *oclass, void *data, u32 size,
253 struct nouveau_object **pobject)
254 {
255 struct nv04_fifo_base *base;
256 int ret;
257
258 ret = nouveau_fifo_context_create(parent, engine, oclass, NULL, 0x1000,
259 0x1000, NVOBJ_FLAG_HEAP, &base);
260 *pobject = nv_object(base);
261 if (ret)
262 return ret;
263
264 return 0;
265 }
266
/* Per-channel context class; only the constructor is NV04-specific,
 * everything else uses the generic fifo context implementation. */
static struct nouveau_oclass
nv04_fifo_cclass = {
	.handle = NV_ENGCTX(FIFO, 0x04),
	.ofuncs = &(struct nouveau_ofuncs) {
		.ctor = nv04_fifo_context_ctor,
		.dtor = _nouveau_fifo_context_dtor,
		.init = _nouveau_fifo_context_init,
		.fini = _nouveau_fifo_context_fini,
		.rd32 = _nouveau_fifo_context_rd32,
		.wr32 = _nouveau_fifo_context_wr32,
	},
};
279
280 /*******************************************************************************
281 * PFIFO engine
282 ******************************************************************************/
283
/* Halt PFIFO activity so software can safely modify shared state.  The
 * engine lock is taken here and released by the matching
 * nv04_fifo_start(); *pflags receives the saved irq flags for that
 * restore. */
void
nv04_fifo_pause(struct nouveau_fifo *pfifo, unsigned long *pflags)
__acquires(priv->base.lock)
{
	struct nv04_fifo_priv *priv = (void *)pfifo;
	unsigned long flags;

	spin_lock_irqsave(&priv->base.lock, flags);
	*pflags = flags;

	/* stop cache reloads/context switches, then the puller */
	nv_wr32(priv, NV03_PFIFO_CACHES, 0x00000000);
	nv_mask(priv, NV04_PFIFO_CACHE1_PULL0, 0x00000001, 0x00000000);

	/* in some cases the puller may be left in an inconsistent state
	 * if you try to stop it while it's busy translating handles.
	 * sometimes you get a CACHE_ERROR, sometimes it just fails
	 * silently; sending incorrect instance offsets to PGRAPH after
	 * it's started up again.
	 *
	 * to avoid this, we invalidate the most recently calculated
	 * instance.
	 */
	if (!nv_wait(priv, NV04_PFIFO_CACHE1_PULL0,
			   NV04_PFIFO_CACHE1_PULL0_HASH_BUSY, 0x00000000))
		nv_warn(priv, "timeout idling puller\n");

	/* ack any CACHE_ERROR raised while stopping the puller */
	if (nv_rd32(priv, NV04_PFIFO_CACHE1_PULL0) &
			  NV04_PFIFO_CACHE1_PULL0_HASH_FAILED)
		nv_wr32(priv, NV03_PFIFO_INTR_0, NV_PFIFO_INTR_CACHE_ERROR);

	nv_wr32(priv, NV04_PFIFO_CACHE1_HASH, 0x00000000);
}
316
317 void
318 nv04_fifo_start(struct nouveau_fifo *pfifo, unsigned long *pflags)
319 __releases(priv->base.lock)
320 {
321 struct nv04_fifo_priv *priv = (void *)pfifo;
322 unsigned long flags = *pflags;
323
324 nv_mask(priv, NV04_PFIFO_CACHE1_PULL0, 0x00000001, 0x00000001);
325 nv_wr32(priv, NV03_PFIFO_CACHES, 0x00000001);
326
327 spin_unlock_irqrestore(&priv->base.lock, flags);
328 }
329
330 static const char *
331 nv_dma_state_err(u32 state)
332 {
333 static const char * const desc[] = {
334 "NONE", "CALL_SUBR_ACTIVE", "INVALID_MTHD", "RET_SUBR_INACTIVE",
335 "INVALID_CMD", "IB_EMPTY"/* NV50+ */, "MEM_FAULT", "UNK"
336 };
337 return desc[(state >> 29) & 0x7];
338 }
339
/* Attempt to execute a faulting method in software.  'addr' encodes the
 * subchannel (bits 15:13) and method offset (bits 12:2).  Method 0x0000
 * is an object bind: if the bound handle belongs to the SW engine, the
 * subchannel is claimed for software.  Any other method on a
 * software-owned subchannel is forwarded to the bound object via
 * nv_call().  Returns true if the method was consumed and the hardware
 * need not see it again. */
static bool
nv04_fifo_swmthd(struct nv04_fifo_priv *priv, u32 chid, u32 addr, u32 data)
{
	struct nv04_fifo_chan *chan = NULL;
	struct nouveau_handle *bind;
	const int subc = (addr >> 13) & 0x7;
	const int mthd = addr & 0x1ffc;
	bool handled = false;
	unsigned long flags;
	u32 engine;

	spin_lock_irqsave(&priv->base.lock, flags);
	/* chid comes straight from hardware; validate before indexing */
	if (likely(chid >= priv->base.min && chid <= priv->base.max))
		chan = (void *)priv->base.channel[chid];
	if (unlikely(!chan))
		goto out;

	switch (mthd) {
	case 0x0000:
		/* object bind: look the handle up in the channel namedb */
		bind = nouveau_namedb_get(nv_namedb(chan), data);
		if (unlikely(!bind))
			break;

		if (nv_engidx(bind->object->engine) == NVDEV_ENGINE_SW) {
			/* remember the handle, and clear the subchannel's
			 * engine bits so hardware treats it as software */
			engine = 0x0000000f << (subc * 4);
			chan->subc[subc] = data;
			handled = true;

			nv_mask(priv, NV04_PFIFO_CACHE1_ENGINE, engine, 0);
		}

		nouveau_namedb_put(bind);
		break;
	default:
		/* only forward methods on software-owned subchannels */
		engine = nv_rd32(priv, NV04_PFIFO_CACHE1_ENGINE);
		if (unlikely(((engine >> (subc * 4)) & 0xf) != 0))
			break;

		bind = nouveau_namedb_get(nv_namedb(chan), chan->subc[subc]);
		if (likely(bind)) {
			if (!nv_call(bind->object, mthd, data))
				handled = true;
			nouveau_namedb_put(bind);
		}
		break;
	}

out:
	spin_unlock_irqrestore(&priv->base.lock, flags);
	return handled;
}
391
/* PFIFO interrupt handler, shared by all designs using this engine
 * implementation (the NV40/NV50 branches below handle their register
 * layout differences).  Services CACHE_ERROR (trying software methods
 * first), DMA_PUSHER and SEMAPHORE interrupts, re-reading the interrupt
 * status each pass; after 100 passes with interrupts still pending the
 * engine's interrupts are masked off entirely. */
void
nv04_fifo_intr(struct nouveau_subdev *subdev)
{
	struct nouveau_device *device = nv_device(subdev);
	struct nv04_fifo_priv *priv = (void *)subdev;
	uint32_t status, reassign;
	int cnt = 0;

	/* remember whether context switching was enabled so it can be
	 * restored after each pass */
	reassign = nv_rd32(priv, NV03_PFIFO_CACHES) & 1;
	while ((status = nv_rd32(priv, NV03_PFIFO_INTR_0)) && (cnt++ < 100)) {
		uint32_t chid, get;

		/* pause context switching while servicing */
		nv_wr32(priv, NV03_PFIFO_CACHES, 0);

		chid = nv_rd32(priv, NV03_PFIFO_CACHE1_PUSH1) & priv->base.max;
		get = nv_rd32(priv, NV03_PFIFO_CACHE1_GET);

		if (status & NV_PFIFO_INTR_CACHE_ERROR) {
			uint32_t mthd, data;
			int ptr;

			/* NV_PFIFO_CACHE1_GET actually goes to 0xffc before
			 * wrapping on my G80 chips, but CACHE1 isn't big
			 * enough for this much data.. Tests show that it
			 * wraps around to the start at GET=0x800.. No clue
			 * as to why..
			 */
			ptr = (get & 0x7ff) >> 2;

			/* method/data FIFO moved on NV40 */
			if (device->card_type < NV_40) {
				mthd = nv_rd32(priv,
					NV04_PFIFO_CACHE1_METHOD(ptr));
				data = nv_rd32(priv,
					NV04_PFIFO_CACHE1_DATA(ptr));
			} else {
				mthd = nv_rd32(priv,
					NV40_PFIFO_CACHE1_METHOD(ptr));
				data = nv_rd32(priv,
					NV40_PFIFO_CACHE1_DATA(ptr));
			}

			/* give software objects first crack at the method */
			if (!nv04_fifo_swmthd(priv, chid, mthd, data)) {
				nv_info(priv, "CACHE_ERROR - Ch %d/%d "
					      "Mthd 0x%04x Data 0x%08x\n",
					chid, (mthd >> 13) & 7, mthd & 0x1ffc,
					data);
			}

			/* ack the error and skip the offending entry:
			 * stop the pusher, bump GET past it with the
			 * pusher disabled, then restart everything */
			nv_wr32(priv, NV04_PFIFO_CACHE1_DMA_PUSH, 0);
			nv_wr32(priv, NV03_PFIFO_INTR_0,
				NV_PFIFO_INTR_CACHE_ERROR);

			nv_wr32(priv, NV03_PFIFO_CACHE1_PUSH0,
				nv_rd32(priv, NV03_PFIFO_CACHE1_PUSH0) & ~1);
			nv_wr32(priv, NV03_PFIFO_CACHE1_GET, get + 4);
			nv_wr32(priv, NV03_PFIFO_CACHE1_PUSH0,
				nv_rd32(priv, NV03_PFIFO_CACHE1_PUSH0) | 1);
			nv_wr32(priv, NV04_PFIFO_CACHE1_HASH, 0);

			nv_wr32(priv, NV04_PFIFO_CACHE1_DMA_PUSH,
				nv_rd32(priv, NV04_PFIFO_CACHE1_DMA_PUSH) | 1);
			nv_wr32(priv, NV04_PFIFO_CACHE1_PULL0, 1);

			status &= ~NV_PFIFO_INTR_CACHE_ERROR;
		}

		if (status & NV_PFIFO_INTR_DMA_PUSHER) {
			/* 0x003244/0x003240: current DMA get/put pointers,
			 * 0x003220: push state, 0x003228: pusher state */
			u32 dma_get = nv_rd32(priv, 0x003244);
			u32 dma_put = nv_rd32(priv, 0x003240);
			u32 push = nv_rd32(priv, 0x003220);
			u32 state = nv_rd32(priv, 0x003228);

			if (device->card_type == NV_50) {
				/* NV50 adds high-order get/put halves and
				 * indirect-buffer get/put */
				u32 ho_get = nv_rd32(priv, 0x003328);
				u32 ho_put = nv_rd32(priv, 0x003320);
				u32 ib_get = nv_rd32(priv, 0x003334);
				u32 ib_put = nv_rd32(priv, 0x003330);

				nv_info(priv, "DMA_PUSHER - Ch %d Get 0x%02x%08x "
				     "Put 0x%02x%08x IbGet 0x%08x IbPut 0x%08x "
				     "State 0x%08x (err: %s) Push 0x%08x\n",
					chid, ho_get, dma_get, ho_put,
					dma_put, ib_get, ib_put, state,
					nv_dma_state_err(state),
					push);

				/* METHOD_COUNT, in DMA_STATE on earlier chipsets */
				nv_wr32(priv, 0x003364, 0x00000000);
				/* skip to PUT to get the pusher unstuck */
				if (dma_get != dma_put || ho_get != ho_put) {
					nv_wr32(priv, 0x003244, dma_put);
					nv_wr32(priv, 0x003328, ho_put);
				} else
				if (ib_get != ib_put) {
					nv_wr32(priv, 0x003334, ib_put);
				}
			} else {
				nv_info(priv, "DMA_PUSHER - Ch %d Get 0x%08x "
					     "Put 0x%08x State 0x%08x (err: %s) Push 0x%08x\n",
					chid, dma_get, dma_put, state,
					nv_dma_state_err(state), push);

				if (dma_get != dma_put)
					nv_wr32(priv, 0x003244, dma_put);
			}

			/* clear pusher state, re-enable push, ack intr */
			nv_wr32(priv, 0x003228, 0x00000000);
			nv_wr32(priv, 0x003220, 0x00000001);
			nv_wr32(priv, 0x002100, NV_PFIFO_INTR_DMA_PUSHER);
			status &= ~NV_PFIFO_INTR_DMA_PUSHER;
		}

		if (status & NV_PFIFO_INTR_SEMAPHORE) {
			uint32_t sem;

			status &= ~NV_PFIFO_INTR_SEMAPHORE;
			nv_wr32(priv, NV03_PFIFO_INTR_0,
				NV_PFIFO_INTR_SEMAPHORE);

			sem = nv_rd32(priv, NV10_PFIFO_CACHE1_SEMAPHORE);
			nv_wr32(priv, NV10_PFIFO_CACHE1_SEMAPHORE, sem | 0x1);

			/* skip past the semaphore method and resume */
			nv_wr32(priv, NV03_PFIFO_CACHE1_GET, get + 4);
			nv_wr32(priv, NV04_PFIFO_CACHE1_PULL0, 1);
		}

		if (device->card_type == NV_50) {
			/* bit 4: VM fault (forwarded to PFB trap handler) */
			if (status & 0x00000010) {
				nv50_fb_trap(nouveau_fb(priv), 1);
				status &= ~0x00000010;
				nv_wr32(priv, 0x002100, 0x00000010);
			}
		}

		if (status) {
			nv_info(priv, "unknown intr 0x%08x, ch %d\n",
				status, chid);
			nv_wr32(priv, NV03_PFIFO_INTR_0, status);
			status = 0;
		}

		/* restore the context-switch enable saved above */
		nv_wr32(priv, NV03_PFIFO_CACHES, reassign);
	}

	if (status) {
		/* still screaming after 100 passes; mask PFIFO's
		 * interrupts (0x002140) and the PMC master enable bit */
		nv_info(priv, "still angry after %d spins, halt\n", cnt);
		nv_wr32(priv, 0x002140, 0);
		nv_wr32(priv, 0x000140, 0);
	}

	/* ack PFIFO's bit in PMC's interrupt status */
	nv_wr32(priv, 0x000100, 0x00000100);
}
543
/* PFIFO engine constructor for NV04: creates the base fifo with channel
 * ids 0..15 and takes references on the RAMHT/RAMRO/RAMFC objects that
 * NV04 instmem pre-allocates (dropped again in nv04_fifo_dtor()). */
static int
nv04_fifo_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
	       struct nouveau_oclass *oclass, void *data, u32 size,
	       struct nouveau_object **pobject)
{
	struct nv04_instmem_priv *imem = nv04_instmem(parent);
	struct nv04_fifo_priv *priv;
	int ret;

	ret = nouveau_fifo_create(parent, engine, oclass, 0, 15, &priv);
	*pobject = nv_object(priv);
	if (ret)
		return ret;

	nouveau_ramht_ref(imem->ramht, &priv->ramht);
	nouveau_gpuobj_ref(imem->ramro, &priv->ramro);
	nouveau_gpuobj_ref(imem->ramfc, &priv->ramfc);

	/* 0x00000100: PFIFO's bit in PMC enable/intr registers */
	nv_subdev(priv)->unit = 0x00000100;
	nv_subdev(priv)->intr = nv04_fifo_intr;
	nv_engine(priv)->cclass = &nv04_fifo_cclass;
	nv_engine(priv)->sclass = nv04_fifo_sclass;
	priv->base.pause = nv04_fifo_pause;
	priv->base.start = nv04_fifo_start;
	priv->ramfc_desc = nv04_ramfc;
	return 0;
}
571
/* Engine destructor: drop the RAMFC/RAMRO/RAMHT references taken in
 * nv04_fifo_ctor(), then destroy the base fifo. */
void
nv04_fifo_dtor(struct nouveau_object *object)
{
	struct nv04_fifo_priv *priv = (void *)object;
	nouveau_gpuobj_ref(NULL, &priv->ramfc);
	nouveau_gpuobj_ref(NULL, &priv->ramro);
	nouveau_ramht_ref(NULL, &priv->ramht);
	nouveau_fifo_destroy(&priv->base);
}
581
/* Bring the PFIFO engine up: program delay/timeslice defaults, point the
 * hardware at RAMHT/RAMRO/RAMFC, park CACHE1 on the highest channel id,
 * ack and enable all interrupts, then start the cache. */
int
nv04_fifo_init(struct nouveau_object *object)
{
	struct nv04_fifo_priv *priv = (void *)object;
	int ret;

	ret = nouveau_fifo_init(&priv->base);
	if (ret)
		return ret;

	nv_wr32(priv, NV04_PFIFO_DELAY_0, 0x000000ff);
	nv_wr32(priv, NV04_PFIFO_DMA_TIMESLICE, 0x0101ffff);

	/* hash table config: search depth, size (log2 - 9), address */
	nv_wr32(priv, NV03_PFIFO_RAMHT, (0x03 << 24) /* search 128 */ |
				       ((priv->ramht->bits - 9) << 16) |
				        (priv->ramht->base.addr >> 8));
	nv_wr32(priv, NV03_PFIFO_RAMRO, priv->ramro->addr >> 8);
	nv_wr32(priv, NV03_PFIFO_RAMFC, priv->ramfc->addr >> 8);

	/* park CACHE1 on channel id 'max' (no real channel resident) */
	nv_wr32(priv, NV03_PFIFO_CACHE1_PUSH1, priv->base.max);

	/* ack anything pending, then enable all interrupt sources */
	nv_wr32(priv, NV03_PFIFO_INTR_0, 0xffffffff);
	nv_wr32(priv, NV03_PFIFO_INTR_EN_0, 0xffffffff);

	nv_wr32(priv, NV03_PFIFO_CACHE1_PUSH0, 1);
	nv_wr32(priv, NV04_PFIFO_CACHE1_PULL0, 1);
	nv_wr32(priv, NV03_PFIFO_CACHES, 1);
	return 0;
}
611
/* Engine class definition registered by the device code; suspend/fini
 * uses the generic fifo implementation. */
struct nouveau_oclass
nv04_fifo_oclass = {
	.handle = NV_ENGINE(FIFO, 0x04),
	.ofuncs = &(struct nouveau_ofuncs) {
		.ctor = nv04_fifo_ctor,
		.dtor = nv04_fifo_dtor,
		.init = nv04_fifo_init,
		.fini = _nouveau_fifo_fini,
	},
};
This page took 0.048061 seconds and 5 git commands to generate.