/*
 * Copyright 2012 Red Hat Inc.
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
26 #include <core/class.h>
27 #include <core/engctx.h>
28 #include <core/namedb.h>
29 #include <core/handle.h>
30 #include <core/ramht.h>
31 #include <core/event.h>
33 #include <subdev/instmem.h>
34 #include <subdev/instmem/nv04.h>
35 #include <subdev/timer.h>
36 #include <subdev/fb.h>
38 #include <engine/fifo.h>
42 static struct ramfc_desc
44 { 32, 0, 0x00, 0, NV04_PFIFO_CACHE1_DMA_PUT
},
45 { 32, 0, 0x04, 0, NV04_PFIFO_CACHE1_DMA_GET
},
46 { 16, 0, 0x08, 0, NV04_PFIFO_CACHE1_DMA_INSTANCE
},
47 { 16, 16, 0x08, 0, NV04_PFIFO_CACHE1_DMA_DCOUNT
},
48 { 32, 0, 0x0c, 0, NV04_PFIFO_CACHE1_DMA_STATE
},
49 { 32, 0, 0x10, 0, NV04_PFIFO_CACHE1_DMA_FETCH
},
50 { 32, 0, 0x14, 0, NV04_PFIFO_CACHE1_ENGINE
},
51 { 32, 0, 0x18, 0, NV04_PFIFO_CACHE1_PULL1
},
55 /*******************************************************************************
56 * FIFO channel objects
57 ******************************************************************************/
60 nv04_fifo_object_attach(struct nouveau_object
*parent
,
61 struct nouveau_object
*object
, u32 handle
)
63 struct nv04_fifo_priv
*priv
= (void *)parent
->engine
;
64 struct nv04_fifo_chan
*chan
= (void *)parent
;
65 u32 context
, chid
= chan
->base
.chid
;
68 if (nv_iclass(object
, NV_GPUOBJ_CLASS
))
69 context
= nv_gpuobj(object
)->addr
>> 4;
71 context
= 0x00000004; /* just non-zero */
73 switch (nv_engidx(object
->engine
)) {
74 case NVDEV_ENGINE_DMAOBJ
:
76 context
|= 0x00000000;
79 context
|= 0x00010000;
81 case NVDEV_ENGINE_MPEG
:
82 context
|= 0x00020000;
88 context
|= 0x80000000; /* valid */
89 context
|= chid
<< 24;
91 mutex_lock(&nv_subdev(priv
)->mutex
);
92 ret
= nouveau_ramht_insert(priv
->ramht
, chid
, handle
, context
);
93 mutex_unlock(&nv_subdev(priv
)->mutex
);
98 nv04_fifo_object_detach(struct nouveau_object
*parent
, int cookie
)
100 struct nv04_fifo_priv
*priv
= (void *)parent
->engine
;
101 mutex_lock(&nv_subdev(priv
)->mutex
);
102 nouveau_ramht_remove(priv
->ramht
, cookie
);
103 mutex_unlock(&nv_subdev(priv
)->mutex
);
107 nv04_fifo_context_attach(struct nouveau_object
*parent
,
108 struct nouveau_object
*object
)
110 nv_engctx(object
)->addr
= nouveau_fifo_chan(parent
)->chid
;
115 nv04_fifo_chan_ctor(struct nouveau_object
*parent
,
116 struct nouveau_object
*engine
,
117 struct nouveau_oclass
*oclass
, void *data
, u32 size
,
118 struct nouveau_object
**pobject
)
120 struct nv04_fifo_priv
*priv
= (void *)engine
;
121 struct nv04_fifo_chan
*chan
;
122 struct nv03_channel_dma_class
*args
= data
;
125 if (size
< sizeof(*args
))
128 ret
= nouveau_fifo_channel_create(parent
, engine
, oclass
, 0, 0x800000,
129 0x10000, args
->pushbuf
,
130 (1ULL << NVDEV_ENGINE_DMAOBJ
) |
131 (1ULL << NVDEV_ENGINE_SW
) |
132 (1ULL << NVDEV_ENGINE_GR
), &chan
);
133 *pobject
= nv_object(chan
);
137 nv_parent(chan
)->object_attach
= nv04_fifo_object_attach
;
138 nv_parent(chan
)->object_detach
= nv04_fifo_object_detach
;
139 nv_parent(chan
)->context_attach
= nv04_fifo_context_attach
;
140 chan
->ramfc
= chan
->base
.chid
* 32;
142 nv_wo32(priv
->ramfc
, chan
->ramfc
+ 0x00, args
->offset
);
143 nv_wo32(priv
->ramfc
, chan
->ramfc
+ 0x04, args
->offset
);
144 nv_wo32(priv
->ramfc
, chan
->ramfc
+ 0x08, chan
->base
.pushgpu
->addr
>> 4);
145 nv_wo32(priv
->ramfc
, chan
->ramfc
+ 0x10,
146 NV_PFIFO_CACHE1_DMA_FETCH_TRIG_128_BYTES
|
147 NV_PFIFO_CACHE1_DMA_FETCH_SIZE_128_BYTES
|
149 NV_PFIFO_CACHE1_BIG_ENDIAN
|
151 NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_8
);
156 nv04_fifo_chan_dtor(struct nouveau_object
*object
)
158 struct nv04_fifo_priv
*priv
= (void *)object
->engine
;
159 struct nv04_fifo_chan
*chan
= (void *)object
;
160 struct ramfc_desc
*c
= priv
->ramfc_desc
;
163 nv_wo32(priv
->ramfc
, chan
->ramfc
+ c
->ctxp
, 0x00000000);
164 } while ((++c
)->bits
);
166 nouveau_fifo_channel_destroy(&chan
->base
);
170 nv04_fifo_chan_init(struct nouveau_object
*object
)
172 struct nv04_fifo_priv
*priv
= (void *)object
->engine
;
173 struct nv04_fifo_chan
*chan
= (void *)object
;
174 u32 mask
= 1 << chan
->base
.chid
;
178 ret
= nouveau_fifo_channel_init(&chan
->base
);
182 spin_lock_irqsave(&priv
->base
.lock
, flags
);
183 nv_mask(priv
, NV04_PFIFO_MODE
, mask
, mask
);
184 spin_unlock_irqrestore(&priv
->base
.lock
, flags
);
189 nv04_fifo_chan_fini(struct nouveau_object
*object
, bool suspend
)
191 struct nv04_fifo_priv
*priv
= (void *)object
->engine
;
192 struct nv04_fifo_chan
*chan
= (void *)object
;
193 struct nouveau_gpuobj
*fctx
= priv
->ramfc
;
194 struct ramfc_desc
*c
;
196 u32 data
= chan
->ramfc
;
199 /* prevent fifo context switches */
200 spin_lock_irqsave(&priv
->base
.lock
, flags
);
201 nv_wr32(priv
, NV03_PFIFO_CACHES
, 0);
203 /* if this channel is active, replace it with a null context */
204 chid
= nv_rd32(priv
, NV03_PFIFO_CACHE1_PUSH1
) & priv
->base
.max
;
205 if (chid
== chan
->base
.chid
) {
206 nv_mask(priv
, NV04_PFIFO_CACHE1_DMA_PUSH
, 0x00000001, 0);
207 nv_wr32(priv
, NV03_PFIFO_CACHE1_PUSH0
, 0);
208 nv_mask(priv
, NV04_PFIFO_CACHE1_PULL0
, 0x00000001, 0);
210 c
= priv
->ramfc_desc
;
212 u32 rm
= ((1ULL << c
->bits
) - 1) << c
->regs
;
213 u32 cm
= ((1ULL << c
->bits
) - 1) << c
->ctxs
;
214 u32 rv
= (nv_rd32(priv
, c
->regp
) & rm
) >> c
->regs
;
215 u32 cv
= (nv_ro32(fctx
, c
->ctxp
+ data
) & ~cm
);
216 nv_wo32(fctx
, c
->ctxp
+ data
, cv
| (rv
<< c
->ctxs
));
217 } while ((++c
)->bits
);
219 c
= priv
->ramfc_desc
;
221 nv_wr32(priv
, c
->regp
, 0x00000000);
222 } while ((++c
)->bits
);
224 nv_wr32(priv
, NV03_PFIFO_CACHE1_GET
, 0);
225 nv_wr32(priv
, NV03_PFIFO_CACHE1_PUT
, 0);
226 nv_wr32(priv
, NV03_PFIFO_CACHE1_PUSH1
, priv
->base
.max
);
227 nv_wr32(priv
, NV03_PFIFO_CACHE1_PUSH0
, 1);
228 nv_wr32(priv
, NV04_PFIFO_CACHE1_PULL0
, 1);
231 /* restore normal operation, after disabling dma mode */
232 nv_mask(priv
, NV04_PFIFO_MODE
, 1 << chan
->base
.chid
, 0);
233 nv_wr32(priv
, NV03_PFIFO_CACHES
, 1);
234 spin_unlock_irqrestore(&priv
->base
.lock
, flags
);
236 return nouveau_fifo_channel_fini(&chan
->base
, suspend
);
239 static struct nouveau_ofuncs
241 .ctor
= nv04_fifo_chan_ctor
,
242 .dtor
= nv04_fifo_chan_dtor
,
243 .init
= nv04_fifo_chan_init
,
244 .fini
= nv04_fifo_chan_fini
,
245 .rd32
= _nouveau_fifo_channel_rd32
,
246 .wr32
= _nouveau_fifo_channel_wr32
,
249 static struct nouveau_oclass
250 nv04_fifo_sclass
[] = {
251 { NV03_CHANNEL_DMA_CLASS
, &nv04_fifo_ofuncs
},
255 /*******************************************************************************
256 * FIFO context - basically just the instmem reserved for the channel
257 ******************************************************************************/
260 nv04_fifo_context_ctor(struct nouveau_object
*parent
,
261 struct nouveau_object
*engine
,
262 struct nouveau_oclass
*oclass
, void *data
, u32 size
,
263 struct nouveau_object
**pobject
)
265 struct nv04_fifo_base
*base
;
268 ret
= nouveau_fifo_context_create(parent
, engine
, oclass
, NULL
, 0x1000,
269 0x1000, NVOBJ_FLAG_HEAP
, &base
);
270 *pobject
= nv_object(base
);
277 static struct nouveau_oclass
279 .handle
= NV_ENGCTX(FIFO
, 0x04),
280 .ofuncs
= &(struct nouveau_ofuncs
) {
281 .ctor
= nv04_fifo_context_ctor
,
282 .dtor
= _nouveau_fifo_context_dtor
,
283 .init
= _nouveau_fifo_context_init
,
284 .fini
= _nouveau_fifo_context_fini
,
285 .rd32
= _nouveau_fifo_context_rd32
,
286 .wr32
= _nouveau_fifo_context_wr32
,
290 /*******************************************************************************
292 ******************************************************************************/
295 nv04_fifo_pause(struct nouveau_fifo
*pfifo
, unsigned long *pflags
)
296 __acquires(priv
->base
.lock
)
298 struct nv04_fifo_priv
*priv
= (void *)pfifo
;
301 spin_lock_irqsave(&priv
->base
.lock
, flags
);
304 nv_wr32(priv
, NV03_PFIFO_CACHES
, 0x00000000);
305 nv_mask(priv
, NV04_PFIFO_CACHE1_PULL0
, 0x00000001, 0x00000000);
307 /* in some cases the puller may be left in an inconsistent state
308 * if you try to stop it while it's busy translating handles.
309 * sometimes you get a CACHE_ERROR, sometimes it just fails
310 * silently; sending incorrect instance offsets to PGRAPH after
311 * it's started up again.
313 * to avoid this, we invalidate the most recently calculated
316 if (!nv_wait(priv
, NV04_PFIFO_CACHE1_PULL0
,
317 NV04_PFIFO_CACHE1_PULL0_HASH_BUSY
, 0x00000000))
318 nv_warn(priv
, "timeout idling puller\n");
320 if (nv_rd32(priv
, NV04_PFIFO_CACHE1_PULL0
) &
321 NV04_PFIFO_CACHE1_PULL0_HASH_FAILED
)
322 nv_wr32(priv
, NV03_PFIFO_INTR_0
, NV_PFIFO_INTR_CACHE_ERROR
);
324 nv_wr32(priv
, NV04_PFIFO_CACHE1_HASH
, 0x00000000);
328 nv04_fifo_start(struct nouveau_fifo
*pfifo
, unsigned long *pflags
)
329 __releases(priv
->base
.lock
)
331 struct nv04_fifo_priv
*priv
= (void *)pfifo
;
332 unsigned long flags
= *pflags
;
334 nv_mask(priv
, NV04_PFIFO_CACHE1_PULL0
, 0x00000001, 0x00000001);
335 nv_wr32(priv
, NV03_PFIFO_CACHES
, 0x00000001);
337 spin_unlock_irqrestore(&priv
->base
.lock
, flags
);
341 nv_dma_state_err(u32 state
)
343 static const char * const desc
[] = {
344 "NONE", "CALL_SUBR_ACTIVE", "INVALID_MTHD", "RET_SUBR_INACTIVE",
345 "INVALID_CMD", "IB_EMPTY"/* NV50+ */, "MEM_FAULT", "UNK"
347 return desc
[(state
>> 29) & 0x7];
351 nv04_fifo_swmthd(struct nv04_fifo_priv
*priv
, u32 chid
, u32 addr
, u32 data
)
353 struct nv04_fifo_chan
*chan
= NULL
;
354 struct nouveau_handle
*bind
;
355 const int subc
= (addr
>> 13) & 0x7;
356 const int mthd
= addr
& 0x1ffc;
357 bool handled
= false;
361 spin_lock_irqsave(&priv
->base
.lock
, flags
);
362 if (likely(chid
>= priv
->base
.min
&& chid
<= priv
->base
.max
))
363 chan
= (void *)priv
->base
.channel
[chid
];
369 bind
= nouveau_namedb_get(nv_namedb(chan
), data
);
373 if (nv_engidx(bind
->object
->engine
) == NVDEV_ENGINE_SW
) {
374 engine
= 0x0000000f << (subc
* 4);
375 chan
->subc
[subc
] = data
;
378 nv_mask(priv
, NV04_PFIFO_CACHE1_ENGINE
, engine
, 0);
381 nouveau_namedb_put(bind
);
384 engine
= nv_rd32(priv
, NV04_PFIFO_CACHE1_ENGINE
);
385 if (unlikely(((engine
>> (subc
* 4)) & 0xf) != 0))
388 bind
= nouveau_namedb_get(nv_namedb(chan
), chan
->subc
[subc
]);
390 if (!nv_call(bind
->object
, mthd
, data
))
392 nouveau_namedb_put(bind
);
398 spin_unlock_irqrestore(&priv
->base
.lock
, flags
);
403 nv04_fifo_cache_error(struct nouveau_device
*device
,
404 struct nv04_fifo_priv
*priv
, u32 chid
, u32 get
)
409 /* NV_PFIFO_CACHE1_GET actually goes to 0xffc before wrapping on my
410 * G80 chips, but CACHE1 isn't big enough for this much data.. Tests
411 * show that it wraps around to the start at GET=0x800.. No clue as to
414 ptr
= (get
& 0x7ff) >> 2;
416 if (device
->card_type
< NV_40
) {
417 mthd
= nv_rd32(priv
, NV04_PFIFO_CACHE1_METHOD(ptr
));
418 data
= nv_rd32(priv
, NV04_PFIFO_CACHE1_DATA(ptr
));
420 mthd
= nv_rd32(priv
, NV40_PFIFO_CACHE1_METHOD(ptr
));
421 data
= nv_rd32(priv
, NV40_PFIFO_CACHE1_DATA(ptr
));
424 if (!nv04_fifo_swmthd(priv
, chid
, mthd
, data
)) {
425 const char *client_name
=
426 nouveau_client_name_for_fifo_chid(&priv
->base
, chid
);
428 "CACHE_ERROR - ch %d [%s] subc %d mthd 0x%04x data 0x%08x\n",
429 chid
, client_name
, (mthd
>> 13) & 7, mthd
& 0x1ffc,
433 nv_wr32(priv
, NV04_PFIFO_CACHE1_DMA_PUSH
, 0);
434 nv_wr32(priv
, NV03_PFIFO_INTR_0
, NV_PFIFO_INTR_CACHE_ERROR
);
436 nv_wr32(priv
, NV03_PFIFO_CACHE1_PUSH0
,
437 nv_rd32(priv
, NV03_PFIFO_CACHE1_PUSH0
) & ~1);
438 nv_wr32(priv
, NV03_PFIFO_CACHE1_GET
, get
+ 4);
439 nv_wr32(priv
, NV03_PFIFO_CACHE1_PUSH0
,
440 nv_rd32(priv
, NV03_PFIFO_CACHE1_PUSH0
) | 1);
441 nv_wr32(priv
, NV04_PFIFO_CACHE1_HASH
, 0);
443 nv_wr32(priv
, NV04_PFIFO_CACHE1_DMA_PUSH
,
444 nv_rd32(priv
, NV04_PFIFO_CACHE1_DMA_PUSH
) | 1);
445 nv_wr32(priv
, NV04_PFIFO_CACHE1_PULL0
, 1);
449 nv04_fifo_dma_pusher(struct nouveau_device
*device
, struct nv04_fifo_priv
*priv
,
452 const char *client_name
;
453 u32 dma_get
= nv_rd32(priv
, 0x003244);
454 u32 dma_put
= nv_rd32(priv
, 0x003240);
455 u32 push
= nv_rd32(priv
, 0x003220);
456 u32 state
= nv_rd32(priv
, 0x003228);
458 client_name
= nouveau_client_name_for_fifo_chid(&priv
->base
, chid
);
460 if (device
->card_type
== NV_50
) {
461 u32 ho_get
= nv_rd32(priv
, 0x003328);
462 u32 ho_put
= nv_rd32(priv
, 0x003320);
463 u32 ib_get
= nv_rd32(priv
, 0x003334);
464 u32 ib_put
= nv_rd32(priv
, 0x003330);
467 "DMA_PUSHER - ch %d [%s] get 0x%02x%08x put 0x%02x%08x ib_get 0x%08x ib_put 0x%08x state 0x%08x (err: %s) push 0x%08x\n",
468 chid
, client_name
, ho_get
, dma_get
, ho_put
, dma_put
,
469 ib_get
, ib_put
, state
, nv_dma_state_err(state
), push
);
471 /* METHOD_COUNT, in DMA_STATE on earlier chipsets */
472 nv_wr32(priv
, 0x003364, 0x00000000);
473 if (dma_get
!= dma_put
|| ho_get
!= ho_put
) {
474 nv_wr32(priv
, 0x003244, dma_put
);
475 nv_wr32(priv
, 0x003328, ho_put
);
477 if (ib_get
!= ib_put
)
478 nv_wr32(priv
, 0x003334, ib_put
);
481 "DMA_PUSHER - ch %d [%s] get 0x%08x put 0x%08x state 0x%08x (err: %s) push 0x%08x\n",
482 chid
, client_name
, dma_get
, dma_put
, state
,
483 nv_dma_state_err(state
), push
);
485 if (dma_get
!= dma_put
)
486 nv_wr32(priv
, 0x003244, dma_put
);
489 nv_wr32(priv
, 0x003228, 0x00000000);
490 nv_wr32(priv
, 0x003220, 0x00000001);
491 nv_wr32(priv
, 0x002100, NV_PFIFO_INTR_DMA_PUSHER
);
495 nv04_fifo_intr(struct nouveau_subdev
*subdev
)
497 struct nouveau_device
*device
= nv_device(subdev
);
498 struct nv04_fifo_priv
*priv
= (void *)subdev
;
499 uint32_t status
, reassign
;
502 reassign
= nv_rd32(priv
, NV03_PFIFO_CACHES
) & 1;
503 while ((status
= nv_rd32(priv
, NV03_PFIFO_INTR_0
)) && (cnt
++ < 100)) {
506 nv_wr32(priv
, NV03_PFIFO_CACHES
, 0);
508 chid
= nv_rd32(priv
, NV03_PFIFO_CACHE1_PUSH1
) & priv
->base
.max
;
509 get
= nv_rd32(priv
, NV03_PFIFO_CACHE1_GET
);
511 if (status
& NV_PFIFO_INTR_CACHE_ERROR
) {
512 nv04_fifo_cache_error(device
, priv
, chid
, get
);
513 status
&= ~NV_PFIFO_INTR_CACHE_ERROR
;
516 if (status
& NV_PFIFO_INTR_DMA_PUSHER
) {
517 nv04_fifo_dma_pusher(device
, priv
, chid
);
518 status
&= ~NV_PFIFO_INTR_DMA_PUSHER
;
521 if (status
& NV_PFIFO_INTR_SEMAPHORE
) {
524 status
&= ~NV_PFIFO_INTR_SEMAPHORE
;
525 nv_wr32(priv
, NV03_PFIFO_INTR_0
,
526 NV_PFIFO_INTR_SEMAPHORE
);
528 sem
= nv_rd32(priv
, NV10_PFIFO_CACHE1_SEMAPHORE
);
529 nv_wr32(priv
, NV10_PFIFO_CACHE1_SEMAPHORE
, sem
| 0x1);
531 nv_wr32(priv
, NV03_PFIFO_CACHE1_GET
, get
+ 4);
532 nv_wr32(priv
, NV04_PFIFO_CACHE1_PULL0
, 1);
535 if (device
->card_type
== NV_50
) {
536 if (status
& 0x00000010) {
537 status
&= ~0x00000010;
538 nv_wr32(priv
, 0x002100, 0x00000010);
541 if (status
& 0x40000000) {
542 nouveau_event_trigger(priv
->base
.uevent
, 0);
543 nv_wr32(priv
, 0x002100, 0x40000000);
544 status
&= ~0x40000000;
549 nv_warn(priv
, "unknown intr 0x%08x, ch %d\n",
551 nv_wr32(priv
, NV03_PFIFO_INTR_0
, status
);
555 nv_wr32(priv
, NV03_PFIFO_CACHES
, reassign
);
559 nv_error(priv
, "still angry after %d spins, halt\n", cnt
);
560 nv_wr32(priv
, 0x002140, 0);
561 nv_wr32(priv
, 0x000140, 0);
564 nv_wr32(priv
, 0x000100, 0x00000100);
568 nv04_fifo_ctor(struct nouveau_object
*parent
, struct nouveau_object
*engine
,
569 struct nouveau_oclass
*oclass
, void *data
, u32 size
,
570 struct nouveau_object
**pobject
)
572 struct nv04_instmem_priv
*imem
= nv04_instmem(parent
);
573 struct nv04_fifo_priv
*priv
;
576 ret
= nouveau_fifo_create(parent
, engine
, oclass
, 0, 15, &priv
);
577 *pobject
= nv_object(priv
);
581 nouveau_ramht_ref(imem
->ramht
, &priv
->ramht
);
582 nouveau_gpuobj_ref(imem
->ramro
, &priv
->ramro
);
583 nouveau_gpuobj_ref(imem
->ramfc
, &priv
->ramfc
);
585 nv_subdev(priv
)->unit
= 0x00000100;
586 nv_subdev(priv
)->intr
= nv04_fifo_intr
;
587 nv_engine(priv
)->cclass
= &nv04_fifo_cclass
;
588 nv_engine(priv
)->sclass
= nv04_fifo_sclass
;
589 priv
->base
.pause
= nv04_fifo_pause
;
590 priv
->base
.start
= nv04_fifo_start
;
591 priv
->ramfc_desc
= nv04_ramfc
;
596 nv04_fifo_dtor(struct nouveau_object
*object
)
598 struct nv04_fifo_priv
*priv
= (void *)object
;
599 nouveau_gpuobj_ref(NULL
, &priv
->ramfc
);
600 nouveau_gpuobj_ref(NULL
, &priv
->ramro
);
601 nouveau_ramht_ref(NULL
, &priv
->ramht
);
602 nouveau_fifo_destroy(&priv
->base
);
606 nv04_fifo_init(struct nouveau_object
*object
)
608 struct nv04_fifo_priv
*priv
= (void *)object
;
611 ret
= nouveau_fifo_init(&priv
->base
);
615 nv_wr32(priv
, NV04_PFIFO_DELAY_0
, 0x000000ff);
616 nv_wr32(priv
, NV04_PFIFO_DMA_TIMESLICE
, 0x0101ffff);
618 nv_wr32(priv
, NV03_PFIFO_RAMHT
, (0x03 << 24) /* search 128 */ |
619 ((priv
->ramht
->bits
- 9) << 16) |
620 (priv
->ramht
->base
.addr
>> 8));
621 nv_wr32(priv
, NV03_PFIFO_RAMRO
, priv
->ramro
->addr
>> 8);
622 nv_wr32(priv
, NV03_PFIFO_RAMFC
, priv
->ramfc
->addr
>> 8);
624 nv_wr32(priv
, NV03_PFIFO_CACHE1_PUSH1
, priv
->base
.max
);
626 nv_wr32(priv
, NV03_PFIFO_INTR_0
, 0xffffffff);
627 nv_wr32(priv
, NV03_PFIFO_INTR_EN_0
, 0xffffffff);
629 nv_wr32(priv
, NV03_PFIFO_CACHE1_PUSH0
, 1);
630 nv_wr32(priv
, NV04_PFIFO_CACHE1_PULL0
, 1);
631 nv_wr32(priv
, NV03_PFIFO_CACHES
, 1);
635 struct nouveau_oclass
637 .handle
= NV_ENGINE(FIFO
, 0x04),
638 .ofuncs
= &(struct nouveau_ofuncs
) {
639 .ctor
= nv04_fifo_ctor
,
640 .dtor
= nv04_fifo_dtor
,
641 .init
= nv04_fifo_init
,
642 .fini
= _nouveau_fifo_fini
,