2 * Copyright 2012 Red Hat Inc.
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
26 #include <core/class.h>
27 #include <core/engctx.h>
28 #include <core/namedb.h>
29 #include <core/handle.h>
30 #include <core/ramht.h>
32 #include <subdev/instmem.h>
33 #include <subdev/instmem/nv04.h>
34 #include <subdev/timer.h>
35 #include <subdev/fb.h>
37 #include <engine/fifo.h>
41 static struct ramfc_desc
43 { 32, 0, 0x00, 0, NV04_PFIFO_CACHE1_DMA_PUT
},
44 { 32, 0, 0x04, 0, NV04_PFIFO_CACHE1_DMA_GET
},
45 { 16, 0, 0x08, 0, NV04_PFIFO_CACHE1_DMA_INSTANCE
},
46 { 16, 16, 0x08, 0, NV04_PFIFO_CACHE1_DMA_DCOUNT
},
47 { 32, 0, 0x0c, 0, NV04_PFIFO_CACHE1_DMA_STATE
},
48 { 32, 0, 0x10, 0, NV04_PFIFO_CACHE1_DMA_FETCH
},
49 { 32, 0, 0x14, 0, NV04_PFIFO_CACHE1_ENGINE
},
50 { 32, 0, 0x18, 0, NV04_PFIFO_CACHE1_PULL1
},
54 /*******************************************************************************
55 * FIFO channel objects
56 ******************************************************************************/
59 nv04_fifo_object_attach(struct nouveau_object
*parent
,
60 struct nouveau_object
*object
, u32 handle
)
62 struct nv04_fifo_priv
*priv
= (void *)parent
->engine
;
63 struct nv04_fifo_chan
*chan
= (void *)parent
;
64 u32 context
, chid
= chan
->base
.chid
;
67 if (nv_iclass(object
, NV_GPUOBJ_CLASS
))
68 context
= nv_gpuobj(object
)->addr
>> 4;
70 context
= 0x00000004; /* just non-zero */
72 switch (nv_engidx(object
->engine
)) {
73 case NVDEV_ENGINE_DMAOBJ
:
75 context
|= 0x00000000;
78 context
|= 0x00010000;
80 case NVDEV_ENGINE_MPEG
:
81 context
|= 0x00020000;
87 context
|= 0x80000000; /* valid */
88 context
|= chid
<< 24;
90 mutex_lock(&nv_subdev(priv
)->mutex
);
91 ret
= nouveau_ramht_insert(priv
->ramht
, chid
, handle
, context
);
92 mutex_unlock(&nv_subdev(priv
)->mutex
);
97 nv04_fifo_object_detach(struct nouveau_object
*parent
, int cookie
)
99 struct nv04_fifo_priv
*priv
= (void *)parent
->engine
;
100 mutex_lock(&nv_subdev(priv
)->mutex
);
101 nouveau_ramht_remove(priv
->ramht
, cookie
);
102 mutex_unlock(&nv_subdev(priv
)->mutex
);
106 nv04_fifo_chan_ctor(struct nouveau_object
*parent
,
107 struct nouveau_object
*engine
,
108 struct nouveau_oclass
*oclass
, void *data
, u32 size
,
109 struct nouveau_object
**pobject
)
111 struct nv04_fifo_priv
*priv
= (void *)engine
;
112 struct nv04_fifo_chan
*chan
;
113 struct nv_channel_dma_class
*args
= data
;
116 if (size
< sizeof(*args
))
119 ret
= nouveau_fifo_channel_create(parent
, engine
, oclass
, 0, 0x800000,
120 0x10000, args
->pushbuf
,
121 (1 << NVDEV_ENGINE_DMAOBJ
) |
122 (1 << NVDEV_ENGINE_SW
) |
123 (1 << NVDEV_ENGINE_GR
), &chan
);
124 *pobject
= nv_object(chan
);
128 nv_parent(chan
)->object_attach
= nv04_fifo_object_attach
;
129 nv_parent(chan
)->object_detach
= nv04_fifo_object_detach
;
130 chan
->ramfc
= chan
->base
.chid
* 32;
132 nv_wo32(priv
->ramfc
, chan
->ramfc
+ 0x00, args
->offset
);
133 nv_wo32(priv
->ramfc
, chan
->ramfc
+ 0x04, args
->offset
);
134 nv_wo32(priv
->ramfc
, chan
->ramfc
+ 0x08, chan
->base
.pushgpu
->addr
>> 4);
135 nv_wo32(priv
->ramfc
, chan
->ramfc
+ 0x10,
136 NV_PFIFO_CACHE1_DMA_FETCH_TRIG_128_BYTES
|
137 NV_PFIFO_CACHE1_DMA_FETCH_SIZE_128_BYTES
|
139 NV_PFIFO_CACHE1_BIG_ENDIAN
|
141 NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_8
);
146 nv04_fifo_chan_dtor(struct nouveau_object
*object
)
148 struct nv04_fifo_priv
*priv
= (void *)object
->engine
;
149 struct nv04_fifo_chan
*chan
= (void *)object
;
150 struct ramfc_desc
*c
= priv
->ramfc_desc
;
153 nv_wo32(priv
->ramfc
, chan
->ramfc
+ c
->ctxp
, 0x00000000);
154 } while ((++c
)->bits
);
156 nouveau_fifo_channel_destroy(&chan
->base
);
160 nv04_fifo_chan_init(struct nouveau_object
*object
)
162 struct nv04_fifo_priv
*priv
= (void *)object
->engine
;
163 struct nv04_fifo_chan
*chan
= (void *)object
;
164 u32 mask
= 1 << chan
->base
.chid
;
168 ret
= nouveau_fifo_channel_init(&chan
->base
);
172 spin_lock_irqsave(&priv
->base
.lock
, flags
);
173 nv_mask(priv
, NV04_PFIFO_MODE
, mask
, mask
);
174 spin_unlock_irqrestore(&priv
->base
.lock
, flags
);
179 nv04_fifo_chan_fini(struct nouveau_object
*object
, bool suspend
)
181 struct nv04_fifo_priv
*priv
= (void *)object
->engine
;
182 struct nv04_fifo_chan
*chan
= (void *)object
;
183 struct nouveau_gpuobj
*fctx
= priv
->ramfc
;
184 struct ramfc_desc
*c
;
186 u32 data
= chan
->ramfc
;
189 /* prevent fifo context switches */
190 spin_lock_irqsave(&priv
->base
.lock
, flags
);
191 nv_wr32(priv
, NV03_PFIFO_CACHES
, 0);
193 /* if this channel is active, replace it with a null context */
194 chid
= nv_rd32(priv
, NV03_PFIFO_CACHE1_PUSH1
) & priv
->base
.max
;
195 if (chid
== chan
->base
.chid
) {
196 nv_mask(priv
, NV04_PFIFO_CACHE1_DMA_PUSH
, 0x00000001, 0);
197 nv_wr32(priv
, NV03_PFIFO_CACHE1_PUSH0
, 0);
198 nv_mask(priv
, NV04_PFIFO_CACHE1_PULL0
, 0x00000001, 0);
200 c
= priv
->ramfc_desc
;
202 u32 rm
= ((1ULL << c
->bits
) - 1) << c
->regs
;
203 u32 cm
= ((1ULL << c
->bits
) - 1) << c
->ctxs
;
204 u32 rv
= (nv_rd32(priv
, c
->regp
) & rm
) >> c
->regs
;
205 u32 cv
= (nv_ro32(fctx
, c
->ctxp
+ data
) & ~cm
);
206 nv_wo32(fctx
, c
->ctxp
+ data
, cv
| (rv
<< c
->ctxs
));
207 } while ((++c
)->bits
);
209 c
= priv
->ramfc_desc
;
211 nv_wr32(priv
, c
->regp
, 0x00000000);
212 } while ((++c
)->bits
);
214 nv_wr32(priv
, NV03_PFIFO_CACHE1_GET
, 0);
215 nv_wr32(priv
, NV03_PFIFO_CACHE1_PUT
, 0);
216 nv_wr32(priv
, NV03_PFIFO_CACHE1_PUSH1
, priv
->base
.max
);
217 nv_wr32(priv
, NV03_PFIFO_CACHE1_PUSH0
, 1);
218 nv_wr32(priv
, NV04_PFIFO_CACHE1_PULL0
, 1);
221 /* restore normal operation, after disabling dma mode */
222 nv_mask(priv
, NV04_PFIFO_MODE
, 1 << chan
->base
.chid
, 0);
223 nv_wr32(priv
, NV03_PFIFO_CACHES
, 1);
224 spin_unlock_irqrestore(&priv
->base
.lock
, flags
);
226 return nouveau_fifo_channel_fini(&chan
->base
, suspend
);
229 static struct nouveau_ofuncs
231 .ctor
= nv04_fifo_chan_ctor
,
232 .dtor
= nv04_fifo_chan_dtor
,
233 .init
= nv04_fifo_chan_init
,
234 .fini
= nv04_fifo_chan_fini
,
235 .rd32
= _nouveau_fifo_channel_rd32
,
236 .wr32
= _nouveau_fifo_channel_wr32
,
239 static struct nouveau_oclass
240 nv04_fifo_sclass
[] = {
241 { 0x006e, &nv04_fifo_ofuncs
},
245 /*******************************************************************************
246 * FIFO context - basically just the instmem reserved for the channel
247 ******************************************************************************/
250 nv04_fifo_context_ctor(struct nouveau_object
*parent
,
251 struct nouveau_object
*engine
,
252 struct nouveau_oclass
*oclass
, void *data
, u32 size
,
253 struct nouveau_object
**pobject
)
255 struct nv04_fifo_base
*base
;
258 ret
= nouveau_fifo_context_create(parent
, engine
, oclass
, NULL
, 0x1000,
259 0x1000, NVOBJ_FLAG_HEAP
, &base
);
260 *pobject
= nv_object(base
);
267 static struct nouveau_oclass
269 .handle
= NV_ENGCTX(FIFO
, 0x04),
270 .ofuncs
= &(struct nouveau_ofuncs
) {
271 .ctor
= nv04_fifo_context_ctor
,
272 .dtor
= _nouveau_fifo_context_dtor
,
273 .init
= _nouveau_fifo_context_init
,
274 .fini
= _nouveau_fifo_context_fini
,
275 .rd32
= _nouveau_fifo_context_rd32
,
276 .wr32
= _nouveau_fifo_context_wr32
,
280 /*******************************************************************************
282 ******************************************************************************/
285 nv04_fifo_pause(struct nouveau_fifo
*pfifo
, unsigned long *pflags
)
286 __acquires(priv
->base
.lock
)
288 struct nv04_fifo_priv
*priv
= (void *)pfifo
;
291 spin_lock_irqsave(&priv
->base
.lock
, flags
);
294 nv_wr32(priv
, NV03_PFIFO_CACHES
, 0x00000000);
295 nv_mask(priv
, NV04_PFIFO_CACHE1_PULL0
, 0x00000001, 0x00000000);
297 /* in some cases the puller may be left in an inconsistent state
298 * if you try to stop it while it's busy translating handles.
299 * sometimes you get a CACHE_ERROR, sometimes it just fails
300 * silently; sending incorrect instance offsets to PGRAPH after
301 * it's started up again.
303 * to avoid this, we invalidate the most recently calculated
306 if (!nv_wait(priv
, NV04_PFIFO_CACHE1_PULL0
,
307 NV04_PFIFO_CACHE1_PULL0_HASH_BUSY
, 0x00000000))
308 nv_warn(priv
, "timeout idling puller\n");
310 if (nv_rd32(priv
, NV04_PFIFO_CACHE1_PULL0
) &
311 NV04_PFIFO_CACHE1_PULL0_HASH_FAILED
)
312 nv_wr32(priv
, NV03_PFIFO_INTR_0
, NV_PFIFO_INTR_CACHE_ERROR
);
314 nv_wr32(priv
, NV04_PFIFO_CACHE1_HASH
, 0x00000000);
318 nv04_fifo_start(struct nouveau_fifo
*pfifo
, unsigned long *pflags
)
319 __releases(priv
->base
.lock
)
321 struct nv04_fifo_priv
*priv
= (void *)pfifo
;
322 unsigned long flags
= *pflags
;
324 nv_mask(priv
, NV04_PFIFO_CACHE1_PULL0
, 0x00000001, 0x00000001);
325 nv_wr32(priv
, NV03_PFIFO_CACHES
, 0x00000001);
327 spin_unlock_irqrestore(&priv
->base
.lock
, flags
);
/* Translate the error code in bits 31:29 of the DMA pusher state word
 * into a printable name for diagnostics.
 */
static const char *
nv_dma_state_err(u32 state)
{
	switch ((state >> 29) & 0x7) {
	case 0: return "NONE";
	case 1: return "CALL_SUBR_ACTIVE";
	case 2: return "INVALID_MTHD";
	case 3: return "RET_SUBR_INACTIVE";
	case 4: return "INVALID_CMD";
	case 5: return "IB_EMPTY"; /* NV50+ */
	case 6: return "MEM_FAULT";
	default: return "UNK";
	}
}
341 nv04_fifo_swmthd(struct nv04_fifo_priv
*priv
, u32 chid
, u32 addr
, u32 data
)
343 struct nv04_fifo_chan
*chan
= NULL
;
344 struct nouveau_handle
*bind
;
345 const int subc
= (addr
>> 13) & 0x7;
346 const int mthd
= addr
& 0x1ffc;
347 bool handled
= false;
351 spin_lock_irqsave(&priv
->base
.lock
, flags
);
352 if (likely(chid
>= priv
->base
.min
&& chid
<= priv
->base
.max
))
353 chan
= (void *)priv
->base
.channel
[chid
];
359 bind
= nouveau_namedb_get(nv_namedb(chan
), data
);
363 if (nv_engidx(bind
->object
->engine
) == NVDEV_ENGINE_SW
) {
364 engine
= 0x0000000f << (subc
* 4);
365 chan
->subc
[subc
] = data
;
368 nv_mask(priv
, NV04_PFIFO_CACHE1_ENGINE
, engine
, 0);
371 nouveau_namedb_put(bind
);
374 engine
= nv_rd32(priv
, NV04_PFIFO_CACHE1_ENGINE
);
375 if (unlikely(((engine
>> (subc
* 4)) & 0xf) != 0))
378 bind
= nouveau_namedb_get(nv_namedb(chan
), chan
->subc
[subc
]);
380 if (!nv_call(bind
->object
, mthd
, data
))
382 nouveau_namedb_put(bind
);
388 spin_unlock_irqrestore(&priv
->base
.lock
, flags
);
393 nv04_fifo_intr(struct nouveau_subdev
*subdev
)
395 struct nouveau_device
*device
= nv_device(subdev
);
396 struct nv04_fifo_priv
*priv
= (void *)subdev
;
397 uint32_t status
, reassign
;
400 reassign
= nv_rd32(priv
, NV03_PFIFO_CACHES
) & 1;
401 while ((status
= nv_rd32(priv
, NV03_PFIFO_INTR_0
)) && (cnt
++ < 100)) {
404 nv_wr32(priv
, NV03_PFIFO_CACHES
, 0);
406 chid
= nv_rd32(priv
, NV03_PFIFO_CACHE1_PUSH1
) & priv
->base
.max
;
407 get
= nv_rd32(priv
, NV03_PFIFO_CACHE1_GET
);
409 if (status
& NV_PFIFO_INTR_CACHE_ERROR
) {
413 /* NV_PFIFO_CACHE1_GET actually goes to 0xffc before
414 * wrapping on my G80 chips, but CACHE1 isn't big
415 * enough for this much data.. Tests show that it
416 * wraps around to the start at GET=0x800.. No clue
419 ptr
= (get
& 0x7ff) >> 2;
421 if (device
->card_type
< NV_40
) {
423 NV04_PFIFO_CACHE1_METHOD(ptr
));
425 NV04_PFIFO_CACHE1_DATA(ptr
));
428 NV40_PFIFO_CACHE1_METHOD(ptr
));
430 NV40_PFIFO_CACHE1_DATA(ptr
));
433 if (!nv04_fifo_swmthd(priv
, chid
, mthd
, data
)) {
434 nv_info(priv
, "CACHE_ERROR - Ch %d/%d "
435 "Mthd 0x%04x Data 0x%08x\n",
436 chid
, (mthd
>> 13) & 7, mthd
& 0x1ffc,
440 nv_wr32(priv
, NV04_PFIFO_CACHE1_DMA_PUSH
, 0);
441 nv_wr32(priv
, NV03_PFIFO_INTR_0
,
442 NV_PFIFO_INTR_CACHE_ERROR
);
444 nv_wr32(priv
, NV03_PFIFO_CACHE1_PUSH0
,
445 nv_rd32(priv
, NV03_PFIFO_CACHE1_PUSH0
) & ~1);
446 nv_wr32(priv
, NV03_PFIFO_CACHE1_GET
, get
+ 4);
447 nv_wr32(priv
, NV03_PFIFO_CACHE1_PUSH0
,
448 nv_rd32(priv
, NV03_PFIFO_CACHE1_PUSH0
) | 1);
449 nv_wr32(priv
, NV04_PFIFO_CACHE1_HASH
, 0);
451 nv_wr32(priv
, NV04_PFIFO_CACHE1_DMA_PUSH
,
452 nv_rd32(priv
, NV04_PFIFO_CACHE1_DMA_PUSH
) | 1);
453 nv_wr32(priv
, NV04_PFIFO_CACHE1_PULL0
, 1);
455 status
&= ~NV_PFIFO_INTR_CACHE_ERROR
;
458 if (status
& NV_PFIFO_INTR_DMA_PUSHER
) {
459 u32 dma_get
= nv_rd32(priv
, 0x003244);
460 u32 dma_put
= nv_rd32(priv
, 0x003240);
461 u32 push
= nv_rd32(priv
, 0x003220);
462 u32 state
= nv_rd32(priv
, 0x003228);
464 if (device
->card_type
== NV_50
) {
465 u32 ho_get
= nv_rd32(priv
, 0x003328);
466 u32 ho_put
= nv_rd32(priv
, 0x003320);
467 u32 ib_get
= nv_rd32(priv
, 0x003334);
468 u32 ib_put
= nv_rd32(priv
, 0x003330);
470 nv_info(priv
, "DMA_PUSHER - Ch %d Get 0x%02x%08x "
471 "Put 0x%02x%08x IbGet 0x%08x IbPut 0x%08x "
472 "State 0x%08x (err: %s) Push 0x%08x\n",
473 chid
, ho_get
, dma_get
, ho_put
,
474 dma_put
, ib_get
, ib_put
, state
,
475 nv_dma_state_err(state
),
478 /* METHOD_COUNT, in DMA_STATE on earlier chipsets */
479 nv_wr32(priv
, 0x003364, 0x00000000);
480 if (dma_get
!= dma_put
|| ho_get
!= ho_put
) {
481 nv_wr32(priv
, 0x003244, dma_put
);
482 nv_wr32(priv
, 0x003328, ho_put
);
484 if (ib_get
!= ib_put
) {
485 nv_wr32(priv
, 0x003334, ib_put
);
488 nv_info(priv
, "DMA_PUSHER - Ch %d Get 0x%08x "
489 "Put 0x%08x State 0x%08x (err: %s) Push 0x%08x\n",
490 chid
, dma_get
, dma_put
, state
,
491 nv_dma_state_err(state
), push
);
493 if (dma_get
!= dma_put
)
494 nv_wr32(priv
, 0x003244, dma_put
);
497 nv_wr32(priv
, 0x003228, 0x00000000);
498 nv_wr32(priv
, 0x003220, 0x00000001);
499 nv_wr32(priv
, 0x002100, NV_PFIFO_INTR_DMA_PUSHER
);
500 status
&= ~NV_PFIFO_INTR_DMA_PUSHER
;
503 if (status
& NV_PFIFO_INTR_SEMAPHORE
) {
506 status
&= ~NV_PFIFO_INTR_SEMAPHORE
;
507 nv_wr32(priv
, NV03_PFIFO_INTR_0
,
508 NV_PFIFO_INTR_SEMAPHORE
);
510 sem
= nv_rd32(priv
, NV10_PFIFO_CACHE1_SEMAPHORE
);
511 nv_wr32(priv
, NV10_PFIFO_CACHE1_SEMAPHORE
, sem
| 0x1);
513 nv_wr32(priv
, NV03_PFIFO_CACHE1_GET
, get
+ 4);
514 nv_wr32(priv
, NV04_PFIFO_CACHE1_PULL0
, 1);
517 if (device
->card_type
== NV_50
) {
518 if (status
& 0x00000010) {
519 nv50_fb_trap(nouveau_fb(priv
), 1);
520 status
&= ~0x00000010;
521 nv_wr32(priv
, 0x002100, 0x00000010);
526 nv_info(priv
, "unknown intr 0x%08x, ch %d\n",
528 nv_wr32(priv
, NV03_PFIFO_INTR_0
, status
);
532 nv_wr32(priv
, NV03_PFIFO_CACHES
, reassign
);
536 nv_info(priv
, "still angry after %d spins, halt\n", cnt
);
537 nv_wr32(priv
, 0x002140, 0);
538 nv_wr32(priv
, 0x000140, 0);
541 nv_wr32(priv
, 0x000100, 0x00000100);
545 nv04_fifo_ctor(struct nouveau_object
*parent
, struct nouveau_object
*engine
,
546 struct nouveau_oclass
*oclass
, void *data
, u32 size
,
547 struct nouveau_object
**pobject
)
549 struct nv04_instmem_priv
*imem
= nv04_instmem(parent
);
550 struct nv04_fifo_priv
*priv
;
553 ret
= nouveau_fifo_create(parent
, engine
, oclass
, 0, 15, &priv
);
554 *pobject
= nv_object(priv
);
558 nouveau_ramht_ref(imem
->ramht
, &priv
->ramht
);
559 nouveau_gpuobj_ref(imem
->ramro
, &priv
->ramro
);
560 nouveau_gpuobj_ref(imem
->ramfc
, &priv
->ramfc
);
562 nv_subdev(priv
)->unit
= 0x00000100;
563 nv_subdev(priv
)->intr
= nv04_fifo_intr
;
564 nv_engine(priv
)->cclass
= &nv04_fifo_cclass
;
565 nv_engine(priv
)->sclass
= nv04_fifo_sclass
;
566 priv
->base
.pause
= nv04_fifo_pause
;
567 priv
->base
.start
= nv04_fifo_start
;
568 priv
->ramfc_desc
= nv04_ramfc
;
573 nv04_fifo_dtor(struct nouveau_object
*object
)
575 struct nv04_fifo_priv
*priv
= (void *)object
;
576 nouveau_gpuobj_ref(NULL
, &priv
->ramfc
);
577 nouveau_gpuobj_ref(NULL
, &priv
->ramro
);
578 nouveau_ramht_ref(NULL
, &priv
->ramht
);
579 nouveau_fifo_destroy(&priv
->base
);
583 nv04_fifo_init(struct nouveau_object
*object
)
585 struct nv04_fifo_priv
*priv
= (void *)object
;
588 ret
= nouveau_fifo_init(&priv
->base
);
592 nv_wr32(priv
, NV04_PFIFO_DELAY_0
, 0x000000ff);
593 nv_wr32(priv
, NV04_PFIFO_DMA_TIMESLICE
, 0x0101ffff);
595 nv_wr32(priv
, NV03_PFIFO_RAMHT
, (0x03 << 24) /* search 128 */ |
596 ((priv
->ramht
->bits
- 9) << 16) |
597 (priv
->ramht
->base
.addr
>> 8));
598 nv_wr32(priv
, NV03_PFIFO_RAMRO
, priv
->ramro
->addr
>> 8);
599 nv_wr32(priv
, NV03_PFIFO_RAMFC
, priv
->ramfc
->addr
>> 8);
601 nv_wr32(priv
, NV03_PFIFO_CACHE1_PUSH1
, priv
->base
.max
);
603 nv_wr32(priv
, NV03_PFIFO_INTR_0
, 0xffffffff);
604 nv_wr32(priv
, NV03_PFIFO_INTR_EN_0
, 0xffffffff);
606 nv_wr32(priv
, NV03_PFIFO_CACHE1_PUSH0
, 1);
607 nv_wr32(priv
, NV04_PFIFO_CACHE1_PULL0
, 1);
608 nv_wr32(priv
, NV03_PFIFO_CACHES
, 1);
612 struct nouveau_oclass
614 .handle
= NV_ENGINE(FIFO
, 0x04),
615 .ofuncs
= &(struct nouveau_ofuncs
) {
616 .ctor
= nv04_fifo_ctor
,
617 .dtor
= nv04_fifo_dtor
,
618 .init
= nv04_fifo_init
,
619 .fini
= _nouveau_fifo_fini
,