/*
 * Copyright 2012 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */
#include <core/client.h>
#include <core/handle.h>
#include <core/namedb.h>
#include <core/gpuobj.h>
#include <core/engctx.h>
#include <core/event.h>
#include <core/class.h>
#include <core/math.h>
#include <core/enum.h>

#include <subdev/timer.h>
#include <subdev/bar.h>
#include <subdev/vm.h>

#include <engine/dmaobj.h>
#include <engine/fifo.h>
42 #define _(a,b) { (a), ((1ULL << (a)) | (b)) }
47 _(NVDEV_ENGINE_GR
, (1ULL << NVDEV_ENGINE_SW
)),
48 _(NVDEV_ENGINE_VP
, 0),
49 _(NVDEV_ENGINE_PPP
, 0),
50 _(NVDEV_ENGINE_BSP
, 0),
51 _(NVDEV_ENGINE_COPY0
, 0),
52 _(NVDEV_ENGINE_COPY1
, 0),
53 _(NVDEV_ENGINE_VENC
, 0),
56 #define FIFO_ENGINE_NR ARRAY_SIZE(fifo_engine)
/* Per-engine runlist state: two playlist buffers (double-buffered) and
 * the index of the buffer to use for the next update.
 *
 * NOTE(review): cur_playlist and the closing brace were lost in
 * extraction; restored from uses in nve0_fifo_playlist_update().
 */
struct nve0_fifo_engn {
	struct nouveau_gpuobj *playlist[2]; /* double-buffered runlists */
	int cur_playlist;                   /* index of buffer for next update */
};
63 struct nve0_fifo_priv
{
64 struct nouveau_fifo base
;
65 struct nve0_fifo_engn engine
[FIFO_ENGINE_NR
];
67 struct nouveau_gpuobj
*mem
;
68 struct nouveau_vma bar
;
73 struct nve0_fifo_base
{
74 struct nouveau_fifo_base base
;
75 struct nouveau_gpuobj
*pgd
;
76 struct nouveau_vm
*vm
;
79 struct nve0_fifo_chan
{
80 struct nouveau_fifo_chan base
;
/*******************************************************************************
 * FIFO channel objects
 ******************************************************************************/
89 nve0_fifo_playlist_update(struct nve0_fifo_priv
*priv
, u32 engine
)
91 struct nouveau_bar
*bar
= nouveau_bar(priv
);
92 struct nve0_fifo_engn
*engn
= &priv
->engine
[engine
];
93 struct nouveau_gpuobj
*cur
;
94 u32 match
= (engine
<< 16) | 0x00000001;
97 mutex_lock(&nv_subdev(priv
)->mutex
);
98 cur
= engn
->playlist
[engn
->cur_playlist
];
99 if (unlikely(cur
== NULL
)) {
100 int ret
= nouveau_gpuobj_new(nv_object(priv
), NULL
,
101 0x8000, 0x1000, 0, &cur
);
103 mutex_unlock(&nv_subdev(priv
)->mutex
);
104 nv_error(priv
, "playlist alloc failed\n");
108 engn
->playlist
[engn
->cur_playlist
] = cur
;
111 engn
->cur_playlist
= !engn
->cur_playlist
;
113 for (i
= 0, p
= 0; i
< priv
->base
.max
; i
++) {
114 u32 ctrl
= nv_rd32(priv
, 0x800004 + (i
* 8)) & 0x001f0001;
117 nv_wo32(cur
, p
+ 0, i
);
118 nv_wo32(cur
, p
+ 4, 0x00000000);
123 nv_wr32(priv
, 0x002270, cur
->addr
>> 12);
124 nv_wr32(priv
, 0x002274, (engine
<< 20) | (p
>> 3));
125 if (!nv_wait(priv
, 0x002284 + (engine
* 4), 0x00100000, 0x00000000))
126 nv_error(priv
, "playlist %d update timeout\n", engine
);
127 mutex_unlock(&nv_subdev(priv
)->mutex
);
131 nve0_fifo_context_attach(struct nouveau_object
*parent
,
132 struct nouveau_object
*object
)
134 struct nouveau_bar
*bar
= nouveau_bar(parent
);
135 struct nve0_fifo_base
*base
= (void *)parent
->parent
;
136 struct nouveau_engctx
*ectx
= (void *)object
;
140 switch (nv_engidx(object
->engine
)) {
141 case NVDEV_ENGINE_SW
:
142 case NVDEV_ENGINE_COPY0
:
143 case NVDEV_ENGINE_COPY1
:
144 case NVDEV_ENGINE_COPY2
:
146 case NVDEV_ENGINE_GR
: addr
= 0x0210; break;
147 case NVDEV_ENGINE_BSP
: addr
= 0x0270; break;
148 case NVDEV_ENGINE_VP
: addr
= 0x0250; break;
149 case NVDEV_ENGINE_PPP
: addr
= 0x0260; break;
154 if (!ectx
->vma
.node
) {
155 ret
= nouveau_gpuobj_map_vm(nv_gpuobj(ectx
), base
->vm
,
156 NV_MEM_ACCESS_RW
, &ectx
->vma
);
160 nv_engctx(ectx
)->addr
= nv_gpuobj(base
)->addr
>> 12;
163 nv_wo32(base
, addr
+ 0x00, lower_32_bits(ectx
->vma
.offset
) | 4);
164 nv_wo32(base
, addr
+ 0x04, upper_32_bits(ectx
->vma
.offset
));
170 nve0_fifo_context_detach(struct nouveau_object
*parent
, bool suspend
,
171 struct nouveau_object
*object
)
173 struct nouveau_bar
*bar
= nouveau_bar(parent
);
174 struct nve0_fifo_priv
*priv
= (void *)parent
->engine
;
175 struct nve0_fifo_base
*base
= (void *)parent
->parent
;
176 struct nve0_fifo_chan
*chan
= (void *)parent
;
179 switch (nv_engidx(object
->engine
)) {
180 case NVDEV_ENGINE_SW
: return 0;
181 case NVDEV_ENGINE_COPY0
:
182 case NVDEV_ENGINE_COPY1
:
183 case NVDEV_ENGINE_COPY2
: addr
= 0x0000; break;
184 case NVDEV_ENGINE_GR
: addr
= 0x0210; break;
185 case NVDEV_ENGINE_BSP
: addr
= 0x0270; break;
186 case NVDEV_ENGINE_VP
: addr
= 0x0250; break;
187 case NVDEV_ENGINE_PPP
: addr
= 0x0260; break;
192 nv_wr32(priv
, 0x002634, chan
->base
.chid
);
193 if (!nv_wait(priv
, 0x002634, 0xffffffff, chan
->base
.chid
)) {
194 nv_error(priv
, "channel %d [%s] kick timeout\n",
195 chan
->base
.chid
, nouveau_client_name(chan
));
201 nv_wo32(base
, addr
+ 0x00, 0x00000000);
202 nv_wo32(base
, addr
+ 0x04, 0x00000000);
210 nve0_fifo_chan_ctor(struct nouveau_object
*parent
,
211 struct nouveau_object
*engine
,
212 struct nouveau_oclass
*oclass
, void *data
, u32 size
,
213 struct nouveau_object
**pobject
)
215 struct nouveau_bar
*bar
= nouveau_bar(parent
);
216 struct nve0_fifo_priv
*priv
= (void *)engine
;
217 struct nve0_fifo_base
*base
= (void *)parent
;
218 struct nve0_fifo_chan
*chan
;
219 struct nve0_channel_ind_class
*args
= data
;
220 u64 usermem
, ioffset
, ilength
;
223 if (size
< sizeof(*args
))
226 for (i
= 0; i
< FIFO_ENGINE_NR
; i
++) {
227 if (args
->engine
& (1 << i
)) {
228 if (nouveau_engine(parent
, fifo_engine
[i
].subdev
)) {
229 args
->engine
= (1 << i
);
235 if (i
== FIFO_ENGINE_NR
)
238 ret
= nouveau_fifo_channel_create(parent
, engine
, oclass
, 1,
239 priv
->user
.bar
.offset
, 0x200,
241 fifo_engine
[i
].mask
, &chan
);
242 *pobject
= nv_object(chan
);
246 nv_parent(chan
)->context_attach
= nve0_fifo_context_attach
;
247 nv_parent(chan
)->context_detach
= nve0_fifo_context_detach
;
250 usermem
= chan
->base
.chid
* 0x200;
251 ioffset
= args
->ioffset
;
252 ilength
= log2i(args
->ilength
/ 8);
254 for (i
= 0; i
< 0x200; i
+= 4)
255 nv_wo32(priv
->user
.mem
, usermem
+ i
, 0x00000000);
257 nv_wo32(base
, 0x08, lower_32_bits(priv
->user
.mem
->addr
+ usermem
));
258 nv_wo32(base
, 0x0c, upper_32_bits(priv
->user
.mem
->addr
+ usermem
));
259 nv_wo32(base
, 0x10, 0x0000face);
260 nv_wo32(base
, 0x30, 0xfffff902);
261 nv_wo32(base
, 0x48, lower_32_bits(ioffset
));
262 nv_wo32(base
, 0x4c, upper_32_bits(ioffset
) | (ilength
<< 16));
263 nv_wo32(base
, 0x84, 0x20400000);
264 nv_wo32(base
, 0x94, 0x30000001);
265 nv_wo32(base
, 0x9c, 0x00000100);
266 nv_wo32(base
, 0xac, 0x0000001f);
267 nv_wo32(base
, 0xe8, chan
->base
.chid
);
268 nv_wo32(base
, 0xb8, 0xf8000000);
269 nv_wo32(base
, 0xf8, 0x10003080); /* 0x002310 */
270 nv_wo32(base
, 0xfc, 0x10000010); /* 0x002350 */
276 nve0_fifo_chan_init(struct nouveau_object
*object
)
278 struct nouveau_gpuobj
*base
= nv_gpuobj(object
->parent
);
279 struct nve0_fifo_priv
*priv
= (void *)object
->engine
;
280 struct nve0_fifo_chan
*chan
= (void *)object
;
281 u32 chid
= chan
->base
.chid
;
284 ret
= nouveau_fifo_channel_init(&chan
->base
);
288 nv_mask(priv
, 0x800004 + (chid
* 8), 0x000f0000, chan
->engine
<< 16);
289 nv_wr32(priv
, 0x800000 + (chid
* 8), 0x80000000 | base
->addr
>> 12);
290 nv_mask(priv
, 0x800004 + (chid
* 8), 0x00000400, 0x00000400);
291 nve0_fifo_playlist_update(priv
, chan
->engine
);
292 nv_mask(priv
, 0x800004 + (chid
* 8), 0x00000400, 0x00000400);
297 nve0_fifo_chan_fini(struct nouveau_object
*object
, bool suspend
)
299 struct nve0_fifo_priv
*priv
= (void *)object
->engine
;
300 struct nve0_fifo_chan
*chan
= (void *)object
;
301 u32 chid
= chan
->base
.chid
;
303 nv_mask(priv
, 0x800004 + (chid
* 8), 0x00000800, 0x00000800);
304 nve0_fifo_playlist_update(priv
, chan
->engine
);
305 nv_wr32(priv
, 0x800000 + (chid
* 8), 0x00000000);
307 return nouveau_fifo_channel_fini(&chan
->base
, suspend
);
310 static struct nouveau_ofuncs
312 .ctor
= nve0_fifo_chan_ctor
,
313 .dtor
= _nouveau_fifo_channel_dtor
,
314 .init
= nve0_fifo_chan_init
,
315 .fini
= nve0_fifo_chan_fini
,
316 .rd32
= _nouveau_fifo_channel_rd32
,
317 .wr32
= _nouveau_fifo_channel_wr32
,
320 static struct nouveau_oclass
321 nve0_fifo_sclass
[] = {
322 { NVE0_CHANNEL_IND_CLASS
, &nve0_fifo_ofuncs
},
/*******************************************************************************
 * FIFO context - instmem heap and vm setup
 ******************************************************************************/
331 nve0_fifo_context_ctor(struct nouveau_object
*parent
,
332 struct nouveau_object
*engine
,
333 struct nouveau_oclass
*oclass
, void *data
, u32 size
,
334 struct nouveau_object
**pobject
)
336 struct nve0_fifo_base
*base
;
339 ret
= nouveau_fifo_context_create(parent
, engine
, oclass
, NULL
, 0x1000,
340 0x1000, NVOBJ_FLAG_ZERO_ALLOC
, &base
);
341 *pobject
= nv_object(base
);
345 ret
= nouveau_gpuobj_new(nv_object(base
), NULL
, 0x10000, 0x1000, 0,
350 nv_wo32(base
, 0x0200, lower_32_bits(base
->pgd
->addr
));
351 nv_wo32(base
, 0x0204, upper_32_bits(base
->pgd
->addr
));
352 nv_wo32(base
, 0x0208, 0xffffffff);
353 nv_wo32(base
, 0x020c, 0x000000ff);
355 ret
= nouveau_vm_ref(nouveau_client(parent
)->vm
, &base
->vm
, base
->pgd
);
363 nve0_fifo_context_dtor(struct nouveau_object
*object
)
365 struct nve0_fifo_base
*base
= (void *)object
;
366 nouveau_vm_ref(NULL
, &base
->vm
, base
->pgd
);
367 nouveau_gpuobj_ref(NULL
, &base
->pgd
);
368 nouveau_fifo_context_destroy(&base
->base
);
371 static struct nouveau_oclass
373 .handle
= NV_ENGCTX(FIFO
, 0xe0),
374 .ofuncs
= &(struct nouveau_ofuncs
) {
375 .ctor
= nve0_fifo_context_ctor
,
376 .dtor
= nve0_fifo_context_dtor
,
377 .init
= _nouveau_fifo_context_init
,
378 .fini
= _nouveau_fifo_context_fini
,
379 .rd32
= _nouveau_fifo_context_rd32
,
380 .wr32
= _nouveau_fifo_context_wr32
,
/*******************************************************************************
 * PFIFO engine
 ******************************************************************************/
388 static const struct nouveau_enum nve0_fifo_fault_unit
[] = {
392 static const struct nouveau_enum nve0_fifo_fault_reason
[] = {
393 { 0x00, "PT_NOT_PRESENT" },
394 { 0x01, "PT_TOO_SHORT" },
395 { 0x02, "PAGE_NOT_PRESENT" },
396 { 0x03, "VM_LIMIT_EXCEEDED" },
397 { 0x04, "NO_CHANNEL" },
398 { 0x05, "PAGE_SYSTEM_ONLY" },
399 { 0x06, "PAGE_READ_ONLY" },
400 { 0x0a, "COMPRESSED_SYSRAM" },
401 { 0x0c, "INVALID_STORAGE_TYPE" },
405 static const struct nouveau_enum nve0_fifo_fault_hubclient
[] = {
409 static const struct nouveau_enum nve0_fifo_fault_gpcclient
[] = {
413 static const struct nouveau_bitfield nve0_fifo_subfifo_intr
[] = {
414 { 0x00200000, "ILLEGAL_MTHD" },
415 { 0x00800000, "EMPTY_SUBC" },
420 nve0_fifo_isr_vm_fault(struct nve0_fifo_priv
*priv
, int unit
)
422 u32 inst
= nv_rd32(priv
, 0x2800 + (unit
* 0x10));
423 u32 valo
= nv_rd32(priv
, 0x2804 + (unit
* 0x10));
424 u32 vahi
= nv_rd32(priv
, 0x2808 + (unit
* 0x10));
425 u32 stat
= nv_rd32(priv
, 0x280c + (unit
* 0x10));
426 u32 client
= (stat
& 0x00001f00) >> 8;
427 const struct nouveau_enum
*en
;
428 struct nouveau_engine
*engine
;
429 struct nouveau_object
*engctx
= NULL
;
431 nv_error(priv
, "PFIFO: %s fault at 0x%010llx [", (stat
& 0x00000080) ?
432 "write" : "read", (u64
)vahi
<< 32 | valo
);
433 nouveau_enum_print(nve0_fifo_fault_reason
, stat
& 0x0000000f);
435 en
= nouveau_enum_print(nve0_fifo_fault_unit
, unit
);
436 if (stat
& 0x00000040) {
438 nouveau_enum_print(nve0_fifo_fault_hubclient
, client
);
440 pr_cont("/GPC%d/", (stat
& 0x1f000000) >> 24);
441 nouveau_enum_print(nve0_fifo_fault_gpcclient
, client
);
444 if (en
&& en
->data2
) {
445 engine
= nouveau_engine(priv
, en
->data2
);
447 engctx
= nouveau_engctx_get(engine
, inst
);
451 pr_cont(" on channel 0x%010llx [%s]\n", (u64
)inst
<< 12,
452 nouveau_client_name(engctx
));
454 nouveau_engctx_put(engctx
);
458 nve0_fifo_swmthd(struct nve0_fifo_priv
*priv
, u32 chid
, u32 mthd
, u32 data
)
460 struct nve0_fifo_chan
*chan
= NULL
;
461 struct nouveau_handle
*bind
;
465 spin_lock_irqsave(&priv
->base
.lock
, flags
);
466 if (likely(chid
>= priv
->base
.min
&& chid
<= priv
->base
.max
))
467 chan
= (void *)priv
->base
.channel
[chid
];
471 bind
= nouveau_namedb_get_class(nv_namedb(chan
), 0x906e);
473 if (!mthd
|| !nv_call(bind
->object
, mthd
, data
))
475 nouveau_namedb_put(bind
);
479 spin_unlock_irqrestore(&priv
->base
.lock
, flags
);
484 nve0_fifo_isr_subfifo_intr(struct nve0_fifo_priv
*priv
, int unit
)
486 u32 stat
= nv_rd32(priv
, 0x040108 + (unit
* 0x2000));
487 u32 addr
= nv_rd32(priv
, 0x0400c0 + (unit
* 0x2000));
488 u32 data
= nv_rd32(priv
, 0x0400c4 + (unit
* 0x2000));
489 u32 chid
= nv_rd32(priv
, 0x040120 + (unit
* 0x2000)) & 0xfff;
490 u32 subc
= (addr
& 0x00070000) >> 16;
491 u32 mthd
= (addr
& 0x00003ffc);
494 if (stat
& 0x00200000) {
495 if (mthd
== 0x0054) {
496 if (!nve0_fifo_swmthd(priv
, chid
, 0x0500, 0x00000000))
501 if (stat
& 0x00800000) {
502 if (!nve0_fifo_swmthd(priv
, chid
, mthd
, data
))
507 nv_error(priv
, "SUBFIFO%d:", unit
);
508 nouveau_bitfield_print(nve0_fifo_subfifo_intr
, show
);
511 "SUBFIFO%d: ch %d [%s] subc %d mthd 0x%04x data 0x%08x\n",
513 nouveau_client_name_for_fifo_chid(&priv
->base
, chid
),
517 nv_wr32(priv
, 0x0400c0 + (unit
* 0x2000), 0x80600008);
518 nv_wr32(priv
, 0x040108 + (unit
* 0x2000), stat
);
522 nve0_fifo_intr(struct nouveau_subdev
*subdev
)
524 struct nve0_fifo_priv
*priv
= (void *)subdev
;
525 u32 mask
= nv_rd32(priv
, 0x002140);
526 u32 stat
= nv_rd32(priv
, 0x002100) & mask
;
528 if (stat
& 0x00000100) {
529 nv_warn(priv
, "unknown status 0x00000100\n");
530 nv_wr32(priv
, 0x002100, 0x00000100);
534 if (stat
& 0x10000000) {
535 u32 units
= nv_rd32(priv
, 0x00259c);
540 nve0_fifo_isr_vm_fault(priv
, i
);
544 nv_wr32(priv
, 0x00259c, units
);
548 if (stat
& 0x20000000) {
549 u32 units
= nv_rd32(priv
, 0x0025a0);
554 nve0_fifo_isr_subfifo_intr(priv
, i
);
558 nv_wr32(priv
, 0x0025a0, units
);
562 if (stat
& 0x40000000) {
563 nv_warn(priv
, "unknown status 0x40000000\n");
564 nv_mask(priv
, 0x002a00, 0x00000000, 0x00000000);
568 if (stat
& 0x80000000) {
569 nouveau_event_trigger(priv
->base
.uevent
, 0);
570 nv_wr32(priv
, 0x002100, 0x80000000);
575 nv_fatal(priv
, "unhandled status 0x%08x\n", stat
);
576 nv_wr32(priv
, 0x002100, stat
);
577 nv_wr32(priv
, 0x002140, 0);
582 nve0_fifo_uevent_enable(struct nouveau_event
*event
, int index
)
584 struct nve0_fifo_priv
*priv
= event
->priv
;
585 nv_mask(priv
, 0x002140, 0x80000000, 0x80000000);
589 nve0_fifo_uevent_disable(struct nouveau_event
*event
, int index
)
591 struct nve0_fifo_priv
*priv
= event
->priv
;
592 nv_mask(priv
, 0x002140, 0x80000000, 0x00000000);
596 nve0_fifo_ctor(struct nouveau_object
*parent
, struct nouveau_object
*engine
,
597 struct nouveau_oclass
*oclass
, void *data
, u32 size
,
598 struct nouveau_object
**pobject
)
600 struct nve0_fifo_priv
*priv
;
603 ret
= nouveau_fifo_create(parent
, engine
, oclass
, 0, 4095, &priv
);
604 *pobject
= nv_object(priv
);
608 ret
= nouveau_gpuobj_new(nv_object(priv
), NULL
, 4096 * 0x200, 0x1000,
609 NVOBJ_FLAG_ZERO_ALLOC
, &priv
->user
.mem
);
613 ret
= nouveau_gpuobj_map(priv
->user
.mem
, NV_MEM_ACCESS_RW
,
618 priv
->base
.uevent
->enable
= nve0_fifo_uevent_enable
;
619 priv
->base
.uevent
->disable
= nve0_fifo_uevent_disable
;
620 priv
->base
.uevent
->priv
= priv
;
622 nv_subdev(priv
)->unit
= 0x00000100;
623 nv_subdev(priv
)->intr
= nve0_fifo_intr
;
624 nv_engine(priv
)->cclass
= &nve0_fifo_cclass
;
625 nv_engine(priv
)->sclass
= nve0_fifo_sclass
;
630 nve0_fifo_dtor(struct nouveau_object
*object
)
632 struct nve0_fifo_priv
*priv
= (void *)object
;
635 nouveau_gpuobj_unmap(&priv
->user
.bar
);
636 nouveau_gpuobj_ref(NULL
, &priv
->user
.mem
);
638 for (i
= 0; i
< ARRAY_SIZE(priv
->engine
); i
++) {
639 nouveau_gpuobj_ref(NULL
, &priv
->engine
[i
].playlist
[1]);
640 nouveau_gpuobj_ref(NULL
, &priv
->engine
[i
].playlist
[0]);
643 nouveau_fifo_destroy(&priv
->base
);
647 nve0_fifo_init(struct nouveau_object
*object
)
649 struct nve0_fifo_priv
*priv
= (void *)object
;
652 ret
= nouveau_fifo_init(&priv
->base
);
656 /* enable all available PSUBFIFOs */
657 nv_wr32(priv
, 0x000204, 0xffffffff);
658 priv
->spoon_nr
= hweight32(nv_rd32(priv
, 0x000204));
659 nv_debug(priv
, "%d subfifo(s)\n", priv
->spoon_nr
);
662 for (i
= 0; i
< priv
->spoon_nr
; i
++) {
663 nv_mask(priv
, 0x04013c + (i
* 0x2000), 0x10000100, 0x00000000);
664 nv_wr32(priv
, 0x040108 + (i
* 0x2000), 0xffffffff); /* INTR */
665 nv_wr32(priv
, 0x04010c + (i
* 0x2000), 0xfffffeff); /* INTREN */
668 nv_wr32(priv
, 0x002254, 0x10000000 | priv
->user
.bar
.offset
>> 12);
670 nv_wr32(priv
, 0x002a00, 0xffffffff);
671 nv_wr32(priv
, 0x002100, 0xffffffff);
672 nv_wr32(priv
, 0x002140, 0x3fffffff);
676 struct nouveau_oclass
678 .handle
= NV_ENGINE(FIFO
, 0xe0),
679 .ofuncs
= &(struct nouveau_ofuncs
) {
680 .ctor
= nve0_fifo_ctor
,
681 .dtor
= nve0_fifo_dtor
,
682 .init
= nve0_fifo_init
,
683 .fini
= _nouveau_fifo_fini
,