/*
 * Copyright 2012 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */
25 #include <core/client.h>
26 #include <core/handle.h>
27 #include <core/namedb.h>
28 #include <core/gpuobj.h>
29 #include <core/engctx.h>
30 #include <core/event.h>
31 #include <core/class.h>
32 #include <core/math.h>
33 #include <core/enum.h>
35 #include <subdev/timer.h>
36 #include <subdev/bar.h>
37 #include <subdev/vm.h>
39 #include <engine/dmaobj.h>
40 #include <engine/fifo.h>
42 #define _(a,b) { (a), ((1ULL << (a)) | (b)) }
47 _(NVDEV_ENGINE_GR
, (1ULL << NVDEV_ENGINE_SW
) |
48 (1ULL << NVDEV_ENGINE_COPY2
)),
49 _(NVDEV_ENGINE_VP
, 0),
50 _(NVDEV_ENGINE_PPP
, 0),
51 _(NVDEV_ENGINE_BSP
, 0),
52 _(NVDEV_ENGINE_COPY0
, 0),
53 _(NVDEV_ENGINE_COPY1
, 0),
54 _(NVDEV_ENGINE_VENC
, 0),
57 #define FIFO_ENGINE_NR ARRAY_SIZE(fifo_engine)
/* Per-engine runlist state: double-buffered playlist objects. */
struct nve0_fifo_engn {
	struct nouveau_gpuobj *playlist[2];
	/* index of the playlist currently owned by hardware; flipped on
	 * every update so we always write the inactive copy */
	int cur_playlist;
};
64 struct nve0_fifo_priv
{
65 struct nouveau_fifo base
;
66 struct nve0_fifo_engn engine
[FIFO_ENGINE_NR
];
68 struct nouveau_gpuobj
*mem
;
69 struct nouveau_vma bar
;
74 struct nve0_fifo_base
{
75 struct nouveau_fifo_base base
;
76 struct nouveau_gpuobj
*pgd
;
77 struct nouveau_vm
*vm
;
80 struct nve0_fifo_chan
{
81 struct nouveau_fifo_chan base
;
85 /*******************************************************************************
86 * FIFO channel objects
87 ******************************************************************************/
90 nve0_fifo_playlist_update(struct nve0_fifo_priv
*priv
, u32 engine
)
92 struct nouveau_bar
*bar
= nouveau_bar(priv
);
93 struct nve0_fifo_engn
*engn
= &priv
->engine
[engine
];
94 struct nouveau_gpuobj
*cur
;
95 u32 match
= (engine
<< 16) | 0x00000001;
98 mutex_lock(&nv_subdev(priv
)->mutex
);
99 cur
= engn
->playlist
[engn
->cur_playlist
];
100 engn
->cur_playlist
= !engn
->cur_playlist
;
102 for (i
= 0, p
= 0; i
< priv
->base
.max
; i
++) {
103 u32 ctrl
= nv_rd32(priv
, 0x800004 + (i
* 8)) & 0x001f0001;
106 nv_wo32(cur
, p
+ 0, i
);
107 nv_wo32(cur
, p
+ 4, 0x00000000);
112 nv_wr32(priv
, 0x002270, cur
->addr
>> 12);
113 nv_wr32(priv
, 0x002274, (engine
<< 20) | (p
>> 3));
114 if (!nv_wait(priv
, 0x002284 + (engine
* 4), 0x00100000, 0x00000000))
115 nv_error(priv
, "playlist %d update timeout\n", engine
);
116 mutex_unlock(&nv_subdev(priv
)->mutex
);
120 nve0_fifo_context_attach(struct nouveau_object
*parent
,
121 struct nouveau_object
*object
)
123 struct nouveau_bar
*bar
= nouveau_bar(parent
);
124 struct nve0_fifo_base
*base
= (void *)parent
->parent
;
125 struct nouveau_engctx
*ectx
= (void *)object
;
129 switch (nv_engidx(object
->engine
)) {
130 case NVDEV_ENGINE_SW
:
131 case NVDEV_ENGINE_COPY0
:
132 case NVDEV_ENGINE_COPY1
:
133 case NVDEV_ENGINE_COPY2
:
135 case NVDEV_ENGINE_GR
: addr
= 0x0210; break;
136 case NVDEV_ENGINE_BSP
: addr
= 0x0270; break;
137 case NVDEV_ENGINE_VP
: addr
= 0x0250; break;
138 case NVDEV_ENGINE_PPP
: addr
= 0x0260; break;
143 if (!ectx
->vma
.node
) {
144 ret
= nouveau_gpuobj_map_vm(nv_gpuobj(ectx
), base
->vm
,
145 NV_MEM_ACCESS_RW
, &ectx
->vma
);
149 nv_engctx(ectx
)->addr
= nv_gpuobj(base
)->addr
>> 12;
152 nv_wo32(base
, addr
+ 0x00, lower_32_bits(ectx
->vma
.offset
) | 4);
153 nv_wo32(base
, addr
+ 0x04, upper_32_bits(ectx
->vma
.offset
));
159 nve0_fifo_context_detach(struct nouveau_object
*parent
, bool suspend
,
160 struct nouveau_object
*object
)
162 struct nouveau_bar
*bar
= nouveau_bar(parent
);
163 struct nve0_fifo_priv
*priv
= (void *)parent
->engine
;
164 struct nve0_fifo_base
*base
= (void *)parent
->parent
;
165 struct nve0_fifo_chan
*chan
= (void *)parent
;
168 switch (nv_engidx(object
->engine
)) {
169 case NVDEV_ENGINE_SW
: return 0;
170 case NVDEV_ENGINE_COPY0
:
171 case NVDEV_ENGINE_COPY1
:
172 case NVDEV_ENGINE_COPY2
: addr
= 0x0000; break;
173 case NVDEV_ENGINE_GR
: addr
= 0x0210; break;
174 case NVDEV_ENGINE_BSP
: addr
= 0x0270; break;
175 case NVDEV_ENGINE_VP
: addr
= 0x0250; break;
176 case NVDEV_ENGINE_PPP
: addr
= 0x0260; break;
181 nv_wr32(priv
, 0x002634, chan
->base
.chid
);
182 if (!nv_wait(priv
, 0x002634, 0xffffffff, chan
->base
.chid
)) {
183 nv_error(priv
, "channel %d [%s] kick timeout\n",
184 chan
->base
.chid
, nouveau_client_name(chan
));
190 nv_wo32(base
, addr
+ 0x00, 0x00000000);
191 nv_wo32(base
, addr
+ 0x04, 0x00000000);
199 nve0_fifo_chan_ctor(struct nouveau_object
*parent
,
200 struct nouveau_object
*engine
,
201 struct nouveau_oclass
*oclass
, void *data
, u32 size
,
202 struct nouveau_object
**pobject
)
204 struct nouveau_bar
*bar
= nouveau_bar(parent
);
205 struct nve0_fifo_priv
*priv
= (void *)engine
;
206 struct nve0_fifo_base
*base
= (void *)parent
;
207 struct nve0_fifo_chan
*chan
;
208 struct nve0_channel_ind_class
*args
= data
;
209 u64 usermem
, ioffset
, ilength
;
212 if (size
< sizeof(*args
))
215 for (i
= 0; i
< FIFO_ENGINE_NR
; i
++) {
216 if (args
->engine
& (1 << i
)) {
217 if (nouveau_engine(parent
, fifo_engine
[i
].subdev
)) {
218 args
->engine
= (1 << i
);
224 if (i
== FIFO_ENGINE_NR
)
227 ret
= nouveau_fifo_channel_create(parent
, engine
, oclass
, 1,
228 priv
->user
.bar
.offset
, 0x200,
230 fifo_engine
[i
].mask
, &chan
);
231 *pobject
= nv_object(chan
);
235 nv_parent(chan
)->context_attach
= nve0_fifo_context_attach
;
236 nv_parent(chan
)->context_detach
= nve0_fifo_context_detach
;
239 usermem
= chan
->base
.chid
* 0x200;
240 ioffset
= args
->ioffset
;
241 ilength
= log2i(args
->ilength
/ 8);
243 for (i
= 0; i
< 0x200; i
+= 4)
244 nv_wo32(priv
->user
.mem
, usermem
+ i
, 0x00000000);
246 nv_wo32(base
, 0x08, lower_32_bits(priv
->user
.mem
->addr
+ usermem
));
247 nv_wo32(base
, 0x0c, upper_32_bits(priv
->user
.mem
->addr
+ usermem
));
248 nv_wo32(base
, 0x10, 0x0000face);
249 nv_wo32(base
, 0x30, 0xfffff902);
250 nv_wo32(base
, 0x48, lower_32_bits(ioffset
));
251 nv_wo32(base
, 0x4c, upper_32_bits(ioffset
) | (ilength
<< 16));
252 nv_wo32(base
, 0x84, 0x20400000);
253 nv_wo32(base
, 0x94, 0x30000001);
254 nv_wo32(base
, 0x9c, 0x00000100);
255 nv_wo32(base
, 0xac, 0x0000001f);
256 nv_wo32(base
, 0xe8, chan
->base
.chid
);
257 nv_wo32(base
, 0xb8, 0xf8000000);
258 nv_wo32(base
, 0xf8, 0x10003080); /* 0x002310 */
259 nv_wo32(base
, 0xfc, 0x10000010); /* 0x002350 */
265 nve0_fifo_chan_init(struct nouveau_object
*object
)
267 struct nouveau_gpuobj
*base
= nv_gpuobj(object
->parent
);
268 struct nve0_fifo_priv
*priv
= (void *)object
->engine
;
269 struct nve0_fifo_chan
*chan
= (void *)object
;
270 u32 chid
= chan
->base
.chid
;
273 ret
= nouveau_fifo_channel_init(&chan
->base
);
277 nv_mask(priv
, 0x800004 + (chid
* 8), 0x000f0000, chan
->engine
<< 16);
278 nv_wr32(priv
, 0x800000 + (chid
* 8), 0x80000000 | base
->addr
>> 12);
279 nv_mask(priv
, 0x800004 + (chid
* 8), 0x00000400, 0x00000400);
280 nve0_fifo_playlist_update(priv
, chan
->engine
);
281 nv_mask(priv
, 0x800004 + (chid
* 8), 0x00000400, 0x00000400);
286 nve0_fifo_chan_fini(struct nouveau_object
*object
, bool suspend
)
288 struct nve0_fifo_priv
*priv
= (void *)object
->engine
;
289 struct nve0_fifo_chan
*chan
= (void *)object
;
290 u32 chid
= chan
->base
.chid
;
292 nv_mask(priv
, 0x800004 + (chid
* 8), 0x00000800, 0x00000800);
293 nve0_fifo_playlist_update(priv
, chan
->engine
);
294 nv_wr32(priv
, 0x800000 + (chid
* 8), 0x00000000);
296 return nouveau_fifo_channel_fini(&chan
->base
, suspend
);
299 static struct nouveau_ofuncs
301 .ctor
= nve0_fifo_chan_ctor
,
302 .dtor
= _nouveau_fifo_channel_dtor
,
303 .init
= nve0_fifo_chan_init
,
304 .fini
= nve0_fifo_chan_fini
,
305 .rd32
= _nouveau_fifo_channel_rd32
,
306 .wr32
= _nouveau_fifo_channel_wr32
,
309 static struct nouveau_oclass
310 nve0_fifo_sclass
[] = {
311 { NVE0_CHANNEL_IND_CLASS
, &nve0_fifo_ofuncs
},
315 /*******************************************************************************
316 * FIFO context - instmem heap and vm setup
317 ******************************************************************************/
320 nve0_fifo_context_ctor(struct nouveau_object
*parent
,
321 struct nouveau_object
*engine
,
322 struct nouveau_oclass
*oclass
, void *data
, u32 size
,
323 struct nouveau_object
**pobject
)
325 struct nve0_fifo_base
*base
;
328 ret
= nouveau_fifo_context_create(parent
, engine
, oclass
, NULL
, 0x1000,
329 0x1000, NVOBJ_FLAG_ZERO_ALLOC
, &base
);
330 *pobject
= nv_object(base
);
334 ret
= nouveau_gpuobj_new(nv_object(base
), NULL
, 0x10000, 0x1000, 0,
339 nv_wo32(base
, 0x0200, lower_32_bits(base
->pgd
->addr
));
340 nv_wo32(base
, 0x0204, upper_32_bits(base
->pgd
->addr
));
341 nv_wo32(base
, 0x0208, 0xffffffff);
342 nv_wo32(base
, 0x020c, 0x000000ff);
344 ret
= nouveau_vm_ref(nouveau_client(parent
)->vm
, &base
->vm
, base
->pgd
);
352 nve0_fifo_context_dtor(struct nouveau_object
*object
)
354 struct nve0_fifo_base
*base
= (void *)object
;
355 nouveau_vm_ref(NULL
, &base
->vm
, base
->pgd
);
356 nouveau_gpuobj_ref(NULL
, &base
->pgd
);
357 nouveau_fifo_context_destroy(&base
->base
);
360 static struct nouveau_oclass
362 .handle
= NV_ENGCTX(FIFO
, 0xe0),
363 .ofuncs
= &(struct nouveau_ofuncs
) {
364 .ctor
= nve0_fifo_context_ctor
,
365 .dtor
= nve0_fifo_context_dtor
,
366 .init
= _nouveau_fifo_context_init
,
367 .fini
= _nouveau_fifo_context_fini
,
368 .rd32
= _nouveau_fifo_context_rd32
,
369 .wr32
= _nouveau_fifo_context_wr32
,
/*******************************************************************************
 * PFIFO engine
 ******************************************************************************/
377 static const struct nouveau_enum nve0_fifo_fault_unit
[] = {
381 static const struct nouveau_enum nve0_fifo_fault_reason
[] = {
382 { 0x00, "PT_NOT_PRESENT" },
383 { 0x01, "PT_TOO_SHORT" },
384 { 0x02, "PAGE_NOT_PRESENT" },
385 { 0x03, "VM_LIMIT_EXCEEDED" },
386 { 0x04, "NO_CHANNEL" },
387 { 0x05, "PAGE_SYSTEM_ONLY" },
388 { 0x06, "PAGE_READ_ONLY" },
389 { 0x0a, "COMPRESSED_SYSRAM" },
390 { 0x0c, "INVALID_STORAGE_TYPE" },
394 static const struct nouveau_enum nve0_fifo_fault_hubclient
[] = {
398 static const struct nouveau_enum nve0_fifo_fault_gpcclient
[] = {
402 static const struct nouveau_bitfield nve0_fifo_subfifo_intr
[] = {
403 { 0x00200000, "ILLEGAL_MTHD" },
404 { 0x00800000, "EMPTY_SUBC" },
409 nve0_fifo_isr_vm_fault(struct nve0_fifo_priv
*priv
, int unit
)
411 u32 inst
= nv_rd32(priv
, 0x2800 + (unit
* 0x10));
412 u32 valo
= nv_rd32(priv
, 0x2804 + (unit
* 0x10));
413 u32 vahi
= nv_rd32(priv
, 0x2808 + (unit
* 0x10));
414 u32 stat
= nv_rd32(priv
, 0x280c + (unit
* 0x10));
415 u32 client
= (stat
& 0x00001f00) >> 8;
416 const struct nouveau_enum
*en
;
417 struct nouveau_engine
*engine
;
418 struct nouveau_object
*engctx
= NULL
;
420 nv_error(priv
, "PFIFO: %s fault at 0x%010llx [", (stat
& 0x00000080) ?
421 "write" : "read", (u64
)vahi
<< 32 | valo
);
422 nouveau_enum_print(nve0_fifo_fault_reason
, stat
& 0x0000000f);
424 en
= nouveau_enum_print(nve0_fifo_fault_unit
, unit
);
425 if (stat
& 0x00000040) {
427 nouveau_enum_print(nve0_fifo_fault_hubclient
, client
);
429 pr_cont("/GPC%d/", (stat
& 0x1f000000) >> 24);
430 nouveau_enum_print(nve0_fifo_fault_gpcclient
, client
);
433 if (en
&& en
->data2
) {
434 engine
= nouveau_engine(priv
, en
->data2
);
436 engctx
= nouveau_engctx_get(engine
, inst
);
440 pr_cont(" on channel 0x%010llx [%s]\n", (u64
)inst
<< 12,
441 nouveau_client_name(engctx
));
443 nouveau_engctx_put(engctx
);
447 nve0_fifo_swmthd(struct nve0_fifo_priv
*priv
, u32 chid
, u32 mthd
, u32 data
)
449 struct nve0_fifo_chan
*chan
= NULL
;
450 struct nouveau_handle
*bind
;
454 spin_lock_irqsave(&priv
->base
.lock
, flags
);
455 if (likely(chid
>= priv
->base
.min
&& chid
<= priv
->base
.max
))
456 chan
= (void *)priv
->base
.channel
[chid
];
460 bind
= nouveau_namedb_get_class(nv_namedb(chan
), 0x906e);
462 if (!mthd
|| !nv_call(bind
->object
, mthd
, data
))
464 nouveau_namedb_put(bind
);
468 spin_unlock_irqrestore(&priv
->base
.lock
, flags
);
473 nve0_fifo_isr_subfifo_intr(struct nve0_fifo_priv
*priv
, int unit
)
475 u32 stat
= nv_rd32(priv
, 0x040108 + (unit
* 0x2000));
476 u32 addr
= nv_rd32(priv
, 0x0400c0 + (unit
* 0x2000));
477 u32 data
= nv_rd32(priv
, 0x0400c4 + (unit
* 0x2000));
478 u32 chid
= nv_rd32(priv
, 0x040120 + (unit
* 0x2000)) & 0xfff;
479 u32 subc
= (addr
& 0x00070000) >> 16;
480 u32 mthd
= (addr
& 0x00003ffc);
483 if (stat
& 0x00200000) {
484 if (mthd
== 0x0054) {
485 if (!nve0_fifo_swmthd(priv
, chid
, 0x0500, 0x00000000))
490 if (stat
& 0x00800000) {
491 if (!nve0_fifo_swmthd(priv
, chid
, mthd
, data
))
496 nv_error(priv
, "SUBFIFO%d:", unit
);
497 nouveau_bitfield_print(nve0_fifo_subfifo_intr
, show
);
500 "SUBFIFO%d: ch %d [%s] subc %d mthd 0x%04x data 0x%08x\n",
502 nouveau_client_name_for_fifo_chid(&priv
->base
, chid
),
506 nv_wr32(priv
, 0x0400c0 + (unit
* 0x2000), 0x80600008);
507 nv_wr32(priv
, 0x040108 + (unit
* 0x2000), stat
);
511 nve0_fifo_intr(struct nouveau_subdev
*subdev
)
513 struct nve0_fifo_priv
*priv
= (void *)subdev
;
514 u32 mask
= nv_rd32(priv
, 0x002140);
515 u32 stat
= nv_rd32(priv
, 0x002100) & mask
;
517 if (stat
& 0x00000100) {
518 nv_warn(priv
, "unknown status 0x00000100\n");
519 nv_wr32(priv
, 0x002100, 0x00000100);
523 if (stat
& 0x10000000) {
524 u32 units
= nv_rd32(priv
, 0x00259c);
529 nve0_fifo_isr_vm_fault(priv
, i
);
533 nv_wr32(priv
, 0x00259c, units
);
537 if (stat
& 0x20000000) {
538 u32 units
= nv_rd32(priv
, 0x0025a0);
543 nve0_fifo_isr_subfifo_intr(priv
, i
);
547 nv_wr32(priv
, 0x0025a0, units
);
551 if (stat
& 0x40000000) {
552 nv_warn(priv
, "unknown status 0x40000000\n");
553 nv_mask(priv
, 0x002a00, 0x00000000, 0x00000000);
557 if (stat
& 0x80000000) {
558 nouveau_event_trigger(priv
->base
.uevent
, 0);
559 nv_wr32(priv
, 0x002100, 0x80000000);
564 nv_fatal(priv
, "unhandled status 0x%08x\n", stat
);
565 nv_wr32(priv
, 0x002100, stat
);
566 nv_wr32(priv
, 0x002140, 0);
571 nve0_fifo_uevent_enable(struct nouveau_event
*event
, int index
)
573 struct nve0_fifo_priv
*priv
= event
->priv
;
574 nv_mask(priv
, 0x002140, 0x80000000, 0x80000000);
578 nve0_fifo_uevent_disable(struct nouveau_event
*event
, int index
)
580 struct nve0_fifo_priv
*priv
= event
->priv
;
581 nv_mask(priv
, 0x002140, 0x80000000, 0x00000000);
585 nve0_fifo_ctor(struct nouveau_object
*parent
, struct nouveau_object
*engine
,
586 struct nouveau_oclass
*oclass
, void *data
, u32 size
,
587 struct nouveau_object
**pobject
)
589 struct nve0_fifo_priv
*priv
;
592 ret
= nouveau_fifo_create(parent
, engine
, oclass
, 0, 4095, &priv
);
593 *pobject
= nv_object(priv
);
597 for (i
= 0; i
< FIFO_ENGINE_NR
; i
++) {
598 ret
= nouveau_gpuobj_new(nv_object(priv
), NULL
, 0x8000, 0x1000,
599 0, &priv
->engine
[i
].playlist
[0]);
603 ret
= nouveau_gpuobj_new(nv_object(priv
), NULL
, 0x8000, 0x1000,
604 0, &priv
->engine
[i
].playlist
[1]);
609 ret
= nouveau_gpuobj_new(nv_object(priv
), NULL
, 4096 * 0x200, 0x1000,
610 NVOBJ_FLAG_ZERO_ALLOC
, &priv
->user
.mem
);
614 ret
= nouveau_gpuobj_map(priv
->user
.mem
, NV_MEM_ACCESS_RW
,
619 priv
->base
.uevent
->enable
= nve0_fifo_uevent_enable
;
620 priv
->base
.uevent
->disable
= nve0_fifo_uevent_disable
;
621 priv
->base
.uevent
->priv
= priv
;
623 nv_subdev(priv
)->unit
= 0x00000100;
624 nv_subdev(priv
)->intr
= nve0_fifo_intr
;
625 nv_engine(priv
)->cclass
= &nve0_fifo_cclass
;
626 nv_engine(priv
)->sclass
= nve0_fifo_sclass
;
631 nve0_fifo_dtor(struct nouveau_object
*object
)
633 struct nve0_fifo_priv
*priv
= (void *)object
;
636 nouveau_gpuobj_unmap(&priv
->user
.bar
);
637 nouveau_gpuobj_ref(NULL
, &priv
->user
.mem
);
639 for (i
= 0; i
< FIFO_ENGINE_NR
; i
++) {
640 nouveau_gpuobj_ref(NULL
, &priv
->engine
[i
].playlist
[1]);
641 nouveau_gpuobj_ref(NULL
, &priv
->engine
[i
].playlist
[0]);
644 nouveau_fifo_destroy(&priv
->base
);
648 nve0_fifo_init(struct nouveau_object
*object
)
650 struct nve0_fifo_priv
*priv
= (void *)object
;
653 ret
= nouveau_fifo_init(&priv
->base
);
657 /* enable all available PSUBFIFOs */
658 nv_wr32(priv
, 0x000204, 0xffffffff);
659 priv
->spoon_nr
= hweight32(nv_rd32(priv
, 0x000204));
660 nv_debug(priv
, "%d subfifo(s)\n", priv
->spoon_nr
);
663 for (i
= 0; i
< priv
->spoon_nr
; i
++) {
664 nv_mask(priv
, 0x04013c + (i
* 0x2000), 0x10000100, 0x00000000);
665 nv_wr32(priv
, 0x040108 + (i
* 0x2000), 0xffffffff); /* INTR */
666 nv_wr32(priv
, 0x04010c + (i
* 0x2000), 0xfffffeff); /* INTREN */
669 nv_wr32(priv
, 0x002254, 0x10000000 | priv
->user
.bar
.offset
>> 12);
671 nv_wr32(priv
, 0x002a00, 0xffffffff);
672 nv_wr32(priv
, 0x002100, 0xffffffff);
673 nv_wr32(priv
, 0x002140, 0x3fffffff);
677 struct nouveau_oclass
679 .handle
= NV_ENGINE(FIFO
, 0xe0),
680 .ofuncs
= &(struct nouveau_ofuncs
) {
681 .ctor
= nve0_fifo_ctor
,
682 .dtor
= nve0_fifo_dtor
,
683 .init
= nve0_fifo_init
,
684 .fini
= _nouveau_fifo_fini
,