/*
 * Copyright 2012 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */
#include "changk104.h"

#include <core/client.h>
#include <core/gpuobj.h>
#include <subdev/bar.h>
#include <subdev/top.h>
#include <engine/sw.h>

#include <nvif/class.h>
static int
gk104_fifo_class_get(struct nvkm_fifo *base, int index,
                     const struct nvkm_fifo_chan_oclass **psclass)
{
        struct gk104_fifo *fifo = gk104_fifo(base);
        int c = 0;

        while ((*psclass = fifo->func->chan[c])) {
                if (c++ == index)
                        return 0;
        }
        return c;
}
static void
gk104_fifo_uevent_fini(struct nvkm_fifo *fifo)
{
        struct nvkm_device *device = fifo->engine.subdev.device;
        nvkm_mask(device, 0x002140, 0x80000000, 0x00000000);
}
static void
gk104_fifo_uevent_init(struct nvkm_fifo *fifo)
{
        struct nvkm_device *device = fifo->engine.subdev.device;
        nvkm_mask(device, 0x002140, 0x80000000, 0x80000000);
}
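/* Commit the software channel list for runlist 'runl' to the hardware.
 * The runlist is double-buffered: entries are written into the inactive
 * buffer, the buffers are flipped, and the new base/count are handed to
 * the scheduler via 0x002270/0x002274.  The wait at the end appears to
 * poll a runlist-pending bit (0x002284 + runl * 8, bit 20) that the
 * hardware clears once it has fetched the update.
 */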
void
gk104_fifo_runlist_commit(struct gk104_fifo *fifo, int runl)
{
        struct gk104_fifo_chan *chan;
        struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
        struct nvkm_device *device = subdev->device;
        struct nvkm_memory *mem;
        int nr = 0;
        int target;

        mutex_lock(&subdev->mutex);
        mem = fifo->runlist[runl].mem[fifo->runlist[runl].next];
        fifo->runlist[runl].next = !fifo->runlist[runl].next;

        nvkm_kmap(mem);
        list_for_each_entry(chan, &fifo->runlist[runl].chan, head) {
                nvkm_wo32(mem, (nr * 8) + 0, chan->base.chid);
                nvkm_wo32(mem, (nr * 8) + 4, 0x00000000);
                nr++;
        }
        nvkm_done(mem);

        if (nvkm_memory_target(mem) == NVKM_MEM_TARGET_VRAM)
                target = 0;
        else
                target = 3;

        nvkm_wr32(device, 0x002270, (nvkm_memory_addr(mem) >> 12) |
                                    (target << 28));
        nvkm_wr32(device, 0x002274, (runl << 20) | nr);

        if (wait_event_timeout(fifo->runlist[runl].wait,
                               !(nvkm_rd32(device, 0x002284 + (runl * 0x08))
                                       & 0x00100000),
                               msecs_to_jiffies(2000)) == 0)
                nvkm_error(subdev, "runlist %d update timeout\n", runl);
        mutex_unlock(&subdev->mutex);
}
void
gk104_fifo_runlist_remove(struct gk104_fifo *fifo, struct gk104_fifo_chan *chan)
{
        mutex_lock(&fifo->base.engine.subdev.mutex);
        list_del_init(&chan->head);
        mutex_unlock(&fifo->base.engine.subdev.mutex);
}
void
gk104_fifo_runlist_insert(struct gk104_fifo *fifo, struct gk104_fifo_chan *chan)
{
        mutex_lock(&fifo->base.engine.subdev.mutex);
        list_add_tail(&chan->head, &fifo->runlist[chan->runl].chan);
        mutex_unlock(&fifo->base.engine.subdev.mutex);
}
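/* Deferred engine recovery.  Runs from the workqueue so it may sleep:
 * it latches the pending engine/runlist masks under the fifo lock,
 * disables scheduling on the affected runlists (0x002630), resets each
 * faulting engine with a fini/init cycle, recommits the runlists, and
 * finally re-enables scheduling.
 */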
static void
gk104_fifo_recover_work(struct work_struct *w)
{
        struct gk104_fifo *fifo = container_of(w, typeof(*fifo), recover.work);
        struct nvkm_device *device = fifo->base.engine.subdev.device;
        struct nvkm_engine *engine;
        unsigned long flags;
        u32 engm, runm, todo;
        int engn, runl;

        spin_lock_irqsave(&fifo->base.lock, flags);
        runm = fifo->recover.runm;
        engm = fifo->recover.engm;
        fifo->recover.engm = 0;
        fifo->recover.runm = 0;
        spin_unlock_irqrestore(&fifo->base.lock, flags);

        nvkm_mask(device, 0x002630, runm, runm);

        for (todo = engm; engn = __ffs(todo), todo; todo &= ~BIT(engn)) {
                if ((engine = fifo->engine[engn].engine)) {
                        nvkm_subdev_fini(&engine->subdev, false);
                        WARN_ON(nvkm_subdev_init(&engine->subdev));
                }
        }

        for (todo = runm; runl = __ffs(todo), todo; todo &= ~BIT(runl))
                gk104_fifo_runlist_commit(fifo, runl);

        nvkm_wr32(device, 0x00262c, runm);
        nvkm_mask(device, 0x002630, runm, 0x00000000);
}
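/* Called with fifo->base.lock held (see the assert below).  Kicks the
 * channel off the hardware (0x800004 + chid * 8, bit 11 presumably
 * marks the channel unserviceable), drops it from the software runlist,
 * and flags the engine and runlist for the recovery worker above.
 */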
static void
gk104_fifo_recover(struct gk104_fifo *fifo, struct nvkm_engine *engine,
                   struct gk104_fifo_chan *chan)
{
        struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
        struct nvkm_device *device = subdev->device;
        u32 chid = chan->base.chid;
        int engn;

        nvkm_error(subdev, "%s engine fault on channel %d, recovering...\n",
                   nvkm_subdev_name[engine->subdev.index], chid);
        assert_spin_locked(&fifo->base.lock);

        nvkm_mask(device, 0x800004 + (chid * 0x08), 0x00000800, 0x00000800);
        list_del_init(&chan->head);
        chan->killed = true;

        for (engn = 0; engn < fifo->engine_nr; engn++) {
                if (fifo->engine[engn].engine == engine) {
                        fifo->recover.engm |= BIT(engn);
                        break;
                }
        }

        fifo->recover.runm |= BIT(chan->runl);
        schedule_work(&fifo->recover.work);
}
static const struct nvkm_enum
gk104_fifo_bind_reason[] = {
        { 0x01, "BIND_NOT_UNBOUND" },
        { 0x02, "SNOOP_WITHOUT_BAR1" },
        { 0x03, "UNBIND_WHILE_RUNNING" },
        { 0x05, "INVALID_RUNLIST" },
        { 0x06, "INVALID_CTX_TGT" },
        { 0x0b, "UNBIND_WHILE_PARKED" },
        {}
};
static void
gk104_fifo_intr_bind(struct gk104_fifo *fifo)
{
        struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
        struct nvkm_device *device = subdev->device;
        u32 intr = nvkm_rd32(device, 0x00252c);
        u32 code = intr & 0x000000ff;
        const struct nvkm_enum *en =
                nvkm_enum_find(gk104_fifo_bind_reason, code);

        nvkm_error(subdev, "BIND_ERROR %02x [%s]\n", code, en ? en->name : "");
}
static const struct nvkm_enum
gk104_fifo_sched_reason[] = {
        { 0x0a, "CTXSW_TIMEOUT" },
        {}
};
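/* CTXSW_TIMEOUT handling: inspect each engine's context-switch status
 * word (0x002640 + engn * 8) to work out which channel is stuck on the
 * engine, then trigger recovery for it.  The busy/chsw/load/save bit
 * positions used below are inferred from this decoding.
 */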
static void
gk104_fifo_intr_sched_ctxsw(struct gk104_fifo *fifo)
{
        struct nvkm_device *device = fifo->base.engine.subdev.device;
        struct gk104_fifo_chan *chan;
        unsigned long flags;
        u32 engn;

        spin_lock_irqsave(&fifo->base.lock, flags);
        for (engn = 0; engn < fifo->engine_nr; engn++) {
                struct nvkm_engine *engine = fifo->engine[engn].engine;
                int runl = fifo->engine[engn].runl;
                u32 stat = nvkm_rd32(device, 0x002640 + (engn * 0x08));
                u32 busy = (stat & 0x80000000);
                u32 next = (stat & 0x0fff0000) >> 16;
                u32 chsw = (stat & 0x00008000);
                u32 save = (stat & 0x00004000);
                u32 load = (stat & 0x00002000);
                u32 prev = (stat & 0x00000fff);
                u32 chid = load ? next : prev;
                (void)save;

                if (busy && chsw) {
                        list_for_each_entry(chan, &fifo->runlist[runl].chan, head) {
                                if (chan->base.chid == chid && engine) {
                                        gk104_fifo_recover(fifo, engine, chan);
                                        break;
                                }
                        }
                }
        }
        spin_unlock_irqrestore(&fifo->base.lock, flags);
}
static void
gk104_fifo_intr_sched(struct gk104_fifo *fifo)
{
        struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
        struct nvkm_device *device = subdev->device;
        u32 intr = nvkm_rd32(device, 0x00254c);
        u32 code = intr & 0x000000ff;
        const struct nvkm_enum *en =
                nvkm_enum_find(gk104_fifo_sched_reason, code);

        nvkm_error(subdev, "SCHED_ERROR %02x [%s]\n", code, en ? en->name : "");

        switch (code) {
        case 0x0a:
                gk104_fifo_intr_sched_ctxsw(fifo);
                break;
        default:
                break;
        }
}
static void
gk104_fifo_intr_chsw(struct gk104_fifo *fifo)
{
        struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
        struct nvkm_device *device = subdev->device;
        u32 stat = nvkm_rd32(device, 0x00256c);
        nvkm_error(subdev, "CHSW_ERROR %08x\n", stat);
        nvkm_wr32(device, 0x00256c, stat);
}
static void
gk104_fifo_intr_dropped_fault(struct gk104_fifo *fifo)
{
        struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
        struct nvkm_device *device = subdev->device;
        u32 stat = nvkm_rd32(device, 0x00259c);
        nvkm_error(subdev, "DROPPED_MMU_FAULT %08x\n", stat);
}
static void
gk104_fifo_intr_fault(struct gk104_fifo *fifo, int unit)
{
        struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
        struct nvkm_device *device = subdev->device;
        u32 inst = nvkm_rd32(device, 0x002800 + (unit * 0x10));
        u32 valo = nvkm_rd32(device, 0x002804 + (unit * 0x10));
        u32 vahi = nvkm_rd32(device, 0x002808 + (unit * 0x10));
        u32 stat = nvkm_rd32(device, 0x00280c + (unit * 0x10));
        u32 gpc    = (stat & 0x1f000000) >> 24;
        u32 client = (stat & 0x00001f00) >> 8;
        u32 write  = (stat & 0x00000080);
        u32 hub    = (stat & 0x00000040);
        u32 reason = (stat & 0x0000000f);
        const struct nvkm_enum *er, *eu, *ec;
        struct nvkm_engine *engine = NULL;
        struct nvkm_fifo_chan *chan;
        unsigned long flags;
        char gpcid[8] = "", en[16] = "";

        er = nvkm_enum_find(fifo->func->fault.reason, reason);
        eu = nvkm_enum_find(fifo->func->fault.engine, unit);
        if (hub) {
                ec = nvkm_enum_find(fifo->func->fault.hubclient, client);
        } else {
                ec = nvkm_enum_find(fifo->func->fault.gpcclient, client);
                snprintf(gpcid, sizeof(gpcid), "GPC%d/", gpc);
        }

        if (eu && eu->data2) {
                switch (eu->data2) {
                case NVKM_SUBDEV_BAR:
                        nvkm_mask(device, 0x001704, 0x00000000, 0x00000000);
                        break;
                case NVKM_SUBDEV_INSTMEM:
                        nvkm_mask(device, 0x001714, 0x00000000, 0x00000000);
                        break;
                case NVKM_ENGINE_IFB:
                        nvkm_mask(device, 0x001718, 0x00000000, 0x00000000);
                        break;
                default:
                        engine = nvkm_device_engine(device, eu->data2);
                        break;
                }
        }

        if (eu == NULL) {
                enum nvkm_devidx engidx = nvkm_top_fault(device->top, unit);
                if (engidx < NVKM_SUBDEV_NR) {
                        const char *src = nvkm_subdev_name[engidx];
                        char *dst = en;
                        do {
                                *dst++ = toupper(*src++);
                        } while (*src);
                        engine = nvkm_device_engine(device, engidx);
                }
        } else {
                snprintf(en, sizeof(en), "%s", eu->name);
        }

        chan = nvkm_fifo_chan_inst(&fifo->base, (u64)inst << 12, &flags);

        nvkm_error(subdev,
                   "%s fault at %010llx engine %02x [%s] client %02x [%s%s] "
                   "reason %02x [%s] on channel %d [%010llx %s]\n",
                   write ? "write" : "read", (u64)vahi << 32 | valo,
                   unit, en, client, gpcid, ec ? ec->name : "",
                   reason, er ? er->name : "", chan ? chan->chid : -1,
                   chan ? chan->inst->addr : 0,
                   chan ? chan->object.client->name : "unknown");

        if (engine && chan)
                gk104_fifo_recover(fifo, engine, (void *)chan);
        nvkm_fifo_chan_put(&fifo->base, flags, &chan);
}
static const struct nvkm_bitfield gk104_fifo_pbdma_intr_0[] = {
        { 0x00000001, "MEMREQ" },
        { 0x00000002, "MEMACK_TIMEOUT" },
        { 0x00000004, "MEMACK_EXTRA" },
        { 0x00000008, "MEMDAT_TIMEOUT" },
        { 0x00000010, "MEMDAT_EXTRA" },
        { 0x00000020, "MEMFLUSH" },
        { 0x00000040, "MEMOP" },
        { 0x00000080, "LBCONNECT" },
        { 0x00000100, "LBREQ" },
        { 0x00000200, "LBACK_TIMEOUT" },
        { 0x00000400, "LBACK_EXTRA" },
        { 0x00000800, "LBDAT_TIMEOUT" },
        { 0x00001000, "LBDAT_EXTRA" },
        { 0x00002000, "GPFIFO" },
        { 0x00004000, "GPPTR" },
        { 0x00008000, "GPENTRY" },
        { 0x00010000, "GPCRC" },
        { 0x00020000, "PBPTR" },
        { 0x00040000, "PBENTRY" },
        { 0x00080000, "PBCRC" },
        { 0x00100000, "XBARCONNECT" },
        { 0x00200000, "METHOD" },
        { 0x00400000, "METHODCRC" },
        { 0x00800000, "DEVICE" },
        { 0x02000000, "SEMAPHORE" },
        { 0x04000000, "ACQUIRE" },
        { 0x08000000, "PRI" },
        { 0x20000000, "NO_CTXSW_SEG" },
        { 0x40000000, "PBSEG" },
        { 0x80000000, "SIGNATURE" },
        {}
};
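/* PBDMA interrupt handler, part 0.  Most bits are simply reported, but
 * DEVICE (0x00800000) is how software methods surface: they are
 * forwarded to the SW engine and, if handled there, suppressed from the
 * error message before the status is acked.
 */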
static void
gk104_fifo_intr_pbdma_0(struct gk104_fifo *fifo, int unit)
{
        struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
        struct nvkm_device *device = subdev->device;
        u32 mask = nvkm_rd32(device, 0x04010c + (unit * 0x2000));
        u32 stat = nvkm_rd32(device, 0x040108 + (unit * 0x2000)) & mask;
        u32 addr = nvkm_rd32(device, 0x0400c0 + (unit * 0x2000));
        u32 data = nvkm_rd32(device, 0x0400c4 + (unit * 0x2000));
        u32 chid = nvkm_rd32(device, 0x040120 + (unit * 0x2000)) & 0xfff;
        u32 subc = (addr & 0x00070000) >> 16;
        u32 mthd = (addr & 0x00003ffc);
        u32 show = stat;
        struct nvkm_fifo_chan *chan;
        unsigned long flags;
        char msg[128];

        if (stat & 0x00800000) {
                if (device->sw) {
                        if (nvkm_sw_mthd(device->sw, chid, subc, mthd, data))
                                show &= ~0x00800000;
                }
                nvkm_wr32(device, 0x0400c0 + (unit * 0x2000), 0x80600008);
        }

        if (show) {
                nvkm_snprintbf(msg, sizeof(msg), gk104_fifo_pbdma_intr_0, show);
                chan = nvkm_fifo_chan_chid(&fifo->base, chid, &flags);
                nvkm_error(subdev, "PBDMA%d: %08x [%s] ch %d [%010llx %s] "
                                   "subc %d mthd %04x data %08x\n",
                           unit, show, msg, chid, chan ? chan->inst->addr : 0,
                           chan ? chan->object.client->name : "unknown",
                           subc, mthd, data);
                nvkm_fifo_chan_put(&fifo->base, flags, &chan);
        }

        nvkm_wr32(device, 0x040108 + (unit * 0x2000), stat);
}
static const struct nvkm_bitfield gk104_fifo_pbdma_intr_1[] = {
        { 0x00000001, "HCE_RE_ILLEGAL_OP" },
        { 0x00000002, "HCE_RE_ALIGNB" },
        { 0x00000004, "HCE_PRIV" },
        { 0x00000008, "HCE_ILLEGAL_MTHD" },
        { 0x00000010, "HCE_ILLEGAL_CLASS" },
        {}
};
static void
gk104_fifo_intr_pbdma_1(struct gk104_fifo *fifo, int unit)
{
        struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
        struct nvkm_device *device = subdev->device;
        u32 mask = nvkm_rd32(device, 0x04014c + (unit * 0x2000));
        u32 stat = nvkm_rd32(device, 0x040148 + (unit * 0x2000)) & mask;
        u32 chid = nvkm_rd32(device, 0x040120 + (unit * 0x2000)) & 0xfff;
        char msg[128];

        if (stat) {
                nvkm_snprintbf(msg, sizeof(msg), gk104_fifo_pbdma_intr_1, stat);
                nvkm_error(subdev, "PBDMA%d: %08x [%s] ch %d %08x %08x\n",
                           unit, stat, msg, chid,
                           nvkm_rd32(device, 0x040150 + (unit * 0x2000)),
                           nvkm_rd32(device, 0x040154 + (unit * 0x2000)));
        }

        nvkm_wr32(device, 0x040148 + (unit * 0x2000), stat);
}
static void
gk104_fifo_intr_runlist(struct gk104_fifo *fifo)
{
        struct nvkm_device *device = fifo->base.engine.subdev.device;
        u32 mask = nvkm_rd32(device, 0x002a00);
        while (mask) {
                int runl = __ffs(mask);
                wake_up(&fifo->runlist[runl].wait);
                nvkm_wr32(device, 0x002a00, 1 << runl);
                mask &= ~(1 << runl);
        }
}
static void
gk104_fifo_intr_engine(struct gk104_fifo *fifo)
{
        nvkm_fifo_uevent(&fifo->base);
}
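/* Top-level FIFO interrupt dispatch.  0x002100 is the interrupt status
 * and 0x002140 the enable mask; each handled source is acked by writing
 * its bit back to 0x002100.  Anything left unhandled is masked off so a
 * stuck source cannot wedge the system with an interrupt storm.
 */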
static void
gk104_fifo_intr(struct nvkm_fifo *base)
{
        struct gk104_fifo *fifo = gk104_fifo(base);
        struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
        struct nvkm_device *device = subdev->device;
        u32 mask = nvkm_rd32(device, 0x002140);
        u32 stat = nvkm_rd32(device, 0x002100) & mask;

        if (stat & 0x00000001) {
                gk104_fifo_intr_bind(fifo);
                nvkm_wr32(device, 0x002100, 0x00000001);
                stat &= ~0x00000001;
        }

        if (stat & 0x00000010) {
                nvkm_error(subdev, "PIO_ERROR\n");
                nvkm_wr32(device, 0x002100, 0x00000010);
                stat &= ~0x00000010;
        }

        if (stat & 0x00000100) {
                gk104_fifo_intr_sched(fifo);
                nvkm_wr32(device, 0x002100, 0x00000100);
                stat &= ~0x00000100;
        }

        if (stat & 0x00010000) {
                gk104_fifo_intr_chsw(fifo);
                nvkm_wr32(device, 0x002100, 0x00010000);
                stat &= ~0x00010000;
        }

        if (stat & 0x00800000) {
                nvkm_error(subdev, "FB_FLUSH_TIMEOUT\n");
                nvkm_wr32(device, 0x002100, 0x00800000);
                stat &= ~0x00800000;
        }

        if (stat & 0x01000000) {
                nvkm_error(subdev, "LB_ERROR\n");
                nvkm_wr32(device, 0x002100, 0x01000000);
                stat &= ~0x01000000;
        }

        if (stat & 0x08000000) {
                gk104_fifo_intr_dropped_fault(fifo);
                nvkm_wr32(device, 0x002100, 0x08000000);
                stat &= ~0x08000000;
        }

        if (stat & 0x10000000) {
                u32 mask = nvkm_rd32(device, 0x00259c);
                while (mask) {
                        u32 unit = __ffs(mask);
                        gk104_fifo_intr_fault(fifo, unit);
                        nvkm_wr32(device, 0x00259c, (1 << unit));
                        mask &= ~(1 << unit);
                }
                stat &= ~0x10000000;
        }

        if (stat & 0x20000000) {
                u32 mask = nvkm_rd32(device, 0x0025a0);
                while (mask) {
                        u32 unit = __ffs(mask);
                        gk104_fifo_intr_pbdma_0(fifo, unit);
                        gk104_fifo_intr_pbdma_1(fifo, unit);
                        nvkm_wr32(device, 0x0025a0, (1 << unit));
                        mask &= ~(1 << unit);
                }
                stat &= ~0x20000000;
        }

        if (stat & 0x40000000) {
                gk104_fifo_intr_runlist(fifo);
                stat &= ~0x40000000;
        }

        if (stat & 0x80000000) {
                nvkm_wr32(device, 0x002100, 0x80000000);
                gk104_fifo_intr_engine(fifo);
                stat &= ~0x80000000;
        }

        if (stat) {
                nvkm_error(subdev, "INTR %08x\n", stat);
                nvkm_mask(device, 0x002140, stat, 0x00000000);
                nvkm_wr32(device, 0x002100, stat);
        }
}
static void
gk104_fifo_fini(struct nvkm_fifo *base)
{
        struct gk104_fifo *fifo = gk104_fifo(base);
        struct nvkm_device *device = fifo->base.engine.subdev.device;
        flush_work(&fifo->recover.work);
        /* allow mmu fault interrupts, even when we're not using fifo */
        nvkm_mask(device, 0x002140, 0x10000000, 0x10000000);
}
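/* One-time setup: probe the number of PBDMA units, read the
 * PBDMA<->runlist routing back from hardware, build the engine/runlist
 * tables from the topology device info, then allocate the
 * double-buffered runlist memory plus the BAR-mapped USERD area.
 */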
static int
gk104_fifo_oneinit(struct nvkm_fifo *base)
{
        struct gk104_fifo *fifo = gk104_fifo(base);
        struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
        struct nvkm_device *device = subdev->device;
        struct nvkm_top *top = device->top;
        int engn, runl, pbid, ret, i, j;
        enum nvkm_devidx engidx;
        u32 *map;

        /* Determine number of PBDMAs by checking valid enable bits. */
        nvkm_wr32(device, 0x000204, 0xffffffff);
        fifo->pbdma_nr = hweight32(nvkm_rd32(device, 0x000204));
        nvkm_debug(subdev, "%d PBDMA(s)\n", fifo->pbdma_nr);

        /* Read PBDMA->runlist(s) mapping from HW. */
        if (!(map = kzalloc(sizeof(*map) * fifo->pbdma_nr, GFP_KERNEL)))
                return -ENOMEM;

        for (i = 0; i < fifo->pbdma_nr; i++)
                map[i] = nvkm_rd32(device, 0x002390 + (i * 0x04));

        /* Determine runlist configuration from topology device info. */
        i = 0;
        while ((int)(engidx = nvkm_top_engine(top, i++, &runl, &engn)) >= 0) {
                /* Determine which PBDMA handles requests for this engine. */
                for (j = 0, pbid = -1; j < fifo->pbdma_nr; j++) {
                        if (map[j] & (1 << runl)) {
                                pbid = j;
                                break;
                        }
                }

                nvkm_debug(subdev, "engine %2d: runlist %2d pbdma %2d\n",
                           engn, runl, pbid);

                fifo->engine[engn].engine = nvkm_device_engine(device, engidx);
                fifo->engine[engn].runl = runl;
                fifo->engine[engn].pbid = pbid;
                fifo->engine_nr = max(fifo->engine_nr, engn + 1);
                fifo->runlist[runl].engm |= 1 << engn;
                fifo->runlist_nr = max(fifo->runlist_nr, runl + 1);
        }

        kfree(map);

        for (i = 0; i < fifo->runlist_nr; i++) {
                ret = nvkm_memory_new(device, NVKM_MEM_TARGET_INST,
                                      0x8000, 0x1000, false,
                                      &fifo->runlist[i].mem[0]);
                if (ret)
                        return ret;

                ret = nvkm_memory_new(device, NVKM_MEM_TARGET_INST,
                                      0x8000, 0x1000, false,
                                      &fifo->runlist[i].mem[1]);
                if (ret)
                        return ret;

                init_waitqueue_head(&fifo->runlist[i].wait);
                INIT_LIST_HEAD(&fifo->runlist[i].chan);
        }

        ret = nvkm_memory_new(device, NVKM_MEM_TARGET_INST,
                              fifo->base.nr * 0x200, 0x1000, true,
                              &fifo->user.mem);
        if (ret)
                return ret;

        ret = nvkm_bar_umap(device->bar, fifo->base.nr * 0x200, 12,
                            &fifo->user.bar);
        if (ret)
                return ret;

        nvkm_memory_map(fifo->user.mem, &fifo->user.bar, 0);
        return 0;
}
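/* Per-init hardware setup: re-enable the PBDMA units, clear and unmask
 * their interrupts, and point the hardware at the USERD aperture before
 * unmasking the top-level FIFO interrupts.
 */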
static void
gk104_fifo_init(struct nvkm_fifo *base)
{
        struct gk104_fifo *fifo = gk104_fifo(base);
        struct nvkm_device *device = fifo->base.engine.subdev.device;
        int i;

        /* Enable PBDMAs. */
        nvkm_wr32(device, 0x000204, (1 << fifo->pbdma_nr) - 1);

        /* PBDMA[n] */
        for (i = 0; i < fifo->pbdma_nr; i++) {
                nvkm_mask(device, 0x04013c + (i * 0x2000), 0x10000100, 0x00000000);
                nvkm_wr32(device, 0x040108 + (i * 0x2000), 0xffffffff); /* INTR */
                nvkm_wr32(device, 0x04010c + (i * 0x2000), 0xfffffeff); /* INTREN */
        }

        /* PBDMA[n].HCE */
        for (i = 0; i < fifo->pbdma_nr; i++) {
                nvkm_wr32(device, 0x040148 + (i * 0x2000), 0xffffffff); /* INTR */
                nvkm_wr32(device, 0x04014c + (i * 0x2000), 0xffffffff); /* INTREN */
        }

        nvkm_wr32(device, 0x002254, 0x10000000 | fifo->user.bar.offset >> 12);

        nvkm_wr32(device, 0x002100, 0xffffffff);
        nvkm_wr32(device, 0x002140, 0x7fffffff);
}
static void *
gk104_fifo_dtor(struct nvkm_fifo *base)
{
        struct gk104_fifo *fifo = gk104_fifo(base);
        int i;

        nvkm_vm_put(&fifo->user.bar);
        nvkm_memory_del(&fifo->user.mem);

        for (i = 0; i < fifo->runlist_nr; i++) {
                nvkm_memory_del(&fifo->runlist[i].mem[1]);
                nvkm_memory_del(&fifo->runlist[i].mem[0]);
        }

        return fifo;
}
static const struct nvkm_fifo_func
gk104_fifo_ = {
        .dtor = gk104_fifo_dtor,
        .oneinit = gk104_fifo_oneinit,
        .init = gk104_fifo_init,
        .fini = gk104_fifo_fini,
        .intr = gk104_fifo_intr,
        .uevent_init = gk104_fifo_uevent_init,
        .uevent_fini = gk104_fifo_uevent_fini,
        .class_get = gk104_fifo_class_get,
};
int
gk104_fifo_new_(const struct gk104_fifo_func *func, struct nvkm_device *device,
                int index, int nr, struct nvkm_fifo **pfifo)
{
        struct gk104_fifo *fifo;

        if (!(fifo = kzalloc(sizeof(*fifo), GFP_KERNEL)))
                return -ENOMEM;
        fifo->func = func;
        INIT_WORK(&fifo->recover.work, gk104_fifo_recover_work);
        *pfifo = &fifo->base;

        return nvkm_fifo_ctor(&gk104_fifo_, device, index, nr, &fifo->base);
}
const struct nvkm_enum
gk104_fifo_fault_engine[] = {
        { 0x00, "GR", NULL, NVKM_ENGINE_GR },
        { 0x03, "IFB", NULL, NVKM_ENGINE_IFB },
        { 0x04, "BAR1", NULL, NVKM_SUBDEV_BAR },
        { 0x05, "BAR2", NULL, NVKM_SUBDEV_INSTMEM },
        { 0x10, "MSVLD", NULL, NVKM_ENGINE_MSVLD },
        { 0x11, "MSPPP", NULL, NVKM_ENGINE_MSPPP },
        { 0x14, "MSPDEC", NULL, NVKM_ENGINE_MSPDEC },
        { 0x15, "CE0", NULL, NVKM_ENGINE_CE0 },
        { 0x16, "CE1", NULL, NVKM_ENGINE_CE1 },
        { 0x19, "MSENC", NULL, NVKM_ENGINE_MSENC },
        { 0x1b, "CE2", NULL, NVKM_ENGINE_CE2 },
        {}
};
const struct nvkm_enum
gk104_fifo_fault_reason[] = {
        { 0x01, "PDE_SIZE" },
        { 0x03, "VA_LIMIT_VIOLATION" },
        { 0x04, "UNBOUND_INST_BLOCK" },
        { 0x05, "PRIV_VIOLATION" },
        { 0x06, "RO_VIOLATION" },
        { 0x07, "WO_VIOLATION" },
        { 0x08, "PITCH_MASK_VIOLATION" },
        { 0x09, "WORK_CREATION" },
        { 0x0a, "UNSUPPORTED_APERTURE" },
        { 0x0b, "COMPRESSION_FAILURE" },
        { 0x0c, "UNSUPPORTED_KIND" },
        { 0x0d, "REGION_VIOLATION" },
        { 0x0e, "BOTH_PTES_VALID" },
        { 0x0f, "INFO_TYPE_POISONED" },
        {}
};
const struct nvkm_enum
gk104_fifo_fault_hubclient[] = {
        { 0x07, "HOST_CPU" },
        { 0x08, "HOST_CPU_NB" },
        { 0x13, "RASTERTWOD" },
        {}
};
const struct nvkm_enum
gk104_fifo_fault_gpcclient[] = {
        { 0x00, "L1_0" }, { 0x01, "T1_0" }, { 0x02, "PE_0" },
        { 0x03, "L1_1" }, { 0x04, "T1_1" }, { 0x05, "PE_1" },
        { 0x06, "L1_2" }, { 0x07, "T1_2" }, { 0x08, "PE_2" },
        { 0x09, "L1_3" }, { 0x0a, "T1_3" }, { 0x0b, "PE_3" },
        { 0x13, "L1_4" }, { 0x14, "T1_4" }, { 0x15, "PE_4" },
        { 0x16, "L1_5" }, { 0x17, "T1_5" }, { 0x18, "PE_5" },
        { 0x19, "L1_6" }, { 0x1a, "T1_6" }, { 0x1b, "PE_6" },
        { 0x1c, "L1_7" }, { 0x1d, "T1_7" }, { 0x1e, "PE_7" },
        { 0x20, "LTP_UTLB_0" },
        { 0x21, "LTP_UTLB_1" },
        { 0x22, "LTP_UTLB_2" },
        { 0x23, "LTP_UTLB_3" },
        { 0x24, "GPC_RGG_UTLB" },
        {}
};
static const struct gk104_fifo_func
gk104_fifo = {
        .fault.engine = gk104_fifo_fault_engine,
        .fault.reason = gk104_fifo_fault_reason,
        .fault.hubclient = gk104_fifo_fault_hubclient,
        .fault.gpcclient = gk104_fifo_fault_gpcclient,
        .chan = {
                &gk104_fifo_gpfifo_oclass,
                NULL
        },
};
int
gk104_fifo_new(struct nvkm_device *device, int index, struct nvkm_fifo **pfifo)
{
        return gk104_fifo_new_(&gk104_fifo, device, index, 4096, pfifo);
}