/*
 * Copyright 2013 Ilia Mirkin
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */
22 #include <engine/xtensa.h>
23 #include <engine/fifo.h>
26 nvkm_xtensa_oclass_get(struct nvkm_oclass
*oclass
, int index
)
28 struct nvkm_xtensa
*xtensa
= nvkm_xtensa(oclass
->engine
);
31 while (xtensa
->func
->sclass
[c
].oclass
) {
33 oclass
->base
= xtensa
->func
->sclass
[index
];
42 nvkm_xtensa_cclass_bind(struct nvkm_object
*object
, struct nvkm_gpuobj
*parent
,
43 int align
, struct nvkm_gpuobj
**pgpuobj
)
45 return nvkm_gpuobj_new(object
->engine
->subdev
.device
, 0x10000, align
,
46 true, parent
, pgpuobj
);
49 static const struct nvkm_object_func
50 nvkm_xtensa_cclass
= {
51 .bind
= nvkm_xtensa_cclass_bind
,
55 nvkm_xtensa_intr(struct nvkm_engine
*engine
)
57 struct nvkm_xtensa
*xtensa
= nvkm_xtensa(engine
);
58 struct nvkm_subdev
*subdev
= &xtensa
->engine
.subdev
;
59 struct nvkm_device
*device
= subdev
->device
;
60 const u32 base
= xtensa
->addr
;
61 u32 unk104
= nvkm_rd32(device
, base
+ 0xd04);
62 u32 intr
= nvkm_rd32(device
, base
+ 0xc20);
63 u32 chan
= nvkm_rd32(device
, base
+ 0xc28);
64 u32 unk10c
= nvkm_rd32(device
, base
+ 0xd0c);
67 nvkm_warn(subdev
, "Watchdog interrupt, engine hung.\n");
68 nvkm_wr32(device
, base
+ 0xc20, intr
);
69 intr
= nvkm_rd32(device
, base
+ 0xc20);
70 if (unk104
== 0x10001 && unk10c
== 0x200 && chan
&& !intr
) {
71 nvkm_debug(subdev
, "Enabling FIFO_CTRL\n");
72 nvkm_mask(device
, xtensa
->addr
+ 0xd94, 0, xtensa
->func
->fifo_val
);
77 nvkm_xtensa_fini(struct nvkm_engine
*engine
, bool suspend
)
79 struct nvkm_xtensa
*xtensa
= nvkm_xtensa(engine
);
80 struct nvkm_device
*device
= xtensa
->engine
.subdev
.device
;
81 const u32 base
= xtensa
->addr
;
83 nvkm_wr32(device
, base
+ 0xd84, 0); /* INTR_EN */
84 nvkm_wr32(device
, base
+ 0xd94, 0); /* FIFO_CTRL */
87 nvkm_memory_del(&xtensa
->gpu_fw
);
92 nvkm_xtensa_init(struct nvkm_engine
*engine
)
94 struct nvkm_xtensa
*xtensa
= nvkm_xtensa(engine
);
95 struct nvkm_subdev
*subdev
= &xtensa
->engine
.subdev
;
96 struct nvkm_device
*device
= subdev
->device
;
97 const u32 base
= xtensa
->addr
;
98 const struct firmware
*fw
;
104 if (!xtensa
->gpu_fw
) {
105 snprintf(name
, sizeof(name
), "nouveau/nv84_xuc%03x",
108 ret
= request_firmware(&fw
, name
, nv_device_base(device
));
110 nvkm_warn(subdev
, "unable to load firmware %s\n", name
);
114 if (fw
->size
> 0x40000) {
115 nvkm_warn(subdev
, "firmware %s too large\n", name
);
116 release_firmware(fw
);
120 ret
= nvkm_memory_new(device
, NVKM_MEM_TARGET_INST
,
121 0x40000, 0x1000, false,
124 release_firmware(fw
);
128 nvkm_kmap(xtensa
->gpu_fw
);
129 for (i
= 0; i
< fw
->size
/ 4; i
++)
130 nvkm_wo32(xtensa
->gpu_fw
, i
* 4, *((u32
*)fw
->data
+ i
));
131 nvkm_done(xtensa
->gpu_fw
);
132 release_firmware(fw
);
135 addr
= nvkm_memory_addr(xtensa
->gpu_fw
);
136 size
= nvkm_memory_size(xtensa
->gpu_fw
);
138 nvkm_wr32(device
, base
+ 0xd10, 0x1fffffff); /* ?? */
139 nvkm_wr32(device
, base
+ 0xd08, 0x0fffffff); /* ?? */
141 nvkm_wr32(device
, base
+ 0xd28, xtensa
->func
->unkd28
); /* ?? */
142 nvkm_wr32(device
, base
+ 0xc20, 0x3f); /* INTR */
143 nvkm_wr32(device
, base
+ 0xd84, 0x3f); /* INTR_EN */
145 nvkm_wr32(device
, base
+ 0xcc0, addr
>> 8); /* XT_REGION_BASE */
146 nvkm_wr32(device
, base
+ 0xcc4, 0x1c); /* XT_REGION_SETUP */
147 nvkm_wr32(device
, base
+ 0xcc8, size
>> 8); /* XT_REGION_LIMIT */
149 tmp
= nvkm_rd32(device
, 0x0);
150 nvkm_wr32(device
, base
+ 0xde0, tmp
); /* SCRATCH_H2X */
152 nvkm_wr32(device
, base
+ 0xce8, 0xf); /* XT_REGION_SETUP */
154 nvkm_wr32(device
, base
+ 0xc20, 0x3f); /* INTR */
155 nvkm_wr32(device
, base
+ 0xd84, 0x3f); /* INTR_EN */
/* Destructor: return the container so the core can kfree() it. */
static void *
nvkm_xtensa_dtor(struct nvkm_engine *engine)
{
	return nvkm_xtensa(engine);
}
165 static const struct nvkm_engine_func
167 .dtor
= nvkm_xtensa_dtor
,
168 .init
= nvkm_xtensa_init
,
169 .fini
= nvkm_xtensa_fini
,
170 .intr
= nvkm_xtensa_intr
,
171 .fifo
.sclass
= nvkm_xtensa_oclass_get
,
172 .cclass
= &nvkm_xtensa_cclass
,
176 nvkm_xtensa_new_(const struct nvkm_xtensa_func
*func
,
177 struct nvkm_device
*device
, int index
, bool enable
,
178 u32 addr
, struct nvkm_engine
**pengine
)
180 struct nvkm_xtensa
*xtensa
;
182 if (!(xtensa
= kzalloc(sizeof(*xtensa
), GFP_KERNEL
)))
186 *pengine
= &xtensa
->engine
;
188 return nvkm_engine_ctor(&nvkm_xtensa
, device
, index
, func
->pmc_enable
,
189 enable
, &xtensa
->engine
);