/* drm/nouveau/core: pull in most of the new core infrastructure
 * drivers/gpu/drm/nouveau/core/subdev/device/base.c
 */
1 /*
2 * Copyright 2012 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Ben Skeggs
23 */
24
25 #include <core/object.h>
26 #include <core/device.h>
27 #include <core/client.h>
28 #include <core/device.h>
29 #include <core/option.h>
30
31 #include <core/class.h>
32
33 #include <subdev/device.h>
34
35 static DEFINE_MUTEX(nv_devices_mutex);
36 static LIST_HEAD(nv_devices);
37
38 struct nouveau_device *
39 nouveau_device_find(u64 name)
40 {
41 struct nouveau_device *device, *match = NULL;
42 mutex_lock(&nv_devices_mutex);
43 list_for_each_entry(device, &nv_devices, head) {
44 if (device->handle == name) {
45 match = device;
46 break;
47 }
48 }
49 mutex_unlock(&nv_devices_mutex);
50 return match;
51 }
52
53 /******************************************************************************
54 * nouveau_devobj (0x0080): class implementation
55 *****************************************************************************/
/* Per-client instance of the 0x0080 device class.  It holds a reference
 * on every subdev/engine object instantiated on the client's behalf so
 * they stay alive for the lifetime of the handle.
 */
struct nouveau_devobj {
	struct nouveau_parent base;	/* must stay first: code casts nouveau_object* to nouveau_devobj* */
	struct nouveau_object *subdev[NVDEV_SUBDEV_NR];	/* held subdev/engine references */
	bool created;	/* set once the first init pass has completed */
};
61
/* Translation table from internal subdev/engine index to the
 * NV_DEVICE_DISABLE_* bit in the client-supplied disable mask that
 * switches it off.  Most core subdevs share the single
 * NV_DEVICE_DISABLE_CORE bit; individual engines have their own bits.
 */
static const u64 disable_map[] = {
	[NVDEV_SUBDEV_VBIOS]	= NV_DEVICE_DISABLE_VBIOS,
	[NVDEV_SUBDEV_GPIO]	= NV_DEVICE_DISABLE_CORE,
	[NVDEV_SUBDEV_I2C]	= NV_DEVICE_DISABLE_CORE,
	[NVDEV_SUBDEV_DEVINIT]	= NV_DEVICE_DISABLE_CORE,
	[NVDEV_SUBDEV_MC]	= NV_DEVICE_DISABLE_CORE,
	[NVDEV_SUBDEV_TIMER]	= NV_DEVICE_DISABLE_CORE,
	[NVDEV_SUBDEV_FB]	= NV_DEVICE_DISABLE_CORE,
	[NVDEV_SUBDEV_VM]	= NV_DEVICE_DISABLE_CORE,
	[NVDEV_SUBDEV_INSTMEM]	= NV_DEVICE_DISABLE_CORE,
	[NVDEV_SUBDEV_BAR]	= NV_DEVICE_DISABLE_CORE,
	[NVDEV_SUBDEV_VOLT]	= NV_DEVICE_DISABLE_CORE,
	[NVDEV_SUBDEV_FAN0]	= NV_DEVICE_DISABLE_CORE,
	[NVDEV_SUBDEV_CLOCK]	= NV_DEVICE_DISABLE_CORE,
	[NVDEV_SUBDEV_THERM]	= NV_DEVICE_DISABLE_CORE,
	[NVDEV_ENGINE_DMAOBJ]	= NV_DEVICE_DISABLE_CORE,
	[NVDEV_ENGINE_GR]	= NV_DEVICE_DISABLE_GRAPH,
	[NVDEV_ENGINE_MPEG]	= NV_DEVICE_DISABLE_MPEG,
	[NVDEV_ENGINE_ME]	= NV_DEVICE_DISABLE_ME,
	[NVDEV_ENGINE_VP]	= NV_DEVICE_DISABLE_VP,
	[NVDEV_ENGINE_CRYPT]	= NV_DEVICE_DISABLE_CRYPT,
	[NVDEV_ENGINE_BSP]	= NV_DEVICE_DISABLE_BSP,
	[NVDEV_ENGINE_PPP]	= NV_DEVICE_DISABLE_PPP,
	[NVDEV_ENGINE_COPY0]	= NV_DEVICE_DISABLE_COPY0,
	[NVDEV_ENGINE_COPY1]	= NV_DEVICE_DISABLE_COPY1,
	[NVDEV_ENGINE_UNK1C1]	= NV_DEVICE_DISABLE_UNK1C1,
	[NVDEV_ENGINE_FIFO]	= NV_DEVICE_DISABLE_FIFO,
	[NVDEV_ENGINE_DISP]	= NV_DEVICE_DISABLE_DISP,
	[NVDEV_SUBDEV_NR]	= 0,	/* sentinel, sizes the array */
};
92
93 static int
94 nouveau_devobj_ctor(struct nouveau_object *parent,
95 struct nouveau_object *engine,
96 struct nouveau_oclass *oclass, void *data, u32 size,
97 struct nouveau_object **pobject)
98 {
99 struct nouveau_client *client = nv_client(parent);
100 struct nouveau_object *subdev = NULL;
101 struct nouveau_device *device;
102 struct nouveau_devobj *devobj;
103 struct nv_device_class *args = data;
104 u64 disable, boot0, strap;
105 u64 mmio_base, mmio_size;
106 void __iomem *map;
107 int ret, i;
108
109 if (size < sizeof(struct nv_device_class))
110 return -EINVAL;
111
112 /* find the device subdev that matches what the client requested */
113 device = nv_device(client->device);
114 if (args->device != ~0) {
115 device = nouveau_device_find(args->device);
116 if (!device)
117 return -ENODEV;
118 }
119
120 ret = nouveau_parent_create(parent, nv_object(device), oclass, 0, NULL,
121 (1ULL << NVDEV_ENGINE_DMAOBJ) |
122 (1ULL << NVDEV_ENGINE_FIFO) |
123 (1ULL << NVDEV_ENGINE_DISP), &devobj);
124 *pobject = nv_object(devobj);
125 if (ret)
126 return ret;
127
128 mmio_base = pci_resource_start(device->pdev, 0);
129 mmio_size = pci_resource_len(device->pdev, 0);
130
131 /* translate api disable mask into internal mapping */
132 disable = args->debug0;
133 for (i = 0; i < NVDEV_SUBDEV_NR; i++) {
134 if (args->disable & disable_map[i])
135 disable |= (1ULL << i);
136 }
137
138 /* identify the chipset, and determine classes of subdev/engines */
139 if (!(args->disable & NV_DEVICE_DISABLE_IDENTIFY) &&
140 !device->card_type) {
141 map = ioremap(mmio_base, 0x102000);
142 if (map == NULL) {
143 }
144
145 /* switch mmio to cpu's native endianness */
146 #ifndef __BIG_ENDIAN
147 if (ioread32_native(map + 0x000004) != 0x00000000)
148 #else
149 if (ioread32_native(map + 0x000004) == 0x00000000)
150 #endif
151 iowrite32_native(0x01000001, map + 0x000004);
152
153 /* read boot0 and strapping information */
154 boot0 = ioread32_native(map + 0x000000);
155 strap = ioread32_native(map + 0x101000);
156 iounmap(map);
157
158 /* determine chipset and derive architecture from it */
159 if ((boot0 & 0x0f000000) > 0) {
160 device->chipset = (boot0 & 0xff00000) >> 20;
161 switch (device->chipset & 0xf0) {
162 case 0x10: device->card_type = NV_10; break;
163 case 0x20: device->card_type = NV_20; break;
164 case 0x30: device->card_type = NV_30; break;
165 case 0x40:
166 case 0x60: device->card_type = NV_40; break;
167 case 0x50:
168 case 0x80:
169 case 0x90:
170 case 0xa0: device->card_type = NV_50; break;
171 case 0xc0: device->card_type = NV_C0; break;
172 case 0xd0: device->card_type = NV_D0; break;
173 case 0xe0: device->card_type = NV_E0; break;
174 default:
175 break;
176 }
177 } else
178 if ((boot0 & 0xff00fff0) == 0x20004000) {
179 if (boot0 & 0x00f00000)
180 device->chipset = 0x05;
181 else
182 device->chipset = 0x04;
183 device->card_type = NV_04;
184 }
185
186 switch (device->card_type) {
187 case NV_04: ret = nv04_identify(device); break;
188 case NV_10: ret = nv10_identify(device); break;
189 case NV_20: ret = nv20_identify(device); break;
190 case NV_30: ret = nv30_identify(device); break;
191 case NV_40: ret = nv40_identify(device); break;
192 case NV_50: ret = nv50_identify(device); break;
193 case NV_C0:
194 case NV_D0: ret = nvc0_identify(device); break;
195 case NV_E0: ret = nve0_identify(device); break;
196 default:
197 ret = -EINVAL;
198 break;
199 }
200
201 if (ret) {
202 nv_error(device, "unknown chipset, 0x%08x\n", boot0);
203 return ret;
204 }
205
206 nv_info(device, "BOOT0 : 0x%08x\n", boot0);
207 nv_info(device, "Chipset: NV%02X\n", device->chipset);
208 nv_info(device, "Family : NV%02X\n", device->card_type);
209
210 /* determine frequency of timing crystal */
211 if ( device->chipset < 0x17 ||
212 (device->chipset >= 0x20 && device->chipset <= 0x25))
213 strap &= 0x00000040;
214 else
215 strap &= 0x00400040;
216
217 switch (strap) {
218 case 0x00000000: device->crystal = 13500; break;
219 case 0x00000040: device->crystal = 14318; break;
220 case 0x00400000: device->crystal = 27000; break;
221 case 0x00400040: device->crystal = 25000; break;
222 }
223
224 nv_debug(device, "crystal freq: %dKHz\n", device->crystal);
225 }
226
227 if (!(args->disable & NV_DEVICE_DISABLE_MMIO) &&
228 !nv_subdev(device)->mmio) {
229 nv_subdev(device)->mmio = ioremap(mmio_base, mmio_size);
230 if (!nv_subdev(device)->mmio) {
231 nv_error(device, "unable to map device registers\n");
232 return ret;
233 }
234 }
235
236 /* ensure requested subsystems are available for use */
237 for (i = 0; i < NVDEV_SUBDEV_NR; i++) {
238 if (!(oclass = device->oclass[i]) || (disable & (1ULL << i)))
239 continue;
240
241 if (!device->subdev[i]) {
242 ret = nouveau_object_ctor(nv_object(device), NULL,
243 oclass, NULL, i, &subdev);
244 if (ret)
245 return ret;
246
247 if (nv_iclass(subdev, NV_ENGINE_CLASS))
248 nouveau_subdev_reset(subdev);
249 } else {
250 nouveau_object_ref(device->subdev[i], &subdev);
251 }
252
253 if (!nv_iclass(subdev, NV_ENGINE_CLASS)) {
254 ret = nouveau_object_inc(subdev);
255 if (ret) {
256 nouveau_object_ref(NULL, &subdev);
257 return ret;
258 }
259 }
260
261 nouveau_object_ref(subdev, &devobj->subdev[i]);
262 nouveau_object_ref(NULL, &subdev);
263 }
264
265 return 0;
266 }
267
268 static void
269 nouveau_devobj_dtor(struct nouveau_object *object)
270 {
271 struct nouveau_devobj *devobj = (void *)object;
272 int i;
273
274 for (i = NVDEV_SUBDEV_NR - 1; i >= 0; i--)
275 nouveau_object_ref(NULL, &devobj->subdev[i]);
276
277 nouveau_parent_destroy(&devobj->base);
278 }
279
/* nouveau_devobj_init() - start (or restart) the device object
 *
 * On the very first call devobj->created is still false, so the loop
 * is skipped: the constructor already brought each subdev up.  On
 * later calls (e.g. resume) every non-engine subdev is re-referenced
 * via nouveau_object_inc().
 *
 * Returns 0 on success, or the first failing subdev's error after
 * unwinding the references already taken.
 */
static int
nouveau_devobj_init(struct nouveau_object *object)
{
	struct nouveau_devobj *devobj = (void *)object;
	struct nouveau_object *subdev;
	int ret, i;

	ret = nouveau_parent_init(&devobj->base);
	if (ret)
		return ret;

	/* skipped entirely on the first init (created == false) */
	for (i = 0; devobj->created && i < NVDEV_SUBDEV_NR; i++) {
		if ((subdev = devobj->subdev[i])) {
			if (!nv_iclass(subdev, NV_ENGINE_CLASS)) {
				ret = nouveau_object_inc(subdev);
				if (ret)
					goto fail;
			}
		}
	}

	devobj->created = true;
	return 0;

fail:
	/* unwind: drop references taken so far, in reverse order */
	for (--i; i >= 0; i--) {
		if ((subdev = devobj->subdev[i])) {
			if (!nv_iclass(subdev, NV_ENGINE_CLASS))
				nouveau_object_dec(subdev, false);
		}
	}

	return ret;
}
314
315 static int
316 nouveau_devobj_fini(struct nouveau_object *object, bool suspend)
317 {
318 struct nouveau_devobj *devobj = (void *)object;
319 struct nouveau_object *subdev;
320 int ret, i;
321
322 for (i = NVDEV_SUBDEV_NR - 1; i >= 0; i--) {
323 if ((subdev = devobj->subdev[i])) {
324 if (!nv_iclass(subdev, NV_ENGINE_CLASS)) {
325 ret = nouveau_object_dec(subdev, suspend);
326 if (ret && suspend)
327 goto fail;
328 }
329 }
330 }
331
332 ret = nouveau_parent_fini(&devobj->base, suspend);
333 fail:
334 for (; ret && suspend && i < NVDEV_SUBDEV_NR; i++) {
335 if ((subdev = devobj->subdev[i])) {
336 if (!nv_iclass(subdev, NV_ENGINE_CLASS)) {
337 ret = nouveau_object_inc(subdev);
338 if (ret) {
339 /* XXX */
340 }
341 }
342 }
343 }
344
345 return ret;
346 }
347
/* Register accessors for the device class object.  Each simply
 * forwards to the engine's (the nouveau_device's) mmio accessors.
 */
static u8
nouveau_devobj_rd08(struct nouveau_object *object, u32 addr)
{
	return nv_rd08(object->engine, addr);
}

static u16
nouveau_devobj_rd16(struct nouveau_object *object, u32 addr)
{
	return nv_rd16(object->engine, addr);
}

static u32
nouveau_devobj_rd32(struct nouveau_object *object, u32 addr)
{
	return nv_rd32(object->engine, addr);
}

static void
nouveau_devobj_wr08(struct nouveau_object *object, u32 addr, u8 data)
{
	nv_wr08(object->engine, addr, data);
}

static void
nouveau_devobj_wr16(struct nouveau_object *object, u32 addr, u16 data)
{
	nv_wr16(object->engine, addr, data);
}

static void
nouveau_devobj_wr32(struct nouveau_object *object, u32 addr, u32 data)
{
	nv_wr32(object->engine, addr, data);
}
383
/* Method table for the 0x0080 device class object. */
static struct nouveau_ofuncs
nouveau_devobj_ofuncs = {
	.ctor = nouveau_devobj_ctor,
	.dtor = nouveau_devobj_dtor,
	.init = nouveau_devobj_init,
	.fini = nouveau_devobj_fini,
	.rd08 = nouveau_devobj_rd08,
	.rd16 = nouveau_devobj_rd16,
	.rd32 = nouveau_devobj_rd32,
	.wr08 = nouveau_devobj_wr08,
	.wr16 = nouveau_devobj_wr16,
	.wr32 = nouveau_devobj_wr32,
};
397
398 /******************************************************************************
399 * nouveau_device: engine functions
400 *****************************************************************************/
/* Classes a device exposes to clients: only 0x0080 (the device class). */
struct nouveau_oclass
nouveau_device_sclass[] = {
	{ 0x0080, &nouveau_devobj_ofuncs },
	{}	/* sentinel */
};
406
/* oclass describing the device subdev itself.  The ofuncs table is
 * empty: devices appear to be created/destroyed only through
 * nouveau_device_create_() / nouveau_device_destroy() below, not the
 * generic object paths -- NOTE(review): confirm no generic path ever
 * invokes these ofuncs.
 */
static struct nouveau_oclass
nouveau_device_oclass = {
	.handle = NV_SUBDEV(DEVICE, 0x00),
	.ofuncs = &(struct nouveau_ofuncs) {
	},
};
413
/* nouveau_device_create_() - register a new nouveau_device
 *
 * Allocates a device subdev of 'length' bytes, records its PCI device,
 * unique handle, name and config/debug option strings, then links it
 * onto the global nv_devices list.  The whole sequence runs under
 * nv_devices_mutex so the duplicate-handle check and the list insert
 * are atomic.
 *
 * Returns 0 on success, -EEXIST if a device with this handle already
 * exists, or the error from nouveau_subdev_create_().
 */
int
nouveau_device_create_(struct pci_dev *pdev, u64 name, const char *sname,
		       const char *cfg, const char *dbg,
		       int length, void **pobject)
{
	struct nouveau_device *device;
	int ret = -EEXIST;	/* returned when the handle is already taken */

	mutex_lock(&nv_devices_mutex);
	list_for_each_entry(device, &nv_devices, head) {
		if (device->handle == name)
			goto done;
	}

	ret = nouveau_subdev_create_(NULL, NULL, &nouveau_device_oclass, 0,
				     "DEVICE", "device", length, pobject);
	device = *pobject;
	if (ret)
		goto done;

	/* NOTE(review): usecount starts at 2 -- presumably one reference
	 * for the creator and one for the nv_devices list; confirm against
	 * the object refcounting code.
	 */
	atomic_set(&nv_object(device)->usecount, 2);
	device->pdev = pdev;
	device->handle = name;
	device->cfgopt = cfg;
	device->dbgopt = dbg;
	device->name = sname;

	/* per-device debug level comes from the "DEVICE" debug option */
	nv_subdev(device)->debug = nouveau_dbgopt(device->dbgopt, "DEVICE");
	list_add(&device->head, &nv_devices);
done:
	mutex_unlock(&nv_devices_mutex);
	return ret;
}
447
448 void
449 nouveau_device_destroy(struct nouveau_device **pdevice)
450 {
451 struct nouveau_device *device = *pdevice;
452 if (device) {
453 mutex_lock(&nv_devices_mutex);
454 list_del(&device->head);
455 mutex_unlock(&nv_devices_mutex);
456 if (device->base.mmio)
457 iounmap(device->base.mmio);
458 nouveau_subdev_destroy(&device->base);
459 }
460 *pdevice = NULL;
461 }