/* drivers/gpu/drm/nouveau/nvkm/engine/device/base.c */
1 /*
2 * Copyright 2012 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Ben Skeggs
23 */
24 #include "priv.h"
25 #include "acpi.h"
26
27 #include <core/client.h>
28 #include <core/option.h>
29 #include <core/notify.h>
30 #include <core/parent.h>
31 #include <subdev/bios.h>
32 #include <subdev/fb.h>
33 #include <subdev/instmem.h>
34
35 #include <nvif/class.h>
36 #include <nvif/unpack.h>
37
38 static DEFINE_MUTEX(nv_devices_mutex);
39 static LIST_HEAD(nv_devices);
40
41 struct nvkm_device *
42 nvkm_device_find(u64 name)
43 {
44 struct nvkm_device *device, *match = NULL;
45 mutex_lock(&nv_devices_mutex);
46 list_for_each_entry(device, &nv_devices, head) {
47 if (device->handle == name) {
48 match = device;
49 break;
50 }
51 }
52 mutex_unlock(&nv_devices_mutex);
53 return match;
54 }
55
56 int
57 nvkm_device_list(u64 *name, int size)
58 {
59 struct nvkm_device *device;
60 int nr = 0;
61 mutex_lock(&nv_devices_mutex);
62 list_for_each_entry(device, &nv_devices, head) {
63 if (nr++ < size)
64 name[nr - 1] = device->handle;
65 }
66 mutex_unlock(&nv_devices_mutex);
67 return nr;
68 }
69
70 /******************************************************************************
71 * nvkm_devobj (0x0080): class implementation
72 *****************************************************************************/
73
/* Per-client device object (class 0x0080): a parent object holding one
 * reference on each subdev/engine the client's device instance uses. */
struct nvkm_devobj {
	struct nvkm_parent base;
	/* references on device->subdev[], dropped in reverse order at dtor */
	struct nvkm_object *subdev[NVDEV_SUBDEV_NR];
};
78
/* NV_DEVICE_V0_INFO method: fill in the nv_device_info_v0 structure
 * describing the device's bus type, chip family, chipset/revision and
 * memory sizes.  Returns 0 on success or a negative errno from unpack. */
static int
nvkm_devobj_info(struct nvkm_object *object, void *data, u32 size)
{
	struct nvkm_device *device = nv_device(object);
	struct nvkm_fb *pfb = nvkm_fb(device);
	struct nvkm_instmem *imem = nvkm_instmem(device);
	union {
		struct nv_device_info_v0 v0;
	} *args = data;
	int ret;

	nv_ioctl(object, "device info size %d\n", size);
	/* NOTE: nvif_unpack() assigns 'ret' as a side effect — presumably
	 * a negative errno on version/size mismatch; the bare 'return ret'
	 * below relies on that (macro defined in nvif/unpack.h). */
	if (nvif_unpack(args->v0, 0, 0, false)) {
		nv_ioctl(object, "device info vers %d\n", args->v0.version);
	} else
		return ret;

	/* Chipsets known to be IGPs; everything else is classified by
	 * probing the PCI capabilities (or SOC when there's no pdev). */
	switch (device->chipset) {
	case 0x01a:
	case 0x01f:
	case 0x04c:
	case 0x04e:
	case 0x063:
	case 0x067:
	case 0x068:
	case 0x0aa:
	case 0x0ac:
	case 0x0af:
		args->v0.platform = NV_DEVICE_INFO_V0_IGP;
		break;
	default:
		if (device->pdev) {
			if (pci_find_capability(device->pdev, PCI_CAP_ID_AGP))
				args->v0.platform = NV_DEVICE_INFO_V0_AGP;
			else
			if (pci_is_pcie(device->pdev))
				args->v0.platform = NV_DEVICE_INFO_V0_PCIE;
			else
				args->v0.platform = NV_DEVICE_INFO_V0_PCI;
		} else {
			args->v0.platform = NV_DEVICE_INFO_V0_SOC;
		}
		break;
	}

	/* Map internal card_type to the userspace-visible family enum. */
	switch (device->card_type) {
	case NV_04: args->v0.family = NV_DEVICE_INFO_V0_TNT; break;
	case NV_10:
	case NV_11: args->v0.family = NV_DEVICE_INFO_V0_CELSIUS; break;
	case NV_20: args->v0.family = NV_DEVICE_INFO_V0_KELVIN; break;
	case NV_30: args->v0.family = NV_DEVICE_INFO_V0_RANKINE; break;
	case NV_40: args->v0.family = NV_DEVICE_INFO_V0_CURIE; break;
	case NV_50: args->v0.family = NV_DEVICE_INFO_V0_TESLA; break;
	case NV_C0: args->v0.family = NV_DEVICE_INFO_V0_FERMI; break;
	case NV_E0: args->v0.family = NV_DEVICE_INFO_V0_KEPLER; break;
	case GM100: args->v0.family = NV_DEVICE_INFO_V0_MAXWELL; break;
	default:
		args->v0.family = 0;
		break;
	}

	args->v0.chipset  = device->chipset;
	args->v0.revision = device->chiprev;
	/* ram_user is ram_size minus instmem's reservation; both are zero
	 * when no fb subdev exists (e.g. identify-only devices). */
	if (pfb)  args->v0.ram_size = args->v0.ram_user = pfb->ram->size;
	else      args->v0.ram_size = args->v0.ram_user = 0;
	if (imem) args->v0.ram_user = args->v0.ram_user - imem->reserved;
	return 0;
}
147
148 static int
149 nvkm_devobj_mthd(struct nvkm_object *object, u32 mthd, void *data, u32 size)
150 {
151 switch (mthd) {
152 case NV_DEVICE_V0_INFO:
153 return nvkm_devobj_info(object, data, size);
154 default:
155 break;
156 }
157 return -EINVAL;
158 }
159
160 static u8
161 nvkm_devobj_rd08(struct nvkm_object *object, u64 addr)
162 {
163 return nv_rd08(object->engine, addr);
164 }
165
166 static u16
167 nvkm_devobj_rd16(struct nvkm_object *object, u64 addr)
168 {
169 return nv_rd16(object->engine, addr);
170 }
171
172 static u32
173 nvkm_devobj_rd32(struct nvkm_object *object, u64 addr)
174 {
175 return nv_rd32(object->engine, addr);
176 }
177
178 static void
179 nvkm_devobj_wr08(struct nvkm_object *object, u64 addr, u8 data)
180 {
181 nv_wr08(object->engine, addr, data);
182 }
183
184 static void
185 nvkm_devobj_wr16(struct nvkm_object *object, u64 addr, u16 data)
186 {
187 nv_wr16(object->engine, addr, data);
188 }
189
190 static void
191 nvkm_devobj_wr32(struct nvkm_object *object, u64 addr, u32 data)
192 {
193 nv_wr32(object->engine, addr, data);
194 }
195
196 static int
197 nvkm_devobj_map(struct nvkm_object *object, u64 *addr, u32 *size)
198 {
199 struct nvkm_device *device = nv_device(object);
200 *addr = nv_device_resource_start(device, 0);
201 *size = nv_device_resource_len(device, 0);
202 return 0;
203 }
204
/* Mapping from internal subdev/engine index to the userspace
 * NV_DEVICE_V0_DISABLE_* mask bit that disables it.  Used by
 * nvkm_devobj_ctor() to translate args->v0.disable into the internal
 * per-subdev disable bitmask. */
static const u64 disable_map[] = {
	[NVDEV_SUBDEV_VBIOS]	= NV_DEVICE_V0_DISABLE_VBIOS,
	[NVDEV_SUBDEV_DEVINIT]	= NV_DEVICE_V0_DISABLE_CORE,
	[NVDEV_SUBDEV_GPIO]	= NV_DEVICE_V0_DISABLE_CORE,
	[NVDEV_SUBDEV_I2C]	= NV_DEVICE_V0_DISABLE_CORE,
	[NVDEV_SUBDEV_CLK ]	= NV_DEVICE_V0_DISABLE_CORE,
	[NVDEV_SUBDEV_MXM]	= NV_DEVICE_V0_DISABLE_CORE,
	[NVDEV_SUBDEV_MC]	= NV_DEVICE_V0_DISABLE_CORE,
	[NVDEV_SUBDEV_BUS]	= NV_DEVICE_V0_DISABLE_CORE,
	[NVDEV_SUBDEV_TIMER]	= NV_DEVICE_V0_DISABLE_CORE,
	[NVDEV_SUBDEV_FB]	= NV_DEVICE_V0_DISABLE_CORE,
	[NVDEV_SUBDEV_LTC]	= NV_DEVICE_V0_DISABLE_CORE,
	[NVDEV_SUBDEV_IBUS]	= NV_DEVICE_V0_DISABLE_CORE,
	[NVDEV_SUBDEV_INSTMEM]	= NV_DEVICE_V0_DISABLE_CORE,
	[NVDEV_SUBDEV_MMU]	= NV_DEVICE_V0_DISABLE_CORE,
	[NVDEV_SUBDEV_BAR]	= NV_DEVICE_V0_DISABLE_CORE,
	[NVDEV_SUBDEV_VOLT]	= NV_DEVICE_V0_DISABLE_CORE,
	[NVDEV_SUBDEV_THERM]	= NV_DEVICE_V0_DISABLE_CORE,
	[NVDEV_SUBDEV_PMU]	= NV_DEVICE_V0_DISABLE_CORE,
	[NVDEV_SUBDEV_FUSE]	= NV_DEVICE_V0_DISABLE_CORE,
	[NVDEV_ENGINE_DMAOBJ]	= NV_DEVICE_V0_DISABLE_CORE,
	[NVDEV_ENGINE_PM ]	= NV_DEVICE_V0_DISABLE_CORE,
	[NVDEV_ENGINE_FIFO]	= NV_DEVICE_V0_DISABLE_FIFO,
	[NVDEV_ENGINE_SW]	= NV_DEVICE_V0_DISABLE_FIFO,
	[NVDEV_ENGINE_GR]	= NV_DEVICE_V0_DISABLE_GR,
	[NVDEV_ENGINE_MPEG]	= NV_DEVICE_V0_DISABLE_MPEG,
	[NVDEV_ENGINE_ME]	= NV_DEVICE_V0_DISABLE_ME,
	[NVDEV_ENGINE_VP]	= NV_DEVICE_V0_DISABLE_VP,
	[NVDEV_ENGINE_CIPHER]	= NV_DEVICE_V0_DISABLE_CIPHER,
	[NVDEV_ENGINE_BSP]	= NV_DEVICE_V0_DISABLE_BSP,
	[NVDEV_ENGINE_MSPPP]	= NV_DEVICE_V0_DISABLE_MSPPP,
	[NVDEV_ENGINE_CE0]	= NV_DEVICE_V0_DISABLE_CE0,
	[NVDEV_ENGINE_CE1]	= NV_DEVICE_V0_DISABLE_CE1,
	[NVDEV_ENGINE_CE2]	= NV_DEVICE_V0_DISABLE_CE2,
	[NVDEV_ENGINE_VIC]	= NV_DEVICE_V0_DISABLE_VIC,
	[NVDEV_ENGINE_MSENC]	= NV_DEVICE_V0_DISABLE_MSENC,
	[NVDEV_ENGINE_DISP]	= NV_DEVICE_V0_DISABLE_DISP,
	[NVDEV_ENGINE_MSVLD]	= NV_DEVICE_V0_DISABLE_MSVLD,
	[NVDEV_ENGINE_SEC]	= NV_DEVICE_V0_DISABLE_SEC,
	[NVDEV_SUBDEV_NR]	= 0,	/* sentinel; keeps array sized NR+1 */
};
246
247 static void
248 nvkm_devobj_dtor(struct nvkm_object *object)
249 {
250 struct nvkm_devobj *devobj = (void *)object;
251 int i;
252
253 for (i = NVDEV_SUBDEV_NR - 1; i >= 0; i--)
254 nvkm_object_ref(NULL, &devobj->subdev[i]);
255
256 nvkm_parent_destroy(&devobj->base);
257 }
258
/* Class used in place of the normal devobj class for privileged
 * (super) clients: identical except it additionally exposes the raw
 * register rd/wr and map ofuncs.  Selected in nvkm_devobj_ctor(). */
static struct nvkm_oclass
nvkm_devobj_oclass_super = {
	.handle = NV_DEVICE,
	.ofuncs = &(struct nvkm_ofuncs) {
		.dtor = nvkm_devobj_dtor,
		.init = _nvkm_parent_init,
		.fini = _nvkm_parent_fini,
		.mthd = nvkm_devobj_mthd,
		.map  = nvkm_devobj_map,
		.rd08 = nvkm_devobj_rd08,
		.rd16 = nvkm_devobj_rd16,
		.rd32 = nvkm_devobj_rd32,
		.wr08 = nvkm_devobj_wr08,
		.wr16 = nvkm_devobj_wr16,
		.wr32 = nvkm_devobj_wr32,
	}
};
276
/* Constructor for the per-client device object (class 0x0080).
 *
 * Unpacks the nv_device_v0 args, locates the requested nvkm_device,
 * and — on first use of a device — identifies the chipset by probing
 * BOOT0/strap registers, maps the register aperture, then constructs
 * and initialises the requested subdevs/engines.  Returns 0 on success
 * or a negative errno.  Construction and init ordering here is
 * significant; do not reorder.
 */
static int
nvkm_devobj_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
		 struct nvkm_oclass *oclass, void *data, u32 size,
		 struct nvkm_object **pobject)
{
	union {
		struct nv_device_v0 v0;
	} *args = data;
	struct nvkm_client *client = nv_client(parent);
	struct nvkm_device *device;
	struct nvkm_devobj *devobj;
	u32 boot0, strap;
	u64 disable, mmio_base, mmio_size;
	void __iomem *map;
	int ret, i, c;

	nv_ioctl(parent, "create device size %d\n", size);
	/* NOTE: nvif_unpack() assigns 'ret' as a side effect; the bare
	 * 'return ret' relies on that (see nvif/unpack.h). */
	if (nvif_unpack(args->v0, 0, 0, false)) {
		nv_ioctl(parent, "create device v%d device %016llx "
				 "disable %016llx debug0 %016llx\n",
			 args->v0.version, args->v0.device,
			 args->v0.disable, args->v0.debug0);
	} else
		return ret;

	/* give privileged clients register access */
	if (client->super)
		oclass = &nvkm_devobj_oclass_super;

	/* find the device subdev that matches what the client requested;
	 * ~0 means "the client's own device" */
	device = nv_device(client->device);
	if (args->v0.device != ~0) {
		device = nvkm_device_find(args->v0.device);
		if (!device)
			return -ENODEV;
	}

	ret = nvkm_parent_create(parent, nv_object(device), oclass, 0,
				 nvkm_control_oclass,
				 (1ULL << NVDEV_ENGINE_DMAOBJ) |
				 (1ULL << NVDEV_ENGINE_FIFO) |
				 (1ULL << NVDEV_ENGINE_DISP) |
				 (1ULL << NVDEV_ENGINE_PM), &devobj);
	*pobject = nv_object(devobj);
	if (ret)
		return ret;

	mmio_base = nv_device_resource_start(device, 0);
	mmio_size = nv_device_resource_len(device, 0);

	/* translate api disable mask into internal mapping */
	disable = args->v0.debug0;
	for (i = 0; i < NVDEV_SUBDEV_NR; i++) {
		if (args->v0.disable & disable_map[i])
			disable |= (1ULL << i);
	}

	/* identify the chipset, and determine classes of subdev/engines;
	 * only done once per device (card_type stays set afterwards) */
	if (!(args->v0.disable & NV_DEVICE_V0_DISABLE_IDENTIFY) &&
	    !device->card_type) {
		/* temporary mapping: enough of BAR0 to reach BOOT0 and
		 * the strap register at 0x101000 */
		map = ioremap(mmio_base, 0x102000);
		if (map == NULL)
			return -ENOMEM;

		/* switch mmio to cpu's native endianness */
#ifndef __BIG_ENDIAN
		if (ioread32_native(map + 0x000004) != 0x00000000) {
#else
		if (ioread32_native(map + 0x000004) == 0x00000000) {
#endif
			iowrite32_native(0x01000001, map + 0x000004);
			ioread32_native(map); /* posting read */
		}

		/* read boot0 and strapping information */
		boot0 = ioread32_native(map + 0x000000);
		strap = ioread32_native(map + 0x101000);
		iounmap(map);

		/* determine chipset and derive architecture from it */
		if ((boot0 & 0x1f000000) > 0) {
			device->chipset = (boot0 & 0x1ff00000) >> 20;
			device->chiprev = (boot0 & 0x000000ff);
			switch (device->chipset & 0x1f0) {
			case 0x010: {
				/* bitmask of NV1x chipsets that are NV_10
				 * rather than NV_11 */
				if (0x461 & (1 << (device->chipset & 0xf)))
					device->card_type = NV_10;
				else
					device->card_type = NV_11;
				device->chiprev = 0x00;
				break;
			}
			case 0x020: device->card_type = NV_20; break;
			case 0x030: device->card_type = NV_30; break;
			case 0x040:
			case 0x060: device->card_type = NV_40; break;
			case 0x050:
			case 0x080:
			case 0x090:
			case 0x0a0: device->card_type = NV_50; break;
			case 0x0c0:
			case 0x0d0: device->card_type = NV_C0; break;
			case 0x0e0:
			case 0x0f0:
			case 0x100: device->card_type = NV_E0; break;
			case 0x110:
			case 0x120: device->card_type = GM100; break;
			default:
				break;
			}
		} else
		/* pre-NV10 cards encode the chipset differently */
		if ((boot0 & 0xff00fff0) == 0x20004000) {
			if (boot0 & 0x00f00000)
				device->chipset = 0x05;
			else
				device->chipset = 0x04;
			device->card_type = NV_04;
		}

		/* populate device->oclass[] for the detected family */
		switch (device->card_type) {
		case NV_04: ret = nv04_identify(device); break;
		case NV_10:
		case NV_11: ret = nv10_identify(device); break;
		case NV_20: ret = nv20_identify(device); break;
		case NV_30: ret = nv30_identify(device); break;
		case NV_40: ret = nv40_identify(device); break;
		case NV_50: ret = nv50_identify(device); break;
		case NV_C0: ret = gf100_identify(device); break;
		case NV_E0: ret = gk104_identify(device); break;
		case GM100: ret = gm100_identify(device); break;
		default:
			ret = -EINVAL;
			break;
		}

		if (ret) {
			nv_error(device, "unknown chipset, 0x%08x\n", boot0);
			return ret;
		}

		nv_info(device, "BOOT0  : 0x%08x\n", boot0);
		nv_info(device, "Chipset: %s (NV%02X)\n",
			device->cname, device->chipset);
		nv_info(device, "Family : NV%02X\n", device->card_type);

		/* determine frequency of timing crystal */
		if ( device->card_type <= NV_10 || device->chipset < 0x17 ||
		    (device->chipset >= 0x20 && device->chipset < 0x25))
			strap &= 0x00000040;
		else
			strap &= 0x00400040;

		switch (strap) {
		case 0x00000000: device->crystal = 13500; break;
		case 0x00000040: device->crystal = 14318; break;
		case 0x00400000: device->crystal = 27000; break;
		case 0x00400040: device->crystal = 25000; break;
		}

		nv_debug(device, "crystal freq: %dKHz\n", device->crystal);
	} else
	if ( (args->v0.disable & NV_DEVICE_V0_DISABLE_IDENTIFY)) {
		/* identify disabled: stub device exposing only the vbios */
		device->cname = "NULL";
		device->oclass[NVDEV_SUBDEV_VBIOS] = &nvkm_bios_oclass;
	}

	/* map the full register aperture once per device */
	if (!(args->v0.disable & NV_DEVICE_V0_DISABLE_MMIO) &&
	    !nv_subdev(device)->mmio) {
		nv_subdev(device)->mmio  = ioremap(mmio_base, mmio_size);
		if (!nv_subdev(device)->mmio) {
			nv_error(device, "unable to map device registers\n");
			return -ENOMEM;
		}
	}

	/* ensure requested subsystems are available for use
	 * (note: 'oclass' parameter is reused as a scratch variable here) */
	for (i = 1, c = 1; i < NVDEV_SUBDEV_NR; i++) {
		if (!(oclass = device->oclass[i]) || (disable & (1ULL << i)))
			continue;

		/* already constructed by a previous client: just take a ref */
		if (device->subdev[i]) {
			nvkm_object_ref(device->subdev[i], &devobj->subdev[i]);
			continue;
		}

		ret = nvkm_object_ctor(nv_object(device), NULL, oclass,
				       NULL, i, &devobj->subdev[i]);
		if (ret == -ENODEV)
			continue;
		if (ret)
			return ret;

		device->subdev[i] = devobj->subdev[i];

		/* note: can't init *any* subdevs until devinit has been run
		 * due to not knowing exactly what the vbios init tables will
		 * mess with.  devinit also can't be run until all of its
		 * dependencies have been created.
		 *
		 * this code delays init of any subdev until all of devinit's
		 * dependencies have been created, and then initialises each
		 * subdev in turn as they're created.
		 */
		while (i >= NVDEV_SUBDEV_DEVINIT_LAST && c <= i) {
			struct nvkm_object *subdev = devobj->subdev[c++];
			if (subdev && !nv_iclass(subdev, NV_ENGINE_CLASS)) {
				ret = nvkm_object_inc(subdev);
				if (ret)
					return ret;
				/* inc above also bumped the device's
				 * usecount; undo that */
				atomic_dec(&nv_object(device)->usecount);
			} else
			if (subdev) {
				nvkm_subdev_reset(subdev);
			}
		}
	}

	return 0;
}
496
/* Default (non-privileged) ofuncs for the devobj class: no raw
 * register access or mapping, unlike nvkm_devobj_oclass_super. */
static struct nvkm_ofuncs
nvkm_devobj_ofuncs = {
	.ctor = nvkm_devobj_ctor,
	.dtor = nvkm_devobj_dtor,
	.init = _nvkm_parent_init,
	.fini = _nvkm_parent_fini,
	.mthd = nvkm_devobj_mthd,
};
505
506 /******************************************************************************
507 * nvkm_device: engine functions
508 *****************************************************************************/
509
/* Cast helper: resolve any nvkm object to its owning nvkm_device.
 *
 * Objects with no engine pointer are walked up the parent chain to the
 * root (the device itself); otherwise the engine's subdev object's
 * parent is taken.  Only a debug assertion guards a failed resolution,
 * so callers are expected to pass objects that genuinely belong to a
 * device. */
struct nvkm_device *
nv_device(void *obj)
{
	struct nvkm_object *device = nv_object(obj);
	if (device->engine == NULL) {
		/* no engine: climb to the topmost parent */
		while (device && device->parent)
			device = device->parent;
	} else {
		/* engine present: its subdev's parent is the device */
		device = &nv_object(obj)->engine->subdev.object;
		if (device && device->parent)
			device = device->parent;
	}
#if CONFIG_NOUVEAU_DEBUG >= NV_DBG_PARANOIA
	if (unlikely(!device))
		nv_assert("BAD CAST -> NvDevice, 0x%08x\n", nv_hclass(obj));
#endif
	return (void *)device;
}
528
/* Classes instantiable as children of the device engine: only the
 * 0x0080 device object.  NULL-terminated. */
static struct nvkm_oclass
nvkm_device_sclass[] = {
	{ 0x0080, &nvkm_devobj_ofuncs },
	{}
};
534
535 static int
536 nvkm_device_event_ctor(struct nvkm_object *object, void *data, u32 size,
537 struct nvkm_notify *notify)
538 {
539 if (!WARN_ON(size != 0)) {
540 notify->size = 0;
541 notify->types = 1;
542 notify->index = 0;
543 return 0;
544 }
545 return -EINVAL;
546 }
547
/* Event ops for device->event; only a ctor is needed. */
static const struct nvkm_event_func
nvkm_device_event_func = {
	.ctor = nvkm_device_event_ctor,
};
552
/* Suspend/teardown the device: decrement subdevs in reverse order
 * (engines are skipped — their lifetime is handled elsewhere), then
 * notify ACPI.  On failure during suspend, already-stopped subdevs are
 * re-incremented to roll back.  Returns 0 or a negative errno. */
static int
nvkm_device_fini(struct nvkm_object *object, bool suspend)
{
	struct nvkm_device *device = (void *)object;
	struct nvkm_object *subdev;
	int ret, i;

	for (i = NVDEV_SUBDEV_NR - 1; i >= 0; i--) {
		if ((subdev = device->subdev[i])) {
			if (!nv_iclass(subdev, NV_ENGINE_CLASS)) {
				ret = nvkm_object_dec(subdev, suspend);
				/* only roll back on suspend; a plain fini
				 * continues past individual failures */
				if (ret && suspend)
					goto fail;
			}
		}
	}

	ret = nvkm_acpi_fini(device, suspend);
fail:
	/* unwind: restart subdevs stopped so far (i is where we failed) */
	for (; ret && i < NVDEV_SUBDEV_NR; i++) {
		if ((subdev = device->subdev[i])) {
			if (!nv_iclass(subdev, NV_ENGINE_CLASS)) {
				ret = nvkm_object_inc(subdev);
				if (ret) {
					/* XXX */
				}
			}
		}
	}

	return ret;
}
585
/* Initialise the device: notify ACPI, then increment every subdev in
 * index order (engines are reset instead of inc'd).  On failure the
 * already-started subdevs are decremented in reverse and ACPI is
 * re-finalized.  Returns 0 or a negative errno. */
static int
nvkm_device_init(struct nvkm_object *object)
{
	struct nvkm_device *device = (void *)object;
	struct nvkm_object *subdev;
	int ret, i = 0;

	ret = nvkm_acpi_init(device);
	if (ret)
		goto fail;

	for (i = 0; i < NVDEV_SUBDEV_NR; i++) {
		if ((subdev = device->subdev[i])) {
			if (!nv_iclass(subdev, NV_ENGINE_CLASS)) {
				ret = nvkm_object_inc(subdev);
				if (ret)
					goto fail;
			} else {
				/* engines just get reset, not refcounted */
				nvkm_subdev_reset(subdev);
			}
		}
	}

	ret = 0;
fail:
	/* unwind: stop subdevs started before index i */
	for (--i; ret && i >= 0; i--) {
		if ((subdev = device->subdev[i])) {
			if (!nv_iclass(subdev, NV_ENGINE_CLASS))
				nvkm_object_dec(subdev, false);
		}
	}

	if (ret)
		nvkm_acpi_fini(device, false);
	return ret;
}
622
/* Destroy the device engine: tear down the event, unlink from the
 * global device list, unmap the register aperture, then destroy the
 * engine base.  Order matters — the list removal must precede the
 * engine destroy so nvkm_device_find() can't return a dying device. */
static void
nvkm_device_dtor(struct nvkm_object *object)
{
	struct nvkm_device *device = (void *)object;

	nvkm_event_fini(&device->event);

	mutex_lock(&nv_devices_mutex);
	list_del(&device->head);
	mutex_unlock(&nv_devices_mutex);

	if (nv_subdev(device)->mmio)
		iounmap(nv_subdev(device)->mmio);

	nvkm_engine_destroy(&device->engine);
}
639
640 resource_size_t
641 nv_device_resource_start(struct nvkm_device *device, unsigned int bar)
642 {
643 if (nv_device_is_pci(device)) {
644 return pci_resource_start(device->pdev, bar);
645 } else {
646 struct resource *res;
647 res = platform_get_resource(device->platformdev,
648 IORESOURCE_MEM, bar);
649 if (!res)
650 return 0;
651 return res->start;
652 }
653 }
654
655 resource_size_t
656 nv_device_resource_len(struct nvkm_device *device, unsigned int bar)
657 {
658 if (nv_device_is_pci(device)) {
659 return pci_resource_len(device->pdev, bar);
660 } else {
661 struct resource *res;
662 res = platform_get_resource(device->platformdev,
663 IORESOURCE_MEM, bar);
664 if (!res)
665 return 0;
666 return resource_size(res);
667 }
668 }
669
670 int
671 nv_device_get_irq(struct nvkm_device *device, bool stall)
672 {
673 if (nv_device_is_pci(device)) {
674 return device->pdev->irq;
675 } else {
676 return platform_get_irq_byname(device->platformdev,
677 stall ? "stall" : "nonstall");
678 }
679 }
680
/* Engine class for the device itself; ctor is NULL because devices
 * are created via nvkm_device_create_() rather than object ctor. */
static struct nvkm_oclass
nvkm_device_oclass = {
	.handle = NV_ENGINE(DEVICE, 0x00),
	.ofuncs = &(struct nvkm_ofuncs) {
		.dtor = nvkm_device_dtor,
		.init = nvkm_device_init,
		.fini = nvkm_device_fini,
	},
};
690
/* Create and register a new nvkm_device.
 *
 * @dev:     bus-specific device pointer (pci_dev or platform_device,
 *           selected by @type)
 * @type:    NVKM_BUS_PCI or NVKM_BUS_PLATFORM
 * @name:    unique 64-bit handle; -EEXIST if already registered
 * @sname:   human-readable device name
 * @cfg/dbg: config/debug option strings
 * @length:  allocation size for the derived device structure
 * @pobject: receives the created device
 *
 * The whole create-and-register sequence runs under nv_devices_mutex
 * so concurrent creators can't register duplicate handles.  Returns 0
 * or a negative errno. */
int
nvkm_device_create_(void *dev, enum nv_bus_type type, u64 name,
		    const char *sname, const char *cfg, const char *dbg,
		    int length, void **pobject)
{
	struct nvkm_device *device;
	int ret = -EEXIST;

	mutex_lock(&nv_devices_mutex);
	list_for_each_entry(device, &nv_devices, head) {
		if (device->handle == name)
			goto done;
	}

	ret = nvkm_engine_create_(NULL, NULL, &nvkm_device_oclass, true,
				  "DEVICE", "device", length, pobject);
	device = *pobject;
	if (ret)
		goto done;

	switch (type) {
	case NVKM_BUS_PCI:
		device->pdev = dev;
		break;
	case NVKM_BUS_PLATFORM:
		device->platformdev = dev;
		break;
	}
	device->handle = name;
	device->cfgopt = cfg;
	device->dbgopt = dbg;
	device->name = sname;

	nv_subdev(device)->debug = nvkm_dbgopt(device->dbgopt, "DEVICE");
	nv_engine(device)->sclass = nvkm_device_sclass;
	/* visible to nvkm_device_find() from this point on */
	list_add(&device->head, &nv_devices);

	ret = nvkm_event_init(&nvkm_device_event_func, 1, 1, &device->event);
done:
	mutex_unlock(&nv_devices_mutex);
	return ret;
}
/* end of file */