/*
 * Copyright 2012 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */
27 #include <core/client.h>
28 #include <core/option.h>
29 #include <core/notify.h>
30 #include <core/parent.h>
31 #include <subdev/bios.h>
32 #include <subdev/fb.h>
33 #include <subdev/instmem.h>
35 #include <nvif/class.h>
36 #include <nvif/unpack.h>
/* Global registry of known devices; nv_devices_mutex guards nv_devices. */
static DEFINE_MUTEX(nv_devices_mutex);
static LIST_HEAD(nv_devices);
42 nvkm_device_find(u64 name
)
44 struct nvkm_device
*device
, *match
= NULL
;
45 mutex_lock(&nv_devices_mutex
);
46 list_for_each_entry(device
, &nv_devices
, head
) {
47 if (device
->handle
== name
) {
52 mutex_unlock(&nv_devices_mutex
);
57 nvkm_device_list(u64
*name
, int size
)
59 struct nvkm_device
*device
;
61 mutex_lock(&nv_devices_mutex
);
62 list_for_each_entry(device
, &nv_devices
, head
) {
64 name
[nr
- 1] = device
->handle
;
66 mutex_unlock(&nv_devices_mutex
);
70 /******************************************************************************
71 * nvkm_devobj (0x0080): class implementation
72 *****************************************************************************/
75 struct nvkm_parent base
;
76 struct nvkm_object
*subdev
[NVDEV_SUBDEV_NR
];
80 nvkm_devobj_info(struct nvkm_object
*object
, void *data
, u32 size
)
82 struct nvkm_device
*device
= nv_device(object
);
83 struct nvkm_fb
*pfb
= nvkm_fb(device
);
84 struct nvkm_instmem
*imem
= nvkm_instmem(device
);
86 struct nv_device_info_v0 v0
;
90 nv_ioctl(object
, "device info size %d\n", size
);
91 if (nvif_unpack(args
->v0
, 0, 0, false)) {
92 nv_ioctl(object
, "device info vers %d\n", args
->v0
.version
);
96 switch (device
->chipset
) {
107 args
->v0
.platform
= NV_DEVICE_INFO_V0_IGP
;
111 if (pci_find_capability(device
->pdev
, PCI_CAP_ID_AGP
))
112 args
->v0
.platform
= NV_DEVICE_INFO_V0_AGP
;
114 if (pci_is_pcie(device
->pdev
))
115 args
->v0
.platform
= NV_DEVICE_INFO_V0_PCIE
;
117 args
->v0
.platform
= NV_DEVICE_INFO_V0_PCI
;
119 args
->v0
.platform
= NV_DEVICE_INFO_V0_SOC
;
124 switch (device
->card_type
) {
125 case NV_04
: args
->v0
.family
= NV_DEVICE_INFO_V0_TNT
; break;
127 case NV_11
: args
->v0
.family
= NV_DEVICE_INFO_V0_CELSIUS
; break;
128 case NV_20
: args
->v0
.family
= NV_DEVICE_INFO_V0_KELVIN
; break;
129 case NV_30
: args
->v0
.family
= NV_DEVICE_INFO_V0_RANKINE
; break;
130 case NV_40
: args
->v0
.family
= NV_DEVICE_INFO_V0_CURIE
; break;
131 case NV_50
: args
->v0
.family
= NV_DEVICE_INFO_V0_TESLA
; break;
132 case NV_C0
: args
->v0
.family
= NV_DEVICE_INFO_V0_FERMI
; break;
133 case NV_E0
: args
->v0
.family
= NV_DEVICE_INFO_V0_KEPLER
; break;
134 case GM100
: args
->v0
.family
= NV_DEVICE_INFO_V0_MAXWELL
; break;
140 args
->v0
.chipset
= device
->chipset
;
141 args
->v0
.revision
= device
->chiprev
;
142 if (pfb
) args
->v0
.ram_size
= args
->v0
.ram_user
= pfb
->ram
->size
;
143 else args
->v0
.ram_size
= args
->v0
.ram_user
= 0;
144 if (imem
) args
->v0
.ram_user
= args
->v0
.ram_user
- imem
->reserved
;
149 nvkm_devobj_mthd(struct nvkm_object
*object
, u32 mthd
, void *data
, u32 size
)
152 case NV_DEVICE_V0_INFO
:
153 return nvkm_devobj_info(object
, data
, size
);
161 nvkm_devobj_rd08(struct nvkm_object
*object
, u64 addr
)
163 return nv_rd08(object
->engine
, addr
);
167 nvkm_devobj_rd16(struct nvkm_object
*object
, u64 addr
)
169 return nv_rd16(object
->engine
, addr
);
173 nvkm_devobj_rd32(struct nvkm_object
*object
, u64 addr
)
175 return nv_rd32(object
->engine
, addr
);
179 nvkm_devobj_wr08(struct nvkm_object
*object
, u64 addr
, u8 data
)
181 nv_wr08(object
->engine
, addr
, data
);
185 nvkm_devobj_wr16(struct nvkm_object
*object
, u64 addr
, u16 data
)
187 nv_wr16(object
->engine
, addr
, data
);
191 nvkm_devobj_wr32(struct nvkm_object
*object
, u64 addr
, u32 data
)
193 nv_wr32(object
->engine
, addr
, data
);
197 nvkm_devobj_map(struct nvkm_object
*object
, u64
*addr
, u32
*size
)
199 struct nvkm_device
*device
= nv_device(object
);
200 *addr
= nv_device_resource_start(device
, 0);
201 *size
= nv_device_resource_len(device
, 0);
205 static const u64 disable_map
[] = {
206 [NVDEV_SUBDEV_VBIOS
] = NV_DEVICE_V0_DISABLE_VBIOS
,
207 [NVDEV_SUBDEV_DEVINIT
] = NV_DEVICE_V0_DISABLE_CORE
,
208 [NVDEV_SUBDEV_GPIO
] = NV_DEVICE_V0_DISABLE_CORE
,
209 [NVDEV_SUBDEV_I2C
] = NV_DEVICE_V0_DISABLE_CORE
,
210 [NVDEV_SUBDEV_CLK
] = NV_DEVICE_V0_DISABLE_CORE
,
211 [NVDEV_SUBDEV_MXM
] = NV_DEVICE_V0_DISABLE_CORE
,
212 [NVDEV_SUBDEV_MC
] = NV_DEVICE_V0_DISABLE_CORE
,
213 [NVDEV_SUBDEV_BUS
] = NV_DEVICE_V0_DISABLE_CORE
,
214 [NVDEV_SUBDEV_TIMER
] = NV_DEVICE_V0_DISABLE_CORE
,
215 [NVDEV_SUBDEV_FB
] = NV_DEVICE_V0_DISABLE_CORE
,
216 [NVDEV_SUBDEV_LTC
] = NV_DEVICE_V0_DISABLE_CORE
,
217 [NVDEV_SUBDEV_IBUS
] = NV_DEVICE_V0_DISABLE_CORE
,
218 [NVDEV_SUBDEV_INSTMEM
] = NV_DEVICE_V0_DISABLE_CORE
,
219 [NVDEV_SUBDEV_MMU
] = NV_DEVICE_V0_DISABLE_CORE
,
220 [NVDEV_SUBDEV_BAR
] = NV_DEVICE_V0_DISABLE_CORE
,
221 [NVDEV_SUBDEV_VOLT
] = NV_DEVICE_V0_DISABLE_CORE
,
222 [NVDEV_SUBDEV_THERM
] = NV_DEVICE_V0_DISABLE_CORE
,
223 [NVDEV_SUBDEV_PMU
] = NV_DEVICE_V0_DISABLE_CORE
,
224 [NVDEV_SUBDEV_FUSE
] = NV_DEVICE_V0_DISABLE_CORE
,
225 [NVDEV_ENGINE_DMAOBJ
] = NV_DEVICE_V0_DISABLE_CORE
,
226 [NVDEV_ENGINE_PM
] = NV_DEVICE_V0_DISABLE_CORE
,
227 [NVDEV_ENGINE_FIFO
] = NV_DEVICE_V0_DISABLE_FIFO
,
228 [NVDEV_ENGINE_SW
] = NV_DEVICE_V0_DISABLE_FIFO
,
229 [NVDEV_ENGINE_GR
] = NV_DEVICE_V0_DISABLE_GR
,
230 [NVDEV_ENGINE_MPEG
] = NV_DEVICE_V0_DISABLE_MPEG
,
231 [NVDEV_ENGINE_ME
] = NV_DEVICE_V0_DISABLE_ME
,
232 [NVDEV_ENGINE_VP
] = NV_DEVICE_V0_DISABLE_VP
,
233 [NVDEV_ENGINE_CIPHER
] = NV_DEVICE_V0_DISABLE_CIPHER
,
234 [NVDEV_ENGINE_BSP
] = NV_DEVICE_V0_DISABLE_BSP
,
235 [NVDEV_ENGINE_MSPPP
] = NV_DEVICE_V0_DISABLE_MSPPP
,
236 [NVDEV_ENGINE_CE0
] = NV_DEVICE_V0_DISABLE_CE0
,
237 [NVDEV_ENGINE_CE1
] = NV_DEVICE_V0_DISABLE_CE1
,
238 [NVDEV_ENGINE_CE2
] = NV_DEVICE_V0_DISABLE_CE2
,
239 [NVDEV_ENGINE_VIC
] = NV_DEVICE_V0_DISABLE_VIC
,
240 [NVDEV_ENGINE_MSENC
] = NV_DEVICE_V0_DISABLE_MSENC
,
241 [NVDEV_ENGINE_DISP
] = NV_DEVICE_V0_DISABLE_DISP
,
242 [NVDEV_ENGINE_MSVLD
] = NV_DEVICE_V0_DISABLE_MSVLD
,
243 [NVDEV_ENGINE_SEC
] = NV_DEVICE_V0_DISABLE_SEC
,
244 [NVDEV_SUBDEV_NR
] = 0,
248 nvkm_devobj_dtor(struct nvkm_object
*object
)
250 struct nvkm_devobj
*devobj
= (void *)object
;
253 for (i
= NVDEV_SUBDEV_NR
- 1; i
>= 0; i
--)
254 nvkm_object_ref(NULL
, &devobj
->subdev
[i
]);
256 nvkm_parent_destroy(&devobj
->base
);
259 static struct nvkm_oclass
260 nvkm_devobj_oclass_super
= {
262 .ofuncs
= &(struct nvkm_ofuncs
) {
263 .dtor
= nvkm_devobj_dtor
,
264 .init
= _nvkm_parent_init
,
265 .fini
= _nvkm_parent_fini
,
266 .mthd
= nvkm_devobj_mthd
,
267 .map
= nvkm_devobj_map
,
268 .rd08
= nvkm_devobj_rd08
,
269 .rd16
= nvkm_devobj_rd16
,
270 .rd32
= nvkm_devobj_rd32
,
271 .wr08
= nvkm_devobj_wr08
,
272 .wr16
= nvkm_devobj_wr16
,
273 .wr32
= nvkm_devobj_wr32
,
278 nvkm_devobj_ctor(struct nvkm_object
*parent
, struct nvkm_object
*engine
,
279 struct nvkm_oclass
*oclass
, void *data
, u32 size
,
280 struct nvkm_object
**pobject
)
283 struct nv_device_v0 v0
;
285 struct nvkm_client
*client
= nv_client(parent
);
286 struct nvkm_device
*device
;
287 struct nvkm_devobj
*devobj
;
289 u64 disable
, mmio_base
, mmio_size
;
293 nv_ioctl(parent
, "create device size %d\n", size
);
294 if (nvif_unpack(args
->v0
, 0, 0, false)) {
295 nv_ioctl(parent
, "create device v%d device %016llx "
296 "disable %016llx debug0 %016llx\n",
297 args
->v0
.version
, args
->v0
.device
,
298 args
->v0
.disable
, args
->v0
.debug0
);
302 /* give priviledged clients register access */
304 oclass
= &nvkm_devobj_oclass_super
;
306 /* find the device subdev that matches what the client requested */
307 device
= nv_device(client
->device
);
308 if (args
->v0
.device
!= ~0) {
309 device
= nvkm_device_find(args
->v0
.device
);
314 ret
= nvkm_parent_create(parent
, nv_object(device
), oclass
, 0,
316 (1ULL << NVDEV_ENGINE_DMAOBJ
) |
317 (1ULL << NVDEV_ENGINE_FIFO
) |
318 (1ULL << NVDEV_ENGINE_DISP
) |
319 (1ULL << NVDEV_ENGINE_PM
), &devobj
);
320 *pobject
= nv_object(devobj
);
324 mmio_base
= nv_device_resource_start(device
, 0);
325 mmio_size
= nv_device_resource_len(device
, 0);
327 /* translate api disable mask into internal mapping */
328 disable
= args
->v0
.debug0
;
329 for (i
= 0; i
< NVDEV_SUBDEV_NR
; i
++) {
330 if (args
->v0
.disable
& disable_map
[i
])
331 disable
|= (1ULL << i
);
334 /* identify the chipset, and determine classes of subdev/engines */
335 if (!(args
->v0
.disable
& NV_DEVICE_V0_DISABLE_IDENTIFY
) &&
336 !device
->card_type
) {
337 map
= ioremap(mmio_base
, 0x102000);
341 /* switch mmio to cpu's native endianness */
343 if (ioread32_native(map
+ 0x000004) != 0x00000000) {
345 if (ioread32_native(map
+ 0x000004) == 0x00000000) {
347 iowrite32_native(0x01000001, map
+ 0x000004);
348 ioread32_native(map
);
351 /* read boot0 and strapping information */
352 boot0
= ioread32_native(map
+ 0x000000);
353 strap
= ioread32_native(map
+ 0x101000);
356 /* determine chipset and derive architecture from it */
357 if ((boot0
& 0x1f000000) > 0) {
358 device
->chipset
= (boot0
& 0x1ff00000) >> 20;
359 device
->chiprev
= (boot0
& 0x000000ff);
360 switch (device
->chipset
& 0x1f0) {
362 if (0x461 & (1 << (device
->chipset
& 0xf)))
363 device
->card_type
= NV_10
;
365 device
->card_type
= NV_11
;
366 device
->chiprev
= 0x00;
369 case 0x020: device
->card_type
= NV_20
; break;
370 case 0x030: device
->card_type
= NV_30
; break;
372 case 0x060: device
->card_type
= NV_40
; break;
376 case 0x0a0: device
->card_type
= NV_50
; break;
378 case 0x0d0: device
->card_type
= NV_C0
; break;
381 case 0x100: device
->card_type
= NV_E0
; break;
383 case 0x120: device
->card_type
= GM100
; break;
388 if ((boot0
& 0xff00fff0) == 0x20004000) {
389 if (boot0
& 0x00f00000)
390 device
->chipset
= 0x05;
392 device
->chipset
= 0x04;
393 device
->card_type
= NV_04
;
396 switch (device
->card_type
) {
397 case NV_04
: ret
= nv04_identify(device
); break;
399 case NV_11
: ret
= nv10_identify(device
); break;
400 case NV_20
: ret
= nv20_identify(device
); break;
401 case NV_30
: ret
= nv30_identify(device
); break;
402 case NV_40
: ret
= nv40_identify(device
); break;
403 case NV_50
: ret
= nv50_identify(device
); break;
404 case NV_C0
: ret
= gf100_identify(device
); break;
405 case NV_E0
: ret
= gk104_identify(device
); break;
406 case GM100
: ret
= gm100_identify(device
); break;
413 nv_error(device
, "unknown chipset, 0x%08x\n", boot0
);
417 nv_info(device
, "BOOT0 : 0x%08x\n", boot0
);
418 nv_info(device
, "Chipset: %s (NV%02X)\n",
419 device
->cname
, device
->chipset
);
420 nv_info(device
, "Family : NV%02X\n", device
->card_type
);
422 /* determine frequency of timing crystal */
423 if ( device
->card_type
<= NV_10
|| device
->chipset
< 0x17 ||
424 (device
->chipset
>= 0x20 && device
->chipset
< 0x25))
430 case 0x00000000: device
->crystal
= 13500; break;
431 case 0x00000040: device
->crystal
= 14318; break;
432 case 0x00400000: device
->crystal
= 27000; break;
433 case 0x00400040: device
->crystal
= 25000; break;
436 nv_debug(device
, "crystal freq: %dKHz\n", device
->crystal
);
438 if ( (args
->v0
.disable
& NV_DEVICE_V0_DISABLE_IDENTIFY
)) {
439 device
->cname
= "NULL";
440 device
->oclass
[NVDEV_SUBDEV_VBIOS
] = &nvkm_bios_oclass
;
443 if (!(args
->v0
.disable
& NV_DEVICE_V0_DISABLE_MMIO
) &&
444 !nv_subdev(device
)->mmio
) {
445 nv_subdev(device
)->mmio
= ioremap(mmio_base
, mmio_size
);
446 if (!nv_subdev(device
)->mmio
) {
447 nv_error(device
, "unable to map device registers\n");
452 /* ensure requested subsystems are available for use */
453 for (i
= 1, c
= 1; i
< NVDEV_SUBDEV_NR
; i
++) {
454 if (!(oclass
= device
->oclass
[i
]) || (disable
& (1ULL << i
)))
457 if (device
->subdev
[i
]) {
458 nvkm_object_ref(device
->subdev
[i
], &devobj
->subdev
[i
]);
462 ret
= nvkm_object_ctor(nv_object(device
), NULL
, oclass
,
463 NULL
, i
, &devobj
->subdev
[i
]);
469 device
->subdev
[i
] = devobj
->subdev
[i
];
471 /* note: can't init *any* subdevs until devinit has been run
472 * due to not knowing exactly what the vbios init tables will
473 * mess with. devinit also can't be run until all of its
474 * dependencies have been created.
476 * this code delays init of any subdev until all of devinit's
477 * dependencies have been created, and then initialises each
478 * subdev in turn as they're created.
480 while (i
>= NVDEV_SUBDEV_DEVINIT_LAST
&& c
<= i
) {
481 struct nvkm_object
*subdev
= devobj
->subdev
[c
++];
482 if (subdev
&& !nv_iclass(subdev
, NV_ENGINE_CLASS
)) {
483 ret
= nvkm_object_inc(subdev
);
486 atomic_dec(&nv_object(device
)->usecount
);
489 nvkm_subdev_reset(subdev
);
497 static struct nvkm_ofuncs
498 nvkm_devobj_ofuncs
= {
499 .ctor
= nvkm_devobj_ctor
,
500 .dtor
= nvkm_devobj_dtor
,
501 .init
= _nvkm_parent_init
,
502 .fini
= _nvkm_parent_fini
,
503 .mthd
= nvkm_devobj_mthd
,
506 /******************************************************************************
507 * nvkm_device: engine functions
508 *****************************************************************************/
513 struct nvkm_object
*device
= nv_object(obj
);
514 if (device
->engine
== NULL
) {
515 while (device
&& device
->parent
)
516 device
= device
->parent
;
518 device
= &nv_object(obj
)->engine
->subdev
.object
;
519 if (device
&& device
->parent
)
520 device
= device
->parent
;
522 #if CONFIG_NOUVEAU_DEBUG >= NV_DBG_PARANOIA
523 if (unlikely(!device
))
524 nv_assert("BAD CAST -> NvDevice, 0x%08x\n", nv_hclass(obj
));
526 return (void *)device
;
529 static struct nvkm_oclass
530 nvkm_device_sclass
[] = {
531 { 0x0080, &nvkm_devobj_ofuncs
},
536 nvkm_device_event_ctor(struct nvkm_object
*object
, void *data
, u32 size
,
537 struct nvkm_notify
*notify
)
539 if (!WARN_ON(size
!= 0)) {
548 static const struct nvkm_event_func
549 nvkm_device_event_func
= {
550 .ctor
= nvkm_device_event_ctor
,
554 nvkm_device_fini(struct nvkm_object
*object
, bool suspend
)
556 struct nvkm_device
*device
= (void *)object
;
557 struct nvkm_object
*subdev
;
560 for (i
= NVDEV_SUBDEV_NR
- 1; i
>= 0; i
--) {
561 if ((subdev
= device
->subdev
[i
])) {
562 if (!nv_iclass(subdev
, NV_ENGINE_CLASS
)) {
563 ret
= nvkm_object_dec(subdev
, suspend
);
570 ret
= nvkm_acpi_fini(device
, suspend
);
572 for (; ret
&& i
< NVDEV_SUBDEV_NR
; i
++) {
573 if ((subdev
= device
->subdev
[i
])) {
574 if (!nv_iclass(subdev
, NV_ENGINE_CLASS
)) {
575 ret
= nvkm_object_inc(subdev
);
587 nvkm_device_init(struct nvkm_object
*object
)
589 struct nvkm_device
*device
= (void *)object
;
590 struct nvkm_object
*subdev
;
593 ret
= nvkm_acpi_init(device
);
597 for (i
= 0; i
< NVDEV_SUBDEV_NR
; i
++) {
598 if ((subdev
= device
->subdev
[i
])) {
599 if (!nv_iclass(subdev
, NV_ENGINE_CLASS
)) {
600 ret
= nvkm_object_inc(subdev
);
604 nvkm_subdev_reset(subdev
);
611 for (--i
; ret
&& i
>= 0; i
--) {
612 if ((subdev
= device
->subdev
[i
])) {
613 if (!nv_iclass(subdev
, NV_ENGINE_CLASS
))
614 nvkm_object_dec(subdev
, false);
619 nvkm_acpi_fini(device
, false);
624 nvkm_device_dtor(struct nvkm_object
*object
)
626 struct nvkm_device
*device
= (void *)object
;
628 nvkm_event_fini(&device
->event
);
630 mutex_lock(&nv_devices_mutex
);
631 list_del(&device
->head
);
632 mutex_unlock(&nv_devices_mutex
);
634 if (nv_subdev(device
)->mmio
)
635 iounmap(nv_subdev(device
)->mmio
);
637 nvkm_engine_destroy(&device
->engine
);
641 nv_device_resource_start(struct nvkm_device
*device
, unsigned int bar
)
643 if (nv_device_is_pci(device
)) {
644 return pci_resource_start(device
->pdev
, bar
);
646 struct resource
*res
;
647 res
= platform_get_resource(device
->platformdev
,
648 IORESOURCE_MEM
, bar
);
656 nv_device_resource_len(struct nvkm_device
*device
, unsigned int bar
)
658 if (nv_device_is_pci(device
)) {
659 return pci_resource_len(device
->pdev
, bar
);
661 struct resource
*res
;
662 res
= platform_get_resource(device
->platformdev
,
663 IORESOURCE_MEM
, bar
);
666 return resource_size(res
);
671 nv_device_get_irq(struct nvkm_device
*device
, bool stall
)
673 if (nv_device_is_pci(device
)) {
674 return device
->pdev
->irq
;
676 return platform_get_irq_byname(device
->platformdev
,
677 stall
? "stall" : "nonstall");
681 static struct nvkm_oclass
682 nvkm_device_oclass
= {
683 .handle
= NV_ENGINE(DEVICE
, 0x00),
684 .ofuncs
= &(struct nvkm_ofuncs
) {
685 .dtor
= nvkm_device_dtor
,
686 .init
= nvkm_device_init
,
687 .fini
= nvkm_device_fini
,
692 nvkm_device_create_(void *dev
, enum nv_bus_type type
, u64 name
,
693 const char *sname
, const char *cfg
, const char *dbg
,
694 int length
, void **pobject
)
696 struct nvkm_device
*device
;
699 mutex_lock(&nv_devices_mutex
);
700 list_for_each_entry(device
, &nv_devices
, head
) {
701 if (device
->handle
== name
)
705 ret
= nvkm_engine_create_(NULL
, NULL
, &nvkm_device_oclass
, true,
706 "DEVICE", "device", length
, pobject
);
715 case NVKM_BUS_PLATFORM
:
716 device
->platformdev
= dev
;
719 device
->handle
= name
;
720 device
->cfgopt
= cfg
;
721 device
->dbgopt
= dbg
;
722 device
->name
= sname
;
724 nv_subdev(device
)->debug
= nvkm_dbgopt(device
->dbgopt
, "DEVICE");
725 nv_engine(device
)->sclass
= nvkm_device_sclass
;
726 list_add(&device
->head
, &nv_devices
);
728 ret
= nvkm_event_init(&nvkm_device_event_func
, 1, 1, &device
->event
);
730 mutex_unlock(&nv_devices_mutex
);