support for platform devices
[deliverable/linux.git] drivers/gpu/drm/nouveau/core/engine/device/base.c
/*
 * Copyright 2012 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Ben Skeggs
 */

#include <core/object.h>
#include <core/device.h>
#include <core/client.h>
#include <core/option.h>

#include <core/class.h>

#include "priv.h"

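/* registry of every nouveau_device created so far; additions, removals and
 * lookups are all serialised by nv_devices_mutex.
 */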
static DEFINE_MUTEX(nv_devices_mutex);
static LIST_HEAD(nv_devices);

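/* look up a previously registered device by its unique handle; returns NULL
 * if no device with that handle has been created.
 */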
struct nouveau_device *
nouveau_device_find(u64 name)
{
        struct nouveau_device *device, *match = NULL;
        mutex_lock(&nv_devices_mutex);
        list_for_each_entry(device, &nv_devices, head) {
                if (device->handle == name) {
                        match = device;
                        break;
                }
        }
        mutex_unlock(&nv_devices_mutex);
        return match;
}

/******************************************************************************
 * nouveau_devobj (0x0080): class implementation
 *****************************************************************************/
struct nouveau_devobj {
        struct nouveau_parent base;
        struct nouveau_object *subdev[NVDEV_SUBDEV_NR];
};

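/* maps each internal subdev/engine index onto the NV_DEVICE_DISABLE_* flag a
 * client can set in nv_device_class.disable to prevent it being created.
 */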
static const u64 disable_map[] = {
        [NVDEV_SUBDEV_VBIOS]   = NV_DEVICE_DISABLE_VBIOS,
        [NVDEV_SUBDEV_DEVINIT] = NV_DEVICE_DISABLE_CORE,
        [NVDEV_SUBDEV_GPIO]    = NV_DEVICE_DISABLE_CORE,
        [NVDEV_SUBDEV_I2C]     = NV_DEVICE_DISABLE_CORE,
        [NVDEV_SUBDEV_CLOCK]   = NV_DEVICE_DISABLE_CORE,
        [NVDEV_SUBDEV_MXM]     = NV_DEVICE_DISABLE_CORE,
        [NVDEV_SUBDEV_MC]      = NV_DEVICE_DISABLE_CORE,
        [NVDEV_SUBDEV_BUS]     = NV_DEVICE_DISABLE_CORE,
        [NVDEV_SUBDEV_TIMER]   = NV_DEVICE_DISABLE_CORE,
        [NVDEV_SUBDEV_FB]      = NV_DEVICE_DISABLE_CORE,
        [NVDEV_SUBDEV_LTCG]    = NV_DEVICE_DISABLE_CORE,
        [NVDEV_SUBDEV_IBUS]    = NV_DEVICE_DISABLE_CORE,
        [NVDEV_SUBDEV_INSTMEM] = NV_DEVICE_DISABLE_CORE,
        [NVDEV_SUBDEV_VM]      = NV_DEVICE_DISABLE_CORE,
        [NVDEV_SUBDEV_BAR]     = NV_DEVICE_DISABLE_CORE,
        [NVDEV_SUBDEV_VOLT]    = NV_DEVICE_DISABLE_CORE,
        [NVDEV_SUBDEV_THERM]   = NV_DEVICE_DISABLE_CORE,
        [NVDEV_SUBDEV_PWR]     = NV_DEVICE_DISABLE_CORE,
        [NVDEV_ENGINE_DMAOBJ]  = NV_DEVICE_DISABLE_CORE,
        [NVDEV_ENGINE_PERFMON] = NV_DEVICE_DISABLE_CORE,
        [NVDEV_ENGINE_FIFO]    = NV_DEVICE_DISABLE_FIFO,
        [NVDEV_ENGINE_SW]      = NV_DEVICE_DISABLE_FIFO,
        [NVDEV_ENGINE_GR]      = NV_DEVICE_DISABLE_GRAPH,
        [NVDEV_ENGINE_MPEG]    = NV_DEVICE_DISABLE_MPEG,
        [NVDEV_ENGINE_ME]      = NV_DEVICE_DISABLE_ME,
        [NVDEV_ENGINE_VP]      = NV_DEVICE_DISABLE_VP,
        [NVDEV_ENGINE_CRYPT]   = NV_DEVICE_DISABLE_CRYPT,
        [NVDEV_ENGINE_BSP]     = NV_DEVICE_DISABLE_BSP,
        [NVDEV_ENGINE_PPP]     = NV_DEVICE_DISABLE_PPP,
        [NVDEV_ENGINE_COPY0]   = NV_DEVICE_DISABLE_COPY0,
        [NVDEV_ENGINE_COPY1]   = NV_DEVICE_DISABLE_COPY1,
        [NVDEV_ENGINE_VIC]     = NV_DEVICE_DISABLE_VIC,
        [NVDEV_ENGINE_VENC]    = NV_DEVICE_DISABLE_VENC,
        [NVDEV_ENGINE_DISP]    = NV_DEVICE_DISABLE_DISP,
        [NVDEV_SUBDEV_NR]      = 0,
};

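/* constructor for the 0x0080 device class: locates the nouveau_device the
 * client asked for, identifies the chipset and crystal frequency on first
 * use (unless NV_DEVICE_DISABLE_IDENTIFY is set), maps the register
 * aperture, and then creates every subdev/engine the device supports and the
 * client hasn't disabled, initialising them once devinit's dependencies
 * exist.
 */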
static int
nouveau_devobj_ctor(struct nouveau_object *parent,
                    struct nouveau_object *engine,
                    struct nouveau_oclass *oclass, void *data, u32 size,
                    struct nouveau_object **pobject)
{
        struct nouveau_client *client = nv_client(parent);
        struct nouveau_device *device;
        struct nouveau_devobj *devobj;
        struct nv_device_class *args = data;
        u32 boot0, strap;
        u64 disable, mmio_base, mmio_size;
        void __iomem *map;
        int ret, i, c;

        if (size < sizeof(struct nv_device_class))
                return -EINVAL;

        /* find the device subdev that matches what the client requested */
        device = nv_device(client->device);
        if (args->device != ~0) {
                device = nouveau_device_find(args->device);
                if (!device)
                        return -ENODEV;
        }

        ret = nouveau_parent_create(parent, nv_object(device), oclass, 0,
                                    nouveau_control_oclass,
                                    (1ULL << NVDEV_ENGINE_DMAOBJ) |
                                    (1ULL << NVDEV_ENGINE_FIFO) |
                                    (1ULL << NVDEV_ENGINE_DISP) |
                                    (1ULL << NVDEV_ENGINE_PERFMON), &devobj);
        *pobject = nv_object(devobj);
        if (ret)
                return ret;

        mmio_base = nv_device_resource_start(device, 0);
        mmio_size = nv_device_resource_len(device, 0);

        /* translate api disable mask into internal mapping */
        disable = args->debug0;
        for (i = 0; i < NVDEV_SUBDEV_NR; i++) {
                if (args->disable & disable_map[i])
                        disable |= (1ULL << i);
        }

        /* identify the chipset, and determine classes of subdev/engines */
        if (!(args->disable & NV_DEVICE_DISABLE_IDENTIFY) &&
            !device->card_type) {
                map = ioremap(mmio_base, 0x102000);
                if (map == NULL)
                        return -ENOMEM;

                /* switch mmio to cpu's native endianness */
#ifndef __BIG_ENDIAN
                if (ioread32_native(map + 0x000004) != 0x00000000)
#else
                if (ioread32_native(map + 0x000004) == 0x00000000)
#endif
                        iowrite32_native(0x01000001, map + 0x000004);

                /* read boot0 and strapping information */
                boot0 = ioread32_native(map + 0x000000);
                strap = ioread32_native(map + 0x101000);
                iounmap(map);

                /* determine chipset and derive architecture from it */
                if ((boot0 & 0x1f000000) > 0) {
                        device->chipset = (boot0 & 0x1ff00000) >> 20;
                        switch (device->chipset & 0x1f0) {
                        case 0x010: {
                                if (0x461 & (1 << (device->chipset & 0xf)))
                                        device->card_type = NV_10;
                                else
                                        device->card_type = NV_11;
                                break;
                        }
                        case 0x020: device->card_type = NV_20; break;
                        case 0x030: device->card_type = NV_30; break;
                        case 0x040:
                        case 0x060: device->card_type = NV_40; break;
                        case 0x050:
                        case 0x080:
                        case 0x090:
                        case 0x0a0: device->card_type = NV_50; break;
                        case 0x0c0: device->card_type = NV_C0; break;
                        case 0x0d0: device->card_type = NV_D0; break;
                        case 0x0e0:
                        case 0x0f0:
                        case 0x100: device->card_type = NV_E0; break;
                        case 0x110: device->card_type = GM100; break;
                        default:
                                break;
                        }
                } else
                if ((boot0 & 0xff00fff0) == 0x20004000) {
                        if (boot0 & 0x00f00000)
                                device->chipset = 0x05;
                        else
                                device->chipset = 0x04;
                        device->card_type = NV_04;
                }

                switch (device->card_type) {
                case NV_04: ret = nv04_identify(device); break;
                case NV_10:
                case NV_11: ret = nv10_identify(device); break;
                case NV_20: ret = nv20_identify(device); break;
                case NV_30: ret = nv30_identify(device); break;
                case NV_40: ret = nv40_identify(device); break;
                case NV_50: ret = nv50_identify(device); break;
                case NV_C0:
                case NV_D0: ret = nvc0_identify(device); break;
                case NV_E0: ret = nve0_identify(device); break;
                case GM100: ret = gm100_identify(device); break;
                default:
                        ret = -EINVAL;
                        break;
                }

                if (ret) {
                        nv_error(device, "unknown chipset, 0x%08x\n", boot0);
                        return ret;
                }

                nv_info(device, "BOOT0  : 0x%08x\n", boot0);
                nv_info(device, "Chipset: %s (NV%02X)\n",
                        device->cname, device->chipset);
                nv_info(device, "Family : NV%02X\n", device->card_type);

                /* determine frequency of timing crystal */
                if ( device->card_type <= NV_10 || device->chipset < 0x17 ||
                    (device->chipset >= 0x20 && device->chipset < 0x25))
                        strap &= 0x00000040;
                else
                        strap &= 0x00400040;

                switch (strap) {
                case 0x00000000: device->crystal = 13500; break;
                case 0x00000040: device->crystal = 14318; break;
                case 0x00400000: device->crystal = 27000; break;
                case 0x00400040: device->crystal = 25000; break;
                }

                nv_debug(device, "crystal freq: %dKHz\n", device->crystal);
        }

        if (!(args->disable & NV_DEVICE_DISABLE_MMIO) &&
            !nv_subdev(device)->mmio) {
                nv_subdev(device)->mmio = ioremap(mmio_base, mmio_size);
                if (!nv_subdev(device)->mmio) {
                        nv_error(device, "unable to map device registers\n");
                        return -ENOMEM;
                }
        }

        /* ensure requested subsystems are available for use */
        for (i = 1, c = 1; i < NVDEV_SUBDEV_NR; i++) {
                if (!(oclass = device->oclass[i]) || (disable & (1ULL << i)))
                        continue;

                if (device->subdev[i]) {
                        nouveau_object_ref(device->subdev[i],
                                           &devobj->subdev[i]);
                        continue;
                }

                ret = nouveau_object_ctor(nv_object(device), NULL,
                                          oclass, NULL, i,
                                          &devobj->subdev[i]);
                if (ret == -ENODEV)
                        continue;
                if (ret)
                        return ret;

                device->subdev[i] = devobj->subdev[i];

                /* note: can't init *any* subdevs until devinit has been run
                 * due to not knowing exactly what the vbios init tables will
                 * mess with.  devinit also can't be run until all of its
                 * dependencies have been created.
                 *
                 * this code delays init of any subdev until all of devinit's
                 * dependencies have been created, and then initialises each
                 * subdev in turn as they're created.
                 */
                while (i >= NVDEV_SUBDEV_DEVINIT_LAST && c <= i) {
                        struct nouveau_object *subdev = devobj->subdev[c++];
                        if (subdev && !nv_iclass(subdev, NV_ENGINE_CLASS)) {
                                ret = nouveau_object_inc(subdev);
                                if (ret)
                                        return ret;
                                atomic_dec(&nv_object(device)->usecount);
                        } else
                        if (subdev) {
                                nouveau_subdev_reset(subdev);
                        }
                }
        }

        return 0;
}

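/* drop the references the constructor took on each subdev/engine before
 * destroying the parent object.
 */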
static void
nouveau_devobj_dtor(struct nouveau_object *object)
{
        struct nouveau_devobj *devobj = (void *)object;
        int i;

        for (i = NVDEV_SUBDEV_NR - 1; i >= 0; i--)
                nouveau_object_ref(NULL, &devobj->subdev[i]);

        nouveau_parent_destroy(&devobj->base);
}

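/* the 0x0080 object's register accessors simply forward to the
 * nouveau_device that backs it (object->engine), which owns the mapped
 * mmio aperture.
 */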
static u8
nouveau_devobj_rd08(struct nouveau_object *object, u64 addr)
{
        return nv_rd08(object->engine, addr);
}

static u16
nouveau_devobj_rd16(struct nouveau_object *object, u64 addr)
{
        return nv_rd16(object->engine, addr);
}

static u32
nouveau_devobj_rd32(struct nouveau_object *object, u64 addr)
{
        return nv_rd32(object->engine, addr);
}

static void
nouveau_devobj_wr08(struct nouveau_object *object, u64 addr, u8 data)
{
        nv_wr08(object->engine, addr, data);
}

static void
nouveau_devobj_wr16(struct nouveau_object *object, u64 addr, u16 data)
{
        nv_wr16(object->engine, addr, data);
}

static void
nouveau_devobj_wr32(struct nouveau_object *object, u64 addr, u32 data)
{
        nv_wr32(object->engine, addr, data);
}

static struct nouveau_ofuncs
nouveau_devobj_ofuncs = {
        .ctor = nouveau_devobj_ctor,
        .dtor = nouveau_devobj_dtor,
        .init = _nouveau_parent_init,
        .fini = _nouveau_parent_fini,
        .rd08 = nouveau_devobj_rd08,
        .rd16 = nouveau_devobj_rd16,
        .rd32 = nouveau_devobj_rd32,
        .wr08 = nouveau_devobj_wr08,
        .wr16 = nouveau_devobj_wr16,
        .wr32 = nouveau_devobj_wr32,
};

/******************************************************************************
 * nouveau_device: engine functions
 *****************************************************************************/
static struct nouveau_oclass
nouveau_device_sclass[] = {
        { 0x0080, &nouveau_devobj_ofuncs },
        {}
};

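/* suspend/stop: tear subdevs down in reverse creation order; if one refuses
 * to suspend, re-init everything already torn down and fail the suspend.
 */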
static int
nouveau_device_fini(struct nouveau_object *object, bool suspend)
{
        struct nouveau_device *device = (void *)object;
        struct nouveau_object *subdev;
        int ret, i;

        for (i = NVDEV_SUBDEV_NR - 1; i >= 0; i--) {
                if ((subdev = device->subdev[i])) {
                        if (!nv_iclass(subdev, NV_ENGINE_CLASS)) {
                                ret = nouveau_object_dec(subdev, suspend);
                                if (ret && suspend)
                                        goto fail;
                        }
                }
        }

        ret = 0;
fail:
        for (; ret && i < NVDEV_SUBDEV_NR; i++) {
                if ((subdev = device->subdev[i])) {
                        if (!nv_iclass(subdev, NV_ENGINE_CLASS)) {
                                ret = nouveau_object_inc(subdev);
                                if (ret) {
                                        /* XXX */
                                }
                        }
                }
        }

        return ret;
}

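/* resume/start: bring subdevs up in creation order, unwinding the ones that
 * made it if a later subdev fails to initialise.
 */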
static int
nouveau_device_init(struct nouveau_object *object)
{
        struct nouveau_device *device = (void *)object;
        struct nouveau_object *subdev;
        int ret, i;

        for (i = 0; i < NVDEV_SUBDEV_NR; i++) {
                if ((subdev = device->subdev[i])) {
                        if (!nv_iclass(subdev, NV_ENGINE_CLASS)) {
                                ret = nouveau_object_inc(subdev);
                                if (ret)
                                        goto fail;
                        } else {
                                nouveau_subdev_reset(subdev);
                        }
                }
        }

        ret = 0;
fail:
        for (--i; ret && i >= 0; i--) {
                if ((subdev = device->subdev[i])) {
                        if (!nv_iclass(subdev, NV_ENGINE_CLASS))
                                nouveau_object_dec(subdev, false);
                }
        }

        return ret;
}

static void
nouveau_device_dtor(struct nouveau_object *object)
{
        struct nouveau_device *device = (void *)object;

        mutex_lock(&nv_devices_mutex);
        list_del(&device->head);
        mutex_unlock(&nv_devices_mutex);

        if (nv_subdev(device)->mmio)
                iounmap(nv_subdev(device)->mmio);

        nouveau_engine_destroy(&device->base);
}

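/* the helpers below hide the difference between PCI and platform devices:
 * resources come either from the PCI BARs or from the platform device's
 * IORESOURCE_MEM entries, depending on how the device was created.
 */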
resource_size_t
nv_device_resource_start(struct nouveau_device *device, unsigned int bar)
{
        if (nv_device_is_pci(device)) {
                return pci_resource_start(device->pdev, bar);
        } else {
                struct resource *res;
                res = platform_get_resource(device->platformdev,
                                            IORESOURCE_MEM, bar);
                if (!res)
                        return 0;
                return res->start;
        }
}

resource_size_t
nv_device_resource_len(struct nouveau_device *device, unsigned int bar)
{
        if (nv_device_is_pci(device)) {
                return pci_resource_len(device->pdev, bar);
        } else {
                struct resource *res;
                res = platform_get_resource(device->platformdev,
                                            IORESOURCE_MEM, bar);
                if (!res)
                        return 0;
                return resource_size(res);
        }
}

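/* map a page for device DMA: PCI devices go through the PCI DMA API (0 is
 * returned on a mapping error), while platform devices currently just use
 * the page's physical address.
 */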
dma_addr_t
nv_device_map_page(struct nouveau_device *device, struct page *page)
{
        dma_addr_t ret;

        if (nv_device_is_pci(device)) {
                ret = pci_map_page(device->pdev, page, 0, PAGE_SIZE,
                                   PCI_DMA_BIDIRECTIONAL);
                if (pci_dma_mapping_error(device->pdev, ret))
                        ret = 0;
        } else {
                ret = page_to_phys(page);
        }

        return ret;
}

void
nv_device_unmap_page(struct nouveau_device *device, dma_addr_t addr)
{
        if (nv_device_is_pci(device))
                pci_unmap_page(device->pdev, addr, PAGE_SIZE,
                               PCI_DMA_BIDIRECTIONAL);
}

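/* fetch the interrupt line: the single PCI irq, or the named "stall" /
 * "nonstall" interrupt of a platform device.
 */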
int
nv_device_get_irq(struct nouveau_device *device, bool stall)
{
        if (nv_device_is_pci(device)) {
                return device->pdev->irq;
        } else {
                return platform_get_irq_byname(device->platformdev,
                                               stall ? "stall" : "nonstall");
        }
}

static struct nouveau_oclass
nouveau_device_oclass = {
        .handle = NV_ENGINE(DEVICE, 0x00),
        .ofuncs = &(struct nouveau_ofuncs) {
                .dtor = nouveau_device_dtor,
                .init = nouveau_device_init,
                .fini = nouveau_device_fini,
        },
};

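/* common backend for device creation, shared by the PCI and platform bus
 * front-ends.  @dev is a struct pci_dev * or struct platform_device *
 * depending on @type, and @name is the unique handle later passed to
 * nouveau_device_find().  returns -EEXIST if a device with the same handle
 * already exists.
 *
 * illustrative sketch only (the exact wrapper and helper names used by the
 * bus front-ends may differ from this assumption); a PCI probe path would
 * do something along the lines of:
 *
 *      ret = nouveau_device_create(pdev, NOUVEAU_BUS_PCI, handle,
 *                                  pci_name(pdev), config, debug, &device);
 */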
int
nouveau_device_create_(void *dev, enum nv_bus_type type, u64 name,
                       const char *sname, const char *cfg, const char *dbg,
                       int length, void **pobject)
{
        struct nouveau_device *device;
        int ret = -EEXIST;

        mutex_lock(&nv_devices_mutex);
        list_for_each_entry(device, &nv_devices, head) {
                if (device->handle == name)
                        goto done;
        }

        ret = nouveau_engine_create_(NULL, NULL, &nouveau_device_oclass, true,
                                     "DEVICE", "device", length, pobject);
        device = *pobject;
        if (ret)
                goto done;

        switch (type) {
        case NOUVEAU_BUS_PCI:
                device->pdev = dev;
                break;
        case NOUVEAU_BUS_PLATFORM:
                device->platformdev = dev;
                break;
        }
        device->handle = name;
        device->cfgopt = cfg;
        device->dbgopt = dbg;
        device->name = sname;

        nv_subdev(device)->debug = nouveau_dbgopt(device->dbgopt, "DEVICE");
        nv_engine(device)->sclass = nouveau_device_sclass;
        list_add(&device->head, &nv_devices);
done:
        mutex_unlock(&nv_devices_mutex);
        return ret;
}