drm: drop unused 'magicfree' list
deliverable/linux.git: drivers/gpu/drm/drm_drv.c
/*
 * Created: Fri Jan 19 10:48:35 2001 by faith@acm.org
 *
 * Copyright 2001 VA Linux Systems, Inc., Sunnyvale, California.
 * All Rights Reserved.
 *
 * Author Rickard E. (Rik) Faith <faith@valinux.com>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 */

#include <linux/debugfs.h>
#include <linux/fs.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/mount.h>
#include <linux/slab.h>
#include <drm/drmP.h>
#include <drm/drm_core.h>
#include "drm_legacy.h"
#include "drm_internal.h"

unsigned int drm_debug = 0;     /* 1 to enable debug output */
EXPORT_SYMBOL(drm_debug);

bool drm_atomic = 0;

MODULE_AUTHOR(CORE_AUTHOR);
MODULE_DESCRIPTION(CORE_DESC);
MODULE_LICENSE("GPL and additional rights");
MODULE_PARM_DESC(debug, "Enable debug output");
MODULE_PARM_DESC(atomic, "Enable experimental atomic KMS API");
MODULE_PARM_DESC(vblankoffdelay, "Delay until vblank irq auto-disable [msecs] (0: never disable, <0: disable immediately)");
MODULE_PARM_DESC(timestamp_precision_usec, "Max. error on timestamps [usecs]");
MODULE_PARM_DESC(timestamp_monotonic, "Use monotonic timestamps");

module_param_named(debug, drm_debug, int, 0600);
module_param_named_unsafe(atomic, drm_atomic, bool, 0600);

static DEFINE_SPINLOCK(drm_minor_lock);
static struct idr drm_minors_idr;

struct class *drm_class;
static struct dentry *drm_debugfs_root;

void drm_err(const char *format, ...)
{
        struct va_format vaf;
        va_list args;

        va_start(args, format);

        vaf.fmt = format;
        vaf.va = &args;

        printk(KERN_ERR "[" DRM_NAME ":%ps] *ERROR* %pV",
               __builtin_return_address(0), &vaf);

        va_end(args);
}
EXPORT_SYMBOL(drm_err);

void drm_ut_debug_printk(const char *function_name, const char *format, ...)
{
        struct va_format vaf;
        va_list args;

        va_start(args, format);
        vaf.fmt = format;
        vaf.va = &args;

        printk(KERN_DEBUG "[" DRM_NAME ":%s] %pV", function_name, &vaf);

        va_end(args);
}
EXPORT_SYMBOL(drm_ut_debug_printk);
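
/*
 * Illustrative sketch (not part of the original file, kept compiled out):
 * drivers normally reach drm_err() and drm_ut_debug_printk() through the
 * DRM_ERROR()/DRM_DEBUG() helpers from drmP.h rather than calling them
 * directly, and DRM_DEBUG() output is gated by the drm_debug module
 * parameter. The function below is a made-up example.
 */
#if 0
static void example_log_usage(struct drm_device *dev)
{
        /* Always printed; drm_err() tags it with the calling function. */
        DRM_ERROR("initialization failed on %s\n", dev->driver->name);

        /* Only printed when the matching bit in drm_debug is set. */
        DRM_DEBUG("probing minors for device %p\n", dev);
        DRM_DEBUG_KMS("mode-setting path taken\n");
}
#endif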

#define DRM_MAGIC_HASH_ORDER 4 /**< Size of key hash table. Must be power of 2. */

struct drm_master *drm_master_create(struct drm_minor *minor)
{
        struct drm_master *master;

        master = kzalloc(sizeof(*master), GFP_KERNEL);
        if (!master)
                return NULL;

        kref_init(&master->refcount);
        spin_lock_init(&master->lock.spinlock);
        init_waitqueue_head(&master->lock.lock_queue);
        if (drm_ht_create(&master->magiclist, DRM_MAGIC_HASH_ORDER)) {
                kfree(master);
                return NULL;
        }
        master->minor = minor;

        return master;
}

struct drm_master *drm_master_get(struct drm_master *master)
{
        kref_get(&master->refcount);
        return master;
}
EXPORT_SYMBOL(drm_master_get);

static void drm_master_destroy(struct kref *kref)
{
        struct drm_master *master = container_of(kref, struct drm_master, refcount);
        struct drm_device *dev = master->minor->dev;
        struct drm_map_list *r_list, *list_temp;

        mutex_lock(&dev->struct_mutex);
        if (dev->driver->master_destroy)
                dev->driver->master_destroy(dev, master);

        list_for_each_entry_safe(r_list, list_temp, &dev->maplist, head) {
                if (r_list->master == master) {
                        drm_legacy_rmmap_locked(dev, r_list->map);
                        r_list = NULL;
                }
        }

        if (master->unique) {
                kfree(master->unique);
                master->unique = NULL;
                master->unique_len = 0;
        }

        drm_ht_remove(&master->magiclist);

        mutex_unlock(&dev->struct_mutex);
        kfree(master);
}

void drm_master_put(struct drm_master **master)
{
        kref_put(&(*master)->refcount, drm_master_destroy);
        *master = NULL;
}
EXPORT_SYMBOL(drm_master_put);

int drm_setmaster_ioctl(struct drm_device *dev, void *data,
                        struct drm_file *file_priv)
{
        int ret = 0;

        mutex_lock(&dev->master_mutex);
        if (file_priv->is_master)
                goto out_unlock;

        if (file_priv->minor->master) {
                ret = -EINVAL;
                goto out_unlock;
        }

        if (!file_priv->master) {
                ret = -EINVAL;
                goto out_unlock;
        }

        file_priv->minor->master = drm_master_get(file_priv->master);
        file_priv->is_master = 1;
        if (dev->driver->master_set) {
                ret = dev->driver->master_set(dev, file_priv, false);
                if (unlikely(ret != 0)) {
                        file_priv->is_master = 0;
                        drm_master_put(&file_priv->minor->master);
                }
        }

out_unlock:
        mutex_unlock(&dev->master_mutex);
        return ret;
}

int drm_dropmaster_ioctl(struct drm_device *dev, void *data,
                         struct drm_file *file_priv)
{
        int ret = -EINVAL;

        mutex_lock(&dev->master_mutex);
        if (!file_priv->is_master)
                goto out_unlock;

        if (!file_priv->minor->master)
                goto out_unlock;

        ret = 0;
        if (dev->driver->master_drop)
                dev->driver->master_drop(dev, file_priv, false);
        drm_master_put(&file_priv->minor->master);
        file_priv->is_master = 0;

out_unlock:
        mutex_unlock(&dev->master_mutex);
        return ret;
}

/*
 * DRM Minors
 * A DRM device can provide several char-dev interfaces on the DRM-Major. Each
 * of them is represented by a drm_minor object. Depending on the capabilities
 * of the device-driver, different interfaces are registered.
 *
 * Minors can be accessed via dev->$minor_name. This pointer is either
 * NULL or a valid drm_minor pointer and stays valid as long as the device is
 * valid. This means, DRM minors have the same life-time as the underlying
 * device. However, this doesn't mean that the minor is active. Minors are
 * registered and unregistered dynamically according to device-state.
 */

static struct drm_minor **drm_minor_get_slot(struct drm_device *dev,
                                             unsigned int type)
{
        switch (type) {
        case DRM_MINOR_LEGACY:
                return &dev->primary;
        case DRM_MINOR_RENDER:
                return &dev->render;
        case DRM_MINOR_CONTROL:
                return &dev->control;
        default:
                return NULL;
        }
}

static int drm_minor_alloc(struct drm_device *dev, unsigned int type)
{
        struct drm_minor *minor;
        unsigned long flags;
        int r;

        minor = kzalloc(sizeof(*minor), GFP_KERNEL);
        if (!minor)
                return -ENOMEM;

        minor->type = type;
        minor->dev = dev;

        idr_preload(GFP_KERNEL);
        spin_lock_irqsave(&drm_minor_lock, flags);
        r = idr_alloc(&drm_minors_idr,
                      NULL,
                      64 * type,
                      64 * (type + 1),
                      GFP_NOWAIT);
        spin_unlock_irqrestore(&drm_minor_lock, flags);
        idr_preload_end();

        if (r < 0)
                goto err_free;

        minor->index = r;

        minor->kdev = drm_sysfs_minor_alloc(minor);
        if (IS_ERR(minor->kdev)) {
                r = PTR_ERR(minor->kdev);
                goto err_index;
        }

        *drm_minor_get_slot(dev, type) = minor;
        return 0;

err_index:
        spin_lock_irqsave(&drm_minor_lock, flags);
        idr_remove(&drm_minors_idr, minor->index);
        spin_unlock_irqrestore(&drm_minor_lock, flags);
err_free:
        kfree(minor);
        return r;
}

static void drm_minor_free(struct drm_device *dev, unsigned int type)
{
        struct drm_minor **slot, *minor;
        unsigned long flags;

        slot = drm_minor_get_slot(dev, type);
        minor = *slot;
        if (!minor)
                return;

        drm_mode_group_destroy(&minor->mode_group);
        put_device(minor->kdev);

        spin_lock_irqsave(&drm_minor_lock, flags);
        idr_remove(&drm_minors_idr, minor->index);
        spin_unlock_irqrestore(&drm_minor_lock, flags);

        kfree(minor);
        *slot = NULL;
}

static int drm_minor_register(struct drm_device *dev, unsigned int type)
{
        struct drm_minor *minor;
        unsigned long flags;
        int ret;

        DRM_DEBUG("\n");

        minor = *drm_minor_get_slot(dev, type);
        if (!minor)
                return 0;

        ret = drm_debugfs_init(minor, minor->index, drm_debugfs_root);
        if (ret) {
                DRM_ERROR("DRM: Failed to initialize /sys/kernel/debug/dri.\n");
                return ret;
        }

        ret = device_add(minor->kdev);
        if (ret)
                goto err_debugfs;

        /* replace NULL with @minor so lookups will succeed from now on */
        spin_lock_irqsave(&drm_minor_lock, flags);
        idr_replace(&drm_minors_idr, minor, minor->index);
        spin_unlock_irqrestore(&drm_minor_lock, flags);

        DRM_DEBUG("new minor registered %d\n", minor->index);
        return 0;

err_debugfs:
        drm_debugfs_cleanup(minor);
        return ret;
}

static void drm_minor_unregister(struct drm_device *dev, unsigned int type)
{
        struct drm_minor *minor;
        unsigned long flags;

        minor = *drm_minor_get_slot(dev, type);
        if (!minor || !device_is_registered(minor->kdev))
                return;

        /* replace @minor with NULL so lookups will fail from now on */
        spin_lock_irqsave(&drm_minor_lock, flags);
        idr_replace(&drm_minors_idr, NULL, minor->index);
        spin_unlock_irqrestore(&drm_minor_lock, flags);

        device_del(minor->kdev);
        dev_set_drvdata(minor->kdev, NULL); /* safety belt */
        drm_debugfs_cleanup(minor);
}

/**
 * drm_minor_acquire - Acquire a DRM minor
 * @minor_id: Minor ID of the DRM-minor
 *
 * Looks up the given minor-ID and returns the respective DRM-minor object. The
 * reference-count of the underlying device is increased so you must release this
 * object with drm_minor_release().
 *
 * As long as you hold this minor, it is guaranteed that the object and the
 * minor->dev pointer will stay valid! However, the device may get unplugged and
 * unregistered while you hold the minor.
 *
 * Returns:
 * Pointer to minor-object with increased device-refcount, or PTR_ERR on
 * failure.
 */
struct drm_minor *drm_minor_acquire(unsigned int minor_id)
{
        struct drm_minor *minor;
        unsigned long flags;

        spin_lock_irqsave(&drm_minor_lock, flags);
        minor = idr_find(&drm_minors_idr, minor_id);
        if (minor)
                drm_dev_ref(minor->dev);
        spin_unlock_irqrestore(&drm_minor_lock, flags);

        if (!minor) {
                return ERR_PTR(-ENODEV);
        } else if (drm_device_is_unplugged(minor->dev)) {
                drm_dev_unref(minor->dev);
                return ERR_PTR(-ENODEV);
        }

        return minor;
}

/**
 * drm_minor_release - Release DRM minor
 * @minor: Pointer to DRM minor object
 *
 * Release a minor that was previously acquired via drm_minor_acquire().
 */
void drm_minor_release(struct drm_minor *minor)
{
        drm_dev_unref(minor->dev);
}
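
/*
 * Illustrative sketch (not part of the original file, kept compiled out):
 * the intended acquire/use/release pairing for the two helpers above, the
 * same pattern used by the char-dev stub open path further down in this
 * file. The function name example_open_minor is hypothetical.
 */
#if 0
static int example_open_minor(unsigned int minor_id)
{
        struct drm_minor *minor;

        minor = drm_minor_acquire(minor_id);
        if (IS_ERR(minor))
                return PTR_ERR(minor);

        /* minor and minor->dev are guaranteed to stay valid here... */
        DRM_DEBUG("using minor %d of %s\n", minor->index,
                  minor->dev->driver->name);

        /* ...until the matching release drops the device reference again. */
        drm_minor_release(minor);
        return 0;
}
#endif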

/**
 * drm_put_dev - Unregister and release a DRM device
 * @dev: DRM device
 *
 * Called at module unload time or when a PCI device is unplugged.
 *
 * Use of this function is discouraged. It will eventually go away completely.
 * Please use drm_dev_unregister() and drm_dev_unref() explicitly instead.
 *
 * Cleans up the DRM device, calling drm_lastclose().
 */
void drm_put_dev(struct drm_device *dev)
{
        DRM_DEBUG("\n");

        if (!dev) {
                DRM_ERROR("cleanup called no dev\n");
                return;
        }

        drm_dev_unregister(dev);
        drm_dev_unref(dev);
}
EXPORT_SYMBOL(drm_put_dev);

void drm_unplug_dev(struct drm_device *dev)
{
        /* for a USB device */
        drm_minor_unregister(dev, DRM_MINOR_LEGACY);
        drm_minor_unregister(dev, DRM_MINOR_RENDER);
        drm_minor_unregister(dev, DRM_MINOR_CONTROL);

        mutex_lock(&drm_global_mutex);

        drm_device_set_unplugged(dev);

        if (dev->open_count == 0) {
                drm_put_dev(dev);
        }
        mutex_unlock(&drm_global_mutex);
}
EXPORT_SYMBOL(drm_unplug_dev);

/*
 * DRM internal mount
 * We want to be able to allocate our own "struct address_space" to control
 * memory-mappings in VRAM (or stolen RAM, ...). However, core MM does not allow
 * stand-alone address_space objects, so we need an underlying inode. As there
 * is no way to allocate an independent inode easily, we need a fake internal
 * VFS mount-point.
 *
 * The drm_fs_inode_new() function allocates a new inode, drm_fs_inode_free()
 * frees it again. You are allowed to use iget() and iput() to get references to
 * the inode. But each drm_fs_inode_new() call must be paired with exactly one
 * drm_fs_inode_free() call (which does not have to be the last iput()).
 * We use drm_fs_inode_*() to manage our internal VFS mount-point and share it
 * between multiple inode-users. You could, technically, call
 * iget() + drm_fs_inode_free() directly after alloc and sometime later do an
 * iput(), but this way you'd end up with a new vfsmount for each inode.
 */

static int drm_fs_cnt;
static struct vfsmount *drm_fs_mnt;

static const struct dentry_operations drm_fs_dops = {
        .d_dname = simple_dname,
};

static const struct super_operations drm_fs_sops = {
        .statfs = simple_statfs,
};

static struct dentry *drm_fs_mount(struct file_system_type *fs_type, int flags,
                                   const char *dev_name, void *data)
{
        return mount_pseudo(fs_type,
                            "drm:",
                            &drm_fs_sops,
                            &drm_fs_dops,
                            0x010203ff);
}

static struct file_system_type drm_fs_type = {
        .name = "drm",
        .owner = THIS_MODULE,
        .mount = drm_fs_mount,
        .kill_sb = kill_anon_super,
};

static struct inode *drm_fs_inode_new(void)
{
        struct inode *inode;
        int r;

        r = simple_pin_fs(&drm_fs_type, &drm_fs_mnt, &drm_fs_cnt);
        if (r < 0) {
                DRM_ERROR("Cannot mount pseudo fs: %d\n", r);
                return ERR_PTR(r);
        }

        inode = alloc_anon_inode(drm_fs_mnt->mnt_sb);
        if (IS_ERR(inode))
                simple_release_fs(&drm_fs_mnt, &drm_fs_cnt);

        return inode;
}

static void drm_fs_inode_free(struct inode *inode)
{
        if (inode) {
                iput(inode);
                simple_release_fs(&drm_fs_mnt, &drm_fs_cnt);
        }
}
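
/*
 * Illustrative sketch (not part of the original file, kept compiled out):
 * the pairing rule from the "DRM internal mount" comment above. Each
 * drm_fs_inode_new() must be balanced by exactly one drm_fs_inode_free(),
 * which also unpins the pseudo filesystem; drm_dev_alloc() and
 * drm_dev_release() below follow exactly this pattern for dev->anon_inode.
 */
#if 0
static int example_inode_pairing(struct drm_device *dev)
{
        dev->anon_inode = drm_fs_inode_new();
        if (IS_ERR(dev->anon_inode))
                return PTR_ERR(dev->anon_inode);

        /* ...use dev->anon_inode->i_mapping as the shared address_space... */

        drm_fs_inode_free(dev->anon_inode);
        return 0;
}
#endif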

/**
 * drm_dev_alloc - Allocate new DRM device
 * @driver: DRM driver to allocate device for
 * @parent: Parent device object
 *
 * Allocate and initialize a new DRM device. No device registration is done.
 * Call drm_dev_register() to advertise the device to user space and register it
 * with other core subsystems.
 *
 * The initial ref-count of the object is 1. Use drm_dev_ref() and
 * drm_dev_unref() to take and drop further ref-counts.
 *
 * Note that for purely virtual devices @parent can be NULL.
 *
 * RETURNS:
 * Pointer to new DRM device, or NULL if out of memory.
 */
struct drm_device *drm_dev_alloc(struct drm_driver *driver,
                                 struct device *parent)
{
        struct drm_device *dev;
        int ret;

        dev = kzalloc(sizeof(*dev), GFP_KERNEL);
        if (!dev)
                return NULL;

        kref_init(&dev->ref);
        dev->dev = parent;
        dev->driver = driver;

        INIT_LIST_HEAD(&dev->filelist);
        INIT_LIST_HEAD(&dev->ctxlist);
        INIT_LIST_HEAD(&dev->vmalist);
        INIT_LIST_HEAD(&dev->maplist);
        INIT_LIST_HEAD(&dev->vblank_event_list);

        spin_lock_init(&dev->buf_lock);
        spin_lock_init(&dev->event_lock);
        mutex_init(&dev->struct_mutex);
        mutex_init(&dev->ctxlist_mutex);
        mutex_init(&dev->master_mutex);

        dev->anon_inode = drm_fs_inode_new();
        if (IS_ERR(dev->anon_inode)) {
                ret = PTR_ERR(dev->anon_inode);
                DRM_ERROR("Cannot allocate anonymous inode: %d\n", ret);
                goto err_free;
        }

        if (drm_core_check_feature(dev, DRIVER_MODESET)) {
                ret = drm_minor_alloc(dev, DRM_MINOR_CONTROL);
                if (ret)
                        goto err_minors;
        }

        if (drm_core_check_feature(dev, DRIVER_RENDER)) {
                ret = drm_minor_alloc(dev, DRM_MINOR_RENDER);
                if (ret)
                        goto err_minors;
        }

        ret = drm_minor_alloc(dev, DRM_MINOR_LEGACY);
        if (ret)
                goto err_minors;

        if (drm_ht_create(&dev->map_hash, 12))
                goto err_minors;

        ret = drm_legacy_ctxbitmap_init(dev);
        if (ret) {
                DRM_ERROR("Cannot allocate memory for context bitmap.\n");
                goto err_ht;
        }

        if (drm_core_check_feature(dev, DRIVER_GEM)) {
                ret = drm_gem_init(dev);
                if (ret) {
                        DRM_ERROR("Cannot initialize graphics execution manager (GEM)\n");
                        goto err_ctxbitmap;
                }
        }

        return dev;

err_ctxbitmap:
        drm_legacy_ctxbitmap_cleanup(dev);
err_ht:
        drm_ht_remove(&dev->map_hash);
err_minors:
        drm_minor_free(dev, DRM_MINOR_LEGACY);
        drm_minor_free(dev, DRM_MINOR_RENDER);
        drm_minor_free(dev, DRM_MINOR_CONTROL);
        drm_fs_inode_free(dev->anon_inode);
err_free:
        mutex_destroy(&dev->master_mutex);
        kfree(dev);
        return NULL;
}
EXPORT_SYMBOL(drm_dev_alloc);

static void drm_dev_release(struct kref *ref)
{
        struct drm_device *dev = container_of(ref, struct drm_device, ref);

        if (drm_core_check_feature(dev, DRIVER_GEM))
                drm_gem_destroy(dev);

        drm_legacy_ctxbitmap_cleanup(dev);
        drm_ht_remove(&dev->map_hash);
        drm_fs_inode_free(dev->anon_inode);

        drm_minor_free(dev, DRM_MINOR_LEGACY);
        drm_minor_free(dev, DRM_MINOR_RENDER);
        drm_minor_free(dev, DRM_MINOR_CONTROL);

        mutex_destroy(&dev->master_mutex);
        kfree(dev->unique);
        kfree(dev);
}

/**
 * drm_dev_ref - Take reference of a DRM device
 * @dev: device to take reference of or NULL
 *
 * This increases the ref-count of @dev by one. You *must* already own a
 * reference when calling this. Use drm_dev_unref() to drop this reference
 * again.
 *
 * This function never fails. However, this function does not provide *any*
 * guarantee whether the device is alive or running. It only provides a
 * reference to the object and the memory associated with it.
 */
void drm_dev_ref(struct drm_device *dev)
{
        if (dev)
                kref_get(&dev->ref);
}
EXPORT_SYMBOL(drm_dev_ref);

/**
 * drm_dev_unref - Drop reference of a DRM device
 * @dev: device to drop reference of or NULL
 *
 * This decreases the ref-count of @dev by one. The device is destroyed if the
 * ref-count drops to zero.
 */
void drm_dev_unref(struct drm_device *dev)
{
        if (dev)
                kref_put(&dev->ref, drm_dev_release);
}
EXPORT_SYMBOL(drm_dev_unref);

/**
 * drm_dev_register - Register DRM device
 * @dev: Device to register
 * @flags: Flags passed to the driver's .load() function
 *
 * Register the DRM device @dev with the system, advertise device to user-space
 * and start normal device operation. @dev must be allocated via drm_dev_alloc()
 * previously.
 *
 * Never call this twice on any device!
 *
 * RETURNS:
 * 0 on success, negative error code on failure.
 */
int drm_dev_register(struct drm_device *dev, unsigned long flags)
{
        int ret;

        mutex_lock(&drm_global_mutex);

        ret = drm_minor_register(dev, DRM_MINOR_CONTROL);
        if (ret)
                goto err_minors;

        ret = drm_minor_register(dev, DRM_MINOR_RENDER);
        if (ret)
                goto err_minors;

        ret = drm_minor_register(dev, DRM_MINOR_LEGACY);
        if (ret)
                goto err_minors;

        if (dev->driver->load) {
                ret = dev->driver->load(dev, flags);
                if (ret)
                        goto err_minors;
        }

        /* setup grouping for legacy outputs */
        if (drm_core_check_feature(dev, DRIVER_MODESET)) {
                ret = drm_mode_group_init_legacy_group(dev,
                                &dev->primary->mode_group);
                if (ret)
                        goto err_unload;
        }

        ret = 0;
        goto out_unlock;

err_unload:
        if (dev->driver->unload)
                dev->driver->unload(dev);
err_minors:
        drm_minor_unregister(dev, DRM_MINOR_LEGACY);
        drm_minor_unregister(dev, DRM_MINOR_RENDER);
        drm_minor_unregister(dev, DRM_MINOR_CONTROL);
out_unlock:
        mutex_unlock(&drm_global_mutex);
        return ret;
}
EXPORT_SYMBOL(drm_dev_register);
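
/*
 * Illustrative sketch (not part of the original file, kept compiled out):
 * a hypothetical driver probe path showing the drm_dev_alloc() and
 * drm_dev_register() pairing described above, with drm_dev_unref() as the
 * error-path cleanup. example_driver and example_probe are made-up names.
 */
#if 0
static struct drm_driver example_driver;        /* hypothetical, filled in elsewhere */

static int example_probe(struct device *parent)
{
        struct drm_device *ddev;
        int ret;

        ddev = drm_dev_alloc(&example_driver, parent);
        if (!ddev)
                return -ENOMEM;

        ret = drm_dev_register(ddev, 0);
        if (ret) {
                drm_dev_unref(ddev);
                return ret;
        }

        return 0;
}
#endif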

/**
 * drm_dev_unregister - Unregister DRM device
 * @dev: Device to unregister
 *
 * Unregister the DRM device from the system. This does the reverse of
 * drm_dev_register() but does not deallocate the device. The caller must call
 * drm_dev_unref() to drop their final reference.
 */
void drm_dev_unregister(struct drm_device *dev)
{
        struct drm_map_list *r_list, *list_temp;

        drm_lastclose(dev);

        if (dev->driver->unload)
                dev->driver->unload(dev);

        if (dev->agp)
                drm_pci_agp_destroy(dev);

        drm_vblank_cleanup(dev);

        list_for_each_entry_safe(r_list, list_temp, &dev->maplist, head)
                drm_legacy_rmmap(dev, r_list->map);

        drm_minor_unregister(dev, DRM_MINOR_LEGACY);
        drm_minor_unregister(dev, DRM_MINOR_RENDER);
        drm_minor_unregister(dev, DRM_MINOR_CONTROL);
}
EXPORT_SYMBOL(drm_dev_unregister);
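
/*
 * Illustrative sketch (not part of the original file, kept compiled out):
 * the explicit teardown sequence recommended over drm_put_dev() further up,
 * i.e. unregister first, then drop the final reference.
 */
#if 0
static void example_remove(struct drm_device *ddev)
{
        drm_dev_unregister(ddev);
        drm_dev_unref(ddev);
}
#endif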

/**
 * drm_dev_set_unique - Set the unique name of a DRM device
 * @dev: device of which to set the unique name
 * @fmt: format string for unique name
 *
 * Sets the unique name of a DRM device using the specified format string and
 * a variable list of arguments. Drivers can use this at driver probe time if
 * the unique name of the devices they drive is static.
 *
 * Return: 0 on success or a negative error code on failure.
 */
int drm_dev_set_unique(struct drm_device *dev, const char *fmt, ...)
{
        va_list ap;

        kfree(dev->unique);

        va_start(ap, fmt);
        dev->unique = kvasprintf(GFP_KERNEL, fmt, ap);
        va_end(ap);

        return dev->unique ? 0 : -ENOMEM;
}
EXPORT_SYMBOL(drm_dev_set_unique);
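
/*
 * Illustrative sketch (not part of the original file, kept compiled out):
 * a driver with a static bus address can set the unique name once at probe
 * time, before drm_dev_register(); dev_name() of the parent device is one
 * plausible choice. The surrounding names are hypothetical.
 */
#if 0
static int example_set_unique(struct drm_device *ddev, struct device *parent)
{
        return drm_dev_set_unique(ddev, "%s", dev_name(parent));
}
#endif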

/*
 * DRM Core
 * The DRM core module initializes all global DRM objects and makes them
 * available to drivers. Once setup, drivers can probe their respective
 * devices.
 * Currently, core management includes:
 *  - The "DRM-Global" key/value database
 *  - Global ID management for connectors
 *  - DRM major number allocation
 *  - DRM minor management
 *  - DRM sysfs class
 *  - DRM debugfs root
 *
 * Furthermore, the DRM core provides dynamic char-dev lookups. For each
 * interface registered on a DRM device, you can request minor numbers from DRM
 * core. DRM core takes care of major-number management and char-dev
 * registration. A stub ->open() callback forwards any open() requests to the
 * registered minor.
 */

static int drm_stub_open(struct inode *inode, struct file *filp)
{
        const struct file_operations *new_fops;
        struct drm_minor *minor;
        int err;

        DRM_DEBUG("\n");

        mutex_lock(&drm_global_mutex);
        minor = drm_minor_acquire(iminor(inode));
        if (IS_ERR(minor)) {
                err = PTR_ERR(minor);
                goto out_unlock;
        }

        new_fops = fops_get(minor->dev->driver->fops);
        if (!new_fops) {
                err = -ENODEV;
                goto out_release;
        }

        replace_fops(filp, new_fops);
        if (filp->f_op->open)
                err = filp->f_op->open(inode, filp);
        else
                err = 0;

out_release:
        drm_minor_release(minor);
out_unlock:
        mutex_unlock(&drm_global_mutex);
        return err;
}

static const struct file_operations drm_stub_fops = {
        .owner = THIS_MODULE,
        .open = drm_stub_open,
        .llseek = noop_llseek,
};

static int __init drm_core_init(void)
{
        int ret = -ENOMEM;

        drm_global_init();
        drm_connector_ida_init();
        idr_init(&drm_minors_idr);

        if (register_chrdev(DRM_MAJOR, "drm", &drm_stub_fops))
                goto err_p1;

        drm_class = drm_sysfs_create(THIS_MODULE, "drm");
        if (IS_ERR(drm_class)) {
                printk(KERN_ERR "DRM: Error creating drm class.\n");
                ret = PTR_ERR(drm_class);
                goto err_p2;
        }

        drm_debugfs_root = debugfs_create_dir("dri", NULL);
        if (!drm_debugfs_root) {
                DRM_ERROR("Cannot create /sys/kernel/debug/dri\n");
                ret = -1;
                goto err_p3;
        }

        DRM_INFO("Initialized %s %d.%d.%d %s\n",
                 CORE_NAME, CORE_MAJOR, CORE_MINOR, CORE_PATCHLEVEL, CORE_DATE);
        return 0;
err_p3:
        drm_sysfs_destroy();
err_p2:
        unregister_chrdev(DRM_MAJOR, "drm");

        idr_destroy(&drm_minors_idr);
err_p1:
        return ret;
}

static void __exit drm_core_exit(void)
{
        debugfs_remove(drm_debugfs_root);
        drm_sysfs_destroy();

        unregister_chrdev(DRM_MAJOR, "drm");

        drm_connector_ida_destroy();
        idr_destroy(&drm_minors_idr);
}

module_init(drm_core_init);
module_exit(drm_core_exit);