drm: Move DRM_MAGIC_HASH_ORDER into drm_drv.c
/*
 * Created: Fri Jan 19 10:48:35 2001 by faith@acm.org
 *
 * Copyright 2001 VA Linux Systems, Inc., Sunnyvale, California.
 * All Rights Reserved.
 *
 * Author Rickard E. (Rik) Faith <faith@valinux.com>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 */

#include <linux/debugfs.h>
#include <linux/fs.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/mount.h>
#include <linux/slab.h>
#include <drm/drmP.h>
#include <drm/drm_core.h>
#include "drm_legacy.h"
#include "drm_internal.h"

unsigned int drm_debug = 0;	/* bitmask of enabled debug categories, see DRM_UT_* */
EXPORT_SYMBOL(drm_debug);

MODULE_AUTHOR(CORE_AUTHOR);
MODULE_DESCRIPTION(CORE_DESC);
MODULE_LICENSE("GPL and additional rights");
MODULE_PARM_DESC(debug, "Enable debug output");
MODULE_PARM_DESC(vblankoffdelay, "Delay until vblank irq auto-disable [msecs] (0: never disable, <0: disable immediately)");
MODULE_PARM_DESC(timestamp_precision_usec, "Max. error on timestamps [usecs]");
MODULE_PARM_DESC(timestamp_monotonic, "Use monotonic timestamps");

module_param_named(debug, drm_debug, int, 0600);

static DEFINE_SPINLOCK(drm_minor_lock);
static struct idr drm_minors_idr;

struct class *drm_class;
static struct dentry *drm_debugfs_root;

int drm_err(const char *func, const char *format, ...)
{
	struct va_format vaf;
	va_list args;
	int r;

	va_start(args, format);

	vaf.fmt = format;
	vaf.va = &args;

	r = printk(KERN_ERR "[" DRM_NAME ":%s] *ERROR* %pV", func, &vaf);

	va_end(args);

	return r;
}
EXPORT_SYMBOL(drm_err);

void drm_ut_debug_printk(const char *function_name, const char *format, ...)
{
	struct va_format vaf;
	va_list args;

	va_start(args, format);
	vaf.fmt = format;
	vaf.va = &args;

	printk(KERN_DEBUG "[" DRM_NAME ":%s] %pV", function_name, &vaf);

	va_end(args);
}
EXPORT_SYMBOL(drm_ut_debug_printk);

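/*
 * Illustrative sketch (not built, guarded by #if 0): callers normally reach
 * drm_err() and drm_ut_debug_printk() through the DRM_ERROR()/DRM_DEBUG()
 * macros from drmP.h rather than calling them directly. The function below
 * and its error condition are made up for illustration only.
 */
#if 0
static int foo_validate(struct drm_device *dev)
{
	DRM_DEBUG("validating device state\n");

	if (!dev->dev_private) {
		DRM_ERROR("no driver private data\n");
		return -EINVAL;
	}

	return 0;
}
#endif
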
#define DRM_MAGIC_HASH_ORDER  4  /**< log2 of the magic-key hash table size passed to drm_ht_create() */

struct drm_master *drm_master_create(struct drm_minor *minor)
{
	struct drm_master *master;

	master = kzalloc(sizeof(*master), GFP_KERNEL);
	if (!master)
		return NULL;

	kref_init(&master->refcount);
	spin_lock_init(&master->lock.spinlock);
	init_waitqueue_head(&master->lock.lock_queue);
	if (drm_ht_create(&master->magiclist, DRM_MAGIC_HASH_ORDER)) {
		kfree(master);
		return NULL;
	}
	INIT_LIST_HEAD(&master->magicfree);
	master->minor = minor;

	return master;
}

struct drm_master *drm_master_get(struct drm_master *master)
{
	kref_get(&master->refcount);
	return master;
}
EXPORT_SYMBOL(drm_master_get);

static void drm_master_destroy(struct kref *kref)
{
	struct drm_master *master = container_of(kref, struct drm_master, refcount);
	struct drm_device *dev = master->minor->dev;
	struct drm_map_list *r_list, *list_temp;

	mutex_lock(&dev->struct_mutex);
	if (dev->driver->master_destroy)
		dev->driver->master_destroy(dev, master);

	list_for_each_entry_safe(r_list, list_temp, &dev->maplist, head) {
		if (r_list->master == master) {
			drm_legacy_rmmap_locked(dev, r_list->map);
			r_list = NULL;
		}
	}

	if (master->unique) {
		kfree(master->unique);
		master->unique = NULL;
		master->unique_len = 0;
	}

	drm_ht_remove(&master->magiclist);

	mutex_unlock(&dev->struct_mutex);
	kfree(master);
}

void drm_master_put(struct drm_master **master)
{
	kref_put(&(*master)->refcount, drm_master_destroy);
	*master = NULL;
}
EXPORT_SYMBOL(drm_master_put);
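
/*
 * Illustrative sketch (not built): the get/put pattern for holding an extra
 * master reference. The surrounding helper is hypothetical; note that
 * drm_master_put() also clears the caller's pointer.
 */
#if 0
static void foo_peek_master(struct drm_file *file_priv)
{
	struct drm_master *master;

	master = drm_master_get(file_priv->master);

	/* ... inspect or use the master here ... */

	drm_master_put(&master);
}
#endif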

int drm_setmaster_ioctl(struct drm_device *dev, void *data,
			struct drm_file *file_priv)
{
	int ret = 0;

	mutex_lock(&dev->master_mutex);
	if (file_priv->is_master)
		goto out_unlock;

	if (file_priv->minor->master) {
		ret = -EINVAL;
		goto out_unlock;
	}

	if (!file_priv->master) {
		ret = -EINVAL;
		goto out_unlock;
	}

	file_priv->minor->master = drm_master_get(file_priv->master);
	file_priv->is_master = 1;
	if (dev->driver->master_set) {
		ret = dev->driver->master_set(dev, file_priv, false);
		if (unlikely(ret != 0)) {
			file_priv->is_master = 0;
			drm_master_put(&file_priv->minor->master);
		}
	}

out_unlock:
	mutex_unlock(&dev->master_mutex);
	return ret;
}

int drm_dropmaster_ioctl(struct drm_device *dev, void *data,
			 struct drm_file *file_priv)
{
	int ret = -EINVAL;

	mutex_lock(&dev->master_mutex);
	if (!file_priv->is_master)
		goto out_unlock;

	if (!file_priv->minor->master)
		goto out_unlock;

	ret = 0;
	if (dev->driver->master_drop)
		dev->driver->master_drop(dev, file_priv, false);
	drm_master_put(&file_priv->minor->master);
	file_priv->is_master = 0;

out_unlock:
	mutex_unlock(&dev->master_mutex);
	return ret;
}

/*
 * DRM Minors
 * A DRM device can provide several char-dev interfaces on the DRM-Major. Each
 * of them is represented by a drm_minor object. Depending on the capabilities
 * of the device-driver, different interfaces are registered.
 *
 * Minors can be accessed via dev->$minor_name. This pointer is either
 * NULL or a valid drm_minor pointer and stays valid as long as the device is
 * valid. This means DRM minors have the same life-time as the underlying
 * device. However, this doesn't mean that the minor is active. Minors are
 * registered and unregistered dynamically according to device-state.
 */
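
/*
 * Illustrative sketch (not built): how the per-type minor pointers described
 * above might be consulted by a driver. The helper is hypothetical; each
 * pointer is either NULL or stays valid for the device's whole life-time.
 */
#if 0
static void foo_dump_minors(struct drm_device *dev)
{
	if (dev->primary)
		DRM_DEBUG("primary minor: %d\n", dev->primary->index);
	if (dev->control)
		DRM_DEBUG("control minor: %d\n", dev->control->index);
	if (dev->render)
		DRM_DEBUG("render minor: %d\n", dev->render->index);
}
#endif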

static struct drm_minor **drm_minor_get_slot(struct drm_device *dev,
					     unsigned int type)
{
	switch (type) {
	case DRM_MINOR_LEGACY:
		return &dev->primary;
	case DRM_MINOR_RENDER:
		return &dev->render;
	case DRM_MINOR_CONTROL:
		return &dev->control;
	default:
		return NULL;
	}
}

static int drm_minor_alloc(struct drm_device *dev, unsigned int type)
{
	struct drm_minor *minor;
	unsigned long flags;
	int r;

	minor = kzalloc(sizeof(*minor), GFP_KERNEL);
	if (!minor)
		return -ENOMEM;

	minor->type = type;
	minor->dev = dev;

	idr_preload(GFP_KERNEL);
	spin_lock_irqsave(&drm_minor_lock, flags);
	r = idr_alloc(&drm_minors_idr,
		      NULL,
		      64 * type,
		      64 * (type + 1),
		      GFP_NOWAIT);
	spin_unlock_irqrestore(&drm_minor_lock, flags);
	idr_preload_end();

	if (r < 0)
		goto err_free;

	minor->index = r;

	minor->kdev = drm_sysfs_minor_alloc(minor);
	if (IS_ERR(minor->kdev)) {
		r = PTR_ERR(minor->kdev);
		goto err_index;
	}

	*drm_minor_get_slot(dev, type) = minor;
	return 0;

err_index:
	spin_lock_irqsave(&drm_minor_lock, flags);
	idr_remove(&drm_minors_idr, minor->index);
	spin_unlock_irqrestore(&drm_minor_lock, flags);
err_free:
	kfree(minor);
	return r;
}

static void drm_minor_free(struct drm_device *dev, unsigned int type)
{
	struct drm_minor **slot, *minor;
	unsigned long flags;

	slot = drm_minor_get_slot(dev, type);
	minor = *slot;
	if (!minor)
		return;

	drm_mode_group_destroy(&minor->mode_group);
	put_device(minor->kdev);

	spin_lock_irqsave(&drm_minor_lock, flags);
	idr_remove(&drm_minors_idr, minor->index);
	spin_unlock_irqrestore(&drm_minor_lock, flags);

	kfree(minor);
	*slot = NULL;
}

static int drm_minor_register(struct drm_device *dev, unsigned int type)
{
	struct drm_minor *minor;
	unsigned long flags;
	int ret;

	DRM_DEBUG("\n");

	minor = *drm_minor_get_slot(dev, type);
	if (!minor)
		return 0;

	ret = drm_debugfs_init(minor, minor->index, drm_debugfs_root);
	if (ret) {
		DRM_ERROR("DRM: Failed to initialize /sys/kernel/debug/dri.\n");
		return ret;
	}

	ret = device_add(minor->kdev);
	if (ret)
		goto err_debugfs;

	/* replace NULL with @minor so lookups will succeed from now on */
	spin_lock_irqsave(&drm_minor_lock, flags);
	idr_replace(&drm_minors_idr, minor, minor->index);
	spin_unlock_irqrestore(&drm_minor_lock, flags);

	DRM_DEBUG("new minor registered %d\n", minor->index);
	return 0;

err_debugfs:
	drm_debugfs_cleanup(minor);
	return ret;
}

static void drm_minor_unregister(struct drm_device *dev, unsigned int type)
{
	struct drm_minor *minor;
	unsigned long flags;

	minor = *drm_minor_get_slot(dev, type);
	if (!minor || !device_is_registered(minor->kdev))
		return;

	/* replace @minor with NULL so lookups will fail from now on */
	spin_lock_irqsave(&drm_minor_lock, flags);
	idr_replace(&drm_minors_idr, NULL, minor->index);
	spin_unlock_irqrestore(&drm_minor_lock, flags);

	device_del(minor->kdev);
	dev_set_drvdata(minor->kdev, NULL); /* safety belt */
	drm_debugfs_cleanup(minor);
}

/**
 * drm_minor_acquire - Acquire a DRM minor
 * @minor_id: Minor ID of the DRM-minor
 *
 * Looks up the given minor-ID and returns the respective DRM-minor object. The
 * reference-count of the underlying device is increased so you must release
 * this object with drm_minor_release().
 *
 * As long as you hold this minor, it is guaranteed that the object and the
 * minor->dev pointer will stay valid! However, the device may get unplugged and
 * unregistered while you hold the minor.
 *
 * Returns:
 * Pointer to minor-object with increased device-refcount, or PTR_ERR on
 * failure.
 */
struct drm_minor *drm_minor_acquire(unsigned int minor_id)
{
	struct drm_minor *minor;
	unsigned long flags;

	spin_lock_irqsave(&drm_minor_lock, flags);
	minor = idr_find(&drm_minors_idr, minor_id);
	if (minor)
		drm_dev_ref(minor->dev);
	spin_unlock_irqrestore(&drm_minor_lock, flags);

	if (!minor) {
		return ERR_PTR(-ENODEV);
	} else if (drm_device_is_unplugged(minor->dev)) {
		drm_dev_unref(minor->dev);
		return ERR_PTR(-ENODEV);
	}

	return minor;
}

/**
 * drm_minor_release - Release DRM minor
 * @minor: Pointer to DRM minor object
 *
 * Release a minor that was previously acquired via drm_minor_acquire().
 */
void drm_minor_release(struct drm_minor *minor)
{
	drm_dev_unref(minor->dev);
}
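
/*
 * Illustrative sketch (not built): the acquire/use/release pattern documented
 * above, wrapped in a hypothetical lookup helper. minor->dev stays valid
 * until drm_minor_release() is called.
 */
#if 0
static int foo_lookup_minor(unsigned int minor_id)
{
	struct drm_minor *minor;

	minor = drm_minor_acquire(minor_id);
	if (IS_ERR(minor))
		return PTR_ERR(minor);

	DRM_DEBUG("minor %d belongs to driver %s\n",
		  minor->index, minor->dev->driver->name);

	drm_minor_release(minor);
	return 0;
}
#endif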

/**
 * drm_put_dev - Unregister and release a DRM device
 * @dev: DRM device
 *
 * Called at module unload time or when a PCI device is unplugged.
 *
 * Use of this function is discouraged. It will eventually go away completely.
 * Please use drm_dev_unregister() and drm_dev_unref() explicitly instead.
 *
 * Cleans up the whole DRM device, calling drm_lastclose().
 */
void drm_put_dev(struct drm_device *dev)
{
	DRM_DEBUG("\n");

	if (!dev) {
		DRM_ERROR("cleanup called no dev\n");
		return;
	}

	drm_dev_unregister(dev);
	drm_dev_unref(dev);
}
EXPORT_SYMBOL(drm_put_dev);

void drm_unplug_dev(struct drm_device *dev)
{
	/* for a USB device */
	drm_minor_unregister(dev, DRM_MINOR_LEGACY);
	drm_minor_unregister(dev, DRM_MINOR_RENDER);
	drm_minor_unregister(dev, DRM_MINOR_CONTROL);

	mutex_lock(&drm_global_mutex);

	drm_device_set_unplugged(dev);

	if (dev->open_count == 0) {
		drm_put_dev(dev);
	}
	mutex_unlock(&drm_global_mutex);
}
EXPORT_SYMBOL(drm_unplug_dev);

/*
 * DRM internal mount
 * We want to be able to allocate our own "struct address_space" to control
 * memory-mappings in VRAM (or stolen RAM, ...). However, core MM does not allow
 * stand-alone address_space objects, so we need an underlying inode. As there
 * is no way to allocate an independent inode easily, we need a fake internal
 * VFS mount-point.
 *
 * The drm_fs_inode_new() function allocates a new inode, drm_fs_inode_free()
 * frees it again. You are allowed to use iget() and iput() to get references to
 * the inode. But each drm_fs_inode_new() call must be paired with exactly one
 * drm_fs_inode_free() call (which does not have to be the last iput()).
 * We use drm_fs_inode_*() to manage our internal VFS mount-point and share it
 * between multiple inode-users. You could, technically, call
 * iget() + drm_fs_inode_free() directly after alloc and sometime later do an
 * iput(), but this way you'd end up with a new vfsmount for each inode.
 */

static int drm_fs_cnt;
static struct vfsmount *drm_fs_mnt;

static const struct dentry_operations drm_fs_dops = {
	.d_dname	= simple_dname,
};

static const struct super_operations drm_fs_sops = {
	.statfs		= simple_statfs,
};

static struct dentry *drm_fs_mount(struct file_system_type *fs_type, int flags,
				   const char *dev_name, void *data)
{
	return mount_pseudo(fs_type,
			    "drm:",
			    &drm_fs_sops,
			    &drm_fs_dops,
			    0x010203ff);
}

static struct file_system_type drm_fs_type = {
	.name		= "drm",
	.owner		= THIS_MODULE,
	.mount		= drm_fs_mount,
	.kill_sb	= kill_anon_super,
};

static struct inode *drm_fs_inode_new(void)
{
	struct inode *inode;
	int r;

	r = simple_pin_fs(&drm_fs_type, &drm_fs_mnt, &drm_fs_cnt);
	if (r < 0) {
		DRM_ERROR("Cannot mount pseudo fs: %d\n", r);
		return ERR_PTR(r);
	}

	inode = alloc_anon_inode(drm_fs_mnt->mnt_sb);
	if (IS_ERR(inode))
		simple_release_fs(&drm_fs_mnt, &drm_fs_cnt);

	return inode;
}

static void drm_fs_inode_free(struct inode *inode)
{
	if (inode) {
		iput(inode);
		simple_release_fs(&drm_fs_mnt, &drm_fs_cnt);
	}
}
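
/*
 * Illustrative sketch (not built): the pairing rule from the "DRM internal
 * mount" comment above. Every drm_fs_inode_new() must be matched by exactly
 * one drm_fs_inode_free(); the caller shown here is hypothetical.
 */
#if 0
static int foo_inode_pairing(struct drm_device *dev)
{
	dev->anon_inode = drm_fs_inode_new();
	if (IS_ERR(dev->anon_inode))
		return PTR_ERR(dev->anon_inode);

	/* ... use dev->anon_inode while the device lives ... */

	drm_fs_inode_free(dev->anon_inode);
	return 0;
}
#endif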

/**
 * drm_dev_alloc - Allocate new DRM device
 * @driver: DRM driver to allocate device for
 * @parent: Parent device object
 *
 * Allocate and initialize a new DRM device. No device registration is done.
 * Call drm_dev_register() to advertise the device to user space and register it
 * with other core subsystems.
 *
 * The initial ref-count of the object is 1. Use drm_dev_ref() and
 * drm_dev_unref() to take and drop further ref-counts.
 *
 * RETURNS:
 * Pointer to new DRM device, or NULL if out of memory.
 */
struct drm_device *drm_dev_alloc(struct drm_driver *driver,
				 struct device *parent)
{
	struct drm_device *dev;
	int ret;

	dev = kzalloc(sizeof(*dev), GFP_KERNEL);
	if (!dev)
		return NULL;

	kref_init(&dev->ref);
	dev->dev = parent;
	dev->driver = driver;

	INIT_LIST_HEAD(&dev->filelist);
	INIT_LIST_HEAD(&dev->ctxlist);
	INIT_LIST_HEAD(&dev->vmalist);
	INIT_LIST_HEAD(&dev->maplist);
	INIT_LIST_HEAD(&dev->vblank_event_list);

	spin_lock_init(&dev->buf_lock);
	spin_lock_init(&dev->event_lock);
	mutex_init(&dev->struct_mutex);
	mutex_init(&dev->ctxlist_mutex);
	mutex_init(&dev->master_mutex);

	dev->anon_inode = drm_fs_inode_new();
	if (IS_ERR(dev->anon_inode)) {
		ret = PTR_ERR(dev->anon_inode);
		DRM_ERROR("Cannot allocate anonymous inode: %d\n", ret);
		goto err_free;
	}

	if (drm_core_check_feature(dev, DRIVER_MODESET)) {
		ret = drm_minor_alloc(dev, DRM_MINOR_CONTROL);
		if (ret)
			goto err_minors;
	}

	if (drm_core_check_feature(dev, DRIVER_RENDER)) {
		ret = drm_minor_alloc(dev, DRM_MINOR_RENDER);
		if (ret)
			goto err_minors;
	}

	ret = drm_minor_alloc(dev, DRM_MINOR_LEGACY);
	if (ret)
		goto err_minors;

	if (drm_ht_create(&dev->map_hash, 12))
		goto err_minors;

	ret = drm_legacy_ctxbitmap_init(dev);
	if (ret) {
		DRM_ERROR("Cannot allocate memory for context bitmap.\n");
		goto err_ht;
	}

	if (driver->driver_features & DRIVER_GEM) {
		ret = drm_gem_init(dev);
		if (ret) {
			DRM_ERROR("Cannot initialize graphics execution manager (GEM)\n");
			goto err_ctxbitmap;
		}
	}

	return dev;

err_ctxbitmap:
	drm_legacy_ctxbitmap_cleanup(dev);
err_ht:
	drm_ht_remove(&dev->map_hash);
err_minors:
	drm_minor_free(dev, DRM_MINOR_LEGACY);
	drm_minor_free(dev, DRM_MINOR_RENDER);
	drm_minor_free(dev, DRM_MINOR_CONTROL);
	drm_fs_inode_free(dev->anon_inode);
err_free:
	mutex_destroy(&dev->master_mutex);
	kfree(dev);
	return NULL;
}
EXPORT_SYMBOL(drm_dev_alloc);
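
/*
 * Illustrative sketch (not built): allocating a device from a hypothetical
 * driver's probe path. foo_driver is made up; the initial reference taken
 * here must eventually be dropped with drm_dev_unref(). Registration is
 * sketched separately after drm_dev_register() below.
 */
#if 0
static struct drm_device *foo_alloc(struct device *parent)
{
	struct drm_device *ddev;

	ddev = drm_dev_alloc(&foo_driver, parent);
	if (!ddev)
		return ERR_PTR(-ENOMEM);

	return ddev;
}
#endif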

static void drm_dev_release(struct kref *ref)
{
	struct drm_device *dev = container_of(ref, struct drm_device, ref);

	if (dev->driver->driver_features & DRIVER_GEM)
		drm_gem_destroy(dev);

	drm_legacy_ctxbitmap_cleanup(dev);
	drm_ht_remove(&dev->map_hash);
	drm_fs_inode_free(dev->anon_inode);

	drm_minor_free(dev, DRM_MINOR_LEGACY);
	drm_minor_free(dev, DRM_MINOR_RENDER);
	drm_minor_free(dev, DRM_MINOR_CONTROL);

	mutex_destroy(&dev->master_mutex);
	kfree(dev->unique);
	kfree(dev);
}

/**
 * drm_dev_ref - Take reference of a DRM device
 * @dev: device to take reference of or NULL
 *
 * This increases the ref-count of @dev by one. You *must* already own a
 * reference when calling this. Use drm_dev_unref() to drop this reference
 * again.
 *
 * This function never fails. However, this function does not provide *any*
 * guarantee whether the device is alive or running. It only provides a
 * reference to the object and the memory associated with it.
 */
void drm_dev_ref(struct drm_device *dev)
{
	if (dev)
		kref_get(&dev->ref);
}
EXPORT_SYMBOL(drm_dev_ref);

/**
 * drm_dev_unref - Drop reference of a DRM device
 * @dev: device to drop reference of or NULL
 *
 * This decreases the ref-count of @dev by one. The device is destroyed if the
 * ref-count drops to zero.
 */
void drm_dev_unref(struct drm_device *dev)
{
	if (dev)
		kref_put(&dev->ref, drm_dev_release);
}
EXPORT_SYMBOL(drm_dev_unref);

/**
 * drm_dev_register - Register DRM device
 * @dev: Device to register
 * @flags: Flags passed to the driver's .load() function
 *
 * Register the DRM device @dev with the system, advertise device to user-space
 * and start normal device operation. @dev must be allocated via drm_dev_alloc()
 * previously.
 *
 * Never call this twice on any device!
 *
 * RETURNS:
 * 0 on success, negative error code on failure.
 */
int drm_dev_register(struct drm_device *dev, unsigned long flags)
{
	int ret;

	mutex_lock(&drm_global_mutex);

	ret = drm_minor_register(dev, DRM_MINOR_CONTROL);
	if (ret)
		goto err_minors;

	ret = drm_minor_register(dev, DRM_MINOR_RENDER);
	if (ret)
		goto err_minors;

	ret = drm_minor_register(dev, DRM_MINOR_LEGACY);
	if (ret)
		goto err_minors;

	if (dev->driver->load) {
		ret = dev->driver->load(dev, flags);
		if (ret)
			goto err_minors;
	}

	/* setup grouping for legacy outputs */
	if (drm_core_check_feature(dev, DRIVER_MODESET)) {
		ret = drm_mode_group_init_legacy_group(dev,
						       &dev->primary->mode_group);
		if (ret)
			goto err_unload;
	}

	ret = 0;
	goto out_unlock;

err_unload:
	if (dev->driver->unload)
		dev->driver->unload(dev);
err_minors:
	drm_minor_unregister(dev, DRM_MINOR_LEGACY);
	drm_minor_unregister(dev, DRM_MINOR_RENDER);
	drm_minor_unregister(dev, DRM_MINOR_CONTROL);
out_unlock:
	mutex_unlock(&drm_global_mutex);
	return ret;
}
EXPORT_SYMBOL(drm_dev_register);
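
/*
 * Illustrative sketch (not built): registering a freshly allocated device
 * from a hypothetical probe function. foo_driver and foo_probe() are made
 * up; on failure the probe path drops the initial reference taken by
 * drm_dev_alloc().
 */
#if 0
static int foo_probe(struct device *parent)
{
	struct drm_device *ddev;
	int ret;

	ddev = drm_dev_alloc(&foo_driver, parent);
	if (!ddev)
		return -ENOMEM;

	ret = drm_dev_register(ddev, 0);
	if (ret) {
		drm_dev_unref(ddev);
		return ret;
	}

	return 0;
}
#endif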

/**
 * drm_dev_unregister - Unregister DRM device
 * @dev: Device to unregister
 *
 * Unregister the DRM device from the system. This does the reverse of
 * drm_dev_register() but does not deallocate the device. The caller must call
 * drm_dev_unref() to drop their final reference.
 */
void drm_dev_unregister(struct drm_device *dev)
{
	struct drm_map_list *r_list, *list_temp;

	drm_lastclose(dev);

	if (dev->driver->unload)
		dev->driver->unload(dev);

	if (dev->agp)
		drm_pci_agp_destroy(dev);

	drm_vblank_cleanup(dev);

	list_for_each_entry_safe(r_list, list_temp, &dev->maplist, head)
		drm_legacy_rmmap(dev, r_list->map);

	drm_minor_unregister(dev, DRM_MINOR_LEGACY);
	drm_minor_unregister(dev, DRM_MINOR_RENDER);
	drm_minor_unregister(dev, DRM_MINOR_CONTROL);
}
EXPORT_SYMBOL(drm_dev_unregister);
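
/*
 * Illustrative sketch (not built): the recommended teardown order for a
 * hypothetical driver's remove path, i.e. the explicit two-step variant of
 * drm_put_dev() suggested earlier in this file.
 */
#if 0
static void foo_remove(struct drm_device *ddev)
{
	drm_dev_unregister(ddev);
	drm_dev_unref(ddev);
}
#endif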

/**
 * drm_dev_set_unique - Set the unique name of a DRM device
 * @dev: device of which to set the unique name
 * @fmt: format string for unique name
 *
 * Sets the unique name of a DRM device using the specified format string and
 * a variable list of arguments. Drivers can use this at driver probe time if
 * the unique name of the devices they drive is static.
 *
 * Return: 0 on success or a negative error code on failure.
 */
int drm_dev_set_unique(struct drm_device *dev, const char *fmt, ...)
{
	va_list ap;

	kfree(dev->unique);

	va_start(ap, fmt);
	dev->unique = kvasprintf(GFP_KERNEL, fmt, ap);
	va_end(ap);

	return dev->unique ? 0 : -ENOMEM;
}
EXPORT_SYMBOL(drm_dev_set_unique);
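
/*
 * Illustrative sketch (not built): a driver whose device name is static could
 * set it once at probe time. The "platform:%s" format and the helper are
 * made up for illustration.
 */
#if 0
static int foo_set_unique(struct drm_device *ddev, struct device *parent)
{
	return drm_dev_set_unique(ddev, "platform:%s", dev_name(parent));
}
#endif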

/*
 * DRM Core
 * The DRM core module initializes all global DRM objects and makes them
 * available to drivers. Once set up, drivers can probe their respective
 * devices.
 * Currently, core management includes:
 *  - The "DRM-Global" key/value database
 *  - Global ID management for connectors
 *  - DRM major number allocation
 *  - DRM minor management
 *  - DRM sysfs class
 *  - DRM debugfs root
 *
 * Furthermore, the DRM core provides dynamic char-dev lookups. For each
 * interface registered on a DRM device, you can request minor numbers from DRM
 * core. DRM core takes care of major-number management and char-dev
 * registration. A stub ->open() callback forwards any open() requests to the
 * registered minor.
 */

static int drm_stub_open(struct inode *inode, struct file *filp)
{
	const struct file_operations *new_fops;
	struct drm_minor *minor;
	int err;

	DRM_DEBUG("\n");

	mutex_lock(&drm_global_mutex);
	minor = drm_minor_acquire(iminor(inode));
	if (IS_ERR(minor)) {
		err = PTR_ERR(minor);
		goto out_unlock;
	}

	new_fops = fops_get(minor->dev->driver->fops);
	if (!new_fops) {
		err = -ENODEV;
		goto out_release;
	}

	replace_fops(filp, new_fops);
	if (filp->f_op->open)
		err = filp->f_op->open(inode, filp);
	else
		err = 0;

out_release:
	drm_minor_release(minor);
out_unlock:
	mutex_unlock(&drm_global_mutex);
	return err;
}

static const struct file_operations drm_stub_fops = {
	.owner = THIS_MODULE,
	.open = drm_stub_open,
	.llseek = noop_llseek,
};

static int __init drm_core_init(void)
{
	int ret = -ENOMEM;

	drm_global_init();
	drm_connector_ida_init();
	idr_init(&drm_minors_idr);

	if (register_chrdev(DRM_MAJOR, "drm", &drm_stub_fops))
		goto err_p1;

	drm_class = drm_sysfs_create(THIS_MODULE, "drm");
	if (IS_ERR(drm_class)) {
		printk(KERN_ERR "DRM: Error creating drm class.\n");
		ret = PTR_ERR(drm_class);
		goto err_p2;
	}

	drm_debugfs_root = debugfs_create_dir("dri", NULL);
	if (!drm_debugfs_root) {
		DRM_ERROR("Cannot create /sys/kernel/debug/dri\n");
		ret = -1;
		goto err_p3;
	}

	DRM_INFO("Initialized %s %d.%d.%d %s\n",
		 CORE_NAME, CORE_MAJOR, CORE_MINOR, CORE_PATCHLEVEL, CORE_DATE);
	return 0;
err_p3:
	drm_sysfs_destroy();
err_p2:
	unregister_chrdev(DRM_MAJOR, "drm");

	idr_destroy(&drm_minors_idr);
err_p1:
	return ret;
}

static void __exit drm_core_exit(void)
{
	debugfs_remove(drm_debugfs_root);
	drm_sysfs_destroy();

	unregister_chrdev(DRM_MAJOR, "drm");

	drm_connector_ida_destroy();
	idr_destroy(&drm_minors_idr);
}

module_init(drm_core_init);
module_exit(drm_core_exit);