drm/prime: proper locking+refcounting for obj->dma_buf link
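
The obj->dma_buf link now gets real locking and refcounting: dev->object_name_lock
is converted from a spinlock to a mutex and also protects obj->handle_count, which
becomes a plain integer. When the last handle for an object goes away, both the
flink name and the reference held through obj->dma_buf are dropped under that
mutex. Handle creation is split into drm_gem_handle_create_tail(), which expects
object_name_lock to be held and drops it before returning, so import paths (flink
open, and by extension dma-buf import) can look up an object and install a new
handle without racing against a concurrent gem_close dropping the last handle.

A minimal sketch of that import-side pattern, modelled on the reworked
drm_gem_open_ioctl() in the diff below; lookup_object_locked() is a hypothetical
stand-in for the flink-name or dma-buf lookup and is not part of this patch:

/*
 * Illustrative only: importers take dev->object_name_lock around the lookup
 * and hand the locked state over to drm_gem_handle_create_tail(), which
 * drops the mutex once the new handle is accounted for.
 */
static int example_import_handle(struct drm_device *dev,
				 struct drm_file *file_priv,
				 u32 *handlep)
{
	struct drm_gem_object *obj;
	int ret;

	mutex_lock(&dev->object_name_lock);
	obj = lookup_object_locked(dev);	/* hypothetical helper */
	if (!obj) {
		mutex_unlock(&dev->object_name_lock);
		return -ENOENT;
	}
	drm_gem_object_reference(obj);

	/*
	 * drm_gem_handle_create_tail() unlocks dev->object_name_lock, so no
	 * one can drop the last handle (and with it the flink name or
	 * obj->dma_buf) between the lookup and the new handle showing up.
	 */
	ret = drm_gem_handle_create_tail(file_priv, obj, handlep);
	drm_gem_object_unreference_unlocked(obj);
	return ret;
}
---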
diff --git a/drivers/gpu/drm/drm_gem.c b/drivers/gpu/drm/drm_gem.c
index dcbd2f559e3978b2c4e3362477ecb8ff75a7c4de..4b3c533be859c80f987206e62a6364930e8c9927 100644
--- a/drivers/gpu/drm/drm_gem.c
+++ b/drivers/gpu/drm/drm_gem.c
@@ -93,7 +93,7 @@ drm_gem_init(struct drm_device *dev)
 {
        struct drm_gem_mm *mm;
 
-       spin_lock_init(&dev->object_name_lock);
+       mutex_init(&dev->object_name_lock);
        idr_init(&dev->object_name_idr);
 
        mm = kzalloc(sizeof(struct drm_gem_mm), GFP_KERNEL);
@@ -154,7 +154,7 @@ void drm_gem_private_object_init(struct drm_device *dev,
        obj->filp = NULL;
 
        kref_init(&obj->refcount);
-       atomic_set(&obj->handle_count, 0);
+       obj->handle_count = 0;
        obj->size = size;
 }
 EXPORT_SYMBOL(drm_gem_private_object_init);
@@ -195,9 +195,14 @@ drm_gem_remove_prime_handles(struct drm_gem_object *obj, struct drm_file *filp)
                drm_prime_remove_buf_handle(&filp->prime,
                                obj->import_attach->dmabuf);
        }
-       if (obj->export_dma_buf) {
+
+       /*
+        * Note: obj->dma_buf can't disappear as long as we still hold a
+        * handle reference in obj->handle_count.
+        */
+       if (obj->dma_buf) {
                drm_prime_remove_buf_handle(&filp->prime,
-                               obj->export_dma_buf);
+                               obj->dma_buf);
        }
 }
 
@@ -218,11 +223,9 @@ static void drm_gem_object_handle_free(struct drm_gem_object *obj)
        struct drm_device *dev = obj->dev;
 
        /* Remove any name for this object */
-       spin_lock(&dev->object_name_lock);
        if (obj->name) {
                idr_remove(&dev->object_name_idr, obj->name);
                obj->name = 0;
-               spin_unlock(&dev->object_name_lock);
                /*
                 * The object name held a reference to this object, drop
                 * that now.
@@ -230,15 +233,22 @@ static void drm_gem_object_handle_free(struct drm_gem_object *obj)
                * This cannot be the last reference, since the handle holds one too.
                 */
                kref_put(&obj->refcount, drm_gem_object_ref_bug);
-       } else
-               spin_unlock(&dev->object_name_lock);
+       }
+}
 
+static void drm_gem_object_exported_dma_buf_free(struct drm_gem_object *obj)
+{
+       /* Unbreak the reference cycle if we have an exported dma_buf. */
+       if (obj->dma_buf) {
+               dma_buf_put(obj->dma_buf);
+               obj->dma_buf = NULL;
+       }
 }
 
-void
+static void
 drm_gem_object_handle_unreference_unlocked(struct drm_gem_object *obj)
 {
-       if (WARN_ON(atomic_read(&obj->handle_count) == 0))
+       if (WARN_ON(obj->handle_count == 0))
                return;
 
        /*
@@ -247,8 +257,13 @@ drm_gem_object_handle_unreference_unlocked(struct drm_gem_object *obj)
        * checked for a name
        */
 
-       if (atomic_dec_and_test(&obj->handle_count))
+       mutex_lock(&obj->dev->object_name_lock);
+       if (--obj->handle_count == 0) {
                drm_gem_object_handle_free(obj);
+               drm_gem_object_exported_dma_buf_free(obj);
+       }
+       mutex_unlock(&obj->dev->object_name_lock);
+
        drm_gem_object_unreference_unlocked(obj);
 }
 
@@ -309,18 +324,22 @@ int drm_gem_dumb_destroy(struct drm_file *file,
 EXPORT_SYMBOL(drm_gem_dumb_destroy);
 
 /**
- * Create a handle for this object. This adds a handle reference
- * to the object, which includes a regular reference count. Callers
- * will likely want to dereference the object afterwards.
+ * drm_gem_handle_create_tail - internal function to create a handle
+ *
+ * This expects the dev->object_name_lock to be held already and will drop it
+ * before returning. Used to avoid races in establishing new handles when
+ * importing an object from either a flink name or a dma-buf.
  */
 int
-drm_gem_handle_create(struct drm_file *file_priv,
-                      struct drm_gem_object *obj,
-                      u32 *handlep)
+drm_gem_handle_create_tail(struct drm_file *file_priv,
+                          struct drm_gem_object *obj,
+                          u32 *handlep)
 {
        struct drm_device *dev = obj->dev;
        int ret;
 
+       WARN_ON(!mutex_is_locked(&dev->object_name_lock));
+
        /*
         * Get the user-visible handle using idr.  Preload and perform
         * allocation under our spinlock.
@@ -329,14 +348,17 @@ drm_gem_handle_create(struct drm_file *file_priv,
        spin_lock(&file_priv->table_lock);
 
        ret = idr_alloc(&file_priv->object_idr, obj, 1, 0, GFP_NOWAIT);
-
+       drm_gem_object_reference(obj);
+       obj->handle_count++;
        spin_unlock(&file_priv->table_lock);
        idr_preload_end();
-       if (ret < 0)
+       mutex_unlock(&dev->object_name_lock);
+       if (ret < 0) {
+               drm_gem_object_handle_unreference_unlocked(obj);
                return ret;
+       }
        *handlep = ret;
 
-       drm_gem_object_handle_reference(obj);
 
        if (dev->driver->gem_open_object) {
                ret = dev->driver->gem_open_object(obj, file_priv);
@@ -348,6 +370,21 @@ drm_gem_handle_create(struct drm_file *file_priv,
 
        return 0;
 }
+
+/**
+ * Create a handle for this object. This adds a handle reference
+ * to the object, which includes a regular reference count. Callers
+ * will likely want to dereference the object afterwards.
+ */
+int
+drm_gem_handle_create(struct drm_file *file_priv,
+                      struct drm_gem_object *obj,
+                      u32 *handlep)
+{
+       mutex_lock(&obj->dev->object_name_lock);
+
+       return drm_gem_handle_create_tail(file_priv, obj, handlep);
+}
 EXPORT_SYMBOL(drm_gem_handle_create);
 
 
@@ -575,8 +612,14 @@ drm_gem_flink_ioctl(struct drm_device *dev, void *data,
        if (obj == NULL)
                return -ENOENT;
 
+       mutex_lock(&dev->object_name_lock);
        idr_preload(GFP_KERNEL);
-       spin_lock(&dev->object_name_lock);
+       /* prevent races with concurrent gem_close. */
+       if (obj->handle_count == 0) {
+               ret = -ENOENT;
+               goto err;
+       }
+
        if (!obj->name) {
                ret = idr_alloc(&dev->object_name_idr, obj, 1, 0, GFP_NOWAIT);
                if (ret < 0)
@@ -592,8 +635,8 @@ drm_gem_flink_ioctl(struct drm_device *dev, void *data,
        ret = 0;
 
 err:
-       spin_unlock(&dev->object_name_lock);
        idr_preload_end();
+       mutex_unlock(&dev->object_name_lock);
        drm_gem_object_unreference_unlocked(obj);
        return ret;
 }
@@ -616,15 +659,17 @@ drm_gem_open_ioctl(struct drm_device *dev, void *data,
        if (!(dev->driver->driver_features & DRIVER_GEM))
                return -ENODEV;
 
-       spin_lock(&dev->object_name_lock);
+       mutex_lock(&dev->object_name_lock);
        obj = idr_find(&dev->object_name_idr, (int) args->name);
-       if (obj)
+       if (obj) {
                drm_gem_object_reference(obj);
-       spin_unlock(&dev->object_name_lock);
-       if (!obj)
+       } else {
+               mutex_unlock(&dev->object_name_lock);
                return -ENOENT;
+       }
 
-       ret = drm_gem_handle_create(file_priv, obj, &handle);
+       /* drm_gem_handle_create_tail unlocks dev->object_name_lock. */
+       ret = drm_gem_handle_create_tail(file_priv, obj, &handle);
        drm_gem_object_unreference_unlocked(obj);
        if (ret)
                return ret;
@@ -683,6 +728,8 @@ drm_gem_release(struct drm_device *dev, struct drm_file *file_private)
 void
 drm_gem_object_release(struct drm_gem_object *obj)
 {
+       WARN_ON(obj->dma_buf);
+
        if (obj->filp)
            fput(obj->filp);
 }
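
For reference, a rough sketch of the exporting counterpart (the real code lives in
drm_prime.c and is not part of this diff). The helper name, the use of the driver's
->gem_prime_export() hook and the error handling are assumptions; only the locking
and the extra reference cached in obj->dma_buf follow from the patch above:

/*
 * Illustrative only: the export path caches the dma_buf in obj->dma_buf under
 * dev->object_name_lock and keeps one extra reference, which
 * drm_gem_object_exported_dma_buf_free() drops once the last handle and the
 * flink name are gone.
 */
static struct dma_buf *example_export_locked(struct drm_device *dev,
					     struct drm_gem_object *obj,
					     uint32_t flags)
{
	struct dma_buf *dmabuf;

	WARN_ON(!mutex_is_locked(&dev->object_name_lock));

	if (obj->dma_buf) {
		/* Already exported: just take a reference for the new user. */
		get_dma_buf(obj->dma_buf);
		return obj->dma_buf;
	}

	dmabuf = dev->driver->gem_prime_export(dev, obj, flags);
	if (IS_ERR(dmabuf))
		return dmabuf;

	/*
	 * The extra reference pins the dma_buf for as long as the
	 * obj->dma_buf link exists; handles and the flink name keep that
	 * link alive.
	 */
	obj->dma_buf = dmabuf;
	get_dma_buf(obj->dma_buf);

	return dmabuf;
}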