drm/i915: Fix context/engine cleanup order
author Nick Hoath <nicholas.hoath@intel.com>
Thu, 21 Jan 2016 19:37:45 +0000 (19:37 +0000)
committer Daniel Vetter <daniel.vetter@ffwll.ch>
Mon, 25 Jan 2016 18:09:03 +0000 (19:09 +0100)
Swap the order of context & engine cleanup, so that contexts are cleaned
up first, and *then* engines. This is a more sensible order anyway, but
in particular it has become necessary since the 'intel_ring_initialized()
must be simple and inline' patch, which uses ring->dev as an
'initialised' flag: ring->dev can now be NULL after engine teardown. This
in turn can cause a problem in the context code, which (used to) check
ring->dev->struct_mutex, causing a fault if ring->dev was NULL.
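For illustration only, a minimal standalone sketch of the ordering hazard
(the struct definitions and helpers below are simplified stand-ins, not
the real i915 code): once engine teardown clears ring->dev, any later
dereference of ring->dev->struct_mutex in context cleanup faults.

#include <assert.h>
#include <stddef.h>
#include <stdbool.h>

/* Simplified stand-ins for the real i915 structures. */
struct drm_device { int struct_mutex; };
struct intel_engine_cs { struct drm_device *dev; };

/* ring->dev doubles as the 'initialised' flag ... */
static bool intel_ring_initialized(struct intel_engine_cs *ring)
{
        return ring->dev != NULL;
}

/* ... and engine teardown clears it. */
static void cleanup_engine(struct intel_engine_cs *ring)
{
        ring->dev = NULL;
}

/* Context cleanup dereferences ring->dev, so it must run first. */
static void cleanup_context(struct intel_engine_cs *ring)
{
        assert(ring->dev->struct_mutex == 0);  /* NULL deref if engines went first */
}

int main(void)
{
        struct drm_device dev = { 0 };
        struct intel_engine_cs ring = { &dev };

        cleanup_context(&ring);  /* contexts first ... */
        cleanup_engine(&ring);   /* ... then engines  */
        assert(!intel_ring_initialized(&ring));
        return 0;
}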

Also rename the cleanup function to reflect what it actually does (it
cleans up engines, not a ringbuffer), and fix an annoying whitespace issue.

v2: Also make the fix in i915_load_modeset_init, not just in
    i915_driver_unload (Chris Wilson)
v3: Included some extra, unrelated changes.
v4: Reverted the extra changes (so we're back to v2).
    Rebased and updated commentary above (Dave Gordon).

Signed-off-by: Nick Hoath <nicholas.hoath@intel.com>
Signed-off-by: Dave Gordon <david.s.gordon@intel.com>
Reviewed-by: Chris Wilson <chris@chris-wilson.co.uk> (v2)
Cc: Mika Kuoppala <mika.kuoppala@linux.intel.com>
Cc: Daniel Vetter <daniel.vetter@ffwll.ch>
Cc: Chris Wilson <chris@chris-wilson.co.uk>
Link: http://patchwork.freedesktop.org/patch/msgid/1453405067-32890-3-git-send-email-david.s.gordon@intel.com
Signed-off-by: Daniel Vetter <daniel.vetter@ffwll.ch>
drivers/gpu/drm/i915/i915_dma.c
drivers/gpu/drm/i915/i915_drv.h
drivers/gpu/drm/i915/i915_gem.c

index d70d96fe553bb65515540f048bdfe224f15a48e6..4725e8d61d0f4a41b7cab9318dade1570cfaf719 100644
@@ -451,8 +451,8 @@ static int i915_load_modeset_init(struct drm_device *dev)
 
 cleanup_gem:
        mutex_lock(&dev->struct_mutex);
-       i915_gem_cleanup_ringbuffer(dev);
        i915_gem_context_fini(dev);
+       i915_gem_cleanup_engines(dev);
        mutex_unlock(&dev->struct_mutex);
 cleanup_irq:
        intel_guc_ucode_fini(dev);
@@ -1196,8 +1196,8 @@ int i915_driver_unload(struct drm_device *dev)
 
        intel_guc_ucode_fini(dev);
        mutex_lock(&dev->struct_mutex);
-       i915_gem_cleanup_ringbuffer(dev);
        i915_gem_context_fini(dev);
+       i915_gem_cleanup_engines(dev);
        mutex_unlock(&dev->struct_mutex);
        intel_fbc_cleanup_cfb(dev_priv);
        i915_gem_cleanup_stolen(dev);
index 211af534e5d0a36a941f1ecf310caa731d4b4416..01cc982f5497fc56f84c89a1ea41a21a1f3cf469 100644
@@ -3019,7 +3019,7 @@ int i915_gem_init_rings(struct drm_device *dev);
 int __must_check i915_gem_init_hw(struct drm_device *dev);
 int i915_gem_l3_remap(struct drm_i915_gem_request *req, int slice);
 void i915_gem_init_swizzling(struct drm_device *dev);
-void i915_gem_cleanup_ringbuffer(struct drm_device *dev);
+void i915_gem_cleanup_engines(struct drm_device *dev);
 int __must_check i915_gpu_idle(struct drm_device *dev);
 int __must_check i915_gem_suspend(struct drm_device *dev);
 void __i915_add_request(struct drm_i915_gem_request *req,
index 371bbb28c4713c52dc3a1ebed72ea344559697ae..799a53ad04f2a9cfbf5747d39735c777a940a90f 100644
@@ -4912,7 +4912,7 @@ i915_gem_init_hw(struct drm_device *dev)
                req = i915_gem_request_alloc(ring, NULL);
                if (IS_ERR(req)) {
                        ret = PTR_ERR(req);
-                       i915_gem_cleanup_ringbuffer(dev);
+                       i915_gem_cleanup_engines(dev);
                        goto out;
                }
 
@@ -4925,7 +4925,7 @@ i915_gem_init_hw(struct drm_device *dev)
                if (ret && ret != -EIO) {
                        DRM_ERROR("PPGTT enable ring #%d failed %d\n", i, ret);
                        i915_gem_request_cancel(req);
-                       i915_gem_cleanup_ringbuffer(dev);
+                       i915_gem_cleanup_engines(dev);
                        goto out;
                }
 
@@ -4933,7 +4933,7 @@ i915_gem_init_hw(struct drm_device *dev)
                if (ret && ret != -EIO) {
                        DRM_ERROR("Context enable ring #%d failed %d\n", i, ret);
                        i915_gem_request_cancel(req);
-                       i915_gem_cleanup_ringbuffer(dev);
+                       i915_gem_cleanup_engines(dev);
                        goto out;
                }
 
@@ -5008,7 +5008,7 @@ out_unlock:
 }
 
 void
-i915_gem_cleanup_ringbuffer(struct drm_device *dev)
+i915_gem_cleanup_engines(struct drm_device *dev)
 {
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct intel_engine_cs *ring;
@@ -5017,13 +5017,14 @@ i915_gem_cleanup_ringbuffer(struct drm_device *dev)
        for_each_ring(ring, dev_priv, i)
                dev_priv->gt.cleanup_ring(ring);
 
-    if (i915.enable_execlists)
-            /*
-             * Neither the BIOS, ourselves or any other kernel
-             * expects the system to be in execlists mode on startup,
-             * so we need to reset the GPU back to legacy mode.
-             */
-            intel_gpu_reset(dev);
+       if (i915.enable_execlists) {
+               /*
+                * Neither the BIOS, ourselves or any other kernel
+                * expects the system to be in execlists mode on startup,
+                * so we need to reset the GPU back to legacy mode.
+                */
+               intel_gpu_reset(dev);
+       }
 }
 
 static void