drm/i915: Move even more gtt code to i915_gem_gtt
Author:     Ben Widawsky <benjamin.widawsky@intel.com>
AuthorDate: Tue, 18 Dec 2012 18:31:25 +0000 (10:31 -0800)
Commit:     Daniel Vetter <daniel.vetter@ffwll.ch>
CommitDate: Thu, 20 Dec 2012 15:27:35 +0000 (16:27 +0100)

This really should have been part of the kill agp series. Split i915_gem_init_global_gtt() into i915_gem_setup_global_gtt(), which carves out a caller-specified range, and a new parameterless i915_gem_init_global_gtt(), which computes the GTT sizes and makes the aliasing-PPGTT decision; intel_enable_ppgtt() moves from i915_gem.c to i915_gem_gtt.c along with it.

Signed-off-by: Ben Widawsky <ben@bwidawsk.net>
Reviewed-by: Mika Kuoppala <mika.kuoppala@intel.com>
Signed-off-by: Daniel Vetter <daniel.vetter@ffwll.ch>
drivers/gpu/drm/i915/i915_drv.h
drivers/gpu/drm/i915/i915_gem.c
drivers/gpu/drm/i915/i915_gem_gtt.c

diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index b0faa9149a1a7f08d250e6469010f5ede4a0d567..15799e783b7af6e8ed5334e00a64f7a2b24d20e4 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -1587,10 +1587,9 @@ void i915_gem_gtt_bind_object(struct drm_i915_gem_object *obj,
                                enum i915_cache_level cache_level);
 void i915_gem_gtt_unbind_object(struct drm_i915_gem_object *obj);
 void i915_gem_gtt_finish_object(struct drm_i915_gem_object *obj);
-void i915_gem_init_global_gtt(struct drm_device *dev,
-                             unsigned long start,
-                             unsigned long mappable_end,
-                             unsigned long end);
+void i915_gem_init_global_gtt(struct drm_device *dev);
+void i915_gem_setup_global_gtt(struct drm_device *dev, unsigned long start,
+                              unsigned long mappable_end, unsigned long end);
 int i915_gem_gtt_init(struct drm_device *dev);
 void i915_gem_gtt_fini(struct drm_device *dev);
 static inline void i915_gem_chipset_flush(struct drm_device *dev)
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 23b883a135dbc0ffd6389bff153e2908e974a195..ad98db5d22ea95d4082fe1530b76a956bd20ea35 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -163,8 +163,8 @@ i915_gem_init_ioctl(struct drm_device *dev, void *data,
                return -ENODEV;
 
        mutex_lock(&dev->struct_mutex);
-       i915_gem_init_global_gtt(dev, args->gtt_start,
-                                args->gtt_end, args->gtt_end);
+       i915_gem_setup_global_gtt(dev, args->gtt_start, args->gtt_end,
+                                 args->gtt_end);
        mutex_unlock(&dev->struct_mutex);
 
        return 0;
@@ -3981,58 +3981,13 @@ cleanup_render_ring:
        return ret;
 }
 
-static bool
-intel_enable_ppgtt(struct drm_device *dev)
-{
-       if (i915_enable_ppgtt >= 0)
-               return i915_enable_ppgtt;
-
-#ifdef CONFIG_INTEL_IOMMU
-       /* Disable ppgtt on SNB if VT-d is on. */
-       if (INTEL_INFO(dev)->gen == 6 && intel_iommu_gfx_mapped)
-               return false;
-#endif
-
-       return true;
-}
-
 int i915_gem_init(struct drm_device *dev)
 {
        struct drm_i915_private *dev_priv = dev->dev_private;
-       unsigned long gtt_size, mappable_size;
        int ret;
 
-       gtt_size = dev_priv->mm.gtt->gtt_total_entries << PAGE_SHIFT;
-       mappable_size = dev_priv->mm.gtt->gtt_mappable_entries << PAGE_SHIFT;
-
        mutex_lock(&dev->struct_mutex);
-       if (intel_enable_ppgtt(dev) && HAS_ALIASING_PPGTT(dev)) {
-               /* PPGTT pdes are stolen from global gtt ptes, so shrink the
-                * aperture accordingly when using aliasing ppgtt. */
-               gtt_size -= I915_PPGTT_PD_ENTRIES*PAGE_SIZE;
-
-               i915_gem_init_global_gtt(dev, 0, mappable_size, gtt_size);
-
-               ret = i915_gem_init_aliasing_ppgtt(dev);
-               if (ret) {
-                       mutex_unlock(&dev->struct_mutex);
-                       return ret;
-               }
-       } else {
-               /* Let GEM Manage all of the aperture.
-                *
-                * However, leave one page at the end still bound to the scratch
-                * page.  There are a number of places where the hardware
-                * apparently prefetches past the end of the object, and we've
-                * seen multiple hangs with the GPU head pointer stuck in a
-                * batchbuffer bound at the last page of the aperture.  One page
-                * should be enough to keep any prefetching inside of the
-                * aperture.
-                */
-               i915_gem_init_global_gtt(dev, 0, mappable_size,
-                                        gtt_size);
-       }
-
+       i915_gem_init_global_gtt(dev);
        ret = i915_gem_init_hw(dev);
        mutex_unlock(&dev->struct_mutex);
        if (ret) {
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c
index ea646b4c312aa28f3c09404f5a33ba480db28147..61b3e728be34c9e95ed8de2c2f32fb7d3e629617 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.c
@@ -525,10 +525,10 @@ static void i915_gtt_color_adjust(struct drm_mm_node *node,
        }
 }
 
-void i915_gem_init_global_gtt(struct drm_device *dev,
-                             unsigned long start,
-                             unsigned long mappable_end,
-                             unsigned long end)
+void i915_gem_setup_global_gtt(struct drm_device *dev,
+                              unsigned long start,
+                              unsigned long mappable_end,
+                              unsigned long end)
 {
        drm_i915_private_t *dev_priv = dev->dev_private;
        struct drm_mm_node *entry;
@@ -573,6 +573,57 @@ void i915_gem_init_global_gtt(struct drm_device *dev,
        i915_ggtt_clear_range(dev, end / PAGE_SIZE - 1, 1);
 }
 
+static bool
+intel_enable_ppgtt(struct drm_device *dev)
+{
+       if (i915_enable_ppgtt >= 0)
+               return i915_enable_ppgtt;
+
+#ifdef CONFIG_INTEL_IOMMU
+       /* Disable ppgtt on SNB if VT-d is on. */
+       if (INTEL_INFO(dev)->gen == 6 && intel_iommu_gfx_mapped)
+               return false;
+#endif
+
+       return true;
+}
+
+void i915_gem_init_global_gtt(struct drm_device *dev)
+{
+       struct drm_i915_private *dev_priv = dev->dev_private;
+       unsigned long gtt_size, mappable_size;
+       int ret;
+
+       gtt_size = dev_priv->mm.gtt->gtt_total_entries << PAGE_SHIFT;
+       mappable_size = dev_priv->mm.gtt->gtt_mappable_entries << PAGE_SHIFT;
+
+       if (intel_enable_ppgtt(dev) && HAS_ALIASING_PPGTT(dev)) {
+               /* PPGTT pdes are stolen from global gtt ptes, so shrink the
+                * aperture accordingly when using aliasing ppgtt. */
+               gtt_size -= I915_PPGTT_PD_ENTRIES*PAGE_SIZE;
+
+               i915_gem_setup_global_gtt(dev, 0, mappable_size, gtt_size);
+
+               ret = i915_gem_init_aliasing_ppgtt(dev);
+               if (ret) {
+                       mutex_unlock(&dev->struct_mutex);
+                       return;
+               }
+       } else {
+               /* Let GEM Manage all of the aperture.
+                *
+                * However, leave one page at the end still bound to the scratch
+                * page.  There are a number of places where the hardware
+                * apparently prefetches past the end of the object, and we've
+                * seen multiple hangs with the GPU head pointer stuck in a
+                * batchbuffer bound at the last page of the aperture.  One page
+                * should be enough to keep any prefetching inside of the
+                * aperture.
+                */
+               i915_gem_setup_global_gtt(dev, 0, mappable_size, gtt_size);
+       }
+}
+
 static int setup_scratch_page(struct drm_device *dev)
 {
        struct drm_i915_private *dev_priv = dev->dev_private;
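
A minimal standalone sketch of the aperture arithmetic the new i915_gem_init_global_gtt() performs, for illustration only; it is not part of the patch. The entry counts below are assumed example values for a 2 GiB GTT with a 256 MiB mappable aperture, and I915_PPGTT_PD_ENTRIES is taken to be 512 as on gen6; the real driver reads both counts from dev_priv->mm.gtt.

/*
 * Standalone model of the GTT size computation in the new
 * i915_gem_init_global_gtt() above.  Example values only: the real
 * driver reads entry counts from dev_priv->mm.gtt, and here we assume
 * I915_PPGTT_PD_ENTRIES == 512 (gen6 aliasing PPGTT) and 4 KiB pages.
 */
#include <stdio.h>

#define PAGE_SHIFT		12
#define PAGE_SIZE		(1UL << PAGE_SHIFT)	/* 4 KiB */
#define I915_PPGTT_PD_ENTRIES	512			/* assumed gen6 value */

int main(void)
{
	/* Assumed example: 2 GiB total GTT, 256 MiB mappable aperture. */
	unsigned long gtt_total_entries    = (2UL << 30) >> PAGE_SHIFT;
	unsigned long gtt_mappable_entries = (256UL << 20) >> PAGE_SHIFT;

	unsigned long gtt_size      = gtt_total_entries << PAGE_SHIFT;
	unsigned long mappable_size = gtt_mappable_entries << PAGE_SHIFT;

	/* PPGTT page-directory entries are stolen from global GTT PTEs,
	 * so the range handed to i915_gem_setup_global_gtt() shrinks. */
	gtt_size -= I915_PPGTT_PD_ENTRIES * PAGE_SIZE;

	printf("setup_global_gtt(dev, 0, %lu MiB, %lu MiB); /* %lu KiB kept for PDEs */\n",
	       mappable_size >> 20, gtt_size >> 20,
	       (I915_PPGTT_PD_ENTRIES * PAGE_SIZE) >> 10);
	return 0;
}

On aliasing-PPGTT hardware this mirrors the gtt_size -= I915_PPGTT_PD_ENTRIES*PAGE_SIZE line in the hunk above; without PPGTT the full GTT size would be passed to i915_gem_setup_global_gtt() unchanged.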