drm/i915: move wedged to the other gpu error handling stuff
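
For orientation, this series groups the GPU error handling state into one structure. Below is a minimal, hypothetical sketch of struct i915_gpu_error, reconstructed only from the fields the diff actually touches (gpu_error.wedged, gpu_error.completion, gpu_error.hangcheck_timer); the authoritative definition lives in i915_drv.h and may carry more members than shown here.

    #include <linux/atomic.h>
    #include <linux/completion.h>
    #include <linux/timer.h>

    /* Sketch only: field names are taken from the accesses in the diff
     * below; the real layout in i915_drv.h may differ.
     */
    struct i915_gpu_error {
            /* Periodic hang detection, rearmed from i915_add_request(). */
            struct timer_list hangcheck_timer;

            /* Signalled once GPU reset/recovery has completed. */
            struct completion completion;

            /* Non-zero while the GPU is considered hung ("wedged"). */
            atomic_t wedged;
    };
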
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index bb38bca02559ad27d2421a027024e56c44d10833..04b2f92eb456fe8c71b7d84c44fc4b2c34b7b796 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -87,14 +87,13 @@ static void i915_gem_info_remove_obj(struct drm_i915_private *dev_priv,
 }
 
 static int
-i915_gem_wait_for_error(struct drm_device *dev)
+i915_gem_wait_for_error(struct i915_gpu_error *error)
 {
-       struct drm_i915_private *dev_priv = dev->dev_private;
-       struct completion *x = &dev_priv->error_completion;
+       struct completion *x = &error->completion;
        unsigned long flags;
        int ret;
 
-       if (!atomic_read(&dev_priv->mm.wedged))
+       if (!atomic_read(&error->wedged))
                return 0;
 
        /*
@@ -110,7 +109,7 @@ i915_gem_wait_for_error(struct drm_device *dev)
                return ret;
        }
 
-       if (atomic_read(&dev_priv->mm.wedged)) {
+       if (atomic_read(&error->wedged)) {
                /* GPU is hung, bump the completion count to account for
                 * the token we just consumed so that we never hit zero and
                 * end up waiting upon a subsequent completion event that
@@ -125,9 +124,10 @@ i915_gem_wait_for_error(struct drm_device *dev)
 
 int i915_mutex_lock_interruptible(struct drm_device *dev)
 {
+       struct drm_i915_private *dev_priv = dev->dev_private;
        int ret;
 
-       ret = i915_gem_wait_for_error(dev);
+       ret = i915_gem_wait_for_error(&dev_priv->gpu_error);
        if (ret)
                return ret;
 
@@ -149,6 +149,7 @@ int
 i915_gem_init_ioctl(struct drm_device *dev, void *data,
                    struct drm_file *file)
 {
+       struct drm_i915_private *dev_priv = dev->dev_private;
        struct drm_i915_gem_init *args = data;
 
        if (drm_core_check_feature(dev, DRIVER_MODESET))
@@ -163,8 +164,9 @@ i915_gem_init_ioctl(struct drm_device *dev, void *data,
                return -ENODEV;
 
        mutex_lock(&dev->struct_mutex);
-       i915_gem_init_global_gtt(dev, args->gtt_start,
-                                args->gtt_end, args->gtt_end);
+       i915_gem_setup_global_gtt(dev, args->gtt_start, args->gtt_end,
+                                 args->gtt_end);
+       dev_priv->gtt.mappable_end = args->gtt_end;
        mutex_unlock(&dev->struct_mutex);
 
        return 0;
@@ -186,7 +188,7 @@ i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data,
                        pinned += obj->gtt_space->size;
        mutex_unlock(&dev->struct_mutex);
 
-       args->aper_size = dev_priv->mm.gtt_total;
+       args->aper_size = dev_priv->gtt.total;
        args->aper_available_size = args->aper_size - pinned;
 
        return 0;
@@ -637,7 +639,7 @@ i915_gem_gtt_pwrite_fast(struct drm_device *dev,
                 * source page isn't available.  Return the error and we'll
                 * retry in the slow path.
                 */
-               if (fast_user_write(dev_priv->mm.gtt_mapping, page_base,
+               if (fast_user_write(dev_priv->gtt.mappable, page_base,
                                    page_offset, user_data, page_length)) {
                        ret = -EFAULT;
                        goto out_unpin;
@@ -937,11 +939,11 @@ unlock:
 }
 
 int
-i915_gem_check_wedge(struct drm_i915_private *dev_priv,
+i915_gem_check_wedge(struct i915_gpu_error *error,
                     bool interruptible)
 {
-       if (atomic_read(&dev_priv->mm.wedged)) {
-               struct completion *x = &dev_priv->error_completion;
+       if (atomic_read(&error->wedged)) {
+               struct completion *x = &error->completion;
                bool recovery_complete;
                unsigned long flags;
 
@@ -1023,7 +1025,7 @@ static int __wait_seqno(struct intel_ring_buffer *ring, u32 seqno,
 
 #define EXIT_COND \
        (i915_seqno_passed(ring->get_seqno(ring, false), seqno) || \
-       atomic_read(&dev_priv->mm.wedged))
+       atomic_read(&dev_priv->gpu_error.wedged))
        do {
                if (interruptible)
                        end = wait_event_interruptible_timeout(ring->irq_queue,
@@ -1033,7 +1035,7 @@ static int __wait_seqno(struct intel_ring_buffer *ring, u32 seqno,
                        end = wait_event_timeout(ring->irq_queue, EXIT_COND,
                                                 timeout_jiffies);
 
-               ret = i915_gem_check_wedge(dev_priv, interruptible);
+               ret = i915_gem_check_wedge(&dev_priv->gpu_error, interruptible);
                if (ret)
                        end = ret;
        } while (end == 0 && wait_forever);
@@ -1079,7 +1081,7 @@ i915_wait_seqno(struct intel_ring_buffer *ring, uint32_t seqno)
        BUG_ON(!mutex_is_locked(&dev->struct_mutex));
        BUG_ON(seqno == 0);
 
-       ret = i915_gem_check_wedge(dev_priv, interruptible);
+       ret = i915_gem_check_wedge(&dev_priv->gpu_error, interruptible);
        if (ret)
                return ret;
 
@@ -1144,7 +1146,7 @@ i915_gem_object_wait_rendering__nonblocking(struct drm_i915_gem_object *obj,
        if (seqno == 0)
                return 0;
 
-       ret = i915_gem_check_wedge(dev_priv, true);
+       ret = i915_gem_check_wedge(&dev_priv->gpu_error, true);
        if (ret)
                return ret;
 
@@ -1362,7 +1364,7 @@ int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
 
        obj->fault_mappable = true;
 
-       pfn = ((dev_priv->mm.gtt_base_addr + obj->gtt_offset) >> PAGE_SHIFT) +
+       pfn = ((dev_priv->gtt.mappable_base + obj->gtt_offset) >> PAGE_SHIFT) +
                page_offset;
 
        /* Finally, remap it using the new GTT offset */
@@ -1377,7 +1379,7 @@ out:
                /* If this -EIO is due to a gpu hang, give the reset code a
                 * chance to clean up the mess. Otherwise return the proper
                 * SIGBUS. */
-               if (!atomic_read(&dev_priv->mm.wedged))
+               if (!atomic_read(&dev_priv->gpu_error.wedged))
                        return VM_FAULT_SIGBUS;
        case -EAGAIN:
                /* Give the error handler a chance to run and move the
@@ -1435,7 +1437,7 @@ i915_gem_release_mmap(struct drm_i915_gem_object *obj)
        obj->fault_mappable = false;
 }
 
-static uint32_t
+uint32_t
 i915_gem_get_gtt_size(struct drm_device *dev, uint32_t size, int tiling_mode)
 {
        uint32_t gtt_size;
@@ -1463,16 +1465,15 @@ i915_gem_get_gtt_size(struct drm_device *dev, uint32_t size, int tiling_mode)
  * Return the required GTT alignment for an object, taking into account
  * potential fence register mapping.
  */
-static uint32_t
-i915_gem_get_gtt_alignment(struct drm_device *dev,
-                          uint32_t size,
-                          int tiling_mode)
+uint32_t
+i915_gem_get_gtt_alignment(struct drm_device *dev, uint32_t size,
+                          int tiling_mode, bool fenced)
 {
        /*
         * Minimum alignment is 4k (GTT page size), but might be greater
         * if a fence register is needed for the object.
         */
-       if (INTEL_INFO(dev)->gen >= 4 ||
+       if (INTEL_INFO(dev)->gen >= 4 || (!fenced && IS_G33(dev)) ||
            tiling_mode == I915_TILING_NONE)
                return 4096;
 
@@ -1483,35 +1484,6 @@ i915_gem_get_gtt_alignment(struct drm_device *dev,
        return i915_gem_get_gtt_size(dev, size, tiling_mode);
 }
 
-/**
- * i915_gem_get_unfenced_gtt_alignment - return required GTT alignment for an
- *                                      unfenced object
- * @dev: the device
- * @size: size of the object
- * @tiling_mode: tiling mode of the object
- *
- * Return the required GTT alignment for an object, only taking into account
- * unfenced tiled surface requirements.
- */
-uint32_t
-i915_gem_get_unfenced_gtt_alignment(struct drm_device *dev,
-                                   uint32_t size,
-                                   int tiling_mode)
-{
-       /*
-        * Minimum alignment is 4k (GTT page size) for sane hw.
-        */
-       if (INTEL_INFO(dev)->gen >= 4 || IS_G33(dev) ||
-           tiling_mode == I915_TILING_NONE)
-               return 4096;
-
-       /* Previous hardware however needs to be aligned to a power-of-two
-        * tile height. The simplest method for determining this is to reuse
-        * the power-of-tile object size.
-        */
-       return i915_gem_get_gtt_size(dev, size, tiling_mode);
-}
-
 static int i915_gem_object_create_mmap_offset(struct drm_i915_gem_object *obj)
 {
        struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
@@ -1520,9 +1492,11 @@ static int i915_gem_object_create_mmap_offset(struct drm_i915_gem_object *obj)
        if (obj->base.map_list.map)
                return 0;
 
+       dev_priv->mm.shrinker_no_lock_stealing = true;
+
        ret = drm_gem_create_mmap_offset(&obj->base);
        if (ret != -ENOSPC)
-               return ret;
+               goto out;
 
        /* Badly fragmented mmap space? The only way we can recover
         * space is by destroying unwanted objects. We can't randomly release
@@ -1534,10 +1508,14 @@ static int i915_gem_object_create_mmap_offset(struct drm_i915_gem_object *obj)
        i915_gem_purge(dev_priv, obj->base.size >> PAGE_SHIFT);
        ret = drm_gem_create_mmap_offset(&obj->base);
        if (ret != -ENOSPC)
-               return ret;
+               goto out;
 
        i915_gem_shrink_all(dev_priv);
-       return drm_gem_create_mmap_offset(&obj->base);
+       ret = drm_gem_create_mmap_offset(&obj->base);
+out:
+       dev_priv->mm.shrinker_no_lock_stealing = false;
+
+       return ret;
 }
 
 static void i915_gem_object_free_mmap_offset(struct drm_i915_gem_object *obj)
@@ -1568,7 +1546,7 @@ i915_gem_mmap_gtt(struct drm_file *file,
                goto unlock;
        }
 
-       if (obj->base.size > dev_priv->mm.gtt_mappable_end) {
+       if (obj->base.size > dev_priv->gtt.mappable_end) {
                ret = -E2BIG;
                goto out;
        }
@@ -1686,7 +1664,7 @@ i915_gem_object_put_pages_gtt(struct drm_i915_gem_object *obj)
        kfree(obj->pages);
 }
 
-static int
+int
 i915_gem_object_put_pages(struct drm_i915_gem_object *obj)
 {
        const struct drm_i915_gem_object_ops *ops = obj->ops;
@@ -1699,10 +1677,14 @@ i915_gem_object_put_pages(struct drm_i915_gem_object *obj)
        if (obj->pages_pin_count)
                return -EBUSY;
 
+       /* ->put_pages might need to allocate memory for the bit17 swizzle
+        * array, hence protect them from being reaped by removing them from gtt
+        * lists early. */
+       list_del(&obj->gtt_list);
+
        ops->put_pages(obj);
        obj->pages = NULL;
 
-       list_del(&obj->gtt_list);
        if (i915_gem_object_is_purgeable(obj))
                i915_gem_object_truncate(obj);
 
@@ -1788,7 +1770,7 @@ i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj)
         */
        mapping = obj->base.filp->f_path.dentry->d_inode->i_mapping;
        gfp = mapping_gfp_mask(mapping);
-       gfp |= __GFP_NORETRY | __GFP_NOWARN;
+       gfp |= __GFP_NORETRY | __GFP_NOWARN | __GFP_NO_KSWAPD;
        gfp &= ~(__GFP_IO | __GFP_WAIT);
        for_each_sg(st->sgl, sg, page_count, i) {
                page = shmem_read_mapping_page_gfp(mapping, i, gfp);
@@ -1801,7 +1783,7 @@ i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj)
                         * our own buffer, now let the real VM do its job and
                         * go down in flames if truly OOM.
                         */
-                       gfp &= ~(__GFP_NORETRY | __GFP_NOWARN);
+                       gfp &= ~(__GFP_NORETRY | __GFP_NOWARN | __GFP_NO_KSWAPD);
                        gfp |= __GFP_IO | __GFP_WAIT;
 
                        i915_gem_shrink_all(dev_priv);
@@ -1809,7 +1791,7 @@ i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj)
                        if (IS_ERR(page))
                                goto err_pages;
 
-                       gfp |= __GFP_NORETRY | __GFP_NOWARN;
+                       gfp |= __GFP_NORETRY | __GFP_NOWARN | __GFP_NO_KSWAPD;
                        gfp &= ~(__GFP_IO | __GFP_WAIT);
                }
 
@@ -1848,6 +1830,11 @@ i915_gem_object_get_pages(struct drm_i915_gem_object *obj)
        if (obj->pages)
                return 0;
 
+       if (obj->madv != I915_MADV_WILLNEED) {
+               DRM_ERROR("Attempting to obtain a purgeable object\n");
+               return -EINVAL;
+       }
+
        BUG_ON(obj->pages_pin_count);
 
        ret = ops->get_pages(obj);
@@ -1926,7 +1913,7 @@ i915_gem_object_move_to_inactive(struct drm_i915_gem_object *obj)
 }
 
 static int
-i915_gem_handle_seqno_wrap(struct drm_device *dev)
+i915_gem_init_seqno(struct drm_device *dev, u32 seqno)
 {
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct intel_ring_buffer *ring;
@@ -1942,7 +1929,7 @@ i915_gem_handle_seqno_wrap(struct drm_device *dev)
 
        /* Finally reset hw state */
        for_each_ring(ring, dev_priv, i) {
-               intel_ring_init_seqno(ring, 0);
+               intel_ring_init_seqno(ring, seqno);
 
                for (j = 0; j < ARRAY_SIZE(ring->sync_seqno); j++)
                        ring->sync_seqno[j] = 0;
@@ -1951,6 +1938,32 @@ i915_gem_handle_seqno_wrap(struct drm_device *dev)
        return 0;
 }
 
+int i915_gem_set_seqno(struct drm_device *dev, u32 seqno)
+{
+       struct drm_i915_private *dev_priv = dev->dev_private;
+       int ret;
+
+       if (seqno == 0)
+               return -EINVAL;
+
+       /* HWS page needs to be set less than what we
+        * will inject to ring
+        */
+       ret = i915_gem_init_seqno(dev, seqno - 1);
+       if (ret)
+               return ret;
+
+       /* Carefully set the last_seqno value so that wrap
+        * detection still works
+        */
+       dev_priv->next_seqno = seqno;
+       dev_priv->last_seqno = seqno - 1;
+       if (dev_priv->last_seqno == 0)
+               dev_priv->last_seqno--;
+
+       return 0;
+}
+
 int
 i915_gem_get_seqno(struct drm_device *dev, u32 *seqno)
 {
@@ -1958,7 +1971,7 @@ i915_gem_get_seqno(struct drm_device *dev, u32 *seqno)
 
        /* reserve 0 for non-seqno */
        if (dev_priv->next_seqno == 0) {
-               int ret = i915_gem_handle_seqno_wrap(dev);
+               int ret = i915_gem_init_seqno(dev, 0);
                if (ret)
                        return ret;
 
@@ -2032,7 +2045,7 @@ i915_add_request(struct intel_ring_buffer *ring,
 
        if (!dev_priv->mm.suspended) {
                if (i915_enable_hangcheck) {
-                       mod_timer(&dev_priv->hangcheck_timer,
+                       mod_timer(&dev_priv->gpu_error.hangcheck_timer,
                                  round_jiffies_up(jiffies + DRM_I915_HANGCHECK_JIFFIES));
                }
                if (was_empty) {
@@ -2434,7 +2447,7 @@ int
 i915_gem_object_unbind(struct drm_i915_gem_object *obj)
 {
        drm_i915_private_t *dev_priv = obj->base.dev->dev_private;
-       int ret = 0;
+       int ret;
 
        if (obj->gtt_space == NULL)
                return 0;
@@ -2501,52 +2514,38 @@ int i915_gpu_idle(struct drm_device *dev)
        return 0;
 }
 
-static void sandybridge_write_fence_reg(struct drm_device *dev, int reg,
-                                       struct drm_i915_gem_object *obj)
-{
-       drm_i915_private_t *dev_priv = dev->dev_private;
-       uint64_t val;
-
-       if (obj) {
-               u32 size = obj->gtt_space->size;
-
-               val = (uint64_t)((obj->gtt_offset + size - 4096) &
-                                0xfffff000) << 32;
-               val |= obj->gtt_offset & 0xfffff000;
-               val |= (uint64_t)((obj->stride / 128) - 1) <<
-                       SANDYBRIDGE_FENCE_PITCH_SHIFT;
-
-               if (obj->tiling_mode == I915_TILING_Y)
-                       val |= 1 << I965_FENCE_TILING_Y_SHIFT;
-               val |= I965_FENCE_REG_VALID;
-       } else
-               val = 0;
-
-       I915_WRITE64(FENCE_REG_SANDYBRIDGE_0 + reg * 8, val);
-       POSTING_READ(FENCE_REG_SANDYBRIDGE_0 + reg * 8);
-}
-
 static void i965_write_fence_reg(struct drm_device *dev, int reg,
                                 struct drm_i915_gem_object *obj)
 {
        drm_i915_private_t *dev_priv = dev->dev_private;
+       int fence_reg;
+       int fence_pitch_shift;
        uint64_t val;
 
+       if (INTEL_INFO(dev)->gen >= 6) {
+               fence_reg = FENCE_REG_SANDYBRIDGE_0;
+               fence_pitch_shift = SANDYBRIDGE_FENCE_PITCH_SHIFT;
+       } else {
+               fence_reg = FENCE_REG_965_0;
+               fence_pitch_shift = I965_FENCE_PITCH_SHIFT;
+       }
+
        if (obj) {
                u32 size = obj->gtt_space->size;
 
                val = (uint64_t)((obj->gtt_offset + size - 4096) &
                                 0xfffff000) << 32;
                val |= obj->gtt_offset & 0xfffff000;
-               val |= ((obj->stride / 128) - 1) << I965_FENCE_PITCH_SHIFT;
+               val |= (uint64_t)((obj->stride / 128) - 1) << fence_pitch_shift;
                if (obj->tiling_mode == I915_TILING_Y)
                        val |= 1 << I965_FENCE_TILING_Y_SHIFT;
                val |= I965_FENCE_REG_VALID;
        } else
                val = 0;
 
-       I915_WRITE64(FENCE_REG_965_0 + reg * 8, val);
-       POSTING_READ(FENCE_REG_965_0 + reg * 8);
+       fence_reg += reg * 8;
+       I915_WRITE64(fence_reg, val);
+       POSTING_READ(fence_reg);
 }
 
 static void i915_write_fence_reg(struct drm_device *dev, int reg,
@@ -2630,7 +2629,7 @@ static void i915_gem_write_fence(struct drm_device *dev, int reg,
 {
        switch (INTEL_INFO(dev)->gen) {
        case 7:
-       case 6: sandybridge_write_fence_reg(dev, reg, obj); break;
+       case 6:
        case 5:
        case 4: i965_write_fence_reg(dev, reg, obj); break;
        case 3: i915_write_fence_reg(dev, reg, obj); break;
@@ -2883,26 +2882,21 @@ i915_gem_object_bind_to_gtt(struct drm_i915_gem_object *obj,
 {
        struct drm_device *dev = obj->base.dev;
        drm_i915_private_t *dev_priv = dev->dev_private;
-       struct drm_mm_node *free_space;
+       struct drm_mm_node *node;
        u32 size, fence_size, fence_alignment, unfenced_alignment;
        bool mappable, fenceable;
        int ret;
 
-       if (obj->madv != I915_MADV_WILLNEED) {
-               DRM_ERROR("Attempting to bind a purgeable object\n");
-               return -EINVAL;
-       }
-
        fence_size = i915_gem_get_gtt_size(dev,
                                           obj->base.size,
                                           obj->tiling_mode);
        fence_alignment = i915_gem_get_gtt_alignment(dev,
                                                     obj->base.size,
-                                                    obj->tiling_mode);
+                                                    obj->tiling_mode, true);
        unfenced_alignment =
-               i915_gem_get_unfenced_gtt_alignment(dev,
+               i915_gem_get_gtt_alignment(dev,
                                                    obj->base.size,
-                                                   obj->tiling_mode);
+                                                   obj->tiling_mode, false);
 
        if (alignment == 0)
                alignment = map_and_fenceable ? fence_alignment :
@@ -2918,7 +2912,7 @@ i915_gem_object_bind_to_gtt(struct drm_i915_gem_object *obj,
         * before evicting everything in a vain attempt to find space.
         */
        if (obj->base.size >
-           (map_and_fenceable ? dev_priv->mm.gtt_mappable_end : dev_priv->mm.gtt_total)) {
+           (map_and_fenceable ? dev_priv->gtt.mappable_end : dev_priv->gtt.total)) {
                DRM_ERROR("Attempting to bind an object larger than the aperture\n");
                return -E2BIG;
        }
@@ -2929,69 +2923,57 @@ i915_gem_object_bind_to_gtt(struct drm_i915_gem_object *obj,
 
        i915_gem_object_pin_pages(obj);
 
+       node = kzalloc(sizeof(*node), GFP_KERNEL);
+       if (node == NULL) {
+               i915_gem_object_unpin_pages(obj);
+               return -ENOMEM;
+       }
+
  search_free:
        if (map_and_fenceable)
-               free_space = drm_mm_search_free_in_range_color(&dev_priv->mm.gtt_space,
-                                                              size, alignment, obj->cache_level,
-                                                              0, dev_priv->mm.gtt_mappable_end,
-                                                              false);
+               ret = drm_mm_insert_node_in_range_generic(&dev_priv->mm.gtt_space, node,
+                                                         size, alignment, obj->cache_level,
+                                                         0, dev_priv->gtt.mappable_end);
        else
-               free_space = drm_mm_search_free_color(&dev_priv->mm.gtt_space,
-                                                     size, alignment, obj->cache_level,
-                                                     false);
-
-       if (free_space != NULL) {
-               if (map_and_fenceable)
-                       free_space =
-                               drm_mm_get_block_range_generic(free_space,
-                                                              size, alignment, obj->cache_level,
-                                                              0, dev_priv->mm.gtt_mappable_end,
-                                                              false);
-               else
-                       free_space =
-                               drm_mm_get_block_generic(free_space,
-                                                        size, alignment, obj->cache_level,
-                                                        false);
-       }
-       if (free_space == NULL) {
+               ret = drm_mm_insert_node_generic(&dev_priv->mm.gtt_space, node,
+                                                size, alignment, obj->cache_level);
+       if (ret) {
                ret = i915_gem_evict_something(dev, size, alignment,
                                               obj->cache_level,
                                               map_and_fenceable,
                                               nonblocking);
-               if (ret) {
-                       i915_gem_object_unpin_pages(obj);
-                       return ret;
-               }
+               if (ret == 0)
+                       goto search_free;
 
-               goto search_free;
+               i915_gem_object_unpin_pages(obj);
+               kfree(node);
+               return ret;
        }
-       if (WARN_ON(!i915_gem_valid_gtt_space(dev,
-                                             free_space,
-                                             obj->cache_level))) {
+       if (WARN_ON(!i915_gem_valid_gtt_space(dev, node, obj->cache_level))) {
                i915_gem_object_unpin_pages(obj);
-               drm_mm_put_block(free_space);
+               drm_mm_put_block(node);
                return -EINVAL;
        }
 
        ret = i915_gem_gtt_prepare_object(obj);
        if (ret) {
                i915_gem_object_unpin_pages(obj);
-               drm_mm_put_block(free_space);
+               drm_mm_put_block(node);
                return ret;
        }
 
        list_move_tail(&obj->gtt_list, &dev_priv->mm.bound_list);
        list_add_tail(&obj->mm_list, &dev_priv->mm.inactive_list);
 
-       obj->gtt_space = free_space;
-       obj->gtt_offset = free_space->start;
+       obj->gtt_space = node;
+       obj->gtt_offset = node->start;
 
        fenceable =
-               free_space->size == fence_size &&
-               (free_space->start & (fence_alignment - 1)) == 0;
+               node->size == fence_size &&
+               (node->start & (fence_alignment - 1)) == 0;
 
        mappable =
-               obj->gtt_offset + obj->base.size <= dev_priv->mm.gtt_mappable_end;
+               obj->gtt_offset + obj->base.size <= dev_priv->gtt.mappable_end;
 
        obj->map_and_fenceable = mappable && fenceable;
 
@@ -3408,7 +3390,7 @@ i915_gem_ring_throttle(struct drm_device *dev, struct drm_file *file)
        u32 seqno = 0;
        int ret;
 
-       if (atomic_read(&dev_priv->mm.wedged))
+       if (atomic_read(&dev_priv->gpu_error.wedged))
                return -EIO;
 
        spin_lock(&file_priv->mm.lock);
@@ -3821,7 +3803,7 @@ i915_gem_idle(struct drm_device *dev)
         * And not confound mm.suspended!
         */
        dev_priv->mm.suspended = 1;
-       del_timer_sync(&dev_priv->hangcheck_timer);
+       del_timer_sync(&dev_priv->gpu_error.hangcheck_timer);
 
        i915_kernel_lost_context(dev);
        i915_gem_cleanup_ringbuffer(dev);
@@ -3955,58 +3937,13 @@ cleanup_render_ring:
        return ret;
 }
 
-static bool
-intel_enable_ppgtt(struct drm_device *dev)
-{
-       if (i915_enable_ppgtt >= 0)
-               return i915_enable_ppgtt;
-
-#ifdef CONFIG_INTEL_IOMMU
-       /* Disable ppgtt on SNB if VT-d is on. */
-       if (INTEL_INFO(dev)->gen == 6 && intel_iommu_gfx_mapped)
-               return false;
-#endif
-
-       return true;
-}
-
 int i915_gem_init(struct drm_device *dev)
 {
        struct drm_i915_private *dev_priv = dev->dev_private;
-       unsigned long gtt_size, mappable_size;
        int ret;
 
-       gtt_size = dev_priv->mm.gtt->gtt_total_entries << PAGE_SHIFT;
-       mappable_size = dev_priv->mm.gtt->gtt_mappable_entries << PAGE_SHIFT;
-
        mutex_lock(&dev->struct_mutex);
-       if (intel_enable_ppgtt(dev) && HAS_ALIASING_PPGTT(dev)) {
-               /* PPGTT pdes are stolen from global gtt ptes, so shrink the
-                * aperture accordingly when using aliasing ppgtt. */
-               gtt_size -= I915_PPGTT_PD_ENTRIES*PAGE_SIZE;
-
-               i915_gem_init_global_gtt(dev, 0, mappable_size, gtt_size);
-
-               ret = i915_gem_init_aliasing_ppgtt(dev);
-               if (ret) {
-                       mutex_unlock(&dev->struct_mutex);
-                       return ret;
-               }
-       } else {
-               /* Let GEM Manage all of the aperture.
-                *
-                * However, leave one page at the end still bound to the scratch
-                * page.  There are a number of places where the hardware
-                * apparently prefetches past the end of the object, and we've
-                * seen multiple hangs with the GPU head pointer stuck in a
-                * batchbuffer bound at the last page of the aperture.  One page
-                * should be enough to keep any prefetching inside of the
-                * aperture.
-                */
-               i915_gem_init_global_gtt(dev, 0, mappable_size,
-                                        gtt_size);
-       }
-
+       i915_gem_init_global_gtt(dev);
        ret = i915_gem_init_hw(dev);
        mutex_unlock(&dev->struct_mutex);
        if (ret) {
@@ -4041,9 +3978,9 @@ i915_gem_entervt_ioctl(struct drm_device *dev, void *data,
        if (drm_core_check_feature(dev, DRIVER_MODESET))
                return 0;
 
-       if (atomic_read(&dev_priv->mm.wedged)) {
+       if (atomic_read(&dev_priv->gpu_error.wedged)) {
                DRM_ERROR("Reenabling wedged hardware, good luck\n");
-               atomic_set(&dev_priv->mm.wedged, 0);
+               atomic_set(&dev_priv->gpu_error.wedged, 0);
        }
 
        mutex_lock(&dev->struct_mutex);
@@ -4127,7 +4064,7 @@ i915_gem_load(struct drm_device *dev)
                INIT_LIST_HEAD(&dev_priv->fence_regs[i].lru_list);
        INIT_DELAYED_WORK(&dev_priv->mm.retire_work,
                          i915_gem_retire_work_handler);
-       init_completion(&dev_priv->error_completion);
+       init_completion(&dev_priv->gpu_error.completion);
 
        /* On GEN3 we really need to make sure the ARB C3 LP bit is set */
        if (IS_GEN3(dev)) {
@@ -4394,6 +4331,9 @@ i915_gem_inactive_shrink(struct shrinker *shrinker, struct shrink_control *sc)
                if (!mutex_is_locked_by(&dev->struct_mutex, current))
                        return 0;
 
+               if (dev_priv->mm.shrinker_no_lock_stealing)
+                       return 0;
+
                unlock = false;
        }
 