drm/i915/skl: Program the DDB allocation
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 28f91df2604db0bfb867a548e701a5e9a48b7fa3..1de94cc635174fc786b6f96f950a2f48624f7ef7 100644
@@ -1281,8 +1281,7 @@ i915_wait_seqno(struct intel_engine_cs *ring, uint32_t seqno)
 }
 
 static int
-i915_gem_object_wait_rendering__tail(struct drm_i915_gem_object *obj,
-                                    struct intel_engine_cs *ring)
+i915_gem_object_wait_rendering__tail(struct drm_i915_gem_object *obj)
 {
        if (!obj->active)
                return 0;
@@ -1319,7 +1318,7 @@ i915_gem_object_wait_rendering(struct drm_i915_gem_object *obj,
        if (ret)
                return ret;
 
-       return i915_gem_object_wait_rendering__tail(obj, ring);
+       return i915_gem_object_wait_rendering__tail(obj);
 }
 
 /* A nonblocking variant of the above wait. This is a highly dangerous routine
@@ -1359,7 +1358,7 @@ i915_gem_object_wait_rendering__nonblocking(struct drm_i915_gem_object *obj,
        if (ret)
                return ret;
 
-       return i915_gem_object_wait_rendering__tail(obj, ring);
+       return i915_gem_object_wait_rendering__tail(obj);
 }
 
 /**
@@ -1466,6 +1465,16 @@ unlock:
  *
  * While the mapping holds a reference on the contents of the object, it doesn't
  * imply a ref on the object itself.
+ *
+ * IMPORTANT:
+ *
+ * DRM driver writers who look at this function as an example of how to do GEM
+ * mmap support, please don't implement mmap support like this. The modern way
+ * to implement DRM mmap support is with an mmap offset ioctl (like
+ * i915_gem_mmap_gtt) and then using the mmap syscall on the DRM fd directly.
+ * That way, debug tooling like valgrind will understand what's going on;
+ * hiding the mmap call in a driver-private ioctl will break that. The i915
+ * driver only does CPU mmaps this way because we didn't know better.
  */
 int
 i915_gem_mmap_ioctl(struct drm_device *dev, void *data,
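
The comment above names the preferred pattern; as a minimal userspace sketch of it (assuming a valid DRM fd and GEM handle obtained elsewhere, error handling trimmed), the mmap-offset flow looks like this:

    #include <stdint.h>
    #include <stddef.h>
    #include <sys/mman.h>
    #include <xf86drm.h>   /* drmIoctl() from libdrm */
    #include <i915_drm.h>

    /* Ask the kernel for a fake mmap offset, then mmap the DRM fd
     * itself so tools like valgrind can see the mapping. */
    static void *map_gtt(int fd, uint32_t handle, size_t size)
    {
            struct drm_i915_gem_mmap_gtt arg = { .handle = handle };

            if (drmIoctl(fd, DRM_IOCTL_I915_GEM_MMAP_GTT, &arg))
                    return MAP_FAILED;

            return mmap(NULL, size, PROT_READ | PROT_WRITE,
                        MAP_SHARED, fd, arg.offset);
    }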
@@ -1945,7 +1954,14 @@ unsigned long
 i915_gem_shrink(struct drm_i915_private *dev_priv,
                long target, unsigned flags)
 {
-       const bool purgeable_only = flags & I915_SHRINK_PURGEABLE;
+       const struct {
+               struct list_head *list;
+               unsigned int bit;
+       } phases[] = {
+               { &dev_priv->mm.unbound_list, I915_SHRINK_UNBOUND },
+               { &dev_priv->mm.bound_list, I915_SHRINK_BOUND },
+               { NULL, 0 },
+       }, *phase;
        unsigned long count = 0;
 
        /*
@@ -1967,48 +1983,30 @@ i915_gem_shrink(struct drm_i915_private *dev_priv,
         * dev->struct_mutex and so we won't ever be able to observe an
         * object on the bound_list with a reference count equals 0.
         */
-       if (flags & I915_SHRINK_UNBOUND) {
+       for (phase = phases; phase->list; phase++) {
                struct list_head still_in_list;
 
-               INIT_LIST_HEAD(&still_in_list);
-               while (count < target && !list_empty(&dev_priv->mm.unbound_list)) {
-                       struct drm_i915_gem_object *obj;
-
-                       obj = list_first_entry(&dev_priv->mm.unbound_list,
-                                              typeof(*obj), global_list);
-                       list_move_tail(&obj->global_list, &still_in_list);
-
-                       if (!i915_gem_object_is_purgeable(obj) && purgeable_only)
-                               continue;
-
-                       drm_gem_object_reference(&obj->base);
-
-                       if (i915_gem_object_put_pages(obj) == 0)
-                               count += obj->base.size >> PAGE_SHIFT;
-
-                       drm_gem_object_unreference(&obj->base);
-               }
-               list_splice(&still_in_list, &dev_priv->mm.unbound_list);
-       }
-
-       if (flags & I915_SHRINK_BOUND) {
-               struct list_head still_in_list;
+               if ((flags & phase->bit) == 0)
+                       continue;
 
                INIT_LIST_HEAD(&still_in_list);
-               while (count < target && !list_empty(&dev_priv->mm.bound_list)) {
+               while (count < target && !list_empty(phase->list)) {
                        struct drm_i915_gem_object *obj;
                        struct i915_vma *vma, *v;
 
-                       obj = list_first_entry(&dev_priv->mm.bound_list,
+                       obj = list_first_entry(phase->list,
                                               typeof(*obj), global_list);
                        list_move_tail(&obj->global_list, &still_in_list);
 
-                       if (!i915_gem_object_is_purgeable(obj) && purgeable_only)
+                       if (flags & I915_SHRINK_PURGEABLE &&
+                           !i915_gem_object_is_purgeable(obj))
                                continue;
 
                        drm_gem_object_reference(&obj->base);
 
-                       list_for_each_entry_safe(vma, v, &obj->vma_list, vma_link)
+                       /* For the unbound phase, this should be a no-op! */
+                       list_for_each_entry_safe(vma, v,
+                                                &obj->vma_list, vma_link)
                                if (i915_vma_unbind(vma))
                                        break;
 
@@ -2017,7 +2015,7 @@ i915_gem_shrink(struct drm_i915_private *dev_priv,
 
                        drm_gem_object_unreference(&obj->base);
                }
-               list_splice(&still_in_list, &dev_priv->mm.bound_list);
+               list_splice(&still_in_list, phase->list);
        }
 
        return count;
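
For readers skimming the refactor: the two near-identical loops are folded into one by walking a NULL-terminated table of phases and skipping any phase whose bit isn't set in the caller's flags. A stand-alone sketch of the idiom, with hypothetical names rather than the driver's lists:

    #include <stdio.h>

    struct phase {
            const char *list;       /* stands in for the object list head */
            unsigned int bit;
    };

    enum { SHRINK_UNBOUND = 1 << 0, SHRINK_BOUND = 1 << 1 };

    static void shrink(unsigned int flags)
    {
            static const struct phase phases[] = {
                    { "unbound", SHRINK_UNBOUND },
                    { "bound",   SHRINK_BOUND },
                    { NULL, 0 },    /* sentinel terminates the walk */
            };
            const struct phase *phase;

            for (phase = phases; phase->list; phase++) {
                    if ((flags & phase->bit) == 0)
                            continue;       /* phase not requested */
                    printf("scanning the %s list\n", phase->list);
            }
    }

    int main(void)
    {
            shrink(SHRINK_BOUND);   /* visits only the bound phase */
            return 0;
    }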
@@ -2811,6 +2809,9 @@ i915_gem_wait_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
        u32 seqno = 0;
        int ret = 0;
 
+       if (args->flags != 0)
+               return -EINVAL;
+
        ret = i915_mutex_lock_interruptible(dev);
        if (ret)
                return ret;
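
From userspace, the new check means drm_i915_gem_wait.flags must be zeroed explicitly; a hedged sketch (assuming libdrm headers and a valid handle):

    #include <stdint.h>
    #include <xf86drm.h>
    #include <i915_drm.h>

    /* Block until the object is idle. With this patch the kernel
     * rejects any non-zero flags with -EINVAL. */
    static int wait_bo_idle(int fd, uint32_t handle)
    {
            struct drm_i915_gem_wait wait = {
                    .bo_handle = handle,
                    .timeout_ns = -1,       /* wait indefinitely */
                    .flags = 0,             /* must be 0 */
            };

            return drmIoctl(fd, DRM_IOCTL_I915_GEM_WAIT, &wait);
    }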
@@ -3166,6 +3167,7 @@ static void i915_gem_write_fence(struct drm_device *dev, int reg,
             obj->stride, obj->tiling_mode);
 
        switch (INTEL_INFO(dev)->gen) {
+       case 9:
        case 8:
        case 7:
        case 6:
@@ -3384,46 +3386,6 @@ static bool i915_gem_valid_gtt_space(struct i915_vma *vma,
        return true;
 }
 
-static void i915_gem_verify_gtt(struct drm_device *dev)
-{
-#if WATCH_GTT
-       struct drm_i915_private *dev_priv = dev->dev_private;
-       struct drm_i915_gem_object *obj;
-       int err = 0;
-
-       list_for_each_entry(obj, &dev_priv->mm.gtt_list, global_list) {
-               if (obj->gtt_space == NULL) {
-                       printk(KERN_ERR "object found on GTT list with no space reserved\n");
-                       err++;
-                       continue;
-               }
-
-               if (obj->cache_level != obj->gtt_space->color) {
-                       printk(KERN_ERR "object reserved space [%08lx, %08lx] with wrong color, cache_level=%x, color=%lx\n",
-                              i915_gem_obj_ggtt_offset(obj),
-                              i915_gem_obj_ggtt_offset(obj) + i915_gem_obj_ggtt_size(obj),
-                              obj->cache_level,
-                              obj->gtt_space->color);
-                       err++;
-                       continue;
-               }
-
-               if (!i915_gem_valid_gtt_space(dev,
-                                             obj->gtt_space,
-                                             obj->cache_level)) {
-                       printk(KERN_ERR "invalid GTT space found at [%08lx, %08lx] - color=%x\n",
-                              i915_gem_obj_ggtt_offset(obj),
-                              i915_gem_obj_ggtt_offset(obj) + i915_gem_obj_ggtt_size(obj),
-                              obj->cache_level);
-                       err++;
-                       continue;
-               }
-       }
-
-       WARN_ON(err);
-#endif
-}
-
 /**
  * Finds free space in the GTT aperture and binds the object there.
  */
@@ -3514,25 +3476,10 @@ search_free:
        list_move_tail(&obj->global_list, &dev_priv->mm.bound_list);
        list_add_tail(&vma->mm_list, &vm->inactive_list);
 
-       if (i915_is_ggtt(vm)) {
-               bool mappable, fenceable;
-
-               fenceable = (vma->node.size == fence_size &&
-                            (vma->node.start & (fence_alignment - 1)) == 0);
-
-               mappable = (vma->node.start + obj->base.size <=
-                           dev_priv->gtt.mappable_end);
-
-               obj->map_and_fenceable = mappable && fenceable;
-       }
-
-       WARN_ON(flags & PIN_MAPPABLE && !obj->map_and_fenceable);
-
        trace_i915_vma_bind(vma, flags);
        vma->bind_vma(vma, obj->cache_level,
-                     flags & (PIN_MAPPABLE | PIN_GLOBAL) ? GLOBAL_BIND : 0);
+                     flags & PIN_GLOBAL ? GLOBAL_BIND : 0);
 
-       i915_gem_verify_gtt(dev);
        return vma;
 
 err_remove_node:
@@ -3739,7 +3686,7 @@ int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj,
                list_for_each_entry(vma, &obj->vma_list, vma_link)
                        if (drm_mm_node_allocated(&vma->node))
                                vma->bind_vma(vma, cache_level,
-                                             obj->has_global_gtt_mapping ? GLOBAL_BIND : 0);
+                                               vma->bound & GLOBAL_BIND);
        }
 
        list_for_each_entry(vma, &obj->vma_list, vma_link)
@@ -3769,7 +3716,6 @@ int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj,
                                                    old_write_domain);
        }
 
-       i915_gem_verify_gtt(dev);
        return 0;
 }
 
@@ -4101,6 +4047,7 @@ i915_gem_object_pin(struct drm_i915_gem_object *obj,
 {
        struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
        struct i915_vma *vma;
+       unsigned bound;
        int ret;
 
        if (WARN_ON(vm == &dev_priv->mm.aliasing_ppgtt->base))
@@ -4109,6 +4056,9 @@ i915_gem_object_pin(struct drm_i915_gem_object *obj,
        if (WARN_ON(flags & (PIN_GLOBAL | PIN_MAPPABLE) && !i915_is_ggtt(vm)))
                return -EINVAL;
 
+       if (WARN_ON((flags & (PIN_MAPPABLE | PIN_GLOBAL)) == PIN_MAPPABLE))
+               return -EINVAL;
+
        vma = i915_gem_obj_to_vma(obj, vm);
        if (vma) {
                if (WARN_ON(vma->pin_count == DRM_I915_GEM_OBJECT_MAX_PIN_COUNT))
@@ -4130,15 +4080,39 @@ i915_gem_object_pin(struct drm_i915_gem_object *obj,
                }
        }
 
+       bound = vma ? vma->bound : 0;
        if (vma == NULL || !drm_mm_node_allocated(&vma->node)) {
                vma = i915_gem_object_bind_to_vm(obj, vm, alignment, flags);
                if (IS_ERR(vma))
                        return PTR_ERR(vma);
        }
 
-       if (flags & PIN_GLOBAL && !obj->has_global_gtt_mapping)
+       if (flags & PIN_GLOBAL && !(vma->bound & GLOBAL_BIND))
                vma->bind_vma(vma, obj->cache_level, GLOBAL_BIND);
 
+       if ((bound ^ vma->bound) & GLOBAL_BIND) {
+               bool mappable, fenceable;
+               u32 fence_size, fence_alignment;
+
+               fence_size = i915_gem_get_gtt_size(obj->base.dev,
+                                                  obj->base.size,
+                                                  obj->tiling_mode);
+               fence_alignment = i915_gem_get_gtt_alignment(obj->base.dev,
+                                                            obj->base.size,
+                                                            obj->tiling_mode,
+                                                            true);
+
+               fenceable = (vma->node.size == fence_size &&
+                            (vma->node.start & (fence_alignment - 1)) == 0);
+
+               mappable = (vma->node.start + obj->base.size <=
+                           dev_priv->gtt.mappable_end);
+
+               obj->map_and_fenceable = mappable && fenceable;
+       }
+
+       WARN_ON(flags & PIN_MAPPABLE && !obj->map_and_fenceable);
+
        vma->pin_count++;
        if (flags & PIN_MAPPABLE)
                obj->pin_mappable |= true;
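
The fenceable test above relies on the usual power-of-two alignment idiom: masking with (alignment - 1) leaves exactly the misaligned low bits. A tiny self-contained check with hypothetical values:

    #include <assert.h>
    #include <stdint.h>

    int main(void)
    {
            uint64_t start = 0x40000;               /* node start */
            uint64_t fence_alignment = 0x1000;      /* power of two */

            /* start & (alignment - 1) is zero iff start is aligned */
            assert((start & (fence_alignment - 1)) == 0);
            assert(((start + 0x800) & (fence_alignment - 1)) != 0);
            return 0;
    }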
@@ -5119,6 +5093,15 @@ int i915_gem_open(struct drm_device *dev, struct drm_file *file)
        return ret;
 }
 
+/**
+ * i915_gem_track_fb - update frontbuffer tracking
+ * @old: current GEM buffer for the frontbuffer slots
+ * @new: new GEM buffer for the frontbuffer slots
+ * @frontbuffer_bits: bitmask of frontbuffer slots
+ *
+ * This updates the frontbuffer tracking bits @frontbuffer_bits by clearing them
+ * from @old and setting them in @new. Both @old and @new can be NULL.
+ */
 void i915_gem_track_fb(struct drm_i915_gem_object *old,
                       struct drm_i915_gem_object *new,
                       unsigned frontbuffer_bits)
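
The body is not shown in this hunk, but the semantics the comment describes boil down to moving the tracking bits between the two objects; a sketch under the assumption that each object carries a frontbuffer_bits mask:

    /* Sketch only: clear the slots' bits on the old buffer and set
     * them on the new one; either pointer may be NULL. */
    if (old)
            old->frontbuffer_bits &= ~frontbuffer_bits;
    if (new)
            new->frontbuffer_bits |= frontbuffer_bits;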
@@ -5302,7 +5285,7 @@ i915_gem_shrinker_oom(struct notifier_block *nb, unsigned long event, void *ptr)
        struct drm_device *dev = dev_priv->dev;
        struct drm_i915_gem_object *obj;
        unsigned long timeout = msecs_to_jiffies(5000) + 1;
-       unsigned long pinned, bound, unbound, freed;
+       unsigned long pinned, bound, unbound, freed_pages;
        bool was_interruptible;
        bool unlock;
 
@@ -5319,7 +5302,7 @@ i915_gem_shrinker_oom(struct notifier_block *nb, unsigned long event, void *ptr)
        was_interruptible = dev_priv->mm.interruptible;
        dev_priv->mm.interruptible = false;
 
-       freed = i915_gem_shrink_all(dev_priv);
+       freed_pages = i915_gem_shrink_all(dev_priv);
 
        dev_priv->mm.interruptible = was_interruptible;
 
@@ -5350,14 +5333,15 @@ i915_gem_shrinker_oom(struct notifier_block *nb, unsigned long event, void *ptr)
        if (unlock)
                mutex_unlock(&dev->struct_mutex);
 
-       pr_info("Purging GPU memory, %lu bytes freed, %lu bytes still pinned.\n",
-               freed, pinned);
+       if (freed_pages || unbound || bound)
+               pr_info("Purging GPU memory, %lu bytes freed, %lu bytes still pinned.\n",
+                       freed_pages << PAGE_SHIFT, pinned);
        if (unbound || bound)
                pr_err("%lu and %lu bytes still available in the "
                       "bound and unbound GPU page lists.\n",
                       bound, unbound);
 
-       *(unsigned long *)ptr += freed;
+       *(unsigned long *)ptr += freed_pages;
        return NOTIFY_DONE;
 }
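
The rename also makes the units honest: the shrinker counts pages, so the report must shift by PAGE_SHIFT before printing bytes. A quick check of that arithmetic, assuming 4 KiB pages:

    #include <assert.h>

    int main(void)
    {
            const unsigned long page_shift = 12;    /* 4 KiB pages */
            unsigned long freed_pages = 3;

            /* pages << PAGE_SHIFT == pages * page size */
            assert((freed_pages << page_shift) == 3 * 4096);
            return 0;
    }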
 