Merge tag 'v3.14' into drm-intel-next-queued
[deliverable/linux.git] / drivers/gpu/drm/i915/i915_debugfs.c
index 144b0ff0a3f5687e02e63743403b34cb91d8fd03..049dcb5256d1e1851dd4f06a568eca36507ec936 100644
@@ -299,28 +299,62 @@ static int i915_gem_stolen_list_info(struct seq_file *m, void *data)
 } while (0)
 
 struct file_stats {
+       struct drm_i915_file_private *file_priv;
        int count;
-       size_t total, active, inactive, unbound;
+       size_t total, unbound;
+       size_t global, shared;
+       size_t active, inactive;
 };
 
 static int per_file_stats(int id, void *ptr, void *data)
 {
        struct drm_i915_gem_object *obj = ptr;
        struct file_stats *stats = data;
+       struct i915_vma *vma;
 
        stats->count++;
        stats->total += obj->base.size;
 
-       if (i915_gem_obj_ggtt_bound(obj)) {
-               if (!list_empty(&obj->ring_list))
-                       stats->active += obj->base.size;
-               else
-                       stats->inactive += obj->base.size;
+       if (obj->base.name || obj->base.dma_buf)
+               stats->shared += obj->base.size;
+
+       if (USES_FULL_PPGTT(obj->base.dev)) {
+               list_for_each_entry(vma, &obj->vma_list, vma_link) {
+                       struct i915_hw_ppgtt *ppgtt;
+
+                       if (!drm_mm_node_allocated(&vma->node))
+                               continue;
+
+                       if (i915_is_ggtt(vma->vm)) {
+                               stats->global += obj->base.size;
+                               continue;
+                       }
+
+                       ppgtt = container_of(vma->vm, struct i915_hw_ppgtt, base);
+                       if (ppgtt->ctx && ppgtt->ctx->file_priv != stats->file_priv)
+                               continue;
+
+                       if (obj->ring) /* XXX per-vma statistic */
+                               stats->active += obj->base.size;
+                       else
+                               stats->inactive += obj->base.size;
+
+                       return 0;
+               }
        } else {
-               if (!list_empty(&obj->global_list))
-                       stats->unbound += obj->base.size;
+               if (i915_gem_obj_ggtt_bound(obj)) {
+                       stats->global += obj->base.size;
+                       if (obj->ring)
+                               stats->active += obj->base.size;
+                       else
+                               stats->inactive += obj->base.size;
+                       return 0;
+               }
        }
 
+       if (!list_empty(&obj->global_list))
+               stats->unbound += obj->base.size;
+
        return 0;
 }
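
For reference, an annotated sketch of the reworked file_stats bookkeeping above (the comments are inferred from this hunk; illustration only, not the authoritative definition):

struct file_stats {
	struct drm_i915_file_private *file_priv; /* client whose per-process GTT is accounted */
	int count;               /* objects owned by this client */
	size_t total, unbound;   /* all bytes / bytes with no GTT node allocated */
	size_t global, shared;   /* bytes bound in the global GTT / bytes exported via flink or dma-buf */
	size_t active, inactive; /* bytes bound for this client (PPGTT, or GGTT without full PPGTT), busy on a ring vs idle */
};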
 
@@ -411,6 +445,7 @@ static int i915_gem_object_info(struct seq_file *m, void* data)
                struct task_struct *task;
 
                memset(&stats, 0, sizeof(stats));
+               stats.file_priv = file->driver_priv;
                idr_for_each(&file->object_idr, per_file_stats, &stats);
                /*
                 * Although we have a valid reference on file->pid, that does
@@ -420,12 +455,14 @@ static int i915_gem_object_info(struct seq_file *m, void* data)
                 */
                rcu_read_lock();
                task = pid_task(file->pid, PIDTYPE_PID);
-               seq_printf(m, "%s: %u objects, %zu bytes (%zu active, %zu inactive, %zu unbound)\n",
+               seq_printf(m, "%s: %u objects, %zu bytes (%zu active, %zu inactive, %zu global, %zu shared, %zu unbound)\n",
                           task ? task->comm : "<unknown>",
                           stats.count,
                           stats.total,
                           stats.active,
                           stats.inactive,
+                          stats.global,
+                          stats.shared,
                           stats.unbound);
                rcu_read_unlock();
        }
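
With the widened format string, each per-client line of the i915_gem_objects dump would read roughly as follows (the numbers are purely illustrative):

Xorg: 143 objects, 57671680 bytes (12582912 active, 33554432 inactive, 8388608 global, 4194304 shared, 2097152 unbound)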
@@ -1026,7 +1063,7 @@ static int i915_cur_delayinfo(struct seq_file *m, void *unused)
                           max_freq * GT_FREQUENCY_MULTIPLIER);
 
                seq_printf(m, "Max overclocked frequency: %dMHz\n",
-                          dev_priv->rps.hw_max * GT_FREQUENCY_MULTIPLIER);
+                          dev_priv->rps.max_freq * GT_FREQUENCY_MULTIPLIER);
        } else if (IS_VALLEYVIEW(dev)) {
                u32 freq_sts, val;
 
@@ -1498,8 +1535,8 @@ static int i915_ring_freq_table(struct seq_file *m, void *unused)
 
        seq_puts(m, "GPU freq (MHz)\tEffective CPU freq (MHz)\tEffective Ring freq (MHz)\n");
 
-       for (gpu_freq = dev_priv->rps.min_delay;
-            gpu_freq <= dev_priv->rps.max_delay;
+       for (gpu_freq = dev_priv->rps.min_freq_softlimit;
+            gpu_freq <= dev_priv->rps.max_freq_softlimit;
             gpu_freq++) {
                ia_freq = gpu_freq;
                sandybridge_pcode_read(dev_priv,
@@ -2012,11 +2049,9 @@ static int i915_pc8_status(struct seq_file *m, void *unused)
                return 0;
        }
 
-       mutex_lock(&dev_priv->pc8.lock);
        seq_printf(m, "GPU idle: %s\n", yesno(!dev_priv->mm.busy));
        seq_printf(m, "IRQs disabled: %s\n",
                   yesno(dev_priv->pm.irqs_disabled));
-       mutex_unlock(&dev_priv->pc8.lock);
 
        return 0;
 }
@@ -3451,9 +3486,9 @@ i915_max_freq_get(void *data, u64 *val)
                return ret;
 
        if (IS_VALLEYVIEW(dev))
-               *val = vlv_gpu_freq(dev_priv, dev_priv->rps.max_delay);
+               *val = vlv_gpu_freq(dev_priv, dev_priv->rps.max_freq_softlimit);
        else
-               *val = dev_priv->rps.max_delay * GT_FREQUENCY_MULTIPLIER;
+               *val = dev_priv->rps.max_freq_softlimit * GT_FREQUENCY_MULTIPLIER;
        mutex_unlock(&dev_priv->rps.hw_lock);
 
        return 0;
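
Note that on the non-Valleyview path the debugfs file still reports the softlimit scaled by GT_FREQUENCY_MULTIPLIER; assuming the usual multiplier of 50 MHz per RP unit, a stored max_freq_softlimit of 22 reads back as 1100 MHz.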
@@ -3490,16 +3525,16 @@ i915_max_freq_set(void *data, u64 val)
                do_div(val, GT_FREQUENCY_MULTIPLIER);
 
                rp_state_cap = I915_READ(GEN6_RP_STATE_CAP);
-               hw_max = dev_priv->rps.hw_max;
+               hw_max = dev_priv->rps.max_freq;
                hw_min = (rp_state_cap >> 16) & 0xff;
        }
 
-       if (val < hw_min || val > hw_max || val < dev_priv->rps.min_delay) {
+       if (val < hw_min || val > hw_max || val < dev_priv->rps.min_freq_softlimit) {
                mutex_unlock(&dev_priv->rps.hw_lock);
                return -EINVAL;
        }
 
-       dev_priv->rps.max_delay = val;
+       dev_priv->rps.max_freq_softlimit = val;
 
        if (IS_VALLEYVIEW(dev))
                valleyview_set_rps(dev, val);
@@ -3532,9 +3567,9 @@ i915_min_freq_get(void *data, u64 *val)
                return ret;
 
        if (IS_VALLEYVIEW(dev))
-               *val = vlv_gpu_freq(dev_priv, dev_priv->rps.min_delay);
+               *val = vlv_gpu_freq(dev_priv, dev_priv->rps.min_freq_softlimit);
        else
-               *val = dev_priv->rps.min_delay * GT_FREQUENCY_MULTIPLIER;
+               *val = dev_priv->rps.min_freq_softlimit * GT_FREQUENCY_MULTIPLIER;
        mutex_unlock(&dev_priv->rps.hw_lock);
 
        return 0;
@@ -3571,16 +3606,16 @@ i915_min_freq_set(void *data, u64 val)
                do_div(val, GT_FREQUENCY_MULTIPLIER);
 
                rp_state_cap = I915_READ(GEN6_RP_STATE_CAP);
-               hw_max = dev_priv->rps.hw_max;
+               hw_max = dev_priv->rps.max_freq;
                hw_min = (rp_state_cap >> 16) & 0xff;
        }
 
-       if (val < hw_min || val > hw_max || val > dev_priv->rps.max_delay) {
+       if (val < hw_min || val > hw_max || val > dev_priv->rps.max_freq_softlimit) {
                mutex_unlock(&dev_priv->rps.hw_lock);
                return -EINVAL;
        }
 
-       dev_priv->rps.min_delay = val;
+       dev_priv->rps.min_freq_softlimit = val;
 
        if (IS_VALLEYVIEW(dev))
                valleyview_set_rps(dev, val);
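
Taken together, the RPS renames in this file swap the old delay-based names for explicit frequency names: rps.min_delay becomes rps.min_freq_softlimit, rps.max_delay becomes rps.max_freq_softlimit, and rps.hw_max becomes rps.max_freq. A rough sketch of the validation both setters now perform (the helper below is hypothetical, written only to illustrate the checks visible in the hunks above; hw_max is dev_priv->rps.max_freq and hw_min comes from GEN6_RP_STATE_CAP on the non-Valleyview path):

/* Illustrative only -- not part of the driver. */
static bool rps_request_valid(u64 val, u32 hw_min, u32 hw_max,
			      u32 min_soft, u32 max_soft, bool setting_max)
{
	if (val < hw_min || val > hw_max)
		return false;

	/* a new max may not drop below the current soft min, and vice versa */
	return setting_max ? val >= min_soft : val <= max_soft;
}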