Merge tag 'v3.14' into drm-intel-next-queued
drivers/gpu/drm/radeon/radeon_fence.c
index d3a86e43c0123715e0cf765a738e6664f42ab6e0..a77b1c13ea43d6bea244e0da8666c82bb082ea60 100644 (file)
@@ -121,7 +121,7 @@ int radeon_fence_emit(struct radeon_device *rdev,
        (*fence)->seq = ++rdev->fence_drv[ring].sync_seq[ring];
        (*fence)->ring = ring;
        radeon_fence_ring_emit(rdev, ring, *fence);
-       trace_radeon_fence_emit(rdev->ddev, (*fence)->seq);
+       trace_radeon_fence_emit(rdev->ddev, ring, (*fence)->seq);
        return 0;
 }
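
[Note: the fence tracepoints now carry the ring index alongside the sequence number. A minimal sketch of the matching event-class change in radeon_trace.h, assuming the fence events share a DECLARE_EVENT_CLASS as in mainline; the field layout is illustrative:]

DECLARE_EVENT_CLASS(radeon_fence_request,
	    TP_PROTO(struct drm_device *dev, int ring, u32 seqno),
	    TP_ARGS(dev, ring, seqno),
	    TP_STRUCT__entry(
			     __field(u32, dev)
			     __field(int, ring)
			     __field(u32, seqno)
			     ),
	    TP_fast_assign(
			   __entry->dev = dev->primary->index;
			   __entry->ring = ring;
			   __entry->seqno = seqno;
			   ),
	    TP_printk("dev=%u, ring=%d, seqno=%u",
		      __entry->dev, __entry->ring, __entry->seqno)
);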
 
@@ -288,7 +288,6 @@ static bool radeon_fence_any_seq_signaled(struct radeon_device *rdev, u64 *seq)
  * @rdev: radeon device pointer
  * @target_seq: sequence number(s) we want to wait for
  * @intr: use interruptible sleep
- * @lock_ring: whether the ring should be locked or not
  *
  * Wait for the requested sequence number(s) to be written by any ring
  * (all asics).  Sequence number array is indexed by ring id.
@@ -299,7 +298,7 @@ static bool radeon_fence_any_seq_signaled(struct radeon_device *rdev, u64 *seq)
  * -EDEADLK is returned when a GPU lockup has been detected.
  */
 static int radeon_fence_wait_seq(struct radeon_device *rdev, u64 *target_seq,
-                                bool intr, bool lock_ring)
+                                bool intr)
 {
        uint64_t last_seq[RADEON_NUM_RINGS];
        bool signaled;
@@ -313,7 +312,7 @@ static int radeon_fence_wait_seq(struct radeon_device *rdev, u64 *target_seq,
                                continue;
 
                        last_seq[i] = atomic64_read(&rdev->fence_drv[i].last_seq);
-                       trace_radeon_fence_wait_begin(rdev->ddev, target_seq[i]);
+                       trace_radeon_fence_wait_begin(rdev->ddev, i, target_seq[i]);
                        radeon_irq_kms_sw_irq_get(rdev, i);
                }
 
@@ -332,7 +331,7 @@ static int radeon_fence_wait_seq(struct radeon_device *rdev, u64 *target_seq,
                                continue;
 
                        radeon_irq_kms_sw_irq_put(rdev, i);
-                       trace_radeon_fence_wait_end(rdev->ddev, target_seq[i]);
+                       trace_radeon_fence_wait_end(rdev->ddev, i, target_seq[i]);
                }
 
                if (unlikely(r < 0))
@@ -358,9 +357,6 @@ static int radeon_fence_wait_seq(struct radeon_device *rdev, u64 *target_seq,
                        if (i != RADEON_NUM_RINGS)
                                continue;
 
-                       if (lock_ring)
-                               mutex_lock(&rdev->ring_lock);
-
                        for (i = 0; i < RADEON_NUM_RINGS; ++i) {
                                if (!target_seq[i])
                                        continue;
@@ -378,14 +374,9 @@ static int radeon_fence_wait_seq(struct radeon_device *rdev, u64 *target_seq,
 
                                /* remember that we need a reset */
                                rdev->needs_reset = true;
-                               if (lock_ring)
-                                       mutex_unlock(&rdev->ring_lock);
                                wake_up_all(&rdev->fence_queue);
                                return -EDEADLK;
                        }
-
-                       if (lock_ring)
-                               mutex_unlock(&rdev->ring_lock);
                }
        }
        return 0;
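
[A hedged usage sketch of the simplified radeon_fence_wait_seq(): the caller fills a per-ring sequence array, leaving rings it does not care about at zero so the wait loop skips them. example_wait_one and fence_seq are hypothetical names, not part of this patch:]

/* Hedged sketch: wait for one sequence number on a single ring.
 * Entries left at zero are skipped by the wait loop above. */
static int example_wait_one(struct radeon_device *rdev, int ring,
			    uint64_t fence_seq)
{
	uint64_t seq[RADEON_NUM_RINGS] = {};

	seq[ring] = fence_seq;
	/* interruptible wait; -EDEADLK signals a detected GPU lockup */
	return radeon_fence_wait_seq(rdev, seq, true);
}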
@@ -416,7 +407,7 @@ int radeon_fence_wait(struct radeon_fence *fence, bool intr)
        if (seq[fence->ring] == RADEON_FENCE_SIGNALED_SEQ)
                return 0;
 
-       r = radeon_fence_wait_seq(fence->rdev, seq, intr, true);
+       r = radeon_fence_wait_seq(fence->rdev, seq, intr);
        if (r)
                return r;
 
@@ -464,7 +455,7 @@ int radeon_fence_wait_any(struct radeon_device *rdev,
        if (num_rings == 0)
                return -ENOENT;
 
-       r = radeon_fence_wait_seq(rdev, seq, intr, true);
+       r = radeon_fence_wait_seq(rdev, seq, intr);
        if (r) {
                return r;
        }
@@ -472,37 +463,7 @@ int radeon_fence_wait_any(struct radeon_device *rdev,
 }
 
 /**
- * radeon_fence_wait_locked - wait for a fence to signal
- *
- * @fence: radeon fence object
- *
- * Wait for the requested fence to signal (all asics).
- * Returns 0 if the fence has passed, error for all other cases.
- */
-int radeon_fence_wait_locked(struct radeon_fence *fence)
-{
-       uint64_t seq[RADEON_NUM_RINGS] = {};
-       int r;
-
-       if (fence == NULL) {
-               WARN(1, "Querying an invalid fence : %p !\n", fence);
-               return -EINVAL;
-       }
-
-       seq[fence->ring] = fence->seq;
-       if (seq[fence->ring] == RADEON_FENCE_SIGNALED_SEQ)
-               return 0;
-
-       r = radeon_fence_wait_seq(fence->rdev, seq, false, false);
-       if (r)
-               return r;
-
-       fence->seq = RADEON_FENCE_SIGNALED_SEQ;
-       return 0;
-}
-
-/**
- * radeon_fence_wait_next_locked - wait for the next fence to signal
+ * radeon_fence_wait_next - wait for the next fence to signal
  *
  * @rdev: radeon device pointer
  * @ring: ring index the fence is associated with
@@ -511,7 +472,7 @@ int radeon_fence_wait_locked(struct radeon_fence *fence)
  * Returns 0 if the next fence has passed, error for all other cases.
  * Caller must hold ring lock.
  */
-int radeon_fence_wait_next_locked(struct radeon_device *rdev, int ring)
+int radeon_fence_wait_next(struct radeon_device *rdev, int ring)
 {
        uint64_t seq[RADEON_NUM_RINGS] = {};
 
@@ -521,11 +482,11 @@ int radeon_fence_wait_next_locked(struct radeon_device *rdev, int ring)
                   already the last emitted fence */
                return -ENOENT;
        }
-       return radeon_fence_wait_seq(rdev, seq, false, false);
+       return radeon_fence_wait_seq(rdev, seq, false);
 }
 
 /**
- * radeon_fence_wait_empty_locked - wait for all fences to signal
+ * radeon_fence_wait_empty - wait for all fences to signal
  *
  * @rdev: radeon device pointer
  * @ring: ring index the fence is associated with
@@ -534,7 +495,7 @@ int radeon_fence_wait_next_locked(struct radeon_device *rdev, int ring)
  * Returns 0 if the fences have passed, error for all other cases.
  * Caller must hold ring lock.
  */
-int radeon_fence_wait_empty_locked(struct radeon_device *rdev, int ring)
+int radeon_fence_wait_empty(struct radeon_device *rdev, int ring)
 {
        uint64_t seq[RADEON_NUM_RINGS] = {};
        int r;
@@ -543,7 +504,7 @@ int radeon_fence_wait_empty_locked(struct radeon_device *rdev, int ring)
        if (!seq[ring])
                return 0;
 
-       r = radeon_fence_wait_seq(rdev, seq, false, false);
+       r = radeon_fence_wait_seq(rdev, seq, false);
        if (r) {
                if (r == -EDEADLK)
                        return -EDEADLK;
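
[With the lock_ring parameter gone, serialization moves to the callers: the renamed radeon_fence_wait_next()/radeon_fence_wait_empty() now document that the ring lock must already be held. A hedged sketch of how a suspend-style path might wrap the helper; the loop itself is illustrative, not from this patch:]

	mutex_lock(&rdev->ring_lock);
	for (i = 0; i < RADEON_NUM_RINGS; ++i) {
		if (!rdev->fence_drv[i].initialized)
			continue;
		/* caller holds ring_lock, as the kerneldoc now requires */
		radeon_fence_wait_empty(rdev, i);
	}
	mutex_unlock(&rdev->ring_lock);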
@@ -794,7 +755,7 @@ void radeon_fence_driver_fini(struct radeon_device *rdev)
        for (ring = 0; ring < RADEON_NUM_RINGS; ring++) {
                if (!rdev->fence_drv[ring].initialized)
                        continue;
-               r = radeon_fence_wait_empty_locked(rdev, ring);
+               r = radeon_fence_wait_empty(rdev, ring);
                if (r) {
                        /* no need to trigger GPU reset as we are unloading */
                        radeon_fence_driver_force_completion(rdev);
@@ -841,6 +802,8 @@ static int radeon_debugfs_fence_info(struct seq_file *m, void *data)
                if (!rdev->fence_drv[i].initialized)
                        continue;
 
+               radeon_fence_process(rdev, i);
+
                seq_printf(m, "--- ring %d ---\n", i);
                seq_printf(m, "Last signaled fence 0x%016llx\n",
                           (unsigned long long)atomic64_read(&rdev->fence_drv[i].last_seq));
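
[Calling radeon_fence_process() first refreshes last_seq from the hardware before printing, so the debugfs file reports current rather than stale values. With the format strings above, the per-ring output would look like the following; the fence value shown is hypothetical:]

	--- ring 0 ---
	Last signaled fence 0x0000000000001234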