This originates from a hack by me to quickly fix a bug in an earlier
patch where we needed control over whether or not waiting on a seqno
actually did any retire list processing. Since the two operations aren't
clearly related, we should pull the parameter out of the wait function,
and make the caller responsible for retiring if the action is desired.
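As a rough before/after sketch of the calling convention (illustrative
only; the exact error handling varies by call site):

	/* before: retiring was folded into the wait */
	ret = i915_wait_request(ring, seqno, true);

	/* after: the caller retires explicitly, and only if it wants to */
	ret = i915_wait_request(ring, seqno);
	if (ret == 0)
		i915_gem_retire_requests_ring(ring);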
The only call site which did not get an explicit retire_requests call
(on purpose) is i915_gem_inactive_shrink(). That code was already
calling retire_requests a second time.
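For reference, the shrinker's rescan loop already retires requests on
each pass, roughly like this (a paraphrased sketch of the existing
code, not part of this patch):

	rescan:
		/* first scan for clean buffers */
		i915_gem_retire_requests(dev);
		/* ... scan and unbind inactive objects ... */
		if (i915_gpu_idle(dev) == 0)
			goto rescan;	/* retires again at the top */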
v2: don't modify any behavior except in i915_gem_inactive_shrink() (Daniel)
Signed-off-by: Ben Widawsky <ben@bwidawsk.net>
Signed-off-by: Daniel Vetter <daniel.vetter@ffwll.ch>
unregister_shrinker(&dev_priv->mm.inactive_shrinker);
mutex_lock(&dev->struct_mutex);
- ret = i915_gpu_idle(dev, true);
+ ret = i915_gpu_idle(dev);
if (ret)
DRM_ERROR("failed to idle hardware: %d\n", ret);
+ i915_gem_retire_requests(dev);
mutex_unlock(&dev->struct_mutex);
/* Cancel the retire work handler, which should be idle now. */
void i915_gem_init_swizzling(struct drm_device *dev);
void i915_gem_init_ppgtt(struct drm_device *dev);
void i915_gem_cleanup_ringbuffer(struct drm_device *dev);
-int __must_check i915_gpu_idle(struct drm_device *dev, bool do_retire);
+int __must_check i915_gpu_idle(struct drm_device *dev);
int __must_check i915_gem_idle(struct drm_device *dev);
int __must_check i915_add_request(struct intel_ring_buffer *ring,
struct drm_file *file,
struct drm_i915_gem_request *request);
int __must_check i915_wait_request(struct intel_ring_buffer *ring,
- uint32_t seqno,
- bool do_retire);
+ uint32_t seqno);
int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf);
int __must_check
i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj,
*/
int
i915_wait_request(struct intel_ring_buffer *ring,
- uint32_t seqno,
- bool do_retire)
+ uint32_t seqno)
{
drm_i915_private_t *dev_priv = ring->dev->dev_private;
u32 ier;
if (atomic_read(&dev_priv->mm.wedged))
ret = -EAGAIN;
- /* Directly dispatch request retiring. While we have the work queue
- * to handle this, the waiter on a request often wants an associated
- * buffer to have made it to the inactive list, and we would need
- * a separate wait queue to handle that.
- */
- if (ret == 0 && do_retire)
- i915_gem_retire_requests_ring(ring);
-
* it.
*/
if (obj->active) {
- ret = i915_wait_request(obj->ring, obj->last_rendering_seqno,
- true);
+ ret = i915_wait_request(obj->ring, obj->last_rendering_seqno);
if (ret)
return ret;
+ i915_gem_retire_requests_ring(obj->ring);
-static int i915_ring_idle(struct intel_ring_buffer *ring, bool do_retire)
+static int i915_ring_idle(struct intel_ring_buffer *ring)
- return i915_wait_request(ring, i915_gem_next_request_seqno(ring),
- do_retire);
+ return i915_wait_request(ring, i915_gem_next_request_seqno(ring));
-int i915_gpu_idle(struct drm_device *dev, bool do_retire)
+int i915_gpu_idle(struct drm_device *dev)
{
drm_i915_private_t *dev_priv = dev->dev_private;
int ret, i;
/* Flush everything onto the inactive list. */
for (i = 0; i < I915_NUM_RINGS; i++) {
- ret = i915_ring_idle(&dev_priv->ring[i], do_retire);
+ ret = i915_ring_idle(&dev_priv->ring[i]);
}
if (obj->last_fenced_seqno) {
- ret = i915_wait_request(obj->ring,
- obj->last_fenced_seqno,
- false);
+ ret = i915_wait_request(obj->ring, obj->last_fenced_seqno);
- ret = i915_gpu_idle(dev, true);
+ ret = i915_gpu_idle(dev);
if (ret) {
mutex_unlock(&dev->struct_mutex);
return ret;
}
+ i915_gem_retire_requests(dev);
/* Under UMS, be paranoid and evict. */
if (!drm_core_check_feature(dev, DRIVER_MODESET))
* This has a dramatic impact to reduce the number of
* OOM-killer events whilst running the GPU aggressively.
*/
- if (i915_gpu_idle(dev, true) == 0)
+ if (i915_gpu_idle(dev) == 0)
goto rescan;
}
mutex_unlock(&dev->struct_mutex);
drm_i915_private_t *dev_priv = dev->dev_private;
struct drm_i915_gem_object *obj, *next;
bool lists_empty;
lists_empty = (list_empty(&dev_priv->mm.inactive_list) &&
list_empty(&dev_priv->mm.flushing_list) &&
trace_i915_gem_evict_everything(dev, purgeable_only);
- /* Flush everything (on to the inactive lists) and evict */
- ret = i915_gpu_idle(dev, true);
+ ret = i915_gpu_idle(dev);
if (ret)
return ret;
+ /* The gpu_idle will flush everything in the write domain to the
+ * active list. Then we must move everything off the active list
+ * with retire requests.
+ */
+ for (i = 0; i < I915_NUM_RINGS; i++)
+ if (WARN_ON(!list_empty(&dev_priv->ring[i].gpu_write_list)))
+ return -EBUSY;
+
+ i915_gem_retire_requests(dev);
+
BUG_ON(!list_empty(&dev_priv->mm.flushing_list));
/* Having flushed everything, unbind() should never raise an error */
* so every billion or so execbuffers, we need to stall
* the GPU in order to reset the counters.
*/
- ret = i915_gpu_idle(dev, true);
+ ret = i915_gpu_idle(dev);
+ i915_gem_retire_requests(dev);
BUG_ON(ring->sync_seqno[i]);
}
if (unlikely(dev_priv->mm.gtt->do_idle_maps)) {
dev_priv->mm.interruptible = false;
- if (i915_gpu_idle(dev_priv->dev, false)) {
+ if (i915_gpu_idle(dev_priv->dev)) {
DRM_ERROR("Couldn't idle GPU\n");
/* Wait a bit, in hopes it avoids the hang */
udelay(10);
}
overlay->last_flip_req = request->seqno;
overlay->flip_tail = tail;
- ret = i915_wait_request(LP_RING(dev_priv), overlay->last_flip_req,
- true);
+ ret = i915_wait_request(LP_RING(dev_priv), overlay->last_flip_req);
if (ret)
return ret;
+ i915_gem_retire_requests(dev);
overlay->last_flip_req = 0;
return 0;
if (overlay->last_flip_req == 0)
return 0;
- ret = i915_wait_request(LP_RING(dev_priv), overlay->last_flip_req,
- true);
+ ret = i915_wait_request(LP_RING(dev_priv), overlay->last_flip_req);
if (ret)
return ret;
+ i915_gem_retire_requests(dev);
if (overlay->flip_tail)
overlay->flip_tail(overlay);
was_interruptible = dev_priv->mm.interruptible;
dev_priv->mm.interruptible = false;
- ret = i915_wait_request(ring, seqno, true);
+ ret = i915_wait_request(ring, seqno);
dev_priv->mm.interruptible = was_interruptible;
+ if (!ret)
+ i915_gem_retire_requests_ring(ring);