}
static int
-i915_gem_wait_for_error(struct drm_device *dev)
+i915_gem_wait_for_error(struct i915_gpu_error *error)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
- struct completion *x = &dev_priv->error_completion;
+ struct completion *x = &error->completion;
unsigned long flags;
int ret;
- if (!atomic_read(&dev_priv->mm.wedged))
+ if (!atomic_read(&error->wedged))
return 0;
/*
return ret;
}
- if (atomic_read(&dev_priv->mm.wedged)) {
+ if (atomic_read(&error->wedged)) {
/* GPU is hung, bump the completion count to account for
* the token we just consumed so that we never hit zero and
 * end up waiting upon a subsequent completion event that
 * will never happen. */
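
For orientation: the fields these hunks keep reaching for now live in a dedicated `struct i915_gpu_error`. A minimal sketch of the layout implied by this patch (only members named in the hunks; anything else, including ordering, is an assumption):

	struct i915_gpu_error {
		/* Hang detection; re-armed from the request retire path. */
		struct timer_list hangcheck_timer;

		/* Completed by the reset handler once recovery finishes. */
		struct completion completion;

		/* Non-zero while the GPU is hung. */
		atomic_t wedged;
	};
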
int i915_mutex_lock_interruptible(struct drm_device *dev)
{
+ struct drm_i915_private *dev_priv = dev->dev_private;
int ret;
- ret = i915_gem_wait_for_error(dev);
+ ret = i915_gem_wait_for_error(&dev_priv->gpu_error);
if (ret)
return ret;
i915_gem_init_ioctl(struct drm_device *dev, void *data,
struct drm_file *file)
{
+ struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_i915_gem_init *args = data;
if (drm_core_check_feature(dev, DRIVER_MODESET))
mutex_lock(&dev->struct_mutex);
i915_gem_setup_global_gtt(dev, args->gtt_start, args->gtt_end,
args->gtt_end);
+ dev_priv->gtt.mappable_end = args->gtt_end;
mutex_unlock(&dev->struct_mutex);
return 0;
pinned += obj->gtt_space->size;
mutex_unlock(&dev->struct_mutex);
- args->aper_size = dev_priv->mm.gtt_total;
+ args->aper_size = dev_priv->gtt.total;
args->aper_available_size = args->aper_size - pinned;
return 0;
* source page isn't available. Return the error and we'll
* retry in the slow path.
*/
- if (fast_user_write(dev_priv->mm.gtt_mapping, page_base,
+ if (fast_user_write(dev_priv->gtt.mappable, page_base,
page_offset, user_data, page_length)) {
ret = -EFAULT;
goto out_unpin;
}
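
The aperture bookkeeping gets the same treatment: the scattered `dev_priv->mm.gtt_*` fields collapse into a single `dev_priv->gtt`. A rough sketch of the members this section relies on (names taken from the hunks; the types are assumptions):

	struct i915_gtt {
		/* Total size of the global GTT. */
		size_t total;

		/* CPU-visible slice of the aperture. */
		unsigned long mappable_end;
		phys_addr_t mappable_base;
		struct io_mapping *mappable;
	};
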
int
-i915_gem_check_wedge(struct drm_i915_private *dev_priv,
+i915_gem_check_wedge(struct i915_gpu_error *error,
bool interruptible)
{
- if (atomic_read(&dev_priv->mm.wedged)) {
- struct completion *x = &dev_priv->error_completion;
+ if (atomic_read(&error->wedged)) {
+ struct completion *x = &error->completion;
bool recovery_complete;
unsigned long flags;
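
The hunk is cut short here, but the role of `recovery_complete` can be sketched: peek at the completion's `done` count without consuming a token, which tells the caller whether the reset worker has already signalled. A hypothetical helper under that assumption (not part of the patch):

	/* Hypothetical: true once the reset worker has completed
	 * error->completion at least once. */
	static bool reset_has_completed(struct i915_gpu_error *error)
	{
		struct completion *x = &error->completion;
		unsigned long flags;
		bool done;

		/* x->done is guarded by the completion's waitqueue lock,
		 * the same lock the bump in i915_gem_wait_for_error takes. */
		spin_lock_irqsave(&x->wait.lock, flags);
		done = x->done > 0;
		spin_unlock_irqrestore(&x->wait.lock, flags);

		return done;
	}
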
#define EXIT_COND \
(i915_seqno_passed(ring->get_seqno(ring, false), seqno) || \
- atomic_read(&dev_priv->mm.wedged))
+ atomic_read(&dev_priv->gpu_error.wedged))
do {
if (interruptible)
end = wait_event_interruptible_timeout(ring->irq_queue,
						       EXIT_COND,
						       timeout_jiffies);
	else
end = wait_event_timeout(ring->irq_queue, EXIT_COND,
timeout_jiffies);
- ret = i915_gem_check_wedge(dev_priv, interruptible);
+ ret = i915_gem_check_wedge(&dev_priv->gpu_error, interruptible);
if (ret)
end = ret;
} while (end == 0 && wait_forever);
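
Note how waiters escape this loop on a hang: nothing in the loop itself wakes them, so the error handler must flag the hang and poke `ring->irq_queue` so that EXIT_COND is re-evaluated. A sketch of that contract (helper name and exact signature are assumptions, not part of this patch):

	/* Sketch: mark the GPU wedged and kick a sleeping waiter. */
	static void mark_wedged_and_wake(struct drm_i915_private *dev_priv,
					 struct intel_ring_buffer *ring)
	{
		atomic_set(&dev_priv->gpu_error.wedged, 1);
		wake_up_all(&ring->irq_queue);
	}
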
BUG_ON(!mutex_is_locked(&dev->struct_mutex));
BUG_ON(seqno == 0);
- ret = i915_gem_check_wedge(dev_priv, interruptible);
+ ret = i915_gem_check_wedge(&dev_priv->gpu_error, interruptible);
if (ret)
return ret;
if (seqno == 0)
return 0;
- ret = i915_gem_check_wedge(dev_priv, true);
+ ret = i915_gem_check_wedge(&dev_priv->gpu_error, true);
if (ret)
return ret;
obj->fault_mappable = true;
- pfn = ((dev_priv->mm.gtt_base_addr + obj->gtt_offset) >> PAGE_SHIFT) +
+ pfn = ((dev_priv->gtt.mappable_base + obj->gtt_offset) >> PAGE_SHIFT) +
page_offset;
/* Finally, remap it using the new GTT offset */
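
The pfn arithmetic above remaps the faulting CPU page directly onto the GTT aperture. A worked example with illustrative numbers:

	/* Illustrative only: aperture base 0xe0000000, object bound at
	 * GTT offset 0x100000, fault three pages into the object, 4K pages:
	 *
	 *   pfn = ((0xe0000000 + 0x100000) >> 12) + 3
	 *       = 0xe0100 + 3
	 *       = 0xe0103
	 */
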
/* If this -EIO is due to a gpu hang, give the reset code a
* chance to clean up the mess. Otherwise return the proper
* SIGBUS. */
- if (!atomic_read(&dev_priv->mm.wedged))
+ if (!atomic_read(&dev_priv->gpu_error.wedged))
return VM_FAULT_SIGBUS;
case -EAGAIN:
/* Give the error handler a chance to run and move the
obj->fault_mappable = false;
}
-static uint32_t
+uint32_t
i915_gem_get_gtt_size(struct drm_device *dev, uint32_t size, int tiling_mode)
{
uint32_t gtt_size;
goto unlock;
}
- if (obj->base.size > dev_priv->mm.gtt_mappable_end) {
+ if (obj->base.size > dev_priv->gtt.mappable_end) {
ret = -E2BIG;
goto out;
}
kfree(obj->pages);
}
-static int
+int
i915_gem_object_put_pages(struct drm_i915_gem_object *obj)
{
const struct drm_i915_gem_object_ops *ops = obj->ops;
if (obj->pages)
return 0;
+ if (obj->madv != I915_MADV_WILLNEED) {
+ DRM_ERROR("Attempting to obtain a purgeable object\n");
+ return -EINVAL;
+ }
+
BUG_ON(obj->pages_pin_count);
ret = ops->get_pages(obj);
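
With `i915_gem_object_get_pages` now rejecting purgeable objects itself (the equivalent check disappears from the bind path below), every caller that needs backing pages inherits the check. A hypothetical call site:

	/* Any pin/bind path now looks roughly like this: */
	ret = i915_gem_object_get_pages(obj);
	if (ret)
		return ret;	/* -EINVAL if the object was marked purgeable */
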
if (!dev_priv->mm.suspended) {
if (i915_enable_hangcheck) {
- mod_timer(&dev_priv->hangcheck_timer,
+ mod_timer(&dev_priv->gpu_error.hangcheck_timer,
round_jiffies_up(jiffies + DRM_I915_HANGCHECK_JIFFIES));
}
if (was_empty) {
i915_gem_object_unbind(struct drm_i915_gem_object *obj)
{
drm_i915_private_t *dev_priv = obj->base.dev->dev_private;
- int ret = 0;
+ int ret;
if (obj->gtt_space == NULL)
return 0;
bool mappable, fenceable;
int ret;
- if (obj->madv != I915_MADV_WILLNEED) {
- DRM_ERROR("Attempting to bind a purgeable object\n");
- return -EINVAL;
- }
-
fence_size = i915_gem_get_gtt_size(dev,
obj->base.size,
obj->tiling_mode);
* before evicting everything in a vain attempt to find space.
*/
if (obj->base.size >
- (map_and_fenceable ? dev_priv->mm.gtt_mappable_end : dev_priv->mm.gtt_total)) {
+ (map_and_fenceable ? dev_priv->gtt.mappable_end : dev_priv->gtt.total)) {
DRM_ERROR("Attempting to bind an object larger than the aperture\n");
return -E2BIG;
}
if (map_and_fenceable)
ret = drm_mm_insert_node_in_range_generic(&dev_priv->mm.gtt_space, node,
size, alignment, obj->cache_level,
- 0, dev_priv->mm.gtt_mappable_end);
+ 0, dev_priv->gtt.mappable_end);
else
ret = drm_mm_insert_node_generic(&dev_priv->mm.gtt_space, node,
size, alignment, obj->cache_level);
(node->start & (fence_alignment - 1)) == 0;
mappable =
- obj->gtt_offset + obj->base.size <= dev_priv->mm.gtt_mappable_end;
+ obj->gtt_offset + obj->base.size <= dev_priv->gtt.mappable_end;
obj->map_and_fenceable = mappable && fenceable;
u32 seqno = 0;
int ret;
- if (atomic_read(&dev_priv->mm.wedged))
+ if (atomic_read(&dev_priv->gpu_error.wedged))
return -EIO;
spin_lock(&file_priv->mm.lock);
* And not confound mm.suspended!
*/
dev_priv->mm.suspended = 1;
- del_timer_sync(&dev_priv->hangcheck_timer);
+ del_timer_sync(&dev_priv->gpu_error.hangcheck_timer);
i915_kernel_lost_context(dev);
i915_gem_cleanup_ringbuffer(dev);
if (drm_core_check_feature(dev, DRIVER_MODESET))
return 0;
- if (atomic_read(&dev_priv->mm.wedged)) {
+ if (atomic_read(&dev_priv->gpu_error.wedged)) {
DRM_ERROR("Reenabling wedged hardware, good luck\n");
- atomic_set(&dev_priv->mm.wedged, 0);
+ atomic_set(&dev_priv->gpu_error.wedged, 0);
}
mutex_lock(&dev->struct_mutex);
INIT_LIST_HEAD(&dev_priv->fence_regs[i].lru_list);
INIT_DELAYED_WORK(&dev_priv->mm.retire_work,
i915_gem_retire_work_handler);
- init_completion(&dev_priv->error_completion);
+ init_completion(&dev_priv->gpu_error.completion);
/* On GEN3 we really need to make sure the ARB C3 LP bit is set */
if (IS_GEN3(dev)) {