 	mutex_unlock(&dev->struct_mutex);
 	if (ret) {
 		DRM_ERROR("i915_dispatch_cmdbuffer failed\n");
-		goto fail_batch_free;
+		goto fail_clip_free;
 	}
 	if (sarea_priv)
 		sarea_priv->last_dispatch = READ_BREADCRUMB(dev_priv);
-fail_batch_free:
-	drm_free(batch_data, cmdbuf->sz, DRM_MEM_DRIVER);
 fail_clip_free:
 	drm_free(cliprects,
 		 cmdbuf->num_cliprects * sizeof(struct drm_clip_rect),
 		 DRM_MEM_DRIVER);
+fail_batch_free:
+	drm_free(batch_data, cmdbuf->sz, DRM_MEM_DRIVER);
 	return ret;
 }
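The label swap above is the classic reverse-order unwind idiom: cleanup labels are listed in the opposite order of the allocations, so each failure goto frees exactly what exists so far, and the success path falls through both frees before returning. A minimal standalone sketch of the pattern (dispatch() is a hypothetical stand-in, with plain malloc/free in place of drm_free):

#include <stdlib.h>

extern int dispatch(void *batch, void *clips);	/* hypothetical stand-in */

int copy_and_dispatch(size_t sz, size_t nclip)
{
	void *batch, *clips;
	int ret = -1;

	batch = malloc(sz);
	if (batch == NULL)
		return -1;

	clips = malloc(nclip * sizeof(int));
	if (clips == NULL)
		goto fail_batch_free;	/* only the batch copy exists yet */

	ret = dispatch(batch, clips);
	if (ret)
		goto fail_clip_free;	/* both exist: free clips, then batch */

	/* success-only bookkeeping (e.g. the sarea update) goes here */

fail_clip_free:
	free(clips);
fail_batch_free:
	free(batch);
	return ret;
}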
 	 * Some of the preallocated space is taken by the GTT
 	 * and popup. GTT is 1K per MB of aperture size, and popup is 4K.
 	 */
-	if (IS_G4X(dev) || IS_IGD(dev))
+	if (IS_G4X(dev) || IS_IGD(dev) || IS_IGDNG(dev))
 		overhead = 4096;
 	else
 		overhead = (*aperture_size / 1024) + 4096;
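Spelled out with illustrative numbers (256 MB is only an example, and the gtt_in_stolen flag is a hypothetical stand-in for the chipset check above): a 256 MB aperture costs 256 KB of GTT plus the 4 KB popup, while on G4X/IGD/IGDNG only the popup is charged against the preallocated space.

/* worked example of the branch above; numbers and names are illustrative */
unsigned long stolen_overhead(unsigned long aperture_size, int gtt_in_stolen)
{
	if (!gtt_in_stolen)
		return 4096;				/* popup only */
	return (aperture_size / 1024) + 4096;		/* 1 KB per MB + popup */
}
/* stolen_overhead(256UL << 20, 1) == 262144 + 4096, i.e. 256 KB + 4 KB */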
 	/* Basic memrange allocator for stolen space (aka vram) */
 	drm_mm_init(&dev_priv->vram, 0, prealloc_size);
-	/* Let GEM Manage from end of prealloc space to end of aperture */
-	i915_gem_do_init(dev, prealloc_size, agp_size);
+	/* Let GEM Manage from end of prealloc space to end of aperture.
+	 *
+	 * However, leave one page at the end still bound to the scratch page.
+	 * There are a number of places where the hardware apparently
+	 * prefetches past the end of the object, and we've seen multiple
+	 * hangs with the GPU head pointer stuck in a batchbuffer bound
+	 * at the last page of the aperture. One page should be enough to
+	 * keep any prefetching inside of the aperture.
+	 */
+	i915_gem_do_init(dev, prealloc_size, agp_size - 4096);
 	ret = i915_gem_init_ringbuffer(dev);
 	if (ret)
 }
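The net effect of the hunk above is a three-way split of the aperture. A sketch of the layout (the range boundaries follow directly from the two calls; the guard-page role is from the new comment):

/*
 *   [0, prealloc_size)                 stolen memory, handed to the
 *                                      dev_priv->vram drm_mm allocator
 *   [prealloc_size, agp_size - 4096)   GTT space managed by GEM
 *   [agp_size - 4096, agp_size)        guard page, left bound to the
 *                                      scratch page to soak up prefetch
 */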
 	/* Must be done after probing outputs */
-	intel_opregion_init(dev, 0);
+	/* FIXME: verify on IGDNG */
+	if (!IS_IGDNG(dev))
+		intel_opregion_init(dev, 0);
 	return 0;
 	if (dev_priv->regs != NULL)
 		iounmap(dev_priv->regs);
-	intel_opregion_free(dev, 0);
+	if (!IS_IGDNG(dev))
+		intel_opregion_free(dev, 0);
 	if (drm_core_check_feature(dev, DRIVER_MODESET)) {
 		intel_modeset_cleanup(dev);
 	file_priv->driver_priv = i915_file_priv;
-	i915_file_priv->mm.last_gem_seqno = 0;
-	i915_file_priv->mm.last_gem_throttle_seqno = 0;
+	INIT_LIST_HEAD(&i915_file_priv->mm.request_list);
 	return 0;
 }
 void i915_driver_preclose(struct drm_device * dev, struct drm_file *file_priv)
 {
 	drm_i915_private_t *dev_priv = dev->dev_private;
+	i915_gem_release(dev, file_priv);
 	if (!drm_core_check_feature(dev, DRIVER_MODESET))
 		i915_mem_release(dev, file_priv, dev_priv->agp_heap);
 }
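Taken together, the two hunks above give each client a request list that is initialized at open and drained at close, so later request retirement never chases a pointer into a freed file_priv. A sketch of the shape such a release helper takes (not necessarily the driver's exact code; list_empty()/list_del_init() are the stock linux/list.h helpers):

void example_gem_release(struct drm_device *dev, struct drm_file *file_priv)
{
	struct drm_i915_file_private *i915_file_priv = file_priv->driver_priv;

	/* drop our requests under the lock so retirement can't race us */
	mutex_lock(&dev->struct_mutex);
	while (!list_empty(&i915_file_priv->mm.request_list))
		list_del_init(i915_file_priv->mm.request_list.next);
	mutex_unlock(&dev->struct_mutex);
}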
 	DRM_IOCTL_DEF(DRM_I915_GEM_SET_TILING, i915_gem_set_tiling, 0),
 	DRM_IOCTL_DEF(DRM_I915_GEM_GET_TILING, i915_gem_get_tiling, 0),
 	DRM_IOCTL_DEF(DRM_I915_GEM_GET_APERTURE, i915_gem_get_aperture_ioctl, 0),
+	DRM_IOCTL_DEF(DRM_I915_GET_PIPE_FROM_CRTC_ID, intel_get_pipe_from_crtc_id, 0),
 };
 int i915_max_ioctl = DRM_ARRAY_SIZE(i915_ioctls);
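For reference, a hedged userspace sketch of driving the new ioctl. This assumes the uapi half of this series declares struct drm_i915_get_pipe_from_crtc_id with a crtc_id input and pipe output, plus a matching DRM_IOCTL_I915_GET_PIPE_FROM_CRTC_ID wrapper; verify both against the i915_drm.h that ships with the patch.

#include <sys/ioctl.h>
#include "i915_drm.h"	/* uapi header assumed to ship with this series */

int pipe_for_crtc(int fd, unsigned int crtc_id)
{
	struct drm_i915_get_pipe_from_crtc_id get_pipe;

	get_pipe.crtc_id = crtc_id;
	if (ioctl(fd, DRM_IOCTL_I915_GET_PIPE_FROM_CRTC_ID, &get_pipe) != 0)
		return -1;	/* errno describes the failure */
	return get_pipe.pipe;
}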