+	if (WARN_ON(i915_gem_object_needs_bit17_swizzle(obj)))
+		return -EINVAL;
+
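+	/* Pull each page of the shmem backing store into the contiguous
+	 * physical allocation, flushing the CPU caches as we go so that
+	 * the copy is visible to the GPU, which reads the physical
+	 * object without snooping the CPU caches.
+	 */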
+	for (i = 0; i < obj->base.size / PAGE_SIZE; i++) {
+		struct page *page;
+		char *src;
+
+		page = shmem_read_mapping_page(mapping, i);
+		if (IS_ERR(page))
+			return PTR_ERR(page);
+
+		src = kmap_atomic(page);
+		memcpy(vaddr, src, PAGE_SIZE);
+		drm_clflush_virt_range(vaddr, PAGE_SIZE);
+		kunmap_atomic(src);
+
+		page_cache_release(page);
+		vaddr += PAGE_SIZE;
+	}
+
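+	/* Flush the chipset write buffers so that the clflushed data
+	 * actually reaches memory before the GPU accesses it.
+	 */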
+	i915_gem_chipset_flush(obj->base.dev);
+
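+	/* The backing storage is one contiguous chunk, so a single
+	 * scatterlist entry is sufficient to describe it.
+	 */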
+	st = kmalloc(sizeof(*st), GFP_KERNEL);
+	if (st == NULL)
+		return -ENOMEM;
+
+	if (sg_alloc_table(st, 1, GFP_KERNEL)) {
+		kfree(st);
+		return -ENOMEM;
+	}
+
+	sg = st->sgl;
+	sg->offset = 0;
+	sg->length = obj->base.size;
+
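+	/* The bus address was assigned when the physical object was
+	 * allocated, so reuse it directly rather than creating a fresh
+	 * DMA mapping (hence has_dma_mapping below).
+	 */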
+	sg_dma_address(sg) = obj->phys_handle->busaddr;
+	sg_dma_len(sg) = obj->base.size;
+
+	obj->pages = st;
+	obj->has_dma_mapping = true;
+	return 0;
+}
+
+static void
+i915_gem_object_put_pages_phys(struct drm_i915_gem_object *obj)
+{
+	int ret;
+
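+	/* Releasing the pages of an already purged object would be a
+	 * driver bug: purging dropped the backing storage.
+	 */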
+	BUG_ON(obj->madv == __I915_MADV_PURGED);
+
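+	/* Move the object back to the CPU domain, flushing any GPU
+	 * writes to the physical area before its contents are copied
+	 * back out to shmem.
+	 */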
+	ret = i915_gem_object_set_to_cpu_domain(obj, true);
+	if (ret) {
+		/* In the event of a disaster, abandon all caches and
+		 * hope for the best.
+		 */
+		WARN_ON(ret != -EIO);
+		obj->base.read_domains = obj->base.write_domain = I915_GEM_DOMAIN_CPU;
+	}