drm/i915: move dev_priv->suspend around
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c
index 6e858e17bb0ceb4f383e9f8361cc4102fb79496c..3d8bd62a926cdd48ebd139f23e15156ff6199c6f 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.c
@@ -1,5 +1,6 @@
 /*
  * Copyright © 2010 Daniel Vetter
+ * Copyright © 2011-2014 Intel Corporation
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),
 #include "i915_trace.h"
 #include "intel_drv.h"
 
+bool intel_enable_ppgtt(struct drm_device *dev, bool full)
+{
+       if (i915.enable_ppgtt == 0 || !HAS_ALIASING_PPGTT(dev))
+               return false;
+
+       if (i915.enable_ppgtt == 1 && full)
+               return false;
+
+#ifdef CONFIG_INTEL_IOMMU
+       /* Disable ppgtt on SNB if VT-d is on. */
+       if (INTEL_INFO(dev)->gen == 6 && intel_iommu_gfx_mapped) {
+               DRM_INFO("Disabling PPGTT because VT-d is on\n");
+               return false;
+       }
+#endif
+
+       /* Full ppgtt disabled by default for now due to issues. */
+       if (full)
+               return false; /* HAS_PPGTT(dev) */
+       else
+               return HAS_ALIASING_PPGTT(dev);
+}
+
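The new intel_enable_ppgtt() helper centralizes the i915.enable_ppgtt module-parameter policy visible above: 0 disables PPGTT outright, 1 permits only aliasing PPGTT, and full PPGTT additionally remains off by default for now. A minimal caller sketch (hypothetical helper, not part of this patch) showing how the two-flag query maps back to a mode:

    /* Hypothetical sketch: derive a PPGTT mode from intel_enable_ppgtt(). */
    static int pick_ppgtt_mode(struct drm_device *dev)
    {
            if (intel_enable_ppgtt(dev, true))      /* full PPGTT usable */
                    return 2;
            if (intel_enable_ppgtt(dev, false))     /* aliasing PPGTT only */
                    return 1;
            return 0;                               /* global GTT only */
    }
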
 #define GEN6_PPGTT_PD_ENTRIES 512
 #define I915_PPGTT_PT_ENTRIES (PAGE_SIZE / sizeof(gen6_gtt_pte_t))
 typedef uint64_t gen8_gtt_pte_t;
@@ -64,7 +88,19 @@ typedef gen8_gtt_pte_t gen8_ppgtt_pde_t;
 
 #define GEN8_PTES_PER_PAGE             (PAGE_SIZE / sizeof(gen8_gtt_pte_t))
 #define GEN8_PDES_PER_PAGE             (PAGE_SIZE / sizeof(gen8_ppgtt_pde_t))
-#define GEN8_LEGACY_PDPS               4
+
+/* GEN8 legacy style address is defined as a 3-level page table:
+ * 31:30 | 29:21 | 20:12 |  11:0
+ * PDPE  |  PDE  |  PTE  | offset
+ * The difference compared to a normal x86 3-level page table is that the
+ * PDPEs are programmed via register.
+ */
+#define GEN8_PDPE_SHIFT                        30
+#define GEN8_PDPE_MASK                 0x3
+#define GEN8_PDE_SHIFT                 21
+#define GEN8_PDE_MASK                  0x1ff
+#define GEN8_PTE_SHIFT                 12
+#define GEN8_PTE_MASK                  0x1ff
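A 32-bit GPU address thus splits into PDPE (one of 4 page-directory pages), PDE (one of 512 page tables per directory) and PTE (one of 512 pages per table): 4 * 512 * 512 * 4096 = 4GB, the full legacy 32b space. A worked decode using the masks above (the address value is just an illustration):

    uint64_t addr = 0x40321000;
    unsigned pdpe = (addr >> GEN8_PDPE_SHIFT) & GEN8_PDPE_MASK;  /* 1     */
    unsigned pde  = (addr >> GEN8_PDE_SHIFT)  & GEN8_PDE_MASK;   /* 1     */
    unsigned pte  = (addr >> GEN8_PTE_SHIFT)  & GEN8_PTE_MASK;   /* 0x121 */
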
 
 #define PPAT_UNCACHED_INDEX            (_PAGE_PWT | _PAGE_PCD)
 #define PPAT_CACHED_PDE_INDEX          0 /* WB LLC */
@@ -254,185 +290,337 @@ static int gen8_mm_switch(struct i915_hw_ppgtt *ppgtt,
 }
 
 static void gen8_ppgtt_clear_range(struct i915_address_space *vm,
-                                  unsigned first_entry,
-                                  unsigned num_entries,
+                                  uint64_t start,
+                                  uint64_t length,
                                   bool use_scratch)
 {
        struct i915_hw_ppgtt *ppgtt =
                container_of(vm, struct i915_hw_ppgtt, base);
        gen8_gtt_pte_t *pt_vaddr, scratch_pte;
-       unsigned act_pt = first_entry / GEN8_PTES_PER_PAGE;
-       unsigned first_pte = first_entry % GEN8_PTES_PER_PAGE;
+       unsigned pdpe = start >> GEN8_PDPE_SHIFT & GEN8_PDPE_MASK;
+       unsigned pde = start >> GEN8_PDE_SHIFT & GEN8_PDE_MASK;
+       unsigned pte = start >> GEN8_PTE_SHIFT & GEN8_PTE_MASK;
+       unsigned num_entries = length >> PAGE_SHIFT;
        unsigned last_pte, i;
 
        scratch_pte = gen8_pte_encode(ppgtt->base.scratch.addr,
                                      I915_CACHE_LLC, use_scratch);
 
        while (num_entries) {
-               struct page *page_table = &ppgtt->gen8_pt_pages[act_pt];
+               struct page *page_table = ppgtt->gen8_pt_pages[pdpe][pde];
 
-               last_pte = first_pte + num_entries;
+               last_pte = pte + num_entries;
                if (last_pte > GEN8_PTES_PER_PAGE)
                        last_pte = GEN8_PTES_PER_PAGE;
 
                pt_vaddr = kmap_atomic(page_table);
 
-               for (i = first_pte; i < last_pte; i++)
+               for (i = pte; i < last_pte; i++) {
                        pt_vaddr[i] = scratch_pte;
+                       num_entries--;
+               }
 
                kunmap_atomic(pt_vaddr);
 
-               num_entries -= last_pte - first_pte;
-               first_pte = 0;
-               act_pt++;
+               pte = 0;
+               if (++pde == GEN8_PDES_PER_PAGE) {
+                       pdpe++;
+                       pde = 0;
+               }
        }
 }
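Unlike the old flat first_entry walk, the loop above carries pte into pde and pde into pdpe explicitly. Worked example, with 512 PTEs and 512 PDEs per page as defined above: clearing num_entries = 520 starting at pdpe=0, pde=511, pte=500 scrubs PTEs 500..511 of that last table (12 entries), wraps to pdpe=1, pde=0, pte=0, and scrubs the remaining 508 entries there.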
 
 static void gen8_ppgtt_insert_entries(struct i915_address_space *vm,
                                      struct sg_table *pages,
-                                     unsigned first_entry,
+                                     uint64_t start,
                                      enum i915_cache_level cache_level)
 {
        struct i915_hw_ppgtt *ppgtt =
                container_of(vm, struct i915_hw_ppgtt, base);
        gen8_gtt_pte_t *pt_vaddr;
-       unsigned act_pt = first_entry / GEN8_PTES_PER_PAGE;
-       unsigned act_pte = first_entry % GEN8_PTES_PER_PAGE;
+       unsigned pdpe = start >> GEN8_PDPE_SHIFT & GEN8_PDPE_MASK;
+       unsigned pde = start >> GEN8_PDE_SHIFT & GEN8_PDE_MASK;
+       unsigned pte = start >> GEN8_PTE_SHIFT & GEN8_PTE_MASK;
        struct sg_page_iter sg_iter;
 
        pt_vaddr = NULL;
+
        for_each_sg_page(pages->sgl, &sg_iter, pages->nents, 0) {
+               if (WARN_ON(pdpe >= GEN8_LEGACY_PDPS))
+                       break;
+
                if (pt_vaddr == NULL)
-                       pt_vaddr = kmap_atomic(&ppgtt->gen8_pt_pages[act_pt]);
+                       pt_vaddr = kmap_atomic(ppgtt->gen8_pt_pages[pdpe][pde]);
 
-               pt_vaddr[act_pte] =
+               pt_vaddr[pte] =
                        gen8_pte_encode(sg_page_iter_dma_address(&sg_iter),
                                        cache_level, true);
-               if (++act_pte == GEN8_PTES_PER_PAGE) {
+               if (++pte == GEN8_PTES_PER_PAGE) {
                        kunmap_atomic(pt_vaddr);
                        pt_vaddr = NULL;
-                       act_pt++;
-                       act_pte = 0;
+                       if (++pde == GEN8_PDES_PER_PAGE) {
+                               pdpe++;
+                               pde = 0;
+                       }
+                       pte = 0;
                }
        }
        if (pt_vaddr)
                kunmap_atomic(pt_vaddr);
 }
 
+static void gen8_free_page_tables(struct page **pt_pages)
+{
+       int i;
+
+       if (pt_pages == NULL)
+               return;
+
+       for (i = 0; i < GEN8_PDES_PER_PAGE; i++)
+               if (pt_pages[i])
+                       __free_pages(pt_pages[i], 0);
+}
+
+static void gen8_ppgtt_free(const struct i915_hw_ppgtt *ppgtt)
+{
+       int i;
+
+       for (i = 0; i < ppgtt->num_pd_pages; i++) {
+               gen8_free_page_tables(ppgtt->gen8_pt_pages[i]);
+               kfree(ppgtt->gen8_pt_pages[i]);
+               kfree(ppgtt->gen8_pt_dma_addr[i]);
+       }
+
+       __free_pages(ppgtt->pd_pages, get_order(ppgtt->num_pd_pages << PAGE_SHIFT));
+}
+
+static void gen8_ppgtt_unmap_pages(struct i915_hw_ppgtt *ppgtt)
+{
+       struct pci_dev *hwdev = ppgtt->base.dev->pdev;
+       int i, j;
+
+       for (i = 0; i < ppgtt->num_pd_pages; i++) {
+               /* TODO: In the future we'll support sparse mappings, so this
+                * will have to change. */
+               if (!ppgtt->pd_dma_addr[i])
+                       continue;
+
+               pci_unmap_page(hwdev, ppgtt->pd_dma_addr[i], PAGE_SIZE,
+                              PCI_DMA_BIDIRECTIONAL);
+
+               for (j = 0; j < GEN8_PDES_PER_PAGE; j++) {
+                       dma_addr_t addr = ppgtt->gen8_pt_dma_addr[i][j];
+                       if (addr)
+                               pci_unmap_page(hwdev, addr, PAGE_SIZE,
+                                              PCI_DMA_BIDIRECTIONAL);
+               }
+       }
+}
+
 static void gen8_ppgtt_cleanup(struct i915_address_space *vm)
 {
        struct i915_hw_ppgtt *ppgtt =
                container_of(vm, struct i915_hw_ppgtt, base);
-       int i, j;
 
        list_del(&vm->global_link);
        drm_mm_takedown(&vm->mm);
 
-       for (i = 0; i < ppgtt->num_pd_pages ; i++) {
-               if (ppgtt->pd_dma_addr[i]) {
-                       pci_unmap_page(ppgtt->base.dev->pdev,
-                                      ppgtt->pd_dma_addr[i],
-                                      PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
+       gen8_ppgtt_unmap_pages(ppgtt);
+       gen8_ppgtt_free(ppgtt);
+}
 
-                       for (j = 0; j < GEN8_PDES_PER_PAGE; j++) {
-                               dma_addr_t addr = ppgtt->gen8_pt_dma_addr[i][j];
-                               if (addr)
-                                       pci_unmap_page(ppgtt->base.dev->pdev,
-                                                      addr,
-                                                      PAGE_SIZE,
-                                                      PCI_DMA_BIDIRECTIONAL);
+static struct page **__gen8_alloc_page_tables(void)
+{
+       struct page **pt_pages;
+       int i;
 
-                       }
-               }
-               kfree(ppgtt->gen8_pt_dma_addr[i]);
+       pt_pages = kcalloc(GEN8_PDES_PER_PAGE, sizeof(struct page *), GFP_KERNEL);
+       if (!pt_pages)
+               return ERR_PTR(-ENOMEM);
+
+       for (i = 0; i < GEN8_PDES_PER_PAGE; i++) {
+               pt_pages[i] = alloc_page(GFP_KERNEL);
+               if (!pt_pages[i])
+                       goto bail;
        }
 
-       __free_pages(ppgtt->gen8_pt_pages, get_order(ppgtt->num_pt_pages << PAGE_SHIFT));
-       __free_pages(ppgtt->pd_pages, get_order(ppgtt->num_pd_pages << PAGE_SHIFT));
+       return pt_pages;
+
+bail:
+       gen8_free_page_tables(pt_pages);
+       kfree(pt_pages);
+       return ERR_PTR(-ENOMEM);
 }
 
-/**
- * GEN8 legacy ppgtt programming is accomplished through 4 PDP registers with a
- * net effect resembling a 2-level page table in normal x86 terms. Each PDP
- * represents 1GB of memory
- * 4 * 512 * 512 * 4096 = 4GB legacy 32b address space.
- *
- * TODO: Do something with the size parameter
- **/
-static int gen8_ppgtt_init(struct i915_hw_ppgtt *ppgtt, uint64_t size)
+static int gen8_ppgtt_allocate_page_tables(struct i915_hw_ppgtt *ppgtt,
+                                          const int max_pdp)
 {
-       struct page *pt_pages;
-       int i, j, ret = -ENOMEM;
-       const int max_pdp = DIV_ROUND_UP(size, 1 << 30);
-       const int num_pt_pages = GEN8_PDES_PER_PAGE * max_pdp;
+       struct page **pt_pages[GEN8_LEGACY_PDPS];
+       int i, ret;
 
-       if (size % (1<<30))
-               DRM_INFO("Pages will be wasted unless GTT size (%llu) is divisible by 1GB\n", size);
+       for (i = 0; i < max_pdp; i++) {
+               pt_pages[i] = __gen8_alloc_page_tables();
+               if (IS_ERR(pt_pages[i])) {
+                       ret = PTR_ERR(pt_pages[i]);
+                       goto unwind_out;
+               }
+       }
 
-       /* FIXME: split allocation into smaller pieces. For now we only ever do
-        * this once, but with full PPGTT, the multiple contiguous allocations
-        * will be bad.
+       /* NB: Avoid touching gen8_pt_pages until the very end to keep the
+        * allocation "atomic", i.e. trivial to unwind on failure.
         */
+       for (i = 0; i < max_pdp; i++)
+               ppgtt->gen8_pt_pages[i] = pt_pages[i];
+
+       return 0;
+
+unwind_out:
+       while (i--) {
+               gen8_free_page_tables(pt_pages[i]);
+               kfree(pt_pages[i]);
+       }
+
+       return ret;
+}
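Publishing into ppgtt->gen8_pt_pages only after every per-PDP table array has been allocated is what makes the allocation "atomic": on failure, unwind_out frees exactly the arrays that succeeded, and the ppgtt structure itself is left untouched, so no caller ever sees a half-initialized page-table tree.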
+
+static int gen8_ppgtt_allocate_dma(struct i915_hw_ppgtt *ppgtt)
+{
+       int i;
+
+       for (i = 0; i < ppgtt->num_pd_pages; i++) {
+               ppgtt->gen8_pt_dma_addr[i] = kcalloc(GEN8_PDES_PER_PAGE,
+                                                    sizeof(dma_addr_t),
+                                                    GFP_KERNEL);
+               if (!ppgtt->gen8_pt_dma_addr[i])
+                       return -ENOMEM;
+       }
+
+       return 0;
+}
+
+static int gen8_ppgtt_allocate_page_directories(struct i915_hw_ppgtt *ppgtt,
+                                               const int max_pdp)
+{
        ppgtt->pd_pages = alloc_pages(GFP_KERNEL, get_order(max_pdp << PAGE_SHIFT));
        if (!ppgtt->pd_pages)
                return -ENOMEM;
 
-       pt_pages = alloc_pages(GFP_KERNEL, get_order(num_pt_pages << PAGE_SHIFT));
-       if (!pt_pages) {
+       ppgtt->num_pd_pages = 1 << get_order(max_pdp << PAGE_SHIFT);
+       BUG_ON(ppgtt->num_pd_pages > GEN8_LEGACY_PDPS);
+
+       return 0;
+}
+
+static int gen8_ppgtt_alloc(struct i915_hw_ppgtt *ppgtt,
+                           const int max_pdp)
+{
+       int ret;
+
+       ret = gen8_ppgtt_allocate_page_directories(ppgtt, max_pdp);
+       if (ret)
+               return ret;
+
+       ret = gen8_ppgtt_allocate_page_tables(ppgtt, max_pdp);
+       if (ret) {
                __free_pages(ppgtt->pd_pages, get_order(max_pdp << PAGE_SHIFT));
-               return -ENOMEM;
+               return ret;
        }
 
-       ppgtt->gen8_pt_pages = pt_pages;
-       ppgtt->num_pd_pages = 1 << get_order(max_pdp << PAGE_SHIFT);
-       ppgtt->num_pt_pages = 1 << get_order(num_pt_pages << PAGE_SHIFT);
        ppgtt->num_pd_entries = max_pdp * GEN8_PDES_PER_PAGE;
-       ppgtt->enable = gen8_ppgtt_enable;
-       ppgtt->switch_mm = gen8_mm_switch;
-       ppgtt->base.clear_range = gen8_ppgtt_clear_range;
-       ppgtt->base.insert_entries = gen8_ppgtt_insert_entries;
-       ppgtt->base.cleanup = gen8_ppgtt_cleanup;
-       ppgtt->base.start = 0;
-       ppgtt->base.total = ppgtt->num_pt_pages * GEN8_PTES_PER_PAGE * PAGE_SIZE;
 
-       BUG_ON(ppgtt->num_pd_pages > GEN8_LEGACY_PDPS);
+       ret = gen8_ppgtt_allocate_dma(ppgtt);
+       if (ret)
+               gen8_ppgtt_free(ppgtt);
 
-       /*
-        * - Create a mapping for the page directories.
-        * - For each page directory:
-        *      allocate space for page table mappings.
-        *      map each page table
-        */
-       for (i = 0; i < max_pdp; i++) {
-               dma_addr_t temp;
-               temp = pci_map_page(ppgtt->base.dev->pdev,
-                                   &ppgtt->pd_pages[i], 0,
-                                   PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
-               if (pci_dma_mapping_error(ppgtt->base.dev->pdev, temp))
-                       goto err_out;
+       return ret;
+}
 
-               ppgtt->pd_dma_addr[i] = temp;
+static int gen8_ppgtt_setup_page_directories(struct i915_hw_ppgtt *ppgtt,
+                                            const int pd)
+{
+       dma_addr_t pd_addr;
+       int ret;
 
-               ppgtt->gen8_pt_dma_addr[i] = kmalloc(sizeof(dma_addr_t) * GEN8_PDES_PER_PAGE, GFP_KERNEL);
-               if (!ppgtt->gen8_pt_dma_addr[i])
-                       goto err_out;
+       pd_addr = pci_map_page(ppgtt->base.dev->pdev,
+                              &ppgtt->pd_pages[pd], 0,
+                              PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
 
-               for (j = 0; j < GEN8_PDES_PER_PAGE; j++) {
-                       struct page *p = &pt_pages[i * GEN8_PDES_PER_PAGE + j];
-                       temp = pci_map_page(ppgtt->base.dev->pdev,
-                                           p, 0, PAGE_SIZE,
-                                           PCI_DMA_BIDIRECTIONAL);
+       ret = pci_dma_mapping_error(ppgtt->base.dev->pdev, pd_addr);
+       if (ret)
+               return ret;
+
+       ppgtt->pd_dma_addr[pd] = pd_addr;
+
+       return 0;
+}
+
+static int gen8_ppgtt_setup_page_tables(struct i915_hw_ppgtt *ppgtt,
+                                       const int pd,
+                                       const int pt)
+{
+       dma_addr_t pt_addr;
+       struct page *p;
+       int ret;
+
+       p = ppgtt->gen8_pt_pages[pd][pt];
+       pt_addr = pci_map_page(ppgtt->base.dev->pdev,
+                              p, 0, PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
+       ret = pci_dma_mapping_error(ppgtt->base.dev->pdev, pt_addr);
+       if (ret)
+               return ret;
+
+       ppgtt->gen8_pt_dma_addr[pd][pt] = pt_addr;
+
+       return 0;
+}
+
+/**
+ * GEN8 legacy ppgtt programming is accomplished through at most 4 PDP
+ * registers, with a net effect resembling a 2-level page table in normal x86
+ * terms. Each PDP represents 1GB of memory: 4 * 512 * 512 * 4096 = 4GB of
+ * legacy 32b address space.
+ *
+ * FIXME: split allocation into smaller pieces. For now we only ever do this
+ * once, but with full PPGTT, the multiple contiguous allocations will be bad.
+ * TODO: Do something with the size parameter
+ */
+static int gen8_ppgtt_init(struct i915_hw_ppgtt *ppgtt, uint64_t size)
+{
+       const int max_pdp = DIV_ROUND_UP(size, 1 << 30);
+       const int min_pt_pages = GEN8_PDES_PER_PAGE * max_pdp;
+       int i, j, ret;
+
+       if (size % (1<<30))
+               DRM_INFO("Pages will be wasted unless GTT size (%llu) is divisible by 1GB\n", size);
+
+       /* 1. Do all our allocations for page directories and page tables. */
+       ret = gen8_ppgtt_alloc(ppgtt, max_pdp);
+       if (ret)
+               return ret;
 
-                       if (pci_dma_mapping_error(ppgtt->base.dev->pdev, temp))
-                               goto err_out;
+       /*
+        * 2. Create DMA mappings for the page directories and page tables.
+        */
+       for (i = 0; i < max_pdp; i++) {
+               ret = gen8_ppgtt_setup_page_directories(ppgtt, i);
+               if (ret)
+                       goto bail;
 
-                       ppgtt->gen8_pt_dma_addr[i][j] = temp;
+               for (j = 0; j < GEN8_PDES_PER_PAGE; j++) {
+                       ret = gen8_ppgtt_setup_page_tables(ppgtt, i, j);
+                       if (ret)
+                               goto bail;
                }
        }
 
-       /* For now, the PPGTT helper functions all require that the PDEs are
+       /*
+        * 3. Map all the page directory entries to point to the page tables
+        * we've allocated.
+        *
+        * For now, the PPGTT helper functions all require that the PDEs are
         * plugged in correctly. So we do that now/here. For aliasing PPGTT, we
-        * will never need to touch the PDEs again */
+        * will never need to touch the PDEs again.
+        */
        for (i = 0; i < max_pdp; i++) {
                gen8_ppgtt_pde_t *pd_vaddr;
                pd_vaddr = kmap_atomic(&ppgtt->pd_pages[i]);
@@ -444,20 +632,26 @@ static int gen8_ppgtt_init(struct i915_hw_ppgtt *ppgtt, uint64_t size)
                kunmap_atomic(pd_vaddr);
        }
 
-       ppgtt->base.clear_range(&ppgtt->base, 0,
-                               ppgtt->num_pd_entries * GEN8_PTES_PER_PAGE,
-                               true);
+       ppgtt->enable = gen8_ppgtt_enable;
+       ppgtt->switch_mm = gen8_mm_switch;
+       ppgtt->base.clear_range = gen8_ppgtt_clear_range;
+       ppgtt->base.insert_entries = gen8_ppgtt_insert_entries;
+       ppgtt->base.cleanup = gen8_ppgtt_cleanup;
+       ppgtt->base.start = 0;
+       ppgtt->base.total = ppgtt->num_pd_entries * GEN8_PTES_PER_PAGE * PAGE_SIZE;
+
+       ppgtt->base.clear_range(&ppgtt->base, 0, ppgtt->base.total, true);
 
        DRM_DEBUG_DRIVER("Allocated %d pages for page directories (%d wasted)\n",
                         ppgtt->num_pd_pages, ppgtt->num_pd_pages - max_pdp);
        DRM_DEBUG_DRIVER("Allocated %d pages for page tables (%lld wasted)\n",
-                        ppgtt->num_pt_pages,
-                        (ppgtt->num_pt_pages - num_pt_pages) +
-                        size % (1<<30));
+                        ppgtt->num_pd_entries,
+                        (ppgtt->num_pd_entries - min_pt_pages) + size % (1<<30));
        return 0;
 
-err_out:
-       ppgtt->base.cleanup(&ppgtt->base);
+bail:
+       gen8_ppgtt_unmap_pages(ppgtt);
+       gen8_ppgtt_free(ppgtt);
        return ret;
 }
 
@@ -759,13 +953,15 @@ static int gen6_ppgtt_enable(struct i915_hw_ppgtt *ppgtt)
 
 /* PPGTT support for Sandybridge/Gen6 and later */
 static void gen6_ppgtt_clear_range(struct i915_address_space *vm,
-                                  unsigned first_entry,
-                                  unsigned num_entries,
+                                  uint64_t start,
+                                  uint64_t length,
                                   bool use_scratch)
 {
        struct i915_hw_ppgtt *ppgtt =
                container_of(vm, struct i915_hw_ppgtt, base);
        gen6_gtt_pte_t *pt_vaddr, scratch_pte;
+       unsigned first_entry = start >> PAGE_SHIFT;
+       unsigned num_entries = length >> PAGE_SHIFT;
        unsigned act_pt = first_entry / I915_PPGTT_PT_ENTRIES;
        unsigned first_pte = first_entry % I915_PPGTT_PT_ENTRIES;
        unsigned last_pte, i;
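Each implementation now converts the byte-based interface back to page indices at the top. A worked check, assuming PAGE_SHIFT = 12: start = 0x10000 and length = 0x5000 yield first_entry = 16 and num_entries = 5, exactly the values callers previously passed in directly.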
@@ -792,12 +988,13 @@ static void gen6_ppgtt_clear_range(struct i915_address_space *vm,
 
 static void gen6_ppgtt_insert_entries(struct i915_address_space *vm,
                                      struct sg_table *pages,
-                                     unsigned first_entry,
+                                     uint64_t start,
                                      enum i915_cache_level cache_level)
 {
        struct i915_hw_ppgtt *ppgtt =
                container_of(vm, struct i915_hw_ppgtt, base);
        gen6_gtt_pte_t *pt_vaddr;
+       unsigned first_entry = start >> PAGE_SHIFT;
        unsigned act_pt = first_entry / I915_PPGTT_PT_ENTRIES;
        unsigned act_pte = first_entry % I915_PPGTT_PT_ENTRIES;
        struct sg_page_iter sg_iter;
@@ -821,38 +1018,49 @@ static void gen6_ppgtt_insert_entries(struct i915_address_space *vm,
                kunmap_atomic(pt_vaddr);
 }
 
-static void gen6_ppgtt_cleanup(struct i915_address_space *vm)
+static void gen6_ppgtt_unmap_pages(struct i915_hw_ppgtt *ppgtt)
 {
-       struct i915_hw_ppgtt *ppgtt =
-               container_of(vm, struct i915_hw_ppgtt, base);
        int i;
 
-       list_del(&vm->global_link);
-       drm_mm_takedown(&ppgtt->base.mm);
-       drm_mm_remove_node(&ppgtt->node);
-
        if (ppgtt->pt_dma_addr) {
                for (i = 0; i < ppgtt->num_pd_entries; i++)
                        pci_unmap_page(ppgtt->base.dev->pdev,
                                       ppgtt->pt_dma_addr[i],
                                       4096, PCI_DMA_BIDIRECTIONAL);
        }
+}
+
+static void gen6_ppgtt_free(struct i915_hw_ppgtt *ppgtt)
+{
+       int i;
 
        kfree(ppgtt->pt_dma_addr);
        for (i = 0; i < ppgtt->num_pd_entries; i++)
                __free_page(ppgtt->pt_pages[i]);
        kfree(ppgtt->pt_pages);
-       kfree(ppgtt);
 }
 
-static int gen6_ppgtt_init(struct i915_hw_ppgtt *ppgtt)
+static void gen6_ppgtt_cleanup(struct i915_address_space *vm)
+{
+       struct i915_hw_ppgtt *ppgtt =
+               container_of(vm, struct i915_hw_ppgtt, base);
+
+       list_del(&vm->global_link);
+       drm_mm_takedown(&ppgtt->base.mm);
+       drm_mm_remove_node(&ppgtt->node);
+
+       gen6_ppgtt_unmap_pages(ppgtt);
+       gen6_ppgtt_free(ppgtt);
+}
+
+static int gen6_ppgtt_allocate_page_directories(struct i915_hw_ppgtt *ppgtt)
 {
 #define GEN6_PD_ALIGN (PAGE_SIZE * 16)
 #define GEN6_PD_SIZE (GEN6_PPGTT_PD_ENTRIES * PAGE_SIZE)
        struct drm_device *dev = ppgtt->base.dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        bool retried = false;
-       int i, ret;
+       int ret;
 
        /* PPGTT PDEs reside in the GGTT and consist of 512 entries. The
         * allocator works in address space sizes, so it's multiplied by page
@@ -868,7 +1076,7 @@ alloc:
        if (ret == -ENOSPC && !retried) {
                ret = i915_gem_evict_something(dev, &dev_priv->gtt.base,
                                               GEN6_PD_SIZE, GEN6_PD_ALIGN,
-                                              I915_CACHE_NONE, false, true);
+                                              I915_CACHE_NONE, 0);
                if (ret)
                        return ret;
 
@@ -879,42 +1087,60 @@ alloc:
        if (ppgtt->node.start < dev_priv->gtt.mappable_end)
                DRM_DEBUG("Forced to use aperture for PDEs\n");
 
-       ppgtt->base.pte_encode = dev_priv->gtt.base.pte_encode;
        ppgtt->num_pd_entries = GEN6_PPGTT_PD_ENTRIES;
-       if (IS_GEN6(dev)) {
-               ppgtt->enable = gen6_ppgtt_enable;
-               ppgtt->switch_mm = gen6_mm_switch;
-       } else if (IS_HASWELL(dev)) {
-               ppgtt->enable = gen7_ppgtt_enable;
-               ppgtt->switch_mm = hsw_mm_switch;
-       } else if (IS_GEN7(dev)) {
-               ppgtt->enable = gen7_ppgtt_enable;
-               ppgtt->switch_mm = gen7_mm_switch;
-       } else
-               BUG();
-       ppgtt->base.clear_range = gen6_ppgtt_clear_range;
-       ppgtt->base.insert_entries = gen6_ppgtt_insert_entries;
-       ppgtt->base.cleanup = gen6_ppgtt_cleanup;
-       ppgtt->base.scratch = dev_priv->gtt.base.scratch;
-       ppgtt->base.start = 0;
-       ppgtt->base.total = GEN6_PPGTT_PD_ENTRIES * I915_PPGTT_PT_ENTRIES * PAGE_SIZE;
+       return ret;
+}
+
+static int gen6_ppgtt_allocate_page_tables(struct i915_hw_ppgtt *ppgtt)
+{
+       int i;
+
        ppgtt->pt_pages = kcalloc(ppgtt->num_pd_entries, sizeof(struct page *),
                                  GFP_KERNEL);
-       if (!ppgtt->pt_pages) {
-               drm_mm_remove_node(&ppgtt->node);
+
+       if (!ppgtt->pt_pages)
                return -ENOMEM;
-       }
 
        for (i = 0; i < ppgtt->num_pd_entries; i++) {
                ppgtt->pt_pages[i] = alloc_page(GFP_KERNEL);
-               if (!ppgtt->pt_pages[i])
-                       goto err_pt_alloc;
+               if (!ppgtt->pt_pages[i]) {
+                       gen6_ppgtt_free(ppgtt);
+                       return -ENOMEM;
+               }
+       }
+
+       return 0;
+}
+
+static int gen6_ppgtt_alloc(struct i915_hw_ppgtt *ppgtt)
+{
+       int ret;
+
+       ret = gen6_ppgtt_allocate_page_directories(ppgtt);
+       if (ret)
+               return ret;
+
+       ret = gen6_ppgtt_allocate_page_tables(ppgtt);
+       if (ret) {
+               drm_mm_remove_node(&ppgtt->node);
+               return ret;
        }
 
        ppgtt->pt_dma_addr = kcalloc(ppgtt->num_pd_entries, sizeof(dma_addr_t),
                                     GFP_KERNEL);
-       if (!ppgtt->pt_dma_addr)
-               goto err_pt_alloc;
+       if (!ppgtt->pt_dma_addr) {
+               drm_mm_remove_node(&ppgtt->node);
+               gen6_ppgtt_free(ppgtt);
+               return -ENOMEM;
+       }
+
+       return 0;
+}
+
+static int gen6_ppgtt_setup_page_tables(struct i915_hw_ppgtt *ppgtt)
+{
+       struct drm_device *dev = ppgtt->base.dev;
+       int i;
 
        for (i = 0; i < ppgtt->num_pd_entries; i++) {
                dma_addr_t pt_addr;
@@ -923,41 +1149,63 @@ alloc:
                                       PCI_DMA_BIDIRECTIONAL);
 
                if (pci_dma_mapping_error(dev->pdev, pt_addr)) {
-                       ret = -EIO;
-                       goto err_pd_pin;
-
+                       gen6_ppgtt_unmap_pages(ppgtt);
+                       return -EIO;
                }
+
                ppgtt->pt_dma_addr[i] = pt_addr;
        }
 
-       ppgtt->base.clear_range(&ppgtt->base, 0,
-                               ppgtt->num_pd_entries * I915_PPGTT_PT_ENTRIES, true);
+       return 0;
+}
+
+static int gen6_ppgtt_init(struct i915_hw_ppgtt *ppgtt)
+{
+       struct drm_device *dev = ppgtt->base.dev;
+       struct drm_i915_private *dev_priv = dev->dev_private;
+       int ret;
+
+       ppgtt->base.pte_encode = dev_priv->gtt.base.pte_encode;
+       if (IS_GEN6(dev)) {
+               ppgtt->enable = gen6_ppgtt_enable;
+               ppgtt->switch_mm = gen6_mm_switch;
+       } else if (IS_HASWELL(dev)) {
+               ppgtt->enable = gen7_ppgtt_enable;
+               ppgtt->switch_mm = hsw_mm_switch;
+       } else if (IS_GEN7(dev)) {
+               ppgtt->enable = gen7_ppgtt_enable;
+               ppgtt->switch_mm = gen7_mm_switch;
+       } else
+               BUG();
+
+       ret = gen6_ppgtt_alloc(ppgtt);
+       if (ret)
+               return ret;
+
+       ret = gen6_ppgtt_setup_page_tables(ppgtt);
+       if (ret) {
+               gen6_ppgtt_free(ppgtt);
+               return ret;
+       }
+
+       ppgtt->base.clear_range = gen6_ppgtt_clear_range;
+       ppgtt->base.insert_entries = gen6_ppgtt_insert_entries;
+       ppgtt->base.cleanup = gen6_ppgtt_cleanup;
+       ppgtt->base.scratch = dev_priv->gtt.base.scratch;
+       ppgtt->base.start = 0;
+       ppgtt->base.total = GEN6_PPGTT_PD_ENTRIES * I915_PPGTT_PT_ENTRIES * PAGE_SIZE;
        ppgtt->debug_dump = gen6_dump_ppgtt;
 
-       DRM_DEBUG_DRIVER("Allocated pde space (%ldM) at GTT entry: %lx\n",
-                        ppgtt->node.size >> 20,
-                        ppgtt->node.start / PAGE_SIZE);
        ppgtt->pd_offset =
                ppgtt->node.start / PAGE_SIZE * sizeof(gen6_gtt_pte_t);
 
-       return 0;
+       ppgtt->base.clear_range(&ppgtt->base, 0, ppgtt->base.total, true);
 
-err_pd_pin:
-       if (ppgtt->pt_dma_addr) {
-               for (i--; i >= 0; i--)
-                       pci_unmap_page(dev->pdev, ppgtt->pt_dma_addr[i],
-                                      4096, PCI_DMA_BIDIRECTIONAL);
-       }
-err_pt_alloc:
-       kfree(ppgtt->pt_dma_addr);
-       for (i = 0; i < ppgtt->num_pd_entries; i++) {
-               if (ppgtt->pt_pages[i])
-                       __free_page(ppgtt->pt_pages[i]);
-       }
-       kfree(ppgtt->pt_pages);
-       drm_mm_remove_node(&ppgtt->node);
+       DRM_DEBUG_DRIVER("Allocated pde space (%ldM) at GTT entry: %lx\n",
+                        ppgtt->node.size >> 20,
+                        ppgtt->node.start / PAGE_SIZE);
 
-       return ret;
+       return 0;
 }
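Note the ordering within gen6_ppgtt_init(): clear_range() now takes ppgtt->base.total in bytes, so the scratch fill has to run after total is computed; the same reordering appears in gen8_ppgtt_init() above.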
 
 int i915_gem_init_ppgtt(struct drm_device *dev, struct i915_hw_ppgtt *ppgtt)
@@ -995,20 +1243,17 @@ ppgtt_bind_vma(struct i915_vma *vma,
               enum i915_cache_level cache_level,
               u32 flags)
 {
-       const unsigned long entry = vma->node.start >> PAGE_SHIFT;
-
        WARN_ON(flags);
 
-       vma->vm->insert_entries(vma->vm, vma->obj->pages, entry, cache_level);
+       vma->vm->insert_entries(vma->vm, vma->obj->pages, vma->node.start,
+                               cache_level);
 }
 
 static void ppgtt_unbind_vma(struct i915_vma *vma)
 {
-       const unsigned long entry = vma->node.start >> PAGE_SHIFT;
-
        vma->vm->clear_range(vma->vm,
-                            entry,
-                            vma->obj->base.size >> PAGE_SHIFT,
+                            vma->node.start,
+                            vma->obj->base.size,
                             true);
 }
 
@@ -1092,8 +1337,8 @@ void i915_gem_suspend_gtt_mappings(struct drm_device *dev)
        i915_check_and_clear_faults(dev);
 
        dev_priv->gtt.base.clear_range(&dev_priv->gtt.base,
-                                      dev_priv->gtt.base.start / PAGE_SIZE,
-                                      dev_priv->gtt.base.total / PAGE_SIZE,
+                                      dev_priv->gtt.base.start,
+                                      dev_priv->gtt.base.total,
                                       false);
 }
 
@@ -1107,8 +1352,8 @@ void i915_gem_restore_gtt_mappings(struct drm_device *dev)
 
        /* First fill our portion of the GTT with scratch pages */
        dev_priv->gtt.base.clear_range(&dev_priv->gtt.base,
-                                      dev_priv->gtt.base.start / PAGE_SIZE,
-                                      dev_priv->gtt.base.total / PAGE_SIZE,
+                                      dev_priv->gtt.base.start,
+                                      dev_priv->gtt.base.total,
                                       true);
 
        list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) {
@@ -1169,10 +1414,11 @@ static inline void gen8_set_pte(void __iomem *addr, gen8_gtt_pte_t pte)
 
 static void gen8_ggtt_insert_entries(struct i915_address_space *vm,
                                     struct sg_table *st,
-                                    unsigned int first_entry,
+                                    uint64_t start,
                                     enum i915_cache_level level)
 {
        struct drm_i915_private *dev_priv = vm->dev->dev_private;
+       unsigned first_entry = start >> PAGE_SHIFT;
        gen8_gtt_pte_t __iomem *gtt_entries =
                (gen8_gtt_pte_t __iomem *)dev_priv->gtt.gsm + first_entry;
        int i = 0;
@@ -1214,10 +1460,11 @@ static void gen8_ggtt_insert_entries(struct i915_address_space *vm,
  */
 static void gen6_ggtt_insert_entries(struct i915_address_space *vm,
                                     struct sg_table *st,
-                                    unsigned int first_entry,
+                                    uint64_t start,
                                     enum i915_cache_level level)
 {
        struct drm_i915_private *dev_priv = vm->dev->dev_private;
+       unsigned first_entry = start >> PAGE_SHIFT;
        gen6_gtt_pte_t __iomem *gtt_entries =
                (gen6_gtt_pte_t __iomem *)dev_priv->gtt.gsm + first_entry;
        int i = 0;
@@ -1249,11 +1496,13 @@ static void gen6_ggtt_insert_entries(struct i915_address_space *vm,
 }
 
 static void gen8_ggtt_clear_range(struct i915_address_space *vm,
-                                 unsigned int first_entry,
-                                 unsigned int num_entries,
+                                 uint64_t start,
+                                 uint64_t length,
                                  bool use_scratch)
 {
        struct drm_i915_private *dev_priv = vm->dev->dev_private;
+       unsigned first_entry = start >> PAGE_SHIFT;
+       unsigned num_entries = length >> PAGE_SHIFT;
        gen8_gtt_pte_t scratch_pte, __iomem *gtt_base =
                (gen8_gtt_pte_t __iomem *) dev_priv->gtt.gsm + first_entry;
        const int max_entries = gtt_total_entries(dev_priv->gtt) - first_entry;
@@ -1273,11 +1522,13 @@ static void gen8_ggtt_clear_range(struct i915_address_space *vm,
 }
 
 static void gen6_ggtt_clear_range(struct i915_address_space *vm,
-                                 unsigned int first_entry,
-                                 unsigned int num_entries,
+                                 uint64_t start,
+                                 uint64_t length,
                                  bool use_scratch)
 {
        struct drm_i915_private *dev_priv = vm->dev->dev_private;
+       unsigned first_entry = start >> PAGE_SHIFT;
+       unsigned num_entries = length >> PAGE_SHIFT;
        gen6_gtt_pte_t scratch_pte, __iomem *gtt_base =
                (gen6_gtt_pte_t __iomem *) dev_priv->gtt.gsm + first_entry;
        const int max_entries = gtt_total_entries(dev_priv->gtt) - first_entry;
@@ -1310,10 +1561,12 @@ static void i915_ggtt_bind_vma(struct i915_vma *vma,
 }
 
 static void i915_ggtt_clear_range(struct i915_address_space *vm,
-                                 unsigned int first_entry,
-                                 unsigned int num_entries,
+                                 uint64_t start,
+                                 uint64_t length,
                                  bool unused)
 {
+       unsigned first_entry = start >> PAGE_SHIFT;
+       unsigned num_entries = length >> PAGE_SHIFT;
        intel_gtt_clear_range(first_entry, num_entries);
 }
 
@@ -1334,7 +1587,6 @@ static void ggtt_bind_vma(struct i915_vma *vma,
        struct drm_device *dev = vma->vm->dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct drm_i915_gem_object *obj = vma->obj;
-       const unsigned long entry = vma->node.start >> PAGE_SHIFT;
 
        /* If there is no aliasing PPGTT, or the caller needs a global mapping,
         * or we have a global mapping already but the cacheability flags have
@@ -1350,7 +1602,8 @@ static void ggtt_bind_vma(struct i915_vma *vma,
        if (!dev_priv->mm.aliasing_ppgtt || flags & GLOBAL_BIND) {
                if (!obj->has_global_gtt_mapping ||
                    (cache_level != obj->cache_level)) {
-                       vma->vm->insert_entries(vma->vm, obj->pages, entry,
+                       vma->vm->insert_entries(vma->vm, obj->pages,
+                                               vma->node.start,
                                                cache_level);
                        obj->has_global_gtt_mapping = 1;
                }
@@ -1361,7 +1614,9 @@ static void ggtt_bind_vma(struct i915_vma *vma,
             (cache_level != obj->cache_level))) {
                struct i915_hw_ppgtt *appgtt = dev_priv->mm.aliasing_ppgtt;
                appgtt->base.insert_entries(&appgtt->base,
-                                           vma->obj->pages, entry, cache_level);
+                                           vma->obj->pages,
+                                           vma->node.start,
+                                           cache_level);
                vma->obj->has_aliasing_ppgtt_mapping = 1;
        }
 }
@@ -1371,11 +1626,11 @@ static void ggtt_unbind_vma(struct i915_vma *vma)
        struct drm_device *dev = vma->vm->dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct drm_i915_gem_object *obj = vma->obj;
-       const unsigned long entry = vma->node.start >> PAGE_SHIFT;
 
        if (obj->has_global_gtt_mapping) {
-               vma->vm->clear_range(vma->vm, entry,
-                                    vma->obj->base.size >> PAGE_SHIFT,
+               vma->vm->clear_range(vma->vm,
+                                    vma->node.start,
+                                    obj->base.size,
                                     true);
                obj->has_global_gtt_mapping = 0;
        }
@@ -1383,8 +1638,8 @@ static void ggtt_unbind_vma(struct i915_vma *vma)
        if (obj->has_aliasing_ppgtt_mapping) {
                struct i915_hw_ppgtt *appgtt = dev_priv->mm.aliasing_ppgtt;
                appgtt->base.clear_range(&appgtt->base,
-                                        entry,
-                                        obj->base.size >> PAGE_SHIFT,
+                                        vma->node.start,
+                                        obj->base.size,
                                         true);
                obj->has_aliasing_ppgtt_mapping = 0;
        }
@@ -1469,14 +1724,14 @@ void i915_gem_setup_global_gtt(struct drm_device *dev,
 
        /* Clear any non-preallocated blocks */
        drm_mm_for_each_hole(entry, &ggtt_vm->mm, hole_start, hole_end) {
-               const unsigned long count = (hole_end - hole_start) / PAGE_SIZE;
                DRM_DEBUG_KMS("clearing unused GTT space: [%lx, %lx]\n",
                              hole_start, hole_end);
-               ggtt_vm->clear_range(ggtt_vm, hole_start / PAGE_SIZE, count, true);
+               ggtt_vm->clear_range(ggtt_vm, hole_start,
+                                    hole_end - hole_start, true);
        }
 
        /* And finally clear the reserved guard page */
-       ggtt_vm->clear_range(ggtt_vm, end / PAGE_SIZE - 1, 1, true);
+       ggtt_vm->clear_range(ggtt_vm, end - PAGE_SIZE, PAGE_SIZE, true);
 }
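The guard-page clear is unchanged in effect: for a page-aligned end, the byte range starting at end - PAGE_SIZE with length PAGE_SIZE covers exactly the single entry end / PAGE_SIZE - 1 that the old call named.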
 
 void i915_gem_init_global_gtt(struct drm_device *dev)
@@ -1541,11 +1796,6 @@ static inline unsigned int gen8_get_total_gtt_size(u16 bdw_gmch_ctl)
        bdw_gmch_ctl &= BDW_GMCH_GGMS_MASK;
        if (bdw_gmch_ctl)
                bdw_gmch_ctl = 1 << bdw_gmch_ctl;
-       if (bdw_gmch_ctl > 4) {
-               WARN_ON(!i915.preliminary_hw_support);
-               return 4<<20;
-       }
-
        return bdw_gmch_ctl << 20;
 }
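Worked decode of the remaining math, from the code above: a GGMS field of 1 gives 1 << 1 = 2, i.e. 2 << 20 bytes = 2MB of GGTT PTE space; 3 gives 8MB; 0 falls through and returns 0. The deleted branch used to clamp anything above 4MB to 4MB, warning unless i915.preliminary_hw_support was set.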
 