drm/nouveau: require reservations for nouveau_fence_sync and nouveau_bo_fence
diff --git a/drivers/gpu/drm/ttm/ttm_page_alloc_dma.c b/drivers/gpu/drm/ttm/ttm_page_alloc_dma.c
index fb8259f698395a286e28fbe1053bc70e04821694..c96db433f8af834398f6496da9498db98013a7c7 100644
--- a/drivers/gpu/drm/ttm/ttm_page_alloc_dma.c
+++ b/drivers/gpu/drm/ttm/ttm_page_alloc_dma.c
@@ -411,8 +411,10 @@ static void ttm_dma_page_put(struct dma_pool *pool, struct dma_page *d_page)
  *
  * @pool: to free the pages from
  * @nr_free: If set to true will free all pages in pool
+ * @gfp: GFP flags.
  **/
-static unsigned ttm_dma_page_pool_free(struct dma_pool *pool, unsigned nr_free)
+static unsigned ttm_dma_page_pool_free(struct dma_pool *pool, unsigned nr_free,
+                                      gfp_t gfp)
 {
        unsigned long irq_flags;
        struct dma_page *dma_p, *tmp;
@@ -430,8 +432,7 @@ static unsigned ttm_dma_page_pool_free(struct dma_pool *pool, unsigned nr_free)
                         npages_to_free, nr_free);
        }
 #endif
-       pages_to_free = kmalloc(npages_to_free * sizeof(struct page *),
-                       GFP_KERNEL);
+       pages_to_free = kmalloc(npages_to_free * sizeof(struct page *), gfp);
 
        if (!pages_to_free) {
                pr_err("%s: Failed to allocate memory for pool free operation\n",
@@ -530,7 +531,7 @@ static void ttm_dma_free_pool(struct device *dev, enum pool_type type)
                if (pool->type != type)
                        continue;
                /* Takes a spinlock.. */
-               ttm_dma_page_pool_free(pool, FREE_ALL_PAGES);
+               ttm_dma_page_pool_free(pool, FREE_ALL_PAGES, GFP_KERNEL);
                WARN_ON(((pool->npages_in_use + pool->npages_free) != 0));
                /* This code path is called after _all_ references to the
                 * struct device has been dropped - so nobody should be
@@ -847,6 +848,7 @@ static int ttm_dma_pool_get_pages(struct dma_pool *pool,
        if (count) {
                d_page = list_first_entry(&pool->free_list, struct dma_page, page_list);
                ttm->pages[index] = d_page->p;
+               ttm_dma->cpu_address[index] = d_page->vaddr;
                ttm_dma->dma_address[index] = d_page->dma;
                list_move_tail(&d_page->page_list, &ttm_dma->pages_list);
                r = 0;
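
This hunk also starts caching the CPU-side mapping of each DMA page: d_page->vaddr is the kernel virtual address that dma_alloc_coherent() returned, and storing it in ttm_dma->cpu_address lets a driver reach the page contents without a kmap round-trip. A hedged sketch of a consumer (the helper name is invented for illustration):

    #include <drm/ttm/ttm_bo_driver.h>  /* struct ttm_dma_tt in this era */

    /* Hypothetical helper: return the coherent kernel mapping that
     * ttm_dma_pool_get_pages() cached for page i. */
    static void *ttm_dma_cpu_addr(struct ttm_dma_tt *ttm_dma, unsigned long i)
    {
            return ttm_dma->cpu_address[i];
    }
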
@@ -978,12 +980,13 @@ void ttm_dma_unpopulate(struct ttm_dma_tt *ttm_dma, struct device *dev)
        INIT_LIST_HEAD(&ttm_dma->pages_list);
        for (i = 0; i < ttm->num_pages; i++) {
                ttm->pages[i] = NULL;
+               ttm_dma->cpu_address[i] = 0;
                ttm_dma->dma_address[i] = 0;
        }
 
        /* shrink pool if necessary (only on !is_cached pools)*/
        if (npages)
-               ttm_dma_page_pool_free(pool, npages);
+               ttm_dma_page_pool_free(pool, npages, GFP_KERNEL);
        ttm->state = tt_unpopulated;
 }
 EXPORT_SYMBOL_GPL(ttm_dma_unpopulate);
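
ttm_dma_unpopulate() clears the new cpu_address slots alongside dma_address, and it may keep GFP_KERNEL for the pool shrink because unpopulate runs in ordinary process context rather than under reclaim. One small aside (a sketch, not part of this diff): cpu_address is an array of void *, so clearing it with NULL would read more idiomatically and compile to the same thing:

    for (i = 0; i < ttm->num_pages; i++) {
            ttm->pages[i] = NULL;
            ttm_dma->cpu_address[i] = NULL; /* pointer, so NULL rather than 0 */
            ttm_dma->dma_address[i] = 0;    /* dma_addr_t, an integer type */
    }
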
@@ -993,10 +996,7 @@ EXPORT_SYMBOL_GPL(ttm_dma_unpopulate);
  *
  * XXX: (dchinner) Deadlock warning!
  *
- * ttm_dma_page_pool_free() does GFP_KERNEL memory allocation, and so attention
- * needs to be paid to sc->gfp_mask to determine if this can be done or not.
- * GFP_KERNEL memory allocation in a GFP_ATOMIC reclaim context woul dbe really
- * bad.
+ * We need to pass sc->gfp_mask to ttm_dma_page_pool_free().
  *
  * I'm getting sadder as I hear more pathetical whimpers about needing per-pool
  * shrinkers
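
The rewritten comment records how dchinner's warning is resolved: rather than documenting that a GFP_KERNEL allocation from an atomic reclaim context would be fatal, the shrinker now forwards sc->gfp_mask, so anything it allocates on behalf of reclaim is no stricter than what reclaim itself permits. A minimal sketch of that contract (the function is illustrative, not from this file):

    #include <linux/shrinker.h>
    #include <linux/slab.h>

    static unsigned long sketch_scan(struct shrinker *shrink,
                                     struct shrink_control *sc)
    {
            /* Allocate with the mask reclaim handed us, never a bare
             * GFP_KERNEL; on failure just end this shrink pass. */
            void *tmp = kmalloc(PAGE_SIZE, sc->gfp_mask);

            if (!tmp)
                    return SHRINK_STOP;
            kfree(tmp);
            return 0;
    }
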
@@ -1004,9 +1004,9 @@ EXPORT_SYMBOL_GPL(ttm_dma_unpopulate);
 static unsigned long
 ttm_dma_pool_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)
 {
-       static atomic_t start_pool = ATOMIC_INIT(0);
+       static unsigned start_pool;
        unsigned idx = 0;
-       unsigned pool_offset = atomic_add_return(1, &start_pool);
+       unsigned pool_offset;
        unsigned shrink_pages = sc->nr_to_scan;
        struct device_pools *p;
        unsigned long freed = 0;
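
start_pool also drops from atomic_t to a plain unsigned: with the trylock introduced below, the counter is only read and incremented while _manager->lock is held, so the mutex already serializes access and atomics would be redundant. The idiom, as a sketch with hypothetical names:

    #include <linux/mutex.h>

    static DEFINE_MUTEX(sketch_lock);
    static unsigned int sketch_start;       /* protected by sketch_lock */

    static unsigned int sketch_next_pool(unsigned int npools)
    {
            /* Caller holds sketch_lock, so the increment cannot race. */
            return ++sketch_start % npools;
    }
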
@@ -1014,8 +1014,11 @@ ttm_dma_pool_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)
        if (list_empty(&_manager->pools))
                return SHRINK_STOP;
 
-       mutex_lock(&_manager->lock);
-       pool_offset = pool_offset % _manager->npools;
+       if (!mutex_trylock(&_manager->lock))
+               return SHRINK_STOP;
+       if (!_manager->npools)
+               goto out;
+       pool_offset = ++start_pool % _manager->npools;
        list_for_each_entry(p, &_manager->pools, pools) {
                unsigned nr_free;
 
@@ -1027,13 +1030,15 @@ ttm_dma_pool_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)
                if (++idx < pool_offset)
                        continue;
                nr_free = shrink_pages;
-               shrink_pages = ttm_dma_page_pool_free(p->pool, nr_free);
+               shrink_pages = ttm_dma_page_pool_free(p->pool, nr_free,
+                                                     sc->gfp_mask);
                freed += nr_free - shrink_pages;
 
                pr_debug("%s: (%s:%d) Asked to shrink %d, have %d more to go\n",
                         p->pool->dev_name, p->pool->name, current->pid,
                         nr_free, shrink_pages);
        }
+out:
        mutex_unlock(&_manager->lock);
        return freed;
 }
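
The trylock closes a self-deadlock window: ttm_dma_pool_shrink_scan() takes _manager->lock and then calls ttm_dma_page_pool_free(), whose kmalloc() can itself enter direct reclaim and re-invoke this shrinker, where an unconditional mutex_lock() would wait forever on the lock the outer call already holds. mutex_trylock() backs off instead, and the new npools check avoids a division by zero before any pool is registered. The resulting shape, reduced to a sketch (reusing sketch_lock from above):

    static unsigned int sketch_npools;      /* pools currently registered */

    static unsigned long sketch_scan_shape(struct shrinker *shrink,
                                           struct shrink_control *sc)
    {
            unsigned long freed = 0;

            /* We may be running inside an allocation made while the lock
             * was held; back off rather than deadlock against ourselves. */
            if (!mutex_trylock(&sketch_lock))
                    return SHRINK_STOP;
            if (!sketch_npools)
                    goto out;
            /* ... walk the pools, freeing with sc->gfp_mask ... */
    out:
            mutex_unlock(&sketch_lock);
            return freed;
    }
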
@@ -1044,7 +1049,8 @@ ttm_dma_pool_shrink_count(struct shrinker *shrink, struct shrink_control *sc)
        struct device_pools *p;
        unsigned long count = 0;
 
-       mutex_lock(&_manager->lock);
+       if (!mutex_trylock(&_manager->lock))
+               return 0;
        list_for_each_entry(p, &_manager->pools, pools)
                count += p->pool->npages_free;
        mutex_unlock(&_manager->lock);
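
ttm_dma_pool_shrink_count() gets the same treatment; returning 0 when the lock is contended just tells the VM there is nothing to scan this round, a harmless underestimate. Wired together, the pair would be registered roughly like this (a hedged sketch, continuing the hypothetical names above):

    static unsigned long sketch_count(struct shrinker *shrink,
                                      struct shrink_control *sc)
    {
            unsigned long count = 0;

            if (!mutex_trylock(&sketch_lock))
                    return 0;       /* "nothing to scan" is always safe */
            /* ... sum npages_free across the pools ... */
            mutex_unlock(&sketch_lock);
            return count;
    }

    static struct shrinker sketch_shrinker = {
            .count_objects = sketch_count,
            .scan_objects  = sketch_scan_shape,
            .seeks         = DEFAULT_SEEKS,
    };
    /* registered once at init: register_shrinker(&sketch_shrinker); */
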