staging: ion: Move shrinker out of heaps
author		Colin Cross <ccross@android.com>
		Mon, 17 Feb 2014 21:58:38 +0000 (13:58 -0800)
committer	Greg Kroah-Hartman <gregkh@linuxfoundation.org>
		Tue, 18 Feb 2014 19:05:08 +0000 (11:05 -0800)
Every heap that uses deferred frees is going to need a shrinker
to shrink the freelist under memory pressure.  Rather than
requiring each heap to implement a shrinker, automatically
register a shrinker if the deferred free flag is set.
The system heap also needs to shrink its page pools, so add
a shrink function to the heap ops that will be called after
shrinking the freelists.
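
As a sketch of the contract this creates for heap authors (a minimal,
hypothetical example: the my_heap_* names are invented, while
ION_HEAP_FLAG_DEFER_FREE and the shrink op signature come from the hunks
below):

    /* Hypothetical heap opting in to the common shrinker.  The core
     * registers it from ion_device_add_heap() because the heap sets the
     * defer-free flag and/or defines .shrink.
     */
    static int my_heap_shrink(struct ion_heap *heap, gfp_t gfp_mask,
                              int nr_to_scan)
    {
            if (nr_to_scan == 0)                       /* count-only query */
                    return my_heap_cached_pages(heap);
            return my_heap_reclaim(heap, nr_to_scan);  /* pages freed */
    }

    static struct ion_heap_ops my_heap_ops = {
            .allocate = my_heap_allocate,
            .free     = my_heap_free,
            .shrink   = my_heap_shrink,
    };

    /* ...and at creation time: heap->flags |= ION_HEAP_FLAG_DEFER_FREE; */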

Cc: Colin Cross <ccross@android.com>
Cc: Android Kernel Team <kernel-team@android.com>
Signed-off-by: Colin Cross <ccross@android.com>
[jstultz: Resolved big conflicts with the shrinker api change.
Also minor commit subject tweak.]
Signed-off-by: John Stultz <john.stultz@linaro.org>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
drivers/staging/android/ion/ion.c
drivers/staging/android/ion/ion_heap.c
drivers/staging/android/ion/ion_page_pool.c
drivers/staging/android/ion/ion_priv.h
drivers/staging/android/ion/ion_system_heap.c

drivers/staging/android/ion/ion.c
index 577669789de1805d9e8e638978d18f83fcdb28c3..08367179a48c4ee54d13231cf1974da0468a4ac8 100644
@@ -1502,6 +1502,9 @@ void ion_device_add_heap(struct ion_device *dev, struct ion_heap *heap)
        if (heap->flags & ION_HEAP_FLAG_DEFER_FREE)
                ion_heap_init_deferred_free(heap);
 
+       if ((heap->flags & ION_HEAP_FLAG_DEFER_FREE) || heap->ops->shrink)
+               ion_heap_init_shrinker(heap);
+
        heap->dev = dev;
        down_write(&dev->lock);
        /* use negative heap->id to reverse the priority -- when traversing
drivers/staging/android/ion/ion_heap.c
index 305b75ed6385d7f658b7ec3d9299ebe90e7894b5..49ace13ac5451bc4776d4b7def448403a09aa70c 100644
@@ -252,6 +252,56 @@ int ion_heap_init_deferred_free(struct ion_heap *heap)
        return 0;
 }
 
+static unsigned long ion_heap_shrink_count(struct shrinker *shrinker,
+                                               struct shrink_control *sc)
+{
+       struct ion_heap *heap = container_of(shrinker, struct ion_heap,
+                                            shrinker);
+       int total = 0;
+
+       total = ion_heap_freelist_size(heap) / PAGE_SIZE;
+       if (heap->ops->shrink)
+               total += heap->ops->shrink(heap, sc->gfp_mask, 0);
+       return total;
+}
+
+static unsigned long ion_heap_shrink_scan(struct shrinker *shrinker,
+                                               struct shrink_control *sc)
+{
+       struct ion_heap *heap = container_of(shrinker, struct ion_heap,
+                                            shrinker);
+       int freed = 0;
+       int to_scan = sc->nr_to_scan;
+
+       if (to_scan == 0)
+               return 0;
+
+       /*
+        * shrink the free list first, no point in zeroing the memory if we're
+        * just going to reclaim it
+        */
+       if (heap->flags & ION_HEAP_FLAG_DEFER_FREE)
+               freed = ion_heap_freelist_drain(heap, to_scan * PAGE_SIZE) /
+                               PAGE_SIZE;
+
+       to_scan -= freed;
+       if (to_scan <= 0)
+               return freed;
+
+       if (heap->ops->shrink)
+               freed += heap->ops->shrink(heap, sc->gfp_mask, to_scan);
+       return freed;
+}
+
+void ion_heap_init_shrinker(struct ion_heap *heap)
+{
+       heap->shrinker.count_objects = ion_heap_shrink_count;
+       heap->shrinker.scan_objects = ion_heap_shrink_scan;
+       heap->shrinker.seeks = DEFAULT_SEEKS;
+       heap->shrinker.batch = 0;
+       register_shrinker(&heap->shrinker);
+}
+
 struct ion_heap *ion_heap_create(struct ion_platform_heap *heap_data)
 {
        struct ion_heap *heap = NULL;
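
To make the scan path above concrete, a worked example with invented
numbers: suppose the freelist holds 8 pages and the VM asks for
sc->nr_to_scan = 12.  ion_heap_shrink_scan() then proceeds as:

    freed = ion_heap_freelist_drain(heap, 12 * PAGE_SIZE) / PAGE_SIZE; /* 8 */
    to_scan = 12 - 8;                     /* 4 objects still to reclaim */
    freed += heap->ops->shrink(heap, sc->gfp_mask, 4);
    /* returns 8 + whatever the heap's own shrink op freed */

Draining the freelist first matches the comment in the code: there is no
point zeroing memory that is about to go back to the page allocator anyway.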
drivers/staging/android/ion/ion_page_pool.c
index fa693c23681a7d15f60e2c84155ec22dc23a30c8..ecb5fc34ec5cfbe27ef7df3c7c3d41ffb4551466 100644
@@ -130,8 +130,7 @@ static int ion_page_pool_total(struct ion_page_pool *pool, bool high)
 int ion_page_pool_shrink(struct ion_page_pool *pool, gfp_t gfp_mask,
                                int nr_to_scan)
 {
-       int nr_freed = 0;
-       int i;
+       int freed;
        bool high;
 
        high = !!(gfp_mask & __GFP_HIGHMEM);
@@ -139,7 +138,7 @@ int ion_page_pool_shrink(struct ion_page_pool *pool, gfp_t gfp_mask,
        if (nr_to_scan == 0)
                return ion_page_pool_total(pool, high);
 
-       for (i = 0; i < nr_to_scan; i++) {
+       for (freed = 0; freed < nr_to_scan; freed++) {
                struct page *page;
 
                mutex_lock(&pool->mutex);
@@ -153,10 +152,9 @@ int ion_page_pool_shrink(struct ion_page_pool *pool, gfp_t gfp_mask,
                }
                mutex_unlock(&pool->mutex);
                ion_page_pool_free_pages(pool, page);
-               nr_freed += (1 << pool->order);
        }
 
-       return nr_freed;
+       return freed;
 }
 
 struct ion_page_pool *ion_page_pool_create(gfp_t gfp_mask, unsigned int order)
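
The reworked loop keeps ion_page_pool_shrink()'s dual role: nr_to_scan == 0
is a count-only query, anything larger frees up to that many pool entries.
A usage sketch (pool setup elided; GFP_KERNEL is an arbitrary choice here):

    int total, freed;

    total = ion_page_pool_shrink(pool, GFP_KERNEL, 0);  /* count, free nothing */
    freed = ion_page_pool_shrink(pool, GFP_KERNEL, 16); /* free up to 16 entries */

Note that the return value now counts pool entries rather than pages, since
the nr_freed += (1 << pool->order) accounting is gone.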
drivers/staging/android/ion/ion_priv.h
index 0942a7fc82b4460ea82fa42d201d0831e32c3f52..bcf9d19b57a3b909d5015de76810ba615aebf55e 100644
@@ -114,6 +114,7 @@ struct ion_heap_ops {
        void (*unmap_kernel)(struct ion_heap *heap, struct ion_buffer *buffer);
        int (*map_user)(struct ion_heap *mapper, struct ion_buffer *buffer,
                        struct vm_area_struct *vma);
+       int (*shrink)(struct ion_heap *heap, gfp_t gfp_mask, int nr_to_scan);
 };
 
 /**
@@ -132,10 +133,7 @@ struct ion_heap_ops {
  *                     allocating.  These are specified by platform data and
  *                     MUST be unique
  * @name:              used for debugging
- * @shrinker:          a shrinker for the heap, if the heap caches system
- *                     memory, it must define a shrinker to return it on low
- *                     memory conditions, this includes system memory cached
- *                     in the deferred free lists for heaps that support it
+ * @shrinker:          a shrinker for the heap
  * @free_list:         free list head if deferred free is used
  * @free_list_size     size of the deferred free list in bytes
  * @lock:              protects the free list
@@ -218,6 +216,16 @@ int ion_heap_map_user(struct ion_heap *, struct ion_buffer *,
 int ion_heap_buffer_zero(struct ion_buffer *buffer);
 int ion_heap_pages_zero(struct page *page, size_t size, pgprot_t pgprot);
 
+/**
+ * ion_heap_init_shrinker
+ * @heap:              the heap
+ *
+ * If a heap sets the ION_HEAP_FLAG_DEFER_FREE flag or defines the shrink op
+ * this function will be called to setup a shrinker to shrink the freelists
+ * and call the heap's shrink op.
+ */
+void ion_heap_init_shrinker(struct ion_heap *heap);
+
 /**
  * ion_heap_init_deferred_free -- initialize deferred free functionality
  * @heap:              the heap
@@ -305,13 +313,8 @@ void ion_carveout_free(struct ion_heap *heap, ion_phys_addr_t addr,
  * @low_count:         number of lowmem items in the pool
  * @high_items:                list of highmem items
  * @low_items:         list of lowmem items
- * @shrinker:          a shrinker for the items
  * @mutex:             lock protecting this struct and especially the count
  *                     item list
- * @alloc:             function to be used to allocate pageory when the pool
- *                     is empty
- * @free:              function to be used to free pageory back to the system
- *                     when the shrinker fires
  * @gfp_mask:          gfp_mask to use from alloc
  * @order:             order of pages in the pool
  * @list:              plist node for list of pools
drivers/staging/android/ion/ion_system_heap.c
index 9849f3963e752f01b99450c7636132fba3aefda5..f453d977c80cb79d4ff2d430d1d5aabf895c7500 100644
@@ -231,75 +231,34 @@ static void ion_system_heap_unmap_dma(struct ion_heap *heap,
        return;
 }
 
-static struct ion_heap_ops system_heap_ops = {
-       .allocate = ion_system_heap_allocate,
-       .free = ion_system_heap_free,
-       .map_dma = ion_system_heap_map_dma,
-       .unmap_dma = ion_system_heap_unmap_dma,
-       .map_kernel = ion_heap_map_kernel,
-       .unmap_kernel = ion_heap_unmap_kernel,
-       .map_user = ion_heap_map_user,
-};
-
-static unsigned long ion_system_heap_shrink_count(struct shrinker *shrinker,
-                                 struct shrink_control *sc)
+static int ion_system_heap_shrink(struct ion_heap *heap, gfp_t gfp_mask,
+                                       int nr_to_scan)
 {
-       struct ion_heap *heap = container_of(shrinker, struct ion_heap,
-                                            shrinker);
-       struct ion_system_heap *sys_heap = container_of(heap,
-                                                       struct ion_system_heap,
-                                                       heap);
+       struct ion_system_heap *sys_heap;
        int nr_total = 0;
        int i;
 
-       /* total number of items is whatever the page pools are holding
-          plus whatever's in the freelist */
-       for (i = 0; i < num_orders; i++) {
-               struct ion_page_pool *pool = sys_heap->pools[i];
-               nr_total += ion_page_pool_shrink(pool, sc->gfp_mask, 0);
-       }
-       nr_total += ion_heap_freelist_size(heap) / PAGE_SIZE;
-       return nr_total;
-
-}
-
-static unsigned long ion_system_heap_shrink_scan(struct shrinker *shrinker,
-                                 struct shrink_control *sc)
-{
-
-       struct ion_heap *heap = container_of(shrinker, struct ion_heap,
-                                            shrinker);
-       struct ion_system_heap *sys_heap = container_of(heap,
-                                                       struct ion_system_heap,
-                                                       heap);
-       int nr_freed = 0;
-       int i;
-
-       if (sc->nr_to_scan == 0)
-               goto end;
-
-       /* shrink the free list first, no point in zeroing the memory if
-          we're just going to reclaim it */
-       nr_freed += ion_heap_freelist_drain(heap, sc->nr_to_scan * PAGE_SIZE) /
-               PAGE_SIZE;
-
-       if (nr_freed >= sc->nr_to_scan)
-               goto end;
+       sys_heap = container_of(heap, struct ion_system_heap, heap);
 
        for (i = 0; i < num_orders; i++) {
                struct ion_page_pool *pool = sys_heap->pools[i];
-
-               nr_freed += ion_page_pool_shrink(pool, sc->gfp_mask,
-                                                sc->nr_to_scan);
-               if (nr_freed >= sc->nr_to_scan)
-                       break;
+               nr_total += ion_page_pool_shrink(pool, gfp_mask, nr_to_scan);
        }
 
-end:
-       return nr_freed;
-
+       return nr_total;
 }
 
+static struct ion_heap_ops system_heap_ops = {
+       .allocate = ion_system_heap_allocate,
+       .free = ion_system_heap_free,
+       .map_dma = ion_system_heap_map_dma,
+       .unmap_dma = ion_system_heap_unmap_dma,
+       .map_kernel = ion_heap_map_kernel,
+       .unmap_kernel = ion_heap_unmap_kernel,
+       .map_user = ion_heap_map_user,
+       .shrink = ion_system_heap_shrink,
+};
+
 static int ion_system_heap_debug_show(struct ion_heap *heap, struct seq_file *s,
                                      void *unused)
 {
@@ -347,11 +306,6 @@ struct ion_heap *ion_system_heap_create(struct ion_platform_heap *unused)
                heap->pools[i] = pool;
        }
 
-       heap->heap.shrinker.scan_objects = ion_system_heap_shrink_scan;
-       heap->heap.shrinker.count_objects = ion_system_heap_shrink_count;
-       heap->heap.shrinker.seeks = DEFAULT_SEEKS;
-       heap->heap.shrinker.batch = 0;
-       register_shrinker(&heap->heap.shrinker);
        heap->heap.debug_show = ion_system_heap_debug_show;
        return &heap->heap;
 err_create_pool: