memcg: mem_cgroup_charge never NULL
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index 5c2c702af6172e34bced06b2d5693ce2d461b000..83ba13ad31e16d69d32087bf1a02285b72e3844b 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -140,11 +140,17 @@ struct mem_cgroup {
 
 /*
  * We use the lower bit of the page->page_cgroup pointer as a bit spin
- * lock. We need to ensure that page->page_cgroup is atleast two
- * byte aligned (based on comments from Nick Piggin)
+ * lock.  We need to ensure that page->page_cgroup is at least two
+ * byte aligned (based on comments from Nick Piggin).  But since
+ * bit_spin_lock doesn't actually set that lock bit in a non-debug
+ * uniprocessor kernel, we should avoid setting it here too.
  */
 #define PAGE_CGROUP_LOCK_BIT   0x0
-#define PAGE_CGROUP_LOCK               (1 << PAGE_CGROUP_LOCK_BIT)
+#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK)
+#define PAGE_CGROUP_LOCK       (1 << PAGE_CGROUP_LOCK_BIT)
+#else
+#define PAGE_CGROUP_LOCK       0x0
+#endif
 
 /*
  * A page_cgroup page is associated with every page descriptor. The
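
On a non-debug uniprocessor kernel, bit_spin_lock() reduces to preempt_disable() and never sets the bit, and bit_spin_unlock() never clears it. Had the store path kept OR-ing in bit 0 on such kernels, the stale bit would survive every unlock and permanently corrupt the pointer. Defining PAGE_CGROUP_LOCK as 0 there makes both the OR on store and the mask on load compile away. A minimal sketch of the accessor pair, assuming only the definitions in the hunk above:

	/* Sketch: with PAGE_CGROUP_LOCK == 0 on non-debug UP, the OR on
	 * store and the mask on load are both no-ops, so the pointer in
	 * page->page_cgroup stays exact. */
	static struct page_cgroup *sketch_get(struct page *page)
	{
		return (struct page_cgroup *)
			(page->page_cgroup & ~PAGE_CGROUP_LOCK);
	}

	static void sketch_set(struct page *page, struct page_cgroup *pc)
	{
		page->page_cgroup = (unsigned long)pc | PAGE_CGROUP_LOCK;
	}
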
@@ -271,19 +277,10 @@ static inline int page_cgroup_locked(struct page *page)
                                        &page->page_cgroup);
 }
 
-void page_assign_page_cgroup(struct page *page, struct page_cgroup *pc)
+static void page_assign_page_cgroup(struct page *page, struct page_cgroup *pc)
 {
-       int locked;
-
-       /*
-        * While resetting the page_cgroup we might not hold the
-        * page_cgroup lock. free_hot_cold_page() is an example
-        * of such a scenario
-        */
-       if (pc)
-               VM_BUG_ON(!page_cgroup_locked(page));
-       locked = (page->page_cgroup & PAGE_CGROUP_LOCK);
-       page->page_cgroup = ((unsigned long)pc | locked);
+       VM_BUG_ON(!page_cgroup_locked(page));
+       page->page_cgroup = ((unsigned long)pc | PAGE_CGROUP_LOCK);
 }
 
 struct page_cgroup *page_get_page_cgroup(struct page *page)
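
page_assign_page_cgroup() is now static and unconditionally asserts page_cgroup_locked(): every remaining caller holds the bit spinlock, so the old "pc may be NULL and unlocked" special case for free_hot_cold_page() is gone. The expected caller pattern, using only functions visible in this file:

	lock_page_cgroup(page);
	if (!page_get_page_cgroup(page))
		page_assign_page_cgroup(page, pc); /* VM_BUG_ON holds: lock taken */
	unlock_page_cgroup(page);
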
@@ -303,25 +300,6 @@ static void __always_inline unlock_page_cgroup(struct page *page)
        bit_spin_unlock(PAGE_CGROUP_LOCK_BIT, &page->page_cgroup);
 }
 
-/*
- * Tie new page_cgroup to struct page under lock_page_cgroup()
- * This can fail if the page has been tied to a page_cgroup.
- * If success, returns 0.
- */
-static int page_cgroup_assign_new_page_cgroup(struct page *page,
-                                               struct page_cgroup *pc)
-{
-       int ret = 0;
-
-       lock_page_cgroup(page);
-       if (!page_get_page_cgroup(page))
-               page_assign_page_cgroup(page, pc);
-       else /* A page is tied to other pc. */
-               ret = 1;
-       unlock_page_cgroup(page);
-       return ret;
-}
-
 /*
  * Clear page->page_cgroup member under lock_page_cgroup().
  * If given "pc" value is different from one page->page_cgroup,
@@ -399,7 +377,7 @@ int task_in_mem_cgroup(struct task_struct *task, const struct mem_cgroup *mem)
        int ret;
 
        task_lock(task);
-       ret = task->mm && mm_cgroup(task->mm) == mem;
+       ret = task->mm && mm_match_cgroup(task->mm, mem);
        task_unlock(task);
        return ret;
 }
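
The old mm_cgroup() handed back a raw mem_cgroup pointer for the caller to compare; mm_match_cgroup() folds the RCU dereference and the comparison into one helper, so no caller is left holding an unreferenced pointer. Roughly this shape (a sketch; the real definition lives in include/linux/memcontrol.h and may differ in detail):

	#define mm_match_cgroup(mm, cgroup)	\
		((cgroup) == rcu_dereference((mm)->mem_cgroup))
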
@@ -407,11 +385,13 @@ int task_in_mem_cgroup(struct task_struct *task, const struct mem_cgroup *mem)
 /*
  * This routine assumes that the appropriate zone's lru lock is already held
  */
-void mem_cgroup_move_lists(struct page_cgroup *pc, bool active)
+void mem_cgroup_move_lists(struct page *page, bool active)
 {
+       struct page_cgroup *pc;
        struct mem_cgroup_per_zone *mz;
        unsigned long flags;
 
+       pc = page_get_page_cgroup(page);
        if (!pc)
                return;
 
@@ -534,7 +514,6 @@ unsigned long mem_cgroup_isolate_pages(unsigned long nr_to_scan,
                if (scan >= nr_to_scan)
                        break;
                page = pc->page;
-               VM_BUG_ON(!pc);
 
                if (unlikely(!PageLRU(page)))
                        continue;
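
The deleted assertion was dead code twice over: pc comes from list_entry() iteration over a non-empty LRU list, and it had already been dereferenced on the preceding line, so a NULL pc would have oopsed before the check could fire:

	page = pc->page;	/* a NULL pc would oops here first... */
	VM_BUG_ON(!pc);		/* ...so this check could never trigger */
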
@@ -587,26 +566,24 @@ static int mem_cgroup_charge_common(struct page *page, struct mm_struct *mm,
         * with it
         */
 retry:
-       if (page) {
-               lock_page_cgroup(page);
-               pc = page_get_page_cgroup(page);
-               /*
-                * The page_cgroup exists and
-                * the page has already been accounted.
-                */
-               if (pc) {
-                       if (unlikely(!atomic_inc_not_zero(&pc->ref_cnt))) {
-                               /* this page is under being uncharged ? */
-                               unlock_page_cgroup(page);
-                               cpu_relax();
-                               goto retry;
-                       } else {
-                               unlock_page_cgroup(page);
-                               goto done;
-                       }
+       lock_page_cgroup(page);
+       pc = page_get_page_cgroup(page);
+       /*
+        * The page_cgroup exists and
+        * the page has already been accounted.
+        */
+       if (pc) {
+               if (unlikely(!atomic_inc_not_zero(&pc->ref_cnt))) {
+                       /* this page is under being uncharged ? */
+                       unlock_page_cgroup(page);
+                       cpu_relax();
+                       goto retry;
+               } else {
+                       unlock_page_cgroup(page);
+                       goto done;
                }
-               unlock_page_cgroup(page);
        }
+       unlock_page_cgroup(page);
 
        pc = kzalloc(sizeof(struct page_cgroup), gfp_mask);
        if (pc == NULL)
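
With mem_cgroup_charge() now guaranteed a non-NULL page (the point of this patch), the whole `if (page)` guard disappears and the lock/lookup runs unconditionally. The retry loop takes a reference only if the page_cgroup is not already being torn down: atomic_inc_not_zero() failing means an uncharge has dropped ref_cnt to zero, so the charger spins until the pointer is cleared. Reduced to its skeleton (sketch, same identifiers as above):

	retry:
		lock_page_cgroup(page);
		pc = page_get_page_cgroup(page);
		if (pc && !atomic_inc_not_zero(&pc->ref_cnt)) {
			/* ref_cnt already 0: uncharge in flight, wait it out */
			unlock_page_cgroup(page);
			cpu_relax();
			goto retry;
		}
		unlock_page_cgroup(page);
		if (pc)
			goto done;	/* already charged, reference taken */
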
@@ -665,7 +642,9 @@ retry:
        if (ctype == MEM_CGROUP_CHARGE_TYPE_CACHE)
                pc->flags |= PAGE_CGROUP_FLAG_CACHE;
 
-       if (!page || page_cgroup_assign_new_page_cgroup(page, pc)) {
+       lock_page_cgroup(page);
+       if (page_get_page_cgroup(page)) {
+               unlock_page_cgroup(page);
                /*
                 * Another charge has been added to this page already.
                 * We take lock_page_cgroup(page) again and read
@@ -674,10 +653,10 @@ retry:
                res_counter_uncharge(&mem->res, PAGE_SIZE);
                css_put(&mem->css);
                kfree(pc);
-               if (!page)
-                       goto done;
                goto retry;
        }
+       page_assign_page_cgroup(page, pc);
+       unlock_page_cgroup(page);
 
        mz = page_cgroup_zoneinfo(pc);
        spin_lock_irqsave(&mz->lru_lock, flags);
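
This is the open-coded replacement for the deleted page_cgroup_assign_new_page_cgroup() helper: allocate the new page_cgroup outside the lock, then re-check under lock_page_cgroup() whether another charger won the race. Inlining the helper puts the rollback (res_counter_uncharge, css_put, kfree) right next to the commitment point. In skeleton form (sketch):

	lock_page_cgroup(page);
	if (page_get_page_cgroup(page)) {
		/* lost the race: undo the charge, free, retry from the top */
		unlock_page_cgroup(page);
		res_counter_uncharge(&mem->res, PAGE_SIZE);
		css_put(&mem->css);
		kfree(pc);
		goto retry;
	}
	page_assign_page_cgroup(page, pc);	/* commit: page now accounted */
	unlock_page_cgroup(page);
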
@@ -1101,7 +1080,7 @@ mem_cgroup_create(struct cgroup_subsys *ss, struct cgroup *cont)
                mem = kzalloc(sizeof(struct mem_cgroup), GFP_KERNEL);
 
        if (mem == NULL)
-               return NULL;
+               return ERR_PTR(-ENOMEM);
 
        res_counter_init(&mem->res);
 
@@ -1117,7 +1096,7 @@ free_out:
                free_mem_cgroup_per_zone_info(mem, node);
        if (cont->parent != NULL)
                kfree(mem);
-       return NULL;
+       return ERR_PTR(-ENOMEM);
 }
 
 static void mem_cgroup_pre_destroy(struct cgroup_subsys *ss,
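
Returning ERR_PTR(-ENOMEM) instead of NULL matters because the cgroup core checks its subsystems' ->create() results with IS_ERR(), not against NULL; a bare NULL would sail through the error check and be dereferenced later. Caller-side contract, roughly as cgroup_create() in kernel/cgroup.c consumes it (sketch):

	struct cgroup_subsys_state *css = ss->create(ss, cont);
	if (IS_ERR(css)) {
		err = PTR_ERR(css);	/* -ENOMEM propagates cleanly */
		goto err_destroy;
	}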