memcg: remove unused variable
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index f342778a0c0a2649b00a3a284036ea17f05023a1..5c56765c848a7816bb89cd9b54977e6f0c5239f1 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -59,7 +59,7 @@
 
 struct cgroup_subsys mem_cgroup_subsys __read_mostly;
 #define MEM_CGROUP_RECLAIM_RETRIES     5
-struct mem_cgroup *root_mem_cgroup __read_mostly;
+static struct mem_cgroup *root_mem_cgroup __read_mostly;
 
 #ifdef CONFIG_CGROUP_MEM_RES_CTLR_SWAP
 /* Turned on only when memory cgroup is enabled && really_do_swap_account = 1 */
@@ -138,7 +138,6 @@ struct mem_cgroup_per_zone {
 
        struct mem_cgroup_reclaim_iter reclaim_iter[DEF_PRIORITY + 1];
 
-       struct zone_reclaim_stat reclaim_stat;
        struct rb_node          tree_node;      /* RB tree node */
        unsigned long long      usage_in_excess;/* Set to the value by which */
                                                /* the soft limit is exceeded */
@@ -1116,11 +1115,6 @@ void mem_cgroup_lru_del_list(struct page *page, enum lru_list lru)
        mz->lru_size[lru] -= 1 << compound_order(page);
 }
 
-void mem_cgroup_lru_del(struct page *page)
-{
-       mem_cgroup_lru_del_list(page, page_lru(page));
-}
-
 /**
  * mem_cgroup_lru_move_lists - account for moving a page between lrus
  * @zone: zone of the page
@@ -1149,15 +1143,25 @@ struct lruvec *mem_cgroup_lru_move_lists(struct zone *zone,
  * Checks whether the given memcg is the same as root_memcg or lies
  * within root_memcg's hierarchy subtree.
  */
+bool __mem_cgroup_same_or_subtree(const struct mem_cgroup *root_memcg,
+                                 struct mem_cgroup *memcg)
+{
+       if (root_memcg == memcg)
+               return true;
+       if (!root_memcg->use_hierarchy)
+               return false;
+       return css_is_ancestor(&memcg->css, &root_memcg->css);
+}
+
 static bool mem_cgroup_same_or_subtree(const struct mem_cgroup *root_memcg,
-               struct mem_cgroup *memcg)
+                                      struct mem_cgroup *memcg)
 {
-       if (root_memcg != memcg) {
-               return (root_memcg->use_hierarchy &&
-                       css_is_ancestor(&memcg->css, &root_memcg->css));
-       }
+       bool ret;
 
-       return true;
+       rcu_read_lock();
+       ret = __mem_cgroup_same_or_subtree(root_memcg, memcg);
+       rcu_read_unlock();
+       return ret;
 }
 
 int task_in_mem_cgroup(struct task_struct *task, const struct mem_cgroup *memcg)
@@ -1233,16 +1237,6 @@ int mem_cgroup_inactive_file_is_low(struct mem_cgroup *memcg, struct zone *zone)
        return (active > inactive);
 }
 
-struct zone_reclaim_stat *mem_cgroup_get_reclaim_stat(struct mem_cgroup *memcg,
-                                                     struct zone *zone)
-{
-       int nid = zone_to_nid(zone);
-       int zid = zone_idx(zone);
-       struct mem_cgroup_per_zone *mz = mem_cgroup_zoneinfo(memcg, nid, zid);
-
-       return &mz->reclaim_stat;
-}
-
 struct zone_reclaim_stat *
 mem_cgroup_get_reclaim_stat_from_page(struct page *page)
 {
@@ -1258,7 +1252,7 @@ mem_cgroup_get_reclaim_stat_from_page(struct page *page)
        /* Ensure pc->mem_cgroup is visible after reading PCG_USED. */
        smp_rmb();
        mz = page_cgroup_zoneinfo(pc->mem_cgroup, page);
-       return &mz->reclaim_stat;
+       return &mz->lruvec.reclaim_stat;
 }
 
 #define mem_cgroup_from_res_counter(counter, member)   \
@@ -1634,7 +1628,7 @@ int mem_cgroup_select_victim_node(struct mem_cgroup *memcg)
  * unused nodes. But scan_nodes is lazily updated and may not contain
  * enough new information. We need to double-check.
  */
-bool mem_cgroup_reclaimable(struct mem_cgroup *memcg, bool noswap)
+static bool mem_cgroup_reclaimable(struct mem_cgroup *memcg, bool noswap)
 {
        int nid;
 
@@ -1669,7 +1663,7 @@ int mem_cgroup_select_victim_node(struct mem_cgroup *memcg)
        return 0;
 }
 
-bool mem_cgroup_reclaimable(struct mem_cgroup *memcg, bool noswap)
+static bool mem_cgroup_reclaimable(struct mem_cgroup *memcg, bool noswap)
 {
        return test_mem_cgroup_node_reclaimable(memcg, 0, noswap);
 }
@@ -1843,7 +1837,8 @@ static void memcg_oom_recover(struct mem_cgroup *memcg)
 /*
  * Try to call the OOM killer; returns false if we should exit the memory-reclaim loop.
  */
-bool mem_cgroup_handle_oom(struct mem_cgroup *memcg, gfp_t mask, int order)
+static bool mem_cgroup_handle_oom(struct mem_cgroup *memcg, gfp_t mask,
+                                 int order)
 {
        struct oom_wait_info owait;
        bool locked, need_to_kill;
@@ -2845,24 +2840,7 @@ __mem_cgroup_commit_charge_swapin(struct page *page, struct mem_cgroup *memcg,
         */
        if (do_swap_account && PageSwapCache(page)) {
                swp_entry_t ent = {.val = page_private(page)};
-               struct mem_cgroup *swap_memcg;
-               unsigned short id;
-
-               id = swap_cgroup_record(ent, 0);
-               rcu_read_lock();
-               swap_memcg = mem_cgroup_lookup(id);
-               if (swap_memcg) {
-                       /*
-                        * This recorded memcg can be obsolete one. So, avoid
-                        * calling css_tryget
-                        */
-                       if (!mem_cgroup_is_root(swap_memcg))
-                               res_counter_uncharge(&swap_memcg->memsw,
-                                                    PAGE_SIZE);
-                       mem_cgroup_swap_statistics(swap_memcg, false);
-                       mem_cgroup_put(swap_memcg);
-               }
-               rcu_read_unlock();
+               mem_cgroup_uncharge_swap(ent);
        }
        /*
         * At swapin, we may charge the account of a cgroup which has no tasks.
@@ -3155,7 +3133,6 @@ void mem_cgroup_uncharge_swap(swp_entry_t ent)
  * @entry: swap entry to be moved
  * @from:  mem_cgroup which the entry is moved from
  * @to:  mem_cgroup which the entry is moved to
- * @need_fixup: whether we should fixup res_counters and refcounts.
  *
  * It succeeds only when the swap_cgroup's record for this entry matches
  * the mem_cgroup id of @from.
@@ -3166,7 +3143,7 @@ void mem_cgroup_uncharge_swap(swp_entry_t ent)
  * both res and memsw, and called css_get().
  */
 static int mem_cgroup_move_swap_account(swp_entry_t entry,
-               struct mem_cgroup *from, struct mem_cgroup *to, bool need_fixup)
+                               struct mem_cgroup *from, struct mem_cgroup *to)
 {
        unsigned short old_id, new_id;
 
@@ -3185,24 +3162,13 @@ static int mem_cgroup_move_swap_account(swp_entry_t entry,
                 * swap-in, the refcount of @to might be decreased to 0.
                 */
                mem_cgroup_get(to);
-               if (need_fixup) {
-                       if (!mem_cgroup_is_root(from))
-                               res_counter_uncharge(&from->memsw, PAGE_SIZE);
-                       mem_cgroup_put(from);
-                       /*
-                        * we charged both to->res and to->memsw, so we should
-                        * uncharge to->res.
-                        */
-                       if (!mem_cgroup_is_root(to))
-                               res_counter_uncharge(&to->res, PAGE_SIZE);
-               }
                return 0;
        }
        return -EINVAL;
 }
 #else
 static inline int mem_cgroup_move_swap_account(swp_entry_t entry,
-               struct mem_cgroup *from, struct mem_cgroup *to, bool need_fixup)
+                               struct mem_cgroup *from, struct mem_cgroup *to)
 {
        return -EINVAL;
 }
@@ -3363,7 +3329,7 @@ void mem_cgroup_end_migration(struct mem_cgroup *memcg,
 void mem_cgroup_replace_page_cache(struct page *oldpage,
                                  struct page *newpage)
 {
-       struct mem_cgroup *memcg;
+       struct mem_cgroup *memcg = NULL;
        struct page_cgroup *pc;
        enum charge_type type = MEM_CGROUP_CHARGE_TYPE_CACHE;
 
@@ -3373,11 +3339,20 @@ void mem_cgroup_replace_page_cache(struct page *oldpage,
        pc = lookup_page_cgroup(oldpage);
        /* fix accounting on old pages */
        lock_page_cgroup(pc);
-       memcg = pc->mem_cgroup;
-       mem_cgroup_charge_statistics(memcg, false, -1);
-       ClearPageCgroupUsed(pc);
+       if (PageCgroupUsed(pc)) {
+               memcg = pc->mem_cgroup;
+               mem_cgroup_charge_statistics(memcg, false, -1);
+               ClearPageCgroupUsed(pc);
+       }
        unlock_page_cgroup(pc);
 
+       /*
+        * When called from shmem_replace_page(), in some cases the
+        * oldpage has already been charged, and in some cases not.
+        */
+       if (!memcg)
+               return;
+
        if (PageSwapBacked(oldpage))
                type = MEM_CGROUP_CHARGE_TYPE_SHMEM;
 
@@ -3793,7 +3768,7 @@ try_to_free:
        goto move_account;
 }
 
-int mem_cgroup_force_empty_write(struct cgroup *cont, unsigned int event)
+static int mem_cgroup_force_empty_write(struct cgroup *cont, unsigned int event)
 {
        return mem_cgroup_force_empty(mem_cgroup_from_cont(cont), true);
 }
@@ -4074,7 +4049,7 @@ struct mcs_total_stat {
        s64 stat[NR_MCS_STAT];
 };
 
-struct {
+static struct {
        char *local_name;
        char *total_name;
 } memcg_stat_strings[NR_MCS_STAT] = {
@@ -4226,21 +4201,19 @@ static int mem_control_stat_show(struct cgroup *cont, struct cftype *cft,
        {
                int nid, zid;
                struct mem_cgroup_per_zone *mz;
+               struct zone_reclaim_stat *rstat;
                unsigned long recent_rotated[2] = {0, 0};
                unsigned long recent_scanned[2] = {0, 0};
 
                for_each_online_node(nid)
                        for (zid = 0; zid < MAX_NR_ZONES; zid++) {
                                mz = mem_cgroup_zoneinfo(memcg, nid, zid);
+                               rstat = &mz->lruvec.reclaim_stat;
 
-                               recent_rotated[0] +=
-                                       mz->reclaim_stat.recent_rotated[0];
-                               recent_rotated[1] +=
-                                       mz->reclaim_stat.recent_rotated[1];
-                               recent_scanned[0] +=
-                                       mz->reclaim_stat.recent_scanned[0];
-                               recent_scanned[1] +=
-                                       mz->reclaim_stat.recent_scanned[1];
+                               recent_rotated[0] += rstat->recent_rotated[0];
+                               recent_rotated[1] += rstat->recent_rotated[1];
+                               recent_scanned[0] += rstat->recent_scanned[0];
+                               recent_scanned[1] += rstat->recent_scanned[1];
                        }
                cb->fill(cb, "recent_rotated_anon", recent_rotated[0]);
                cb->fill(cb, "recent_rotated_file", recent_rotated[1]);
@@ -5135,7 +5108,7 @@ static struct page *mc_handle_present_pte(struct vm_area_struct *vma,
                return NULL;
        if (PageAnon(page)) {
                /* we don't move shared anon */
-               if (!move_anon() || page_mapcount(page) > 2)
+               if (!move_anon())
                        return NULL;
        } else if (!move_file())
                /* we ignore mapcount for file pages */
@@ -5146,32 +5119,37 @@ static struct page *mc_handle_present_pte(struct vm_area_struct *vma,
        return page;
 }
 
+#ifdef CONFIG_SWAP
 static struct page *mc_handle_swap_pte(struct vm_area_struct *vma,
                        unsigned long addr, pte_t ptent, swp_entry_t *entry)
 {
-       int usage_count;
        struct page *page = NULL;
        swp_entry_t ent = pte_to_swp_entry(ptent);
 
        if (!move_anon() || non_swap_entry(ent))
                return NULL;
-       usage_count = mem_cgroup_count_swap_user(ent, &page);
-       if (usage_count > 1) { /* we don't move shared anon */
-               if (page)
-                       put_page(page);
-               return NULL;
-       }
+       /*
+        * Because lookup_swap_cache() updates statistics counters,
+        * we call find_get_page() with swapper_space directly.
+        */
+       page = find_get_page(&swapper_space, ent.val);
        if (do_swap_account)
                entry->val = ent.val;
 
        return page;
 }
+#else
+static struct page *mc_handle_swap_pte(struct vm_area_struct *vma,
+                       unsigned long addr, pte_t ptent, swp_entry_t *entry)
+{
+       return NULL;
+}
+#endif
 
 static struct page *mc_handle_file_pte(struct vm_area_struct *vma,
                        unsigned long addr, pte_t ptent, swp_entry_t *entry)
 {
        struct page *page = NULL;
-       struct inode *inode;
        struct address_space *mapping;
        pgoff_t pgoff;
 
@@ -5180,7 +5158,6 @@ static struct page *mc_handle_file_pte(struct vm_area_struct *vma,
        if (!move_file())
                return NULL;
 
-       inode = vma->vm_file->f_path.dentry->d_inode;
        mapping = vma->vm_file->f_mapping;
        if (pte_none(ptent))
                pgoff = linear_page_index(vma, addr);
@@ -5521,8 +5498,7 @@ put:                      /* get_mctgt_type() gets the page */
                        break;
                case MC_TARGET_SWAP:
                        ent = target.ent;
-                       if (!mem_cgroup_move_swap_account(ent,
-                                               mc.from, mc.to, false)) {
+                       if (!mem_cgroup_move_swap_account(ent, mc.from, mc.to)) {
                                mc.precharge--;
                                /* we fixup refcnts and charges later. */
                                mc.moved_swap++;
@@ -5598,7 +5574,6 @@ static void mem_cgroup_move_task(struct cgroup *cont,
        if (mm) {
                if (mc.to)
                        mem_cgroup_move_charge(mm);
-               put_swap_token(mm);
                mmput(mm);
        }
        if (mc.to)
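
The new __mem_cgroup_same_or_subtree()/mem_cgroup_same_or_subtree() pair above follows the kernel's double-underscore convention: the __-prefixed variant assumes the caller already holds the required lock (here, the RCU read lock that keeps the css ancestry stable for css_is_ancestor()), while the plain-named wrapper takes and drops the lock itself. Below is a minimal userspace sketch of the same split, using a pthread rwlock as a stand-in for rcu_read_lock()/rcu_read_unlock(); all names in it are illustrative, not kernel API.

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

/* Toy stand-in for a memcg: each node points at its parent. */
struct node {
        struct node *parent;
};

/* Stand-in for the RCU read-side lock protecting the ancestry chain. */
static pthread_rwlock_t tree_lock = PTHREAD_RWLOCK_INITIALIZER;

/*
 * __same_or_subtree - the caller must already hold tree_lock for
 * reading, just as callers of __mem_cgroup_same_or_subtree() must be
 * inside an RCU read-side critical section.
 */
static bool __same_or_subtree(const struct node *root, const struct node *n)
{
        /* Walk up the parent chain looking for root. */
        for (; n; n = n->parent)
                if (n == root)
                        return true;
        return false;
}

/* Plain-named wrapper: takes and drops the lock around the check. */
static bool same_or_subtree(const struct node *root, const struct node *n)
{
        bool ret;

        pthread_rwlock_rdlock(&tree_lock);
        ret = __same_or_subtree(root, n);
        pthread_rwlock_unlock(&tree_lock);
        return ret;
}

int main(void)
{
        struct node root = { .parent = NULL };
        struct node child = { .parent = &root };

        printf("%d\n", same_or_subtree(&root, &child));        /* 1 */
        printf("%d\n", same_or_subtree(&child, &root));        /* 0 */
        return 0;
}

The point of exporting both variants, as this patch does, is that hot paths already running under rcu_read_lock() can call the __ variant without re-entering the lock, while casual callers get a safe self-locking interface.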