diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index f4f41c36e70325c00a56e37cba22f80e7384336d..53b8201b31eb6f21bf66a3ca26f07fbf39f21c65 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -1396,17 +1396,6 @@ int mem_cgroup_inactive_anon_is_low(struct lruvec *lruvec)
        return inactive * inactive_ratio < active;
 }
 
-int mem_cgroup_inactive_file_is_low(struct lruvec *lruvec)
-{
-       unsigned long active;
-       unsigned long inactive;
-
-       inactive = mem_cgroup_get_lru_size(lruvec, LRU_INACTIVE_FILE);
-       active = mem_cgroup_get_lru_size(lruvec, LRU_ACTIVE_FILE);
-
-       return (active > inactive);
-}
-
 #define mem_cgroup_from_res_counter(counter, member)   \
        container_of(counter, struct mem_cgroup, member)
 
@@ -2307,6 +2296,17 @@ static void drain_local_stock(struct work_struct *dummy)
        clear_bit(FLUSHING_CACHED_CHARGE, &stock->flags);
 }
 
+static void __init memcg_stock_init(void)
+{
+       int cpu;
+
+       for_each_possible_cpu(cpu) {
+               struct memcg_stock_pcp *stock =
+                                       &per_cpu(memcg_stock, cpu);
+               INIT_WORK(&stock->work, drain_local_stock);
+       }
+}
+
 /*
  * Cache charges(val) which is from res_counter, to local per_cpu area.
  * This will be consumed by consume_stock() function, later.
@@ -5025,6 +5025,7 @@ out:
        return ret;
 }
 
+#ifdef CONFIG_MEMCG_KMEM
 static int memcg_propagate_kmem(struct mem_cgroup *memcg)
 {
        int ret = 0;
@@ -5033,7 +5034,6 @@ static int memcg_propagate_kmem(struct mem_cgroup *memcg)
                goto out;
 
        memcg->kmem_account_flags = parent->kmem_account_flags;
-#ifdef CONFIG_MEMCG_KMEM
        /*
         * When that happen, we need to disable the static branch only on those
         * memcgs that enabled it. To achieve this, we would be forced to
@@ -5059,10 +5059,10 @@ static int memcg_propagate_kmem(struct mem_cgroup *memcg)
        mutex_lock(&set_limit_mutex);
        ret = memcg_update_cache_sizes(memcg);
        mutex_unlock(&set_limit_mutex);
-#endif
 out:
        return ret;
 }
+#endif /* CONFIG_MEMCG_KMEM */
 
 /*
  * The user of this function is...
@@ -6063,7 +6063,7 @@ struct mem_cgroup *parent_mem_cgroup(struct mem_cgroup *memcg)
 }
 EXPORT_SYMBOL(parent_mem_cgroup);
 
-static int mem_cgroup_soft_limit_tree_init(void)
+static void __init mem_cgroup_soft_limit_tree_init(void)
 {
        struct mem_cgroup_tree_per_node *rtpn;
        struct mem_cgroup_tree_per_zone *rtpz;
@@ -6074,8 +6074,7 @@ static int mem_cgroup_soft_limit_tree_init(void)
                if (!node_state(node, N_NORMAL_MEMORY))
                        tmp = -1;
                rtpn = kzalloc_node(sizeof(*rtpn), GFP_KERNEL, tmp);
-               if (!rtpn)
-                       goto err_cleanup;
+               BUG_ON(!rtpn);
 
                soft_limit_tree.rb_tree_per_node[node] = rtpn;
 
@@ -6085,17 +6084,6 @@ static int mem_cgroup_soft_limit_tree_init(void)
                        spin_lock_init(&rtpz->lock);
                }
        }
-       return 0;
-
-err_cleanup:
-       for_each_node(node) {
-               if (!soft_limit_tree.rb_tree_per_node[node])
-                       break;
-               kfree(soft_limit_tree.rb_tree_per_node[node]);
-               soft_limit_tree.rb_tree_per_node[node] = NULL;
-       }
-       return 1;
-
 }
 
 static struct cgroup_subsys_state * __ref
@@ -6115,17 +6103,7 @@ mem_cgroup_css_alloc(struct cgroup *cont)
 
        /* root ? */
        if (cont->parent == NULL) {
-               int cpu;
-
-               if (mem_cgroup_soft_limit_tree_init())
-                       goto free_out;
                root_mem_cgroup = memcg;
-               for_each_possible_cpu(cpu) {
-                       struct memcg_stock_pcp *stock =
-                                               &per_cpu(memcg_stock, cpu);
-                       INIT_WORK(&stock->work, drain_local_stock);
-               }
-
                res_counter_init(&memcg->res, NULL);
                res_counter_init(&memcg->memsw, NULL);
                res_counter_init(&memcg->kmem, NULL);
@@ -6196,6 +6174,8 @@ mem_cgroup_css_online(struct cgroup *cont)
                 * call __mem_cgroup_free, so return directly
                 */
                mem_cgroup_put(memcg);
+               if (parent->use_hierarchy)
+                       mem_cgroup_put(parent);
        }
        return error;
 }
@@ -6850,15 +6830,19 @@ static void __init enable_swap_cgroup(void)
 #endif
 
 /*
- * The rest of init is performed during ->css_alloc() for root css which
- * happens before initcalls.  hotcpu_notifier() can't be done together as
- * it would introduce circular locking by adding cgroup_lock -> cpu hotplug
- * dependency.  Do it from a subsys_initcall().
+ * subsys_initcall() for memory controller.
+ *
+ * Some parts like hotcpu_notifier() have to be initialized from this context
+ * because of lock dependencies (cgroup_lock -> cpu hotplug) but basically
+ * everything that doesn't depend on a specific mem_cgroup structure should
+ * be initialized from here.
  */
 static int __init mem_cgroup_init(void)
 {
        hotcpu_notifier(memcg_cpu_hotplug_callback, 0);
        enable_swap_cgroup();
+       mem_cgroup_soft_limit_tree_init();
+       memcg_stock_init();
        return 0;
 }
 subsys_initcall(mem_cgroup_init);
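
For reference, a minimal standalone sketch of the pattern this patch adopts: per-cpu state that does not depend on any particular mem_cgroup is initialized once from a subsys_initcall() rather than from the root cgroup's ->css_alloc() path. All identifiers below (example_stock_pcp, example_stock, example_drain_local, example_subsys_init) are hypothetical stand-ins for illustration, not names from the patch.

/*
 * Illustrative sketch only, not part of the patch above.
 * Per-cpu work items are set up once, early in boot, from a
 * subsys_initcall(), before any non-root cgroup can exist.
 */
#include <linux/cpumask.h>
#include <linux/init.h>
#include <linux/percpu.h>
#include <linux/workqueue.h>

struct example_stock_pcp {
	struct work_struct work;
	unsigned long nr_pages;
};

static DEFINE_PER_CPU(struct example_stock_pcp, example_stock);

static void example_drain_local(struct work_struct *dummy)
{
	/* Drain this CPU's cached charges back to the shared counter. */
}

static int __init example_subsys_init(void)
{
	int cpu;

	/* No cgroup-specific state is needed here, so this can run early. */
	for_each_possible_cpu(cpu) {
		struct example_stock_pcp *stock = &per_cpu(example_stock, cpu);

		INIT_WORK(&stock->work, example_drain_local);
	}
	return 0;
}
subsys_initcall(example_subsys_init);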