Merge branch 'mlxsw-fixes'
diff --git a/drivers/clk/clk.c b/drivers/clk/clk.c
index d584004f7af7d193d5f2646b482f26ce442deeef..820a939fb6bbf40dfada897360e83ecd43c285a6 100644
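This diff to drivers/clk/clk.c does three things. It adds small helpers that pair each core operation with its lock (clk_core_prepare_lock(), clk_core_unprepare_lock(), clk_core_enable_lock(), clk_core_disable_lock()) plus the combined clk_core_prepare_enable() and clk_core_disable_unprepare(), and rewrites clk_prepare(), clk_unprepare(), clk_enable() and clk_disable() on top of them. It moves the late-initcall "disable unused clocks" machinery below those helpers and teaches it, the reparenting path and the rate-change path about the CLK_OPS_PARENT_ENABLE flag, which marks clocks whose ops need a running parent. It also switches the enable/disable tracepoints in clk_core_enable()/clk_core_disable() to their _rcuidle variants, which are safe to emit from contexts where RCU is not watching (e.g. idle paths), and folds in two small cleanups (the kernel-doc of __clk_determine_rate() and a dead ret in clk_core_set_rate_nolock()). Hedged usage sketches follow several of the hunks below.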
@@ -172,104 +172,6 @@ static bool clk_core_is_enabled(struct clk_core *core)
        return core->ops->is_enabled(core->hw);
 }
 
-static void clk_unprepare_unused_subtree(struct clk_core *core)
-{
-       struct clk_core *child;
-
-       lockdep_assert_held(&prepare_lock);
-
-       hlist_for_each_entry(child, &core->children, child_node)
-               clk_unprepare_unused_subtree(child);
-
-       if (core->prepare_count)
-               return;
-
-       if (core->flags & CLK_IGNORE_UNUSED)
-               return;
-
-       if (clk_core_is_prepared(core)) {
-               trace_clk_unprepare(core);
-               if (core->ops->unprepare_unused)
-                       core->ops->unprepare_unused(core->hw);
-               else if (core->ops->unprepare)
-                       core->ops->unprepare(core->hw);
-               trace_clk_unprepare_complete(core);
-       }
-}
-
-static void clk_disable_unused_subtree(struct clk_core *core)
-{
-       struct clk_core *child;
-       unsigned long flags;
-
-       lockdep_assert_held(&prepare_lock);
-
-       hlist_for_each_entry(child, &core->children, child_node)
-               clk_disable_unused_subtree(child);
-
-       flags = clk_enable_lock();
-
-       if (core->enable_count)
-               goto unlock_out;
-
-       if (core->flags & CLK_IGNORE_UNUSED)
-               goto unlock_out;
-
-       /*
-        * some gate clocks have special needs during the disable-unused
-        * sequence.  call .disable_unused if available, otherwise fall
-        * back to .disable
-        */
-       if (clk_core_is_enabled(core)) {
-               trace_clk_disable(core);
-               if (core->ops->disable_unused)
-                       core->ops->disable_unused(core->hw);
-               else if (core->ops->disable)
-                       core->ops->disable(core->hw);
-               trace_clk_disable_complete(core);
-       }
-
-unlock_out:
-       clk_enable_unlock(flags);
-}
-
-static bool clk_ignore_unused;
-static int __init clk_ignore_unused_setup(char *__unused)
-{
-       clk_ignore_unused = true;
-       return 1;
-}
-__setup("clk_ignore_unused", clk_ignore_unused_setup);
-
-static int clk_disable_unused(void)
-{
-       struct clk_core *core;
-
-       if (clk_ignore_unused) {
-               pr_warn("clk: Not disabling unused clocks\n");
-               return 0;
-       }
-
-       clk_prepare_lock();
-
-       hlist_for_each_entry(core, &clk_root_list, child_node)
-               clk_disable_unused_subtree(core);
-
-       hlist_for_each_entry(core, &clk_orphan_list, child_node)
-               clk_disable_unused_subtree(core);
-
-       hlist_for_each_entry(core, &clk_root_list, child_node)
-               clk_unprepare_unused_subtree(core);
-
-       hlist_for_each_entry(core, &clk_orphan_list, child_node)
-               clk_unprepare_unused_subtree(core);
-
-       clk_prepare_unlock();
-
-       return 0;
-}
-late_initcall_sync(clk_disable_unused);
-
 /***    helper functions   ***/
 
 const char *__clk_get_name(const struct clk *clk)
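(Note that the block removed in the hunk above is moved, not dropped: the same functions reappear near the end of this diff, after the new lock helpers they now use, with CLK_OPS_PARENT_ENABLE handling added to clk_disable_unused_subtree().)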
@@ -591,6 +493,13 @@ static void clk_core_unprepare(struct clk_core *core)
        clk_core_unprepare(core->parent);
 }
 
+static void clk_core_unprepare_lock(struct clk_core *core)
+{
+       clk_prepare_lock();
+       clk_core_unprepare(core);
+       clk_prepare_unlock();
+}
+
 /**
  * clk_unprepare - undo preparation of a clock source
  * @clk: the clk being unprepared
@@ -607,9 +516,7 @@ void clk_unprepare(struct clk *clk)
        if (IS_ERR_OR_NULL(clk))
                return;
 
-       clk_prepare_lock();
-       clk_core_unprepare(clk->core);
-       clk_prepare_unlock();
+       clk_core_unprepare_lock(clk->core);
 }
 EXPORT_SYMBOL_GPL(clk_unprepare);
 
@@ -645,6 +552,17 @@ static int clk_core_prepare(struct clk_core *core)
        return 0;
 }
 
+static int clk_core_prepare_lock(struct clk_core *core)
+{
+       int ret;
+
+       clk_prepare_lock();
+       ret = clk_core_prepare(core);
+       clk_prepare_unlock();
+
+       return ret;
+}
+
 /**
  * clk_prepare - prepare a clock source
  * @clk: the clk being prepared
@@ -659,16 +577,10 @@ static int clk_core_prepare(struct clk_core *core)
  */
 int clk_prepare(struct clk *clk)
 {
-       int ret;
-
        if (!clk)
                return 0;
 
-       clk_prepare_lock();
-       ret = clk_core_prepare(clk->core);
-       clk_prepare_unlock();
-
-       return ret;
+       return clk_core_prepare_lock(clk->core);
 }
 EXPORT_SYMBOL_GPL(clk_prepare);
 
@@ -688,16 +600,25 @@ static void clk_core_disable(struct clk_core *core)
        if (--core->enable_count > 0)
                return;
 
-       trace_clk_disable(core);
+       trace_clk_disable_rcuidle(core);
 
        if (core->ops->disable)
                core->ops->disable(core->hw);
 
-       trace_clk_disable_complete(core);
+       trace_clk_disable_complete_rcuidle(core);
 
        clk_core_disable(core->parent);
 }
 
+static void clk_core_disable_lock(struct clk_core *core)
+{
+       unsigned long flags;
+
+       flags = clk_enable_lock();
+       clk_core_disable(core);
+       clk_enable_unlock(flags);
+}
+
 /**
  * clk_disable - gate a clock
  * @clk: the clk being gated
@@ -712,14 +633,10 @@ static void clk_core_disable(struct clk_core *core)
  */
 void clk_disable(struct clk *clk)
 {
-       unsigned long flags;
-
        if (IS_ERR_OR_NULL(clk))
                return;
 
-       flags = clk_enable_lock();
-       clk_core_disable(clk->core);
-       clk_enable_unlock(flags);
+       clk_core_disable_lock(clk->core);
 }
 EXPORT_SYMBOL_GPL(clk_disable);
 
@@ -741,12 +658,12 @@ static int clk_core_enable(struct clk_core *core)
                if (ret)
                        return ret;
 
-               trace_clk_enable(core);
+               trace_clk_enable_rcuidle(core);
 
                if (core->ops->enable)
                        ret = core->ops->enable(core->hw);
 
-               trace_clk_enable_complete(core);
+               trace_clk_enable_complete_rcuidle(core);
 
                if (ret) {
                        clk_core_disable(core->parent);
@@ -758,6 +675,18 @@ static int clk_core_enable(struct clk_core *core)
        return 0;
 }
 
+static int clk_core_enable_lock(struct clk_core *core)
+{
+       unsigned long flags;
+       int ret;
+
+       flags = clk_enable_lock();
+       ret = clk_core_enable(core);
+       clk_enable_unlock(flags);
+
+       return ret;
+}
+
 /**
  * clk_enable - ungate a clock
  * @clk: the clk being ungated
@@ -773,19 +702,136 @@ static int clk_core_enable(struct clk_core *core)
  */
 int clk_enable(struct clk *clk)
 {
-       unsigned long flags;
-       int ret;
-
        if (!clk)
                return 0;
 
+       return clk_core_enable_lock(clk->core);
+}
+EXPORT_SYMBOL_GPL(clk_enable);
+
+static int clk_core_prepare_enable(struct clk_core *core)
+{
+       int ret;
+
+       ret = clk_core_prepare_lock(core);
+       if (ret)
+               return ret;
+
+       ret = clk_core_enable_lock(core);
+       if (ret)
+               clk_core_unprepare_lock(core);
+
+       return ret;
+}
+
+static void clk_core_disable_unprepare(struct clk_core *core)
+{
+       clk_core_disable_lock(core);
+       clk_core_unprepare_lock(core);
+}
+
+static void clk_unprepare_unused_subtree(struct clk_core *core)
+{
+       struct clk_core *child;
+
+       lockdep_assert_held(&prepare_lock);
+
+       hlist_for_each_entry(child, &core->children, child_node)
+               clk_unprepare_unused_subtree(child);
+
+       if (core->prepare_count)
+               return;
+
+       if (core->flags & CLK_IGNORE_UNUSED)
+               return;
+
+       if (clk_core_is_prepared(core)) {
+               trace_clk_unprepare(core);
+               if (core->ops->unprepare_unused)
+                       core->ops->unprepare_unused(core->hw);
+               else if (core->ops->unprepare)
+                       core->ops->unprepare(core->hw);
+               trace_clk_unprepare_complete(core);
+       }
+}
+
+static void clk_disable_unused_subtree(struct clk_core *core)
+{
+       struct clk_core *child;
+       unsigned long flags;
+
+       lockdep_assert_held(&prepare_lock);
+
+       hlist_for_each_entry(child, &core->children, child_node)
+               clk_disable_unused_subtree(child);
+
+       if (core->flags & CLK_OPS_PARENT_ENABLE)
+               clk_core_prepare_enable(core->parent);
+
        flags = clk_enable_lock();
-       ret = clk_core_enable(clk->core);
+
+       if (core->enable_count)
+               goto unlock_out;
+
+       if (core->flags & CLK_IGNORE_UNUSED)
+               goto unlock_out;
+
+       /*
+        * some gate clocks have special needs during the disable-unused
+        * sequence.  call .disable_unused if available, otherwise fall
+        * back to .disable
+        */
+       if (clk_core_is_enabled(core)) {
+               trace_clk_disable(core);
+               if (core->ops->disable_unused)
+                       core->ops->disable_unused(core->hw);
+               else if (core->ops->disable)
+                       core->ops->disable(core->hw);
+               trace_clk_disable_complete(core);
+       }
+
+unlock_out:
        clk_enable_unlock(flags);
+       if (core->flags & CLK_OPS_PARENT_ENABLE)
+               clk_core_disable_unprepare(core->parent);
+}
 
-       return ret;
+static bool clk_ignore_unused;
+static int __init clk_ignore_unused_setup(char *__unused)
+{
+       clk_ignore_unused = true;
+       return 1;
 }
-EXPORT_SYMBOL_GPL(clk_enable);
+__setup("clk_ignore_unused", clk_ignore_unused_setup);
+
+static int clk_disable_unused(void)
+{
+       struct clk_core *core;
+
+       if (clk_ignore_unused) {
+               pr_warn("clk: Not disabling unused clocks\n");
+               return 0;
+       }
+
+       clk_prepare_lock();
+
+       hlist_for_each_entry(core, &clk_root_list, child_node)
+               clk_disable_unused_subtree(core);
+
+       hlist_for_each_entry(core, &clk_orphan_list, child_node)
+               clk_disable_unused_subtree(core);
+
+       hlist_for_each_entry(core, &clk_root_list, child_node)
+               clk_unprepare_unused_subtree(core);
+
+       hlist_for_each_entry(core, &clk_orphan_list, child_node)
+               clk_unprepare_unused_subtree(core);
+
+       clk_prepare_unlock();
+
+       return 0;
+}
+late_initcall_sync(clk_disable_unused);
 
 static int clk_core_round_rate_nolock(struct clk_core *core,
                                      struct clk_rate_request *req)
@@ -828,9 +874,7 @@ static int clk_core_round_rate_nolock(struct clk_core *core,
 /**
  * __clk_determine_rate - get the closest rate actually supported by a clock
  * @hw: determine the rate of this clock
- * @rate: target rate
- * @min_rate: returned rate must be greater than this rate
- * @max_rate: returned rate must be less than this rate
+ * @req: target rate request
  *
  * Useful for clk_ops such as .set_rate and .determine_rate.
  */
@@ -1128,7 +1172,9 @@ static struct clk_core *__clk_set_parent_before(struct clk_core *core,
        struct clk_core *old_parent = core->parent;
 
        /*
-        * Migrate prepare state between parents and prevent race with
+        * 1. enable parents for CLK_OPS_PARENT_ENABLE clock
+        *
+        * 2. Migrate prepare state between parents and prevent race with
         * clk_enable().
         *
         * If the clock is not prepared, then a race with
@@ -1144,12 +1190,17 @@ static struct clk_core *__clk_set_parent_before(struct clk_core *core,
         *
         * See also: Comment for clk_set_parent() below.
         */
+
+       /* enable old_parent & parent if CLK_OPS_PARENT_ENABLE is set */
+       if (core->flags & CLK_OPS_PARENT_ENABLE) {
+               clk_core_prepare_enable(old_parent);
+               clk_core_prepare_enable(parent);
+       }
+
+       /* migrate prepare count if > 0 */
        if (core->prepare_count) {
-               clk_core_prepare(parent);
-               flags = clk_enable_lock();
-               clk_core_enable(parent);
-               clk_core_enable(core);
-               clk_enable_unlock(flags);
+               clk_core_prepare_enable(parent);
+               clk_core_enable_lock(core);
        }
 
        /* update the clk tree topology */
@@ -1164,18 +1215,19 @@ static void __clk_set_parent_after(struct clk_core *core,
                                   struct clk_core *parent,
                                   struct clk_core *old_parent)
 {
-       unsigned long flags;
-
        /*
         * Finish the migration of prepare state and undo the changes done
         * for preventing a race with clk_enable().
         */
        if (core->prepare_count) {
-               flags = clk_enable_lock();
-               clk_core_disable(core);
-               clk_core_disable(old_parent);
-               clk_enable_unlock(flags);
-               clk_core_unprepare(old_parent);
+               clk_core_disable_lock(core);
+               clk_core_disable_unprepare(old_parent);
+       }
+
+       /* re-balance ref counting if CLK_OPS_PARENT_ENABLE is set */
+       if (core->flags & CLK_OPS_PARENT_ENABLE) {
+               clk_core_disable_unprepare(parent);
+               clk_core_disable_unprepare(old_parent);
        }
 }
 
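Taken together, __clk_set_parent_before() and __clk_set_parent_after() now bracket the hardware mux write. A simplified paraphrase of this file's __clk_set_parent() showing the order of operations; the function name here is invented and the real function also rolls back on ->set_parent failure:

/* Simplified sketch of __clk_set_parent() in this file (rollback elided). */
static int my_simplified_set_parent(struct clk_core *core,
				    struct clk_core *parent, u8 p_index)
{
	struct clk_core *old_parent;
	int ret = 0;

	/* takes CLK_OPS_PARENT_ENABLE refs on both parents, migrates the
	 * prepare/enable state so a racing clk_enable() never sees a gated
	 * parent, and re-hangs core under parent in the tree */
	old_parent = __clk_set_parent_before(core, parent);

	trace_clk_set_parent(core, parent);

	if (core->ops->set_parent)
		ret = core->ops->set_parent(core->hw, p_index); /* mux write */

	trace_clk_set_parent_complete(core, parent);

	/* finishes the prepare-state migration and drops the temporary
	 * CLK_OPS_PARENT_ENABLE refs taken above */
	__clk_set_parent_after(core, parent, old_parent);

	return ret;
}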
@@ -1422,13 +1474,17 @@ static void clk_change_rate(struct clk_core *core)
        unsigned long best_parent_rate = 0;
        bool skip_set_rate = false;
        struct clk_core *old_parent;
+       struct clk_core *parent = NULL;
 
        old_rate = core->rate;
 
-       if (core->new_parent)
+       if (core->new_parent) {
+               parent = core->new_parent;
                best_parent_rate = core->new_parent->rate;
-       else if (core->parent)
+       } else if (core->parent) {
+               parent = core->parent;
                best_parent_rate = core->parent->rate;
+       }
 
        if (core->flags & CLK_SET_RATE_UNGATE) {
                unsigned long flags;
@@ -1456,6 +1512,9 @@ static void clk_change_rate(struct clk_core *core)
                __clk_set_parent_after(core, core->new_parent, old_parent);
        }
 
+       if (core->flags & CLK_OPS_PARENT_ENABLE)
+               clk_core_prepare_enable(parent);
+
        trace_clk_set_rate(core, core->new_rate);
 
        if (!skip_set_rate && core->ops->set_rate)
@@ -1474,6 +1533,9 @@ static void clk_change_rate(struct clk_core *core)
                clk_core_unprepare(core);
        }
 
+       if (core->flags & CLK_OPS_PARENT_ENABLE)
+               clk_core_disable_unprepare(parent);
+
        if (core->notifier_count && old_rate != core->rate)
                __clk_notify(core, POST_RATE_CHANGE, old_rate, core->rate);
 
@@ -1501,7 +1563,6 @@ static int clk_core_set_rate_nolock(struct clk_core *core,
 {
        struct clk_core *top, *fail_clk;
        unsigned long rate = req_rate;
-       int ret = 0;
 
        if (!core)
                return 0;
@@ -1532,7 +1593,7 @@ static int clk_core_set_rate_nolock(struct clk_core *core,
 
        core->req_rate = req_rate;
 
-       return ret;
+       return 0;
 }
 
 /**