Diffstat (limited to 'drivers/clk/clk.c')
-rw-r--r--	drivers/clk/clk.c | 406
1 file changed, 301 insertions(+), 105 deletions(-)
diff --git a/drivers/clk/clk.c b/drivers/clk/clk.c
index ed87b2405806..934cfd18f72d 100644
--- a/drivers/clk/clk.c
+++ b/drivers/clk/clk.c
@@ -19,14 +19,77 @@
 #include <linux/of.h>
 #include <linux/device.h>
 #include <linux/init.h>
+#include <linux/sched.h>
 
 static DEFINE_SPINLOCK(enable_lock);
 static DEFINE_MUTEX(prepare_lock);
 
+static struct task_struct *prepare_owner;
+static struct task_struct *enable_owner;
+
+static int prepare_refcnt;
+static int enable_refcnt;
+
 static HLIST_HEAD(clk_root_list);
 static HLIST_HEAD(clk_orphan_list);
 static LIST_HEAD(clk_notifier_list);
 
+/***           locking             ***/
+static void clk_prepare_lock(void)
+{
+	if (!mutex_trylock(&prepare_lock)) {
+		if (prepare_owner == current) {
+			prepare_refcnt++;
+			return;
+		}
+		mutex_lock(&prepare_lock);
+	}
+	WARN_ON_ONCE(prepare_owner != NULL);
+	WARN_ON_ONCE(prepare_refcnt != 0);
+	prepare_owner = current;
+	prepare_refcnt = 1;
+}
+
+static void clk_prepare_unlock(void)
+{
+	WARN_ON_ONCE(prepare_owner != current);
+	WARN_ON_ONCE(prepare_refcnt == 0);
+
+	if (--prepare_refcnt)
+		return;
+	prepare_owner = NULL;
+	mutex_unlock(&prepare_lock);
+}
+
+static unsigned long clk_enable_lock(void)
+{
+	unsigned long flags;
+
+	if (!spin_trylock_irqsave(&enable_lock, flags)) {
+		if (enable_owner == current) {
+			enable_refcnt++;
+			return flags;
+		}
+		spin_lock_irqsave(&enable_lock, flags);
+	}
+	WARN_ON_ONCE(enable_owner != NULL);
+	WARN_ON_ONCE(enable_refcnt != 0);
+	enable_owner = current;
+	enable_refcnt = 1;
+	return flags;
+}
+
+static void clk_enable_unlock(unsigned long flags)
+{
+	WARN_ON_ONCE(enable_owner != current);
+	WARN_ON_ONCE(enable_refcnt == 0);
+
+	if (--enable_refcnt)
+		return;
+	enable_owner = NULL;
+	spin_unlock_irqrestore(&enable_lock, flags);
+}
+
 /***    debugfs support    ***/
 
 #ifdef CONFIG_COMMON_CLK_DEBUG
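Aside: the four helpers above are what make the framework reentrant for a single task. If the task already holding prepare_lock (or enable_lock) calls back into the framework, the trylock fails, the owner check matches, and only a refcount is bumped; the unlocked owner comparison is benign because only the owning task can ever observe its own task_struct there. Below is a minimal userspace sketch of the same owner/refcount pattern using pthreads; all names are illustrative and nothing here is part of clk.c:

#include <assert.h>
#include <pthread.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_t owner;	/* meaningful only while depth > 0 */
static int depth;	/* recursion count, touched only by the owner */

static void reentrant_lock(void)
{
	/* Fast path: the current owner may recurse without blocking. */
	if (pthread_mutex_trylock(&lock) != 0) {
		if (depth > 0 && pthread_equal(owner, pthread_self())) {
			depth++;
			return;
		}
		pthread_mutex_lock(&lock);
	}
	assert(depth == 0);
	owner = pthread_self();
	depth = 1;
}

static void reentrant_unlock(void)
{
	assert(depth > 0 && pthread_equal(owner, pthread_self()));
	if (--depth)
		return;
	pthread_mutex_unlock(&lock);
}

The kernel variant additionally threads the saved IRQ flags through clk_enable_lock()/clk_enable_unlock(), since the outermost enable_lock acquisition must disable interrupts and only the outermost release may restore them.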
@@ -69,7 +132,7 @@ static int clk_summary_show(struct seq_file *s, void *data)
 	seq_printf(s, "   clock                        enable_cnt  prepare_cnt  rate\n");
 	seq_printf(s, "---------------------------------------------------------------------\n");
 
-	mutex_lock(&prepare_lock);
+	clk_prepare_lock();
 
 	hlist_for_each_entry(c, &clk_root_list, child_node)
 		clk_summary_show_subtree(s, c, 0);
@@ -77,7 +140,7 @@ static int clk_summary_show(struct seq_file *s, void *data)
 	hlist_for_each_entry(c, &clk_orphan_list, child_node)
 		clk_summary_show_subtree(s, c, 0);
 
-	mutex_unlock(&prepare_lock);
+	clk_prepare_unlock();
 
 	return 0;
 }
@@ -130,7 +193,7 @@ static int clk_dump(struct seq_file *s, void *data)
 
 	seq_printf(s, "{");
 
-	mutex_lock(&prepare_lock);
+	clk_prepare_lock();
 
 	hlist_for_each_entry(c, &clk_root_list, child_node) {
 		if (!first_node)
@@ -144,7 +207,7 @@ static int clk_dump(struct seq_file *s, void *data)
 		clk_dump_subtree(s, c, 0);
 	}
 
-	mutex_unlock(&prepare_lock);
+	clk_prepare_unlock();
 
 	seq_printf(s, "}");
 	return 0;
@@ -280,6 +343,39 @@ out:
 }
 
 /**
+ * clk_debug_reparent - reparent clk node in the debugfs clk tree
+ * @clk: the clk being reparented
+ * @new_parent: the new clk parent, may be NULL
+ *
+ * Rename clk entry in the debugfs clk tree if debugfs has been
+ * initialized. Otherwise it bails out early since the debugfs clk tree
+ * will be created lazily by clk_debug_init as part of a late_initcall.
+ *
+ * Caller must hold prepare_lock.
+ */
+static void clk_debug_reparent(struct clk *clk, struct clk *new_parent)
+{
+	struct dentry *d;
+	struct dentry *new_parent_d;
+
+	if (!inited)
+		return;
+
+	if (new_parent)
+		new_parent_d = new_parent->dentry;
+	else
+		new_parent_d = orphandir;
+
+	d = debugfs_rename(clk->dentry->d_parent, clk->dentry,
+			new_parent_d, clk->name);
+	if (d)
+		clk->dentry = d;
+	else
+		pr_debug("%s: failed to rename debugfs entry for %s\n",
+				__func__, clk->name);
+}
+
+/**
  * clk_debug_init - lazily create the debugfs clk tree visualization
  *
  * clks are often initialized very early during boot before memory can
@@ -316,7 +412,7 @@ static int __init clk_debug_init(void)
 	if (!orphandir)
 		return -ENOMEM;
 
-	mutex_lock(&prepare_lock);
+	clk_prepare_lock();
 
 	hlist_for_each_entry(clk, &clk_root_list, child_node)
 		clk_debug_create_subtree(clk, rootdir);
@@ -326,16 +422,45 @@ static int __init clk_debug_init(void)
 
 	inited = 1;
 
-	mutex_unlock(&prepare_lock);
+	clk_prepare_unlock();
 
 	return 0;
 }
 late_initcall(clk_debug_init);
 #else
 static inline int clk_debug_register(struct clk *clk) { return 0; }
+static inline void clk_debug_reparent(struct clk *clk, struct clk *new_parent)
+{
+}
 #endif
 
 /* caller must hold prepare_lock */
+static void clk_unprepare_unused_subtree(struct clk *clk)
+{
+	struct clk *child;
+
+	if (!clk)
+		return;
+
+	hlist_for_each_entry(child, &clk->children, child_node)
+		clk_unprepare_unused_subtree(child);
+
+	if (clk->prepare_count)
+		return;
+
+	if (clk->flags & CLK_IGNORE_UNUSED)
+		return;
+
+	if (__clk_is_prepared(clk)) {
+		if (clk->ops->unprepare_unused)
+			clk->ops->unprepare_unused(clk->hw);
+		else if (clk->ops->unprepare)
+			clk->ops->unprepare(clk->hw);
+	}
+}
+EXPORT_SYMBOL_GPL(__clk_get_flags);
+
+/* caller must hold prepare_lock */
 static void clk_disable_unused_subtree(struct clk *clk)
 {
 	struct clk *child;
@@ -347,7 +472,7 @@ static void clk_disable_unused_subtree(struct clk *clk)
 	hlist_for_each_entry(child, &clk->children, child_node)
 		clk_disable_unused_subtree(child);
 
-	spin_lock_irqsave(&enable_lock, flags);
+	flags = clk_enable_lock();
 
 	if (clk->enable_count)
 		goto unlock_out;
@@ -368,17 +493,30 @@ static void clk_disable_unused_subtree(struct clk *clk)
 	}
 
 unlock_out:
-	spin_unlock_irqrestore(&enable_lock, flags);
+	clk_enable_unlock(flags);
 
 out:
 	return;
 }
 
+static bool clk_ignore_unused;
+static int __init clk_ignore_unused_setup(char *__unused)
+{
+	clk_ignore_unused = true;
+	return 1;
+}
+__setup("clk_ignore_unused", clk_ignore_unused_setup);
+
 static int clk_disable_unused(void)
 {
 	struct clk *clk;
 
-	mutex_lock(&prepare_lock);
+	if (clk_ignore_unused) {
+		pr_warn("clk: Not disabling unused clocks\n");
+		return 0;
+	}
+
+	clk_prepare_lock();
 
 	hlist_for_each_entry(clk, &clk_root_list, child_node)
 		clk_disable_unused_subtree(clk);
@@ -386,7 +524,13 @@ static int clk_disable_unused(void)
 	hlist_for_each_entry(clk, &clk_orphan_list, child_node)
 		clk_disable_unused_subtree(clk);
 
-	mutex_unlock(&prepare_lock);
+	hlist_for_each_entry(clk, &clk_root_list, child_node)
+		clk_unprepare_unused_subtree(clk);
+
+	hlist_for_each_entry(clk, &clk_orphan_list, child_node)
+		clk_unprepare_unused_subtree(clk);
+
+	clk_prepare_unlock();
 
 	return 0;
 }
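Aside: both unused-clock walkers above gate a node only after recursing into all of its children, i.e. a post-order traversal, so a parent is never switched off while a descendant might still need it; clk_unprepare_unused_subtree() additionally skips anything still prepared or flagged CLK_IGNORE_UNUSED. A tiny, hypothetical C sketch of that ordering on a simplified first-child/next-sibling tree (none of these types are kernel types):

#include <stdio.h>

struct node {
	const char *name;
	struct node *child;	/* first child */
	struct node *sibling;	/* next sibling */
	int use_count;
};

/* Post-order: descendants are gated before their parent. */
static void disable_unused_subtree(struct node *n)
{
	struct node *c;

	if (!n)
		return;

	for (c = n->child; c; c = c->sibling)
		disable_unused_subtree(c);

	if (n->use_count == 0)
		printf("gating %s\n", n->name);
}

The new clk_ignore_unused boot parameter short-circuits the whole sweep, which helps when a clock left running by the bootloader (a debug UART, say) has no Linux driver claiming it yet.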
@@ -451,6 +595,27 @@ unsigned long __clk_get_flags(struct clk *clk)
 	return !clk ? 0 : clk->flags;
 }
 
+bool __clk_is_prepared(struct clk *clk)
+{
+	int ret;
+
+	if (!clk)
+		return false;
+
+	/*
+	 * .is_prepared is optional for clocks that can prepare
+	 * fall back to software usage counter if it is missing
+	 */
+	if (!clk->ops->is_prepared) {
+		ret = clk->prepare_count ? 1 : 0;
+		goto out;
+	}
+
+	ret = clk->ops->is_prepared(clk->hw);
+out:
+	return !!ret;
+}
+
 bool __clk_is_enabled(struct clk *clk)
 {
 	int ret;
@@ -548,9 +713,9 @@ void __clk_unprepare(struct clk *clk)
  */
 void clk_unprepare(struct clk *clk)
 {
-	mutex_lock(&prepare_lock);
+	clk_prepare_lock();
 	__clk_unprepare(clk);
-	mutex_unlock(&prepare_lock);
+	clk_prepare_unlock();
 }
 EXPORT_SYMBOL_GPL(clk_unprepare);
 
@@ -596,9 +761,9 @@ int clk_prepare(struct clk *clk)
 {
 	int ret;
 
-	mutex_lock(&prepare_lock);
+	clk_prepare_lock();
 	ret = __clk_prepare(clk);
-	mutex_unlock(&prepare_lock);
+	clk_prepare_unlock();
 
 	return ret;
 }
@@ -640,9 +805,9 @@ void clk_disable(struct clk *clk)
 {
 	unsigned long flags;
 
-	spin_lock_irqsave(&enable_lock, flags);
+	flags = clk_enable_lock();
 	__clk_disable(clk);
-	spin_unlock_irqrestore(&enable_lock, flags);
+	clk_enable_unlock(flags);
 }
 EXPORT_SYMBOL_GPL(clk_disable);
 
@@ -693,9 +858,9 @@ int clk_enable(struct clk *clk)
 	unsigned long flags;
 	int ret;
 
-	spin_lock_irqsave(&enable_lock, flags);
+	flags = clk_enable_lock();
 	ret = __clk_enable(clk);
-	spin_unlock_irqrestore(&enable_lock, flags);
+	clk_enable_unlock(flags);
 
 	return ret;
 }
@@ -740,9 +905,9 @@ long clk_round_rate(struct clk *clk, unsigned long rate)
 {
 	unsigned long ret;
 
-	mutex_lock(&prepare_lock);
+	clk_prepare_lock();
 	ret = __clk_round_rate(clk, rate);
-	mutex_unlock(&prepare_lock);
+	clk_prepare_unlock();
 
 	return ret;
 }
@@ -837,13 +1002,13 @@ unsigned long clk_get_rate(struct clk *clk)
 {
 	unsigned long rate;
 
-	mutex_lock(&prepare_lock);
+	clk_prepare_lock();
 
 	if (clk && (clk->flags & CLK_GET_RATE_NOCACHE))
 		__clk_recalc_rates(clk, 0);
 
 	rate = __clk_get_rate(clk);
-	mutex_unlock(&prepare_lock);
+	clk_prepare_unlock();
 
 	return rate;
 }
@@ -876,16 +1041,16 @@ static int __clk_speculate_rates(struct clk *clk, unsigned long parent_rate)
 	else
 		new_rate = parent_rate;
 
-	/* abort the rate change if a driver returns NOTIFY_BAD */
+	/* abort rate change if a driver returns NOTIFY_BAD or NOTIFY_STOP */
 	if (clk->notifier_count)
 		ret = __clk_notify(clk, PRE_RATE_CHANGE, clk->rate, new_rate);
 
-	if (ret == NOTIFY_BAD)
+	if (ret & NOTIFY_STOP_MASK)
 		goto out;
 
 	hlist_for_each_entry(child, &clk->children, child_node) {
 		ret = __clk_speculate_rates(child, new_rate);
-		if (ret == NOTIFY_BAD)
+		if (ret & NOTIFY_STOP_MASK)
 			break;
 	}
 
@@ -974,11 +1139,11 @@ static struct clk *clk_propagate_rate_change(struct clk *clk, unsigned long event)
 	int ret = NOTIFY_DONE;
 
 	if (clk->rate == clk->new_rate)
-		return 0;
+		return NULL;
 
 	if (clk->notifier_count) {
 		ret = __clk_notify(clk, event, clk->rate, clk->new_rate);
-		if (ret == NOTIFY_BAD)
+		if (ret & NOTIFY_STOP_MASK)
 			fail_clk = clk;
 	}
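Aside: the repeated s/ret == NOTIFY_BAD/ret & NOTIFY_STOP_MASK/ changes are a correctness fix, not a cleanup. A notifier callback can veto with either NOTIFY_BAD or NOTIFY_STOP, and only the former was being caught; both encode the same stop bit, as the constants from include/linux/notifier.h show:

#define NOTIFY_DONE		0x0000	/* don't care */
#define NOTIFY_OK		0x0001	/* suits me */
#define NOTIFY_STOP_MASK	0x8000	/* don't call further */
#define NOTIFY_BAD		(NOTIFY_STOP_MASK | 0x0002)	/* bad/veto action */
#define NOTIFY_STOP		(NOTIFY_OK | NOTIFY_STOP_MASK)	/* clean stop */

/* Hypothetical helper showing the intended test: */
static inline int notifier_vetoed(int ret)
{
	return ret & NOTIFY_STOP_MASK;	/* true for NOTIFY_BAD and NOTIFY_STOP */
}

Equality against NOTIFY_BAD (0x8002) silently ignored a NOTIFY_STOP (0x8001) veto.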
@@ -1048,7 +1213,7 @@ int clk_set_rate(struct clk *clk, unsigned long rate)
 	int ret = 0;
 
 	/* prevent racing with updates to the clock topology */
-	mutex_lock(&prepare_lock);
+	clk_prepare_lock();
 
 	/* bail early if nothing to do */
 	if (rate == clk->rate)
@@ -1080,7 +1245,7 @@ int clk_set_rate(struct clk *clk, unsigned long rate)
 	clk_change_rate(top);
 
 out:
-	mutex_unlock(&prepare_lock);
+	clk_prepare_unlock();
 
 	return ret;
 }
@@ -1096,9 +1261,9 @@ struct clk *clk_get_parent(struct clk *clk)
 {
 	struct clk *parent;
 
-	mutex_lock(&prepare_lock);
+	clk_prepare_lock();
 	parent = __clk_get_parent(clk);
-	mutex_unlock(&prepare_lock);
+	clk_prepare_unlock();
 
 	return parent;
 }
@@ -1162,16 +1327,8 @@ out:
 	return ret;
 }
 
-void __clk_reparent(struct clk *clk, struct clk *new_parent)
+static void clk_reparent(struct clk *clk, struct clk *new_parent)
 {
-#ifdef CONFIG_COMMON_CLK_DEBUG
-	struct dentry *d;
-	struct dentry *new_parent_d;
-#endif
-
-	if (!clk || !new_parent)
-		return;
-
 	hlist_del(&clk->child_node);
 
 	if (new_parent)
@@ -1179,39 +1336,20 @@ void __clk_reparent(struct clk *clk, struct clk *new_parent)
 	else
 		hlist_add_head(&clk->child_node, &clk_orphan_list);
 
-#ifdef CONFIG_COMMON_CLK_DEBUG
-	if (!inited)
-		goto out;
-
-	if (new_parent)
-		new_parent_d = new_parent->dentry;
-	else
-		new_parent_d = orphandir;
-
-	d = debugfs_rename(clk->dentry->d_parent, clk->dentry,
-			new_parent_d, clk->name);
-	if (d)
-		clk->dentry = d;
-	else
-		pr_debug("%s: failed to rename debugfs entry for %s\n",
-				__func__, clk->name);
-out:
-#endif
-
 	clk->parent = new_parent;
+}
 
+void __clk_reparent(struct clk *clk, struct clk *new_parent)
+{
+	clk_reparent(clk, new_parent);
+	clk_debug_reparent(clk, new_parent);
 	__clk_recalc_rates(clk, POST_RATE_CHANGE);
 }
 
-static int __clk_set_parent(struct clk *clk, struct clk *parent)
+static u8 clk_fetch_parent_index(struct clk *clk, struct clk *parent)
 {
-	struct clk *old_parent;
-	unsigned long flags;
-	int ret = -EINVAL;
 	u8 i;
 
-	old_parent = clk->parent;
-
 	if (!clk->parents)
 		clk->parents = kzalloc((sizeof(struct clk*) * clk->num_parents),
 								GFP_KERNEL);
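Aside: clk_reparent() above now touches only the tree topology, so it can run under the enable spinlock; the debugfs rename, which may sleep, was split out into clk_debug_reparent() and runs under the prepare mutex instead. A hypothetical userspace sketch of the unlink/relink step, using a hand-rolled hlist-style child list (none of these types come from the kernel):

#include <stddef.h>

struct cnode {
	struct cnode *parent;
	struct cnode **pprev;		/* points at whatever points at us */
	struct cnode *next;		/* next sibling */
	struct cnode *children;		/* head of child list */
};

static struct cnode *orphans;		/* stand-in for clk_orphan_list */

static void node_unlink(struct cnode *n)
{
	*n->pprev = n->next;
	if (n->next)
		n->next->pprev = n->pprev;
}

static void node_link(struct cnode *n, struct cnode **head)
{
	n->next = *head;
	if (*head)
		(*head)->pprev = &n->next;
	*head = n;
	n->pprev = head;
}

/* Mirrors clk_reparent(): a NULL new_parent moves the node to the orphan list. */
static void reparent(struct cnode *n, struct cnode *new_parent)
{
	node_unlink(n);
	node_link(n, new_parent ? &new_parent->children : &orphans);
	n->parent = new_parent;
}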
@@ -1231,36 +1369,79 @@ static int __clk_set_parent(struct clk *clk, struct clk *parent)
 		}
 	}
 
-	if (i == clk->num_parents) {
-		pr_debug("%s: clock %s is not a possible parent of clock %s\n",
-				__func__, parent->name, clk->name);
-		goto out;
-	}
+	return i;
+}
 
-	/* migrate prepare and enable */
+static int __clk_set_parent(struct clk *clk, struct clk *parent, u8 p_index)
+{
+	unsigned long flags;
+	int ret = 0;
+	struct clk *old_parent = clk->parent;
+	bool migrated_enable = false;
+
+	/* migrate prepare */
 	if (clk->prepare_count)
 		__clk_prepare(parent);
 
-	/* FIXME replace with clk_is_enabled(clk) someday */
-	spin_lock_irqsave(&enable_lock, flags);
-	if (clk->enable_count)
+	flags = clk_enable_lock();
+
+	/* migrate enable */
+	if (clk->enable_count) {
 		__clk_enable(parent);
-	spin_unlock_irqrestore(&enable_lock, flags);
+		migrated_enable = true;
+	}
+
+	/* update the clk tree topology */
+	clk_reparent(clk, parent);
+
+	clk_enable_unlock(flags);
 
 	/* change clock input source */
-	ret = clk->ops->set_parent(clk->hw, i);
+	if (parent && clk->ops->set_parent)
+		ret = clk->ops->set_parent(clk->hw, p_index);
 
-	/* clean up old prepare and enable */
-	spin_lock_irqsave(&enable_lock, flags);
-	if (clk->enable_count)
+	if (ret) {
+		/*
+		 * The error handling is tricky due to that we need to release
+		 * the spinlock while issuing the .set_parent callback. This
+		 * means the new parent might have been enabled/disabled in
+		 * between, which must be considered when doing rollback.
+		 */
+		flags = clk_enable_lock();
+
+		clk_reparent(clk, old_parent);
+
+		if (migrated_enable && clk->enable_count) {
+			__clk_disable(parent);
+		} else if (migrated_enable && (clk->enable_count == 0)) {
+			__clk_disable(old_parent);
+		} else if (!migrated_enable && clk->enable_count) {
+			__clk_disable(parent);
+			__clk_enable(old_parent);
+		}
+
+		clk_enable_unlock(flags);
+
+		if (clk->prepare_count)
+			__clk_unprepare(parent);
+
+		return ret;
+	}
+
+	/* clean up enable for old parent if migration was done */
+	if (migrated_enable) {
+		flags = clk_enable_lock();
 		__clk_disable(old_parent);
-	spin_unlock_irqrestore(&enable_lock, flags);
+		clk_enable_unlock(flags);
+	}
 
+	/* clean up prepare for old parent if migration was done */
 	if (clk->prepare_count)
 		__clk_unprepare(old_parent);
 
-out:
-	return ret;
+	/* update debugfs with new clk tree topology */
+	clk_debug_reparent(clk, parent);
+	return 0;
 }
 
 /**
@@ -1278,44 +1459,59 @@ out:
 int clk_set_parent(struct clk *clk, struct clk *parent)
 {
 	int ret = 0;
+	u8 p_index = 0;
+	unsigned long p_rate = 0;
 
 	if (!clk || !clk->ops)
 		return -EINVAL;
 
-	if (!clk->ops->set_parent)
+	/* verify ops for multi-parent clks */
+	if ((clk->num_parents > 1) && (!clk->ops->set_parent))
 		return -ENOSYS;
 
 	/* prevent racing with updates to the clock topology */
-	mutex_lock(&prepare_lock);
+	clk_prepare_lock();
 
 	if (clk->parent == parent)
 		goto out;
 
+	/* check that we are allowed to re-parent if the clock is in use */
+	if ((clk->flags & CLK_SET_PARENT_GATE) && clk->prepare_count) {
+		ret = -EBUSY;
+		goto out;
+	}
+
+	/* try finding the new parent index */
+	if (parent) {
+		p_index = clk_fetch_parent_index(clk, parent);
+		p_rate = parent->rate;
+		if (p_index == clk->num_parents) {
+			pr_debug("%s: clk %s can not be parent of clk %s\n",
+					__func__, parent->name, clk->name);
+			ret = -EINVAL;
+			goto out;
+		}
+	}
+
 	/* propagate PRE_RATE_CHANGE notifications */
 	if (clk->notifier_count)
-		ret = __clk_speculate_rates(clk, parent->rate);
+		ret = __clk_speculate_rates(clk, p_rate);
 
 	/* abort if a driver objects */
-	if (ret == NOTIFY_STOP)
+	if (ret & NOTIFY_STOP_MASK)
 		goto out;
 
-	/* only re-parent if the clock is not in use */
-	if ((clk->flags & CLK_SET_PARENT_GATE) && clk->prepare_count)
-		ret = -EBUSY;
-	else
-		ret = __clk_set_parent(clk, parent);
+	/* do the re-parent */
+	ret = __clk_set_parent(clk, parent, p_index);
 
-	/* propagate ABORT_RATE_CHANGE if .set_parent failed */
-	if (ret) {
+	/* propagate rate recalculation accordingly */
+	if (ret)
 		__clk_recalc_rates(clk, ABORT_RATE_CHANGE);
-		goto out;
-	}
-
-	/* propagate rate recalculation downstream */
-	__clk_reparent(clk, parent);
+	else
+		__clk_recalc_rates(clk, POST_RATE_CHANGE);
 
 out:
-	mutex_unlock(&prepare_lock);
+	clk_prepare_unlock();
 
 	return ret;
 }
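Aside: with this rework, a prepared clock whose mux has CLK_SET_PARENT_GATE set fails fast with -EBUSY before any notifier fires, and the parent index is resolved up front by clk_fetch_parent_index() (an index equal to num_parents means "not a possible parent"). A hypothetical consumer-side fragment honoring the gate requirement; it assumes the driver holds exactly one prepare/enable reference on the mux, and switch_mux_parent() is not a real API:

#include <linux/clk.h>
#include <linux/printk.h>

static int switch_mux_parent(struct clk *mux, struct clk *new_parent)
{
	int ret;

	clk_disable_unprepare(mux);		/* drop prepare_count to 0 */
	ret = clk_set_parent(mux, new_parent);
	if (ret)
		pr_err("reparent failed: %d\n", ret);
	clk_prepare_enable(mux);		/* restore our reference */
	return ret;
}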
@@ -1338,7 +1534,7 @@ int __clk_init(struct device *dev, struct clk *clk)
 	if (!clk)
 		return -EINVAL;
 
-	mutex_lock(&prepare_lock);
+	clk_prepare_lock();
 
 	/* check to see if a clock with this name is already registered */
 	if (__clk_lookup(clk->name)) {
@@ -1462,7 +1658,7 @@ int __clk_init(struct device *dev, struct clk *clk)
 	clk_debug_register(clk);
 
 out:
-	mutex_unlock(&prepare_lock);
+	clk_prepare_unlock();
 
 	return ret;
 }
@@ -1696,7 +1892,7 @@ int clk_notifier_register(struct clk *clk, struct notifier_block *nb)
 	if (!clk || !nb)
 		return -EINVAL;
 
-	mutex_lock(&prepare_lock);
+	clk_prepare_lock();
 
 	/* search the list of notifiers for this clk */
 	list_for_each_entry(cn, &clk_notifier_list, node)
@@ -1720,7 +1916,7 @@ int clk_notifier_register(struct clk *clk, struct notifier_block *nb)
 	clk->notifier_count++;
 
 out:
-	mutex_unlock(&prepare_lock);
+	clk_prepare_unlock();
 
 	return ret;
 }
@@ -1745,7 +1941,7 @@ int clk_notifier_unregister(struct clk *clk, struct notifier_block *nb)
 	if (!clk || !nb)
 		return -EINVAL;
 
-	mutex_lock(&prepare_lock);
+	clk_prepare_lock();
 
 	list_for_each_entry(cn, &clk_notifier_list, node)
 		if (cn->clk == clk)
@@ -1766,7 +1962,7 @@ int clk_notifier_unregister(struct clk *clk, struct notifier_block *nb)
 		ret = -ENOENT;
 	}
 
-	mutex_unlock(&prepare_lock);
+	clk_prepare_unlock();
 
 	return ret;
 }
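Aside: since every notifier-facing path now honors NOTIFY_STOP_MASK, a PRE_RATE_CHANGE callback can reliably veto a rate change. A hypothetical consumer that refuses rates below 1 MHz; the callback and notifier_block names are illustrative, while clk_notifier_register() and struct clk_notifier_data are the real API:

#include <linux/clk.h>
#include <linux/notifier.h>

static int my_clk_cb(struct notifier_block *nb, unsigned long event,
		     void *data)
{
	struct clk_notifier_data *cnd = data;

	/* Vetoing here makes clk_set_rate() fail with -EBUSY for its caller. */
	if (event == PRE_RATE_CHANGE && cnd->new_rate < 1000000)
		return NOTIFY_BAD;

	return NOTIFY_OK;
}

static struct notifier_block my_clk_nb = {
	.notifier_call = my_clk_cb,
};

/* in probe(): ret = clk_notifier_register(clk, &my_clk_nb); */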