Diffstat (limited to 'arch/arm/mach-omap2/clock.c')
-rw-r--r-- | arch/arm/mach-omap2/clock.c | 910
1 file changed, 242 insertions(+), 668 deletions(-)
diff --git a/arch/arm/mach-omap2/clock.c b/arch/arm/mach-omap2/clock.c index e381d991092c..e4ec3a69ee2e 100644 --- a/arch/arm/mach-omap2/clock.c +++ b/arch/arm/mach-omap2/clock.c @@ -20,7 +20,7 @@ #include <linux/errno.h> #include <linux/err.h> #include <linux/delay.h> -#include <linux/clk.h> +#include <linux/clk-provider.h> #include <linux/io.h> #include <linux/bitops.h> @@ -55,9 +55,28 @@ u16 cpu_mask; */ static bool clkdm_control = true; -static LIST_HEAD(clocks); -static DEFINE_MUTEX(clocks_mutex); -static DEFINE_SPINLOCK(clockfw_lock); +static LIST_HEAD(clk_hw_omap_clocks); + +/* + * Used for clocks that have the same value as the parent clock, + * divided by some factor + */ +unsigned long omap_fixed_divisor_recalc(struct clk_hw *hw, + unsigned long parent_rate) +{ + struct clk_hw_omap *oclk; + + if (!hw) { + pr_warn("%s: hw is NULL\n", __func__); + return -EINVAL; + } + + oclk = to_clk_hw_omap(hw); + + WARN_ON(!oclk->fixed_div); + + return parent_rate / oclk->fixed_div; +} /* * OMAP2+ specific clock functions @@ -109,7 +128,7 @@ static int _wait_idlest_generic(void __iomem *reg, u32 mask, u8 idlest, * belong in the clock code and will be moved in the medium term to * module-dependent code. No return value. */ -static void _omap2_module_wait_ready(struct clk *clk) +static void _omap2_module_wait_ready(struct clk_hw_omap *clk) { void __iomem *companion_reg, *idlest_reg; u8 other_bit, idlest_bit, idlest_val, idlest_reg_id; @@ -124,12 +143,11 @@ static void _omap2_module_wait_ready(struct clk *clk) } clk->ops->find_idlest(clk, &idlest_reg, &idlest_bit, &idlest_val); - r = cm_split_idlest_reg(idlest_reg, &prcm_mod, &idlest_reg_id); if (r) { /* IDLEST register not in the CM module */ _wait_idlest_generic(idlest_reg, (1 << idlest_bit), idlest_val, - clk->name); + __clk_get_name(clk->hw.clk)); } else { cm_wait_module_ready(prcm_mod, idlest_reg_id, idlest_bit); }; @@ -145,15 +163,16 @@ static void _omap2_module_wait_ready(struct clk *clk) * clockdomain pointer, and save it into the struct clk. Intended to be * called during clk_register(). No return value. */ -void omap2_init_clk_clkdm(struct clk *clk) +void omap2_init_clk_clkdm(struct clk_hw *hw) { + struct clk_hw_omap *clk = to_clk_hw_omap(hw); struct clockdomain *clkdm; const char *clk_name; if (!clk->clkdm_name) return; - clk_name = __clk_get_name(clk); + clk_name = __clk_get_name(hw->clk); clkdm = clkdm_lookup(clk->clkdm_name); if (clkdm) { @@ -200,8 +219,8 @@ void __init omap2_clk_disable_clkdm_control(void) * associate this type of code with per-module data structures to * avoid this issue, and remove the casts. No return value. */ -void omap2_clk_dflt_find_companion(struct clk *clk, void __iomem **other_reg, - u8 *other_bit) +void omap2_clk_dflt_find_companion(struct clk_hw_omap *clk, + void __iomem **other_reg, u8 *other_bit) { u32 r; @@ -229,8 +248,8 @@ void omap2_clk_dflt_find_companion(struct clk *clk, void __iomem **other_reg, * register address ID (e.g., that CM_FCLKEN2 corresponds to * CM_IDLEST2). This is not true for all modules. No return value. 
*/ -void omap2_clk_dflt_find_idlest(struct clk *clk, void __iomem **idlest_reg, - u8 *idlest_bit, u8 *idlest_val) +void omap2_clk_dflt_find_idlest(struct clk_hw_omap *clk, + void __iomem **idlest_reg, u8 *idlest_bit, u8 *idlest_val) { u32 r; @@ -252,16 +271,44 @@ void omap2_clk_dflt_find_idlest(struct clk *clk, void __iomem **idlest_reg, } -int omap2_dflt_clk_enable(struct clk *clk) +/** + * omap2_dflt_clk_enable - enable a clock in the hardware + * @hw: struct clk_hw * of the clock to enable + * + * Enable the clock @hw in the hardware. We first call into the OMAP + * clockdomain code to "enable" the corresponding clockdomain if this + * is the first enabled user of the clockdomain. Then program the + * hardware to enable the clock. Then wait for the IP block that uses + * this clock to leave idle (if applicable). Returns the error value + * from clkdm_clk_enable() if it terminated with an error, or -EINVAL + * if @hw has a null clock enable_reg, or zero upon success. + */ +int omap2_dflt_clk_enable(struct clk_hw *hw) { + struct clk_hw_omap *clk; u32 v; + int ret = 0; + + clk = to_clk_hw_omap(hw); + + if (clkdm_control && clk->clkdm) { + ret = clkdm_clk_enable(clk->clkdm, hw->clk); + if (ret) { + WARN(1, "%s: could not enable %s's clockdomain %s: %d\n", + __func__, __clk_get_name(hw->clk), + clk->clkdm->name, ret); + return ret; + } + } if (unlikely(clk->enable_reg == NULL)) { - pr_err("clock.c: Enable for %s without enable code\n", - clk->name); - return 0; /* REVISIT: -EINVAL */ + pr_err("%s: %s missing enable_reg\n", __func__, + __clk_get_name(hw->clk)); + ret = -EINVAL; + goto err; } + /* FIXME should not have INVERT_ENABLE bit here */ v = __raw_readl(clk->enable_reg); if (clk->flags & INVERT_ENABLE) v &= ~(1 << clk->enable_bit); @@ -270,22 +317,39 @@ int omap2_dflt_clk_enable(struct clk *clk) __raw_writel(v, clk->enable_reg); v = __raw_readl(clk->enable_reg); /* OCP barrier */ - if (clk->ops->find_idlest) + if (clk->ops && clk->ops->find_idlest) _omap2_module_wait_ready(clk); return 0; + +err: + if (clkdm_control && clk->clkdm) + clkdm_clk_disable(clk->clkdm, hw->clk); + return ret; } -void omap2_dflt_clk_disable(struct clk *clk) +/** + * omap2_dflt_clk_disable - disable a clock in the hardware + * @hw: struct clk_hw * of the clock to disable + * + * Disable the clock @hw in the hardware, and call into the OMAP + * clockdomain code to "disable" the corresponding clockdomain if all + * clocks/hwmods in that clockdomain are now disabled. No return + * value. + */ +void omap2_dflt_clk_disable(struct clk_hw *hw) { + struct clk_hw_omap *clk; u32 v; + clk = to_clk_hw_omap(hw); if (!clk->enable_reg) { /* - * 'Independent' here refers to a clock which is not + * 'independent' here refers to a clock which is not * controlled by its parent. 
*/ - pr_err("clock: clk_disable called on independent clock %s which has no enable_reg\n", clk->name); + pr_err("%s: independent clock %s has no enable_reg\n", + __func__, __clk_get_name(hw->clk)); return; } @@ -296,191 +360,213 @@ void omap2_dflt_clk_disable(struct clk *clk) v &= ~(1 << clk->enable_bit); __raw_writel(v, clk->enable_reg); /* No OCP barrier needed here since it is a disable operation */ -} - -const struct clkops clkops_omap2_dflt_wait = { - .enable = omap2_dflt_clk_enable, - .disable = omap2_dflt_clk_disable, - .find_companion = omap2_clk_dflt_find_companion, - .find_idlest = omap2_clk_dflt_find_idlest, -}; -const struct clkops clkops_omap2_dflt = { - .enable = omap2_dflt_clk_enable, - .disable = omap2_dflt_clk_disable, -}; + if (clkdm_control && clk->clkdm) + clkdm_clk_disable(clk->clkdm, hw->clk); +} /** - * omap2_clk_disable - disable a clock, if the system is not using it - * @clk: struct clk * to disable + * omap2_clkops_enable_clkdm - increment usecount on clkdm of @hw + * @hw: struct clk_hw * of the clock being enabled * - * Decrements the usecount on struct clk @clk. If there are no users - * left, call the clkops-specific clock disable function to disable it - * in hardware. If the clock is part of a clockdomain (which they all - * should be), request that the clockdomain be disabled. (It too has - * a usecount, and so will not be disabled in the hardware until it no - * longer has any users.) If the clock has a parent clock (most of - * them do), then call ourselves, recursing on the parent clock. This - * can cause an entire branch of the clock tree to be powered off by - * simply disabling one clock. Intended to be called with the clockfw_lock - * spinlock held. No return value. + * Increment the usecount of the clockdomain of the clock pointed to + * by @hw; if the usecount is 1, the clockdomain will be "enabled." + * Only needed for clocks that don't use omap2_dflt_clk_enable() as + * their enable function pointer. Passes along the return value of + * clkdm_clk_enable(), -EINVAL if @hw is not associated with a + * clockdomain, or 0 if clock framework-based clockdomain control is + * not implemented. 
*/ -void omap2_clk_disable(struct clk *clk) +int omap2_clkops_enable_clkdm(struct clk_hw *hw) { - if (clk->usecount == 0) { - WARN(1, "clock: %s: omap2_clk_disable() called, but usecount already 0?", clk->name); - return; - } - - pr_debug("clock: %s: decrementing usecount\n", clk->name); + struct clk_hw_omap *clk; + int ret = 0; - clk->usecount--; + clk = to_clk_hw_omap(hw); - if (clk->usecount > 0) - return; + if (unlikely(!clk->clkdm)) { + pr_err("%s: %s: no clkdm set ?!\n", __func__, + __clk_get_name(hw->clk)); + return -EINVAL; + } - pr_debug("clock: %s: disabling in hardware\n", clk->name); + if (unlikely(clk->enable_reg)) + pr_err("%s: %s: should use dflt_clk_enable ?!\n", __func__, + __clk_get_name(hw->clk)); - if (clk->ops && clk->ops->disable) { - trace_clock_disable(clk->name, 0, smp_processor_id()); - clk->ops->disable(clk); + if (!clkdm_control) { + pr_err("%s: %s: clkfw-based clockdomain control disabled ?!\n", + __func__, __clk_get_name(hw->clk)); + return 0; } - if (clkdm_control && clk->clkdm) - clkdm_clk_disable(clk->clkdm, clk); + ret = clkdm_clk_enable(clk->clkdm, hw->clk); + WARN(ret, "%s: could not enable %s's clockdomain %s: %d\n", + __func__, __clk_get_name(hw->clk), clk->clkdm->name, ret); - if (clk->parent) - omap2_clk_disable(clk->parent); + return ret; } /** - * omap2_clk_enable - request that the system enable a clock - * @clk: struct clk * to enable + * omap2_clkops_disable_clkdm - decrement usecount on clkdm of @hw + * @hw: struct clk_hw * of the clock being disabled * - * Increments the usecount on struct clk @clk. If there were no users - * previously, then recurse up the clock tree, enabling all of the - * clock's parents and all of the parent clockdomains, and finally, - * enabling @clk's clockdomain, and @clk itself. Intended to be - * called with the clockfw_lock spinlock held. Returns 0 upon success - * or a negative error code upon failure. + * Decrement the usecount of the clockdomain of the clock pointed to + * by @hw; if the usecount is 0, the clockdomain will be "disabled." + * Only needed for clocks that don't use omap2_dflt_clk_disable() as their + * disable function pointer. No return value. 
*/ -int omap2_clk_enable(struct clk *clk) +void omap2_clkops_disable_clkdm(struct clk_hw *hw) { - int ret; + struct clk_hw_omap *clk; - pr_debug("clock: %s: incrementing usecount\n", clk->name); + clk = to_clk_hw_omap(hw); - clk->usecount++; - - if (clk->usecount > 1) - return 0; - - pr_debug("clock: %s: enabling in hardware\n", clk->name); - - if (clk->parent) { - ret = omap2_clk_enable(clk->parent); - if (ret) { - WARN(1, "clock: %s: could not enable parent %s: %d\n", - clk->name, clk->parent->name, ret); - goto oce_err1; - } + if (unlikely(!clk->clkdm)) { + pr_err("%s: %s: no clkdm set ?!\n", __func__, + __clk_get_name(hw->clk)); + return; } - if (clkdm_control && clk->clkdm) { - ret = clkdm_clk_enable(clk->clkdm, clk); - if (ret) { - WARN(1, "clock: %s: could not enable clockdomain %s: %d\n", - clk->name, clk->clkdm->name, ret); - goto oce_err2; - } - } + if (unlikely(clk->enable_reg)) + pr_err("%s: %s: should use dflt_clk_disable ?!\n", __func__, + __clk_get_name(hw->clk)); - if (clk->ops && clk->ops->enable) { - trace_clock_enable(clk->name, 1, smp_processor_id()); - ret = clk->ops->enable(clk); - if (ret) { - WARN(1, "clock: %s: could not enable: %d\n", - clk->name, ret); - goto oce_err3; - } + if (!clkdm_control) { + pr_err("%s: %s: clkfw-based clockdomain control disabled ?!\n", + __func__, __clk_get_name(hw->clk)); + return; } - return 0; - -oce_err3: - if (clkdm_control && clk->clkdm) - clkdm_clk_disable(clk->clkdm, clk); -oce_err2: - if (clk->parent) - omap2_clk_disable(clk->parent); -oce_err1: - clk->usecount--; - - return ret; + clkdm_clk_disable(clk->clkdm, hw->clk); } -/* Given a clock and a rate apply a clock specific rounding function */ -long omap2_clk_round_rate(struct clk *clk, unsigned long rate) +/** + * omap2_dflt_clk_is_enabled - is clock enabled in the hardware? + * @hw: struct clk_hw * to check + * + * Return 1 if the clock represented by @hw is enabled in the + * hardware, or 0 otherwise. Intended for use in the struct + * clk_ops.is_enabled function pointer. + */ +int omap2_dflt_clk_is_enabled(struct clk_hw *hw) { - if (clk->round_rate) - return clk->round_rate(clk, rate); + struct clk_hw_omap *clk = to_clk_hw_omap(hw); + u32 v; - return clk->rate; + v = __raw_readl(clk->enable_reg); + + if (clk->flags & INVERT_ENABLE) + v ^= BIT(clk->enable_bit); + + v &= BIT(clk->enable_bit); + + return v ? 1 : 0; } -/* Set the clock rate for a clock source */ -int omap2_clk_set_rate(struct clk *clk, unsigned long rate) +static int __initdata mpurate; + +/* + * By default we use the rate set by the bootloader. + * You can override this with mpurate= cmdline option. + */ +static int __init omap_clk_setup(char *str) { - int ret = -EINVAL; + get_option(&str, &mpurate); - pr_debug("clock: set_rate for clock %s to rate %ld\n", clk->name, rate); + if (!mpurate) + return 1; - /* dpll_ck, core_ck, virt_prcm_set; plus all clksel clocks */ - if (clk->set_rate) { - trace_clock_set_rate(clk->name, rate, smp_processor_id()); - ret = clk->set_rate(clk, rate); - } + if (mpurate < 1000) + mpurate *= 1000000; - return ret; + return 1; } +__setup("mpurate=", omap_clk_setup); -int omap2_clk_set_parent(struct clk *clk, struct clk *new_parent) +/** + * omap2_init_clk_hw_omap_clocks - initialize an OMAP clock + * @clk: struct clk * to initialize + * + * Add an OMAP clock @clk to the internal list of OMAP clocks. Used + * temporarily for autoidle handling, until this support can be + * integrated into the common clock framework code in some way. No + * return value. 
+ */ +void omap2_init_clk_hw_omap_clocks(struct clk *clk) { - if (!clk->clksel) - return -EINVAL; + struct clk_hw_omap *c; - if (clk->parent == new_parent) - return 0; + if (__clk_get_flags(clk) & CLK_IS_BASIC) + return; - return omap2_clksel_set_parent(clk, new_parent); + c = to_clk_hw_omap(__clk_get_hw(clk)); + list_add(&c->node, &clk_hw_omap_clocks); } -/* - * OMAP2+ clock reset and init functions +/** + * omap2_clk_enable_autoidle_all - enable autoidle on all OMAP clocks that + * support it + * + * Enable clock autoidle on all OMAP clocks that have allow_idle + * function pointers associated with them. This function is intended + * to be temporary until support for this is added to the common clock + * code. Returns 0. */ +int omap2_clk_enable_autoidle_all(void) +{ + struct clk_hw_omap *c; -#ifdef CONFIG_OMAP_RESET_CLOCKS -void omap2_clk_disable_unused(struct clk *clk) + list_for_each_entry(c, &clk_hw_omap_clocks, node) + if (c->ops && c->ops->allow_idle) + c->ops->allow_idle(c); + return 0; +} + +/** + * omap2_clk_disable_autoidle_all - disable autoidle on all OMAP clocks that + * support it + * + * Disable clock autoidle on all OMAP clocks that have allow_idle + * function pointers associated with them. This function is intended + * to be temporary until support for this is added to the common clock + * code. Returns 0. + */ +int omap2_clk_disable_autoidle_all(void) { - u32 regval32, v; + struct clk_hw_omap *c; - v = (clk->flags & INVERT_ENABLE) ? (1 << clk->enable_bit) : 0; + list_for_each_entry(c, &clk_hw_omap_clocks, node) + if (c->ops && c->ops->deny_idle) + c->ops->deny_idle(c); + return 0; +} - regval32 = __raw_readl(clk->enable_reg); - if ((regval32 & (1 << clk->enable_bit)) == v) - return; +/** + * omap2_clk_enable_init_clocks - prepare & enable a list of clocks + * @clk_names: ptr to an array of strings of clock names to enable + * @num_clocks: number of clock names in @clk_names + * + * Prepare and enable a list of clocks, named by @clk_names. No + * return value. XXX Deprecated; only needed until these clocks are + * properly claimed and enabled by the drivers or core code that uses + * them. XXX What code disables & calls clk_put on these clocks? 
+ */ +void omap2_clk_enable_init_clocks(const char **clk_names, u8 num_clocks) +{ + struct clk *init_clk; + int i; - pr_debug("Disabling unused clock \"%s\"\n", clk->name); - if (cpu_is_omap34xx()) { - omap2_clk_enable(clk); - omap2_clk_disable(clk); - } else { - clk->ops->disable(clk); + for (i = 0; i < num_clocks; i++) { + init_clk = clk_get(NULL, clk_names[i]); + clk_prepare_enable(init_clk); } - if (clk->clkdm != NULL) - pwrdm_state_switch(clk->clkdm->pwrdm.ptr); } -#endif + +const struct clk_hw_omap_ops clkhwops_wait = { + .find_idlest = omap2_clk_dflt_find_idlest, + .find_companion = omap2_clk_dflt_find_companion, +}; /** * omap2_clk_switch_mpurate_at_boot - switch ARM MPU rate by boot-time argument @@ -512,14 +598,12 @@ int __init omap2_clk_switch_mpurate_at_boot(const char *mpurate_ck_name) r = clk_set_rate(mpurate_ck, mpurate); if (IS_ERR_VALUE(r)) { WARN(1, "clock: %s: unable to set MPU rate to %d: %d\n", - mpurate_ck->name, mpurate, r); + mpurate_ck_name, mpurate, r); clk_put(mpurate_ck); return -EINVAL; } calibrate_delay(); - recalculate_root_clocks(); - clk_put(mpurate_ck); return 0; @@ -563,513 +647,3 @@ void __init omap2_clk_print_new_rates(const char *hfclkin_ck_name, (clk_get_rate(core_ck) / 1000000), (clk_get_rate(mpu_ck) / 1000000)); } - -/* Common data */ - -int clk_enable(struct clk *clk) -{ - unsigned long flags; - int ret; - - if (clk == NULL || IS_ERR(clk)) - return -EINVAL; - - spin_lock_irqsave(&clockfw_lock, flags); - ret = omap2_clk_enable(clk); - spin_unlock_irqrestore(&clockfw_lock, flags); - - return ret; -} -EXPORT_SYMBOL(clk_enable); - -void clk_disable(struct clk *clk) -{ - unsigned long flags; - - if (clk == NULL || IS_ERR(clk)) - return; - - spin_lock_irqsave(&clockfw_lock, flags); - if (clk->usecount == 0) { - pr_err("Trying disable clock %s with 0 usecount\n", - clk->name); - WARN_ON(1); - goto out; - } - - omap2_clk_disable(clk); - -out: - spin_unlock_irqrestore(&clockfw_lock, flags); -} -EXPORT_SYMBOL(clk_disable); - -unsigned long clk_get_rate(struct clk *clk) -{ - unsigned long flags; - unsigned long ret; - - if (clk == NULL || IS_ERR(clk)) - return 0; - - spin_lock_irqsave(&clockfw_lock, flags); - ret = clk->rate; - spin_unlock_irqrestore(&clockfw_lock, flags); - - return ret; -} -EXPORT_SYMBOL(clk_get_rate); - -/* - * Optional clock functions defined in include/linux/clk.h - */ - -long clk_round_rate(struct clk *clk, unsigned long rate) -{ - unsigned long flags; - long ret; - - if (clk == NULL || IS_ERR(clk)) - return 0; - - spin_lock_irqsave(&clockfw_lock, flags); - ret = omap2_clk_round_rate(clk, rate); - spin_unlock_irqrestore(&clockfw_lock, flags); - - return ret; -} -EXPORT_SYMBOL(clk_round_rate); - -int clk_set_rate(struct clk *clk, unsigned long rate) -{ - unsigned long flags; - int ret = -EINVAL; - - if (clk == NULL || IS_ERR(clk)) - return ret; - - spin_lock_irqsave(&clockfw_lock, flags); - ret = omap2_clk_set_rate(clk, rate); - if (ret == 0) - propagate_rate(clk); - spin_unlock_irqrestore(&clockfw_lock, flags); - - return ret; -} -EXPORT_SYMBOL(clk_set_rate); - -int clk_set_parent(struct clk *clk, struct clk *parent) -{ - unsigned long flags; - int ret = -EINVAL; - - if (clk == NULL || IS_ERR(clk) || parent == NULL || IS_ERR(parent)) - return ret; - - spin_lock_irqsave(&clockfw_lock, flags); - if (clk->usecount == 0) { - ret = omap2_clk_set_parent(clk, parent); - if (ret == 0) - propagate_rate(clk); - } else { - ret = -EBUSY; - } - spin_unlock_irqrestore(&clockfw_lock, flags); - - return ret; -} -EXPORT_SYMBOL(clk_set_parent); - -struct 
clk *clk_get_parent(struct clk *clk) -{ - return clk->parent; -} -EXPORT_SYMBOL(clk_get_parent); - -/* - * OMAP specific clock functions shared between omap1 and omap2 - */ - -int __initdata mpurate; - -/* - * By default we use the rate set by the bootloader. - * You can override this with mpurate= cmdline option. - */ -static int __init omap_clk_setup(char *str) -{ - get_option(&str, &mpurate); - - if (!mpurate) - return 1; - - if (mpurate < 1000) - mpurate *= 1000000; - - return 1; -} -__setup("mpurate=", omap_clk_setup); - -/* Used for clocks that always have same value as the parent clock */ -unsigned long followparent_recalc(struct clk *clk) -{ - return clk->parent->rate; -} - -/* - * Used for clocks that have the same value as the parent clock, - * divided by some factor - */ -unsigned long omap_fixed_divisor_recalc(struct clk *clk) -{ - WARN_ON(!clk->fixed_div); - - return clk->parent->rate / clk->fixed_div; -} - -void clk_reparent(struct clk *child, struct clk *parent) -{ - list_del_init(&child->sibling); - if (parent) - list_add(&child->sibling, &parent->children); - child->parent = parent; - - /* now do the debugfs renaming to reattach the child - to the proper parent */ -} - -/* Propagate rate to children */ -void propagate_rate(struct clk *tclk) -{ - struct clk *clkp; - - list_for_each_entry(clkp, &tclk->children, sibling) { - if (clkp->recalc) - clkp->rate = clkp->recalc(clkp); - propagate_rate(clkp); - } -} - -static LIST_HEAD(root_clks); - -/** - * recalculate_root_clocks - recalculate and propagate all root clocks - * - * Recalculates all root clocks (clocks with no parent), which if the - * clock's .recalc is set correctly, should also propagate their rates. - * Called at init. - */ -void recalculate_root_clocks(void) -{ - struct clk *clkp; - - list_for_each_entry(clkp, &root_clks, sibling) { - if (clkp->recalc) - clkp->rate = clkp->recalc(clkp); - propagate_rate(clkp); - } -} - -/** - * clk_preinit - initialize any fields in the struct clk before clk init - * @clk: struct clk * to initialize - * - * Initialize any struct clk fields needed before normal clk initialization - * can run. No return value. - */ -void clk_preinit(struct clk *clk) -{ - INIT_LIST_HEAD(&clk->children); -} - -int clk_register(struct clk *clk) -{ - if (clk == NULL || IS_ERR(clk)) - return -EINVAL; - - /* - * trap out already registered clocks - */ - if (clk->node.next || clk->node.prev) - return 0; - - mutex_lock(&clocks_mutex); - if (clk->parent) - list_add(&clk->sibling, &clk->parent->children); - else - list_add(&clk->sibling, &root_clks); - - list_add(&clk->node, &clocks); - if (clk->init) - clk->init(clk); - mutex_unlock(&clocks_mutex); - - return 0; -} -EXPORT_SYMBOL(clk_register); - -void clk_unregister(struct clk *clk) -{ - if (clk == NULL || IS_ERR(clk)) - return; - - mutex_lock(&clocks_mutex); - list_del(&clk->sibling); - list_del(&clk->node); - mutex_unlock(&clocks_mutex); -} -EXPORT_SYMBOL(clk_unregister); - -void clk_enable_init_clocks(void) -{ - struct clk *clkp; - - list_for_each_entry(clkp, &clocks, node) - if (clkp->flags & ENABLE_ON_INIT) - clk_enable(clkp); -} - -/** - * omap_clk_get_by_name - locate OMAP struct clk by its name - * @name: name of the struct clk to locate - * - * Locate an OMAP struct clk by its name. Assumes that struct clk - * names are unique. Returns NULL if not found or a pointer to the - * struct clk if found. 
- */ -struct clk *omap_clk_get_by_name(const char *name) -{ - struct clk *c; - struct clk *ret = NULL; - - mutex_lock(&clocks_mutex); - - list_for_each_entry(c, &clocks, node) { - if (!strcmp(c->name, name)) { - ret = c; - break; - } - } - - mutex_unlock(&clocks_mutex); - - return ret; -} - -int omap_clk_enable_autoidle_all(void) -{ - struct clk *c; - unsigned long flags; - - spin_lock_irqsave(&clockfw_lock, flags); - - list_for_each_entry(c, &clocks, node) - if (c->ops->allow_idle) - c->ops->allow_idle(c); - - spin_unlock_irqrestore(&clockfw_lock, flags); - - return 0; -} - -int omap_clk_disable_autoidle_all(void) -{ - struct clk *c; - unsigned long flags; - - spin_lock_irqsave(&clockfw_lock, flags); - - list_for_each_entry(c, &clocks, node) - if (c->ops->deny_idle) - c->ops->deny_idle(c); - - spin_unlock_irqrestore(&clockfw_lock, flags); - - return 0; -} - -/* - * Low level helpers - */ -static int clkll_enable_null(struct clk *clk) -{ - return 0; -} - -static void clkll_disable_null(struct clk *clk) -{ -} - -const struct clkops clkops_null = { - .enable = clkll_enable_null, - .disable = clkll_disable_null, -}; - -/* - * Dummy clock - * - * Used for clock aliases that are needed on some OMAPs, but not others - */ -struct clk dummy_ck = { - .name = "dummy", - .ops = &clkops_null, -}; - -/* - * - */ - -#ifdef CONFIG_OMAP_RESET_CLOCKS -/* - * Disable any unused clocks left on by the bootloader - */ -static int __init clk_disable_unused(void) -{ - struct clk *ck; - unsigned long flags; - - pr_info("clock: disabling unused clocks to save power\n"); - - spin_lock_irqsave(&clockfw_lock, flags); - list_for_each_entry(ck, &clocks, node) { - if (ck->ops == &clkops_null) - continue; - - if (ck->usecount > 0 || !ck->enable_reg) - continue; - - omap2_clk_disable_unused(ck); - } - spin_unlock_irqrestore(&clockfw_lock, flags); - - return 0; -} -late_initcall(clk_disable_unused); -late_initcall(omap_clk_enable_autoidle_all); -#endif - -#if defined(CONFIG_PM_DEBUG) && defined(CONFIG_DEBUG_FS) -/* - * debugfs support to trace clock tree hierarchy and attributes - */ - -#include <linux/debugfs.h> -#include <linux/seq_file.h> - -static struct dentry *clk_debugfs_root; - -static int clk_dbg_show_summary(struct seq_file *s, void *unused) -{ - struct clk *c; - struct clk *pa; - - mutex_lock(&clocks_mutex); - seq_printf(s, "%-30s %-30s %-10s %s\n", - "clock-name", "parent-name", "rate", "use-count"); - - list_for_each_entry(c, &clocks, node) { - pa = c->parent; - seq_printf(s, "%-30s %-30s %-10lu %d\n", - c->name, pa ? pa->name : "none", c->rate, - c->usecount); - } - mutex_unlock(&clocks_mutex); - - return 0; -} - -static int clk_dbg_open(struct inode *inode, struct file *file) -{ - return single_open(file, clk_dbg_show_summary, inode->i_private); -} - -static const struct file_operations debug_clock_fops = { - .open = clk_dbg_open, - .read = seq_read, - .llseek = seq_lseek, - .release = single_release, -}; - -static int clk_debugfs_register_one(struct clk *c) -{ - int err; - struct dentry *d; - struct clk *pa = c->parent; - - d = debugfs_create_dir(c->name, pa ? 
pa->dent : clk_debugfs_root); - if (!d) - return -ENOMEM; - c->dent = d; - - d = debugfs_create_u8("usecount", S_IRUGO, c->dent, (u8 *)&c->usecount); - if (!d) { - err = -ENOMEM; - goto err_out; - } - d = debugfs_create_u32("rate", S_IRUGO, c->dent, (u32 *)&c->rate); - if (!d) { - err = -ENOMEM; - goto err_out; - } - d = debugfs_create_x32("flags", S_IRUGO, c->dent, (u32 *)&c->flags); - if (!d) { - err = -ENOMEM; - goto err_out; - } - return 0; - -err_out: - debugfs_remove_recursive(c->dent); - return err; -} - -static int clk_debugfs_register(struct clk *c) -{ - int err; - struct clk *pa = c->parent; - - if (pa && !pa->dent) { - err = clk_debugfs_register(pa); - if (err) - return err; - } - - if (!c->dent) { - err = clk_debugfs_register_one(c); - if (err) - return err; - } - return 0; -} - -static int __init clk_debugfs_init(void) -{ - struct clk *c; - struct dentry *d; - int err; - - d = debugfs_create_dir("clock", NULL); - if (!d) - return -ENOMEM; - clk_debugfs_root = d; - - list_for_each_entry(c, &clocks, node) { - err = clk_debugfs_register(c); - if (err) - goto err_out; - } - - d = debugfs_create_file("summary", S_IRUGO, - d, NULL, &debug_clock_fops); - if (!d) - return -ENOMEM; - - return 0; -err_out: - debugfs_remove_recursive(clk_debugfs_root); - return err; -} -late_initcall(clk_debugfs_init); - -#endif /* defined(CONFIG_PM_DEBUG) && defined(CONFIG_DEBUG_FS) */ - |
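
The sketch below is not part of the patch; it is a minimal illustration, under stated assumptions, of how a leaf clock could be described on top of the converted API, wiring the callbacks introduced above (omap2_dflt_clk_enable/disable/is_enabled, omap_fixed_divisor_recalc, omap2_init_clk_clkdm, clkhwops_wait) into the common clock framework's struct clk_ops and registering it with clk_register(). The clock name "foo_div2_fck", its parent "foo_ck", the enable register and bit, and the clockdomain name are all hypothetical placeholders, not values from the patch.

#include <linux/kernel.h>
#include <linux/err.h>
#include <linux/clk-provider.h>
#include "clock.h"		/* struct clk_hw_omap, omap2_* callbacks, clkhwops_wait */
#include "cm2xxx_3xxx.h"	/* OMAP2420_CM_REGADDR(); illustrative only */

/* Hypothetical gated clock running at half its parent's rate */
static const char *foo_div2_fck_parents[] = { "foo_ck" };

static const struct clk_ops foo_div2_fck_ops = {
	.init		= &omap2_init_clk_clkdm,
	.enable		= &omap2_dflt_clk_enable,
	.disable	= &omap2_dflt_clk_disable,
	.is_enabled	= &omap2_dflt_clk_is_enabled,
	.recalc_rate	= &omap_fixed_divisor_recalc,
};

static struct clk_init_data foo_div2_fck_init = {
	.name		= "foo_div2_fck",
	.ops		= &foo_div2_fck_ops,
	.parent_names	= foo_div2_fck_parents,
	.num_parents	= ARRAY_SIZE(foo_div2_fck_parents),
};

static struct clk_hw_omap foo_div2_fck_hw = {
	.hw		= {
		.init	= &foo_div2_fck_init,
	},
	.ops		= &clkhwops_wait,	/* wait for the IP block to leave idle */
	.enable_reg	= OMAP2420_CM_REGADDR(CORE_MOD, CM_FCLKEN1),	/* placeholder register */
	.enable_bit	= 0,			/* placeholder bit */
	.fixed_div	= 2,			/* parent_rate / 2, see omap_fixed_divisor_recalc() */
	.clkdm_name	= "core_l4_clkdm",	/* placeholder clockdomain */
};

static int __init foo_div2_fck_register(void)
{
	struct clk *clk;

	clk = clk_register(NULL, &foo_div2_fck_hw.hw);
	if (IS_ERR(clk))
		return PTR_ERR(clk);

	/* add to the list walked by omap2_clk_{enable,disable}_autoidle_all() */
	omap2_init_clk_hw_omap_clocks(clk);

	return 0;
}

With the clockdomain handling folded into omap2_dflt_clk_enable() and omap2_dflt_clk_disable(), a clock like this needs no OMAP-specific enable wrapper; only clocks without an enable_reg of their own would use omap2_clkops_enable_clkdm()/omap2_clkops_disable_clkdm() instead.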