author		Alex Frid <afrid@nvidia.com>	2014-04-16 16:52:09 -0700
committer	Mandar Padmawar <mpadmawar@nvidia.com>	2014-04-22 05:00:57 -0700
commit		9a9aa3a00924339080065e9ee2d4f3c0ba39f053 (patch)
tree		c4222defa620711ff8df19704bdd2d46a379a32d	/arch/arm/mach-tegra/tegra12_clocks.c
parent		58edcc912c8e9c61711150c814b1344e91ac10a4 (diff)
ARM: tegra12: clock: Avoid locking clock list in DFLL late init
DFLL late init is invoked with the CPU clock mutex held, and in turn
calls the tegra_get_clock_by_name() interface, which locks the global
tegra clock list. On the other hand, several clock list traversals
acquire the list mutex first and then lock all individual clocks
(including the CPU clock). This created the possibility of an AB-BA
deadlock. Fixed by re-arranging DFLL late init to avoid taking the
clock list mutex.

Bug 1502699

Change-Id: I5b2848a616eb2a8a8024096c3537812a04cf43ab
Reviewed-on: http://git-master/r/397473
Signed-off-by: Alex Frid <afrid@nvidia.com>
Reviewed-on: http://git-master/r/398073
Reviewed-by: Mandar Padmawar <mpadmawar@nvidia.com>
Tested-by: Mandar Padmawar <mpadmawar@nvidia.com>
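For context, the deadlock this patch avoids is a classic AB-BA lock-order
inversion: the DFLL late-init path held the CPU clock lock and then took the
clock-list lock (via tegra_get_clock_by_name()), while list-traversal paths
take the clock-list lock and then lock each clock, including the CPU clock.
The sketch below only models that inversion in userspace with pthreads; the
mutex names and thread setup are illustrative assumptions, not kernel code.

/*
 * Illustrative AB-BA lock-order inversion, modeled with pthreads.
 * "cpu_clk_lock" and "clock_list_lock" are hypothetical stand-ins for
 * the kernel's CPU clock mutex and global clock-list mutex.
 */
#include <pthread.h>
#include <stdio.h>
#include <unistd.h>

static pthread_mutex_t cpu_clk_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_mutex_t clock_list_lock = PTHREAD_MUTEX_INITIALIZER;

/* Path 1: late init entered with the CPU clock lock held, then a
 * by-name lookup that needs the list lock. Order: A then B. */
static void *dfll_late_init_path(void *arg)
{
	(void)arg;
	pthread_mutex_lock(&cpu_clk_lock);
	usleep(1000);				/* widen the race window */
	pthread_mutex_lock(&clock_list_lock);	/* lookup by name */
	pthread_mutex_unlock(&clock_list_lock);
	pthread_mutex_unlock(&cpu_clk_lock);
	return NULL;
}

/* Path 2: list traversal that locks each clock (including the CPU
 * clock) while holding the list lock. Order: B then A. */
static void *list_traversal_path(void *arg)
{
	(void)arg;
	pthread_mutex_lock(&clock_list_lock);
	usleep(1000);
	pthread_mutex_lock(&cpu_clk_lock);	/* lock individual clock */
	pthread_mutex_unlock(&cpu_clk_lock);
	pthread_mutex_unlock(&clock_list_lock);
	return NULL;
}

int main(void)
{
	pthread_t t1, t2;

	pthread_create(&t1, NULL, dfll_late_init_path, NULL);
	pthread_create(&t2, NULL, list_traversal_path, NULL);
	/* With unlucky timing both threads block here forever. */
	pthread_join(t1, NULL);
	pthread_join(t2, NULL);
	printf("no deadlock this run\n");
	return 0;
}

In the actual patch the lookup is avoided altogether: the relocated late-init
hook references the static tegra_clk_virtual_cpu_g clock directly instead of
calling tegra_get_clock_by_name("cpu_g"), so the clock-list mutex is never
taken while the CPU clock lock is held.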
Diffstat (limited to 'arch/arm/mach-tegra/tegra12_clocks.c')
-rw-r--r--	arch/arm/mach-tegra/tegra12_clocks.c	67
1 file changed, 35 insertions(+), 32 deletions(-)
diff --git a/arch/arm/mach-tegra/tegra12_clocks.c b/arch/arm/mach-tegra/tegra12_clocks.c
index 2b1ee1cf3198..43f7d5f3a5a4 100644
--- a/arch/arm/mach-tegra/tegra12_clocks.c
+++ b/arch/arm/mach-tegra/tegra12_clocks.c
@@ -642,6 +642,7 @@ static unsigned long tegra12_clk_cap_shared_bus(struct clk *bus,
unsigned long rate, unsigned long ceiling);
static bool tegra12_periph_is_special_reset(struct clk *c);
+static void tegra12_dfll_cpu_late_init(struct clk *c);
static bool detach_shared_bus;
module_param(detach_shared_bus, bool, 0644);
@@ -4287,38 +4288,6 @@ static void tune_cpu_trimmers(bool trim_high)
}
#endif
-static void __init tegra12_dfll_cpu_late_init(struct clk *c)
-{
-#ifdef CONFIG_ARCH_TEGRA_HAS_CL_DVFS
- int ret;
- struct clk *cpu = tegra_get_clock_by_name("cpu_g");
-
- if (!cpu || !cpu->dvfs) {
- pr_err("%s: CPU dvfs is not present\n", __func__);
- return;
- }
- tegra_dvfs_set_dfll_tune_trimmers(cpu->dvfs, tune_cpu_trimmers);
-
- /* release dfll clock source reset, init cl_dvfs control logic, and
- move dfll to initialized state, so it can be used as CPU source */
- tegra_periph_reset_deassert(c);
- ret = tegra_init_cl_dvfs();
- if (!ret) {
- c->state = OFF;
- if (tegra_platform_is_silicon()) {
- use_dfll = CONFIG_TEGRA_USE_DFLL_RANGE;
-#ifdef CONFIG_ARCH_TEGRA_13x_SOC
- if (tegra_cpu_speedo_id() == 0)
- use_dfll = 0;
-#endif
- }
- tegra_dvfs_set_dfll_range(cpu->dvfs, use_dfll);
- tegra_cl_dvfs_debug_init(c);
- pr_info("Tegra CPU DFLL is initialized with use_dfll = %d\n", use_dfll);
- }
-#endif
-}
-
static void __init tegra12_dfll_clk_init(struct clk *c)
{
c->ops->init = tegra12_dfll_cpu_late_init;
@@ -8919,6 +8888,40 @@ static bool tegra12_is_dyn_ramp(
return false;
}
+/* DFLL late init called with CPU clock lock taken */
+static void __init tegra12_dfll_cpu_late_init(struct clk *c)
+{
+#ifdef CONFIG_ARCH_TEGRA_HAS_CL_DVFS
+ int ret;
+ struct clk *cpu = &tegra_clk_virtual_cpu_g;
+
+ if (!cpu || !cpu->dvfs) {
+ pr_err("%s: CPU dvfs is not present\n", __func__);
+ return;
+ }
+ tegra_dvfs_set_dfll_tune_trimmers(cpu->dvfs, tune_cpu_trimmers);
+
+ /* release dfll clock source reset, init cl_dvfs control logic, and
+ move dfll to initialized state, so it can be used as CPU source */
+ tegra_periph_reset_deassert(c);
+ ret = tegra_init_cl_dvfs();
+ if (!ret) {
+ c->state = OFF;
+ if (tegra_platform_is_silicon()) {
+ use_dfll = CONFIG_TEGRA_USE_DFLL_RANGE;
+#ifdef CONFIG_ARCH_TEGRA_13x_SOC
+ if (tegra_cpu_speedo_id() == 0)
+ use_dfll = 0;
+#endif
+ }
+ tegra_dvfs_set_dfll_range(cpu->dvfs, use_dfll);
+ tegra_cl_dvfs_debug_init(c);
+ pr_info("Tegra CPU DFLL is initialized with use_dfll = %d\n",
+ use_dfll);
+ }
+#endif
+}
+
/*
* Backup pll is used as transitional CPU clock source while main pll is
* relocking; in addition all CPU rates below backup level are sourced from