author     Stephen Warren <swarren@nvidia.com>   2016-09-23 16:44:51 -0600
committer  Tom Warren <twarren@nvidia.com>       2016-09-27 09:11:02 -0700
commit     74686766847146e4408486c5e3ca8a1681b145c0 (patch)
tree       f3ea903ef107802cff616f5479b3c822863c7ec0 /arch/arm/mach-tegra/clock.c
parent     4a332d3ee770bd6b633fd3abba741451b17156bc (diff)
ARM: tegra: fix clock_get_periph_rate() for UART clocks
Make clock_get_periph_rate() return the correct value for UART clocks.

This change needs to be applied before the patches that enable CONFIG_CLK for
Tegra SoCs before Tegra186, since enabling that option causes
ns16550_serial_ofdata_to_platdata() to rely on clk_get_rate() for UART clocks,
and clk_get_rate() eventually calls clock_get_periph_rate().

This change is a rather horrible hack, as explained in the comment added to
the clock driver. I've tried fixing this correctly for all clocks as described
in that comment, but there's too much fallout elsewhere. I believe the clock
driver has a number of bugs which all cancel each other out, and unravelling
that chain is too complex at present. This change is the smallest change that
fixes clock_get_periph_rate() for UART clocks while guaranteeing no change in
behaviour for any other clock, which avoids other regressions.

Signed-off-by: Stephen Warren <swarren@nvidia.com>
Reviewed-by: Simon Glass <sjg@chromium.org>
Signed-off-by: Tom Warren <twarren@nvidia.com>
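To make the divider arithmetic referenced above (and in the comment added by
the diff below) concrete, here is a small standalone sketch, not the driver
code itself. It assumes that get_rate_from_divider() computes
parent_rate * 2 / (divisor + 2) and that clk_get_divider() is its rounded-up
inverse ending in a "- 2", which is how the in-code comment describes them.
The function names in the sketch and the 408 MHz / 13.5 MHz values are purely
illustrative.

#include <stdio.h>

/* Stand-in for get_rate_from_divider(): rate = parent * 2 / (divisor + 2). */
static unsigned long rate_from_divider(unsigned long parent_rate, int divisor)
{
	unsigned long long rate = (unsigned long long)parent_rate * 2;

	return (unsigned long)(rate / (unsigned long long)(divisor + 2));
}

/* Stand-in for clk_get_divider(): round up, then subtract the same 2. */
static int divider_for_rate(unsigned long parent_rate, unsigned long rate)
{
	unsigned long long div =
		((unsigned long long)parent_rate * 2 + rate - 1) / rate;

	return (int)div - 2;
}

int main(void)
{
	unsigned long parent = 408000000UL;	/* illustrative parent PLL rate, Hz */
	unsigned long target = 13500000UL;	/* illustrative target rate, Hz */
	int field = divider_for_rate(parent, target);

	printf("raw divisor field: %d\n", field);
	/* Round trip: the +2 and -2 cancel, so this lands near the target. */
	printf("round-trip rate: %lu\n", rate_from_divider(parent, field));
	/*
	 * The UART hack subtracts 2 from the raw register field before
	 * calling get_rate_from_divider(), collapsing the formula to
	 * parent * 2 / field.
	 */
	printf("rate with the UART adjustment: %lu\n",
	       rate_from_divider(parent, field - 2));

	return 0;
}

Under this assumption the round-trip value and the adjusted value differ by a
factor of field / (field + 2), which is exactly the discrepancy the "div -= 2"
in the patch compensates for on the UART clocks.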
Diffstat (limited to 'arch/arm/mach-tegra/clock.c')
-rw-r--r--   arch/arm/mach-tegra/clock.c   38
1 file changed, 36 insertions(+), 2 deletions(-)
diff --git a/arch/arm/mach-tegra/clock.c b/arch/arm/mach-tegra/clock.c
index cf8bc38925..3bb72331a4 100644
--- a/arch/arm/mach-tegra/clock.c
+++ b/arch/arm/mach-tegra/clock.c
@@ -311,9 +311,43 @@ unsigned long clock_get_periph_rate(enum periph_id periph_id,
 				    enum clock_id parent)
 {
 	u32 *reg = get_periph_source_reg(periph_id);
+	unsigned parent_rate = pll_rate[parent];
+	int div = (readl(reg) & OUT_CLK_DIVISOR_MASK) >> OUT_CLK_DIVISOR_SHIFT;
+
+	switch (periph_id) {
+	case PERIPH_ID_UART1:
+	case PERIPH_ID_UART2:
+	case PERIPH_ID_UART3:
+	case PERIPH_ID_UART4:
+	case PERIPH_ID_UART5:
+#ifdef CONFIG_TEGRA20
+		/* There's no divider for these clocks in this SoC. */
+		return parent_rate;
+#else
+		/*
+		 * This undoes the +2 in get_rate_from_divider() which I
+		 * believe is incorrect. Ideally we would fix
+		 * get_rate_from_divider(), but... Removing the +2 from
+		 * get_rate_from_divider() would probably require removing the -2
+		 * from the tail of clk_get_divider() since I believe that's
+		 * only there to invert get_rate_from_divider()'s +2. Observe
+		 * how find_best_divider() uses those two functions together.
+		 * However, doing so breaks other stuff, such as Seaboard's
+		 * display, likely due to clock_set_pllout()'s call to
+		 * clk_get_divider(). Attempting to fix that by making
+		 * clock_set_pllout() subtract 2 from clk_get_divider()'s
+		 * return value doesn't help. In summary this clock driver is
+		 * quite broken but I'm afraid I have no idea how to fix it
+		 * without completely replacing it.
+		 */
+		div -= 2;
+		break;
+#endif
+	default:
+		break;
+	}
 
-	return get_rate_from_divider(pll_rate[parent],
-			(readl(reg) & OUT_CLK_DIVISOR_MASK) >> OUT_CLK_DIVISOR_SHIFT);
+	return get_rate_from_divider(parent_rate, div);
 }
 
 /**
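For context on why this particular rate matters, the commit message notes that
with CONFIG_CLK enabled the ns16550 driver picks up the UART input clock via
clk_get_rate(), and that rate feeds the usual 16x baud-rate divisor. The
sketch below uses the generic 16550 divisor formula rather than the actual
U-Boot driver code; the clock values and the 115200 baud rate are illustrative
only.

#include <stdio.h>

/* Classic 16550 formula: divisor = clock / (16 * baud), rounded to nearest. */
static unsigned int ns16550_divisor(unsigned long clock_hz, unsigned int baud)
{
	return (unsigned int)((clock_hz + 8UL * baud) / (16UL * baud));
}

int main(void)
{
	unsigned int baud = 115200;
	unsigned long reported_ok = 13000000UL;	/* illustrative UART clock, Hz */
	unsigned long reported_low = 10400000UL;	/* same clock misreported low */

	printf("divisor from correct rate: %u\n",
	       ns16550_divisor(reported_ok, baud));
	printf("divisor from misreported rate: %u\n",
	       ns16550_divisor(reported_low, baud));

	return 0;
}

An input rate misreported by the divider discrepancy described above can move
the programmed divisor by one or more steps, which at these magnitudes is far
more than the few percent of baud-rate error a UART link tolerates, so the
serial console stops working until clock_get_periph_rate() reports the right
value.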