Diffstat (limited to 'drivers')
-rw-r--r--  drivers/clk/rockchip/Makefile        |    1
-rw-r--r--  drivers/clk/rockchip/clk_rk3568.c    | 2959
-rw-r--r--  drivers/mtd/nand/raw/Kconfig         |   16
-rw-r--r--  drivers/mtd/nand/raw/Makefile        |    1
-rw-r--r--  drivers/mtd/nand/raw/rockchip_nfc.c  | 1253
-rw-r--r--  drivers/pci/pcie_dw_rockchip.c       |   16
-rw-r--r--  drivers/ram/rockchip/Makefile        |    1
-rw-r--r--  drivers/ram/rockchip/sdram_rk3568.c  |   56
8 files changed, 4294 insertions, 9 deletions
diff --git a/drivers/clk/rockchip/Makefile b/drivers/clk/rockchip/Makefile
index 4cfcf833092..913f611a0ff 100644
--- a/drivers/clk/rockchip/Makefile
+++ b/drivers/clk/rockchip/Makefile
@@ -14,4 +14,5 @@ obj-$(CONFIG_ROCKCHIP_RK3308) += clk_rk3308.o
obj-$(CONFIG_ROCKCHIP_RK3328) += clk_rk3328.o
obj-$(CONFIG_ROCKCHIP_RK3368) += clk_rk3368.o
obj-$(CONFIG_ROCKCHIP_RK3399) += clk_rk3399.o
+obj-$(CONFIG_ROCKCHIP_RK3568) += clk_rk3568.o
obj-$(CONFIG_ROCKCHIP_RV1108) += clk_rv1108.o
diff --git a/drivers/clk/rockchip/clk_rk3568.c b/drivers/clk/rockchip/clk_rk3568.c
new file mode 100644
index 00000000000..553c6c0dafb
--- /dev/null
+++ b/drivers/clk/rockchip/clk_rk3568.c
@@ -0,0 +1,2959 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2021 Rockchip Electronics Co., Ltd
+ * Author: Elaine Zhang <zhangqing@rock-chips.com>
+ */
+
+#include <common.h>
+#include <bitfield.h>
+#include <clk-uclass.h>
+#include <dm.h>
+#include <errno.h>
+#include <syscon.h>
+#include <asm/arch-rockchip/cru_rk3568.h>
+#include <asm/arch-rockchip/clock.h>
+#include <asm/arch-rockchip/hardware.h>
+#include <asm/io.h>
+#include <dm/lists.h>
+#include <dt-bindings/clock/rk3568-cru.h>
+
+DECLARE_GLOBAL_DATA_PTR;
+
+#if CONFIG_IS_ENABLED(OF_PLATDATA)
+struct rk3568_clk_plat {
+ struct dtd_rockchip_rk3568_cru dtd;
+};
+
+struct rk3568_pmuclk_plat {
+ struct dtd_rockchip_rk3568_pmucru dtd;
+};
+#endif
+
+#define RK3568_CPUCLK_RATE(_rate, _aclk_div, _pclk_div) \
+{ \
+ .rate = _rate##U, \
+ .aclk_div = _aclk_div, \
+ .pclk_div = _pclk_div, \
+}
+
+#define DIV_TO_RATE(input_rate, div) ((input_rate) / ((div) + 1))
+
+static struct rockchip_cpu_rate_table rk3568_cpu_rates[] = {
+ RK3568_CPUCLK_RATE(1416000000, 1, 5),
+ RK3568_CPUCLK_RATE(1296000000, 1, 5),
+ RK3568_CPUCLK_RATE(1200000000, 1, 3),
+ RK3568_CPUCLK_RATE(1104000000, 1, 3),
+ RK3568_CPUCLK_RATE(1008000000, 1, 3),
+ RK3568_CPUCLK_RATE(912000000, 1, 3),
+ RK3568_CPUCLK_RATE(816000000, 1, 3),
+ RK3568_CPUCLK_RATE(600000000, 1, 1),
+ RK3568_CPUCLK_RATE(408000000, 1, 1),
+ { /* sentinel */ },
+};
+
+static struct rockchip_pll_rate_table rk3568_pll_rates[] = {
+ /* _mhz, _refdiv, _fbdiv, _postdiv1, _postdiv2, _dsmpd, _frac */
+ RK3036_PLL_RATE(1608000000, 1, 67, 1, 1, 1, 0),
+ RK3036_PLL_RATE(1416000000, 1, 118, 2, 1, 1, 0),
+ RK3036_PLL_RATE(1296000000, 1, 108, 2, 1, 1, 0),
+ RK3036_PLL_RATE(1200000000, 1, 100, 2, 1, 1, 0),
+ RK3036_PLL_RATE(1188000000, 1, 99, 2, 1, 1, 0),
+ RK3036_PLL_RATE(1104000000, 1, 92, 2, 1, 1, 0),
+ RK3036_PLL_RATE(1008000000, 1, 84, 2, 1, 1, 0),
+ RK3036_PLL_RATE(1000000000, 3, 250, 2, 1, 1, 0),
+ RK3036_PLL_RATE(912000000, 1, 76, 2, 1, 1, 0),
+ RK3036_PLL_RATE(816000000, 1, 68, 2, 1, 1, 0),
+ RK3036_PLL_RATE(600000000, 1, 100, 4, 1, 1, 0),
+ RK3036_PLL_RATE(594000000, 1, 99, 4, 1, 1, 0),
+ RK3036_PLL_RATE(500000000, 1, 125, 6, 1, 1, 0),
+ RK3036_PLL_RATE(408000000, 1, 68, 2, 2, 1, 0),
+ RK3036_PLL_RATE(400000000, 1, 100, 6, 1, 1, 0),
+ RK3036_PLL_RATE(200000000, 1, 100, 6, 2, 1, 0),
+ RK3036_PLL_RATE(100000000, 1, 150, 6, 6, 1, 0),
+ { /* sentinel */ },
+};
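For reference, each integer-mode row in the table above follows the usual RK3036-style PLL relation with the 24 MHz oscillator this driver assumes throughout; a quick cross-check, written as an illustrative comment rather than part of the patch:

/*
 * rate = OSC_HZ / _refdiv * _fbdiv / (_postdiv1 * _postdiv2); _dsmpd = 1,
 * so _frac is unused. Checking a few rows of rk3568_pll_rates[]:
 *   1608 MHz = 24 MHz / 1 * 67  / (1 * 1)
 *   1416 MHz = 24 MHz / 1 * 118 / (2 * 1)
 *   1000 MHz = 24 MHz / 3 * 250 / (2 * 1)
 */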
+
+static struct rockchip_pll_clock rk3568_pll_clks[] = {
+ [APLL] = PLL(pll_rk3328, PLL_APLL, RK3568_PLL_CON(0),
+ RK3568_MODE_CON, 0, 10, 0, rk3568_pll_rates),
+ [DPLL] = PLL(pll_rk3328, PLL_DPLL, RK3568_PLL_CON(8),
+ RK3568_MODE_CON, 2, 10, 0, NULL),
+ [CPLL] = PLL(pll_rk3328, PLL_CPLL, RK3568_PLL_CON(24),
+ RK3568_MODE_CON, 4, 10, 0, rk3568_pll_rates),
+ [GPLL] = PLL(pll_rk3328, PLL_HPLL, RK3568_PLL_CON(16),
+ RK3568_MODE_CON, 6, 10, 0, rk3568_pll_rates),
+ [NPLL] = PLL(pll_rk3328, PLL_NPLL, RK3568_PLL_CON(32),
+ RK3568_MODE_CON, 10, 10, 0, rk3568_pll_rates),
+ [VPLL] = PLL(pll_rk3328, PLL_VPLL, RK3568_PLL_CON(40),
+ RK3568_MODE_CON, 12, 10, 0, rk3568_pll_rates),
+ [PPLL] = PLL(pll_rk3328, PLL_PPLL, RK3568_PMU_PLL_CON(0),
+ RK3568_PMU_MODE, 0, 10, 0, rk3568_pll_rates),
+ [HPLL] = PLL(pll_rk3328, PLL_HPLL, RK3568_PMU_PLL_CON(16),
+ RK3568_PMU_MODE, 2, 10, 0, rk3568_pll_rates),
+};
+
+#ifndef CONFIG_SPL_BUILD
+static ulong
+rk3568_pmu_pll_set_rate(struct rk3568_clk_priv *priv,
+ ulong pll_id, ulong rate)
+{
+ struct udevice *pmucru_dev;
+ struct rk3568_pmuclk_priv *pmu_priv;
+ int ret;
+
+ ret = uclass_get_device_by_driver(UCLASS_CLK,
+ DM_DRIVER_GET(rockchip_rk3568_pmucru),
+ &pmucru_dev);
+ if (ret) {
+ printf("%s: could not find pmucru device\n", __func__);
+ return ret;
+ }
+ pmu_priv = dev_get_priv(pmucru_dev);
+
+ rockchip_pll_set_rate(&rk3568_pll_clks[pll_id],
+ pmu_priv->pmucru, pll_id, rate);
+
+ return 0;
+}
+#endif
+
+static ulong rk3568_pmu_pll_get_rate(struct rk3568_clk_priv *priv,
+ ulong pll_id)
+{
+ struct udevice *pmucru_dev;
+ struct rk3568_pmuclk_priv *pmu_priv;
+ int ret;
+
+ ret = uclass_get_device_by_driver(UCLASS_CLK,
+ DM_DRIVER_GET(rockchip_rk3568_pmucru),
+ &pmucru_dev);
+ if (ret) {
+ printf("%s: could not find pmucru device\n", __func__);
+ return ret;
+ }
+ pmu_priv = dev_get_priv(pmucru_dev);
+
+ return rockchip_pll_get_rate(&rk3568_pll_clks[pll_id],
+ pmu_priv->pmucru, pll_id);
+}
+
+/*
+ * Calculate the best rational approximation for a given fraction while
+ * honouring restricted register sizes, e.g. to find suitable values for
+ * a PLL with a 5-bit denominator and an 8-bit numerator register field,
+ * approximating a frequency ratio of 3.1415, one would call:
+ *
+ * rational_best_approximation(31415, 10000,
+ * (1 << 8) - 1, (1 << 5) - 1, &n, &d);
+ *
+ * you may look at given_numerator as a fixed point number,
+ * with the fractional part size described in given_denominator.
+ *
+ * for theoretical background, see:
+ * http://en.wikipedia.org/wiki/Continued_fraction
+ */
+static void rational_best_approximation(unsigned long given_numerator,
+ unsigned long given_denominator,
+ unsigned long max_numerator,
+ unsigned long max_denominator,
+ unsigned long *best_numerator,
+ unsigned long *best_denominator)
+{
+ unsigned long n, d, n0, d0, n1, d1;
+
+ n = given_numerator;
+ d = given_denominator;
+ n0 = 0;
+ d1 = 0;
+ n1 = 1;
+ d0 = 1;
+ for (;;) {
+ unsigned long t, a;
+
+ if (n1 > max_numerator || d1 > max_denominator) {
+ n1 = n0;
+ d1 = d0;
+ break;
+ }
+ if (d == 0)
+ break;
+ t = d;
+ a = n / d;
+ d = n % d;
+ n = t;
+ t = n0 + a * n1;
+ n0 = n1;
+ n1 = t;
+ t = d0 + a * d1;
+ d0 = d1;
+ d1 = t;
+ }
+ *best_numerator = n1;
+ *best_denominator = d1;
+}
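A minimal usage sketch of the helper above, mirroring the call made just below in rk3568_rtc32k_set_pmuclk() for the 32768 Hz RTC clock (illustrative only, not part of the patch):

/* The RTC32K path requests 32768 Hz from the 24 MHz oscillator with
 * 16-bit numerator/denominator fields, i.e. effectively:
 */
unsigned long m, n;

rational_best_approximation(32768, 24000000,
                            GENMASK(16 - 1, 0), GENMASK(16 - 1, 0), &m, &n);
/* The exact reduced fraction fits the fields: m = 64, n = 46875, and
 * 24000000 * 64 / 46875 == 32768, which is what
 * rk3568_rtc32k_get_pmuclk() reads back.
 */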
+
+static ulong rk3568_rtc32k_get_pmuclk(struct rk3568_pmuclk_priv *priv)
+{
+ struct rk3568_pmucru *pmucru = priv->pmucru;
+ unsigned long m, n;
+ u32 fracdiv;
+
+ fracdiv = readl(&pmucru->pmu_clksel_con[1]);
+ m = fracdiv & RTC32K_FRAC_NUMERATOR_MASK;
+ m >>= RTC32K_FRAC_NUMERATOR_SHIFT;
+ n = fracdiv & RTC32K_FRAC_DENOMINATOR_MASK;
+ n >>= RTC32K_FRAC_DENOMINATOR_SHIFT;
+
+ return OSC_HZ * m / n;
+}
+
+static ulong rk3568_rtc32k_set_pmuclk(struct rk3568_pmuclk_priv *priv,
+ ulong rate)
+{
+ struct rk3568_pmucru *pmucru = priv->pmucru;
+ unsigned long m, n, val;
+
+ rk_clrsetreg(&pmucru->pmu_clksel_con[0], RTC32K_SEL_MASK,
+ RTC32K_SEL_OSC0_DIV32K << RTC32K_SEL_SHIFT);
+
+ rational_best_approximation(rate, OSC_HZ,
+ GENMASK(16 - 1, 0),
+ GENMASK(16 - 1, 0),
+ &m, &n);
+ val = m << RTC32K_FRAC_NUMERATOR_SHIFT | n;
+ writel(val, &pmucru->pmu_clksel_con[1]);
+
+ return rk3568_rtc32k_get_pmuclk(priv);
+}
+
+static ulong rk3568_i2c_get_pmuclk(struct rk3568_pmuclk_priv *priv,
+ ulong clk_id)
+{
+ struct rk3568_pmucru *pmucru = priv->pmucru;
+ u32 div, con;
+
+ switch (clk_id) {
+ case CLK_I2C0:
+ con = readl(&pmucru->pmu_clksel_con[3]);
+ div = (con & CLK_I2C0_DIV_MASK) >> CLK_I2C0_DIV_SHIFT;
+ break;
+ default:
+ return -ENOENT;
+ }
+
+ return DIV_TO_RATE(priv->ppll_hz, div);
+}
+
+static ulong rk3568_i2c_set_pmuclk(struct rk3568_pmuclk_priv *priv,
+ ulong clk_id, ulong rate)
+{
+ struct rk3568_pmucru *pmucru = priv->pmucru;
+ int src_clk_div;
+
+ src_clk_div = DIV_ROUND_UP(priv->ppll_hz, rate);
+ assert(src_clk_div - 1 <= 127);
+
+ switch (clk_id) {
+ case CLK_I2C0:
+ rk_clrsetreg(&pmucru->pmu_clksel_con[3], CLK_I2C0_DIV_MASK,
+ (src_clk_div - 1) << CLK_I2C0_DIV_SHIFT);
+ break;
+ default:
+ return -ENOENT;
+ }
+
+ return rk3568_i2c_get_pmuclk(priv, clk_id);
+}
+
+static ulong rk3568_pwm_get_pmuclk(struct rk3568_pmuclk_priv *priv,
+ ulong clk_id)
+{
+ struct rk3568_pmucru *pmucru = priv->pmucru;
+ u32 div, sel, con, parent;
+
+ switch (clk_id) {
+ case CLK_PWM0:
+ con = readl(&pmucru->pmu_clksel_con[6]);
+ sel = (con & CLK_PWM0_SEL_MASK) >> CLK_PWM0_SEL_SHIFT;
+ div = (con & CLK_PWM0_DIV_MASK) >> CLK_PWM0_DIV_SHIFT;
+ if (sel == CLK_PWM0_SEL_XIN24M)
+ parent = OSC_HZ;
+ else
+ parent = priv->ppll_hz;
+ break;
+ default:
+ return -ENOENT;
+ }
+
+ return DIV_TO_RATE(parent, div);
+}
+
+static ulong rk3568_pwm_set_pmuclk(struct rk3568_pmuclk_priv *priv,
+ ulong clk_id, ulong rate)
+{
+ struct rk3568_pmucru *pmucru = priv->pmucru;
+ int src_clk_div;
+
+ switch (clk_id) {
+ case CLK_PWM0:
+ if (rate == OSC_HZ) {
+ rk_clrsetreg(&pmucru->pmu_clksel_con[6],
+ CLK_PWM0_SEL_MASK | CLK_PWM0_DIV_MASK,
+ (CLK_PWM0_SEL_XIN24M <<
+ CLK_PWM0_SEL_SHIFT) |
+ 0 << CLK_PWM0_DIV_SHIFT);
+ } else {
+ src_clk_div = DIV_ROUND_UP(priv->ppll_hz, rate);
+ assert(src_clk_div - 1 <= 127);
+ rk_clrsetreg(&pmucru->pmu_clksel_con[6],
+ CLK_PWM0_SEL_MASK | CLK_PWM0_DIV_MASK,
+ (CLK_PWM0_SEL_PPLL << CLK_PWM0_SEL_SHIFT) |
+ (src_clk_div - 1) << CLK_PWM0_DIV_SHIFT);
+ }
+ break;
+ default:
+ return -ENOENT;
+ }
+
+ return rk3568_pwm_get_pmuclk(priv, clk_id);
+}
+
+static ulong rk3568_pmu_get_pmuclk(struct rk3568_pmuclk_priv *priv)
+{
+ struct rk3568_pmucru *pmucru = priv->pmucru;
+ u32 div, con, sel, parent;
+
+ con = readl(&pmucru->pmu_clksel_con[2]);
+ sel = (con & PCLK_PDPMU_SEL_MASK) >> PCLK_PDPMU_SEL_SHIFT;
+ div = (con & PCLK_PDPMU_DIV_MASK) >> PCLK_PDPMU_DIV_SHIFT;
+ if (sel)
+ parent = GPLL_HZ;
+ else
+ parent = priv->ppll_hz;
+
+ return DIV_TO_RATE(parent, div);
+}
+
+static ulong rk3568_pmu_set_pmuclk(struct rk3568_pmuclk_priv *priv,
+ ulong rate)
+{
+ struct rk3568_pmucru *pmucru = priv->pmucru;
+ int src_clk_div;
+
+ src_clk_div = DIV_ROUND_UP(priv->ppll_hz, rate);
+ assert(src_clk_div - 1 <= 31);
+
+ rk_clrsetreg(&pmucru->pmu_clksel_con[2],
+ PCLK_PDPMU_DIV_MASK | PCLK_PDPMU_SEL_MASK,
+ (PCLK_PDPMU_SEL_PPLL << PCLK_PDPMU_SEL_SHIFT) |
+ ((src_clk_div - 1) << PCLK_PDPMU_DIV_SHIFT));
+
+ return rk3568_pmu_get_pmuclk(priv);
+}
+
+static ulong rk3568_pmuclk_get_rate(struct clk *clk)
+{
+ struct rk3568_pmuclk_priv *priv = dev_get_priv(clk->dev);
+ ulong rate = 0;
+
+ if (!priv->ppll_hz) {
+ printf("%s ppll=%lu\n", __func__, priv->ppll_hz);
+ return -ENOENT;
+ }
+
+ debug("%s %ld\n", __func__, clk->id);
+ switch (clk->id) {
+ case PLL_PPLL:
+ rate = rockchip_pll_get_rate(&rk3568_pll_clks[PPLL],
+ priv->pmucru, PPLL);
+ break;
+ case PLL_HPLL:
+ rate = rockchip_pll_get_rate(&rk3568_pll_clks[HPLL],
+ priv->pmucru, HPLL);
+ break;
+ case CLK_RTC_32K:
+ case CLK_RTC32K_FRAC:
+ rate = rk3568_rtc32k_get_pmuclk(priv);
+ break;
+ case CLK_I2C0:
+ rate = rk3568_i2c_get_pmuclk(priv, clk->id);
+ break;
+ case CLK_PWM0:
+ rate = rk3568_pwm_get_pmuclk(priv, clk->id);
+ break;
+ case PCLK_PMU:
+ rate = rk3568_pmu_get_pmuclk(priv);
+ break;
+ default:
+ return -ENOENT;
+ }
+
+ return rate;
+}
+
+static ulong rk3568_pmuclk_set_rate(struct clk *clk, ulong rate)
+{
+ struct rk3568_pmuclk_priv *priv = dev_get_priv(clk->dev);
+ ulong ret = 0;
+
+ if (!priv->ppll_hz) {
+ printf("%s ppll=%lu\n", __func__, priv->ppll_hz);
+ return -ENOENT;
+ }
+
+ debug("%s %ld %ld\n", __func__, clk->id, rate);
+ switch (clk->id) {
+ case PLL_PPLL:
+ ret = rockchip_pll_set_rate(&rk3568_pll_clks[PPLL],
+ priv->pmucru, PPLL, rate);
+ priv->ppll_hz = rockchip_pll_get_rate(&rk3568_pll_clks[PPLL],
+ priv->pmucru, PPLL);
+ break;
+ case PLL_HPLL:
+ ret = rockchip_pll_set_rate(&rk3568_pll_clks[HPLL],
+ priv->pmucru, HPLL, rate);
+ priv->hpll_hz = rockchip_pll_get_rate(&rk3568_pll_clks[HPLL],
+ priv->pmucru, HPLL);
+ break;
+ case CLK_RTC_32K:
+ case CLK_RTC32K_FRAC:
+ ret = rk3568_rtc32k_set_pmuclk(priv, rate);
+ break;
+ case CLK_I2C0:
+ ret = rk3568_i2c_set_pmuclk(priv, clk->id, rate);
+ break;
+ case CLK_PWM0:
+ ret = rk3568_pwm_set_pmuclk(priv, clk->id, rate);
+ break;
+ case PCLK_PMU:
+ ret = rk3568_pmu_set_pmuclk(priv, rate);
+ break;
+ default:
+ return -ENOENT;
+ }
+
+ return ret;
+}
+
+static int rk3568_rtc32k_set_parent(struct clk *clk, struct clk *parent)
+{
+ struct rk3568_pmuclk_priv *priv = dev_get_priv(clk->dev);
+ struct rk3568_pmucru *pmucru = priv->pmucru;
+
+ if (parent->id == CLK_RTC32K_FRAC)
+ rk_clrsetreg(&pmucru->pmu_clksel_con[0], RTC32K_SEL_MASK,
+ RTC32K_SEL_OSC0_DIV32K << RTC32K_SEL_SHIFT);
+ else
+ rk_clrsetreg(&pmucru->pmu_clksel_con[0], RTC32K_SEL_MASK,
+ RTC32K_SEL_OSC1_32K << RTC32K_SEL_SHIFT);
+
+ return 0;
+}
+
+static int rk3568_pmuclk_set_parent(struct clk *clk, struct clk *parent)
+{
+ switch (clk->id) {
+ case CLK_RTC_32K:
+ return rk3568_rtc32k_set_parent(clk, parent);
+ default:
+ return -ENOENT;
+ }
+}
+
+static struct clk_ops rk3568_pmuclk_ops = {
+ .get_rate = rk3568_pmuclk_get_rate,
+ .set_rate = rk3568_pmuclk_set_rate,
+ .set_parent = rk3568_pmuclk_set_parent,
+};
+
+static int rk3568_pmuclk_probe(struct udevice *dev)
+{
+ struct rk3568_pmuclk_priv *priv = dev_get_priv(dev);
+ int ret = 0;
+
+ if (priv->ppll_hz != PPLL_HZ) {
+ ret = rockchip_pll_set_rate(&rk3568_pll_clks[PPLL],
+ priv->pmucru,
+ PPLL, PPLL_HZ);
+ if (!ret)
+ priv->ppll_hz = PPLL_HZ;
+ }
+
+ /* Ungate PCIe30phy refclk_m and refclk_n */
+ rk_clrsetreg(&priv->pmucru->pmu_clkgate_con[2], 0x3 << 13, 0 << 13);
+ return 0;
+}
+
+static int rk3568_pmuclk_ofdata_to_platdata(struct udevice *dev)
+{
+ struct rk3568_pmuclk_priv *priv = dev_get_priv(dev);
+
+ priv->pmucru = dev_read_addr_ptr(dev);
+
+ return 0;
+}
+
+static int rk3568_pmuclk_bind(struct udevice *dev)
+{
+#if CONFIG_IS_ENABLED(RESET_ROCKCHIP)
+ int ret = 0;
+
+ ret = offsetof(struct rk3568_pmucru, pmu_softrst_con[0]);
+ ret = rockchip_reset_bind(dev, ret, 1);
+ if (ret)
+ debug("Warning: pmucru software reset driver bind faile\n");
+#endif
+
+ return 0;
+}
+
+static const struct udevice_id rk3568_pmuclk_ids[] = {
+ { .compatible = "rockchip,rk3568-pmucru" },
+ { }
+};
+
+U_BOOT_DRIVER(rockchip_rk3568_pmucru) = {
+ .name = "rockchip_rk3568_pmucru",
+ .id = UCLASS_CLK,
+ .of_match = rk3568_pmuclk_ids,
+ .priv_auto = sizeof(struct rk3568_pmuclk_priv),
+ .of_to_plat = rk3568_pmuclk_ofdata_to_platdata,
+ .ops = &rk3568_pmuclk_ops,
+ .bind = rk3568_pmuclk_bind,
+ .probe = rk3568_pmuclk_probe,
+#if CONFIG_IS_ENABLED(OF_PLATDATA)
+ .plat_auto = sizeof(struct rk3568_pmuclk_plat),
+#endif
+};
+
+static int rk3568_armclk_set_clk(struct rk3568_clk_priv *priv, ulong hz)
+{
+ struct rk3568_cru *cru = priv->cru;
+ const struct rockchip_cpu_rate_table *rate;
+ ulong old_rate;
+
+ rate = rockchip_get_cpu_settings(rk3568_cpu_rates, hz);
+ if (!rate) {
+ printf("%s unsupported rate\n", __func__);
+ return -EINVAL;
+ }
+
+ rk_clrsetreg(&cru->clksel_con[0],
+ CLK_CORE_PRE_SEL_MASK,
+ (CLK_CORE_PRE_SEL_SRC << CLK_CORE_PRE_SEL_SHIFT));
+ rk_clrsetreg(&cru->clksel_con[2],
+ SCLK_CORE_PRE_SEL_MASK |
+ SCLK_CORE_SRC_SEL_MASK |
+ SCLK_CORE_SRC_DIV_MASK,
+ (SCLK_CORE_PRE_SEL_SRC <<
+ SCLK_CORE_PRE_SEL_SHIFT) |
+ (SCLK_CORE_SRC_SEL_APLL <<
+ SCLK_CORE_SRC_SEL_SHIFT) |
+ (1 << SCLK_CORE_SRC_DIV_SHIFT));
+
+ /*
+ * Set up the dependent divisors for the DBG and ACLK clocks. When
+ * lowering the core rate, reprogram the PLL first and then relax the
+ * divisors; when raising it, increase the divisors first so the derived
+ * clocks never overshoot while the PLL ramps up.
+ */
+ old_rate = rockchip_pll_get_rate(&rk3568_pll_clks[APLL],
+ priv->cru, APLL);
+ if (old_rate > hz) {
+ if (rockchip_pll_set_rate(&rk3568_pll_clks[APLL],
+ priv->cru, APLL, hz))
+ return -EINVAL;
+ rk_clrsetreg(&cru->clksel_con[3],
+ GICCLK_CORE_DIV_MASK | ATCLK_CORE_DIV_MASK,
+ rate->pclk_div << GICCLK_CORE_DIV_SHIFT |
+ rate->pclk_div << ATCLK_CORE_DIV_SHIFT);
+ rk_clrsetreg(&cru->clksel_con[4],
+ PERIPHCLK_CORE_PRE_DIV_MASK |
+ PCLK_CORE_PRE_DIV_MASK,
+ rate->pclk_div << PCLK_CORE_PRE_DIV_SHIFT |
+ rate->pclk_div << PERIPHCLK_CORE_PRE_DIV_SHIFT);
+ rk_clrsetreg(&cru->clksel_con[5],
+ ACLK_CORE_NDFT_DIV_MASK,
+ rate->aclk_div << ACLK_CORE_NDFT_DIV_SHIFT);
+ } else if (old_rate < hz) {
+ rk_clrsetreg(&cru->clksel_con[3],
+ GICCLK_CORE_DIV_MASK | ATCLK_CORE_DIV_MASK,
+ rate->pclk_div << GICCLK_CORE_DIV_SHIFT |
+ rate->pclk_div << ATCLK_CORE_DIV_SHIFT);
+ rk_clrsetreg(&cru->clksel_con[4],
+ PERIPHCLK_CORE_PRE_DIV_MASK |
+ PCLK_CORE_PRE_DIV_MASK,
+ rate->pclk_div << PCLK_CORE_PRE_DIV_SHIFT |
+ rate->pclk_div << PERIPHCLK_CORE_PRE_DIV_SHIFT);
+ rk_clrsetreg(&cru->clksel_con[5],
+ ACLK_CORE_NDFT_DIV_MASK,
+ rate->aclk_div << ACLK_CORE_NDFT_DIV_SHIFT);
+ if (rockchip_pll_set_rate(&rk3568_pll_clks[APLL],
+ priv->cru, APLL, hz))
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static ulong rk3568_cpll_div_get_rate(struct rk3568_clk_priv *priv,
+ ulong clk_id)
+{
+ struct rk3568_cru *cru = priv->cru;
+ int div, mask, shift, con;
+
+ switch (clk_id) {
+ case CPLL_500M:
+ con = 78;
+ mask = CPLL_500M_DIV_MASK;
+ shift = CPLL_500M_DIV_SHIFT;
+ break;
+ case CPLL_333M:
+ con = 79;
+ mask = CPLL_333M_DIV_MASK;
+ shift = CPLL_333M_DIV_SHIFT;
+ break;
+ case CPLL_250M:
+ con = 79;
+ mask = CPLL_250M_DIV_MASK;
+ shift = CPLL_250M_DIV_SHIFT;
+ break;
+ case CPLL_125M:
+ con = 80;
+ mask = CPLL_125M_DIV_MASK;
+ shift = CPLL_125M_DIV_SHIFT;
+ break;
+ case CPLL_100M:
+ con = 82;
+ mask = CPLL_100M_DIV_MASK;
+ shift = CPLL_100M_DIV_SHIFT;
+ break;
+ case CPLL_62P5M:
+ con = 80;
+ mask = CPLL_62P5M_DIV_MASK;
+ shift = CPLL_62P5M_DIV_SHIFT;
+ break;
+ case CPLL_50M:
+ con = 81;
+ mask = CPLL_50M_DIV_MASK;
+ shift = CPLL_50M_DIV_SHIFT;
+ break;
+ case CPLL_25M:
+ con = 81;
+ mask = CPLL_25M_DIV_MASK;
+ shift = CPLL_25M_DIV_SHIFT;
+ break;
+ default:
+ return -ENOENT;
+ }
+
+ div = (readl(&cru->clksel_con[con]) & mask) >> shift;
+ return DIV_TO_RATE(priv->cpll_hz, div);
+}
+
+static ulong rk3568_cpll_div_set_rate(struct rk3568_clk_priv *priv,
+ ulong clk_id, ulong rate)
+{
+ struct rk3568_cru *cru = priv->cru;
+ int div, mask, shift, con;
+
+ switch (clk_id) {
+ case CPLL_500M:
+ con = 78;
+ mask = CPLL_500M_DIV_MASK;
+ shift = CPLL_500M_DIV_SHIFT;
+ break;
+ case CPLL_333M:
+ con = 79;
+ mask = CPLL_333M_DIV_MASK;
+ shift = CPLL_333M_DIV_SHIFT;
+ break;
+ case CPLL_250M:
+ con = 79;
+ mask = CPLL_250M_DIV_MASK;
+ shift = CPLL_250M_DIV_SHIFT;
+ break;
+ case CPLL_125M:
+ con = 80;
+ mask = CPLL_125M_DIV_MASK;
+ shift = CPLL_125M_DIV_SHIFT;
+ break;
+ case CPLL_100M:
+ con = 82;
+ mask = CPLL_100M_DIV_MASK;
+ shift = CPLL_100M_DIV_SHIFT;
+ break;
+ case CPLL_62P5M:
+ con = 80;
+ mask = CPLL_62P5M_DIV_MASK;
+ shift = CPLL_62P5M_DIV_SHIFT;
+ break;
+ case CPLL_50M:
+ con = 81;
+ mask = CPLL_50M_DIV_MASK;
+ shift = CPLL_50M_DIV_SHIFT;
+ break;
+ case CPLL_25M:
+ con = 81;
+ mask = CPLL_25M_DIV_MASK;
+ shift = CPLL_25M_DIV_SHIFT;
+ break;
+ default:
+ return -ENOENT;
+ }
+
+ div = DIV_ROUND_UP(priv->cpll_hz, rate);
+ assert(div - 1 <= 31);
+ rk_clrsetreg(&cru->clksel_con[con],
+ mask, (div - 1) << shift);
+ return rk3568_cpll_div_get_rate(priv, clk_id);
+}
+
+static ulong rk3568_bus_get_clk(struct rk3568_clk_priv *priv, ulong clk_id)
+{
+ struct rk3568_cru *cru = priv->cru;
+ u32 con, sel, rate;
+
+ switch (clk_id) {
+ case ACLK_BUS:
+ con = readl(&cru->clksel_con[50]);
+ sel = (con & ACLK_BUS_SEL_MASK) >> ACLK_BUS_SEL_SHIFT;
+ if (sel == ACLK_BUS_SEL_200M)
+ rate = 200 * MHz;
+ else if (sel == ACLK_BUS_SEL_150M)
+ rate = 150 * MHz;
+ else if (sel == ACLK_BUS_SEL_100M)
+ rate = 100 * MHz;
+ else
+ rate = OSC_HZ;
+ break;
+ case PCLK_BUS:
+ case PCLK_WDT_NS:
+ con = readl(&cru->clksel_con[50]);
+ sel = (con & PCLK_BUS_SEL_MASK) >> PCLK_BUS_SEL_SHIFT;
+ if (sel == PCLK_BUS_SEL_100M)
+ rate = 100 * MHz;
+ else if (sel == PCLK_BUS_SEL_75M)
+ rate = 75 * MHz;
+ else if (sel == PCLK_BUS_SEL_50M)
+ rate = 50 * MHz;
+ else
+ rate = OSC_HZ;
+ break;
+ default:
+ return -ENOENT;
+ }
+
+ return rate;
+}
+
+static ulong rk3568_bus_set_clk(struct rk3568_clk_priv *priv,
+ ulong clk_id, ulong rate)
+{
+ struct rk3568_cru *cru = priv->cru;
+ int src_clk;
+
+ switch (clk_id) {
+ case ACLK_BUS:
+ if (rate == 200 * MHz)
+ src_clk = ACLK_BUS_SEL_200M;
+ else if (rate == 150 * MHz)
+ src_clk = ACLK_BUS_SEL_150M;
+ else if (rate == 100 * MHz)
+ src_clk = ACLK_BUS_SEL_100M;
+ else
+ src_clk = ACLK_BUS_SEL_24M;
+ rk_clrsetreg(&cru->clksel_con[50],
+ ACLK_BUS_SEL_MASK,
+ src_clk << ACLK_BUS_SEL_SHIFT);
+ break;
+ case PCLK_BUS:
+ case PCLK_WDT_NS:
+ if (rate == 100 * MHz)
+ src_clk = PCLK_BUS_SEL_100M;
+ else if (rate == 75 * MHz)
+ src_clk = PCLK_BUS_SEL_75M;
+ else if (rate == 50 * MHz)
+ src_clk = PCLK_BUS_SEL_50M;
+ else
+ src_clk = PCLK_BUS_SEL_24M;
+ rk_clrsetreg(&cru->clksel_con[50],
+ PCLK_BUS_SEL_MASK,
+ src_clk << PCLK_BUS_SEL_SHIFT);
+ break;
+
+ default:
+ printf("do not support this bus freq\n");
+ return -EINVAL;
+ }
+
+ return rk3568_bus_get_clk(priv, clk_id);
+}
+
+static ulong rk3568_perimid_get_clk(struct rk3568_clk_priv *priv, ulong clk_id)
+{
+ struct rk3568_cru *cru = priv->cru;
+ u32 con, sel, rate;
+
+ switch (clk_id) {
+ case ACLK_PERIMID:
+ con = readl(&cru->clksel_con[10]);
+ sel = (con & ACLK_PERIMID_SEL_MASK) >> ACLK_PERIMID_SEL_SHIFT;
+ if (sel == ACLK_PERIMID_SEL_300M)
+ rate = 300 * MHz;
+ else if (sel == ACLK_PERIMID_SEL_200M)
+ rate = 200 * MHz;
+ else if (sel == ACLK_PERIMID_SEL_100M)
+ rate = 100 * MHz;
+ else
+ rate = OSC_HZ;
+ break;
+ case HCLK_PERIMID:
+ con = readl(&cru->clksel_con[10]);
+ sel = (con & HCLK_PERIMID_SEL_MASK) >> HCLK_PERIMID_SEL_SHIFT;
+ if (sel == HCLK_PERIMID_SEL_150M)
+ rate = 150 * MHz;
+ else if (sel == HCLK_PERIMID_SEL_100M)
+ rate = 100 * MHz;
+ else if (sel == HCLK_PERIMID_SEL_75M)
+ rate = 75 * MHz;
+ else
+ rate = OSC_HZ;
+ break;
+ default:
+ return -ENOENT;
+ }
+
+ return rate;
+}
+
+static ulong rk3568_perimid_set_clk(struct rk3568_clk_priv *priv,
+ ulong clk_id, ulong rate)
+{
+ struct rk3568_cru *cru = priv->cru;
+ int src_clk;
+
+ switch (clk_id) {
+ case ACLK_PERIMID:
+ if (rate == 300 * MHz)
+ src_clk = ACLK_PERIMID_SEL_300M;
+ else if (rate == 200 * MHz)
+ src_clk = ACLK_PERIMID_SEL_200M;
+ else if (rate == 100 * MHz)
+ src_clk = ACLK_PERIMID_SEL_100M;
+ else
+ src_clk = ACLK_PERIMID_SEL_24M;
+ rk_clrsetreg(&cru->clksel_con[10],
+ ACLK_PERIMID_SEL_MASK,
+ src_clk << ACLK_PERIMID_SEL_SHIFT);
+ break;
+ case HCLK_PERIMID:
+ if (rate == 150 * MHz)
+ src_clk = HCLK_PERIMID_SEL_150M;
+ else if (rate == 100 * MHz)
+ src_clk = HCLK_PERIMID_SEL_100M;
+ else if (rate == 75 * MHz)
+ src_clk = HCLK_PERIMID_SEL_75M;
+ else
+ src_clk = HCLK_PERIMID_SEL_24M;
+ rk_clrsetreg(&cru->clksel_con[10],
+ HCLK_PERIMID_SEL_MASK,
+ src_clk << HCLK_PERIMID_SEL_SHIFT);
+ break;
+
+ default:
+ printf("do not support this permid freq\n");
+ return -EINVAL;
+ }
+
+ return rk3568_perimid_get_clk(priv, clk_id);
+}
+
+static ulong rk3568_top_get_clk(struct rk3568_clk_priv *priv, ulong clk_id)
+{
+ struct rk3568_cru *cru = priv->cru;
+ u32 con, sel, rate;
+
+ switch (clk_id) {
+ case ACLK_TOP_HIGH:
+ con = readl(&cru->clksel_con[73]);
+ sel = (con & ACLK_TOP_HIGH_SEL_MASK) >> ACLK_TOP_HIGH_SEL_SHIFT;
+ if (sel == ACLK_TOP_HIGH_SEL_500M)
+ rate = 500 * MHz;
+ else if (sel == ACLK_TOP_HIGH_SEL_400M)
+ rate = 400 * MHz;
+ else if (sel == ACLK_TOP_HIGH_SEL_300M)
+ rate = 300 * MHz;
+ else
+ rate = OSC_HZ;
+ break;
+ case ACLK_TOP_LOW:
+ con = readl(&cru->clksel_con[73]);
+ sel = (con & ACLK_TOP_LOW_SEL_MASK) >> ACLK_TOP_LOW_SEL_SHIFT;
+ if (sel == ACLK_TOP_LOW_SEL_400M)
+ rate = 400 * MHz;
+ else if (sel == ACLK_TOP_LOW_SEL_300M)
+ rate = 300 * MHz;
+ else if (sel == ACLK_TOP_LOW_SEL_200M)
+ rate = 200 * MHz;
+ else
+ rate = OSC_HZ;
+ break;
+ case HCLK_TOP:
+ con = readl(&cru->clksel_con[73]);
+ sel = (con & HCLK_TOP_SEL_MASK) >> HCLK_TOP_SEL_SHIFT;
+ if (sel == HCLK_TOP_SEL_150M)
+ rate = 150 * MHz;
+ else if (sel == HCLK_TOP_SEL_100M)
+ rate = 100 * MHz;
+ else if (sel == HCLK_TOP_SEL_75M)
+ rate = 75 * MHz;
+ else
+ rate = OSC_HZ;
+ break;
+ case PCLK_TOP:
+ con = readl(&cru->clksel_con[73]);
+ sel = (con & PCLK_TOP_SEL_MASK) >> PCLK_TOP_SEL_SHIFT;
+ if (sel == PCLK_TOP_SEL_100M)
+ rate = 100 * MHz;
+ else if (sel == PCLK_TOP_SEL_75M)
+ rate = 75 * MHz;
+ else if (sel == PCLK_TOP_SEL_50M)
+ rate = 50 * MHz;
+ else
+ rate = OSC_HZ;
+ break;
+ default:
+ return -ENOENT;
+ }
+
+ return rate;
+}
+
+static ulong rk3568_top_set_clk(struct rk3568_clk_priv *priv,
+ ulong clk_id, ulong rate)
+{
+ struct rk3568_cru *cru = priv->cru;
+ int src_clk;
+
+ switch (clk_id) {
+ case ACLK_TOP_HIGH:
+ if (rate == 500 * MHz)
+ src_clk = ACLK_TOP_HIGH_SEL_500M;
+ else if (rate == 400 * MHz)
+ src_clk = ACLK_TOP_HIGH_SEL_400M;
+ else if (rate == 300 * MHz)
+ src_clk = ACLK_TOP_HIGH_SEL_300M;
+ else
+ src_clk = ACLK_TOP_HIGH_SEL_24M;
+ rk_clrsetreg(&cru->clksel_con[73],
+ ACLK_TOP_HIGH_SEL_MASK,
+ src_clk << ACLK_TOP_HIGH_SEL_SHIFT);
+ break;
+ case ACLK_TOP_LOW:
+ if (rate == 400 * MHz)
+ src_clk = ACLK_TOP_LOW_SEL_400M;
+ else if (rate == 300 * MHz)
+ src_clk = ACLK_TOP_LOW_SEL_300M;
+ else if (rate == 200 * MHz)
+ src_clk = ACLK_TOP_LOW_SEL_200M;
+ else
+ src_clk = ACLK_TOP_LOW_SEL_24M;
+ rk_clrsetreg(&cru->clksel_con[73],
+ ACLK_TOP_LOW_SEL_MASK,
+ src_clk << ACLK_TOP_LOW_SEL_SHIFT);
+ break;
+ case HCLK_TOP:
+ if (rate == 150 * MHz)
+ src_clk = HCLK_TOP_SEL_150M;
+ else if (rate == 100 * MHz)
+ src_clk = HCLK_TOP_SEL_100M;
+ else if (rate == 75 * MHz)
+ src_clk = HCLK_TOP_SEL_75M;
+ else
+ src_clk = HCLK_TOP_SEL_24M;
+ rk_clrsetreg(&cru->clksel_con[73],
+ HCLK_TOP_SEL_MASK,
+ src_clk << HCLK_TOP_SEL_SHIFT);
+ break;
+ case PCLK_TOP:
+ if (rate == 100 * MHz)
+ src_clk = PCLK_TOP_SEL_100M;
+ else if (rate == 75 * MHz)
+ src_clk = PCLK_TOP_SEL_75M;
+ else if (rate == 50 * MHz)
+ src_clk = PCLK_TOP_SEL_50M;
+ else
+ src_clk = PCLK_TOP_SEL_24M;
+ rk_clrsetreg(&cru->clksel_con[73],
+ PCLK_TOP_SEL_MASK,
+ src_clk << PCLK_TOP_SEL_SHIFT);
+ break;
+
+ default:
+ printf("do not support this permid freq\n");
+ return -EINVAL;
+ }
+
+ return rk3568_top_get_clk(priv, clk_id);
+}
+
+static ulong rk3568_i2c_get_clk(struct rk3568_clk_priv *priv, ulong clk_id)
+{
+ struct rk3568_cru *cru = priv->cru;
+ u32 sel, con;
+ ulong rate;
+
+ switch (clk_id) {
+ case CLK_I2C1:
+ case CLK_I2C2:
+ case CLK_I2C3:
+ case CLK_I2C4:
+ case CLK_I2C5:
+ con = readl(&cru->clksel_con[71]);
+ sel = (con & CLK_I2C_SEL_MASK) >> CLK_I2C_SEL_SHIFT;
+ if (sel == CLK_I2C_SEL_200M)
+ rate = 200 * MHz;
+ else if (sel == CLK_I2C_SEL_100M)
+ rate = 100 * MHz;
+ else if (sel == CLK_I2C_SEL_CPLL_100M)
+ rate = 100 * MHz;
+ else
+ rate = OSC_HZ;
+ break;
+ default:
+ return -ENOENT;
+ }
+
+ return rate;
+}
+
+static ulong rk3568_i2c_set_clk(struct rk3568_clk_priv *priv, ulong clk_id,
+ ulong rate)
+{
+ struct rk3568_cru *cru = priv->cru;
+ int src_clk;
+
+ if (rate == 200 * MHz)
+ src_clk = CLK_I2C_SEL_200M;
+ else if (rate == 100 * MHz)
+ src_clk = CLK_I2C_SEL_100M;
+ else
+ src_clk = CLK_I2C_SEL_24M;
+
+ switch (clk_id) {
+ case CLK_I2C1:
+ case CLK_I2C2:
+ case CLK_I2C3:
+ case CLK_I2C4:
+ case CLK_I2C5:
+ rk_clrsetreg(&cru->clksel_con[71], CLK_I2C_SEL_MASK,
+ src_clk << CLK_I2C_SEL_SHIFT);
+ break;
+ default:
+ return -ENOENT;
+ }
+
+ return rk3568_i2c_get_clk(priv, clk_id);
+}
+
+static ulong rk3568_spi_get_clk(struct rk3568_clk_priv *priv, ulong clk_id)
+{
+ struct rk3568_cru *cru = priv->cru;
+ u32 sel, con;
+
+ con = readl(&cru->clksel_con[72]);
+
+ switch (clk_id) {
+ case CLK_SPI0:
+ sel = (con & CLK_SPI0_SEL_MASK) >> CLK_SPI0_SEL_SHIFT;
+ break;
+ case CLK_SPI1:
+ sel = (con & CLK_SPI1_SEL_MASK) >> CLK_SPI1_SEL_SHIFT;
+ break;
+ case CLK_SPI2:
+ sel = (con & CLK_SPI2_SEL_MASK) >> CLK_SPI2_SEL_SHIFT;
+ break;
+ case CLK_SPI3:
+ sel = (con & CLK_SPI3_SEL_MASK) >> CLK_SPI3_SEL_SHIFT;
+ break;
+ default:
+ return -ENOENT;
+ }
+
+ switch (sel) {
+ case CLK_SPI_SEL_200M:
+ return 200 * MHz;
+ case CLK_SPI_SEL_24M:
+ return OSC_HZ;
+ case CLK_SPI_SEL_CPLL_100M:
+ return 100 * MHz;
+ default:
+ return -ENOENT;
+ }
+}
+
+static ulong rk3568_spi_set_clk(struct rk3568_clk_priv *priv,
+ ulong clk_id, ulong rate)
+{
+ struct rk3568_cru *cru = priv->cru;
+ int src_clk;
+
+ if (rate == 200 * MHz)
+ src_clk = CLK_SPI_SEL_200M;
+ else if (rate == 100 * MHz)
+ src_clk = CLK_SPI_SEL_CPLL_100M;
+ else
+ src_clk = CLK_SPI_SEL_24M;
+
+ switch (clk_id) {
+ case CLK_SPI0:
+ rk_clrsetreg(&cru->clksel_con[72],
+ CLK_SPI0_SEL_MASK,
+ src_clk << CLK_SPI0_SEL_SHIFT);
+ break;
+ case CLK_SPI1:
+ rk_clrsetreg(&cru->clksel_con[72],
+ CLK_SPI1_SEL_MASK,
+ src_clk << CLK_SPI1_SEL_SHIFT);
+ break;
+ case CLK_SPI2:
+ rk_clrsetreg(&cru->clksel_con[72],
+ CLK_SPI2_SEL_MASK,
+ src_clk << CLK_SPI2_SEL_SHIFT);
+ break;
+ case CLK_SPI3:
+ rk_clrsetreg(&cru->clksel_con[72],
+ CLK_SPI3_SEL_MASK,
+ src_clk << CLK_SPI3_SEL_SHIFT);
+ break;
+ default:
+ return -ENOENT;
+ }
+
+ return rk3568_spi_get_clk(priv, clk_id);
+}
+
+static ulong rk3568_pwm_get_clk(struct rk3568_clk_priv *priv, ulong clk_id)
+{
+ struct rk3568_cru *cru = priv->cru;
+ u32 sel, con;
+
+ con = readl(&cru->clksel_con[72]);
+
+ switch (clk_id) {
+ case CLK_PWM1:
+ sel = (con & CLK_PWM1_SEL_MASK) >> CLK_PWM3_SEL_SHIFT;
+ break;
+ case CLK_PWM2:
+ sel = (con & CLK_PWM2_SEL_MASK) >> CLK_PWM2_SEL_SHIFT;
+ break;
+ case CLK_PWM3:
+ sel = (con & CLK_PWM3_SEL_MASK) >> CLK_PWM3_SEL_SHIFT;
+ break;
+ default:
+ return -ENOENT;
+ }
+
+ switch (sel) {
+ case CLK_PWM_SEL_100M:
+ return 100 * MHz;
+ case CLK_PWM_SEL_24M:
+ return OSC_HZ;
+ case CLK_PWM_SEL_CPLL_100M:
+ return 100 * MHz;
+ default:
+ return -ENOENT;
+ }
+}
+
+static ulong rk3568_pwm_set_clk(struct rk3568_clk_priv *priv,
+ ulong clk_id, ulong rate)
+{
+ struct rk3568_cru *cru = priv->cru;
+ int src_clk;
+
+ if (rate == 100 * MHz)
+ src_clk = CLK_PWM_SEL_100M;
+ else
+ src_clk = CLK_PWM_SEL_24M;
+
+ switch (clk_id) {
+ case CLK_PWM1:
+ rk_clrsetreg(&cru->clksel_con[72],
+ CLK_PWM1_SEL_MASK,
+ src_clk << CLK_PWM1_SEL_SHIFT);
+ break;
+ case CLK_PWM2:
+ rk_clrsetreg(&cru->clksel_con[72],
+ CLK_PWM2_SEL_MASK,
+ src_clk << CLK_PWM2_SEL_SHIFT);
+ break;
+ case CLK_PWM3:
+ rk_clrsetreg(&cru->clksel_con[72],
+ CLK_PWM3_SEL_MASK,
+ src_clk << CLK_PWM3_SEL_SHIFT);
+ break;
+ default:
+ return -ENOENT;
+ }
+
+ return rk3568_pwm_get_clk(priv, clk_id);
+}
+
+static ulong rk3568_adc_get_clk(struct rk3568_clk_priv *priv, ulong clk_id)
+{
+ struct rk3568_cru *cru = priv->cru;
+ u32 div, sel, con, prate;
+
+ switch (clk_id) {
+ case CLK_SARADC:
+ return OSC_HZ;
+ case CLK_TSADC_TSEN:
+ con = readl(&cru->clksel_con[51]);
+ div = (con & CLK_TSADC_TSEN_DIV_MASK) >>
+ CLK_TSADC_TSEN_DIV_SHIFT;
+ sel = (con & CLK_TSADC_TSEN_SEL_MASK) >>
+ CLK_TSADC_TSEN_SEL_SHIFT;
+ if (sel == CLK_TSADC_TSEN_SEL_24M)
+ prate = OSC_HZ;
+ else
+ prate = 100 * MHz;
+ return DIV_TO_RATE(prate, div);
+ case CLK_TSADC:
+ con = readl(&cru->clksel_con[51]);
+ div = (con & CLK_TSADC_DIV_MASK) >> CLK_TSADC_DIV_SHIFT;
+ prate = rk3568_adc_get_clk(priv, CLK_TSADC_TSEN);
+ return DIV_TO_RATE(prate, div);
+ default:
+ return -ENOENT;
+ }
+}
+
+static ulong rk3568_adc_set_clk(struct rk3568_clk_priv *priv,
+ ulong clk_id, ulong rate)
+{
+ struct rk3568_cru *cru = priv->cru;
+ int src_clk_div;
+ ulong prate = 0;
+
+ switch (clk_id) {
+ case CLK_SARADC:
+ return OSC_HZ;
+ case CLK_TSADC_TSEN:
+ if (!(OSC_HZ % rate)) {
+ src_clk_div = DIV_ROUND_UP(OSC_HZ, rate);
+ assert(src_clk_div - 1 <= 7);
+ rk_clrsetreg(&cru->clksel_con[51],
+ CLK_TSADC_TSEN_SEL_MASK |
+ CLK_TSADC_TSEN_DIV_MASK,
+ (CLK_TSADC_TSEN_SEL_24M <<
+ CLK_TSADC_TSEN_SEL_SHIFT) |
+ (src_clk_div - 1) <<
+ CLK_TSADC_TSEN_DIV_SHIFT);
+ } else {
+ src_clk_div = DIV_ROUND_UP(100 * MHz, rate);
+ assert(src_clk_div - 1 <= 7);
+ rk_clrsetreg(&cru->clksel_con[51],
+ CLK_TSADC_TSEN_SEL_MASK |
+ CLK_TSADC_TSEN_DIV_MASK,
+ (CLK_TSADC_TSEN_SEL_100M <<
+ CLK_TSADC_TSEN_SEL_SHIFT) |
+ (src_clk_div - 1) <<
+ CLK_TSADC_TSEN_DIV_SHIFT);
+ }
+ break;
+ case CLK_TSADC:
+ prate = rk3568_adc_get_clk(priv, CLK_TSADC_TSEN);
+ src_clk_div = DIV_ROUND_UP(prate, rate);
+ assert(src_clk_div - 1 <= 128);
+ rk_clrsetreg(&cru->clksel_con[51],
+ CLK_TSADC_DIV_MASK,
+ (src_clk_div - 1) << CLK_TSADC_DIV_SHIFT);
+ break;
+ default:
+ return -ENOENT;
+ }
+ return rk3568_adc_get_clk(priv, clk_id);
+}
+
+static ulong rk3568_crypto_get_rate(struct rk3568_clk_priv *priv, ulong clk_id)
+{
+ struct rk3568_cru *cru = priv->cru;
+ u32 sel, con;
+
+ switch (clk_id) {
+ case ACLK_SECURE_FLASH:
+ case ACLK_CRYPTO_NS:
+ con = readl(&cru->clksel_con[27]);
+ sel = (con & ACLK_SECURE_FLASH_SEL_MASK) >>
+ ACLK_SECURE_FLASH_SEL_SHIFT;
+ if (sel == ACLK_SECURE_FLASH_SEL_200M)
+ return 200 * MHz;
+ else if (sel == ACLK_SECURE_FLASH_SEL_150M)
+ return 150 * MHz;
+ else if (sel == ACLK_SECURE_FLASH_SEL_100M)
+ return 100 * MHz;
+ else
+ return 24 * MHz;
+ case HCLK_SECURE_FLASH:
+ case HCLK_CRYPTO_NS:
+ case CLK_CRYPTO_NS_RNG:
+ con = readl(&cru->clksel_con[27]);
+ sel = (con & HCLK_SECURE_FLASH_SEL_MASK) >>
+ HCLK_SECURE_FLASH_SEL_SHIFT;
+ if (sel == HCLK_SECURE_FLASH_SEL_150M)
+ return 150 * MHz;
+ else if (sel == HCLK_SECURE_FLASH_SEL_100M)
+ return 100 * MHz;
+ else if (sel == HCLK_SECURE_FLASH_SEL_75M)
+ return 75 * MHz;
+ else
+ return 24 * MHz;
+ case CLK_CRYPTO_NS_CORE:
+ con = readl(&cru->clksel_con[27]);
+ sel = (con & CLK_CRYPTO_CORE_SEL_MASK) >>
+ CLK_CRYPTO_CORE_SEL_SHIFT;
+ if (sel == CLK_CRYPTO_CORE_SEL_200M)
+ return 200 * MHz;
+ else if (sel == CLK_CRYPTO_CORE_SEL_150M)
+ return 150 * MHz;
+ else
+ return 100 * MHz;
+ case CLK_CRYPTO_NS_PKA:
+ con = readl(&cru->clksel_con[27]);
+ sel = (con & CLK_CRYPTO_PKA_SEL_MASK) >>
+ CLK_CRYPTO_PKA_SEL_SHIFT;
+ if (sel == CLK_CRYPTO_PKA_SEL_300M)
+ return 300 * MHz;
+ else if (sel == CLK_CRYPTO_PKA_SEL_200M)
+ return 200 * MHz;
+ else
+ return 100 * MHz;
+ default:
+ return -ENOENT;
+ }
+}
+
+static ulong rk3568_crypto_set_rate(struct rk3568_clk_priv *priv,
+ ulong clk_id, ulong rate)
+{
+ struct rk3568_cru *cru = priv->cru;
+ u32 src_clk, mask, shift;
+
+ switch (clk_id) {
+ case ACLK_SECURE_FLASH:
+ case ACLK_CRYPTO_NS:
+ mask = ACLK_SECURE_FLASH_SEL_MASK;
+ shift = ACLK_SECURE_FLASH_SEL_SHIFT;
+ if (rate == 200 * MHz)
+ src_clk = ACLK_SECURE_FLASH_SEL_200M;
+ else if (rate == 150 * MHz)
+ src_clk = ACLK_SECURE_FLASH_SEL_150M;
+ else if (rate == 100 * MHz)
+ src_clk = ACLK_SECURE_FLASH_SEL_100M;
+ else
+ src_clk = ACLK_SECURE_FLASH_SEL_24M;
+ break;
+ case HCLK_SECURE_FLASH:
+ case HCLK_CRYPTO_NS:
+ case CLK_CRYPTO_NS_RNG:
+ mask = HCLK_SECURE_FLASH_SEL_MASK;
+ shift = HCLK_SECURE_FLASH_SEL_SHIFT;
+ if (rate == 150 * MHz)
+ src_clk = HCLK_SECURE_FLASH_SEL_150M;
+ else if (rate == 100 * MHz)
+ src_clk = HCLK_SECURE_FLASH_SEL_100M;
+ else if (rate == 75 * MHz)
+ src_clk = HCLK_SECURE_FLASH_SEL_75M;
+ else
+ src_clk = HCLK_SECURE_FLASH_SEL_24M;
+ break;
+ case CLK_CRYPTO_NS_CORE:
+ mask = CLK_CRYPTO_CORE_SEL_MASK;
+ shift = CLK_CRYPTO_CORE_SEL_SHIFT;
+ if (rate == 200 * MHz)
+ src_clk = CLK_CRYPTO_CORE_SEL_200M;
+ else if (rate == 150 * MHz)
+ src_clk = CLK_CRYPTO_CORE_SEL_150M;
+ else
+ src_clk = CLK_CRYPTO_CORE_SEL_100M;
+ break;
+ case CLK_CRYPTO_NS_PKA:
+ mask = CLK_CRYPTO_PKA_SEL_MASK;
+ shift = CLK_CRYPTO_PKA_SEL_SHIFT;
+ if (rate == 300 * MHz)
+ src_clk = CLK_CRYPTO_PKA_SEL_300M;
+ else if (rate == 200 * MHz)
+ src_clk = CLK_CRYPTO_PKA_SEL_200M;
+ else
+ src_clk = CLK_CRYPTO_PKA_SEL_100M;
+ break;
+ default:
+ return -ENOENT;
+ }
+
+ rk_clrsetreg(&cru->clksel_con[27], mask, src_clk << shift);
+
+ return rk3568_crypto_get_rate(priv, clk_id);
+}
+
+static ulong rk3568_sdmmc_get_clk(struct rk3568_clk_priv *priv, ulong clk_id)
+{
+ struct rk3568_cru *cru = priv->cru;
+ u32 sel, con;
+
+ switch (clk_id) {
+ case HCLK_SDMMC0:
+ case CLK_SDMMC0:
+ con = readl(&cru->clksel_con[30]);
+ sel = (con & CLK_SDMMC0_SEL_MASK) >> CLK_SDMMC0_SEL_SHIFT;
+ break;
+ case CLK_SDMMC1:
+ con = readl(&cru->clksel_con[30]);
+ sel = (con & CLK_SDMMC1_SEL_MASK) >> CLK_SDMMC1_SEL_SHIFT;
+ break;
+ case CLK_SDMMC2:
+ con = readl(&cru->clksel_con[32]);
+ sel = (con & CLK_SDMMC2_SEL_MASK) >> CLK_SDMMC2_SEL_SHIFT;
+ break;
+ default:
+ return -ENOENT;
+ }
+
+ switch (sel) {
+ case CLK_SDMMC_SEL_24M:
+ return OSC_HZ;
+ case CLK_SDMMC_SEL_400M:
+ return 400 * MHz;
+ case CLK_SDMMC_SEL_300M:
+ return 300 * MHz;
+ case CLK_SDMMC_SEL_100M:
+ return 100 * MHz;
+ case CLK_SDMMC_SEL_50M:
+ return 50 * MHz;
+ case CLK_SDMMC_SEL_750K:
+ return 750 * KHz;
+ default:
+ return -ENOENT;
+ }
+}
+
+static ulong rk3568_sdmmc_set_clk(struct rk3568_clk_priv *priv,
+ ulong clk_id, ulong rate)
+{
+ struct rk3568_cru *cru = priv->cru;
+ int src_clk;
+
+ switch (rate) {
+ case OSC_HZ:
+ src_clk = CLK_SDMMC_SEL_24M;
+ break;
+ case 400 * MHz:
+ src_clk = CLK_SDMMC_SEL_400M;
+ break;
+ case 300 * MHz:
+ src_clk = CLK_SDMMC_SEL_300M;
+ break;
+ case 100 * MHz:
+ src_clk = CLK_SDMMC_SEL_100M;
+ break;
+ case 52 * MHz:
+ case 50 * MHz:
+ src_clk = CLK_SDMMC_SEL_50M;
+ break;
+ case 750 * KHz:
+ case 400 * KHz:
+ src_clk = CLK_SDMMC_SEL_750K;
+ break;
+ default:
+ return -ENOENT;
+ }
+
+ switch (clk_id) {
+ case HCLK_SDMMC0:
+ case CLK_SDMMC0:
+ rk_clrsetreg(&cru->clksel_con[30],
+ CLK_SDMMC0_SEL_MASK,
+ src_clk << CLK_SDMMC0_SEL_SHIFT);
+ break;
+ case CLK_SDMMC1:
+ rk_clrsetreg(&cru->clksel_con[30],
+ CLK_SDMMC1_SEL_MASK,
+ src_clk << CLK_SDMMC1_SEL_SHIFT);
+ break;
+ case CLK_SDMMC2:
+ rk_clrsetreg(&cru->clksel_con[32],
+ CLK_SDMMC2_SEL_MASK,
+ src_clk << CLK_SDMMC2_SEL_SHIFT);
+ break;
+ default:
+ return -ENOENT;
+ }
+
+ return rk3568_sdmmc_get_clk(priv, clk_id);
+}
+
+static ulong rk3568_sfc_get_clk(struct rk3568_clk_priv *priv)
+{
+ struct rk3568_cru *cru = priv->cru;
+ u32 sel, con;
+
+ con = readl(&cru->clksel_con[28]);
+ sel = (con & SCLK_SFC_SEL_MASK) >> SCLK_SFC_SEL_SHIFT;
+ switch (sel) {
+ case SCLK_SFC_SEL_24M:
+ return OSC_HZ;
+ case SCLK_SFC_SEL_50M:
+ return 50 * MHz;
+ case SCLK_SFC_SEL_75M:
+ return 75 * MHz;
+ case SCLK_SFC_SEL_100M:
+ return 100 * MHz;
+ case SCLK_SFC_SEL_125M:
+ return 125 * MHz;
+ case SCLK_SFC_SEL_150M:
+ return 150 * MHz;
+ default:
+ return -ENOENT;
+ }
+}
+
+static ulong rk3568_sfc_set_clk(struct rk3568_clk_priv *priv, ulong rate)
+{
+ struct rk3568_cru *cru = priv->cru;
+ int src_clk;
+
+ switch (rate) {
+ case OSC_HZ:
+ src_clk = SCLK_SFC_SEL_24M;
+ break;
+ case 50 * MHz:
+ src_clk = SCLK_SFC_SEL_50M;
+ break;
+ case 75 * MHz:
+ src_clk = SCLK_SFC_SEL_75M;
+ break;
+ case 100 * MHz:
+ src_clk = SCLK_SFC_SEL_100M;
+ break;
+ case 125 * MHz:
+ src_clk = SCLK_SFC_SEL_125M;
+ break;
+ case 150 * MHz:
+ src_clk = SCLK_SFC_SEL_150M;
+ break;
+ default:
+ return -ENOENT;
+ }
+
+ rk_clrsetreg(&cru->clksel_con[28],
+ SCLK_SFC_SEL_MASK,
+ src_clk << SCLK_SFC_SEL_SHIFT);
+
+ return rk3568_sfc_get_clk(priv);
+}
+
+static ulong rk3568_nand_get_clk(struct rk3568_clk_priv *priv)
+{
+ struct rk3568_cru *cru = priv->cru;
+ u32 sel, con;
+
+ con = readl(&cru->clksel_con[28]);
+ sel = (con & NCLK_NANDC_SEL_MASK) >> NCLK_NANDC_SEL_SHIFT;
+ switch (sel) {
+ case NCLK_NANDC_SEL_200M:
+ return 200 * MHz;
+ case NCLK_NANDC_SEL_150M:
+ return 150 * MHz;
+ case NCLK_NANDC_SEL_100M:
+ return 100 * MHz;
+ case NCLK_NANDC_SEL_24M:
+ return OSC_HZ;
+ default:
+ return -ENOENT;
+ }
+}
+
+static ulong rk3568_nand_set_clk(struct rk3568_clk_priv *priv, ulong rate)
+{
+ struct rk3568_cru *cru = priv->cru;
+ int src_clk;
+
+ switch (rate) {
+ case OSC_HZ:
+ src_clk = NCLK_NANDC_SEL_24M;
+ break;
+ case 100 * MHz:
+ src_clk = NCLK_NANDC_SEL_100M;
+ break;
+ case 150 * MHz:
+ src_clk = NCLK_NANDC_SEL_150M;
+ break;
+ case 200 * MHz:
+ src_clk = NCLK_NANDC_SEL_200M;
+ break;
+ default:
+ return -ENOENT;
+ }
+
+ rk_clrsetreg(&cru->clksel_con[28],
+ NCLK_NANDC_SEL_MASK,
+ src_clk << NCLK_NANDC_SEL_SHIFT);
+
+ return rk3568_nand_get_clk(priv);
+}
+
+static ulong rk3568_emmc_get_clk(struct rk3568_clk_priv *priv)
+{
+ struct rk3568_cru *cru = priv->cru;
+ u32 sel, con;
+
+ con = readl(&cru->clksel_con[28]);
+ sel = (con & CCLK_EMMC_SEL_MASK) >> CCLK_EMMC_SEL_SHIFT;
+ switch (sel) {
+ case CCLK_EMMC_SEL_200M:
+ return 200 * MHz;
+ case CCLK_EMMC_SEL_150M:
+ return 150 * MHz;
+ case CCLK_EMMC_SEL_100M:
+ return 100 * MHz;
+ case CCLK_EMMC_SEL_50M:
+ return 50 * MHz;
+ case CCLK_EMMC_SEL_375K:
+ return 375 * KHz;
+ case CCLK_EMMC_SEL_24M:
+ return OSC_HZ;
+ default:
+ return -ENOENT;
+ }
+}
+
+static ulong rk3568_emmc_set_clk(struct rk3568_clk_priv *priv, ulong rate)
+{
+ struct rk3568_cru *cru = priv->cru;
+ int src_clk;
+
+ switch (rate) {
+ case OSC_HZ:
+ src_clk = CCLK_EMMC_SEL_24M;
+ break;
+ case 52 * MHz:
+ case 50 * MHz:
+ src_clk = CCLK_EMMC_SEL_50M;
+ break;
+ case 100 * MHz:
+ src_clk = CCLK_EMMC_SEL_100M;
+ break;
+ case 150 * MHz:
+ src_clk = CCLK_EMMC_SEL_150M;
+ break;
+ case 200 * MHz:
+ src_clk = CCLK_EMMC_SEL_200M;
+ break;
+ case 400 * KHz:
+ case 375 * KHz:
+ src_clk = CCLK_EMMC_SEL_375K;
+ break;
+ default:
+ return -ENOENT;
+ }
+
+ rk_clrsetreg(&cru->clksel_con[28],
+ CCLK_EMMC_SEL_MASK,
+ src_clk << CCLK_EMMC_SEL_SHIFT);
+
+ return rk3568_emmc_get_clk(priv);
+}
+
+static ulong rk3568_emmc_get_bclk(struct rk3568_clk_priv *priv)
+{
+ struct rk3568_cru *cru = priv->cru;
+ u32 sel, con;
+
+ con = readl(&cru->clksel_con[28]);
+ sel = (con & BCLK_EMMC_SEL_MASK) >> BCLK_EMMC_SEL_SHIFT;
+ switch (sel) {
+ case BCLK_EMMC_SEL_200M:
+ return 200 * MHz;
+ case BCLK_EMMC_SEL_150M:
+ return 150 * MHz;
+ case BCLK_EMMC_SEL_125M:
+ return 125 * MHz;
+ default:
+ return -ENOENT;
+ }
+}
+
+static ulong rk3568_emmc_set_bclk(struct rk3568_clk_priv *priv, ulong rate)
+{
+ struct rk3568_cru *cru = priv->cru;
+ int src_clk;
+
+ switch (rate) {
+ case 200 * MHz:
+ src_clk = BCLK_EMMC_SEL_200M;
+ break;
+ case 150 * MHz:
+ src_clk = BCLK_EMMC_SEL_150M;
+ break;
+ case 125 * MHz:
+ src_clk = BCLK_EMMC_SEL_125M;
+ break;
+ default:
+ return -ENOENT;
+ }
+
+ rk_clrsetreg(&cru->clksel_con[28],
+ BCLK_EMMC_SEL_MASK,
+ src_clk << BCLK_EMMC_SEL_SHIFT);
+
+ return rk3568_emmc_get_bclk(priv);
+}
+
+#ifndef CONFIG_SPL_BUILD
+static ulong rk3568_aclk_vop_get_clk(struct rk3568_clk_priv *priv)
+{
+ struct rk3568_cru *cru = priv->cru;
+ u32 div, sel, con, parent;
+
+ con = readl(&cru->clksel_con[38]);
+ div = (con & ACLK_VOP_PRE_DIV_MASK) >> ACLK_VOP_PRE_DIV_SHIFT;
+ sel = (con & ACLK_VOP_PRE_SEL_MASK) >> ACLK_VOP_PRE_SEL_SHIFT;
+ if (sel == ACLK_VOP_PRE_SEL_GPLL)
+ parent = priv->gpll_hz;
+ else if (sel == ACLK_VOP_PRE_SEL_CPLL)
+ parent = priv->cpll_hz;
+ else if (sel == ACLK_VOP_PRE_SEL_VPLL)
+ parent = priv->vpll_hz;
+ else
+ parent = priv->hpll_hz;
+
+ return DIV_TO_RATE(parent, div);
+}
+
+static ulong rk3568_aclk_vop_set_clk(struct rk3568_clk_priv *priv, ulong rate)
+{
+ struct rk3568_cru *cru = priv->cru;
+ int src_clk_div, src_clk_mux;
+
+ if ((priv->cpll_hz % rate) == 0) {
+ src_clk_div = DIV_ROUND_UP(priv->cpll_hz, rate);
+ src_clk_mux = ACLK_VOP_PRE_SEL_CPLL;
+ } else {
+ src_clk_div = DIV_ROUND_UP(priv->gpll_hz, rate);
+ src_clk_mux = ACLK_VOP_PRE_SEL_GPLL;
+ }
+ assert(src_clk_div - 1 <= 31);
+ rk_clrsetreg(&cru->clksel_con[38],
+ ACLK_VOP_PRE_SEL_MASK | ACLK_VOP_PRE_DIV_MASK,
+ src_clk_mux << ACLK_VOP_PRE_SEL_SHIFT |
+ (src_clk_div - 1) << ACLK_VOP_PRE_DIV_SHIFT);
+
+ return rk3568_aclk_vop_get_clk(priv);
+}
+
+static ulong rk3568_dclk_vop_get_clk(struct rk3568_clk_priv *priv, ulong clk_id)
+{
+ struct rk3568_cru *cru = priv->cru;
+ u32 conid, div, sel, con, parent;
+
+ switch (clk_id) {
+ case DCLK_VOP0:
+ conid = 39;
+ break;
+ case DCLK_VOP1:
+ conid = 40;
+ break;
+ case DCLK_VOP2:
+ conid = 41;
+ break;
+ default:
+ return -ENOENT;
+ }
+
+ con = readl(&cru->clksel_con[conid]);
+ div = (con & DCLK0_VOP_DIV_MASK) >> DCLK0_VOP_DIV_SHIFT;
+ sel = (con & DCLK0_VOP_SEL_MASK) >> DCLK0_VOP_SEL_SHIFT;
+ if (sel == DCLK_VOP_SEL_HPLL)
+ parent = rk3568_pmu_pll_get_rate(priv, HPLL);
+ else if (sel == DCLK_VOP_SEL_VPLL)
+ parent = rockchip_pll_get_rate(&rk3568_pll_clks[VPLL],
+ priv->cru, VPLL);
+ else if (sel == DCLK_VOP_SEL_GPLL)
+ parent = priv->gpll_hz;
+ else if (sel == DCLK_VOP_SEL_CPLL)
+ parent = priv->cpll_hz;
+ else
+ return -ENOENT;
+
+ return DIV_TO_RATE(parent, div);
+}
+
+#define RK3568_VOP_PLL_LIMIT_FREQ 600000000
+
+static ulong rk3568_dclk_vop_set_clk(struct rk3568_clk_priv *priv,
+ ulong clk_id, ulong rate)
+{
+ struct rk3568_cru *cru = priv->cru;
+ ulong pll_rate, now, best_rate = 0;
+ u32 i, conid, con, sel, div, best_div = 0, best_sel = 0;
+
+ switch (clk_id) {
+ case DCLK_VOP0:
+ conid = 39;
+ break;
+ case DCLK_VOP1:
+ conid = 40;
+ break;
+ case DCLK_VOP2:
+ conid = 41;
+ break;
+ default:
+ return -ENOENT;
+ }
+
+ con = readl(&cru->clksel_con[conid]);
+ sel = (con & DCLK0_VOP_SEL_MASK) >> DCLK0_VOP_SEL_SHIFT;
+
+ if (sel == DCLK_VOP_SEL_HPLL) {
+ div = 1;
+ rk_clrsetreg(&cru->clksel_con[conid],
+ DCLK0_VOP_DIV_MASK | DCLK0_VOP_SEL_MASK,
+ (DCLK_VOP_SEL_HPLL << DCLK0_VOP_SEL_SHIFT) |
+ ((div - 1) << DCLK0_VOP_DIV_SHIFT));
+ rk3568_pmu_pll_set_rate(priv, HPLL, div * rate);
+ } else if (sel == DCLK_VOP_SEL_VPLL) {
+ div = DIV_ROUND_UP(RK3568_VOP_PLL_LIMIT_FREQ, rate);
+ rk_clrsetreg(&cru->clksel_con[conid],
+ DCLK0_VOP_DIV_MASK | DCLK0_VOP_SEL_MASK,
+ (DCLK_VOP_SEL_VPLL << DCLK0_VOP_SEL_SHIFT) |
+ ((div - 1) << DCLK0_VOP_DIV_SHIFT));
+ rockchip_pll_set_rate(&rk3568_pll_clks[VPLL],
+ priv->cru, VPLL, div * rate);
+ } else {
+ for (i = 0; i <= DCLK_VOP_SEL_CPLL; i++) {
+ switch (i) {
+ case DCLK_VOP_SEL_GPLL:
+ pll_rate = priv->gpll_hz;
+ break;
+ case DCLK_VOP_SEL_CPLL:
+ pll_rate = priv->cpll_hz;
+ break;
+ default:
+ printf("do not support this vop pll sel\n");
+ return -EINVAL;
+ }
+
+ div = DIV_ROUND_UP(pll_rate, rate);
+ if (div > 255)
+ continue;
+ now = pll_rate / div;
+ if (abs(rate - now) < abs(rate - best_rate)) {
+ best_rate = now;
+ best_div = div;
+ best_sel = i;
+ }
+ debug("p_rate=%lu, best_rate=%lu, div=%u, sel=%u\n",
+ pll_rate, best_rate, best_div, best_sel);
+ }
+
+ if (best_rate) {
+ rk_clrsetreg(&cru->clksel_con[conid],
+ DCLK0_VOP_DIV_MASK | DCLK0_VOP_SEL_MASK,
+ best_sel << DCLK0_VOP_SEL_SHIFT |
+ (best_div - 1) << DCLK0_VOP_DIV_SHIFT);
+ } else {
+ printf("do not support this vop freq %lu\n", rate);
+ return -EINVAL;
+ }
+ }
+ return rk3568_dclk_vop_get_clk(priv, clk_id);
+}
+
+static ulong rk3568_gmac_src_get_clk(struct rk3568_clk_priv *priv,
+ ulong mac_id)
+{
+ struct rk3568_cru *cru = priv->cru;
+ u32 sel, con;
+
+ con = readl(&cru->clksel_con[31 + mac_id * 2]);
+ sel = (con & CLK_MAC0_2TOP_SEL_MASK) >> CLK_MAC0_2TOP_SEL_SHIFT;
+
+ switch (sel) {
+ case CLK_MAC0_2TOP_SEL_125M:
+ return 125 * MHz;
+ case CLK_MAC0_2TOP_SEL_50M:
+ return 50 * MHz;
+ case CLK_MAC0_2TOP_SEL_25M:
+ return 25 * MHz;
+ case CLK_MAC0_2TOP_SEL_PPLL:
+ return rk3568_pmu_pll_get_rate(priv, HPLL);
+ default:
+ return -ENOENT;
+ }
+}
+
+static ulong rk3568_gmac_src_set_clk(struct rk3568_clk_priv *priv,
+ ulong mac_id, ulong rate)
+{
+ struct rk3568_cru *cru = priv->cru;
+ int src_clk;
+
+ switch (rate) {
+ case 125 * MHz:
+ src_clk = CLK_MAC0_2TOP_SEL_125M;
+ break;
+ case 50 * MHz:
+ src_clk = CLK_MAC0_2TOP_SEL_50M;
+ break;
+ case 25 * MHz:
+ src_clk = CLK_MAC0_2TOP_SEL_25M;
+ break;
+ default:
+ return -ENOENT;
+ }
+
+ rk_clrsetreg(&cru->clksel_con[31 + mac_id * 2],
+ CLK_MAC0_2TOP_SEL_MASK,
+ src_clk << CLK_MAC0_2TOP_SEL_SHIFT);
+
+ return rk3568_gmac_src_get_clk(priv, mac_id);
+}
+
+static ulong rk3568_gmac_out_get_clk(struct rk3568_clk_priv *priv,
+ ulong mac_id)
+{
+ struct rk3568_cru *cru = priv->cru;
+ u32 sel, con;
+
+ con = readl(&cru->clksel_con[31 + mac_id * 2]);
+ sel = (con & CLK_MAC0_OUT_SEL_MASK) >> CLK_MAC0_OUT_SEL_SHIFT;
+
+ switch (sel) {
+ case CLK_MAC0_OUT_SEL_125M:
+ return 125 * MHz;
+ case CLK_MAC0_OUT_SEL_50M:
+ return 50 * MHz;
+ case CLK_MAC0_OUT_SEL_25M:
+ return 25 * MHz;
+ case CLK_MAC0_OUT_SEL_24M:
+ return OSC_HZ;
+ default:
+ return -ENOENT;
+ }
+}
+
+static ulong rk3568_gmac_out_set_clk(struct rk3568_clk_priv *priv,
+ ulong mac_id, ulong rate)
+{
+ struct rk3568_cru *cru = priv->cru;
+ int src_clk;
+
+ switch (rate) {
+ case 125 * MHz:
+ src_clk = CLK_MAC0_OUT_SEL_125M;
+ break;
+ case 50 * MHz:
+ src_clk = CLK_MAC0_OUT_SEL_50M;
+ break;
+ case 25 * MHz:
+ src_clk = CLK_MAC0_OUT_SEL_25M;
+ break;
+ case 24 * MHz:
+ src_clk = CLK_MAC0_OUT_SEL_24M;
+ break;
+ default:
+ return -ENOENT;
+ }
+
+ rk_clrsetreg(&cru->clksel_con[31 + mac_id * 2],
+ CLK_MAC0_OUT_SEL_MASK,
+ src_clk << CLK_MAC0_OUT_SEL_SHIFT);
+
+ return rk3568_gmac_out_get_clk(priv, mac_id);
+}
+
+static ulong rk3568_gmac_ptp_ref_get_clk(struct rk3568_clk_priv *priv,
+ ulong mac_id)
+{
+ struct rk3568_cru *cru = priv->cru;
+ u32 sel, con;
+
+ con = readl(&cru->clksel_con[31 + mac_id * 2]);
+ sel = (con & CLK_GMAC0_PTP_REF_SEL_MASK) >> CLK_GMAC0_PTP_REF_SEL_SHIFT;
+
+ switch (sel) {
+ case CLK_GMAC0_PTP_REF_SEL_62_5M:
+ return 62500 * KHz;
+ case CLK_GMAC0_PTP_REF_SEL_100M:
+ return 100 * MHz;
+ case CLK_GMAC0_PTP_REF_SEL_50M:
+ return 50 * MHz;
+ case CLK_GMAC0_PTP_REF_SEL_24M:
+ return OSC_HZ;
+ default:
+ return -ENOENT;
+ }
+}
+
+static ulong rk3568_gmac_ptp_ref_set_clk(struct rk3568_clk_priv *priv,
+ ulong mac_id, ulong rate)
+{
+ struct rk3568_cru *cru = priv->cru;
+ int src_clk;
+
+ switch (rate) {
+ case 62500 * KHz:
+ src_clk = CLK_GMAC0_PTP_REF_SEL_62_5M;
+ break;
+ case 100 * MHz:
+ src_clk = CLK_GMAC0_PTP_REF_SEL_100M;
+ break;
+ case 50 * MHz:
+ src_clk = CLK_GMAC0_PTP_REF_SEL_50M;
+ break;
+ case 24 * MHz:
+ src_clk = CLK_GMAC0_PTP_REF_SEL_24M;
+ break;
+ default:
+ return -ENOENT;
+ }
+
+ rk_clrsetreg(&cru->clksel_con[31 + mac_id * 2],
+ CLK_GMAC0_PTP_REF_SEL_MASK,
+ src_clk << CLK_GMAC0_PTP_REF_SEL_SHIFT);
+
+ return rk3568_gmac_ptp_ref_get_clk(priv, mac_id);
+}
+
+static ulong rk3568_gmac_tx_rx_set_clk(struct rk3568_clk_priv *priv,
+ ulong mac_id, ulong rate)
+{
+ struct rk3568_cru *cru = priv->cru;
+ u32 con, sel, div_sel;
+
+ con = readl(&cru->clksel_con[31 + mac_id * 2]);
+ sel = (con & RMII0_MODE_MASK) >> RMII0_MODE_SHIFT;
+
+ if (sel == RMII0_MODE_SEL_RGMII) {
+ if (rate == 2500000)
+ div_sel = RGMII0_CLK_SEL_2_5M;
+ else if (rate == 25000000)
+ div_sel = RGMII0_CLK_SEL_25M;
+ else
+ div_sel = RGMII0_CLK_SEL_125M;
+ rk_clrsetreg(&cru->clksel_con[31 + mac_id * 2],
+ RGMII0_CLK_SEL_MASK,
+ div_sel << RGMII0_CLK_SEL_SHIFT);
+ } else if (sel == RMII0_MODE_SEL_RMII) {
+ if (rate == 2500000)
+ div_sel = RMII0_CLK_SEL_2_5M;
+ else
+ div_sel = RMII0_CLK_SEL_25M;
+ rk_clrsetreg(&cru->clksel_con[31 + mac_id * 2],
+ RMII0_CLK_SEL_MASK,
+ div_sel << RMII0_CLK_SEL_SHIFT);
+ }
+
+ return 0;
+}
+
+static ulong rk3568_ebc_get_clk(struct rk3568_clk_priv *priv)
+{
+ struct rk3568_cru *cru = priv->cru;
+ u32 con, div, p_rate;
+
+ con = readl(&cru->clksel_con[79]);
+ div = (con & CPLL_333M_DIV_MASK) >> CPLL_333M_DIV_SHIFT;
+ p_rate = DIV_TO_RATE(priv->cpll_hz, div);
+
+ con = readl(&cru->clksel_con[43]);
+ div = (con & DCLK_EBC_SEL_MASK) >> DCLK_EBC_SEL_SHIFT;
+ switch (div) {
+ case DCLK_EBC_SEL_GPLL_400M:
+ return 400 * MHz;
+ case DCLK_EBC_SEL_CPLL_333M:
+ return p_rate;
+ case DCLK_EBC_SEL_GPLL_200M:
+ return 200 * MHz;
+ default:
+ return -ENOENT;
+ }
+}
+
+static ulong rk3568_ebc_set_clk(struct rk3568_clk_priv *priv, ulong rate)
+{
+ struct rk3568_cru *cru = priv->cru;
+ int src_clk_div;
+
+ src_clk_div = DIV_ROUND_UP(priv->cpll_hz, rate);
+ assert(src_clk_div - 1 <= 31);
+ rk_clrsetreg(&cru->clksel_con[79],
+ CPLL_333M_DIV_MASK,
+ (src_clk_div - 1) << CPLL_333M_DIV_SHIFT);
+ rk_clrsetreg(&cru->clksel_con[43],
+ DCLK_EBC_SEL_MASK,
+ DCLK_EBC_SEL_CPLL_333M << DCLK_EBC_SEL_SHIFT);
+
+ return rk3568_ebc_get_clk(priv);
+}
+
+static ulong rk3568_rkvdec_get_clk(struct rk3568_clk_priv *priv, ulong clk_id)
+{
+ struct rk3568_cru *cru = priv->cru;
+ u32 con, div, src, p_rate;
+
+ switch (clk_id) {
+ case ACLK_RKVDEC_PRE:
+ case ACLK_RKVDEC:
+ con = readl(&cru->clksel_con[47]);
+ src = (con & ACLK_RKVDEC_SEL_MASK) >> ACLK_RKVDEC_SEL_SHIFT;
+ div = (con & ACLK_RKVDEC_DIV_MASK) >> ACLK_RKVDEC_DIV_SHIFT;
+ if (src == ACLK_RKVDEC_SEL_CPLL)
+ p_rate = priv->cpll_hz;
+ else
+ p_rate = priv->gpll_hz;
+ return DIV_TO_RATE(p_rate, div);
+ case CLK_RKVDEC_CORE:
+ con = readl(&cru->clksel_con[49]);
+ src = (con & CLK_RKVDEC_CORE_SEL_MASK)
+ >> CLK_RKVDEC_CORE_SEL_SHIFT;
+ div = (con & CLK_RKVDEC_CORE_DIV_MASK)
+ >> CLK_RKVDEC_CORE_DIV_SHIFT;
+ if (src == CLK_RKVDEC_CORE_SEL_CPLL)
+ p_rate = priv->cpll_hz;
+ else if (src == CLK_RKVDEC_CORE_SEL_NPLL)
+ p_rate = priv->npll_hz;
+ else if (src == CLK_RKVDEC_CORE_SEL_VPLL)
+ p_rate = priv->vpll_hz;
+ else
+ p_rate = priv->gpll_hz;
+ return DIV_TO_RATE(p_rate, div);
+ default:
+ return -ENOENT;
+ }
+}
+
+static ulong rk3568_rkvdec_set_clk(struct rk3568_clk_priv *priv,
+ ulong clk_id, ulong rate)
+{
+ struct rk3568_cru *cru = priv->cru;
+ int src_clk_div, src, p_rate;
+
+ switch (clk_id) {
+ case ACLK_RKVDEC_PRE:
+ case ACLK_RKVDEC:
+ src = (readl(&cru->clksel_con[47]) & ACLK_RKVDEC_SEL_MASK)
+ >> ACLK_RKVDEC_SEL_SHIFT;
+ if (src == ACLK_RKVDEC_SEL_CPLL)
+ p_rate = priv->cpll_hz;
+ else
+ p_rate = priv->gpll_hz;
+ src_clk_div = DIV_ROUND_UP(p_rate, rate);
+ assert(src_clk_div - 1 <= 31);
+ rk_clrsetreg(&cru->clksel_con[47],
+ ACLK_RKVDEC_SEL_MASK |
+ ACLK_RKVDEC_DIV_MASK,
+ (src << ACLK_RKVDEC_SEL_SHIFT) |
+ (src_clk_div - 1) << ACLK_RKVDEC_DIV_SHIFT);
+ break;
+ case CLK_RKVDEC_CORE:
+ src = (readl(&cru->clksel_con[49]) & CLK_RKVDEC_CORE_SEL_MASK)
+ >> CLK_RKVDEC_CORE_SEL_SHIFT;
+ if (src == CLK_RKVDEC_CORE_SEL_CPLL)
+ p_rate = priv->cpll_hz;
+ else if (src == CLK_RKVDEC_CORE_SEL_NPLL)
+ p_rate = priv->npll_hz;
+ else if (src == CLK_RKVDEC_CORE_SEL_VPLL)
+ p_rate = priv->vpll_hz;
+ else
+ p_rate = priv->gpll_hz;
+ src_clk_div = DIV_ROUND_UP(p_rate, rate);
+ assert(src_clk_div - 1 <= 31);
+ rk_clrsetreg(&cru->clksel_con[49],
+ CLK_RKVDEC_CORE_SEL_MASK |
+ CLK_RKVDEC_CORE_DIV_MASK,
+ (src << CLK_RKVDEC_CORE_SEL_SHIFT) |
+ (src_clk_div - 1) << CLK_RKVDEC_CORE_DIV_SHIFT);
+ break;
+ default:
+ return -ENOENT;
+ }
+
+ return rk3568_rkvdec_get_clk(priv, clk_id);
+}
+
+static ulong rk3568_uart_get_rate(struct rk3568_clk_priv *priv, ulong clk_id)
+{
+ struct rk3568_cru *cru = priv->cru;
+ u32 reg, con, fracdiv, div, src, p_src, p_rate;
+ unsigned long m, n;
+
+ switch (clk_id) {
+ case SCLK_UART1:
+ reg = 52;
+ break;
+ case SCLK_UART2:
+ reg = 54;
+ break;
+ case SCLK_UART3:
+ reg = 56;
+ break;
+ case SCLK_UART4:
+ reg = 58;
+ break;
+ case SCLK_UART5:
+ reg = 60;
+ break;
+ case SCLK_UART6:
+ reg = 62;
+ break;
+ case SCLK_UART7:
+ reg = 64;
+ break;
+ case SCLK_UART8:
+ reg = 66;
+ break;
+ case SCLK_UART9:
+ reg = 68;
+ break;
+ default:
+ return -ENOENT;
+ }
+ con = readl(&cru->clksel_con[reg]);
+ src = (con & CLK_UART_SEL_MASK) >> CLK_UART_SEL_SHIFT;
+ div = (con & CLK_UART_SRC_DIV_MASK) >> CLK_UART_SRC_DIV_SHIFT;
+ p_src = (con & CLK_UART_SRC_SEL_MASK) >> CLK_UART_SRC_SEL_SHIFT;
+ if (p_src == CLK_UART_SRC_SEL_GPLL)
+ p_rate = priv->gpll_hz;
+ else if (p_src == CLK_UART_SRC_SEL_CPLL)
+ p_rate = priv->cpll_hz;
+ else
+ p_rate = 480000000;
+ if (src == CLK_UART_SEL_SRC) {
+ return DIV_TO_RATE(p_rate, div);
+ } else if (src == CLK_UART_SEL_FRAC) {
+ fracdiv = readl(&cru->clksel_con[reg + 1]);
+ n = fracdiv & CLK_UART_FRAC_NUMERATOR_MASK;
+ n >>= CLK_UART_FRAC_NUMERATOR_SHIFT;
+ m = fracdiv & CLK_UART_FRAC_DENOMINATOR_MASK;
+ m >>= CLK_UART_FRAC_DENOMINATOR_SHIFT;
+ return DIV_TO_RATE(p_rate, div) * n / m;
+ } else {
+ return OSC_HZ;
+ }
+}
+
+static ulong rk3568_uart_set_rate(struct rk3568_clk_priv *priv,
+ ulong clk_id, ulong rate)
+{
+ struct rk3568_cru *cru = priv->cru;
+ u32 reg, clk_src, uart_src, div;
+ unsigned long m = 0, n = 0, val;
+
+ if (priv->gpll_hz % rate == 0) {
+ clk_src = CLK_UART_SRC_SEL_GPLL;
+ uart_src = CLK_UART_SEL_SRC;
+ div = DIV_ROUND_UP(priv->gpll_hz, rate);
+ } else if (priv->cpll_hz % rate == 0) {
+ clk_src = CLK_UART_SRC_SEL_CPLL;
+ uart_src = CLK_UART_SEL_SRC;
+ div = DIV_ROUND_UP(priv->gpll_hz, rate);
+ } else if (rate == OSC_HZ) {
+ clk_src = CLK_UART_SRC_SEL_GPLL;
+ uart_src = CLK_UART_SEL_XIN24M;
+ div = 2;
+ } else {
+ clk_src = CLK_UART_SRC_SEL_GPLL;
+ uart_src = CLK_UART_SEL_FRAC;
+ div = 2;
+ rational_best_approximation(rate, priv->gpll_hz / div,
+ GENMASK(16 - 1, 0),
+ GENMASK(16 - 1, 0),
+ &m, &n);
+ }
+
+ switch (clk_id) {
+ case SCLK_UART1:
+ reg = 52;
+ break;
+ case SCLK_UART2:
+ reg = 54;
+ break;
+ case SCLK_UART3:
+ reg = 56;
+ break;
+ case SCLK_UART4:
+ reg = 58;
+ break;
+ case SCLK_UART5:
+ reg = 60;
+ break;
+ case SCLK_UART6:
+ reg = 62;
+ break;
+ case SCLK_UART7:
+ reg = 64;
+ break;
+ case SCLK_UART8:
+ reg = 66;
+ break;
+ case SCLK_UART9:
+ reg = 68;
+ break;
+ default:
+ return -ENOENT;
+ }
+ rk_clrsetreg(&cru->clksel_con[reg],
+ CLK_UART_SEL_MASK | CLK_UART_SRC_SEL_MASK |
+ CLK_UART_SRC_DIV_MASK,
+ (clk_src << CLK_UART_SRC_SEL_SHIFT) |
+ (uart_src << CLK_UART_SEL_SHIFT) |
+ ((div - 1) << CLK_UART_SRC_DIV_SHIFT));
+ if (m && n) {
+ val = m << CLK_UART_FRAC_NUMERATOR_SHIFT | n;
+ writel(val, &cru->clksel_con[reg + 1]);
+ }
+
+ return rk3568_uart_get_rate(priv, clk_id);
+}
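The fractional branch above mirrors the RTC32K setup; a brief worked example may help (a sketch only — the 1188 MHz GPLL / 1000 MHz CPLL and the 1.8432 MHz request are illustrative assumptions, not values fixed by this patch):

/* With priv->gpll_hz == 1188000000, a request for 1843200 Hz divides
 * evenly into neither GPLL nor CPLL, so the code selects
 * CLK_UART_SEL_FRAC with div = 2 (a 594 MHz source) and computes
 *
 *   rational_best_approximation(1843200, 594000000,
 *                               GENMASK(16 - 1, 0), GENMASK(16 - 1, 0),
 *                               &m, &n);   -> m = 64, n = 20625
 *
 * The fractional register then gives 594000000 * 64 / 20625 == 1843200,
 * which is what rk3568_uart_get_rate() reports back.
 */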
+#endif
+
+static ulong rk3568_clk_get_rate(struct clk *clk)
+{
+ struct rk3568_clk_priv *priv = dev_get_priv(clk->dev);
+ ulong rate = 0;
+
+ if (!priv->gpll_hz) {
+ printf("%s gpll=%lu\n", __func__, priv->gpll_hz);
+ return -ENOENT;
+ }
+
+ switch (clk->id) {
+ case PLL_APLL:
+ case ARMCLK:
+ rate = rockchip_pll_get_rate(&rk3568_pll_clks[APLL], priv->cru,
+ APLL);
+ break;
+ case PLL_CPLL:
+ rate = rockchip_pll_get_rate(&rk3568_pll_clks[CPLL], priv->cru,
+ CPLL);
+ break;
+ case PLL_GPLL:
+ rate = rockchip_pll_get_rate(&rk3568_pll_clks[GPLL], priv->cru,
+ GPLL);
+ break;
+ case PLL_NPLL:
+ rate = rockchip_pll_get_rate(&rk3568_pll_clks[NPLL], priv->cru,
+ NPLL);
+ break;
+ case PLL_VPLL:
+ rate = rockchip_pll_get_rate(&rk3568_pll_clks[VPLL], priv->cru,
+ VPLL);
+ break;
+ case PLL_DPLL:
+ rate = rockchip_pll_get_rate(&rk3568_pll_clks[DPLL], priv->cru,
+ DPLL);
+ break;
+ case ACLK_BUS:
+ case PCLK_BUS:
+ case PCLK_WDT_NS:
+ rate = rk3568_bus_get_clk(priv, clk->id);
+ break;
+ case ACLK_PERIMID:
+ case HCLK_PERIMID:
+ rate = rk3568_perimid_get_clk(priv, clk->id);
+ break;
+ case ACLK_TOP_HIGH:
+ case ACLK_TOP_LOW:
+ case HCLK_TOP:
+ case PCLK_TOP:
+ rate = rk3568_top_get_clk(priv, clk->id);
+ break;
+ case CLK_I2C1:
+ case CLK_I2C2:
+ case CLK_I2C3:
+ case CLK_I2C4:
+ case CLK_I2C5:
+ rate = rk3568_i2c_get_clk(priv, clk->id);
+ break;
+ case CLK_SPI0:
+ case CLK_SPI1:
+ case CLK_SPI2:
+ case CLK_SPI3:
+ rate = rk3568_spi_get_clk(priv, clk->id);
+ break;
+ case CLK_PWM1:
+ case CLK_PWM2:
+ case CLK_PWM3:
+ rate = rk3568_pwm_get_clk(priv, clk->id);
+ break;
+ case CLK_SARADC:
+ case CLK_TSADC_TSEN:
+ case CLK_TSADC:
+ rate = rk3568_adc_get_clk(priv, clk->id);
+ break;
+ case HCLK_SDMMC0:
+ case CLK_SDMMC0:
+ case CLK_SDMMC1:
+ case CLK_SDMMC2:
+ rate = rk3568_sdmmc_get_clk(priv, clk->id);
+ break;
+ case SCLK_SFC:
+ rate = rk3568_sfc_get_clk(priv);
+ break;
+ case NCLK_NANDC:
+ rate = rk3568_nand_get_clk(priv);
+ break;
+ case CCLK_EMMC:
+ rate = rk3568_emmc_get_clk(priv);
+ break;
+ case BCLK_EMMC:
+ rate = rk3568_emmc_get_bclk(priv);
+ break;
+#ifndef CONFIG_SPL_BUILD
+ case ACLK_VOP:
+ rate = rk3568_aclk_vop_get_clk(priv);
+ break;
+ case DCLK_VOP0:
+ case DCLK_VOP1:
+ case DCLK_VOP2:
+ rate = rk3568_dclk_vop_get_clk(priv, clk->id);
+ break;
+ case SCLK_GMAC0:
+ case CLK_MAC0_2TOP:
+ case CLK_MAC0_REFOUT:
+ rate = rk3568_gmac_src_get_clk(priv, 0);
+ break;
+ case CLK_MAC0_OUT:
+ rate = rk3568_gmac_out_get_clk(priv, 0);
+ break;
+ case CLK_GMAC0_PTP_REF:
+ rate = rk3568_gmac_ptp_ref_get_clk(priv, 0);
+ break;
+ case SCLK_GMAC1:
+ case CLK_MAC1_2TOP:
+ case CLK_MAC1_REFOUT:
+ rate = rk3568_gmac_src_get_clk(priv, 1);
+ break;
+ case CLK_MAC1_OUT:
+ rate = rk3568_gmac_out_get_clk(priv, 1);
+ break;
+ case CLK_GMAC1_PTP_REF:
+ rate = rk3568_gmac_ptp_ref_get_clk(priv, 1);
+ break;
+ case DCLK_EBC:
+ rate = rk3568_ebc_get_clk(priv);
+ break;
+ case ACLK_RKVDEC_PRE:
+ case ACLK_RKVDEC:
+ case CLK_RKVDEC_CORE:
+ rate = rk3568_rkvdec_get_clk(priv, clk->id);
+ break;
+ case TCLK_WDT_NS:
+ rate = OSC_HZ;
+ break;
+ case SCLK_UART1:
+ case SCLK_UART2:
+ case SCLK_UART3:
+ case SCLK_UART4:
+ case SCLK_UART5:
+ case SCLK_UART6:
+ case SCLK_UART7:
+ case SCLK_UART8:
+ case SCLK_UART9:
+ rate = rk3568_uart_get_rate(priv, clk->id);
+ break;
+#endif
+ case ACLK_SECURE_FLASH:
+ case ACLK_CRYPTO_NS:
+ case HCLK_SECURE_FLASH:
+ case HCLK_CRYPTO_NS:
+ case CLK_CRYPTO_NS_RNG:
+ case CLK_CRYPTO_NS_CORE:
+ case CLK_CRYPTO_NS_PKA:
+ rate = rk3568_crypto_get_rate(priv, clk->id);
+ break;
+ case CPLL_500M:
+ case CPLL_333M:
+ case CPLL_250M:
+ case CPLL_125M:
+ case CPLL_100M:
+ case CPLL_62P5M:
+ case CPLL_50M:
+ case CPLL_25M:
+ rate = rk3568_cpll_div_get_rate(priv, clk->id);
+ break;
+ default:
+ return -ENOENT;
+ }
+
+ return rate;
+}
+
+static ulong rk3568_clk_set_rate(struct clk *clk, ulong rate)
+{
+ struct rk3568_clk_priv *priv = dev_get_priv(clk->dev);
+ ulong ret = 0;
+
+ if (!priv->gpll_hz) {
+ printf("%s gpll=%lu\n", __func__, priv->gpll_hz);
+ return -ENOENT;
+ }
+
+ switch (clk->id) {
+ case PLL_APLL:
+ case ARMCLK:
+ if (priv->armclk_hz)
+ rk3568_armclk_set_clk(priv, rate);
+ priv->armclk_hz = rate;
+ break;
+ case PLL_CPLL:
+ ret = rockchip_pll_set_rate(&rk3568_pll_clks[CPLL], priv->cru,
+ CPLL, rate);
+ priv->cpll_hz = rockchip_pll_get_rate(&rk3568_pll_clks[CPLL],
+ priv->cru, CPLL);
+ break;
+ case PLL_GPLL:
+ ret = rockchip_pll_set_rate(&rk3568_pll_clks[GPLL], priv->cru,
+ GPLL, rate);
+ priv->gpll_hz = rockchip_pll_get_rate(&rk3568_pll_clks[GPLL],
+ priv->cru, GPLL);
+ break;
+ case PLL_NPLL:
+ ret = rockchip_pll_set_rate(&rk3568_pll_clks[NPLL], priv->cru,
+ NPLL, rate);
+ break;
+ case PLL_VPLL:
+ ret = rockchip_pll_set_rate(&rk3568_pll_clks[VPLL], priv->cru,
+ VPLL, rate);
+ priv->vpll_hz = rockchip_pll_get_rate(&rk3568_pll_clks[VPLL],
+ priv->cru,
+ VPLL);
+ break;
+ case ACLK_BUS:
+ case PCLK_BUS:
+ case PCLK_WDT_NS:
+ ret = rk3568_bus_set_clk(priv, clk->id, rate);
+ break;
+ case ACLK_PERIMID:
+ case HCLK_PERIMID:
+ ret = rk3568_perimid_set_clk(priv, clk->id, rate);
+ break;
+ case ACLK_TOP_HIGH:
+ case ACLK_TOP_LOW:
+ case HCLK_TOP:
+ case PCLK_TOP:
+ ret = rk3568_top_set_clk(priv, clk->id, rate);
+ break;
+ case CLK_I2C1:
+ case CLK_I2C2:
+ case CLK_I2C3:
+ case CLK_I2C4:
+ case CLK_I2C5:
+ ret = rk3568_i2c_set_clk(priv, clk->id, rate);
+ break;
+ case CLK_SPI0:
+ case CLK_SPI1:
+ case CLK_SPI2:
+ case CLK_SPI3:
+ ret = rk3568_spi_set_clk(priv, clk->id, rate);
+ break;
+ case CLK_PWM1:
+ case CLK_PWM2:
+ case CLK_PWM3:
+ ret = rk3568_pwm_set_clk(priv, clk->id, rate);
+ break;
+ case CLK_SARADC:
+ case CLK_TSADC_TSEN:
+ case CLK_TSADC:
+ ret = rk3568_adc_set_clk(priv, clk->id, rate);
+ break;
+ case HCLK_SDMMC0:
+ case CLK_SDMMC0:
+ case CLK_SDMMC1:
+ case CLK_SDMMC2:
+ ret = rk3568_sdmmc_set_clk(priv, clk->id, rate);
+ break;
+ case SCLK_SFC:
+ ret = rk3568_sfc_set_clk(priv, rate);
+ break;
+ case NCLK_NANDC:
+ ret = rk3568_nand_set_clk(priv, rate);
+ break;
+ case CCLK_EMMC:
+ ret = rk3568_emmc_set_clk(priv, rate);
+ break;
+ case BCLK_EMMC:
+ ret = rk3568_emmc_set_bclk(priv, rate);
+ break;
+#ifndef CONFIG_SPL_BUILD
+ case ACLK_VOP:
+ ret = rk3568_aclk_vop_set_clk(priv, rate);
+ break;
+ case DCLK_VOP0:
+ case DCLK_VOP1:
+ case DCLK_VOP2:
+ ret = rk3568_dclk_vop_set_clk(priv, clk->id, rate);
+ break;
+ case SCLK_GMAC0:
+ case CLK_MAC0_2TOP:
+ case CLK_MAC0_REFOUT:
+ ret = rk3568_gmac_src_set_clk(priv, 0, rate);
+ break;
+ case CLK_MAC0_OUT:
+ ret = rk3568_gmac_out_set_clk(priv, 0, rate);
+ break;
+ case SCLK_GMAC0_RX_TX:
+ ret = rk3568_gmac_tx_rx_set_clk(priv, 0, rate);
+ break;
+ case CLK_GMAC0_PTP_REF:
+ ret = rk3568_gmac_ptp_ref_set_clk(priv, 0, rate);
+ break;
+ case SCLK_GMAC1:
+ case CLK_MAC1_2TOP:
+ case CLK_MAC1_REFOUT:
+ ret = rk3568_gmac_src_set_clk(priv, 1, rate);
+ break;
+ case CLK_MAC1_OUT:
+ ret = rk3568_gmac_out_set_clk(priv, 1, rate);
+ break;
+ case SCLK_GMAC1_RX_TX:
+ ret = rk3568_gmac_tx_rx_set_clk(priv, 1, rate);
+ break;
+ case CLK_GMAC1_PTP_REF:
+ ret = rk3568_gmac_ptp_ref_set_clk(priv, 1, rate);
+ break;
+ case DCLK_EBC:
+ ret = rk3568_ebc_set_clk(priv, rate);
+ break;
+ case ACLK_RKVDEC_PRE:
+ case ACLK_RKVDEC:
+ case CLK_RKVDEC_CORE:
+ ret = rk3568_rkvdec_set_clk(priv, clk->id, rate);
+ break;
+ case TCLK_WDT_NS:
+ ret = OSC_HZ;
+ break;
+ case SCLK_UART1:
+ case SCLK_UART2:
+ case SCLK_UART3:
+ case SCLK_UART4:
+ case SCLK_UART5:
+ case SCLK_UART6:
+ case SCLK_UART7:
+ case SCLK_UART8:
+ case SCLK_UART9:
+ ret = rk3568_uart_set_rate(priv, clk->id, rate);
+ break;
+#endif
+ case ACLK_SECURE_FLASH:
+ case ACLK_CRYPTO_NS:
+ case HCLK_SECURE_FLASH:
+ case HCLK_CRYPTO_NS:
+ case CLK_CRYPTO_NS_RNG:
+ case CLK_CRYPTO_NS_CORE:
+ case CLK_CRYPTO_NS_PKA:
+ ret = rk3568_crypto_set_rate(priv, clk->id, rate);
+ break;
+ case CPLL_500M:
+ case CPLL_333M:
+ case CPLL_250M:
+ case CPLL_125M:
+ case CPLL_100M:
+ case CPLL_62P5M:
+ case CPLL_50M:
+ case CPLL_25M:
+ ret = rk3568_cpll_div_set_rate(priv, clk->id, rate);
+ break;
+ default:
+ return -ENOENT;
+ }
+
+ return ret;
+}
+
+#if (IS_ENABLED(OF_CONTROL)) || (!IS_ENABLED(OF_PLATDATA))
+static int rk3568_gmac0_src_set_parent(struct clk *clk, struct clk *parent)
+{
+ struct rk3568_clk_priv *priv = dev_get_priv(clk->dev);
+ struct rk3568_cru *cru = priv->cru;
+
+ if (parent->id == CLK_MAC0_2TOP)
+ rk_clrsetreg(&cru->clksel_con[31],
+ RMII0_EXTCLK_SEL_MASK,
+ RMII0_EXTCLK_SEL_MAC0_TOP <<
+ RMII0_EXTCLK_SEL_SHIFT);
+ else
+ rk_clrsetreg(&cru->clksel_con[31],
+ RMII0_EXTCLK_SEL_MASK,
+ RMII0_EXTCLK_SEL_IO << RMII0_EXTCLK_SEL_SHIFT);
+ return 0;
+}
+
+static int rk3568_gmac1_src_set_parent(struct clk *clk, struct clk *parent)
+{
+ struct rk3568_clk_priv *priv = dev_get_priv(clk->dev);
+ struct rk3568_cru *cru = priv->cru;
+
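+	/* GMAC1 reuses the GMAC0 bit layout, but lives in CRU_CLKSEL_CON33. */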
+ if (parent->id == CLK_MAC1_2TOP)
+ rk_clrsetreg(&cru->clksel_con[33],
+ RMII0_EXTCLK_SEL_MASK,
+ RMII0_EXTCLK_SEL_MAC0_TOP <<
+ RMII0_EXTCLK_SEL_SHIFT);
+ else
+ rk_clrsetreg(&cru->clksel_con[33],
+ RMII0_EXTCLK_SEL_MASK,
+ RMII0_EXTCLK_SEL_IO << RMII0_EXTCLK_SEL_SHIFT);
+ return 0;
+}
+
+static int rk3568_gmac0_tx_rx_set_parent(struct clk *clk, struct clk *parent)
+{
+ struct rk3568_clk_priv *priv = dev_get_priv(clk->dev);
+ struct rk3568_cru *cru = priv->cru;
+
+ if (parent->id == SCLK_GMAC0_RGMII_SPEED)
+ rk_clrsetreg(&cru->clksel_con[31],
+ RMII0_MODE_MASK,
+ RMII0_MODE_SEL_RGMII << RMII0_MODE_SHIFT);
+ else if (parent->id == SCLK_GMAC0_RMII_SPEED)
+ rk_clrsetreg(&cru->clksel_con[31],
+ RMII0_MODE_MASK,
+ RMII0_MODE_SEL_RMII << RMII0_MODE_SHIFT);
+ else
+ rk_clrsetreg(&cru->clksel_con[31],
+ RMII0_MODE_MASK,
+ RMII0_MODE_SEL_GMII << RMII0_MODE_SHIFT);
+
+ return 0;
+}
+
+static int rk3568_gmac1_tx_rx_set_parent(struct clk *clk, struct clk *parent)
+{
+ struct rk3568_clk_priv *priv = dev_get_priv(clk->dev);
+ struct rk3568_cru *cru = priv->cru;
+
+ if (parent->id == SCLK_GMAC1_RGMII_SPEED)
+ rk_clrsetreg(&cru->clksel_con[33],
+ RMII0_MODE_MASK,
+ RMII0_MODE_SEL_RGMII << RMII0_MODE_SHIFT);
+ else if (parent->id == SCLK_GMAC1_RMII_SPEED)
+ rk_clrsetreg(&cru->clksel_con[33],
+ RMII0_MODE_MASK,
+ RMII0_MODE_SEL_RMII << RMII0_MODE_SHIFT);
+ else
+ rk_clrsetreg(&cru->clksel_con[33],
+ RMII0_MODE_MASK,
+ RMII0_MODE_SEL_GMII << RMII0_MODE_SHIFT);
+
+ return 0;
+}
+
+static int rk3568_dclk_vop_set_parent(struct clk *clk, struct clk *parent)
+{
+ struct rk3568_clk_priv *priv = dev_get_priv(clk->dev);
+ struct rk3568_cru *cru = priv->cru;
+ u32 con_id;
+
+ switch (clk->id) {
+ case DCLK_VOP0:
+ con_id = 39;
+ break;
+ case DCLK_VOP1:
+ con_id = 40;
+ break;
+ case DCLK_VOP2:
+ con_id = 41;
+ break;
+ default:
+ return -EINVAL;
+ }
+ if (parent->id == PLL_VPLL) {
+ rk_clrsetreg(&cru->clksel_con[con_id], DCLK0_VOP_SEL_MASK,
+ DCLK_VOP_SEL_VPLL << DCLK0_VOP_SEL_SHIFT);
+ } else {
+ rk_clrsetreg(&cru->clksel_con[con_id], DCLK0_VOP_SEL_MASK,
+ DCLK_VOP_SEL_HPLL << DCLK0_VOP_SEL_SHIFT);
+ }
+
+ return 0;
+}
+
+static int rk3568_rkvdec_set_parent(struct clk *clk, struct clk *parent)
+{
+ struct rk3568_clk_priv *priv = dev_get_priv(clk->dev);
+ struct rk3568_cru *cru = priv->cru;
+ u32 con_id, mask, shift;
+
+ switch (clk->id) {
+ case ACLK_RKVDEC_PRE:
+ con_id = 47;
+ mask = ACLK_RKVDEC_SEL_MASK;
+ shift = ACLK_RKVDEC_SEL_SHIFT;
+ break;
+ case CLK_RKVDEC_CORE:
+ con_id = 49;
+ mask = CLK_RKVDEC_CORE_SEL_MASK;
+ shift = CLK_RKVDEC_CORE_SEL_SHIFT;
+ break;
+ default:
+ return -EINVAL;
+ }
+ if (parent->id == PLL_CPLL) {
+ rk_clrsetreg(&cru->clksel_con[con_id], mask,
+ ACLK_RKVDEC_SEL_CPLL << shift);
+ } else {
+ rk_clrsetreg(&cru->clksel_con[con_id], mask,
+ ACLK_RKVDEC_SEL_GPLL << shift);
+ }
+
+ return 0;
+}
+
+static int rk3568_clk_set_parent(struct clk *clk, struct clk *parent)
+{
+ switch (clk->id) {
+ case SCLK_GMAC0:
+ return rk3568_gmac0_src_set_parent(clk, parent);
+ case SCLK_GMAC1:
+ return rk3568_gmac1_src_set_parent(clk, parent);
+ case SCLK_GMAC0_RX_TX:
+ return rk3568_gmac0_tx_rx_set_parent(clk, parent);
+ case SCLK_GMAC1_RX_TX:
+ return rk3568_gmac1_tx_rx_set_parent(clk, parent);
+ case DCLK_VOP0:
+ case DCLK_VOP1:
+ case DCLK_VOP2:
+ return rk3568_dclk_vop_set_parent(clk, parent);
+ case ACLK_RKVDEC_PRE:
+ case CLK_RKVDEC_CORE:
+ return rk3568_rkvdec_set_parent(clk, parent);
+ default:
+ return -ENOENT;
+ }
+
+ return 0;
+}
+#endif
+
+static struct clk_ops rk3568_clk_ops = {
+ .get_rate = rk3568_clk_get_rate,
+ .set_rate = rk3568_clk_set_rate,
+#if (IS_ENABLED(OF_CONTROL)) || (!IS_ENABLED(OF_PLATDATA))
+ .set_parent = rk3568_clk_set_parent,
+#endif
+};
+
+static void rk3568_clk_init(struct rk3568_clk_priv *priv)
+{
+ int ret;
+
+ priv->sync_kernel = false;
+ if (!priv->armclk_enter_hz) {
+ priv->armclk_enter_hz =
+ rockchip_pll_get_rate(&rk3568_pll_clks[APLL],
+ priv->cru, APLL);
+ priv->armclk_init_hz = priv->armclk_enter_hz;
+ }
+
+ if (priv->armclk_init_hz != APLL_HZ) {
+ ret = rk3568_armclk_set_clk(priv, APLL_HZ);
+ if (!ret)
+ priv->armclk_init_hz = APLL_HZ;
+ }
+ if (priv->cpll_hz != CPLL_HZ) {
+ ret = rockchip_pll_set_rate(&rk3568_pll_clks[CPLL], priv->cru,
+ CPLL, CPLL_HZ);
+ if (!ret)
+ priv->cpll_hz = CPLL_HZ;
+ }
+ if (priv->gpll_hz != GPLL_HZ) {
+ ret = rockchip_pll_set_rate(&rk3568_pll_clks[GPLL], priv->cru,
+ GPLL, GPLL_HZ);
+ if (!ret)
+ priv->gpll_hz = GPLL_HZ;
+ }
+
+#ifdef CONFIG_SPL_BUILD
+ ret = rk3568_bus_set_clk(priv, ACLK_BUS, 150000000);
+ if (ret < 0)
+ printf("Fail to set the ACLK_BUS clock.\n");
+#endif
+
+ priv->ppll_hz = rk3568_pmu_pll_get_rate(priv, PPLL);
+ priv->hpll_hz = rk3568_pmu_pll_get_rate(priv, HPLL);
+}
+
+static int rk3568_clk_probe(struct udevice *dev)
+{
+ struct rk3568_clk_priv *priv = dev_get_priv(dev);
+ int ret;
+
+ priv->grf = syscon_get_first_range(ROCKCHIP_SYSCON_GRF);
+ if (IS_ERR(priv->grf))
+ return PTR_ERR(priv->grf);
+
+ rk3568_clk_init(priv);
+
+ /* Process 'assigned-{clocks/clock-parents/clock-rates}' properties */
+ ret = clk_set_defaults(dev, 1);
+ if (ret)
+ debug("%s clk_set_defaults failed %d\n", __func__, ret);
+ else
+ priv->sync_kernel = true;
+
+ return 0;
+}
+
+static int rk3568_clk_ofdata_to_platdata(struct udevice *dev)
+{
+ struct rk3568_clk_priv *priv = dev_get_priv(dev);
+
+ priv->cru = dev_read_addr_ptr(dev);
+
+ return 0;
+}
+
+static int rk3568_clk_bind(struct udevice *dev)
+{
+ int ret;
+ struct udevice *sys_child;
+ struct sysreset_reg *priv;
+
+ /* The reset driver does not have a device node, so bind it here */
+ ret = device_bind_driver(dev, "rockchip_sysreset", "sysreset",
+ &sys_child);
+ if (ret) {
+ debug("Warning: No sysreset driver: ret=%d\n", ret);
+ } else {
+ priv = malloc(sizeof(struct sysreset_reg));
+ priv->glb_srst_fst_value = offsetof(struct rk3568_cru,
+ glb_srst_fst);
+ priv->glb_srst_snd_value = offsetof(struct rk3568_cru,
+ glb_srsr_snd);
+ }
+
+#if CONFIG_IS_ENABLED(RESET_ROCKCHIP)
+ ret = offsetof(struct rk3568_cru, softrst_con[0]);
+ ret = rockchip_reset_bind(dev, ret, 30);
+ if (ret)
+		debug("Warning: software reset driver bind failed\n");
+#endif
+
+ return 0;
+}
+
+static const struct udevice_id rk3568_clk_ids[] = {
+ { .compatible = "rockchip,rk3568-cru" },
+ { }
+};
+
+U_BOOT_DRIVER(rockchip_rk3568_cru) = {
+ .name = "rockchip_rk3568_cru",
+ .id = UCLASS_CLK,
+ .of_match = rk3568_clk_ids,
+ .priv_auto = sizeof(struct rk3568_clk_priv),
+ .of_to_plat = rk3568_clk_ofdata_to_platdata,
+ .ops = &rk3568_clk_ops,
+ .bind = rk3568_clk_bind,
+ .probe = rk3568_clk_probe,
+#if CONFIG_IS_ENABLED(OF_PLATDATA)
+ .plat_auto = sizeof(struct rk3568_clk_plat),
+#endif
+};
diff --git a/drivers/mtd/nand/raw/Kconfig b/drivers/mtd/nand/raw/Kconfig
index ed151ee0a5c..a901ce55111 100644
--- a/drivers/mtd/nand/raw/Kconfig
+++ b/drivers/mtd/nand/raw/Kconfig
@@ -333,6 +333,22 @@ config CORTINA_NAND
The controller supports a maximum 8k page size and supports
a maximum 40-bit error correction per sector of 1024 bytes.
+config ROCKCHIP_NAND
+ bool "Support for NAND controller on Rockchip SoCs"
+ depends on ARCH_ROCKCHIP
+ select SYS_NAND_SELF_INIT
+ select DM_MTD
+ imply CMD_NAND
+ help
+	  Enables support for the NAND flash controller (NFC) found on
+	  Rockchip SoCs.
+	  There are four different versions of the NAND flash controller,
+	  including:
+ NFC v600: RK2928, RK3066, RK3188
+ NFC v622: RK3036, RK3128
+ NFC v800: RK3308, RV1108
+ NFC v900: PX30, RK3326
+
comment "Generic NAND options"
config SYS_NAND_BLOCK_SIZE
diff --git a/drivers/mtd/nand/raw/Makefile b/drivers/mtd/nand/raw/Makefile
index f3f0e15a157..a5ed2c536f5 100644
--- a/drivers/mtd/nand/raw/Makefile
+++ b/drivers/mtd/nand/raw/Makefile
@@ -70,6 +70,7 @@ obj-$(CONFIG_NAND_SUNXI) += sunxi_nand.o
obj-$(CONFIG_NAND_ZYNQ) += zynq_nand.o
obj-$(CONFIG_NAND_STM32_FMC2) += stm32_fmc2_nand.o
obj-$(CONFIG_CORTINA_NAND) += cortina_nand.o
+obj-$(CONFIG_ROCKCHIP_NAND) += rockchip_nfc.o
else # minimal SPL drivers
diff --git a/drivers/mtd/nand/raw/rockchip_nfc.c b/drivers/mtd/nand/raw/rockchip_nfc.c
new file mode 100644
index 00000000000..21776f3b14c
--- /dev/null
+++ b/drivers/mtd/nand/raw/rockchip_nfc.c
@@ -0,0 +1,1253 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Rockchip NAND Flash controller driver.
+ * Copyright (C) 2021 Rockchip Inc.
+ * Author: Yifeng Zhao <yifeng.zhao@rock-chips.com>
+ */
+
+#include <common.h>
+#include <asm/io.h>
+#include <clk.h>
+#include <dm.h>
+#include <dm/device_compat.h>
+#include <dm/devres.h>
+#include <fdtdec.h>
+#include <inttypes.h>
+#include <linux/delay.h>
+#include <linux/dma-direction.h>
+#include <linux/dma-mapping.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/nand.h>
+#include <linux/mtd/partitions.h>
+#include <memalign.h>
+#include <nand.h>
+
+/*
+ * NFC Page Data Layout:
+ *	1024 bytes data + 4 bytes sys data + 28 to 124 bytes ECC data +
+ *	1024 bytes data + 4 bytes sys data + 28 to 124 bytes ECC data +
+ *	......
+ * NAND Page Data Layout:
+ *	1024 * n bytes data + m bytes OOB
+ * Original Bad Block Mask Location:
+ *	First byte of the OOB (spare) area.
+ * nand_chip->oob_poi data layout:
+ *	4 bytes sys data + .... + 4 bytes sys data + ECC data.
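+ * Example:
+ *	With 16-bit ECC and 1024 byte steps, each NFC step holds
+ *	1024 data bytes + 4 sys bytes + 28 ECC bytes.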
+ */
+
+/* NAND controller register definition */
+#define NFC_READ (0)
+#define NFC_WRITE (1)
+
+#define NFC_FMCTL (0x00)
+#define FMCTL_CE_SEL_M 0xFF
+#define FMCTL_CE_SEL(x) (1 << (x))
+#define FMCTL_WP BIT(8)
+#define FMCTL_RDY BIT(9)
+
+#define NFC_FMWAIT (0x04)
+#define FLCTL_RST BIT(0)
+#define FLCTL_WR (1) /* 0: read, 1: write */
+#define FLCTL_XFER_ST BIT(2)
+#define FLCTL_XFER_EN BIT(3)
+#define FLCTL_ACORRECT BIT(10) /* Auto correct error bits. */
+#define FLCTL_XFER_READY BIT(20)
+#define FLCTL_XFER_SECTOR (22)
+#define FLCTL_TOG_FIX BIT(29)
+
+#define BCHCTL_BANK_M (7 << 5)
+#define BCHCTL_BANK (5)
+
+#define DMA_ST BIT(0)
+#define DMA_WR (1) /* 0: write, 1: read */
+#define DMA_EN BIT(2)
+#define DMA_AHB_SIZE (3) /* 0: 1, 1: 2, 2: 4 */
+#define DMA_BURST_SIZE (6) /* 0: 1, 3: 4, 5: 8, 7: 16 */
+#define DMA_INC_NUM (9) /* 1 - 16 */
+
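+/* Reassemble an ECC error count that is split into low/high bitfields. */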
+#define ECC_ERR_CNT(x, e) ((((x) >> (e).low) & (e).low_mask) |\
+ (((x) >> (e).high) & (e).high_mask) << (e).low_bn)
+#define INT_DMA BIT(0)
+#define NFC_BANK (0x800)
+#define NFC_BANK_STEP (0x100)
+#define BANK_DATA (0x00)
+#define BANK_ADDR (0x04)
+#define BANK_CMD (0x08)
+#define NFC_SRAM0 (0x1000)
+#define NFC_SRAM1 (0x1400)
+#define NFC_SRAM_SIZE (0x400)
+#define NFC_TIMEOUT_MS (500)
+#define NFC_MAX_OOB_PER_STEP 128
+#define NFC_MIN_OOB_PER_STEP 64
+#define MAX_DATA_SIZE 0xFFFC
+#define MAX_ADDRESS_CYC 6
+#define NFC_ECC_MAX_MODES 4
+#define NFC_RB_DELAY_US 50
+#define NFC_MAX_PAGE_SIZE (16 * 1024)
+#define NFC_MAX_OOB_SIZE (16 * 128)
+#define NFC_MAX_NSELS		(8) /* Some SoCs only have 1 or 2 CSs. */
+#define NFC_SYS_DATA_SIZE	(4) /* 4 bytes sys data in OOB per 1024 data bytes. */
+#define RK_DEFAULT_CLOCK_RATE	(150 * 1000 * 1000) /* 150 MHz */
+#define ACCTIMING(csrw, rwpw, rwcs) ((csrw) << 12 | (rwpw) << 5 | (rwcs))
+
+enum nfc_type {
+ NFC_V6,
+ NFC_V8,
+ NFC_V9,
+};
+
+/**
+ * struct ecc_cnt_status - ECC error count bitfield layout.
+ * @err_flag_bit: error flag bit index in the register.
+ * @low: ECC count low bitfield index in the register.
+ * @low_mask: low bitfield mask.
+ * @low_bn: ECC count low bitfield width.
+ * @high: ECC count high bitfield index in the register.
+ * @high_mask: high bitfield mask.
+ */
+struct ecc_cnt_status {
+ u8 err_flag_bit;
+ u8 low;
+ u8 low_mask;
+ u8 low_bn;
+ u8 high;
+ u8 high_mask;
+};
+
+/**
+ * struct nfc_cfg - NFC version-specific configuration.
+ * @type: NFC version
+ * @ecc_strengths: ECC strengths
+ * @ecc_cfgs: ECC config values
+ * @flctl_off: FLCTL register offset
+ * @bchctl_off: BCHCTL register offset
+ * @dma_data_buf_off: DMA_DATA_BUF register offset
+ * @dma_oob_buf_off: DMA_OOB_BUF register offset
+ * @dma_cfg_off: DMA_CFG register offset
+ * @dma_st_off: DMA_ST register offset
+ * @bch_st_off: BCH_ST register offset
+ * @randmz_off: RANDMZ register offset
+ * @int_en_off: interrupt enable register offset
+ * @int_clr_off: interrupt clean register offset
+ * @int_st_off: interrupt status register offset
+ * @oob0_off: oob0 register offset
+ * @oob1_off: oob1 register offset
+ * @ecc0: represent ECC0 status data
+ * @ecc1: represent ECC1 status data
+ */
+struct nfc_cfg {
+ enum nfc_type type;
+ u8 ecc_strengths[NFC_ECC_MAX_MODES];
+ u32 ecc_cfgs[NFC_ECC_MAX_MODES];
+ u32 flctl_off;
+ u32 bchctl_off;
+ u32 dma_cfg_off;
+ u32 dma_data_buf_off;
+ u32 dma_oob_buf_off;
+ u32 dma_st_off;
+ u32 bch_st_off;
+ u32 randmz_off;
+ u32 int_en_off;
+ u32 int_clr_off;
+ u32 int_st_off;
+ u32 oob0_off;
+ u32 oob1_off;
+ struct ecc_cnt_status ecc0;
+ struct ecc_cnt_status ecc1;
+};
+
+struct rk_nfc_nand_chip {
+ struct nand_chip chip;
+
+ u16 boot_blks;
+ u16 metadata_size;
+ u32 boot_ecc;
+ u32 timing;
+
+ u8 nsels;
+ u8 sels[0];
+ /* Nothing after this field. */
+};
+
+struct rk_nfc {
+ struct nand_hw_control controller;
+ const struct nfc_cfg *cfg;
+ struct udevice *dev;
+
+ struct clk *nfc_clk;
+ struct clk *ahb_clk;
+ void __iomem *regs;
+
+ int selected_bank;
+ u32 band_offset;
+ u32 cur_ecc;
+ u32 cur_timing;
+
+ u8 *page_buf;
+ u32 *oob_buf;
+
+ unsigned long assigned_cs;
+};
+
+static inline struct rk_nfc_nand_chip *rk_nfc_to_rknand(struct nand_chip *chip)
+{
+ return container_of(chip, struct rk_nfc_nand_chip, chip);
+}
+
+static inline u8 *rk_nfc_buf_to_data_ptr(struct nand_chip *chip, const u8 *p, int i)
+{
+ return (u8 *)p + i * chip->ecc.size;
+}
+
+static inline u8 *rk_nfc_buf_to_oob_ptr(struct nand_chip *chip, int i)
+{
+ u8 *poi;
+
+ poi = chip->oob_poi + i * NFC_SYS_DATA_SIZE;
+
+ return poi;
+}
+
+static inline u8 *rk_nfc_buf_to_oob_ecc_ptr(struct nand_chip *chip, int i)
+{
+ struct rk_nfc_nand_chip *rknand = rk_nfc_to_rknand(chip);
+ u8 *poi;
+
+ poi = chip->oob_poi + rknand->metadata_size + chip->ecc.bytes * i;
+
+ return poi;
+}
+
+static inline int rk_nfc_data_len(struct nand_chip *chip)
+{
+ return chip->ecc.size + chip->ecc.bytes + NFC_SYS_DATA_SIZE;
+}
+
+static inline u8 *rk_nfc_data_ptr(struct nand_chip *chip, int i)
+{
+ struct rk_nfc *nfc = nand_get_controller_data(chip);
+
+ return nfc->page_buf + i * rk_nfc_data_len(chip);
+}
+
+static inline u8 *rk_nfc_oob_ptr(struct nand_chip *chip, int i)
+{
+ struct rk_nfc *nfc = nand_get_controller_data(chip);
+
+ return nfc->page_buf + i * rk_nfc_data_len(chip) + chip->ecc.size;
+}
+
+static int rk_nfc_hw_ecc_setup(struct nand_chip *chip, u32 strength)
+{
+ struct rk_nfc *nfc = nand_get_controller_data(chip);
+ u32 reg, i;
+
+ for (i = 0; i < NFC_ECC_MAX_MODES; i++) {
+ if (strength == nfc->cfg->ecc_strengths[i]) {
+ reg = nfc->cfg->ecc_cfgs[i];
+ break;
+ }
+ }
+
+ if (i >= NFC_ECC_MAX_MODES)
+ return -EINVAL;
+
+ writel(reg, nfc->regs + nfc->cfg->bchctl_off);
+
+ /* Save chip ECC setting */
+ nfc->cur_ecc = strength;
+
+ return 0;
+}
+
+static void rk_nfc_select_chip(struct mtd_info *mtd, int cs)
+{
+ struct nand_chip *chip = mtd_to_nand(mtd);
+ struct rk_nfc *nfc = nand_get_controller_data(chip);
+ struct rk_nfc_nand_chip *rknand = rk_nfc_to_rknand(chip);
+ struct nand_ecc_ctrl *ecc = &chip->ecc;
+ u32 val;
+
+ if (cs < 0) {
+ nfc->selected_bank = -1;
+ /* Deselect the currently selected target. */
+ val = readl(nfc->regs + NFC_FMCTL);
+ val &= ~FMCTL_CE_SEL_M;
+ writel(val, nfc->regs + NFC_FMCTL);
+ return;
+ }
+
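+	/* Each bank has its own DATA/ADDR/CMD registers, NFC_BANK_STEP apart. */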
+ nfc->selected_bank = rknand->sels[cs];
+ nfc->band_offset = NFC_BANK + nfc->selected_bank * NFC_BANK_STEP;
+
+ val = readl(nfc->regs + NFC_FMCTL);
+ val &= ~FMCTL_CE_SEL_M;
+ val |= FMCTL_CE_SEL(nfc->selected_bank);
+
+ writel(val, nfc->regs + NFC_FMCTL);
+
+ /*
+ * Compare current chip timing with selected chip timing and
+ * change if needed.
+ */
+ if (nfc->cur_timing != rknand->timing) {
+ writel(rknand->timing, nfc->regs + NFC_FMWAIT);
+ nfc->cur_timing = rknand->timing;
+ }
+
+ /*
+ * Compare current chip ECC setting with selected chip ECC setting and
+ * change if needed.
+ */
+ if (nfc->cur_ecc != ecc->strength)
+ rk_nfc_hw_ecc_setup(chip, ecc->strength);
+}
+
+static inline int rk_nfc_wait_ioready(struct rk_nfc *nfc)
+{
+ u32 timeout = (CONFIG_SYS_HZ * NFC_TIMEOUT_MS) / 1000;
+ u32 time_start;
+
+ time_start = get_timer(0);
+ do {
+ if (readl(nfc->regs + NFC_FMCTL) & FMCTL_RDY)
+ return 0;
+ } while (get_timer(time_start) < timeout);
+
+	dev_err(nfc->dev, "wait for IO ready timed out\n");
+ return -ETIMEDOUT;
+}
+
+static void rk_nfc_read_buf(struct mtd_info *mtd, uint8_t *buf, int len)
+{
+ struct nand_chip *chip = mtd_to_nand(mtd);
+ struct rk_nfc *nfc = nand_get_controller_data(chip);
+ void __iomem *bank_base;
+ int i = 0;
+
+ bank_base = nfc->regs + nfc->band_offset + BANK_DATA;
+
+ for (i = 0; i < len; i++)
+ buf[i] = readl(bank_base);
+}
+
+static void rk_nfc_write_buf(struct mtd_info *mtd, const uint8_t *buf, int len)
+{
+ struct nand_chip *chip = mtd_to_nand(mtd);
+ struct rk_nfc *nfc = nand_get_controller_data(chip);
+ void __iomem *bank_base;
+ int i = 0;
+
+ bank_base = nfc->regs + nfc->band_offset + BANK_DATA;
+
+ for (i = 0; i < len; i++)
+ writel(buf[i], bank_base);
+}
+
+static void rk_nfc_cmd(struct mtd_info *mtd, int dat, unsigned int ctrl)
+{
+ struct nand_chip *chip = mtd_to_nand(mtd);
+ struct rk_nfc *nfc = nand_get_controller_data(chip);
+ void __iomem *bank_base;
+
+ bank_base = nfc->regs + nfc->band_offset;
+
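+	/* ALE/CLE steer writes to the per-bank address or command register. */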
+ if (ctrl & NAND_CTRL_CHANGE) {
+ if (ctrl & NAND_ALE)
+ bank_base += BANK_ADDR;
+ else if (ctrl & NAND_CLE)
+ bank_base += BANK_CMD;
+ chip->IO_ADDR_W = bank_base;
+ }
+
+ if (dat != NAND_CMD_NONE)
+ writel(dat & 0xFF, chip->IO_ADDR_W);
+}
+
+static uint8_t rockchip_nand_read_byte(struct mtd_info *mtd)
+{
+ uint8_t ret;
+
+ rk_nfc_read_buf(mtd, &ret, 1);
+
+ return ret;
+}
+
+static int rockchip_nand_dev_ready(struct mtd_info *mtd)
+{
+ struct nand_chip *chip = mtd_to_nand(mtd);
+ struct rk_nfc *nfc = nand_get_controller_data(chip);
+
+ if (readl(nfc->regs + NFC_FMCTL) & FMCTL_RDY)
+ return 1;
+
+ return 0;
+}
+
+static void rk_nfc_xfer_start(struct rk_nfc *nfc, u8 rw, u8 n_KB,
+ dma_addr_t dma_data, dma_addr_t dma_oob)
+{
+ u32 dma_reg, fl_reg, bch_reg;
+
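+	/* DMA setup: 32-bit AHB accesses, bursts of 16, address increment of 16. */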
+ dma_reg = DMA_ST | ((!rw) << DMA_WR) | DMA_EN | (2 << DMA_AHB_SIZE) |
+ (7 << DMA_BURST_SIZE) | (16 << DMA_INC_NUM);
+
+ fl_reg = (rw << FLCTL_WR) | FLCTL_XFER_EN | FLCTL_ACORRECT |
+ (n_KB << FLCTL_XFER_SECTOR) | FLCTL_TOG_FIX;
+
+ if (nfc->cfg->type == NFC_V6 || nfc->cfg->type == NFC_V8) {
+ bch_reg = readl_relaxed(nfc->regs + nfc->cfg->bchctl_off);
+ bch_reg = (bch_reg & (~BCHCTL_BANK_M)) |
+ (nfc->selected_bank << BCHCTL_BANK);
+ writel(bch_reg, nfc->regs + nfc->cfg->bchctl_off);
+ }
+
+ writel(dma_reg, nfc->regs + nfc->cfg->dma_cfg_off);
+ writel((u32)dma_data, nfc->regs + nfc->cfg->dma_data_buf_off);
+ writel((u32)dma_oob, nfc->regs + nfc->cfg->dma_oob_buf_off);
+ writel(fl_reg, nfc->regs + nfc->cfg->flctl_off);
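+	/* Writing FLCTL again with FLCTL_XFER_ST set starts the transfer. */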
+ fl_reg |= FLCTL_XFER_ST;
+ writel(fl_reg, nfc->regs + nfc->cfg->flctl_off);
+}
+
+static int rk_nfc_wait_for_xfer_done(struct rk_nfc *nfc)
+{
+ unsigned long timeout = (CONFIG_SYS_HZ * NFC_TIMEOUT_MS) / 1000;
+ void __iomem *ptr = nfc->regs + nfc->cfg->flctl_off;
+ u32 time_start;
+
+ time_start = get_timer(0);
+
+ do {
+ if (readl(ptr) & FLCTL_XFER_READY)
+ return 0;
+ } while (get_timer(time_start) < timeout);
+
+	dev_err(nfc->dev, "wait for transfer done timed out\n");
+ return -ETIMEDOUT;
+}
+
+static int rk_nfc_write_page_raw(struct mtd_info *mtd,
+ struct nand_chip *chip,
+ const u8 *buf,
+ int oob_required,
+ int page)
+{
+ struct rk_nfc_nand_chip *rknand = rk_nfc_to_rknand(chip);
+ struct rk_nfc *nfc = nand_get_controller_data(chip);
+ struct nand_ecc_ctrl *ecc = &chip->ecc;
+ int i, pages_per_blk;
+
+ pages_per_blk = mtd->erasesize / mtd->writesize;
+ if ((page < (pages_per_blk * rknand->boot_blks)) &&
+ rknand->boot_ecc != ecc->strength) {
+ /*
+ * There's currently no method to notify the MTD framework that
+ * a different ECC strength is in use for the boot blocks.
+ */
+ return -EIO;
+ }
+
+ if (!buf)
+ memset(nfc->page_buf, 0xff, mtd->writesize + mtd->oobsize);
+
+ for (i = 0; i < ecc->steps; i++) {
+ /* Copy data to the NFC buffer. */
+ if (buf)
+ memcpy(rk_nfc_data_ptr(chip, i),
+ rk_nfc_buf_to_data_ptr(chip, buf, i),
+ ecc->size);
+ /*
+ * The first four bytes of OOB are reserved for the
+ * boot ROM. In some debugging cases, such as with a
+ * read, erase and write back test these 4 bytes stored
+ * in OOB also need to be written back.
+ *
+ * The function nand_block_bad detects bad blocks like:
+ *
+ * bad = chip->oob_poi[chip->badblockpos];
+ *
+ * chip->badblockpos == 0 for a large page NAND Flash,
+ * so chip->oob_poi[0] is the bad block mask (BBM).
+ *
+ * The OOB data layout on the NFC is:
+ *
+ * PA0 PA1 PA2 PA3 | BBM OOB1 OOB2 OOB3 | ...
+ *
+ * or
+ *
+ * 0xFF 0xFF 0xFF 0xFF | BBM OOB1 OOB2 OOB3 | ...
+ *
+ * The code here just swaps the first 4 bytes with the last
+ * 4 bytes without losing any data.
+ *
+ * The chip->oob_poi data layout:
+ *
+ * BBM OOB1 OOB2 OOB3 |......| PA0 PA1 PA2 PA3
+ *
+ * The rk_nfc_ooblayout_free() function already has reserved
+ * these 4 bytes with:
+ *
+ * oob_region->offset = NFC_SYS_DATA_SIZE + 2;
+ */
+ if (!i)
+ memcpy(rk_nfc_oob_ptr(chip, i),
+ rk_nfc_buf_to_oob_ptr(chip, ecc->steps - 1),
+ NFC_SYS_DATA_SIZE);
+ else
+ memcpy(rk_nfc_oob_ptr(chip, i),
+ rk_nfc_buf_to_oob_ptr(chip, i - 1),
+ NFC_SYS_DATA_SIZE);
+ /* Copy ECC data to the NFC buffer. */
+ memcpy(rk_nfc_oob_ptr(chip, i) + NFC_SYS_DATA_SIZE,
+ rk_nfc_buf_to_oob_ecc_ptr(chip, i),
+ ecc->bytes);
+ }
+
+ nand_prog_page_begin_op(chip, page, 0, NULL, 0);
+ rk_nfc_write_buf(mtd, buf, mtd->writesize + mtd->oobsize);
+ return nand_prog_page_end_op(chip);
+}
+
+static int rk_nfc_write_page_hwecc(struct mtd_info *mtd,
+ struct nand_chip *chip,
+ const u8 *buf,
+ int oob_required,
+ int page)
+{
+ struct rk_nfc *nfc = nand_get_controller_data(chip);
+ struct rk_nfc_nand_chip *rknand = rk_nfc_to_rknand(chip);
+ struct nand_ecc_ctrl *ecc = &chip->ecc;
+ int oob_step = (ecc->bytes > 60) ? NFC_MAX_OOB_PER_STEP :
+ NFC_MIN_OOB_PER_STEP;
+ int pages_per_blk = mtd->erasesize / mtd->writesize;
+ int ret = 0, i, boot_rom_mode = 0;
+ dma_addr_t dma_data, dma_oob;
+ u32 reg;
+ u8 *oob;
+
+ nand_prog_page_begin_op(chip, page, 0, NULL, 0);
+
+ if (buf)
+ memcpy(nfc->page_buf, buf, mtd->writesize);
+ else
+ memset(nfc->page_buf, 0xFF, mtd->writesize);
+
+ /*
+ * The first blocks (4, 8 or 16 depending on the device) are used
+ * by the boot ROM and the first 32 bits of OOB need to link to
+ * the next page address in the same block. We can't directly copy
+ * OOB data from the MTD framework, because this page address
+ * conflicts for example with the bad block marker (BBM),
+ * so we shift all OOB data including the BBM with 4 byte positions.
+ * As a consequence the OOB size available to the MTD framework is
+ * also reduced with 4 bytes.
+ *
+ * PA0 PA1 PA2 PA3 | BBM OOB1 OOB2 OOB3 | ...
+ *
+ * If a NAND is not a boot medium or the page is not a boot block,
+ * the first 4 bytes are left untouched by writing 0xFF to them.
+ *
+ * 0xFF 0xFF 0xFF 0xFF | BBM OOB1 OOB2 OOB3 | ...
+ *
+ * Configure the ECC algorithm supported by the boot ROM.
+ */
+ if (page < (pages_per_blk * rknand->boot_blks)) {
+ boot_rom_mode = 1;
+ if (rknand->boot_ecc != ecc->strength)
+ rk_nfc_hw_ecc_setup(chip, rknand->boot_ecc);
+ }
+
+ for (i = 0; i < ecc->steps; i++) {
+ if (!i) {
+ reg = 0xFFFFFFFF;
+ } else {
+ oob = chip->oob_poi + (i - 1) * NFC_SYS_DATA_SIZE;
+ reg = oob[0] | oob[1] << 8 | oob[2] << 16 |
+ oob[3] << 24;
+ }
+
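+		/*
+		 * Boot ROM pages carry the page offset within the block
+		 * (multiplied by 4) in the first OOB word instead of data.
+		 */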
+ if (!i && boot_rom_mode)
+ reg = (page & (pages_per_blk - 1)) * 4;
+
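+		/*
+		 * NFC_V9 packs the per-step OOB words contiguously; older
+		 * controllers space them oob_step bytes apart.
+		 */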
+ if (nfc->cfg->type == NFC_V9)
+ nfc->oob_buf[i] = reg;
+ else
+ nfc->oob_buf[i * (oob_step / 4)] = reg;
+ }
+
+ dma_data = dma_map_single((void *)nfc->page_buf,
+ mtd->writesize, DMA_TO_DEVICE);
+ dma_oob = dma_map_single(nfc->oob_buf,
+ ecc->steps * oob_step,
+ DMA_TO_DEVICE);
+
+ rk_nfc_xfer_start(nfc, NFC_WRITE, ecc->steps, dma_data,
+ dma_oob);
+ ret = rk_nfc_wait_for_xfer_done(nfc);
+
+ dma_unmap_single(dma_data, mtd->writesize,
+ DMA_TO_DEVICE);
+ dma_unmap_single(dma_oob, ecc->steps * oob_step,
+ DMA_TO_DEVICE);
+
+ if (boot_rom_mode && rknand->boot_ecc != ecc->strength)
+ rk_nfc_hw_ecc_setup(chip, ecc->strength);
+
+ if (ret) {
+ dev_err(nfc->dev, "write: wait transfer done timeout.\n");
+ return -ETIMEDOUT;
+ }
+
+ return nand_prog_page_end_op(chip);
+}
+
+static int rk_nfc_write_oob(struct mtd_info *mtd,
+ struct nand_chip *chip, int page)
+{
+ return rk_nfc_write_page_hwecc(mtd, chip, NULL, 1, page);
+}
+
+static int rk_nfc_read_page_raw(struct mtd_info *mtd,
+ struct nand_chip *chip,
+ u8 *buf,
+ int oob_required,
+ int page)
+{
+ struct rk_nfc_nand_chip *rknand = rk_nfc_to_rknand(chip);
+ struct rk_nfc *nfc = nand_get_controller_data(chip);
+ struct nand_ecc_ctrl *ecc = &chip->ecc;
+ int i, pages_per_blk;
+
+ pages_per_blk = mtd->erasesize / mtd->writesize;
+ if ((page < (pages_per_blk * rknand->boot_blks)) &&
+ nfc->selected_bank == 0 &&
+ rknand->boot_ecc != ecc->strength) {
+ /*
+ * There's currently no method to notify the MTD framework that
+ * a different ECC strength is in use for the boot blocks.
+ */
+ return -EIO;
+ }
+
+ nand_read_page_op(chip, page, 0, NULL, 0);
+ rk_nfc_read_buf(mtd, nfc->page_buf, mtd->writesize + mtd->oobsize);
+ for (i = 0; i < ecc->steps; i++) {
+ /*
+ * The first four bytes of OOB are reserved for the
+ * boot ROM. In some debugging cases, such as with a read,
+ * erase and write back test, these 4 bytes also must be
+ * saved somewhere, otherwise this information will be
+ * lost during a write back.
+ */
+ if (!i)
+ memcpy(rk_nfc_buf_to_oob_ptr(chip, ecc->steps - 1),
+ rk_nfc_oob_ptr(chip, i),
+ NFC_SYS_DATA_SIZE);
+ else
+ memcpy(rk_nfc_buf_to_oob_ptr(chip, i - 1),
+ rk_nfc_oob_ptr(chip, i),
+ NFC_SYS_DATA_SIZE);
+
+ /* Copy ECC data from the NFC buffer. */
+ memcpy(rk_nfc_buf_to_oob_ecc_ptr(chip, i),
+ rk_nfc_oob_ptr(chip, i) + NFC_SYS_DATA_SIZE,
+ ecc->bytes);
+
+ /* Copy data from the NFC buffer. */
+ if (buf)
+ memcpy(rk_nfc_buf_to_data_ptr(chip, buf, i),
+ rk_nfc_data_ptr(chip, i),
+ ecc->size);
+ }
+
+ return 0;
+}
+
+static int rk_nfc_read_page_hwecc(struct mtd_info *mtd,
+ struct nand_chip *chip,
+ u8 *buf,
+ int oob_required,
+ int page)
+{
+ struct rk_nfc *nfc = nand_get_controller_data(chip);
+ struct rk_nfc_nand_chip *rknand = rk_nfc_to_rknand(chip);
+ struct nand_ecc_ctrl *ecc = &chip->ecc;
+ int oob_step = (ecc->bytes > 60) ? NFC_MAX_OOB_PER_STEP :
+ NFC_MIN_OOB_PER_STEP;
+ int pages_per_blk = mtd->erasesize / mtd->writesize;
+ dma_addr_t dma_data, dma_oob;
+ int ret = 0, i, cnt, boot_rom_mode = 0;
+ int max_bitflips = 0, bch_st, ecc_fail = 0;
+ u8 *oob;
+ u32 tmp;
+
+ nand_read_page_op(chip, page, 0, NULL, 0);
+
+ dma_data = dma_map_single(nfc->page_buf,
+ mtd->writesize,
+ DMA_FROM_DEVICE);
+ dma_oob = dma_map_single(nfc->oob_buf,
+ ecc->steps * oob_step,
+ DMA_FROM_DEVICE);
+
+ /*
+ * The first blocks (4, 8 or 16 depending on the device)
+ * are used by the boot ROM.
+ * Configure the ECC algorithm supported by the boot ROM.
+ */
+ if (page < (pages_per_blk * rknand->boot_blks) &&
+ nfc->selected_bank == 0) {
+ boot_rom_mode = 1;
+ if (rknand->boot_ecc != ecc->strength)
+ rk_nfc_hw_ecc_setup(chip, rknand->boot_ecc);
+ }
+
+ rk_nfc_xfer_start(nfc, NFC_READ, ecc->steps, dma_data,
+ dma_oob);
+ ret = rk_nfc_wait_for_xfer_done(nfc);
+
+ dma_unmap_single(dma_data, mtd->writesize,
+ DMA_FROM_DEVICE);
+ dma_unmap_single(dma_oob, ecc->steps * oob_step,
+ DMA_FROM_DEVICE);
+
+ if (ret) {
+ ret = -ETIMEDOUT;
+ dev_err(nfc->dev, "read: wait transfer done timeout.\n");
+ goto timeout_err;
+ }
+
+ for (i = 1; i < ecc->steps; i++) {
+ oob = chip->oob_poi + (i - 1) * NFC_SYS_DATA_SIZE;
+ if (nfc->cfg->type == NFC_V9)
+ tmp = nfc->oob_buf[i];
+ else
+ tmp = nfc->oob_buf[i * (oob_step / 4)];
+ *oob++ = (u8)tmp;
+ *oob++ = (u8)(tmp >> 8);
+ *oob++ = (u8)(tmp >> 16);
+ *oob++ = (u8)(tmp >> 24);
+ }
+
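+	/* Each BCH status register reports the result of two ECC steps. */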
+ for (i = 0; i < (ecc->steps / 2); i++) {
+ bch_st = readl_relaxed(nfc->regs +
+ nfc->cfg->bch_st_off + i * 4);
+ if (bch_st & BIT(nfc->cfg->ecc0.err_flag_bit) ||
+ bch_st & BIT(nfc->cfg->ecc1.err_flag_bit)) {
+ mtd->ecc_stats.failed++;
+ ecc_fail = 1;
+ } else {
+ cnt = ECC_ERR_CNT(bch_st, nfc->cfg->ecc0);
+ mtd->ecc_stats.corrected += cnt;
+ max_bitflips = max_t(u32, max_bitflips, cnt);
+
+ cnt = ECC_ERR_CNT(bch_st, nfc->cfg->ecc1);
+ mtd->ecc_stats.corrected += cnt;
+ max_bitflips = max_t(u32, max_bitflips, cnt);
+ }
+ }
+
+ if (buf)
+ memcpy(buf, nfc->page_buf, mtd->writesize);
+
+timeout_err:
+ if (boot_rom_mode && rknand->boot_ecc != ecc->strength)
+ rk_nfc_hw_ecc_setup(chip, ecc->strength);
+
+ if (ret)
+ return ret;
+
+ if (ecc_fail) {
+ dev_err(nfc->dev, "read page: %x ecc error!\n", page);
+ return 0;
+ }
+
+ return max_bitflips;
+}
+
+static int rk_nfc_read_oob(struct mtd_info *mtd,
+ struct nand_chip *chip, int page)
+{
+ return rk_nfc_read_page_hwecc(mtd, chip, NULL, 1, page);
+}
+
+static inline void rk_nfc_hw_init(struct rk_nfc *nfc)
+{
+ /* Disable flash wp. */
+ writel(FMCTL_WP, nfc->regs + NFC_FMCTL);
+	/* Configure the default timing of 40ns at a 150 MHz NFC clock. */
+ writel(0x1081, nfc->regs + NFC_FMWAIT);
+ nfc->cur_timing = 0x1081;
+ /* Disable randomizer and DMA. */
+ writel(0, nfc->regs + nfc->cfg->randmz_off);
+ writel(0, nfc->regs + nfc->cfg->dma_cfg_off);
+ writel(FLCTL_RST, nfc->regs + nfc->cfg->flctl_off);
+}
+
+static int rk_nfc_enable_clks(struct udevice *dev, struct rk_nfc *nfc)
+{
+ int ret;
+
+ if (!IS_ERR(nfc->nfc_clk)) {
+ ret = clk_prepare_enable(nfc->nfc_clk);
+ if (ret)
+ dev_err(dev, "failed to enable NFC clk\n");
+ }
+
+ ret = clk_prepare_enable(nfc->ahb_clk);
+ if (ret) {
+ dev_err(dev, "failed to enable ahb clk\n");
+ if (!IS_ERR(nfc->nfc_clk))
+			clk_disable_unprepare(nfc->nfc_clk);
+		return ret;
+	}
+
+ return 0;
+}
+
+static void rk_nfc_disable_clks(struct rk_nfc *nfc)
+{
+ if (!IS_ERR(nfc->nfc_clk))
+ clk_disable_unprepare(nfc->nfc_clk);
+ clk_disable_unprepare(nfc->ahb_clk);
+}
+
+static int rk_nfc_ooblayout_free(struct mtd_info *mtd, int section,
+ struct mtd_oob_region *oob_region)
+{
+ struct nand_chip *chip = mtd_to_nand(mtd);
+ struct rk_nfc_nand_chip *rknand = rk_nfc_to_rknand(chip);
+
+ if (section)
+ return -ERANGE;
+
+ /*
+ * The beginning of the OOB area stores the reserved data for the NFC,
+ * the size of the reserved data is NFC_SYS_DATA_SIZE bytes.
+ */
+ oob_region->length = rknand->metadata_size - NFC_SYS_DATA_SIZE - 2;
+ oob_region->offset = NFC_SYS_DATA_SIZE + 2;
+
+ return 0;
+}
+
+static int rk_nfc_ooblayout_ecc(struct mtd_info *mtd, int section,
+ struct mtd_oob_region *oob_region)
+{
+ struct nand_chip *chip = mtd_to_nand(mtd);
+ struct rk_nfc_nand_chip *rknand = rk_nfc_to_rknand(chip);
+
+ if (section)
+ return -ERANGE;
+
+ oob_region->length = mtd->oobsize - rknand->metadata_size;
+ oob_region->offset = rknand->metadata_size;
+
+ return 0;
+}
+
+static const struct mtd_ooblayout_ops rk_nfc_ooblayout_ops = {
+ .rfree = rk_nfc_ooblayout_free,
+ .ecc = rk_nfc_ooblayout_ecc,
+};
+
+static int rk_nfc_ecc_init(struct rk_nfc *nfc, struct nand_chip *chip)
+{
+ const u8 *strengths = nfc->cfg->ecc_strengths;
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ struct nand_ecc_ctrl *ecc = &chip->ecc;
+ u8 max_strength, nfc_max_strength;
+ int i;
+
+ nfc_max_strength = nfc->cfg->ecc_strengths[0];
+	/* Use defaults if the optional DT settings are absent or unsupported. */
+ if (!ecc->size || !ecc->strength ||
+ ecc->strength > nfc_max_strength) {
+ chip->ecc.size = 1024;
+ ecc->steps = mtd->writesize / ecc->size;
+
+ /*
+ * HW ECC always requests the number of ECC bytes per 1024 byte
+ * blocks. The first 4 OOB bytes are reserved for sys data.
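+		 * Each bit of ECC strength costs fls(8 * 1024) = 14 parity
+		 * bits, hence the division of the free OOB bits below.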
+ */
+ max_strength = ((mtd->oobsize / ecc->steps) - 4) * 8 /
+ fls(8 * 1024);
+ if (max_strength > nfc_max_strength)
+ max_strength = nfc_max_strength;
+
+ for (i = 0; i < 4; i++) {
+ if (max_strength >= strengths[i])
+ break;
+ }
+
+ if (i >= 4) {
+ dev_err(nfc->dev, "unsupported ECC strength\n");
+ return -EOPNOTSUPP;
+ }
+
+ ecc->strength = strengths[i];
+ }
+ ecc->steps = mtd->writesize / ecc->size;
+ ecc->bytes = DIV_ROUND_UP(ecc->strength * fls(8 * chip->ecc.size), 8);
+
+ return 0;
+}
+
+static int rk_nfc_nand_chip_init(ofnode node, struct rk_nfc *nfc, int devnum)
+{
+ struct rk_nfc_nand_chip *rknand;
+ struct udevice *dev = nfc->dev;
+ struct nand_ecc_ctrl *ecc;
+ struct nand_chip *chip;
+ struct mtd_info *mtd;
+ u32 cs[NFC_MAX_NSELS];
+ int nsels, i, ret;
+ u32 tmp;
+
+ if (!ofnode_get_property(node, "reg", &nsels))
+ return -ENODEV;
+ nsels /= sizeof(u32);
+ if (!nsels || nsels > NFC_MAX_NSELS) {
+ dev_err(dev, "invalid reg property size %d\n", nsels);
+ return -EINVAL;
+ }
+
+ rknand = kzalloc(sizeof(*rknand) + nsels * sizeof(u8), GFP_KERNEL);
+ if (!rknand)
+ return -ENOMEM;
+
+ rknand->nsels = nsels;
+ rknand->timing = nfc->cur_timing;
+
+ ret = ofnode_read_u32_array(node, "reg", cs, nsels);
+ if (ret < 0) {
+ dev_err(dev, "Could not retrieve reg property\n");
+ return -EINVAL;
+ }
+
+ for (i = 0; i < nsels; i++) {
+ if (cs[i] >= NFC_MAX_NSELS) {
+ dev_err(dev, "invalid CS: %u\n", cs[i]);
+ return -EINVAL;
+ }
+
+ if (test_and_set_bit(cs[i], &nfc->assigned_cs)) {
+ dev_err(dev, "CS %u already assigned\n", cs[i]);
+ return -EINVAL;
+ }
+
+ rknand->sels[i] = cs[i];
+ }
+
+ chip = &rknand->chip;
+ ecc = &chip->ecc;
+ ecc->mode = NAND_ECC_HW_SYNDROME;
+
+ ret = ofnode_read_u32(node, "nand-ecc-strength", &tmp);
+ ecc->strength = ret ? 0 : tmp;
+
+ ret = ofnode_read_u32(node, "nand-ecc-step-size", &tmp);
+ ecc->size = ret ? 0 : tmp;
+
+ mtd = nand_to_mtd(chip);
+ mtd->owner = THIS_MODULE;
+ mtd->dev->parent = dev;
+
+ nand_set_controller_data(chip, nfc);
+
+ chip->chip_delay = NFC_RB_DELAY_US;
+ chip->select_chip = rk_nfc_select_chip;
+ chip->cmd_ctrl = rk_nfc_cmd;
+ chip->read_buf = rk_nfc_read_buf;
+ chip->write_buf = rk_nfc_write_buf;
+ chip->read_byte = rockchip_nand_read_byte;
+ chip->dev_ready = rockchip_nand_dev_ready;
+ chip->controller = &nfc->controller;
+
+ chip->bbt_options = NAND_BBT_USE_FLASH | NAND_BBT_NO_OOB;
+ chip->options |= NAND_NO_SUBPAGE_WRITE | NAND_USE_BOUNCE_BUFFER;
+
+ mtd_set_ooblayout(mtd, &rk_nfc_ooblayout_ops);
+ rk_nfc_hw_init(nfc);
+ ret = nand_scan_ident(mtd, nsels, NULL);
+ if (ret)
+ return ret;
+
+ ret = rk_nfc_ecc_init(nfc, chip);
+ if (ret) {
+ dev_err(dev, "rk_nfc_ecc_init failed: %d\n", ret);
+ return ret;
+ }
+
+ ret = ofnode_read_u32(node, "rockchip,boot-blks", &tmp);
+ rknand->boot_blks = ret ? 0 : tmp;
+
+ ret = ofnode_read_u32(node, "rockchip,boot-ecc-strength", &tmp);
+ rknand->boot_ecc = ret ? ecc->strength : tmp;
+
+ rknand->metadata_size = NFC_SYS_DATA_SIZE * ecc->steps;
+
+ if (rknand->metadata_size < NFC_SYS_DATA_SIZE + 2) {
+ dev_err(dev,
+ "driver needs at least %d bytes of meta data\n",
+ NFC_SYS_DATA_SIZE + 2);
+ return -EIO;
+ }
+
+ if (!nfc->page_buf) {
+ nfc->page_buf = kzalloc(NFC_MAX_PAGE_SIZE, GFP_KERNEL);
+ if (!nfc->page_buf)
+ return -ENOMEM;
+ }
+
+ if (!nfc->oob_buf) {
+ nfc->oob_buf = kzalloc(NFC_MAX_OOB_SIZE, GFP_KERNEL);
+ if (!nfc->oob_buf) {
+ kfree(nfc->page_buf);
+ nfc->page_buf = NULL;
+ return -ENOMEM;
+ }
+ }
+
+ ecc->read_page = rk_nfc_read_page_hwecc;
+ ecc->read_page_raw = rk_nfc_read_page_raw;
+ ecc->read_oob = rk_nfc_read_oob;
+ ecc->write_page = rk_nfc_write_page_hwecc;
+ ecc->write_page_raw = rk_nfc_write_page_raw;
+ ecc->write_oob = rk_nfc_write_oob;
+
+ ret = nand_scan_tail(mtd);
+ if (ret) {
+ dev_err(dev, "nand_scan_tail failed: %d\n", ret);
+ return ret;
+ }
+
+ return nand_register(devnum, mtd);
+}
+
+static int rk_nfc_nand_chips_init(struct udevice *dev, struct rk_nfc *nfc)
+{
+ int ret, i = 0;
+ ofnode child;
+
+ ofnode_for_each_subnode(child, dev_ofnode(dev)) {
+ ret = rk_nfc_nand_chip_init(child, nfc, i++);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+static struct nfc_cfg nfc_v6_cfg = {
+ .type = NFC_V6,
+ .ecc_strengths = {60, 40, 24, 16},
+ .ecc_cfgs = {
+ 0x00040011, 0x00040001, 0x00000011, 0x00000001,
+ },
+ .flctl_off = 0x08,
+ .bchctl_off = 0x0C,
+ .dma_cfg_off = 0x10,
+ .dma_data_buf_off = 0x14,
+ .dma_oob_buf_off = 0x18,
+ .dma_st_off = 0x1C,
+ .bch_st_off = 0x20,
+ .randmz_off = 0x150,
+ .int_en_off = 0x16C,
+ .int_clr_off = 0x170,
+ .int_st_off = 0x174,
+ .oob0_off = 0x200,
+ .oob1_off = 0x230,
+ .ecc0 = {
+ .err_flag_bit = 2,
+ .low = 3,
+ .low_mask = 0x1F,
+ .low_bn = 5,
+ .high = 27,
+ .high_mask = 0x1,
+ },
+ .ecc1 = {
+ .err_flag_bit = 15,
+ .low = 16,
+ .low_mask = 0x1F,
+ .low_bn = 5,
+ .high = 29,
+ .high_mask = 0x1,
+ },
+};
+
+static struct nfc_cfg nfc_v8_cfg = {
+ .type = NFC_V8,
+ .ecc_strengths = {16, 16, 16, 16},
+ .ecc_cfgs = {
+ 0x00000001, 0x00000001, 0x00000001, 0x00000001,
+ },
+ .flctl_off = 0x08,
+ .bchctl_off = 0x0C,
+ .dma_cfg_off = 0x10,
+ .dma_data_buf_off = 0x14,
+ .dma_oob_buf_off = 0x18,
+ .dma_st_off = 0x1C,
+ .bch_st_off = 0x20,
+ .randmz_off = 0x150,
+ .int_en_off = 0x16C,
+ .int_clr_off = 0x170,
+ .int_st_off = 0x174,
+ .oob0_off = 0x200,
+ .oob1_off = 0x230,
+ .ecc0 = {
+ .err_flag_bit = 2,
+ .low = 3,
+ .low_mask = 0x1F,
+ .low_bn = 5,
+ .high = 27,
+ .high_mask = 0x1,
+ },
+ .ecc1 = {
+ .err_flag_bit = 15,
+ .low = 16,
+ .low_mask = 0x1F,
+ .low_bn = 5,
+ .high = 29,
+ .high_mask = 0x1,
+ },
+};
+
+static struct nfc_cfg nfc_v9_cfg = {
+ .type = NFC_V9,
+ .ecc_strengths = {70, 60, 40, 16},
+ .ecc_cfgs = {
+ 0x00000001, 0x06000001, 0x04000001, 0x02000001,
+ },
+ .flctl_off = 0x10,
+ .bchctl_off = 0x20,
+ .dma_cfg_off = 0x30,
+ .dma_data_buf_off = 0x34,
+ .dma_oob_buf_off = 0x38,
+ .dma_st_off = 0x3C,
+ .bch_st_off = 0x150,
+ .randmz_off = 0x208,
+ .int_en_off = 0x120,
+ .int_clr_off = 0x124,
+ .int_st_off = 0x128,
+ .oob0_off = 0x200,
+ .oob1_off = 0x204,
+ .ecc0 = {
+ .err_flag_bit = 2,
+ .low = 3,
+ .low_mask = 0x7F,
+ .low_bn = 7,
+ .high = 0,
+ .high_mask = 0x0,
+ },
+ .ecc1 = {
+ .err_flag_bit = 18,
+ .low = 19,
+ .low_mask = 0x7F,
+ .low_bn = 7,
+ .high = 0,
+ .high_mask = 0x0,
+ },
+};
+
+static const struct udevice_id rk_nfc_id_table[] = {
+ {
+ .compatible = "rockchip,px30-nfc",
+ .data = (unsigned long)&nfc_v9_cfg
+ },
+ {
+ .compatible = "rockchip,rk2928-nfc",
+ .data = (unsigned long)&nfc_v6_cfg
+ },
+ {
+ .compatible = "rockchip,rv1108-nfc",
+ .data = (unsigned long)&nfc_v8_cfg
+ },
+ {
+ .compatible = "rockchip,rk3308-nfc",
+ .data = (unsigned long)&nfc_v8_cfg
+ },
+ { /* sentinel */ }
+};
+
+static int rk_nfc_probe(struct udevice *dev)
+{
+ struct rk_nfc *nfc = dev_get_priv(dev);
+ int ret = 0;
+
+ nfc->cfg = (void *)dev_get_driver_data(dev);
+ nfc->dev = dev;
+
+ nfc->regs = (void *)dev_read_addr(dev);
+ if (IS_ERR(nfc->regs)) {
+ ret = PTR_ERR(nfc->regs);
+ goto release_nfc;
+ }
+
+ nfc->nfc_clk = devm_clk_get(dev, "nfc");
+ if (IS_ERR(nfc->nfc_clk)) {
+ dev_dbg(dev, "no NFC clk\n");
+ /* Some earlier models, such as rk3066, have no NFC clk. */
+ }
+
+ nfc->ahb_clk = devm_clk_get(dev, "ahb");
+ if (IS_ERR(nfc->ahb_clk)) {
+ dev_err(dev, "no ahb clk\n");
+ ret = PTR_ERR(nfc->ahb_clk);
+ goto release_nfc;
+ }
+
+ ret = rk_nfc_enable_clks(dev, nfc);
+ if (ret)
+ goto release_nfc;
+
+ spin_lock_init(&nfc->controller.lock);
+ init_waitqueue_head(&nfc->controller.wq);
+
+ rk_nfc_hw_init(nfc);
+
+ ret = rk_nfc_nand_chips_init(dev, nfc);
+ if (ret) {
+ dev_err(dev, "failed to init NAND chips\n");
+ goto clk_disable;
+ }
+ return 0;
+
+clk_disable:
+ rk_nfc_disable_clks(nfc);
+release_nfc:
+ return ret;
+}
+
+U_BOOT_DRIVER(rockchip_nfc) = {
+ .name = "rockchip_nfc",
+ .id = UCLASS_MTD,
+ .of_match = rk_nfc_id_table,
+ .probe = rk_nfc_probe,
+ .priv_auto = sizeof(struct rk_nfc),
+};
+
+void board_nand_init(void)
+{
+ struct udevice *dev;
+ int ret;
+
+ ret = uclass_get_device_by_driver(UCLASS_MTD,
+ DM_DRIVER_GET(rockchip_nfc),
+ &dev);
+ if (ret && ret != -ENODEV)
+ log_err("Failed to initialize ROCKCHIP NAND controller. (error %d)\n",
+ ret);
+}
+
+int nand_spl_load_image(uint32_t offs, unsigned int size, void *dst)
+{
+ struct mtd_info *mtd;
+ size_t length = size;
+
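+	/* Read from the first NAND device, skipping bad blocks. */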
+ mtd = get_nand_dev_by_index(0);
+ return nand_read_skip_bad(mtd, offs, &length, NULL, size, (u_char *)dst);
+}
+
+void nand_deselect(void) {}
diff --git a/drivers/pci/pcie_dw_rockchip.c b/drivers/pci/pcie_dw_rockchip.c
index bc22af4230c..9322e735b9c 100644
--- a/drivers/pci/pcie_dw_rockchip.c
+++ b/drivers/pci/pcie_dw_rockchip.c
@@ -61,13 +61,13 @@ struct rk_pcie {
#define PCIE_CLIENT_DBF_EN 0xffff0003
/* Parameters for the waiting for #perst signal */
-#define PERST_WAIT_MS 1000
+#define MACRO_US 1000
static int rk_pcie_read(void __iomem *addr, int size, u32 *val)
{
if ((uintptr_t)addr & (size - 1)) {
*val = 0;
- return PCIBIOS_UNSUPPORTED;
+ return -EOPNOTSUPP;
}
if (size == 4) {
@@ -87,7 +87,7 @@ static int rk_pcie_read(void __iomem *addr, int size, u32 *val)
static int rk_pcie_write(void __iomem *addr, int size, u32 val)
{
if ((uintptr_t)addr & (size - 1))
- return PCIBIOS_UNSUPPORTED;
+ return -EOPNOTSUPP;
if (size == 4)
writel(val, addr);
@@ -158,8 +158,6 @@ static inline void rk_pcie_writel_apb(struct rk_pcie *rk_pcie, u32 reg,
*/
static void rk_pcie_configure(struct rk_pcie *pci, u32 cap_speed)
{
- u32 val;
-
dw_pcie_dbi_write_enable(&pci->dw, true);
clrsetbits_le32(pci->dw.dbi_base + PCIE_LINK_CAPABILITY,
@@ -251,7 +249,7 @@ static int rk_pcie_link_up(struct rk_pcie *priv, u32 cap_speed)
* some wired devices need much more, such as 600ms.
* Add a enough delay to cover all cases.
*/
- msleep(PERST_WAIT_MS);
+ udelay(MACRO_US * 1000);
dm_gpio_set_value(&priv->rst_gpio, 1);
}
@@ -273,12 +271,12 @@ static int rk_pcie_link_up(struct rk_pcie *priv, u32 cap_speed)
dev_info(priv->dw.dev, "PCIe Linking... LTSSM is 0x%x\n",
rk_pcie_readl_apb(priv, PCIE_CLIENT_LTSSM_STATUS));
rk_pcie_debug_dump(priv);
- msleep(1000);
+ udelay(MACRO_US * 1000);
}
dev_err(priv->dw.dev, "PCIe-%d Link Fail\n", dev_seq(priv->dw.dev));
/* Link maybe in Gen switch recovery but we need to wait more 1s */
- msleep(1000);
+ udelay(MACRO_US * 1000);
return -EIO;
}
@@ -298,7 +296,7 @@ static int rockchip_pcie_init_port(struct udevice *dev)
}
}
- msleep(1000);
+ udelay(MACRO_US * 1000);
ret = generic_phy_init(&priv->phy);
if (ret) {
diff --git a/drivers/ram/rockchip/Makefile b/drivers/ram/rockchip/Makefile
index c3ec89ada4c..ca1c289b884 100644
--- a/drivers/ram/rockchip/Makefile
+++ b/drivers/ram/rockchip/Makefile
@@ -12,4 +12,5 @@ obj-$(CONFIG_ROCKCHIP_RK3288) += sdram_rk3288.o
obj-$(CONFIG_ROCKCHIP_RK3308) += sdram_rk3308.o
obj-$(CONFIG_ROCKCHIP_RK3328) += sdram_rk3328.o sdram_pctl_px30.o sdram_phy_px30.o
obj-$(CONFIG_ROCKCHIP_RK3399) += sdram_rk3399.o
+obj-$(CONFIG_ROCKCHIP_RK3568) += sdram_rk3568.o
obj-$(CONFIG_ROCKCHIP_SDRAM_COMMON) += sdram_common.o
diff --git a/drivers/ram/rockchip/sdram_rk3568.c b/drivers/ram/rockchip/sdram_rk3568.c
new file mode 100644
index 00000000000..0ac4b54eef3
--- /dev/null
+++ b/drivers/ram/rockchip/sdram_rk3568.c
@@ -0,0 +1,56 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * (C) Copyright 2021 Rockchip Electronics Co., Ltd.
+ */
+
+#include <common.h>
+#include <dm.h>
+#include <ram.h>
+#include <syscon.h>
+#include <asm/arch-rockchip/clock.h>
+#include <asm/arch-rockchip/grf_rk3568.h>
+#include <asm/arch-rockchip/sdram.h>
+
+struct dram_info {
+ struct ram_info info;
+ struct rk3568_pmugrf *pmugrf;
+};
+
+static int rk3568_dmc_probe(struct udevice *dev)
+{
+ struct dram_info *priv = dev_get_priv(dev);
+
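+	/*
+	 * The DRAM size is decoded from PMU_GRF os_reg2, which the DDR
+	 * init code fills in.
+	 */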
+ priv->pmugrf = syscon_get_first_range(ROCKCHIP_SYSCON_PMUGRF);
+ priv->info.base = CONFIG_SYS_SDRAM_BASE;
+ priv->info.size =
+ rockchip_sdram_size((phys_addr_t)&priv->pmugrf->pmu_os_reg2);
+
+ return 0;
+}
+
+static int rk3568_dmc_get_info(struct udevice *dev, struct ram_info *info)
+{
+ struct dram_info *priv = dev_get_priv(dev);
+
+ *info = priv->info;
+
+ return 0;
+}
+
+static struct ram_ops rk3568_dmc_ops = {
+ .get_info = rk3568_dmc_get_info,
+};
+
+static const struct udevice_id rk3568_dmc_ids[] = {
+ { .compatible = "rockchip,rk3568-dmc" },
+ { }
+};
+
+U_BOOT_DRIVER(dmc_rk3568) = {
+ .name = "rockchip_rk3568_dmc",
+ .id = UCLASS_RAM,
+ .of_match = rk3568_dmc_ids,
+ .ops = &rk3568_dmc_ops,
+ .probe = rk3568_dmc_probe,
+ .priv_auto = sizeof(struct dram_info),
+};