Diffstat (limited to 'drivers')
-rw-r--r--  drivers/Makefile | 1
-rw-r--r--  drivers/ata/sata_mv.c | 29
-rw-r--r--  drivers/ata/sata_sil.c | 8
-rw-r--r--  drivers/ata/sata_sil.h | 4
-rw-r--r--  drivers/bios_emulator/atibios.c | 98
-rw-r--r--  drivers/bios_emulator/bios.c | 39
-rw-r--r--  drivers/bootcount/Kconfig | 12
-rw-r--r--  drivers/bootcount/Makefile | 1
-rw-r--r--  drivers/bootcount/bootcount_syscon.c | 159
-rw-r--r--  drivers/clk/Kconfig | 9
-rw-r--r--  drivers/clk/Makefile | 1
-rw-r--r--  drivers/clk/altera/Makefile | 4
-rw-r--r--  drivers/clk/altera/clk-mem-n5x.c | 136
-rw-r--r--  drivers/clk/altera/clk-mem-n5x.h | 84
-rw-r--r--  drivers/clk/altera/clk-n5x.c | 489
-rw-r--r--  drivers/clk/altera/clk-n5x.h | 217
-rw-r--r--  drivers/clk/clk_stm32mp1.c | 1
-rw-r--r--  drivers/clk/clk_versaclock.c | 1100
-rw-r--r--  drivers/clk/rockchip/clk_px30.c | 32
-rw-r--r--  drivers/core/fdtaddr.c | 3
-rw-r--r--  drivers/core/uclass.c | 3
-rw-r--r--  drivers/core/util.c | 11
-rw-r--r--  drivers/crypto/fsl/jobdesc.c | 2
-rw-r--r--  drivers/ddr/altera/Makefile | 3
-rw-r--r--  drivers/ddr/altera/sdram_n5x.c | 2298
-rw-r--r--  drivers/ddr/altera/sdram_soc64.c | 94
-rw-r--r--  drivers/ddr/altera/sdram_soc64.h | 1
-rw-r--r--  drivers/ddr/imx/Kconfig | 1
-rw-r--r--  drivers/ddr/imx/imx8ulp/Kconfig | 11
-rw-r--r--  drivers/ddr/imx/imx8ulp/Makefile | 9
-rw-r--r--  drivers/ddr/imx/imx8ulp/ddr_init.c | 217
-rw-r--r--  drivers/fastboot/fb_mmc.c | 26
-rw-r--r--  drivers/gpio/Kconfig | 10
-rw-r--r--  drivers/gpio/Makefile | 1
-rw-r--r--  drivers/gpio/mcp230xx_gpio.c | 235
-rw-r--r--  drivers/i2c/i2c-gpio.c | 9
-rw-r--r--  drivers/i2c/i2c-uclass.c | 15
-rw-r--r--  drivers/misc/Makefile | 1
-rw-r--r--  drivers/misc/imx8ulp/Makefile | 4
-rw-r--r--  drivers/misc/imx8ulp/fuse.c | 198
-rw-r--r--  drivers/misc/imx8ulp/imx8ulp_mu.c | 234
-rw-r--r--  drivers/misc/imx8ulp/s400_api.c | 244
-rw-r--r--  drivers/mmc/Kconfig | 2
-rw-r--r--  drivers/mmc/fsl_esdhc_imx.c | 12
-rw-r--r--  drivers/mmc/rockchip_sdhci.c | 422
-rw-r--r--  drivers/mtd/Kconfig | 1
-rw-r--r--  drivers/mtd/spi/Kconfig | 6
-rw-r--r--  drivers/mtd/spi/spi-nor-ids.c | 4
-rw-r--r--  drivers/net/Kconfig | 2
-rw-r--r--  drivers/net/fec_mxc.c | 2
-rw-r--r--  drivers/net/phy/Kconfig | 10
-rw-r--r--  drivers/net/phy/cortina.c | 79
-rw-r--r--  drivers/pci/Makefile | 1
-rw-r--r--  drivers/pci/pci-aardvark.c | 52
-rw-r--r--  drivers/pci/pci_common.c | 4
-rw-r--r--  drivers/pci/pci_ftpci100.c | 319
-rw-r--r--  drivers/pci/pci_gt64120.c | 64
-rw-r--r--  drivers/pci/pci_msc01.c | 64
-rw-r--r--  drivers/pci/pcie_imx.c | 81
-rw-r--r--  drivers/pinctrl/nxp/Kconfig | 14
-rw-r--r--  drivers/pinctrl/nxp/Makefile | 1
-rw-r--r--  drivers/pinctrl/nxp/pinctrl-imx8ulp.c | 44
-rw-r--r--  drivers/scsi/scsi.c | 6
-rw-r--r--  drivers/spi/Kconfig | 8
-rw-r--r--  drivers/spi/Makefile | 1
-rw-r--r--  drivers/spi/nxp_fspi.c | 2
-rw-r--r--  drivers/spi/rockchip_sfc.c | 646
-rw-r--r--  drivers/usb/host/ohci-hcd.c | 2
-rw-r--r--  drivers/video/Kconfig | 13
-rw-r--r--  drivers/video/Makefile | 1
-rw-r--r--  drivers/video/mcde_simple.c | 141
71 files changed, 7260 insertions(+), 798 deletions(-)
diff --git a/drivers/Makefile b/drivers/Makefile
index 56749278f43..fd218c90563 100644
--- a/drivers/Makefile
+++ b/drivers/Makefile
@@ -45,6 +45,7 @@ obj-$(CONFIG_ARMADA_38X) += ddr/marvell/a38x/
obj-$(CONFIG_ARMADA_XP) += ddr/marvell/axp/
obj-$(CONFIG_$(SPL_)ALTERA_SDRAM) += ddr/altera/
obj-$(CONFIG_ARCH_IMX8M) += ddr/imx/imx8m/
+obj-$(CONFIG_IMX8ULP_DRAM) += ddr/imx/imx8ulp/
obj-$(CONFIG_SPL_POWER) += power/ power/pmic/
obj-$(CONFIG_SPL_POWER) += power/regulator/
obj-$(CONFIG_SPL_POWER_DOMAIN) += power/domain/
diff --git a/drivers/ata/sata_mv.c b/drivers/ata/sata_mv.c
index 1012cb53742..dadb2c7c2e7 100644
--- a/drivers/ata/sata_mv.c
+++ b/drivers/ata/sata_mv.c
@@ -809,6 +809,7 @@ static int mv_ata_exec_ata_cmd_nondma(struct udevice *dev, int port,
static int mv_sata_identify(struct udevice *dev, int port, u16 *id)
{
struct sata_fis_h2d h2d;
+ int len;
memset(&h2d, 0, sizeof(struct sata_fis_h2d));
@@ -818,8 +819,32 @@ static int mv_sata_identify(struct udevice *dev, int port, u16 *id)
/* Give device time to get operational */
mdelay(10);
- return mv_ata_exec_ata_cmd_nondma(dev, port, &h2d, (u8 *)id,
- ATA_ID_WORDS * 2, READ_CMD);
+ /* During cold start, with some HDDs, the first ATA ID command does
+ * not populate the ID words. In fact, the first ATA ID
+ * command will only power up the drive, and then the ATA ID command
+ * processing is lost in the process.
+ */
+ len = mv_ata_exec_ata_cmd_nondma(dev, port, &h2d, (u8 *)id,
+ ATA_ID_WORDS * 2, READ_CMD);
+
+ /* If drive capacity has been filled in, then it was successfully
+ * identified (the drive has been powered up before, i.e.
+ * this function is invoked during a reboot)
+ */
+ if (ata_id_n_sectors(id) != 0)
+ return len;
+
+ /* Issue the 2nd ATA ID command to make sure the ID words are
+ * populated properly.
+ */
+ mdelay(10);
+ len = mv_ata_exec_ata_cmd_nondma(dev, port, &h2d, (u8 *)id,
+ ATA_ID_WORDS * 2, READ_CMD);
+ if (ata_id_n_sectors(id) != 0)
+ return len;
+
+ printf("Err: Failed to identify SATA device %d\n", port);
+ return -ENODEV;
}
static void mv_sata_xfer_mode(struct udevice *dev, int port, u16 *id)
diff --git a/drivers/ata/sata_sil.c b/drivers/ata/sata_sil.c
index 7e4e97d803e..dda712f42cb 100644
--- a/drivers/ata/sata_sil.c
+++ b/drivers/ata/sata_sil.c
@@ -27,11 +27,7 @@
#include "sata_sil.h"
-#ifdef CONFIG_DM_PCI
#define virt_to_bus(devno, v) dm_pci_virt_to_mem(devno, (void *) (v))
-#else
-#define virt_to_bus(devno, v) pci_virt_to_mem(devno, (void *) (v))
-#endif
/* just compatible ahci_ops */
struct sil_ops {
@@ -616,11 +612,7 @@ static int sil_init_sata(struct udevice *uc_dev, int dev)
#else
priv->sil_sata_desc[dev] = sata;
priv->port_num = dev;
-#ifdef CONFIG_DM_PCI
sata->devno = uc_dev->parent;
-#else
- sata->devno = sata_info.devno;
-#endif /* CONFIG_DM_PCI */
#endif
sata->id = dev;
sata->port = port;
diff --git a/drivers/ata/sata_sil.h b/drivers/ata/sata_sil.h
index a300c0c3887..bea4322c919 100644
--- a/drivers/ata/sata_sil.h
+++ b/drivers/ata/sata_sil.h
@@ -21,11 +21,7 @@ struct sil_sata {
u16 pio;
u16 mwdma;
u16 udma;
-#ifdef CONFIG_DM_PCI
struct udevice *devno;
-#else
- pci_dev_t devno;
-#endif
int wcache;
int flush;
int flush_ext;
diff --git a/drivers/bios_emulator/atibios.c b/drivers/bios_emulator/atibios.c
index 6c7cc24cbd9..9547470a2f7 100644
--- a/drivers/bios_emulator/atibios.c
+++ b/drivers/bios_emulator/atibios.c
@@ -230,19 +230,12 @@ This function executes the BIOS POST code on the controller. We assume that
at this stage the controller has its I/O and memory space enabled and
that all other controllers are in a disabled state.
****************************************************************************/
-#ifdef CONFIG_DM_PCI
static void PCI_doBIOSPOST(struct udevice *pcidev, BE_VGAInfo *vga_info,
int vesa_mode, struct vbe_mode_info *mode_info)
-#else
-static void PCI_doBIOSPOST(pci_dev_t pcidev, BE_VGAInfo *vga_info,
- int vesa_mode, struct vbe_mode_info *mode_info)
-#endif
{
RMREGS regs;
RMSREGS sregs;
-#ifdef CONFIG_DM_PCI
pci_dev_t bdf;
-#endif
/* Determine the value to store in AX for BIOS POST. Per the PCI specs,
AH must contain the bus and AL must contain the devfn, encoded as
@@ -250,14 +243,9 @@ static void PCI_doBIOSPOST(pci_dev_t pcidev, BE_VGAInfo *vga_info,
*/
memset(&regs, 0, sizeof(regs));
memset(&sregs, 0, sizeof(sregs));
-#ifdef CONFIG_DM_PCI
bdf = dm_pci_get_bdf(pcidev);
regs.x.ax = (int)PCI_BUS(bdf) << 8 |
(int)PCI_DEV(bdf) << 3 | (int)PCI_FUNC(bdf);
-#else
- regs.x.ax = ((int)PCI_BUS(pcidev) << 8) |
- ((int)PCI_DEV(pcidev) << 3) | (int)PCI_FUNC(pcidev);
-#endif
/*Setup the X86 emulator for the VGA BIOS*/
BE_setVGA(vga_info);
@@ -300,28 +288,15 @@ NOTE: This function leaves the original memory aperture disabled by leaving
it programmed to all 1's. It must be restored to the correct value
later.
****************************************************************************/
-#ifdef CONFIG_DM_PCI
static u32 PCI_findBIOSAddr(struct udevice *pcidev, int *bar)
-#else
-static u32 PCI_findBIOSAddr(pci_dev_t pcidev, int *bar)
-#endif
{
u32 base, size;
for (*bar = 0x10; *bar <= 0x14; (*bar) += 4) {
-#ifdef CONFIG_DM_PCI
dm_pci_read_config32(pcidev, *bar, &base);
-#else
- pci_read_config_dword(pcidev, *bar, &base);
-#endif
if (!(base & 0x1)) {
-#ifdef CONFIG_DM_PCI
dm_pci_write_config32(pcidev, *bar, 0xFFFFFFFF);
dm_pci_read_config32(pcidev, *bar, &size);
-#else
- pci_write_config_dword(pcidev, *bar, 0xFFFFFFFF);
- pci_read_config_dword(pcidev, *bar, &size);
-#endif
size = ~(size & ~0xFF) + 1;
if (size >= MAX_BIOSLEN)
return base & ~0xFF;
@@ -344,19 +319,11 @@ necessary).
Anyway to fix this we change all I/O mapped base registers and
chop off the top bits.
****************************************************************************/
-#ifdef CONFIG_DM_PCI
static void PCI_fixupIObase(struct udevice *pcidev, int reg, u32 *base)
-#else
-static void PCI_fixupIObase(pci_dev_t pcidev, int reg, u32 * base)
-#endif
{
if ((*base & 0x1) && (*base > 0xFFFE)) {
*base &= 0xFFFF;
-#ifdef CONFIG_DM_PCI
dm_pci_write_config32(pcidev, reg, *base);
-#else
- pci_write_config_dword(pcidev, reg, *base);
-#endif
}
}
@@ -371,30 +338,18 @@ Pointers to the mapped BIOS image
REMARKS:
Maps a pointer to the BIOS image on the graphics card on the PCI bus.
****************************************************************************/
-#ifdef CONFIG_DM_PCI
void *PCI_mapBIOSImage(struct udevice *pcidev)
-#else
-void *PCI_mapBIOSImage(pci_dev_t pcidev)
-#endif
{
u32 BIOSImageBus;
int BIOSImageBAR;
u8 *BIOSImage;
/*Save PCI BAR registers that might get changed*/
-#ifdef CONFIG_DM_PCI
dm_pci_read_config32(pcidev, PCI_ROM_ADDRESS, &saveROMBaseAddress);
dm_pci_read_config32(pcidev, PCI_BASE_ADDRESS_0, &saveBaseAddress10);
dm_pci_read_config32(pcidev, PCI_BASE_ADDRESS_1, &saveBaseAddress14);
dm_pci_read_config32(pcidev, PCI_BASE_ADDRESS_2, &saveBaseAddress18);
dm_pci_read_config32(pcidev, PCI_BASE_ADDRESS_4, &saveBaseAddress20);
-#else
- pci_read_config_dword(pcidev, PCI_ROM_ADDRESS, &saveROMBaseAddress);
- pci_read_config_dword(pcidev, PCI_BASE_ADDRESS_0, &saveBaseAddress10);
- pci_read_config_dword(pcidev, PCI_BASE_ADDRESS_1, &saveBaseAddress14);
- pci_read_config_dword(pcidev, PCI_BASE_ADDRESS_2, &saveBaseAddress18);
- pci_read_config_dword(pcidev, PCI_BASE_ADDRESS_4, &saveBaseAddress20);
-#endif
/*Fix up I/O base registers to less than 64K */
if(saveBaseAddress14 != 0)
@@ -413,21 +368,12 @@ void *PCI_mapBIOSImage(pci_dev_t pcidev)
return NULL;
}
-#ifdef CONFIG_DM_PCI
BIOSImage = dm_pci_bus_to_virt(pcidev, BIOSImageBus,
PCI_REGION_MEM, 0, MAP_NOCACHE);
/*Change the PCI BAR registers to map it onto the bus.*/
dm_pci_write_config32(pcidev, BIOSImageBAR, 0);
dm_pci_write_config32(pcidev, PCI_ROM_ADDRESS, BIOSImageBus | 0x1);
-#else
- BIOSImage = pci_bus_to_virt(pcidev, BIOSImageBus,
- PCI_REGION_MEM, 0, MAP_NOCACHE);
-
- /*Change the PCI BAR registers to map it onto the bus.*/
- pci_write_config_dword(pcidev, BIOSImageBAR, 0);
- pci_write_config_dword(pcidev, PCI_ROM_ADDRESS, BIOSImageBus | 0x1);
-#endif
udelay(1);
/*Check that the BIOS image is valid. If not fail, or return the
@@ -447,7 +393,6 @@ pcidev - PCI device info for the video card on the bus
REMARKS:
Unmaps the BIOS image for the device and restores framebuffer mappings
****************************************************************************/
-#ifdef CONFIG_DM_PCI
void PCI_unmapBIOSImage(struct udevice *pcidev, void *BIOSImage)
{
dm_pci_write_config32(pcidev, PCI_ROM_ADDRESS, saveROMBaseAddress);
@@ -456,16 +401,6 @@ void PCI_unmapBIOSImage(struct udevice *pcidev, void *BIOSImage)
dm_pci_write_config32(pcidev, PCI_BASE_ADDRESS_2, saveBaseAddress18);
dm_pci_write_config32(pcidev, PCI_BASE_ADDRESS_4, saveBaseAddress20);
}
-#else
-void PCI_unmapBIOSImage(pci_dev_t pcidev, void *BIOSImage)
-{
- pci_write_config_dword(pcidev, PCI_ROM_ADDRESS, saveROMBaseAddress);
- pci_write_config_dword(pcidev, PCI_BASE_ADDRESS_0, saveBaseAddress10);
- pci_write_config_dword(pcidev, PCI_BASE_ADDRESS_1, saveBaseAddress14);
- pci_write_config_dword(pcidev, PCI_BASE_ADDRESS_2, saveBaseAddress18);
- pci_write_config_dword(pcidev, PCI_BASE_ADDRESS_4, saveBaseAddress20);
-}
-#endif
/****************************************************************************
PARAMETERS:
@@ -479,22 +414,14 @@ REMARKS:
Loads and POST's the display controllers BIOS, directly from the BIOS
image we can extract over the PCI bus.
****************************************************************************/
-#ifdef CONFIG_DM_PCI
static int PCI_postController(struct udevice *pcidev, uchar *bios_rom,
int bios_len, BE_VGAInfo *vga_info,
int vesa_mode, struct vbe_mode_info *mode_info)
-#else
-static int PCI_postController(pci_dev_t pcidev, uchar *bios_rom, int bios_len,
- BE_VGAInfo *vga_info, int vesa_mode,
- struct vbe_mode_info *mode_info)
-#endif
{
u32 bios_image_len;
uchar *mapped_bios;
uchar *copy_of_bios;
-#ifdef CONFIG_DM_PCI
pci_dev_t bdf;
-#endif
if (bios_rom) {
copy_of_bios = bios_rom;
@@ -522,16 +449,10 @@ static int PCI_postController(pci_dev_t pcidev, uchar *bios_rom, int bios_len,
}
/*Save information in vga_info structure*/
-#ifdef CONFIG_DM_PCI
bdf = dm_pci_get_bdf(pcidev);
vga_info->function = PCI_FUNC(bdf);
vga_info->device = PCI_DEV(bdf);
vga_info->bus = PCI_BUS(bdf);
-#else
- vga_info->function = PCI_FUNC(pcidev);
- vga_info->device = PCI_DEV(pcidev);
- vga_info->bus = PCI_BUS(pcidev);
-#endif
vga_info->pcidev = pcidev;
vga_info->BIOSImage = copy_of_bios;
vga_info->BIOSImageLen = bios_image_len;
@@ -549,22 +470,13 @@ static int PCI_postController(pci_dev_t pcidev, uchar *bios_rom, int bios_len,
return true;
}
-#ifdef CONFIG_DM_PCI
int biosemu_setup(struct udevice *pcidev, BE_VGAInfo **vga_infop)
-#else
-int biosemu_setup(pci_dev_t pcidev, BE_VGAInfo **vga_infop)
-#endif
{
BE_VGAInfo *VGAInfo;
-#ifdef CONFIG_DM_PCI
pci_dev_t bdf = dm_pci_get_bdf(pcidev);
printf("videoboot: Booting PCI video card bus %d, function %d, device %d\n",
PCI_BUS(bdf), PCI_FUNC(bdf), PCI_DEV(bdf));
-#else
- printf("videoboot: Booting PCI video card bus %d, function %d, device %d\n",
- PCI_BUS(pcidev), PCI_FUNC(pcidev), PCI_DEV(pcidev));
-#endif
/*Initialise the x86 BIOS emulator*/
if ((VGAInfo = malloc(sizeof(*VGAInfo))) == NULL) {
printf("videoboot: Out of memory!\n");
@@ -582,15 +494,9 @@ void biosemu_set_interrupt_handler(int intnum, int (*int_func)(void))
X86EMU_setupIntrFunc(intnum, (X86EMU_intrFuncs)int_func);
}
-#ifdef CONFIG_DM_PCI
int biosemu_run(struct udevice *pcidev, uchar *bios_rom, int bios_len,
BE_VGAInfo *vga_info, int clean_up, int vesa_mode,
struct vbe_mode_info *mode_info)
-#else
-int biosemu_run(pci_dev_t pcidev, uchar *bios_rom, int bios_len,
- BE_VGAInfo *vga_info, int clean_up, int vesa_mode,
- struct vbe_mode_info *mode_info)
-#endif
{
/*Post all the display controller BIOS'es*/
if (!PCI_postController(pcidev, bios_rom, bios_len, vga_info,
@@ -623,12 +529,8 @@ REMARKS:
Boots the PCI/AGP video card on the bus using the Video ROM BIOS image
and the X86 BIOS emulator module.
****************************************************************************/
-#ifdef CONFIG_DM_PCI
int BootVideoCardBIOS(struct udevice *pcidev, BE_VGAInfo **pVGAInfo,
int clean_up)
-#else
-int BootVideoCardBIOS(pci_dev_t pcidev, BE_VGAInfo **pVGAInfo, int clean_up)
-#endif
{
BE_VGAInfo *VGAInfo;
int ret;
diff --git a/drivers/bios_emulator/bios.c b/drivers/bios_emulator/bios.c
index 77c7f94bc63..9596a1fdd3e 100644
--- a/drivers/bios_emulator/bios.c
+++ b/drivers/bios_emulator/bios.c
@@ -185,21 +185,12 @@ static void X86API int1A(int unused)
case 0xB103: /* Find PCI class code */
M.x86.R_AH = DEVICE_NOT_FOUND;
#ifdef __KERNEL__
-#ifdef CONFIG_DM_PCI
dm_pci_read_config8(_BE_env.vgaInfo.pcidev, PCI_CLASS_PROG,
&interface);
dm_pci_read_config8(_BE_env.vgaInfo.pcidev, PCI_CLASS_DEVICE,
&subclass);
dm_pci_read_config8(_BE_env.vgaInfo.pcidev,
PCI_CLASS_DEVICE + 1, &baseclass);
-#else
- pci_read_config_byte(_BE_env.vgaInfo.pcidev, PCI_CLASS_PROG,
- &interface);
- pci_read_config_byte(_BE_env.vgaInfo.pcidev, PCI_CLASS_DEVICE,
- &subclass);
- pci_read_config_byte(_BE_env.vgaInfo.pcidev,
- PCI_CLASS_DEVICE + 1, &baseclass);
-#endif
if (M.x86.R_CL == interface && M.x86.R_CH == subclass
&& (u8) (M.x86.R_ECX >> 16) == baseclass) {
#else
@@ -218,13 +209,8 @@ static void X86API int1A(int unused)
if (M.x86.R_BX == pciSlot) {
M.x86.R_AH = SUCCESSFUL;
#ifdef __KERNEL__
-# ifdef CONFIG_DM_PCI
dm_pci_read_config8(_BE_env.vgaInfo.pcidev, M.x86.R_DI,
&M.x86.R_CL);
-# else
- pci_read_config_byte(_BE_env.vgaInfo.pcidev, M.x86.R_DI,
- &M.x86.R_CL);
-# endif
#else
M.x86.R_CL =
(u8) PCI_accessReg(M.x86.R_DI, 0, PCI_READ_BYTE,
@@ -238,13 +224,8 @@ static void X86API int1A(int unused)
if (M.x86.R_BX == pciSlot) {
M.x86.R_AH = SUCCESSFUL;
#ifdef __KERNEL__
-# ifdef CONFIG_DM_PCI
dm_pci_read_config16(_BE_env.vgaInfo.pcidev, M.x86.R_DI,
&M.x86.R_CX);
-# else
- pci_read_config_word(_BE_env.vgaInfo.pcidev, M.x86.R_DI,
- &M.x86.R_CX);
-# endif
#else
M.x86.R_CX =
(u16) PCI_accessReg(M.x86.R_DI, 0, PCI_READ_WORD,
@@ -258,13 +239,8 @@ static void X86API int1A(int unused)
if (M.x86.R_BX == pciSlot) {
M.x86.R_AH = SUCCESSFUL;
#ifdef __KERNEL__
-# ifdef CONFIG_DM_PCI
dm_pci_read_config32(_BE_env.vgaInfo.pcidev,
M.x86.R_DI, &M.x86.R_ECX);
-# else
- pci_read_config_dword(_BE_env.vgaInfo.pcidev,
- M.x86.R_DI, &M.x86.R_ECX);
-# endif
#else
M.x86.R_ECX =
(u32) PCI_accessReg(M.x86.R_DI, 0, PCI_READ_DWORD,
@@ -278,13 +254,8 @@ static void X86API int1A(int unused)
if (M.x86.R_BX == pciSlot) {
M.x86.R_AH = SUCCESSFUL;
#ifdef __KERNEL__
-# ifdef CONFIG_DM_PCI
dm_pci_write_config8(_BE_env.vgaInfo.pcidev,
M.x86.R_DI, M.x86.R_CL);
-# else
- pci_write_config_byte(_BE_env.vgaInfo.pcidev,
- M.x86.R_DI, M.x86.R_CL);
-# endif
#else
PCI_accessReg(M.x86.R_DI, M.x86.R_CL, PCI_WRITE_BYTE,
_BE_env.vgaInfo.pciInfo);
@@ -297,13 +268,8 @@ static void X86API int1A(int unused)
if (M.x86.R_BX == pciSlot) {
M.x86.R_AH = SUCCESSFUL;
#ifdef __KERNEL__
-# ifdef CONFIG_DM_PCI
dm_pci_write_config32(_BE_env.vgaInfo.pcidev,
M.x86.R_DI, M.x86.R_CX);
-# else
- pci_write_config_word(_BE_env.vgaInfo.pcidev,
- M.x86.R_DI, M.x86.R_CX);
-# endif
#else
PCI_accessReg(M.x86.R_DI, M.x86.R_CX, PCI_WRITE_WORD,
_BE_env.vgaInfo.pciInfo);
@@ -316,13 +282,8 @@ static void X86API int1A(int unused)
if (M.x86.R_BX == pciSlot) {
M.x86.R_AH = SUCCESSFUL;
#ifdef __KERNEL__
-# ifdef CONFIG_DM_PCI
dm_pci_write_config32(_BE_env.vgaInfo.pcidev,
M.x86.R_DI, M.x86.R_ECX);
-# else
- pci_write_config_dword(_BE_env.vgaInfo.pcidev,
- M.x86.R_DI, M.x86.R_ECX);
-# endif
#else
PCI_accessReg(M.x86.R_DI, M.x86.R_ECX, PCI_WRITE_DWORD,
_BE_env.vgaInfo.pciInfo);
diff --git a/drivers/bootcount/Kconfig b/drivers/bootcount/Kconfig
index 0de2b7bd78c..607027c968d 100644
--- a/drivers/bootcount/Kconfig
+++ b/drivers/bootcount/Kconfig
@@ -144,6 +144,18 @@ config BOOTCOUNT_MEM
is not cleared on softreset.
compatible = "u-boot,bootcount";
+config DM_BOOTCOUNT_SYSCON
+ bool "Support SYSCON devices as a backing store for bootcount"
+ select REGMAP
+ select SYSCON
+ help
+ Enable reading/writing the bootcount value in a DM SYSCON device.
+ The driver supports a fixed 32-bit register using the native
+ endianness. However, this can be controlled from the SYSCON DT node
+ configuration.
+
+ Accessing the backend is done using the regmap interface.
+
endmenu
endif
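As a reference for the device-tree wiring this new driver expects (see bootcount_syscon_of_to_plat() below), a minimal sketch of a node could look as follows. Only the compatible string, the "syscon" phandle and the "syscon_reg"/"offset" reg names are taken from the driver itself; the phandle target, unit address and cell encoding are illustrative assumptions:

	bootcount@0 {
		compatible = "u-boot,bootcount-syscon";
		syscon = <&gpr>;	/* hypothetical syscon provider */
		reg = <0x0 0x4>,	/* "syscon_reg": 32-bit backing register */
		      <0x2 0x2>;	/* "offset": 2-byte bootcount at offset 0x02 */
		reg-names = "syscon_reg", "offset";
	};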
diff --git a/drivers/bootcount/Makefile b/drivers/bootcount/Makefile
index 12658ffdcec..3a784bb0a64 100644
--- a/drivers/bootcount/Makefile
+++ b/drivers/bootcount/Makefile
@@ -14,3 +14,4 @@ obj-$(CONFIG_DM_BOOTCOUNT) += bootcount-uclass.o
obj-$(CONFIG_DM_BOOTCOUNT_RTC) += rtc.o
obj-$(CONFIG_DM_BOOTCOUNT_I2C_EEPROM) += i2c-eeprom.o
obj-$(CONFIG_DM_BOOTCOUNT_SPI_FLASH) += spi-flash.o
+obj-$(CONFIG_DM_BOOTCOUNT_SYSCON) += bootcount_syscon.o
diff --git a/drivers/bootcount/bootcount_syscon.c b/drivers/bootcount/bootcount_syscon.c
new file mode 100644
index 00000000000..413fd5bb9df
--- /dev/null
+++ b/drivers/bootcount/bootcount_syscon.c
@@ -0,0 +1,159 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) Vaisala Oyj. All rights reserved.
+ */
+
+#include <common.h>
+#include <bootcount.h>
+#include <dm.h>
+#include <dm/device_compat.h>
+#include <linux/ioport.h>
+#include <regmap.h>
+#include <syscon.h>
+
+#define BYTES_TO_BITS(bytes) ((bytes) << 3)
+#define GEN_REG_MASK(val_size, val_addr) \
+ (GENMASK(BYTES_TO_BITS(val_size) - 1, 0) \
+ << (!!((val_addr) == 0x02) * BYTES_TO_BITS(2)))
+#define GET_DEFAULT_VALUE(val_size) \
+ (CONFIG_SYS_BOOTCOUNT_MAGIC >> \
+ (BYTES_TO_BITS((sizeof(u32) - (val_size)))))
+
+/**
+ * struct bootcount_syscon_priv - driver's private data
+ *
+ * @regmap: syscon regmap
+ * @reg_addr: register address used to store the bootcount value
+ * @size: size of the bootcount value (2 or 4 bytes)
+ * @magic: magic used to validate/save the bootcount value
+ * @magic_mask: magic value bitmask
+ * @reg_mask: mask used to identify the location of the bootcount value
+ * in the register when 2 bytes length is used
+ * @shift: value used to extract the bootcount value from the register
+ */
+struct bootcount_syscon_priv {
+ struct regmap *regmap;
+ fdt_addr_t reg_addr;
+ fdt_size_t size;
+ u32 magic;
+ u32 magic_mask;
+ u32 reg_mask;
+ int shift;
+};
+
+static int bootcount_syscon_set(struct udevice *dev, const u32 val)
+{
+ struct bootcount_syscon_priv *priv = dev_get_priv(dev);
+ u32 regval;
+
+ if ((val & priv->magic_mask) != 0)
+ return -EINVAL;
+
+ regval = (priv->magic & priv->magic_mask) | (val & ~priv->magic_mask);
+
+ if (priv->size == 2) {
+ regval &= 0xffff;
+ regval |= (regval & 0xffff) << BYTES_TO_BITS(priv->size);
+ }
+
+ debug("%s: Prepare to write reg value: 0x%08x with register mask: 0x%08x\n",
+ __func__, regval, priv->reg_mask);
+
+ return regmap_update_bits(priv->regmap, priv->reg_addr, priv->reg_mask,
+ regval);
+}
+
+static int bootcount_syscon_get(struct udevice *dev, u32 *val)
+{
+ struct bootcount_syscon_priv *priv = dev_get_priv(dev);
+ u32 regval;
+ int ret;
+
+ ret = regmap_read(priv->regmap, priv->reg_addr, &regval);
+ if (ret)
+ return ret;
+
+ regval &= priv->reg_mask;
+ regval >>= priv->shift;
+
+ if ((regval & priv->magic_mask) == (priv->magic & priv->magic_mask)) {
+ *val = regval & ~priv->magic_mask;
+ } else {
+ dev_err(dev, "%s: Invalid bootcount magic\n", __func__);
+ return -EINVAL;
+ }
+
+ debug("%s: Read bootcount value: 0x%08x from regval: 0x%08x\n",
+ __func__, *val, regval);
+ return 0;
+}
+
+static int bootcount_syscon_of_to_plat(struct udevice *dev)
+{
+ struct bootcount_syscon_priv *priv = dev_get_priv(dev);
+ fdt_addr_t bootcount_offset;
+ fdt_size_t reg_size;
+
+ priv->regmap = syscon_regmap_lookup_by_phandle(dev, "syscon");
+ if (IS_ERR(priv->regmap)) {
+ dev_err(dev, "%s: Unable to find regmap (%ld)\n", __func__,
+ PTR_ERR(priv->regmap));
+ return PTR_ERR(priv->regmap);
+ }
+
+ priv->reg_addr = dev_read_addr_size_name(dev, "syscon_reg", &reg_size);
+ if (priv->reg_addr == FDT_ADDR_T_NONE) {
+ dev_err(dev, "%s: syscon_reg address not found\n", __func__);
+ return -EINVAL;
+ }
+ if (reg_size != 4) {
+ dev_err(dev, "%s: Unsupported register size: %d\n", __func__,
+ reg_size);
+ return -EINVAL;
+ }
+
+ bootcount_offset = dev_read_addr_size_name(dev, "offset", &priv->size);
+ if (bootcount_offset == FDT_ADDR_T_NONE) {
+ dev_err(dev, "%s: offset configuration not found\n", __func__);
+ return -EINVAL;
+ }
+ if (bootcount_offset + priv->size > reg_size) {
+ dev_err(dev,
+ "%s: Bootcount value doesn't fit in the reserved space\n",
+ __func__);
+ return -EINVAL;
+ }
+ if (priv->size != 2 && priv->size != 4) {
+ dev_err(dev,
+ "%s: Driver supports only 2 and 4 bytes bootcount size\n",
+ __func__);
+ return -EINVAL;
+ }
+
+ priv->magic = GET_DEFAULT_VALUE(priv->size);
+ priv->magic_mask = GENMASK(BYTES_TO_BITS(priv->size) - 1,
+ BYTES_TO_BITS(priv->size >> 1));
+ priv->shift = !!(bootcount_offset == 0x02) * BYTES_TO_BITS(priv->size);
+ priv->reg_mask = GEN_REG_MASK(priv->size, bootcount_offset);
+
+ return 0;
+}
+
+static const struct bootcount_ops bootcount_syscon_ops = {
+ .get = bootcount_syscon_get,
+ .set = bootcount_syscon_set,
+};
+
+static const struct udevice_id bootcount_syscon_ids[] = {
+ { .compatible = "u-boot,bootcount-syscon" },
+ {}
+};
+
+U_BOOT_DRIVER(bootcount_syscon) = {
+ .name = "bootcount-syscon",
+ .id = UCLASS_BOOTCOUNT,
+ .of_to_plat = bootcount_syscon_of_to_plat,
+ .priv_auto = sizeof(struct bootcount_syscon_priv),
+ .of_match = bootcount_syscon_ids,
+ .ops = &bootcount_syscon_ops,
+};
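To make the mask/magic arithmetic at the top of this file concrete, here is a small standalone sketch (not part of the driver) that evaluates the same expressions for a hypothetical 2-byte bootcount stored at offset 0x02 of a 32-bit register. The 0xB001C041 value is the usual CONFIG_SYS_BOOTCOUNT_MAGIC default and is an assumption here; the GENMASK definition below is a local stand-in for the kernel macro.

#include <stdint.h>
#include <stdio.h>

#define BYTES_TO_BITS(bytes)	((bytes) << 3)
#define GENMASK(h, l)		(((~0U) >> (31 - (h))) & ((~0U) << (l)))

int main(void)
{
	uint32_t size = 2, offset = 0x02;	/* hypothetical DT values */
	uint32_t magic = 0xB001C041u >> BYTES_TO_BITS(4 - size);
	uint32_t magic_mask = GENMASK(BYTES_TO_BITS(size) - 1,
				      BYTES_TO_BITS(size >> 1));
	uint32_t shift = !!(offset == 0x02) * BYTES_TO_BITS(size);
	uint32_t reg_mask = GENMASK(BYTES_TO_BITS(size) - 1, 0) << shift;

	/* Prints: magic=0xb001 magic_mask=0xff00 shift=16 reg_mask=0xffff0000,
	 * i.e. the counter lives in bits 23:16 with the 0xb0 magic byte in
	 * bits 31:24 of the syscon register. */
	printf("magic=%#x magic_mask=%#x shift=%u reg_mask=%#x\n",
	       magic, magic_mask, shift, reg_mask);
	return 0;
}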
diff --git a/drivers/clk/Kconfig b/drivers/clk/Kconfig
index e07c6dd78a6..baac8d281e4 100644
--- a/drivers/clk/Kconfig
+++ b/drivers/clk/Kconfig
@@ -220,4 +220,13 @@ config SANDBOX_CLK_CCF
Enable this option if you want to test the Linux kernel's Common
Clock Framework [CCF] code in U-Boot's Sandbox clock driver.
+config CLK_VERSACLOCK
+ tristate "Enable VersaClock 5/6 devices"
+ depends on CLK
+ depends on CLK_CCF
+ depends on OF_CONTROL
+ help
+ This driver supports the IDT VersaClock 5 and VersaClock 6
+ programmable clock generators.
+
endmenu
diff --git a/drivers/clk/Makefile b/drivers/clk/Makefile
index 6e9c2d54853..711ae5bc29d 100644
--- a/drivers/clk/Makefile
+++ b/drivers/clk/Makefile
@@ -52,3 +52,4 @@ obj-$(CONFIG_SANDBOX_CLK_CCF) += clk_sandbox_ccf.o
obj-$(CONFIG_STM32H7) += clk_stm32h7.o
obj-$(CONFIG_CLK_VERSAL) += clk_versal.o
obj-$(CONFIG_CLK_CDCE9XX) += clk-cdce9xx.o
+obj-$(CONFIG_CLK_VERSACLOCK) += clk_versaclock.o
diff --git a/drivers/clk/altera/Makefile b/drivers/clk/altera/Makefile
index 96215ad5c42..33db092918a 100644
--- a/drivers/clk/altera/Makefile
+++ b/drivers/clk/altera/Makefile
@@ -1,7 +1,9 @@
# SPDX-License-Identifier: GPL-2.0+
#
-# Copyright (C) 2018 Marek Vasut <marex@denx.de>
+# Copyright (C) 2018-2021 Marek Vasut <marex@denx.de>
#
obj-$(CONFIG_TARGET_SOCFPGA_AGILEX) += clk-agilex.o
obj-$(CONFIG_TARGET_SOCFPGA_ARRIA10) += clk-arria10.o
+obj-$(CONFIG_TARGET_SOCFPGA_N5X) += clk-n5x.o
+obj-$(CONFIG_TARGET_SOCFPGA_N5X) += clk-mem-n5x.o
diff --git a/drivers/clk/altera/clk-mem-n5x.c b/drivers/clk/altera/clk-mem-n5x.c
new file mode 100644
index 00000000000..ca449986418
--- /dev/null
+++ b/drivers/clk/altera/clk-mem-n5x.c
@@ -0,0 +1,136 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2020-2021 Intel Corporation <www.intel.com>
+ */
+
+#include <common.h>
+#include <asm/arch/clock_manager.h>
+#include <asm/global_data.h>
+#include <asm/io.h>
+#include "clk-mem-n5x.h"
+#include <clk-uclass.h>
+#include <dm.h>
+#include <dm/lists.h>
+#include <dm/util.h>
+#include <dt-bindings/clock/n5x-clock.h>
+
+DECLARE_GLOBAL_DATA_PTR;
+
+struct socfpga_mem_clk_plat {
+ void __iomem *regs;
+};
+
+void clk_mem_wait_for_lock(struct socfpga_mem_clk_plat *plat, u32 mask)
+{
+ u32 inter_val;
+ u32 retry = 0;
+
+ do {
+ inter_val = CM_REG_READL(plat, MEMCLKMGR_STAT) & mask;
+
+ /* Wait for stable lock */
+ if (inter_val == mask)
+ retry++;
+ else
+ retry = 0;
+
+ if (retry >= 10)
+ return;
+ } while (1);
+}
+
+/*
+ * Function to write the memory PLL bypass register
+ */
+void clk_mem_write_bypass_mempll(struct socfpga_mem_clk_plat *plat, u32 val)
+{
+ CM_REG_WRITEL(plat, val, MEMCLKMGR_MEMPLL_BYPASS);
+}
+
+/*
+ * Setup clocks while making no assumptions about previous state of the clocks.
+ */
+static void clk_mem_basic_init(struct udevice *dev,
+ const struct cm_config * const cfg)
+{
+ struct socfpga_mem_clk_plat *plat = dev_get_plat(dev);
+
+ if (!cfg)
+ return;
+
+ /* Put PLLs in bypass */
+ clk_mem_write_bypass_mempll(plat, MEMCLKMGR_BYPASS_MEMPLL_ALL);
+
+ /* Put PLLs in Reset */
+ CM_REG_SETBITS(plat, MEMCLKMGR_MEMPLL_PLLCTRL,
+ MEMCLKMGR_PLLCTRL_BYPASS_MASK);
+
+ /* setup mem PLL */
+ CM_REG_WRITEL(plat, cfg->mem_memdiv, MEMCLKMGR_MEMPLL_MEMDIV);
+ CM_REG_WRITEL(plat, cfg->mem_pllglob, MEMCLKMGR_MEMPLL_PLLGLOB);
+ CM_REG_WRITEL(plat, cfg->mem_plldiv, MEMCLKMGR_MEMPLL_PLLDIV);
+ CM_REG_WRITEL(plat, cfg->mem_plloutdiv, MEMCLKMGR_MEMPLL_PLLOUTDIV);
+
+ /* Take PLL out of reset and power up */
+ CM_REG_CLRBITS(plat, MEMCLKMGR_MEMPLL_PLLCTRL,
+ MEMCLKMGR_PLLCTRL_BYPASS_MASK);
+}
+
+static int socfpga_mem_clk_enable(struct clk *clk)
+{
+ const struct cm_config *cm_default_cfg = cm_get_default_config();
+ struct socfpga_mem_clk_plat *plat = dev_get_plat(clk->dev);
+
+ clk_mem_basic_init(clk->dev, cm_default_cfg);
+
+ clk_mem_wait_for_lock(plat, MEMCLKMGR_STAT_ALLPLL_LOCKED_MASK);
+
+ CM_REG_WRITEL(plat, CM_REG_READL(plat, MEMCLKMGR_MEMPLL_PLLGLOB) |
+ MEMCLKMGR_PLLGLOB_CLR_LOSTLOCK_BYPASS_MASK,
+ MEMCLKMGR_MEMPLL_PLLGLOB);
+
+ /* Take all PLLs out of bypass */
+ clk_mem_write_bypass_mempll(plat, 0);
+
+ /* Clear the loss of lock bits (write 1 to clear) */
+ CM_REG_CLRBITS(plat, MEMCLKMGR_INTRCLR,
+ MEMCLKMGR_INTER_MEMPLLLOST_MASK);
+
+ /* Take all ping pong counters out of reset */
+ CM_REG_CLRBITS(plat, MEMCLKMGR_MEMPLL_EXTCNTRST,
+ MEMCLKMGR_EXTCNTRST_ALLCNTRST);
+
+ return 0;
+}
+
+static int socfpga_mem_clk_of_to_plat(struct udevice *dev)
+{
+ struct socfpga_mem_clk_plat *plat = dev_get_plat(dev);
+ fdt_addr_t addr;
+
+ addr = devfdt_get_addr(dev);
+ if (addr == FDT_ADDR_T_NONE)
+ return -EINVAL;
+ plat->regs = (void __iomem *)addr;
+
+ return 0;
+}
+
+static struct clk_ops socfpga_mem_clk_ops = {
+ .enable = socfpga_mem_clk_enable
+};
+
+static const struct udevice_id socfpga_mem_clk_match[] = {
+ { .compatible = "intel,n5x-mem-clkmgr" },
+ {}
+};
+
+U_BOOT_DRIVER(socfpga_n5x_mem_clk) = {
+ .name = "mem-clk-n5x",
+ .id = UCLASS_CLK,
+ .of_match = socfpga_mem_clk_match,
+ .ops = &socfpga_mem_clk_ops,
+ .of_to_plat = socfpga_mem_clk_of_to_plat,
+ .plat_auto = sizeof(struct socfpga_mem_clk_plat),
+};
diff --git a/drivers/clk/altera/clk-mem-n5x.h b/drivers/clk/altera/clk-mem-n5x.h
new file mode 100644
index 00000000000..d000ae260c1
--- /dev/null
+++ b/drivers/clk/altera/clk-mem-n5x.h
@@ -0,0 +1,84 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2020-2021 Intel Corporation <www.intel.com>
+ */
+
+#ifndef _CLK_MEM_N5X_
+#define _CLK_MEM_N5X_
+
+#ifndef __ASSEMBLY__
+#include <linux/bitops.h>
+#endif
+
+/* Clock Manager registers */
+#define MEMCLKMGR_STAT 4
+#define MEMCLKMGR_INTRGEN 8
+#define MEMCLKMGR_INTRMSK 0x0c
+#define MEMCLKMGR_INTRCLR 0x10
+#define MEMCLKMGR_INTRSTS 0x14
+#define MEMCLKMGR_INTRSTK 0x18
+#define MEMCLKMGR_INTRRAW 0x1c
+
+/* Memory Clock Manager PPL group registers */
+#define MEMCLKMGR_MEMPLL_EN 0x20
+#define MEMCLKMGR_MEMPLL_ENS 0x24
+#define MEMCLKMGR_MEMPLL_ENR 0x28
+#define MEMCLKMGR_MEMPLL_BYPASS 0x2c
+#define MEMCLKMGR_MEMPLL_BYPASSS 0x30
+#define MEMCLKMGR_MEMPLL_BYPASSR 0x34
+#define MEMCLKMGR_MEMPLL_MEMDIV 0x38
+#define MEMCLKMGR_MEMPLL_PLLGLOB 0x3c
+#define MEMCLKMGR_MEMPLL_PLLCTRL 0x40
+#define MEMCLKMGR_MEMPLL_PLLDIV 0x44
+#define MEMCLKMGR_MEMPLL_PLLOUTDIV 0x48
+#define MEMCLKMGR_MEMPLL_EXTCNTRST 0x4c
+
+#define MEMCLKMGR_CTRL_BOOTMODE BIT(0)
+
+#define MEMCLKMGR_STAT_MEMPLL_LOCKED BIT(8)
+
+#define MEMCLKMGR_STAT_ALLPLL_LOCKED_MASK \
+ (MEMCLKMGR_STAT_MEMPLL_LOCKED)
+
+#define MEMCLKMGR_INTER_MEMPLLLOCKED_MASK BIT(0)
+#define MEMCLKMGR_INTER_MEMPLLLOST_MASK BIT(2)
+
+#define MEMCLKMGR_BYPASS_MEMPLL_ALL 0x1
+
+#define MEMCLKMGR_MEMDIV_MPFEDIV_OFFSET 0
+#define MEMCLKMGR_MEMDIV_APBDIV_OFFSET 4
+#define MEMCLKMGR_MEMDIV_DFICTRLDIV_OFFSET 8
+#define MEMCLKMGR_MEMDIV_DFIDIV_OFFSET 12
+#define MEMCLKMGR_MEMDIV_DFICTRLDIV_MASK BIT(0)
+#define MEMCLKMGR_MEMDIV_DIVIDER_MASK GENMASK(1, 0)
+
+#define MEMCLKMGR_PLLGLOB_PSRC_MASK GENMASK(17, 16)
+#define MEMCLKMGR_PLLGLOB_PSRC_OFFSET 16
+#define MEMCLKMGR_PLLGLOB_LOSTLOCK_BYPASS_EN_MASK BIT(28)
+#define MEMCLKMGR_PLLGLOB_CLR_LOSTLOCK_BYPASS_MASK BIT(29)
+
+#define MEMCLKMGR_PSRC_EOSC1 0
+#define MEMCLKMGR_PSRC_INTOSC 1
+#define MEMCLKMGR_PSRC_F2S 2
+
+#define MEMCLKMGR_PLLCTRL_BYPASS_MASK BIT(0)
+#define MEMCLKMGR_PLLCTRL_RST_N_MASK BIT(1)
+
+#define MEMCLKMGR_PLLDIV_DIVR_MASK GENMASK(5, 0)
+#define MEMCLKMGR_PLLDIV_DIVF_MASK GENMASK(16, 8)
+#define MEMCLKMGR_PLLDIV_DIVQ_MASK GENMASK(26, 24)
+#define MEMCLKMGR_PLLDIV_RANGE_MASK GENMASK(30, 28)
+
+#define MEMCLKMGR_PLLDIV_DIVR_OFFSET 0
+#define MEMCLKMGR_PLLDIV_DIVF_OFFSET 8
+#define MEMCLKMGR_PLLDIV_DIVQ_QDIV_OFFSET 24
+#define MEMCLKMGR_PLLDIV_RANGE_OFFSET 28
+
+#define MEMCLKMGR_PLLOUTDIV_C0CNT_MASK GENMASK(4, 0)
+#define MEMCLKMGR_PLLOUTDIV_C0CNT_OFFSET 0
+
+#define MEMCLKMGR_EXTCNTRST_C0CNTRST BIT(7)
+#define MEMCLKMGR_EXTCNTRST_ALLCNTRST \
+ (MEMCLKMGR_EXTCNTRST_C0CNTRST)
+
+#endif /* _CLK_MEM_N5X_ */
diff --git a/drivers/clk/altera/clk-n5x.c b/drivers/clk/altera/clk-n5x.c
new file mode 100644
index 00000000000..bdcbbaae910
--- /dev/null
+++ b/drivers/clk/altera/clk-n5x.c
@@ -0,0 +1,489 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2020-2021 Intel Corporation <www.intel.com>
+ */
+
+#include <common.h>
+#include <asm/arch/clock_manager.h>
+#include <asm/global_data.h>
+#include <asm/io.h>
+#include <clk-uclass.h>
+#include <dm.h>
+#include <dm/lists.h>
+#include <dm/util.h>
+#include <dt-bindings/clock/n5x-clock.h>
+
+DECLARE_GLOBAL_DATA_PTR;
+
+struct socfpga_clk_plat {
+ void __iomem *regs;
+};
+
+/*
+ * function to write the bypass register which requires a poll of the
+ * busy bit
+ */
+static void clk_write_bypass_mainpll(struct socfpga_clk_plat *plat, u32 val)
+{
+ CM_REG_WRITEL(plat, val, CLKMGR_MAINPLL_BYPASS);
+ cm_wait_for_fsm();
+}
+
+static void clk_write_bypass_perpll(struct socfpga_clk_plat *plat, u32 val)
+{
+ CM_REG_WRITEL(plat, val, CLKMGR_PERPLL_BYPASS);
+ cm_wait_for_fsm();
+}
+
+/* function to write the ctrl register which requires a poll of the busy bit */
+static void clk_write_ctrl(struct socfpga_clk_plat *plat, u32 val)
+{
+ CM_REG_WRITEL(plat, val, CLKMGR_CTRL);
+ cm_wait_for_fsm();
+}
+
+/*
+ * Setup clocks while making no assumptions about previous state of the clocks.
+ */
+static void clk_basic_init(struct udevice *dev,
+ const struct cm_config * const cfg)
+{
+ struct socfpga_clk_plat *plat = dev_get_plat(dev);
+
+ if (!cfg)
+ return;
+
+#if IS_ENABLED(CONFIG_SPL_BUILD)
+ /* Always force clock manager into boot mode before any configuration */
+ clk_write_ctrl(plat,
+ CM_REG_READL(plat, CLKMGR_CTRL) | CLKMGR_CTRL_BOOTMODE);
+#else
+ /* Skip clock configuration in SSBL if it's not in boot mode */
+ if (!(CM_REG_READL(plat, CLKMGR_CTRL) & CLKMGR_CTRL_BOOTMODE))
+ return;
+#endif
+
+ /* Put both PLLs in bypass */
+ clk_write_bypass_mainpll(plat, CLKMGR_BYPASS_MAINPLL_ALL);
+ clk_write_bypass_perpll(plat, CLKMGR_BYPASS_PERPLL_ALL);
+
+ /* Put both PLLs in Reset */
+ CM_REG_SETBITS(plat, CLKMGR_MAINPLL_PLLCTRL,
+ CLKMGR_PLLCTRL_BYPASS_MASK);
+ CM_REG_SETBITS(plat, CLKMGR_PERPLL_PLLCTRL,
+ CLKMGR_PLLCTRL_BYPASS_MASK);
+
+ /* setup main PLL */
+ CM_REG_WRITEL(plat, cfg->main_pll_pllglob, CLKMGR_MAINPLL_PLLGLOB);
+ CM_REG_WRITEL(plat, cfg->main_pll_plldiv, CLKMGR_MAINPLL_PLLDIV);
+ CM_REG_WRITEL(plat, cfg->main_pll_plloutdiv, CLKMGR_MAINPLL_PLLOUTDIV);
+ CM_REG_WRITEL(plat, cfg->main_pll_mpuclk, CLKMGR_MAINPLL_MPUCLK);
+ CM_REG_WRITEL(plat, cfg->main_pll_nocclk, CLKMGR_MAINPLL_NOCCLK);
+ CM_REG_WRITEL(plat, cfg->main_pll_nocdiv, CLKMGR_MAINPLL_NOCDIV);
+
+ /* setup peripheral */
+ CM_REG_WRITEL(plat, cfg->per_pll_pllglob, CLKMGR_PERPLL_PLLGLOB);
+ CM_REG_WRITEL(plat, cfg->per_pll_plldiv, CLKMGR_PERPLL_PLLDIV);
+ CM_REG_WRITEL(plat, cfg->per_pll_plloutdiv, CLKMGR_PERPLL_PLLOUTDIV);
+ CM_REG_WRITEL(plat, cfg->per_pll_emacctl, CLKMGR_PERPLL_EMACCTL);
+ CM_REG_WRITEL(plat, cfg->per_pll_gpiodiv, CLKMGR_PERPLL_GPIODIV);
+
+ /* Take both PLL out of reset and power up */
+ CM_REG_CLRBITS(plat, CLKMGR_MAINPLL_PLLCTRL,
+ CLKMGR_PLLCTRL_BYPASS_MASK);
+ CM_REG_CLRBITS(plat, CLKMGR_PERPLL_PLLCTRL,
+ CLKMGR_PLLCTRL_BYPASS_MASK);
+
+ cm_wait_for_lock(CLKMGR_STAT_ALLPLL_LOCKED_MASK);
+
+ CM_REG_WRITEL(plat, cfg->alt_emacactr, CLKMGR_ALTR_EMACACTR);
+ CM_REG_WRITEL(plat, cfg->alt_emacbctr, CLKMGR_ALTR_EMACBCTR);
+ CM_REG_WRITEL(plat, cfg->alt_emacptpctr, CLKMGR_ALTR_EMACPTPCTR);
+ CM_REG_WRITEL(plat, cfg->alt_gpiodbctr, CLKMGR_ALTR_GPIODBCTR);
+ CM_REG_WRITEL(plat, cfg->alt_sdmmcctr, CLKMGR_ALTR_SDMMCCTR);
+ CM_REG_WRITEL(plat, cfg->alt_s2fuser0ctr, CLKMGR_ALTR_S2FUSER0CTR);
+ CM_REG_WRITEL(plat, cfg->alt_s2fuser1ctr, CLKMGR_ALTR_S2FUSER1CTR);
+ CM_REG_WRITEL(plat, cfg->alt_psirefctr, CLKMGR_ALTR_PSIREFCTR);
+
+ /* Configure ping pong counters in altera group */
+ CM_REG_WRITEL(plat, CLKMGR_LOSTLOCK_SET_MASK, CLKMGR_MAINPLL_LOSTLOCK);
+ CM_REG_WRITEL(plat, CLKMGR_LOSTLOCK_SET_MASK, CLKMGR_PERPLL_LOSTLOCK);
+
+ CM_REG_WRITEL(plat, CM_REG_READL(plat, CLKMGR_MAINPLL_PLLGLOB) |
+ CLKMGR_PLLGLOB_CLR_LOSTLOCK_BYPASS_MASK,
+ CLKMGR_MAINPLL_PLLGLOB);
+ CM_REG_WRITEL(plat, CM_REG_READL(plat, CLKMGR_PERPLL_PLLGLOB) |
+ CLKMGR_PLLGLOB_CLR_LOSTLOCK_BYPASS_MASK,
+ CLKMGR_PERPLL_PLLGLOB);
+
+ /* Take all PLLs out of bypass */
+ clk_write_bypass_mainpll(plat, 0);
+ clk_write_bypass_perpll(plat, 0);
+
+ /* Clear the loss of lock bits */
+ CM_REG_CLRBITS(plat, CLKMGR_INTRCLR,
+ CLKMGR_INTER_PERPLLLOST_MASK |
+ CLKMGR_INTER_MAINPLLLOST_MASK);
+
+ /* Take all ping pong counters out of reset */
+ CM_REG_CLRBITS(plat, CLKMGR_ALTR_EXTCNTRST,
+ CLKMGR_ALT_EXTCNTRST_ALLCNTRST_MASK);
+
+ /* Out of boot mode */
+ clk_write_ctrl(plat,
+ CM_REG_READL(plat, CLKMGR_CTRL) & ~CLKMGR_CTRL_BOOTMODE);
+}
+
+static u32 clk_get_5_1_clk_src(struct socfpga_clk_plat *plat, u32 reg)
+{
+ u32 clksrc = CM_REG_READL(plat, reg);
+
+ return (clksrc & CLKMGR_CLKSRC_MASK) >> CLKMGR_CLKSRC_OFFSET;
+}
+
+static u64 clk_get_pll_output_hz(struct socfpga_clk_plat *plat,
+ u32 pllglob_reg, u32 plldiv_reg)
+{
+ u64 clock = 0;
+ u32 clklsrc, divf, divr, divq, power = 1;
+
+ /* Get input clock frequency */
+ clklsrc = (CM_REG_READL(plat, pllglob_reg) &
+ CLKMGR_PLLGLOB_VCO_PSRC_MASK) >>
+ CLKMGR_PLLGLOB_VCO_PSRC_OFFSET;
+
+ switch (clklsrc) {
+ case CLKMGR_VCO_PSRC_EOSC1:
+ clock = cm_get_osc_clk_hz();
+ break;
+ case CLKMGR_VCO_PSRC_INTOSC:
+ clock = cm_get_intosc_clk_hz();
+ break;
+ case CLKMGR_VCO_PSRC_F2S:
+ clock = cm_get_fpga_clk_hz();
+ break;
+ }
+
+ /* Calculate pll out clock frequency */
+ divf = (CM_REG_READL(plat, plldiv_reg) &
+ CLKMGR_PLLDIV_FDIV_MASK) >>
+ CLKMGR_PLLDIV_FDIV_OFFSET;
+
+ divr = (CM_REG_READL(plat, plldiv_reg) &
+ CLKMGR_PLLDIV_REFCLKDIV_MASK) >>
+ CLKMGR_PLLDIV_REFCLKDIV_OFFSET;
+
+ divq = (CM_REG_READL(plat, plldiv_reg) &
+ CLKMGR_PLLDIV_OUTDIV_QDIV_MASK) >>
+ CLKMGR_PLLDIV_OUTDIV_QDIV_OFFSET;
+
+ while (divq) {
+ power *= 2;
+ divq--;
+ }
+
+ return (clock * 2 * (divf + 1)) / ((divr + 1) * power);
+}
+
+static u64 clk_get_clksrc_hz(struct socfpga_clk_plat *plat, u32 clksrc_reg,
+ u32 main_div, u32 per_div)
+{
+ u64 clock = 0;
+ u32 clklsrc = clk_get_5_1_clk_src(plat, clksrc_reg);
+
+ switch (clklsrc) {
+ case CLKMGR_CLKSRC_MAIN:
+ clock = clk_get_pll_output_hz(plat,
+ CLKMGR_MAINPLL_PLLGLOB,
+ CLKMGR_MAINPLL_PLLDIV);
+ clock /= 1 + main_div;
+ break;
+
+ case CLKMGR_CLKSRC_PER:
+ clock = clk_get_pll_output_hz(plat,
+ CLKMGR_PERPLL_PLLGLOB,
+ CLKMGR_PERPLL_PLLDIV);
+ clock /= 1 + per_div;
+ break;
+
+ case CLKMGR_CLKSRC_OSC1:
+ clock = cm_get_osc_clk_hz();
+ break;
+
+ case CLKMGR_CLKSRC_INTOSC:
+ clock = cm_get_intosc_clk_hz();
+ break;
+
+ case CLKMGR_CLKSRC_FPGA:
+ clock = cm_get_fpga_clk_hz();
+ break;
+ default:
+ return 0;
+ }
+
+ return clock;
+}
+
+static u64 clk_get_mpu_clk_hz(struct socfpga_clk_plat *plat)
+{
+ u32 mainpll_c0cnt = (CM_REG_READL(plat, CLKMGR_MAINPLL_PLLOUTDIV) &
+ CLKMGR_PLLOUTDIV_C0CNT_MASK) >>
+ CLKMGR_PLLOUTDIV_C0CNT_OFFSET;
+
+ u32 perpll_c0cnt = (CM_REG_READL(plat, CLKMGR_PERPLL_PLLOUTDIV) &
+ CLKMGR_PLLOUTDIV_C0CNT_MASK) >>
+ CLKMGR_PLLOUTDIV_C0CNT_OFFSET;
+
+ u64 clock = clk_get_clksrc_hz(plat, CLKMGR_MAINPLL_MPUCLK,
+ mainpll_c0cnt, perpll_c0cnt);
+
+ clock /= 1 + (CM_REG_READL(plat, CLKMGR_MAINPLL_MPUCLK) &
+ CLKMGR_CLKCNT_MSK);
+
+ return clock;
+}
+
+static u32 clk_get_l3_main_clk_hz(struct socfpga_clk_plat *plat)
+{
+ u32 mainpll_c1cnt = (CM_REG_READL(plat, CLKMGR_MAINPLL_PLLOUTDIV) &
+ CLKMGR_PLLOUTDIV_C1CNT_MASK) >>
+ CLKMGR_PLLOUTDIV_C1CNT_OFFSET;
+
+ u32 perpll_c1cnt = (CM_REG_READL(plat, CLKMGR_PERPLL_PLLOUTDIV) &
+ CLKMGR_PLLOUTDIV_C1CNT_MASK) >>
+ CLKMGR_PLLOUTDIV_C1CNT_OFFSET;
+
+ return clk_get_clksrc_hz(plat, CLKMGR_MAINPLL_NOCCLK,
+ mainpll_c1cnt, perpll_c1cnt);
+}
+
+static u32 clk_get_l4_main_clk_hz(struct socfpga_clk_plat *plat)
+{
+ u64 clock = clk_get_l3_main_clk_hz(plat);
+
+ clock /= BIT((CM_REG_READL(plat, CLKMGR_MAINPLL_NOCDIV) >>
+ CLKMGR_NOCDIV_L4MAIN_OFFSET) &
+ CLKMGR_NOCDIV_DIVIDER_MASK);
+
+ return clock;
+}
+
+static u32 clk_get_sdmmc_clk_hz(struct socfpga_clk_plat *plat)
+{
+ u32 mainpll_c3cnt = (CM_REG_READL(plat, CLKMGR_MAINPLL_PLLOUTDIV) &
+ CLKMGR_PLLOUTDIV_C3CNT_MASK) >>
+ CLKMGR_PLLOUTDIV_C3CNT_OFFSET;
+
+ u32 perpll_c3cnt = (CM_REG_READL(plat, CLKMGR_PERPLL_PLLOUTDIV) &
+ CLKMGR_PLLOUTDIV_C3CNT_MASK) >>
+ CLKMGR_PLLOUTDIV_C3CNT_OFFSET;
+
+ u64 clock = clk_get_clksrc_hz(plat, CLKMGR_ALTR_SDMMCCTR,
+ mainpll_c3cnt, perpll_c3cnt);
+
+ clock /= 1 + (CM_REG_READL(plat, CLKMGR_ALTR_SDMMCCTR) &
+ CLKMGR_CLKCNT_MSK);
+
+ return clock / 4;
+}
+
+static u32 clk_get_l4_sp_clk_hz(struct socfpga_clk_plat *plat)
+{
+ u64 clock = clk_get_l3_main_clk_hz(plat);
+
+ clock /= BIT((CM_REG_READL(plat, CLKMGR_MAINPLL_NOCDIV) >>
+ CLKMGR_NOCDIV_L4SPCLK_OFFSET) &
+ CLKMGR_NOCDIV_DIVIDER_MASK);
+
+ return clock;
+}
+
+static u32 clk_get_l4_mp_clk_hz(struct socfpga_clk_plat *plat)
+{
+ u64 clock = clk_get_l3_main_clk_hz(plat);
+
+ clock /= BIT((CM_REG_READL(plat, CLKMGR_MAINPLL_NOCDIV) >>
+ CLKMGR_NOCDIV_L4MPCLK_OFFSET) &
+ CLKMGR_NOCDIV_DIVIDER_MASK);
+
+ return clock;
+}
+
+static u32 clk_get_l4_sys_free_clk_hz(struct socfpga_clk_plat *plat)
+{
+ if (CM_REG_READL(plat, CLKMGR_STAT) & CLKMGR_STAT_BOOTMODE)
+ return clk_get_l3_main_clk_hz(plat) / 2;
+
+ return clk_get_l3_main_clk_hz(plat) / 4;
+}
+
+static u32 clk_get_emac_clk_hz(struct socfpga_clk_plat *plat, u32 emac_id)
+{
+ bool emacsel_a;
+ u32 ctl;
+ u32 ctr_reg;
+ u32 clock;
+ u32 div;
+ u32 reg;
+
+ /* Get EMAC clock source */
+ ctl = CM_REG_READL(plat, CLKMGR_PERPLL_EMACCTL);
+ if (emac_id == N5X_EMAC0_CLK)
+ ctl = (ctl >> CLKMGR_PERPLLGRP_EMACCTL_EMAC0SELB_OFFSET) &
+ CLKMGR_PERPLLGRP_EMACCTL_EMAC0SELB_MASK;
+ else if (emac_id == N5X_EMAC1_CLK)
+ ctl = (ctl >> CLKMGR_PERPLLGRP_EMACCTL_EMAC1SELB_OFFSET) &
+ CLKMGR_PERPLLGRP_EMACCTL_EMAC1SELB_MASK;
+ else if (emac_id == N5X_EMAC2_CLK)
+ ctl = (ctl >> CLKMGR_PERPLLGRP_EMACCTL_EMAC2SELB_OFFSET) &
+ CLKMGR_PERPLLGRP_EMACCTL_EMAC2SELB_MASK;
+ else
+ return 0;
+
+ if (ctl) {
+ /* EMAC B source */
+ emacsel_a = false;
+ ctr_reg = CLKMGR_ALTR_EMACBCTR;
+ } else {
+ /* EMAC A source */
+ emacsel_a = true;
+ ctr_reg = CLKMGR_ALTR_EMACACTR;
+ }
+
+ reg = CM_REG_READL(plat, ctr_reg);
+ clock = (reg & CLKMGR_ALT_EMACCTR_SRC_MASK)
+ >> CLKMGR_ALT_EMACCTR_SRC_OFFSET;
+ div = (reg & CLKMGR_ALT_EMACCTR_CNT_MASK)
+ >> CLKMGR_ALT_EMACCTR_CNT_OFFSET;
+
+ switch (clock) {
+ case CLKMGR_CLKSRC_MAIN:
+ clock = clk_get_pll_output_hz(plat,
+ CLKMGR_MAINPLL_PLLGLOB,
+ CLKMGR_MAINPLL_PLLDIV);
+
+ if (emacsel_a) {
+ clock /= 1 + ((CM_REG_READL(plat,
+ CLKMGR_MAINPLL_PLLOUTDIV) &
+ CLKMGR_PLLOUTDIV_C2CNT_MASK) >>
+ CLKMGR_PLLOUTDIV_C2CNT_OFFSET);
+ } else {
+ clock /= 1 + ((CM_REG_READL(plat,
+ CLKMGR_MAINPLL_PLLOUTDIV) &
+ CLKMGR_PLLOUTDIV_C3CNT_MASK) >>
+ CLKMGR_PLLOUTDIV_C3CNT_OFFSET);
+ }
+ break;
+
+ case CLKMGR_CLKSRC_PER:
+ clock = clk_get_pll_output_hz(plat,
+ CLKMGR_PERPLL_PLLGLOB,
+ CLKMGR_PERPLL_PLLDIV);
+ if (emacsel_a) {
+ clock /= 1 + ((CM_REG_READL(plat,
+ CLKMGR_PERPLL_PLLOUTDIV) &
+ CLKMGR_PLLOUTDIV_C2CNT_MASK) >>
+ CLKMGR_PLLOUTDIV_C2CNT_OFFSET);
+ } else {
+ clock /= 1 + ((CM_REG_READL(plat,
+ CLKMGR_PERPLL_PLLOUTDIV) &
+ CLKMGR_PLLOUTDIV_C3CNT_MASK) >>
+ CLKMGR_PLLOUTDIV_C3CNT_OFFSET);
+ }
+ break;
+
+ case CLKMGR_CLKSRC_OSC1:
+ clock = cm_get_osc_clk_hz();
+ break;
+
+ case CLKMGR_CLKSRC_INTOSC:
+ clock = cm_get_intosc_clk_hz();
+ break;
+
+ case CLKMGR_CLKSRC_FPGA:
+ clock = cm_get_fpga_clk_hz();
+ break;
+ }
+
+ clock /= 1 + div;
+
+ return clock;
+}
+
+static ulong socfpga_clk_get_rate(struct clk *clk)
+{
+ struct socfpga_clk_plat *plat = dev_get_plat(clk->dev);
+
+ switch (clk->id) {
+ case N5X_MPU_CLK:
+ return clk_get_mpu_clk_hz(plat);
+ case N5X_L4_MAIN_CLK:
+ return clk_get_l4_main_clk_hz(plat);
+ case N5X_L4_SYS_FREE_CLK:
+ return clk_get_l4_sys_free_clk_hz(plat);
+ case N5X_L4_MP_CLK:
+ return clk_get_l4_mp_clk_hz(plat);
+ case N5X_L4_SP_CLK:
+ return clk_get_l4_sp_clk_hz(plat);
+ case N5X_SDMMC_CLK:
+ return clk_get_sdmmc_clk_hz(plat);
+ case N5X_EMAC0_CLK:
+ case N5X_EMAC1_CLK:
+ case N5X_EMAC2_CLK:
+ return clk_get_emac_clk_hz(plat, clk->id);
+ case N5X_USB_CLK:
+ case N5X_NAND_X_CLK:
+ return clk_get_l4_mp_clk_hz(plat);
+ case N5X_NAND_CLK:
+ return clk_get_l4_mp_clk_hz(plat) / 4;
+ default:
+ return -ENXIO;
+ }
+}
+
+static int socfpga_clk_enable(struct clk *clk)
+{
+ return 0;
+}
+
+static int socfpga_clk_probe(struct udevice *dev)
+{
+ const struct cm_config *cm_default_cfg = cm_get_default_config();
+
+ clk_basic_init(dev, cm_default_cfg);
+
+ return 0;
+}
+
+static int socfpga_clk_of_to_plat(struct udevice *dev)
+{
+ struct socfpga_clk_plat *plat = dev_get_plat(dev);
+ fdt_addr_t addr;
+
+ addr = devfdt_get_addr(dev);
+ if (addr == FDT_ADDR_T_NONE)
+ return -EINVAL;
+ plat->regs = (void __iomem *)addr;
+
+ return 0;
+}
+
+static struct clk_ops socfpga_clk_ops = {
+ .enable = socfpga_clk_enable,
+ .get_rate = socfpga_clk_get_rate,
+};
+
+static const struct udevice_id socfpga_clk_match[] = {
+ { .compatible = "intel,n5x-clkmgr" },
+ {}
+};
+
+U_BOOT_DRIVER(socfpga_n5x_clk) = {
+ .name = "clk-n5x",
+ .id = UCLASS_CLK,
+ .of_match = socfpga_clk_match,
+ .ops = &socfpga_clk_ops,
+ .probe = socfpga_clk_probe,
+ .of_to_plat = socfpga_clk_of_to_plat,
+ .plat_auto = sizeof(struct socfpga_clk_plat),
+};
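For reference, clk_get_pll_output_hz() above implements the relation f_pll = f_src * 2 * (divf + 1) / ((divr + 1) * 2^divq). A standalone sketch with hypothetical input clock and divider values (not taken from any real handoff configuration) is shown below; it only reproduces the arithmetic of that function.

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* Hypothetical reference clock and divider fields, for illustration */
	uint64_t fsrc = 25000000ULL;	/* e.g. a 25 MHz EOSC1 input */
	uint32_t divf = 47, divr = 0, divq = 1;

	/* Same relation as the return expression in clk_get_pll_output_hz():
	 * multiply by 2 * (divf + 1), divide by (divr + 1) and by 2^divq. */
	uint64_t fpll = (fsrc * 2 * (divf + 1)) / (((uint64_t)divr + 1) << divq);

	printf("PLL output: %llu Hz\n", (unsigned long long)fpll); /* 1200000000 */
	return 0;
}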
diff --git a/drivers/clk/altera/clk-n5x.h b/drivers/clk/altera/clk-n5x.h
new file mode 100644
index 00000000000..8c00e90f894
--- /dev/null
+++ b/drivers/clk/altera/clk-n5x.h
@@ -0,0 +1,217 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2020-2021 Intel Corporation <www.intel.com>
+ */
+
+#ifndef _CLK_N5X_
+#define _CLK_N5X_
+
+#ifndef __ASSEMBLY__
+#include <linux/bitops.h>
+#endif
+
+#define CM_REG_READL(plat, reg) \
+ readl((plat)->regs + (reg))
+
+#define CM_REG_WRITEL(plat, data, reg) \
+ writel(data, (plat)->regs + (reg))
+
+#define CM_REG_CLRBITS(plat, reg, clear) \
+ clrbits_le32((plat)->regs + (reg), (clear))
+
+#define CM_REG_SETBITS(plat, reg, set) \
+ setbits_le32((plat)->regs + (reg), (set))
+
+struct cm_config {
+ /* main group */
+ u32 main_pll_mpuclk;
+ u32 main_pll_nocclk;
+ u32 main_pll_nocdiv;
+ u32 main_pll_pllglob;
+ u32 main_pll_plldiv;
+ u32 main_pll_plloutdiv;
+ u32 spare_1[4];
+
+ /* peripheral group */
+ u32 per_pll_emacctl;
+ u32 per_pll_gpiodiv;
+ u32 per_pll_pllglob;
+ u32 per_pll_plldiv;
+ u32 per_pll_plloutdiv;
+ u32 spare_2[4];
+
+ /* altera group */
+ u32 alt_emacactr;
+ u32 alt_emacbctr;
+ u32 alt_emacptpctr;
+ u32 alt_gpiodbctr;
+ u32 alt_sdmmcctr;
+ u32 alt_s2fuser0ctr;
+ u32 alt_s2fuser1ctr;
+ u32 alt_psirefctr;
+
+ /* incoming clock */
+ u32 hps_osc_clk_hz;
+ u32 fpga_clk_hz;
+ u32 spare_3[3];
+
+ /* memory clock group */
+ u32 mem_memdiv;
+ u32 mem_pllglob;
+ u32 mem_plldiv;
+ u32 mem_plloutdiv;
+ u32 spare_4[4];
+};
+
+/* Clock Manager registers */
+#define CLKMGR_CTRL 0
+#define CLKMGR_STAT 4
+#define CLKMGR_TESTIOCTRL 8
+#define CLKMGR_INTRGEN 0x0c
+#define CLKMGR_INTRMSK 0x10
+#define CLKMGR_INTRCLR 0x14
+#define CLKMGR_INTRSTS 0x18
+#define CLKMGR_INTRSTK 0x1c
+#define CLKMGR_INTRRAW 0x20
+
+/* Clock Manager Main PPL group registers */
+#define CLKMGR_MAINPLL_EN 0x24
+#define CLKMGR_MAINPLL_ENS 0x28
+#define CLKMGR_MAINPLL_ENR 0x2c
+#define CLKMGR_MAINPLL_BYPASS 0x30
+#define CLKMGR_MAINPLL_BYPASSS 0x34
+#define CLKMGR_MAINPLL_BYPASSR 0x38
+#define CLKMGR_MAINPLL_MPUCLK 0x3c
+#define CLKMGR_MAINPLL_NOCCLK 0x40
+#define CLKMGR_MAINPLL_NOCDIV 0x44
+#define CLKMGR_MAINPLL_PLLGLOB 0x48
+#define CLKMGR_MAINPLL_PLLCTRL 0x4c
+#define CLKMGR_MAINPLL_PLLDIV 0x50
+#define CLKMGR_MAINPLL_PLLOUTDIV 0x54
+#define CLKMGR_MAINPLL_LOSTLOCK 0x58
+
+/* Clock Manager Peripheral PPL group registers */
+#define CLKMGR_PERPLL_EN 0x7c
+#define CLKMGR_PERPLL_ENS 0x80
+#define CLKMGR_PERPLL_ENR 0x84
+#define CLKMGR_PERPLL_BYPASS 0x88
+#define CLKMGR_PERPLL_BYPASSS 0x8c
+#define CLKMGR_PERPLL_BYPASSR 0x90
+#define CLKMGR_PERPLL_EMACCTL 0x94
+#define CLKMGR_PERPLL_GPIODIV 0x98
+#define CLKMGR_PERPLL_PLLGLOB 0x9c
+#define CLKMGR_PERPLL_PLLCTRL 0xa0
+#define CLKMGR_PERPLL_PLLDIV 0xa4
+#define CLKMGR_PERPLL_PLLOUTDIV 0xa8
+#define CLKMGR_PERPLL_LOSTLOCK 0xac
+
+/* Clock Manager Altera group registers */
+#define CLKMGR_ALTR_EMACACTR 0xd4
+#define CLKMGR_ALTR_EMACBCTR 0xd8
+#define CLKMGR_ALTR_EMACPTPCTR 0xdc
+#define CLKMGR_ALTR_GPIODBCTR 0xe0
+#define CLKMGR_ALTR_SDMMCCTR 0xe4
+#define CLKMGR_ALTR_S2FUSER0CTR 0xe8
+#define CLKMGR_ALTR_S2FUSER1CTR 0xec
+#define CLKMGR_ALTR_PSIREFCTR 0xf0
+#define CLKMGR_ALTR_EXTCNTRST 0xf4
+
+#define CLKMGR_CTRL_BOOTMODE BIT(0)
+
+#define CLKMGR_STAT_BUSY BIT(0)
+#define CLKMGR_STAT_MAINPLL_LOCKED BIT(8)
+#define CLKMGR_STAT_MAIN_TRANS BIT(9)
+#define CLKMGR_STAT_PERPLL_LOCKED BIT(16)
+#define CLKMGR_STAT_PERF_TRANS BIT(17)
+#define CLKMGR_STAT_BOOTMODE BIT(24)
+#define CLKMGR_STAT_BOOTCLKSRC BIT(25)
+
+#define CLKMGR_STAT_ALLPLL_LOCKED_MASK \
+ (CLKMGR_STAT_MAINPLL_LOCKED | CLKMGR_STAT_PERPLL_LOCKED)
+
+#define CLKMGR_INTER_MAINPLLLOCKED_MASK BIT(0)
+#define CLKMGR_INTER_PERPLLLOCKED_MASK BIT(1)
+#define CLKMGR_INTER_MAINPLLLOST_MASK BIT(2)
+#define CLKMGR_INTER_PERPLLLOST_MASK BIT(3)
+
+#define CLKMGR_CLKSRC_MASK GENMASK(18, 16)
+#define CLKMGR_CLKSRC_OFFSET 16
+#define CLKMGR_CLKSRC_MAIN 0
+#define CLKMGR_CLKSRC_PER 1
+#define CLKMGR_CLKSRC_OSC1 2
+#define CLKMGR_CLKSRC_INTOSC 3
+#define CLKMGR_CLKSRC_FPGA 4
+#define CLKMGR_CLKCNT_MSK GENMASK(10, 0)
+
+#define CLKMGR_BYPASS_MAINPLL_ALL 0x7
+#define CLKMGR_BYPASS_PERPLL_ALL 0x7f
+
+#define CLKMGR_NOCDIV_L4MAIN_OFFSET 0
+#define CLKMGR_NOCDIV_L4MPCLK_OFFSET 8
+#define CLKMGR_NOCDIV_L4SPCLK_OFFSET 16
+#define CLKMGR_NOCDIV_CSATCLK_OFFSET 24
+#define CLKMGR_NOCDIV_CSTRACECLK_OFFSET 26
+#define CLKMGR_NOCDIV_CSPDBGCLK_OFFSET 28
+#define CLKMGR_NOCDIV_DIVIDER_MASK 0x3
+
+#define CLKMGR_PLLGLOB_VCO_PSRC_MASK GENMASK(17, 16)
+#define CLKMGR_PLLGLOB_VCO_PSRC_OFFSET 16
+#define CLKMGR_PLLGLOB_LOSTLOCK_BYPASS_EN_MASK BIT(28)
+#define CLKMGR_PLLGLOB_CLR_LOSTLOCK_BYPASS_MASK BIT(29)
+
+#define CLKMGR_VCO_PSRC_EOSC1 0
+#define CLKMGR_VCO_PSRC_INTOSC 1
+#define CLKMGR_VCO_PSRC_F2S 2
+
+#define CLKMGR_PLLCTRL_BYPASS_MASK BIT(0)
+#define CLKMGR_PLLCTRL_RST_N_MASK BIT(1)
+
+#define CLKMGR_PLLDIV_REFCLKDIV_MASK GENMASK(5, 0)
+#define CLKMGR_PLLDIV_FDIV_MASK GENMASK(16, 8)
+#define CLKMGR_PLLDIV_OUTDIV_QDIV_MASK GENMASK(26, 24)
+#define CLKMGR_PLLDIV_RANGE_MASK GENMASK(30, 28)
+
+#define CLKMGR_PLLDIV_REFCLKDIV_OFFSET 0
+#define CLKMGR_PLLDIV_FDIV_OFFSET 8
+#define CLKMGR_PLLDIV_OUTDIV_QDIV_OFFSET 24
+#define CLKMGR_PLLDIV_RANGE_OFFSET 28
+
+#define CLKMGR_PLLOUTDIV_C0CNT_MASK GENMASK(4, 0)
+#define CLKMGR_PLLOUTDIV_C1CNT_MASK GENMASK(12, 8)
+#define CLKMGR_PLLOUTDIV_C2CNT_MASK GENMASK(20, 16)
+#define CLKMGR_PLLOUTDIV_C3CNT_MASK GENMASK(28, 24)
+
+#define CLKMGR_PLLOUTDIV_C0CNT_OFFSET 0
+#define CLKMGR_PLLOUTDIV_C1CNT_OFFSET 8
+#define CLKMGR_PLLOUTDIV_C2CNT_OFFSET 16
+#define CLKMGR_PLLOUTDIV_C3CNT_OFFSET 24
+
+#define CLKMGR_PLLCX_EN_SET_MSK BIT(27)
+#define CLKMGR_PLLCX_MUTE_SET_MSK BIT(28)
+
+#define CLKMGR_VCOCALIB_MSCNT_MASK GENMASK(23, 16)
+#define CLKMGR_VCOCALIB_MSCNT_OFFSET 16
+#define CLKMGR_VCOCALIB_HSCNT_MASK GENMASK(9, 0)
+#define CLKMGR_VCOCALIB_MSCNT_CONST 100
+#define CLKMGR_VCOCALIB_HSCNT_CONST 4
+
+#define CLKMGR_PLLM_MDIV_MASK GENMASK(9, 0)
+
+#define CLKMGR_LOSTLOCK_SET_MASK BIT(0)
+
+#define CLKMGR_PERPLLGRP_EN_SDMMCCLK_MASK BIT(5)
+#define CLKMGR_PERPLLGRP_EMACCTL_EMAC0SELB_OFFSET 26
+#define CLKMGR_PERPLLGRP_EMACCTL_EMAC0SELB_MASK BIT(26)
+#define CLKMGR_PERPLLGRP_EMACCTL_EMAC1SELB_OFFSET 27
+#define CLKMGR_PERPLLGRP_EMACCTL_EMAC1SELB_MASK BIT(27)
+#define CLKMGR_PERPLLGRP_EMACCTL_EMAC2SELB_OFFSET 28
+#define CLKMGR_PERPLLGRP_EMACCTL_EMAC2SELB_MASK BIT(28)
+
+#define CLKMGR_ALT_EMACCTR_SRC_OFFSET 16
+#define CLKMGR_ALT_EMACCTR_SRC_MASK GENMASK(18, 16)
+#define CLKMGR_ALT_EMACCTR_CNT_OFFSET 0
+#define CLKMGR_ALT_EMACCTR_CNT_MASK GENMASK(10, 0)
+
+#define CLKMGR_ALT_EXTCNTRST_ALLCNTRST_MASK GENMASK(15, 0)
+
+#endif /* _CLK_N5X_ */
diff --git a/drivers/clk/clk_stm32mp1.c b/drivers/clk/clk_stm32mp1.c
index da95b1ac2f2..114192bb321 100644
--- a/drivers/clk/clk_stm32mp1.c
+++ b/drivers/clk/clk_stm32mp1.c
@@ -560,6 +560,7 @@ static const struct stm32mp1_clk_gate stm32mp1_clk_gate[] = {
STM32MP1_CLK_SET_CLR(RCC_MP_APB5ENSETR, 2, I2C4_K, _I2C46_SEL),
STM32MP1_CLK_SET_CLR(RCC_MP_APB5ENSETR, 3, I2C6_K, _I2C46_SEL),
STM32MP1_CLK_SET_CLR(RCC_MP_APB5ENSETR, 8, RTCAPB, _PCLK5),
+ STM32MP1_CLK_SET_CLR(RCC_MP_APB5ENSETR, 16, BSEC, _UNKNOWN_SEL),
STM32MP1_CLK_SET_CLR(RCC_MP_APB5ENSETR, 20, STGEN_K, _STGEN_SEL),
STM32MP1_CLK_SET_CLR_F(RCC_MP_AHB2ENSETR, 5, ADC12, _HCLK2),
diff --git a/drivers/clk/clk_versaclock.c b/drivers/clk/clk_versaclock.c
new file mode 100644
index 00000000000..578668bcf83
--- /dev/null
+++ b/drivers/clk/clk_versaclock.c
@@ -0,0 +1,1100 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Driver for IDT Versaclock 5/6
+ *
+ * Derived from code Copyright (C) 2017 Marek Vasut <marek.vasut@gmail.com>
+ */
+
+#include <common.h>
+#include <clk.h>
+#include <clk-uclass.h>
+#include <dm.h>
+#include <errno.h>
+#include <i2c.h>
+#include <dm/device_compat.h>
+#include <log.h>
+#include <linux/clk-provider.h>
+#include <linux/kernel.h>
+#include <linux/math64.h>
+
+#include <dt-bindings/clk/versaclock.h>
+
+/* VersaClock5 registers */
+#define VC5_OTP_CONTROL 0x00
+
+/* Factory-reserved register block */
+#define VC5_RSVD_DEVICE_ID 0x01
+#define VC5_RSVD_ADC_GAIN_7_0 0x02
+#define VC5_RSVD_ADC_GAIN_15_8 0x03
+#define VC5_RSVD_ADC_OFFSET_7_0 0x04
+#define VC5_RSVD_ADC_OFFSET_15_8 0x05
+#define VC5_RSVD_TEMPY 0x06
+#define VC5_RSVD_OFFSET_TBIN 0x07
+#define VC5_RSVD_GAIN 0x08
+#define VC5_RSVD_TEST_NP 0x09
+#define VC5_RSVD_UNUSED 0x0a
+#define VC5_RSVD_BANDGAP_TRIM_UP 0x0b
+#define VC5_RSVD_BANDGAP_TRIM_DN 0x0c
+#define VC5_RSVD_CLK_R_12_CLK_AMP_4 0x0d
+#define VC5_RSVD_CLK_R_34_CLK_AMP_4 0x0e
+#define VC5_RSVD_CLK_AMP_123 0x0f
+
+/* Configuration register block */
+#define VC5_PRIM_SRC_SHDN 0x10
+#define VC5_PRIM_SRC_SHDN_EN_XTAL BIT(7)
+#define VC5_PRIM_SRC_SHDN_EN_CLKIN BIT(6)
+#define VC5_PRIM_SRC_SHDN_EN_DOUBLE_XTAL_FREQ BIT(3)
+#define VC5_PRIM_SRC_SHDN_SP BIT(1)
+#define VC5_PRIM_SRC_SHDN_EN_GBL_SHDN BIT(0)
+
+#define VC5_VCO_BAND 0x11
+#define VC5_XTAL_X1_LOAD_CAP 0x12
+#define VC5_XTAL_X2_LOAD_CAP 0x13
+#define VC5_REF_DIVIDER 0x15
+#define VC5_REF_DIVIDER_SEL_PREDIV2 BIT(7)
+#define VC5_REF_DIVIDER_REF_DIV(n) ((n) & 0x3f)
+
+#define VC5_VCO_CTRL_AND_PREDIV 0x16
+#define VC5_VCO_CTRL_AND_PREDIV_BYPASS_PREDIV BIT(7)
+
+#define VC5_FEEDBACK_INT_DIV 0x17
+#define VC5_FEEDBACK_INT_DIV_BITS 0x18
+#define VC5_FEEDBACK_FRAC_DIV(n) (0x19 + (n))
+#define VC5_RC_CONTROL0 0x1e
+#define VC5_RC_CONTROL1 0x1f
+/* Register 0x20 is factory reserved */
+
+/* Output divider control for divider 1,2,3,4 */
+#define VC5_OUT_DIV_CONTROL(idx) (0x21 + ((idx) * 0x10))
+#define VC5_OUT_DIV_CONTROL_RESET BIT(7)
+#define VC5_OUT_DIV_CONTROL_SELB_NORM BIT(3)
+#define VC5_OUT_DIV_CONTROL_SEL_EXT BIT(2)
+#define VC5_OUT_DIV_CONTROL_INT_MODE BIT(1)
+#define VC5_OUT_DIV_CONTROL_EN_FOD BIT(0)
+
+#define VC5_OUT_DIV_FRAC(idx, n) (0x22 + ((idx) * 0x10) + (n))
+#define VC5_OUT_DIV_FRAC4_OD_SCEE BIT(1)
+
+#define VC5_OUT_DIV_STEP_SPREAD(idx, n) (0x26 + ((idx) * 0x10) + (n))
+#define VC5_OUT_DIV_SPREAD_MOD(idx, n) (0x29 + ((idx) * 0x10) + (n))
+#define VC5_OUT_DIV_SKEW_INT(idx, n) (0x2b + ((idx) * 0x10) + (n))
+#define VC5_OUT_DIV_INT(idx, n) (0x2d + ((idx) * 0x10) + (n))
+#define VC5_OUT_DIV_SKEW_FRAC(idx) (0x2f + ((idx) * 0x10))
+/* Registers 0x30, 0x40, 0x50 are factory reserved */
+
+/* Clock control register for clock 1,2 */
+#define VC5_CLK_OUTPUT_CFG(idx, n) (0x60 + ((idx) * 0x2) + (n))
+#define VC5_CLK_OUTPUT_CFG0_CFG_SHIFT 5
+#define VC5_CLK_OUTPUT_CFG0_CFG_MASK GENMASK(7, VC5_CLK_OUTPUT_CFG0_CFG_SHIFT)
+
+#define VC5_CLK_OUTPUT_CFG0_CFG_LVPECL (VC5_LVPECL)
+#define VC5_CLK_OUTPUT_CFG0_CFG_CMOS (VC5_CMOS)
+#define VC5_CLK_OUTPUT_CFG0_CFG_HCSL33 (VC5_HCSL33)
+#define VC5_CLK_OUTPUT_CFG0_CFG_LVDS (VC5_LVDS)
+#define VC5_CLK_OUTPUT_CFG0_CFG_CMOS2 (VC5_CMOS2)
+#define VC5_CLK_OUTPUT_CFG0_CFG_CMOSD (VC5_CMOSD)
+#define VC5_CLK_OUTPUT_CFG0_CFG_HCSL25 (VC5_HCSL25)
+
+#define VC5_CLK_OUTPUT_CFG0_PWR_SHIFT 3
+#define VC5_CLK_OUTPUT_CFG0_PWR_MASK GENMASK(4, VC5_CLK_OUTPUT_CFG0_PWR_SHIFT)
+#define VC5_CLK_OUTPUT_CFG0_PWR_18 (0 << VC5_CLK_OUTPUT_CFG0_PWR_SHIFT)
+#define VC5_CLK_OUTPUT_CFG0_PWR_25 (2 << VC5_CLK_OUTPUT_CFG0_PWR_SHIFT)
+#define VC5_CLK_OUTPUT_CFG0_PWR_33 (3 << VC5_CLK_OUTPUT_CFG0_PWR_SHIFT)
+#define VC5_CLK_OUTPUT_CFG0_SLEW_SHIFT 0
+#define VC5_CLK_OUTPUT_CFG0_SLEW_MASK GENMASK(1, VC5_CLK_OUTPUT_CFG0_SLEW_SHIFT)
+#define VC5_CLK_OUTPUT_CFG0_SLEW_80 (0 << VC5_CLK_OUTPUT_CFG0_SLEW_SHIFT)
+#define VC5_CLK_OUTPUT_CFG0_SLEW_85 (1 << VC5_CLK_OUTPUT_CFG0_SLEW_SHIFT)
+#define VC5_CLK_OUTPUT_CFG0_SLEW_90 (2 << VC5_CLK_OUTPUT_CFG0_SLEW_SHIFT)
+#define VC5_CLK_OUTPUT_CFG0_SLEW_100 (3 << VC5_CLK_OUTPUT_CFG0_SLEW_SHIFT)
+#define VC5_CLK_OUTPUT_CFG1_EN_CLKBUF BIT(0)
+
+#define VC5_CLK_OE_SHDN 0x68
+#define VC5_CLK_OS_SHDN 0x69
+
+#define VC5_GLOBAL_REGISTER 0x76
+#define VC5_GLOBAL_REGISTER_GLOBAL_RESET BIT(5)
+
+/* PLL/VCO runs between 2.5 GHz and 3.0 GHz */
+#define VC5_PLL_VCO_MIN 2500000000UL
+#define VC5_PLL_VCO_MAX 3000000000UL
+
+/* VC5 Input mux settings */
+#define VC5_MUX_IN_XIN BIT(0)
+#define VC5_MUX_IN_CLKIN BIT(1)
+
+/* Maximum number of clk_out supported by this driver */
+#define VC5_MAX_CLK_OUT_NUM 5
+
+/* Maximum number of FODs supported by this driver */
+#define VC5_MAX_FOD_NUM 4
+
+/* flags to describe chip features */
+/* chip has built-in oscillator */
+#define VC5_HAS_INTERNAL_XTAL BIT(0)
+/* chip has PFD frequency doubler */
+#define VC5_HAS_PFD_FREQ_DBL BIT(1)
+
+/* Supported IDT VC5 models. */
+enum vc5_model {
+ IDT_VC5_5P49V5923,
+ IDT_VC5_5P49V5925,
+ IDT_VC5_5P49V5933,
+ IDT_VC5_5P49V5935,
+ IDT_VC6_5P49V6901,
+ IDT_VC6_5P49V6965,
+};
+
+/* Structure to describe features of a particular VC5 model */
+struct vc5_chip_info {
+ const enum vc5_model model;
+ const unsigned int clk_fod_cnt;
+ const unsigned int clk_out_cnt;
+ const u32 flags;
+};
+
+struct vc5_driver_data;
+
+struct vc5_hw_data {
+ struct clk hw;
+ struct vc5_driver_data *vc5;
+ u32 div_int;
+ u32 div_frc;
+ unsigned int num;
+};
+
+struct vc5_out_data {
+ struct clk hw;
+ struct vc5_driver_data *vc5;
+ unsigned int num;
+ unsigned int clk_output_cfg0;
+ unsigned int clk_output_cfg0_mask;
+};
+
+struct vc5_driver_data {
+ struct udevice *i2c;
+ const struct vc5_chip_info *chip_info;
+
+ struct clk *pin_xin;
+ struct clk *pin_clkin;
+ unsigned char clk_mux_ins;
+ struct clk clk_mux;
+ struct clk clk_mul;
+ struct clk clk_pfd;
+ struct vc5_hw_data clk_pll;
+ struct vc5_hw_data clk_fod[VC5_MAX_FOD_NUM];
+ struct vc5_out_data clk_out[VC5_MAX_CLK_OUT_NUM];
+};
+
+static const struct vc5_chip_info idt_5p49v5923_info = {
+ .model = IDT_VC5_5P49V5923,
+ .clk_fod_cnt = 2,
+ .clk_out_cnt = 3,
+ .flags = 0,
+};
+
+static const struct vc5_chip_info idt_5p49v5925_info = {
+ .model = IDT_VC5_5P49V5925,
+ .clk_fod_cnt = 4,
+ .clk_out_cnt = 5,
+ .flags = 0,
+};
+
+static const struct vc5_chip_info idt_5p49v5933_info = {
+ .model = IDT_VC5_5P49V5933,
+ .clk_fod_cnt = 2,
+ .clk_out_cnt = 3,
+ .flags = VC5_HAS_INTERNAL_XTAL,
+};
+
+static const struct vc5_chip_info idt_5p49v5935_info = {
+ .model = IDT_VC5_5P49V5935,
+ .clk_fod_cnt = 4,
+ .clk_out_cnt = 5,
+ .flags = VC5_HAS_INTERNAL_XTAL,
+};
+
+static const struct vc5_chip_info idt_5p49v6901_info = {
+ .model = IDT_VC6_5P49V6901,
+ .clk_fod_cnt = 4,
+ .clk_out_cnt = 5,
+ .flags = VC5_HAS_PFD_FREQ_DBL,
+};
+
+static const struct vc5_chip_info idt_5p49v6965_info = {
+ .model = IDT_VC6_5P49V6965,
+ .clk_fod_cnt = 4,
+ .clk_out_cnt = 5,
+ .flags = 0,
+};
+
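+/*
+ * Read-modify-write helper: read one register over I2C, clear the bits in
+ * mask and replace them with the corresponding bits of src, then write the
+ * register back.
+ */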
+static int vc5_update_bits(struct udevice *dev, unsigned int reg, unsigned int mask,
+ unsigned int src)
+{
+ int ret;
+ unsigned char cache;
+
+ ret = dm_i2c_read(dev, reg, &cache, 1);
+ if (ret < 0)
+ return ret;
+
+ cache &= ~mask;
+ cache |= mask & src;
+ ret = dm_i2c_write(dev, reg, (uchar *)&cache, 1);
+
+ return ret;
+}
+
+static unsigned long vc5_mux_get_rate(struct clk *hw)
+{
+ return clk_get_rate(clk_get_parent(hw));
+}
+
+static int vc5_mux_set_parent(struct clk *hw, unsigned char index)
+{
+ struct vc5_driver_data *vc5 = container_of(hw, struct vc5_driver_data, clk_mux);
+ const u8 mask = VC5_PRIM_SRC_SHDN_EN_XTAL | VC5_PRIM_SRC_SHDN_EN_CLKIN;
+ u8 src;
+
+ if (index > 1 || !vc5->clk_mux_ins)
+ return -EINVAL;
+
+ if (vc5->clk_mux_ins == (VC5_MUX_IN_CLKIN | VC5_MUX_IN_XIN)) {
+ if (index == 0)
+ src = VC5_PRIM_SRC_SHDN_EN_XTAL;
+ if (index == 1)
+ src = VC5_PRIM_SRC_SHDN_EN_CLKIN;
+ } else {
+ if (index != 0)
+ return -EINVAL;
+
+ if (vc5->clk_mux_ins == VC5_MUX_IN_XIN)
+ src = VC5_PRIM_SRC_SHDN_EN_XTAL;
+ else if (vc5->clk_mux_ins == VC5_MUX_IN_CLKIN)
+ src = VC5_PRIM_SRC_SHDN_EN_CLKIN;
+		else /* Invalid; should have been caught by versaclock_probe() */
+ return -EINVAL;
+ }
+
+ return vc5_update_bits(vc5->i2c, VC5_PRIM_SRC_SHDN, mask, src);
+}
+
+static const struct clk_ops vc5_mux_ops = {
+ .get_rate = vc5_mux_get_rate,
+};
+
+static unsigned long vc5_pfd_round_rate(struct clk *hw, unsigned long rate)
+{
+ struct clk *clk_parent = clk_get_parent(hw);
+ unsigned long parent_rate = clk_get_rate(clk_parent);
+ unsigned long idiv;
+
+ /* PLL cannot operate with input clock above 50 MHz. */
+ if (rate > 50000000)
+ return -EINVAL;
+
+ /* CLKIN within range of PLL input, feed directly to PLL. */
+ if (parent_rate <= 50000000)
+ return parent_rate;
+
+ idiv = DIV_ROUND_UP(parent_rate, rate);
+ if (idiv > 127)
+ return -EINVAL;
+
+ return parent_rate / idiv;
+}
+
+static unsigned long vc5_pfd_recalc_rate(struct clk *hw)
+{
+ struct vc5_driver_data *vc5 =
+ container_of(hw, struct vc5_driver_data, clk_pfd);
+ unsigned int prediv, div;
+ struct clk *clk_parent = clk_get_parent(hw);
+ unsigned long parent_rate = clk_get_rate(clk_parent);
+
+ dm_i2c_read(vc5->i2c, VC5_VCO_CTRL_AND_PREDIV, (uchar *)&prediv, 1);
+
+ /* The bypass_prediv is set, PLL fed from Ref_in directly. */
+ if (prediv & VC5_VCO_CTRL_AND_PREDIV_BYPASS_PREDIV)
+ return parent_rate;
+
+ dm_i2c_read(vc5->i2c, VC5_REF_DIVIDER, (uchar *)&div, 1);
+
+ /* The Sel_prediv2 is set, PLL fed from prediv2 (Ref_in / 2) */
+ if (div & VC5_REF_DIVIDER_SEL_PREDIV2)
+ return parent_rate / 2;
+ else
+ return parent_rate / VC5_REF_DIVIDER_REF_DIV(div);
+}
+
+static unsigned long vc5_pfd_set_rate(struct clk *hw, unsigned long rate)
+{
+ struct vc5_driver_data *vc5 =
+ container_of(hw, struct vc5_driver_data, clk_pfd);
+ unsigned long idiv;
+ u8 div;
+ struct clk *clk_parent = clk_get_parent(hw);
+ unsigned long parent_rate = clk_get_rate(clk_parent);
+
+ /* CLKIN within range of PLL input, feed directly to PLL. */
+ if (parent_rate <= 50000000) {
+ vc5_update_bits(vc5->i2c, VC5_VCO_CTRL_AND_PREDIV,
+ VC5_VCO_CTRL_AND_PREDIV_BYPASS_PREDIV,
+ VC5_VCO_CTRL_AND_PREDIV_BYPASS_PREDIV);
+ vc5_update_bits(vc5->i2c, VC5_REF_DIVIDER, 0xff, 0x00);
+ return 0;
+ }
+
+ idiv = DIV_ROUND_UP(parent_rate, rate);
+
+	/* We have a dedicated div-2 predivider. */
+ if (idiv == 2)
+ div = VC5_REF_DIVIDER_SEL_PREDIV2;
+ else
+ div = VC5_REF_DIVIDER_REF_DIV(idiv);
+
+ vc5_update_bits(vc5->i2c, VC5_REF_DIVIDER, 0xff, div);
+ vc5_update_bits(vc5->i2c, VC5_VCO_CTRL_AND_PREDIV,
+ VC5_VCO_CTRL_AND_PREDIV_BYPASS_PREDIV, 0);
+
+ return 0;
+}
+
+static const struct clk_ops vc5_pfd_ops = {
+ .round_rate = vc5_pfd_round_rate,
+ .get_rate = vc5_pfd_recalc_rate,
+ .set_rate = vc5_pfd_set_rate,
+};
+
+/*
+ * VersaClock5 PLL/VCO
+ */
+static unsigned long vc5_pll_recalc_rate(struct clk *hw)
+{
+ struct vc5_hw_data *hwdata = container_of(hw, struct vc5_hw_data, hw);
+ struct vc5_driver_data *vc = hwdata->vc5;
+ struct clk *clk_parent = clk_get_parent(hw);
+ unsigned long parent_rate = clk_get_rate(clk_parent);
+ u32 div_int, div_frc;
+ u8 fb[5];
+
+ dm_i2c_read(vc->i2c, VC5_FEEDBACK_INT_DIV, fb, 5);
+
+ div_int = (fb[0] << 4) | (fb[1] >> 4);
+ div_frc = (fb[2] << 16) | (fb[3] << 8) | fb[4];
+
+ /* The PLL divider has 12 integer bits and 24 fractional bits */
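+	/* i.e. rate = parent_rate * (div_int + div_frc / 2^24) */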
+ return (parent_rate * div_int) + ((parent_rate * div_frc) >> 24);
+}
+
+static unsigned long vc5_pll_round_rate(struct clk *hw, unsigned long rate)
+{
+ struct clk *clk_parent = clk_get_parent(hw);
+ unsigned long parent_rate = clk_get_rate(clk_parent);
+ struct vc5_hw_data *hwdata = container_of(hw, struct vc5_hw_data, hw);
+ u32 div_int;
+ u64 div_frc;
+
+ if (rate < VC5_PLL_VCO_MIN)
+ rate = VC5_PLL_VCO_MIN;
+ if (rate > VC5_PLL_VCO_MAX)
+ rate = VC5_PLL_VCO_MAX;
+
+ /* Determine integer part, which is 12 bit wide */
+ div_int = rate / parent_rate;
+ if (div_int > 0xfff)
+ rate = parent_rate * 0xfff;
+
+ /* Determine best fractional part, which is 24 bit wide */
+ div_frc = rate % parent_rate;
+ div_frc *= BIT(24) - 1;
+ do_div(div_frc, parent_rate);
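+	/*
+	 * div_frc now holds the fraction scaled to 24 bits:
+	 * (rate % parent_rate) * (2^24 - 1) / parent_rate
+	 */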
+
+ hwdata->div_int = div_int;
+ hwdata->div_frc = (u32)div_frc;
+
+ return (parent_rate * div_int) + ((parent_rate * div_frc) >> 24);
+}
+
+static unsigned long vc5_pll_set_rate(struct clk *hw, unsigned long rate)
+{
+ struct vc5_hw_data *hwdata = container_of(hw, struct vc5_hw_data, hw);
+ struct vc5_driver_data *vc5 = hwdata->vc5;
+ u8 fb[5];
+
+ fb[0] = hwdata->div_int >> 4;
+ fb[1] = hwdata->div_int << 4;
+ fb[2] = hwdata->div_frc >> 16;
+ fb[3] = hwdata->div_frc >> 8;
+ fb[4] = hwdata->div_frc;
+
+ return dm_i2c_write(vc5->i2c, VC5_FEEDBACK_INT_DIV, fb, 5);
+}
+
+static const struct clk_ops vc5_pll_ops = {
+ .round_rate = vc5_pll_round_rate,
+ .get_rate = vc5_pll_recalc_rate,
+ .set_rate = vc5_pll_set_rate,
+};
+
+static unsigned long vc5_fod_recalc_rate(struct clk *hw)
+{
+ struct vc5_hw_data *hwdata = container_of(hw, struct vc5_hw_data, hw);
+ struct vc5_driver_data *vc = hwdata->vc5;
+ struct clk *parent = &vc->clk_pll.hw;
+ unsigned long parent_rate = vc5_pll_recalc_rate(parent);
+
+ /* VCO frequency is divided by two before entering FOD */
+ u32 f_in = parent_rate / 2;
+ u32 div_int, div_frc;
+ u8 od_int[2];
+ u8 od_frc[4];
+
+ dm_i2c_read(vc->i2c, VC5_OUT_DIV_INT(hwdata->num, 0), od_int, 2);
+ dm_i2c_read(vc->i2c, VC5_OUT_DIV_FRAC(hwdata->num, 0), od_frc, 4);
+
+ div_int = (od_int[0] << 4) | (od_int[1] >> 4);
+ div_frc = (od_frc[0] << 22) | (od_frc[1] << 14) |
+ (od_frc[2] << 6) | (od_frc[3] >> 2);
+
+ /* Avoid division by zero if the output is not configured. */
+ if (div_int == 0 && div_frc == 0)
+ return 0;
+
+	/* The FOD divider has 12 integer bits and 30 fractional bits */
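+	/* i.e. rate = f_in / (div_int + div_frc / 2^24), done as 64-bit math */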
+ return div64_u64((u64)f_in << 24ULL, ((u64)div_int << 24ULL) + div_frc);
+}
+
+static unsigned long vc5_fod_round_rate(struct clk *hw, unsigned long rate)
+{
+ struct vc5_hw_data *hwdata = container_of(hw, struct vc5_hw_data, hw);
+ struct vc5_driver_data *vc = hwdata->vc5;
+ struct clk *parent = &vc->clk_pll.hw;
+ unsigned long parent_rate = vc5_pll_recalc_rate(parent);
+
+ /* VCO frequency is divided by two before entering FOD */
+ u32 f_in = parent_rate / 2;
+ u32 div_int;
+ u64 div_frc;
+
+ /* Determine integer part, which is 12 bit wide */
+ div_int = f_in / rate;
+
+ /*
+ * WARNING: The clock chip does not output signal if the integer part
+ * of the divider is 0xfff and fractional part is non-zero.
+ * Clamp the divider at 0xffe to keep the code simple.
+ */
+ if (div_int > 0xffe) {
+ div_int = 0xffe;
+ rate = f_in / div_int;
+ }
+
+ /* Determine best fractional part, which is 30 bit wide */
+ div_frc = f_in % rate;
+ div_frc <<= 24;
+ do_div(div_frc, rate);
+
+ hwdata->div_int = div_int;
+ hwdata->div_frc = (u32)div_frc;
+
+ return div64_u64((u64)f_in << 24ULL, ((u64)div_int << 24ULL) + div_frc);
+}
+
+static unsigned long vc5_fod_set_rate(struct clk *hw, unsigned long rate)
+{
+ struct vc5_hw_data *hwdata = container_of(hw, struct vc5_hw_data, hw);
+ struct vc5_driver_data *vc5 = hwdata->vc5;
+
+ u8 data[14] = {
+ hwdata->div_frc >> 22, hwdata->div_frc >> 14,
+ hwdata->div_frc >> 6, hwdata->div_frc << 2,
+ 0, 0, 0, 0, 0,
+ 0, 0,
+ hwdata->div_int >> 4, hwdata->div_int << 4,
+ 0
+ };
+
+ dm_i2c_write(vc5->i2c, VC5_OUT_DIV_FRAC(hwdata->num, 0), data, 14);
+
+ /*
+ * Toggle magic bit in undocumented register for unknown reason.
+ * This is what the IDT timing commander tool does and the chip
+ * datasheet somewhat implies this is needed, but the register
+ * and the bit is not documented.
+ */
+ vc5_update_bits(vc5->i2c, VC5_GLOBAL_REGISTER,
+ VC5_GLOBAL_REGISTER_GLOBAL_RESET, 0);
+ vc5_update_bits(vc5->i2c, VC5_GLOBAL_REGISTER,
+ VC5_GLOBAL_REGISTER_GLOBAL_RESET,
+ VC5_GLOBAL_REGISTER_GLOBAL_RESET);
+
+ return 0;
+}
+
+static const struct clk_ops vc5_fod_ops = {
+ .round_rate = vc5_fod_round_rate,
+ .get_rate = vc5_fod_recalc_rate,
+ .set_rate = vc5_fod_set_rate,
+};
+
+static int vc5_clk_out_prepare(struct clk *hw)
+{
+ struct udevice *dev;
+ struct vc5_driver_data *vc5;
+ struct vc5_out_data *hwdata;
+
+ const u8 mask = VC5_OUT_DIV_CONTROL_SELB_NORM |
+ VC5_OUT_DIV_CONTROL_SEL_EXT |
+ VC5_OUT_DIV_CONTROL_EN_FOD;
+ unsigned int src;
+ int ret;
+
+ uclass_get_device_by_name(UCLASS_CLK, clk_hw_get_name(hw), &dev);
+ vc5 = dev_get_priv(dev);
+ hwdata = &vc5->clk_out[hw->id];
+
+ /*
+ * If the input mux is disabled, enable it first and
+ * select source from matching FOD.
+ */
+
+ dm_i2c_read(vc5->i2c, VC5_OUT_DIV_CONTROL(hwdata->num), (uchar *)&src, 1);
+
+ if ((src & mask) == 0) {
+ src = VC5_OUT_DIV_CONTROL_RESET | VC5_OUT_DIV_CONTROL_EN_FOD;
+ ret = vc5_update_bits(vc5->i2c,
+ VC5_OUT_DIV_CONTROL(hwdata->num),
+ mask | VC5_OUT_DIV_CONTROL_RESET, src);
+ if (ret)
+ return ret;
+ }
+
+ /* Enable the clock buffer */
+ vc5_update_bits(vc5->i2c, VC5_CLK_OUTPUT_CFG(hwdata->num, 1),
+ VC5_CLK_OUTPUT_CFG1_EN_CLKBUF,
+ VC5_CLK_OUTPUT_CFG1_EN_CLKBUF);
+ if (hwdata->clk_output_cfg0_mask) {
+ vc5_update_bits(vc5->i2c, VC5_CLK_OUTPUT_CFG(hwdata->num, 0),
+ hwdata->clk_output_cfg0_mask,
+ hwdata->clk_output_cfg0);
+ }
+
+ return 0;
+}
+
+static int vc5_clk_out_unprepare(struct clk *hw)
+{
+ struct udevice *dev;
+ struct vc5_driver_data *vc5;
+ struct vc5_out_data *hwdata;
+ int ret;
+
+ uclass_get_device_by_name(UCLASS_CLK, clk_hw_get_name(hw), &dev);
+ vc5 = dev_get_priv(dev);
+ hwdata = &vc5->clk_out[hw->id];
+
+ /* Disable the clock buffer */
+ ret = vc5_update_bits(vc5->i2c, VC5_CLK_OUTPUT_CFG(hwdata->num, 1),
+ VC5_CLK_OUTPUT_CFG1_EN_CLKBUF, 0);
+
+ return ret;
+}
+
+static int vc5_clk_out_set_parent(struct vc5_driver_data *vc, u8 num, u8 index)
+{
+ const u8 mask = VC5_OUT_DIV_CONTROL_RESET |
+ VC5_OUT_DIV_CONTROL_SELB_NORM |
+ VC5_OUT_DIV_CONTROL_SEL_EXT |
+ VC5_OUT_DIV_CONTROL_EN_FOD;
+ const u8 extclk = VC5_OUT_DIV_CONTROL_SELB_NORM |
+ VC5_OUT_DIV_CONTROL_SEL_EXT;
+ u8 src = VC5_OUT_DIV_CONTROL_RESET;
+
+ if (index == 0)
+ src |= VC5_OUT_DIV_CONTROL_EN_FOD;
+ else
+ src |= extclk;
+
+ return vc5_update_bits(vc->i2c, VC5_OUT_DIV_CONTROL(num), mask, src);
+}
+
+/*
+ * Device tree references to the VersaClock point at the head device, so
+ * xlate needs to redirect them to clk_out[idx].
+ */
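+/*
+ * For example, a consumer node with "clocks = <&versaclock 3>" (illustrative
+ * phandle name) is redirected to clk_out[3].
+ */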
+static int vc5_clk_out_xlate(struct clk *hw, struct ofnode_phandle_args *args)
+{
+	if (args->args_count != 1) {
+		debug("Invalid args_count: %d\n", args->args_count);
+		return -EINVAL;
+	}
+
+	hw->id = args->args[0];
+
+ return 0;
+}
+
+static unsigned long vc5_clk_out_set_rate(struct clk *hw, unsigned long rate)
+{
+ struct udevice *dev;
+ struct vc5_driver_data *vc;
+ struct clk *parent;
+
+ uclass_get_device_by_name(UCLASS_CLK, clk_hw_get_name(hw), &dev);
+ vc = dev_get_priv(dev);
+ parent = clk_get_parent(&vc->clk_out[hw->id].hw);
+
+ /* setting the output rate really means setting the parent FOD rate */
+ return clk_set_rate(parent, clk_round_rate(parent, rate));
+}
+
+static unsigned long vc5_clk_out_get_rate(struct clk *hw)
+{
+ return clk_get_parent_rate(hw);
+}
+
+static const struct clk_ops vc5_clk_out_ops = {
+ .enable = vc5_clk_out_prepare,
+ .disable = vc5_clk_out_unprepare,
+ .set_rate = vc5_clk_out_set_rate,
+ .get_rate = vc5_clk_out_get_rate,
+};
+
+static const struct clk_ops vc5_clk_out_sel_ops = {
+ .enable = vc5_clk_out_prepare,
+ .disable = vc5_clk_out_unprepare,
+ .get_rate = vc5_clk_out_get_rate,
+};
+
+static const struct clk_ops vc5_clk_ops = {
+ .enable = vc5_clk_out_prepare,
+ .disable = vc5_clk_out_unprepare,
+ .of_xlate = vc5_clk_out_xlate,
+ .set_rate = vc5_clk_out_set_rate,
+ .get_rate = vc5_clk_out_get_rate,
+};
+
+static int vc5_map_index_to_output(const enum vc5_model model,
+ const unsigned int n)
+{
+ switch (model) {
+ case IDT_VC5_5P49V5933:
+ return (n == 0) ? 0 : 3;
+ case IDT_VC5_5P49V5923:
+ case IDT_VC5_5P49V5925:
+ case IDT_VC5_5P49V5935:
+ case IDT_VC6_5P49V6901:
+ case IDT_VC6_5P49V6965:
+ default:
+ return n;
+ }
+}
+
+static int vc5_update_mode(ofnode np_output,
+ struct vc5_out_data *clk_out)
+{
+ u32 value;
+
+ if (!ofnode_read_u32(np_output, "idt,mode", &value)) {
+ clk_out->clk_output_cfg0_mask |= VC5_CLK_OUTPUT_CFG0_CFG_MASK;
+ switch (value) {
+ case VC5_CLK_OUTPUT_CFG0_CFG_LVPECL:
+ case VC5_CLK_OUTPUT_CFG0_CFG_CMOS:
+ case VC5_CLK_OUTPUT_CFG0_CFG_HCSL33:
+ case VC5_CLK_OUTPUT_CFG0_CFG_LVDS:
+ case VC5_CLK_OUTPUT_CFG0_CFG_CMOS2:
+ case VC5_CLK_OUTPUT_CFG0_CFG_CMOSD:
+ case VC5_CLK_OUTPUT_CFG0_CFG_HCSL25:
+ clk_out->clk_output_cfg0 |=
+ value << VC5_CLK_OUTPUT_CFG0_CFG_SHIFT;
+ break;
+ default:
+ return -EINVAL;
+ }
+ }
+
+ return 0;
+}
+
+static int vc5_update_power(ofnode np_output, struct vc5_out_data *clk_out)
+{
+ u32 value;
+
+ if (!ofnode_read_u32(np_output, "idt,voltage-microvolt", &value)) {
+ clk_out->clk_output_cfg0_mask |= VC5_CLK_OUTPUT_CFG0_PWR_MASK;
+ switch (value) {
+ case 1800000:
+ clk_out->clk_output_cfg0 |= VC5_CLK_OUTPUT_CFG0_PWR_18;
+ break;
+ case 2500000:
+ clk_out->clk_output_cfg0 |= VC5_CLK_OUTPUT_CFG0_PWR_25;
+ break;
+ case 3300000:
+ clk_out->clk_output_cfg0 |= VC5_CLK_OUTPUT_CFG0_PWR_33;
+ break;
+ default:
+ return -EINVAL;
+ }
+ }
+ return 0;
+}
+
+static int vc5_map_cap_value(u32 femtofarads)
+{
+ int mapped_value;
+
+ /*
+	 * The datasheet explicitly states 9000 - 25000 fF with 0.5 pF
+	 * steps, but the Programmer's guide shows steps of 0.430 pF
+	 * (430 fF). Feedback from Renesas confirmed the 0.5 pF steps were
+	 * the goal, but 430 fF is the actual step size.
+	 * Because of this, the actual range ends at 22760 instead of 25000.
+ */
+ if (femtofarads < 9000 || femtofarads > 22760)
+ return -EINVAL;
+
+ /*
+ * The Programmer's guide shows XTAL[5:0] but in reality,
+ * XTAL[0] and XTAL[1] are both LSB which makes the math
+	 * strange. With clarification from Renesas, setting the
+ * values should be simpler by ignoring XTAL[0]
+ */
+ mapped_value = DIV_ROUND_CLOSEST(femtofarads - 9000, 430);
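+	/* e.g. 12010 fF: (12010 - 9000) / 430 rounds to 7, shifted below to 14 */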
+
+ /*
+ * Since the calculation ignores XTAL[0], there is one
+ * special case where mapped_value = 32. In reality, this means
+ * the real mapped value should be 111111b. In other cases,
+ * the mapped_value needs to be shifted 1 to the left.
+ */
+ if (mapped_value > 31)
+ mapped_value = 0x3f;
+ else
+ mapped_value <<= 1;
+
+ return mapped_value;
+}
+
+static int vc5_update_cap_load(ofnode node, struct vc5_driver_data *vc5)
+{
+ u32 value;
+ int mapped_value;
+
+ if (!ofnode_read_u32(node, "idt,xtal-load-femtofarads", &value)) {
+ mapped_value = vc5_map_cap_value(value);
+
+ if (mapped_value < 0)
+ return mapped_value;
+
+ /*
+ * The mapped_value is really the high 6 bits of
+ * VC5_XTAL_X1_LOAD_CAP and VC5_XTAL_X2_LOAD_CAP, so
+ * shift the value 2 places.
+ */
+ vc5_update_bits(vc5->i2c, VC5_XTAL_X1_LOAD_CAP, ~0x03, mapped_value << 2);
+ vc5_update_bits(vc5->i2c, VC5_XTAL_X2_LOAD_CAP, ~0x03, mapped_value << 2);
+ }
+
+ return 0;
+}
+
+static int vc5_update_slew(ofnode np_output, struct vc5_out_data *clk_out)
+{
+ u32 value;
+
+ if (!ofnode_read_u32(np_output, "idt,slew-percent", &value)) {
+ clk_out->clk_output_cfg0_mask |= VC5_CLK_OUTPUT_CFG0_SLEW_MASK;
+
+ switch (value) {
+ case 80:
+ clk_out->clk_output_cfg0 |= VC5_CLK_OUTPUT_CFG0_SLEW_80;
+ break;
+ case 85:
+ clk_out->clk_output_cfg0 |= VC5_CLK_OUTPUT_CFG0_SLEW_85;
+ break;
+ case 90:
+ clk_out->clk_output_cfg0 |= VC5_CLK_OUTPUT_CFG0_SLEW_90;
+ break;
+ case 100:
+ clk_out->clk_output_cfg0 |=
+ VC5_CLK_OUTPUT_CFG0_SLEW_100;
+ break;
+ default:
+ return -EINVAL;
+ }
+ }
+ return 0;
+}
+
+static int vc5_get_output_config(struct udevice *dev,
+ struct vc5_out_data *clk_out)
+{
+ ofnode np_output;
+ char child_name[5];
+ int ret = 0;
+
+ sprintf(child_name, "OUT%d", clk_out->num + 1);
+
+ np_output = dev_read_subnode(dev, child_name);
+
+ if (!ofnode_valid(np_output)) {
+ dev_dbg(dev, "Invalid clock output configuration OUT%d\n",
+ clk_out->num + 1);
+ return 0;
+ }
+
+ ret = vc5_update_mode(np_output, clk_out);
+ if (ret)
+ return ret;
+
+ ret = vc5_update_power(np_output, clk_out);
+ if (ret)
+ return ret;
+
+ ret = vc5_update_slew(np_output, clk_out);
+
+ return ret;
+}
+
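+/*
+ * Build a heap-allocated clock name of the form "<dev>.<clk>" or
+ * "<dev>.<clk><index>", e.g. "versaclock.pll" or "versaclock.fod0".
+ */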
+static char *versaclock_get_name(const char *dev_name, const char *clk_name, int index)
+{
+ int length;
+ char *buf;
+
+ if (index < 0)
+ length = snprintf(NULL, 0, "%s.%s", dev_name, clk_name) + 1;
+ else
+ length = snprintf(NULL, 0, "%s.%s%d", dev_name, clk_name, index) + 1;
+
+ buf = malloc(length);
+ if (!buf)
+		return ERR_PTR(-ENOMEM);
+
+ if (index < 0)
+ snprintf(buf, length, "%s.%s", dev_name, clk_name);
+ else
+ snprintf(buf, length, "%s.%s%d", dev_name, clk_name, index);
+
+ return buf;
+}
+
+int versaclock_probe(struct udevice *dev)
+{
+ struct vc5_driver_data *vc5 = dev_get_priv(dev);
+ struct vc5_chip_info *chip = (void *)dev_get_driver_data(dev);
+ unsigned int n, idx = 0;
+ char *mux_name, *pfd_name, *pll_name, *outsel_name;
+ char *out_name[VC5_MAX_CLK_OUT_NUM];
+ char *fod_name[VC5_MAX_FOD_NUM];
+ int ret;
+ u64 val;
+
+ val = (u64)dev_read_addr_ptr(dev);
+ ret = i2c_get_chip(dev->parent, val, 1, &vc5->i2c);
+
+ if (ret) {
+ dev_dbg(dev, "I2C probe failed.\n");
+ return ret;
+ }
+
+ vc5->chip_info = chip;
+ vc5->pin_xin = devm_clk_get(dev, "xin");
+
+ if (IS_ERR(vc5->pin_xin))
+ dev_dbg(dev, "failed to get xin clock\n");
+
+ ret = clk_enable(vc5->pin_xin);
+ if (ret)
+ dev_dbg(dev, "failed to enable XIN clock\n");
+
+ vc5->pin_clkin = devm_clk_get(dev, "clkin");
+
+ /* Register clock input mux */
+ if (!IS_ERR(vc5->pin_xin)) {
+ vc5->clk_mux_ins |= VC5_MUX_IN_XIN;
+ } else if (vc5->chip_info->flags & VC5_HAS_INTERNAL_XTAL) {
+ if (IS_ERR(vc5->pin_xin))
+ return PTR_ERR(vc5->pin_xin);
+ vc5->clk_mux_ins |= VC5_MUX_IN_XIN;
+ }
+
+ mux_name = versaclock_get_name(dev->name, "mux", -1);
+ if (IS_ERR(mux_name))
+ return PTR_ERR(mux_name);
+
+ clk_register(&vc5->clk_mux, "versaclock-mux", mux_name, vc5->pin_xin->dev->name);
+
+ if (!IS_ERR(vc5->pin_xin))
+ vc5_mux_set_parent(&vc5->clk_mux, 1);
+ else
+ vc5_mux_set_parent(&vc5->clk_mux, 0);
+
+ /* Configure Optional Loading Capacitance for external XTAL */
+ if (!(vc5->chip_info->flags & VC5_HAS_INTERNAL_XTAL)) {
+ ret = vc5_update_cap_load(dev_ofnode(dev), vc5);
+ if (ret)
+ dev_dbg(dev, "failed to vc5_update_cap_load\n");
+ }
+
+ /* Register PFD */
+ pfd_name = versaclock_get_name(dev->name, "pfd", -1);
+ if (IS_ERR(pfd_name)) {
+ ret = PTR_ERR(pfd_name);
+ goto free_mux;
+ }
+
+ ret = clk_register(&vc5->clk_pfd, "versaclock-pfd", pfd_name, vc5->clk_mux.dev->name);
+ if (ret)
+ goto free_pfd;
+
+ /* Register PLL */
+ vc5->clk_pll.num = 0;
+ vc5->clk_pll.vc5 = vc5;
+ pll_name = versaclock_get_name(dev->name, "pll", -1);
+ if (IS_ERR(pll_name)) {
+ ret = PTR_ERR(pll_name);
+ goto free_pfd;
+ }
+
+ ret = clk_register(&vc5->clk_pll.hw, "versaclock-pll", pll_name, vc5->clk_pfd.dev->name);
+ if (ret)
+ goto free_pll;
+
+ /* Register FODs */
+ for (n = 0; n < vc5->chip_info->clk_fod_cnt; n++) {
+ fod_name[n] = versaclock_get_name(dev->name, "fod", n);
+		if (IS_ERR(fod_name[n])) {
+ ret = PTR_ERR(fod_name[n]);
+ goto free_fod;
+ }
+ idx = vc5_map_index_to_output(vc5->chip_info->model, n);
+ vc5->clk_fod[n].num = idx;
+ vc5->clk_fod[n].vc5 = vc5;
+ ret = clk_register(&vc5->clk_fod[n].hw, "versaclock-fod", fod_name[n],
+ vc5->clk_pll.hw.dev->name);
+ if (ret)
+ goto free_fod;
+ }
+
+ /* Register MUX-connected OUT0_I2C_SELB output */
+ vc5->clk_out[0].num = idx;
+ vc5->clk_out[0].vc5 = vc5;
+ outsel_name = versaclock_get_name(dev->name, "out0_sel_i2cb", -1);
+ if (IS_ERR(outsel_name)) {
+ ret = PTR_ERR(outsel_name);
+ goto free_fod;
+	}
+
+ ret = clk_register(&vc5->clk_out[0].hw, "versaclock-outsel", outsel_name,
+ vc5->clk_mux.dev->name);
+ if (ret)
+ goto free_selb;
+
+ /* Register FOD-connected OUTx outputs */
+ for (n = 1; n < vc5->chip_info->clk_out_cnt; n++) {
+ idx = vc5_map_index_to_output(vc5->chip_info->model, n - 1);
+ out_name[n] = versaclock_get_name(dev->name, "out", n);
+ if (IS_ERR(out_name[n])) {
+ ret = PTR_ERR(out_name[n]);
+ goto free_selb;
+ }
+ vc5->clk_out[n].num = idx;
+ vc5->clk_out[n].vc5 = vc5;
+ ret = clk_register(&vc5->clk_out[n].hw, "versaclock-out", out_name[n],
+ vc5->clk_fod[idx].hw.dev->name);
+ if (ret)
+ goto free_out;
+ vc5_clk_out_set_parent(vc5, idx, 0);
+
+ /* Fetch Clock Output configuration from DT (if specified) */
+ ret = vc5_get_output_config(dev, &vc5->clk_out[n]);
+ if (ret) {
+ dev_dbg(dev, "failed to vc5_get_output_config()\n");
+ goto free_out;
+ }
+ }
+
+ return 0;
+
+free_out:
+ for (n = 1; n < vc5->chip_info->clk_out_cnt; n++) {
+ clk_free(&vc5->clk_out[n].hw);
+ free(out_name[n]);
+ }
+free_selb:
+ clk_free(&vc5->clk_out[0].hw);
+ free(outsel_name);
+free_fod:
+ for (n = 0; n < vc5->chip_info->clk_fod_cnt; n++) {
+ clk_free(&vc5->clk_fod[n].hw);
+ free(fod_name[n]);
+ }
+free_pll:
+ clk_free(&vc5->clk_pll.hw);
+ free(pll_name);
+free_pfd:
+ clk_free(&vc5->clk_pfd);
+ free(pfd_name);
+free_mux:
+ clk_free(&vc5->clk_mux);
+ free(mux_name);
+
+ return ret;
+}
+
+static const struct udevice_id versaclock_ids[] = {
+ { .compatible = "idt,5p49v5923", .data = (ulong)&idt_5p49v5923_info },
+ { .compatible = "idt,5p49v5925", .data = (ulong)&idt_5p49v5925_info },
+ { .compatible = "idt,5p49v5933", .data = (ulong)&idt_5p49v5933_info },
+ { .compatible = "idt,5p49v5935", .data = (ulong)&idt_5p49v5935_info },
+ { .compatible = "idt,5p49v6901", .data = (ulong)&idt_5p49v6901_info },
+ { .compatible = "idt,5p49v6965", .data = (ulong)&idt_5p49v6965_info },
+ {},
+};
+
+U_BOOT_DRIVER(versaclock) = {
+ .name = "versaclock",
+ .id = UCLASS_CLK,
+ .ops = &vc5_clk_ops,
+ .of_match = versaclock_ids,
+ .probe = versaclock_probe,
+ .priv_auto = sizeof(struct vc5_driver_data),
+};
+
+U_BOOT_DRIVER(versaclock_mux) = {
+ .name = "versaclock-mux",
+ .id = UCLASS_CLK,
+ .ops = &vc5_mux_ops,
+};
+
+U_BOOT_DRIVER(versaclock_pfd) = {
+ .name = "versaclock-pfd",
+ .id = UCLASS_CLK,
+ .ops = &vc5_pfd_ops,
+};
+
+U_BOOT_DRIVER(versaclock_pll) = {
+ .name = "versaclock-pll",
+ .id = UCLASS_CLK,
+ .ops = &vc5_pll_ops,
+};
+
+U_BOOT_DRIVER(versaclock_fod) = {
+ .name = "versaclock-fod",
+ .id = UCLASS_CLK,
+ .ops = &vc5_fod_ops,
+};
+
+U_BOOT_DRIVER(versaclock_out) = {
+ .name = "versaclock-out",
+ .id = UCLASS_CLK,
+ .ops = &vc5_clk_out_ops,
+};
+
+U_BOOT_DRIVER(versaclock_outsel) = {
+ .name = "versaclock-outsel",
+ .id = UCLASS_CLK,
+ .ops = &vc5_clk_out_sel_ops,
+};
diff --git a/drivers/clk/rockchip/clk_px30.c b/drivers/clk/rockchip/clk_px30.c
index 6b746f4c658..a49b6f19f4f 100644
--- a/drivers/clk/rockchip/clk_px30.c
+++ b/drivers/clk/rockchip/clk_px30.c
@@ -581,6 +581,32 @@ static ulong px30_mmc_set_clk(struct px30_clk_priv *priv,
return px30_mmc_get_clk(priv, clk_id);
}
+static ulong px30_sfc_get_clk(struct px30_clk_priv *priv, uint clk_id)
+{
+ struct px30_cru *cru = priv->cru;
+ u32 div, con;
+
+ con = readl(&cru->clksel_con[22]);
+ div = (con & SFC_DIV_CON_MASK) >> SFC_DIV_CON_SHIFT;
+
+ return DIV_TO_RATE(priv->gpll_hz, div);
+}
+
+static ulong px30_sfc_set_clk(struct px30_clk_priv *priv,
+ ulong clk_id, ulong set_rate)
+{
+ struct px30_cru *cru = priv->cru;
+ int src_clk_div;
+
+ src_clk_div = DIV_ROUND_UP(priv->gpll_hz, set_rate);
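+	/*
+	 * SFC clock = GPLL / div: select GPLL (mux 0) as parent and
+	 * program the divider field as div - 1.
+	 */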
+ rk_clrsetreg(&cru->clksel_con[22],
+ SFC_PLL_SEL_MASK | SFC_DIV_CON_MASK,
+ 0 << SFC_PLL_SEL_SHIFT |
+ (src_clk_div - 1) << SFC_DIV_CON_SHIFT);
+
+ return px30_sfc_get_clk(priv, clk_id);
+}
+
static ulong px30_pwm_get_clk(struct px30_clk_priv *priv, ulong clk_id)
{
struct px30_cru *cru = priv->cru;
@@ -1192,6 +1218,9 @@ static ulong px30_clk_get_rate(struct clk *clk)
case SCLK_EMMC_SAMPLE:
rate = px30_mmc_get_clk(priv, clk->id);
break;
+ case SCLK_SFC:
+ rate = px30_sfc_get_clk(priv, clk->id);
+ break;
case SCLK_I2C0:
case SCLK_I2C1:
case SCLK_I2C2:
@@ -1271,6 +1300,9 @@ static ulong px30_clk_set_rate(struct clk *clk, ulong rate)
case SCLK_EMMC:
ret = px30_mmc_set_clk(priv, clk->id, rate);
break;
+ case SCLK_SFC:
+ ret = px30_sfc_set_clk(priv, clk->id, rate);
+ break;
case SCLK_I2C0:
case SCLK_I2C1:
case SCLK_I2C2:
diff --git a/drivers/core/fdtaddr.c b/drivers/core/fdtaddr.c
index b9874c743d1..4ffbd6b2ebc 100644
--- a/drivers/core/fdtaddr.c
+++ b/drivers/core/fdtaddr.c
@@ -200,8 +200,7 @@ fdt_addr_t devfdt_get_addr_pci(const struct udevice *dev)
ulong addr;
addr = devfdt_get_addr(dev);
- if (CONFIG_IS_ENABLED(PCI) && IS_ENABLED(CONFIG_DM_PCI) &&
- addr == FDT_ADDR_T_NONE) {
+ if (CONFIG_IS_ENABLED(PCI) && addr == FDT_ADDR_T_NONE) {
struct fdt_pci_addr pci_addr;
u32 bar;
int ret;
diff --git a/drivers/core/uclass.c b/drivers/core/uclass.c
index 117d35ac49c..3146dfd0320 100644
--- a/drivers/core/uclass.c
+++ b/drivers/core/uclass.c
@@ -146,6 +146,9 @@ int uclass_get(enum uclass_id id, struct uclass **ucp)
{
struct uclass *uc;
+ /* Immediately fail if driver model is not set up */
+ if (!gd->uclass_root)
+ return -EDEADLK;
*ucp = NULL;
uc = uclass_find(id);
if (!uc) {
diff --git a/drivers/core/util.c b/drivers/core/util.c
index 91e93b0cf14..5be4ee79deb 100644
--- a/drivers/core/util.c
+++ b/drivers/core/util.c
@@ -11,17 +11,6 @@
#include <linux/libfdt.h>
#include <vsprintf.h>
-#if CONFIG_IS_ENABLED(DM_WARN)
-void dm_warn(const char *fmt, ...)
-{
- va_list args;
-
- va_start(args, fmt);
- vprintf(fmt, args);
- va_end(args);
-}
-#endif
-
int list_count_items(struct list_head *head)
{
struct list_head *node;
diff --git a/drivers/crypto/fsl/jobdesc.c b/drivers/crypto/fsl/jobdesc.c
index d2354155318..c350b328561 100644
--- a/drivers/crypto/fsl/jobdesc.c
+++ b/drivers/crypto/fsl/jobdesc.c
@@ -300,7 +300,7 @@ void inline_cnstr_jobdesc_rng_deinstantiation(u32 *desc, int handle)
void inline_cnstr_jobdesc_rng(u32 *desc, void *data_out, u32 size)
{
- dma_addr_t dma_data_out = virt_to_phys(data_out);
+ caam_dma_addr_t dma_data_out = virt_to_phys(data_out);
init_job_desc(desc, 0);
append_operation(desc, OP_ALG_ALGSEL_RNG | OP_TYPE_CLASS1_ALG |
diff --git a/drivers/ddr/altera/Makefile b/drivers/ddr/altera/Makefile
index 39dfee5d5a6..9fa5d85a27e 100644
--- a/drivers/ddr/altera/Makefile
+++ b/drivers/ddr/altera/Makefile
@@ -4,11 +4,12 @@
# Wolfgang Denk, DENX Software Engineering, wd@denx.de.
#
# (C) Copyright 2010, Thomas Chou <thomas@wytron.com.tw>
-# Copyright (C) 2014 Altera Corporation <www.altera.com>
+# Copyright (C) 2014-2021 Altera Corporation <www.altera.com>
ifdef CONFIG_$(SPL_)ALTERA_SDRAM
obj-$(CONFIG_TARGET_SOCFPGA_GEN5) += sdram_gen5.o sequencer.o
obj-$(CONFIG_TARGET_SOCFPGA_ARRIA10) += sdram_arria10.o
obj-$(CONFIG_TARGET_SOCFPGA_STRATIX10) += sdram_soc64.o sdram_s10.o
obj-$(CONFIG_TARGET_SOCFPGA_AGILEX) += sdram_soc64.o sdram_agilex.o
+obj-$(CONFIG_TARGET_SOCFPGA_N5X) += sdram_soc64.o sdram_n5x.o
endif
diff --git a/drivers/ddr/altera/sdram_n5x.c b/drivers/ddr/altera/sdram_n5x.c
new file mode 100644
index 00000000000..ac13ac4319c
--- /dev/null
+++ b/drivers/ddr/altera/sdram_n5x.c
@@ -0,0 +1,2298 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2020-2021 Intel Corporation <www.intel.com>
+ *
+ */
+
+#include <common.h>
+#include <clk.h>
+#include <div64.h>
+#include <dm.h>
+#include <errno.h>
+#include <fdtdec.h>
+#include <hang.h>
+#include <ram.h>
+#include <reset.h>
+#include "sdram_soc64.h"
+#include <wait_bit.h>
+#include <asm/arch/firewall.h>
+#include <asm/arch/handoff_soc64.h>
+#include <asm/arch/misc.h>
+#include <asm/arch/reset_manager.h>
+#include <asm/arch/system_manager.h>
+#include <asm/io.h>
+#include <linux/err.h>
+#include <linux/sizes.h>
+
+DECLARE_GLOBAL_DATA_PTR;
+
+/* MPFE NOC registers */
+#define FPGA2SDRAM_MGR_MAIN_SIDEBANDMGR_FLAGOUTSET0 0xF8024050
+
+/* Memory reset manager */
+#define MEM_RST_MGR_STATUS 0x8
+
+/* Register and bit in memory reset manager */
+#define MEM_RST_MGR_STATUS_RESET_COMPLETE BIT(0)
+#define MEM_RST_MGR_STATUS_PWROKIN_STATUS BIT(1)
+#define MEM_RST_MGR_STATUS_CONTROLLER_RST BIT(2)
+#define MEM_RST_MGR_STATUS_AXI_RST BIT(3)
+
+#define TIMEOUT_200MS 200
+#define TIMEOUT_5000MS 5000
+
+/* DDR4 umctl2 */
+#define DDR4_MSTR_OFFSET 0x0
+#define DDR4_FREQ_RATIO BIT(22)
+
+#define DDR4_STAT_OFFSET 0x4
+#define DDR4_STAT_SELFREF_TYPE GENMASK(5, 4)
+#define DDR4_STAT_SELFREF_TYPE_SHIFT 4
+#define DDR4_STAT_OPERATING_MODE GENMASK(2, 0)
+
+#define DDR4_MRCTRL0_OFFSET 0x10
+#define DDR4_MRCTRL0_MR_TYPE BIT(0)
+#define DDR4_MRCTRL0_MPR_EN BIT(1)
+#define DDR4_MRCTRL0_MR_RANK GENMASK(5, 4)
+#define DDR4_MRCTRL0_MR_RANK_SHIFT 4
+#define DDR4_MRCTRL0_MR_ADDR GENMASK(15, 12)
+#define DDR4_MRCTRL0_MR_ADDR_SHIFT 12
+#define DDR4_MRCTRL0_MR_WR BIT(31)
+
+#define DDR4_MRCTRL1_OFFSET 0x14
+#define DDR4_MRCTRL1_MR_DATA 0x3FFFF
+
+#define DDR4_MRSTAT_OFFSET 0x18
+#define DDR4_MRSTAT_MR_WR_BUSY BIT(0)
+
+#define DDR4_MRCTRL2_OFFSET 0x1C
+
+#define DDR4_PWRCTL_OFFSET 0x30
+#define DDR4_PWRCTL_SELFREF_EN BIT(0)
+#define DDR4_PWRCTL_POWERDOWN_EN BIT(1)
+#define DDR4_PWRCTL_EN_DFI_DRAM_CLK_DISABLE BIT(3)
+#define DDR4_PWRCTL_SELFREF_SW BIT(5)
+
+#define DDR4_PWRTMG_OFFSET 0x34
+#define DDR4_HWLPCTL_OFFSET 0x38
+#define DDR4_RFSHCTL0_OFFSET 0x50
+#define DDR4_RFSHCTL1_OFFSET 0x54
+
+#define DDR4_RFSHCTL3_OFFSET 0x60
+#define DDR4_RFSHCTL3_DIS_AUTO_REFRESH BIT(0)
+#define DDR4_RFSHCTL3_REFRESH_MODE GENMASK(6, 4)
+#define DDR4_RFSHCTL3_REFRESH_MODE_SHIFT 4
+
+#define DDR4_ECCCFG0_OFFSET 0x70
+#define DDR4_ECC_MODE GENMASK(2, 0)
+#define DDR4_DIS_SCRUB BIT(4)
+#define LPDDR4_ECCCFG0_ECC_REGION_MAP_GRANU_SHIFT 30
+#define LPDDR4_ECCCFG0_ECC_REGION_MAP_SHIFT 8
+
+#define DDR4_ECCCFG1_OFFSET 0x74
+#define LPDDR4_ECCCFG1_ECC_REGIONS_PARITY_LOCK BIT(4)
+
+#define DDR4_CRCPARCTL0_OFFSET 0xC0
+#define DDR4_CRCPARCTL0_DFI_ALERT_ERR_INIT_CLR BIT(1)
+
+#define DDR4_CRCPARCTL1_OFFSET 0xC4
+#define DDR4_CRCPARCTL1_CRC_PARITY_RETRY_ENABLE BIT(8)
+#define DDR4_CRCPARCTL1_ALERT_WAIT_FOR_SW BIT(9)
+
+#define DDR4_CRCPARSTAT_OFFSET 0xCC
+#define DDR4_CRCPARSTAT_DFI_ALERT_ERR_INT BIT(16)
+#define DDR4_CRCPARSTAT_DFI_ALERT_ERR_FATL_INT BIT(17)
+#define DDR4_CRCPARSTAT_DFI_ALERT_ERR_NO_SW BIT(19)
+#define DDR4_CRCPARSTAT_CMD_IN_ERR_WINDOW BIT(29)
+
+#define DDR4_INIT0_OFFSET 0xD0
+#define DDR4_INIT0_SKIP_RAM_INIT GENMASK(31, 30)
+
+#define DDR4_RANKCTL_OFFSET 0xF4
+#define DDR4_RANKCTL_DIFF_RANK_RD_GAP GENMASK(7, 4)
+#define DDR4_RANKCTL_DIFF_RANK_WR_GAP GENMASK(11, 8)
+#define DDR4_RANKCTL_DIFF_RANK_RD_GAP_MSB BIT(24)
+#define DDR4_RANKCTL_DIFF_RANK_WR_GAP_MSB BIT(26)
+#define DDR4_RANKCTL_DIFF_RANK_RD_GAP_SHIFT 4
+#define DDR4_RANKCTL_DIFF_RANK_WR_GAP_SHIFT 8
+#define DDR4_RANKCTL_DIFF_RANK_RD_GAP_MSB_SHIFT 24
+#define DDR4_RANKCTL_DIFF_RANK_WR_GAP_MSB_SHIFT 26
+
+#define DDR4_RANKCTL1_OFFSET 0xF8
+#define DDR4_RANKCTL1_WR2RD_DR GENMASK(5, 0)
+
+#define DDR4_DRAMTMG2_OFFSET 0x108
+#define DDR4_DRAMTMG2_WR2RD GENMASK(5, 0)
+#define DDR4_DRAMTMG2_RD2WR GENMASK(13, 8)
+#define DDR4_DRAMTMG2_RD2WR_SHIFT 8
+
+#define DDR4_DRAMTMG9_OFFSET 0x124
+#define DDR4_DRAMTMG9_W2RD_S GENMASK(5, 0)
+
+#define DDR4_DFITMG1_OFFSET 0x194
+#define DDR4_DFITMG1_DFI_T_WRDATA_DELAY GENMASK(20, 16)
+#define DDR4_DFITMG1_DFI_T_WRDATA_SHIFT 16
+
+#define DDR4_DFIMISC_OFFSET 0x1B0
+#define DDR4_DFIMISC_DFI_INIT_COMPLETE_EN BIT(0)
+#define DDR4_DFIMISC_DFI_INIT_START BIT(5)
+
+#define DDR4_DFISTAT_OFFSET 0x1BC
+#define DDR4_DFI_INIT_COMPLETE BIT(0)
+
+#define DDR4_DBG0_OFFSET 0x300
+
+#define DDR4_DBG1_OFFSET 0x304
+#define DDR4_DBG1_DISDQ BIT(0)
+#define DDR4_DBG1_DIS_HIF BIT(1)
+
+#define DDR4_DBGCAM_OFFSET 0x308
+#define DDR4_DBGCAM_DBG_RD_Q_EMPTY BIT(25)
+#define DDR4_DBGCAM_DBG_WR_Q_EMPTY BIT(26)
+#define DDR4_DBGCAM_RD_DATA_PIPELINE_EMPTY BIT(28)
+#define DDR4_DBGCAM_WR_DATA_PIPELINE_EMPTY BIT(29)
+
+#define DDR4_SWCTL_OFFSET 0x320
+#define DDR4_SWCTL_SW_DONE BIT(0)
+
+#define DDR4_SWSTAT_OFFSET 0x324
+#define DDR4_SWSTAT_SW_DONE_ACK BIT(0)
+
+#define DDR4_PSTAT_OFFSET 0x3FC
+#define DDR4_PSTAT_RD_PORT_BUSY_0 BIT(0)
+#define DDR4_PSTAT_WR_PORT_BUSY_0 BIT(16)
+
+#define DDR4_PCTRL0_OFFSET 0x490
+#define DDR4_PCTRL0_PORT_EN BIT(0)
+
+#define DDR4_SBRCTL_OFFSET 0xF24
+#define DDR4_SBRCTL_SCRUB_INTERVAL 0x1FFF00
+#define DDR4_SBRCTL_SCRUB_EN BIT(0)
+#define DDR4_SBRCTL_SCRUB_WRITE BIT(2)
+#define DDR4_SBRCTL_SCRUB_BURST_1 BIT(4)
+
+#define DDR4_SBRSTAT_OFFSET 0xF28
+#define DDR4_SBRSTAT_SCRUB_BUSY BIT(0)
+#define DDR4_SBRSTAT_SCRUB_DONE BIT(1)
+
+#define DDR4_SBRWDATA0_OFFSET 0xF2C
+#define DDR4_SBRWDATA1_OFFSET 0xF30
+#define DDR4_SBRSTART0_OFFSET 0xF38
+#define DDR4_SBRSTART1_OFFSET 0xF3C
+#define DDR4_SBRRANGE0_OFFSET 0xF40
+#define DDR4_SBRRANGE1_OFFSET 0xF44
+
+/* DDR PHY */
+#define DDR_PHY_TXODTDRVSTREN_B0_P0 0x2009A
+#define DDR_PHY_RXPBDLYTG0_R0 0x200D0
+#define DDR_PHY_DBYTE0_TXDQDLYTG0_U0_P0 0x201A0
+
+#define DDR_PHY_DBYTE0_TXDQDLYTG0_U1_P0 0x203A0
+#define DDR_PHY_DBYTE1_TXDQDLYTG0_U0_P0 0x221A0
+#define DDR_PHY_DBYTE1_TXDQDLYTG0_U1_P0 0x223A0
+#define DDR_PHY_TXDQDLYTG0_COARSE_DELAY GENMASK(9, 6)
+#define DDR_PHY_TXDQDLYTG0_COARSE_DELAY_SHIFT 6
+
+#define DDR_PHY_CALRATE_OFFSET 0x40110
+#define DDR_PHY_CALZAP_OFFSET 0x40112
+#define DDR_PHY_SEQ0BDLY0_P0_OFFSET 0x40016
+#define DDR_PHY_SEQ0BDLY1_P0_OFFSET 0x40018
+#define DDR_PHY_SEQ0BDLY2_P0_OFFSET 0x4001A
+#define DDR_PHY_SEQ0BDLY3_P0_OFFSET 0x4001C
+
+#define DDR_PHY_MEMRESETL_OFFSET 0x400C0
+#define DDR_PHY_MEMRESETL_VALUE BIT(0)
+#define DDR_PHY_PROTECT_MEMRESET BIT(1)
+
+#define DDR_PHY_CALBUSY_OFFSET 0x4012E
+#define DDR_PHY_CALBUSY BIT(0)
+
+#define DDR_PHY_TRAIN_IMEM_OFFSET 0xA0000
+#define DDR_PHY_TRAIN_DMEM_OFFSET 0xA8000
+
+#define DMEM_MB_CDD_RR_1_0_OFFSET 0xA802C
+#define DMEM_MB_CDD_RR_0_1_OFFSET 0xA8030
+#define DMEM_MB_CDD_WW_1_0_OFFSET 0xA8038
+#define DMEM_MB_CDD_WW_0_1_OFFSET 0xA803C
+#define DMEM_MB_CDD_RW_1_1_OFFSET 0xA8046
+#define DMEM_MB_CDD_RW_1_0_OFFSET 0xA8048
+#define DMEM_MB_CDD_RW_0_1_OFFSET 0xA804A
+#define DMEM_MB_CDD_RW_0_0_OFFSET 0xA804C
+
+#define DMEM_MB_CDD_CHA_RR_1_0_OFFSET 0xA8026
+#define DMEM_MB_CDD_CHA_RR_0_1_OFFSET 0xA8026
+#define DMEM_MB_CDD_CHB_RR_1_0_OFFSET 0xA8058
+#define DMEM_MB_CDD_CHB_RR_0_1_OFFSET 0xA805A
+#define DMEM_MB_CDD_CHA_WW_1_0_OFFSET 0xA8030
+#define DMEM_MB_CDD_CHA_WW_0_1_OFFSET 0xA8030
+#define DMEM_MB_CDD_CHB_WW_1_0_OFFSET 0xA8062
+#define DMEM_MB_CDD_CHB_WW_0_1_OFFSET 0xA8064
+
+#define DMEM_MB_CDD_CHA_RW_1_1_OFFSET 0xA8028
+#define DMEM_MB_CDD_CHA_RW_1_0_OFFSET 0xA8028
+#define DMEM_MB_CDD_CHA_RW_0_1_OFFSET 0xA802A
+#define DMEM_MB_CDD_CHA_RW_0_0_OFFSET 0xA802A
+
+#define DMEM_MB_CDD_CHB_RW_1_1_OFFSET 0xA805A
+#define DMEM_MB_CDD_CHB_RW_1_0_OFFSET 0xA805C
+#define DMEM_MB_CDD_CHB_RW_0_1_OFFSET			0xA805C
+#define DMEM_MB_CDD_CHB_RW_0_0_OFFSET 0xA805E
+
+#define DDR_PHY_SEQ0DISABLEFLAG0_OFFSET 0x120018
+#define DDR_PHY_SEQ0DISABLEFLAG1_OFFSET 0x12001A
+#define DDR_PHY_SEQ0DISABLEFLAG2_OFFSET 0x12001C
+#define DDR_PHY_SEQ0DISABLEFLAG3_OFFSET 0x12001E
+#define DDR_PHY_SEQ0DISABLEFLAG4_OFFSET 0x120020
+#define DDR_PHY_SEQ0DISABLEFLAG5_OFFSET 0x120022
+#define DDR_PHY_SEQ0DISABLEFLAG6_OFFSET 0x120024
+#define DDR_PHY_SEQ0DISABLEFLAG7_OFFSET 0x120026
+
+#define DDR_PHY_UCCLKHCLKENABLES_OFFSET 0x180100
+#define DDR_PHY_UCCLKHCLKENABLES_UCCLKEN BIT(0)
+#define DDR_PHY_UCCLKHCLKENABLES_HCLKEN BIT(1)
+
+#define DDR_PHY_UCTWRITEPROT_OFFSET 0x180066
+#define DDR_PHY_UCTWRITEPROT BIT(0)
+
+#define DDR_PHY_APBONLY0_OFFSET 0x1A0000
+#define DDR_PHY_MICROCONTMUXSEL BIT(0)
+
+#define DDR_PHY_UCTSHADOWREGS_OFFSET 0x1A0008
+#define DDR_PHY_UCTSHADOWREGS_UCTWRITEPROTESHADOW BIT(0)
+
+#define DDR_PHY_DCTWRITEPROT_OFFSET 0x1A0062
+#define DDR_PHY_DCTWRITEPROT BIT(0)
+
+#define DDR_PHY_UCTWRITEONLYSHADOW_OFFSET 0x1A0064
+#define DDR_PHY_UCTDATWRITEONLYSHADOW_OFFSET 0x1A0068
+
+#define DDR_PHY_MICRORESET_OFFSET 0x1A0132
+#define DDR_PHY_MICRORESET_STALL BIT(0)
+#define DDR_PHY_MICRORESET_RESET BIT(3)
+
+#define DDR_PHY_TXODTDRVSTREN_B0_P1 0x22009A
+
+/* For firmware training */
+#define HW_DBG_TRACE_CONTROL_OFFSET 0x18
+#define FW_TRAINING_COMPLETED_STAT 0x07
+#define FW_TRAINING_FAILED_STAT 0xFF
+#define FW_COMPLETION_MSG_ONLY_MODE 0xFF
+#define FW_STREAMING_MSG_ID 0x08
+#define GET_LOWHW_DATA(x) ((x) & 0xFFFF)
+#define GET_LOWB_DATA(x) ((x) & 0xFF)
+#define GET_HIGHB_DATA(x) (((x) & 0xFF00) >> 8)
+
+/* Operating mode */
+#define OPM_INIT 0x000
+#define OPM_NORMAL 0x001
+#define OPM_PWR_D0WN 0x010
+#define OPM_SELF_SELFREF 0x011
+#define OPM_DDR4_DEEP_PWR_DOWN 0x100
+
+/* Refresh mode */
+#define FIXED_1X 0
+#define FIXED_2X BIT(0)
+#define FIXED_4X BIT(4)
+
+/* Address of mode register */
+#define MR0 0x0000
+#define MR1 0x0001
+#define MR2 0x0010
+#define MR3 0x0011
+#define MR4 0x0100
+#define MR5 0x0101
+#define MR6 0x0110
+#define MR7 0x0111
+
+/* MR rank */
+#define RANK0 0x1
+#define RANK1 0x2
+#define ALL_RANK 0x3
+
+#define MR5_BIT4 BIT(4)
+
+/* Value for ecc_region_map */
+#define ALL_PROTECTED 0x7F
+
+/* Region size for ECCCFG0.ecc_region_map */
+enum region_size {
+ ONE_EIGHT,
+ ONE_SIXTEENTH,
+ ONE_THIRTY_SECOND,
+ ONE_SIXTY_FOURTH
+};
+
+enum ddr_type {
+ DDRTYPE_LPDDR4_0,
+ DDRTYPE_LPDDR4_1,
+ DDRTYPE_DDR4,
+ DDRTYPE_UNKNOWN
+};
+
+/* Reset type */
+enum reset_type {
+ POR_RESET,
+ WARM_RESET,
+ COLD_RESET
+};
+
+/* DDR handoff structure */
+struct ddr_handoff {
+ /* Memory reset manager base */
+ phys_addr_t mem_reset_base;
+
+ /* First controller attributes */
+ phys_addr_t cntlr_handoff_base;
+ phys_addr_t cntlr_base;
+ size_t cntlr_total_length;
+ enum ddr_type cntlr_t;
+ size_t cntlr_handoff_length;
+
+ /* Second controller attributes*/
+ phys_addr_t cntlr2_handoff_base;
+ phys_addr_t cntlr2_base;
+ size_t cntlr2_total_length;
+ enum ddr_type cntlr2_t;
+ size_t cntlr2_handoff_length;
+
+ /* PHY attributes */
+ phys_addr_t phy_handoff_base;
+ phys_addr_t phy_base;
+ size_t phy_total_length;
+ size_t phy_handoff_length;
+
+ /* PHY engine attributes */
+ phys_addr_t phy_engine_handoff_base;
+ size_t phy_engine_total_length;
+ size_t phy_engine_handoff_length;
+
+ /* Calibration attributes */
+ phys_addr_t train_imem_base;
+ phys_addr_t train_dmem_base;
+ size_t train_imem_length;
+ size_t train_dmem_length;
+};
+
+/* Message mode */
+enum message_mode {
+ MAJOR_MESSAGE,
+ STREAMING_MESSAGE
+};
+
+static int clr_ca_parity_error_status(phys_addr_t umctl2_base)
+{
+ int ret;
+
+ debug("%s: Clear C/A parity error status in MR5[4]\n", __func__);
+
+ /* Set mode register MRS */
+ clrbits_le32(umctl2_base + DDR4_MRCTRL0_OFFSET, DDR4_MRCTRL0_MPR_EN);
+
+ /* Set mode register to write operation */
+ setbits_le32(umctl2_base + DDR4_MRCTRL0_OFFSET, DDR4_MRCTRL0_MR_TYPE);
+
+	/* Set the address of the mode register to 0x101 (MR5) */
+ setbits_le32(umctl2_base + DDR4_MRCTRL0_OFFSET,
+ (MR5 << DDR4_MRCTRL0_MR_ADDR_SHIFT) &
+ DDR4_MRCTRL0_MR_ADDR);
+
+ /* Set MR rank to rank 1 */
+ setbits_le32(umctl2_base + DDR4_MRCTRL0_OFFSET,
+ (RANK1 << DDR4_MRCTRL0_MR_RANK_SHIFT) &
+ DDR4_MRCTRL0_MR_RANK);
+
+ /* Clear C/A parity error status in MR5[4] */
+ clrbits_le32(umctl2_base + DDR4_MRCTRL1_OFFSET, MR5_BIT4);
+
+ /* Trigger mode register read or write operation */
+ setbits_le32(umctl2_base + DDR4_MRCTRL0_OFFSET, DDR4_MRCTRL0_MR_WR);
+
+ /* Wait for retry done */
+ ret = wait_for_bit_le32((const void *)(umctl2_base +
+ DDR4_MRSTAT_OFFSET), DDR4_MRSTAT_MR_WR_BUSY,
+ false, TIMEOUT_200MS, false);
+ if (ret) {
+ debug("%s: Timeout while waiting for", __func__);
+ debug(" no outstanding MR transaction\n");
+ return ret;
+ }
+
+ return 0;
+}
+
+static int ddr_retry_software_sequence(phys_addr_t umctl2_base)
+{
+ u32 value;
+ int ret;
+
+	/* Check whether software can perform MRS/MPR/PDA */
+ value = readl(umctl2_base + DDR4_CRCPARSTAT_OFFSET) &
+ DDR4_CRCPARSTAT_DFI_ALERT_ERR_NO_SW;
+
+ if (value) {
+ /* Clear interrupt bit for DFI alert error */
+ setbits_le32(umctl2_base + DDR4_CRCPARCTL0_OFFSET,
+ DDR4_CRCPARCTL0_DFI_ALERT_ERR_INIT_CLR);
+ }
+
+ debug("%s: Software can perform MRS/MPR/PDA\n", __func__);
+
+ ret = wait_for_bit_le32((const void *)(umctl2_base +
+ DDR4_MRSTAT_OFFSET),
+ DDR4_MRSTAT_MR_WR_BUSY,
+ false, TIMEOUT_200MS, false);
+ if (ret) {
+ debug("%s: Timeout while waiting for", __func__);
+ debug(" no outstanding MR transaction\n");
+ return ret;
+ }
+
+ ret = clr_ca_parity_error_status(umctl2_base);
+ if (ret)
+ return ret;
+
+ if (!value) {
+ /* Clear interrupt bit for DFI alert error */
+ setbits_le32(umctl2_base + DDR4_CRCPARCTL0_OFFSET,
+ DDR4_CRCPARCTL0_DFI_ALERT_ERR_INIT_CLR);
+ }
+
+ return 0;
+}
+
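+/*
+ * Poll until the C/A parity/CRC error window has emptied. If software
+ * intervention is enabled, run the software retry sequence on DFI alert
+ * errors and hang on a fatal parity error.
+ */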
+static int ensure_retry_procedure_complete(phys_addr_t umctl2_base)
+{
+ u32 value;
+ u32 start = get_timer(0);
+ int ret;
+
+	/* Check whether the parity/CRC/error window has emptied */
+ value = readl(umctl2_base + DDR4_CRCPARSTAT_OFFSET) &
+ DDR4_CRCPARSTAT_CMD_IN_ERR_WINDOW;
+
+	/* Poll until the parity/CRC/error window is empty */
+ while (value) {
+ if (get_timer(start) > TIMEOUT_200MS) {
+ debug("%s: Timeout while waiting for",
+ __func__);
+ debug(" parity/crc/error window empty\n");
+ return -ETIMEDOUT;
+ }
+
+		/* Check whether software intervention is enabled */
+ value = readl(umctl2_base + DDR4_CRCPARCTL1_OFFSET) &
+ DDR4_CRCPARCTL1_ALERT_WAIT_FOR_SW;
+ if (value) {
+ debug("%s: Software intervention is enabled\n",
+ __func__);
+
+			/* Check whether the DFI alert error interrupt is set */
+ value = readl(umctl2_base + DDR4_CRCPARSTAT_OFFSET) &
+ DDR4_CRCPARSTAT_DFI_ALERT_ERR_INT;
+
+ if (value) {
+ ret = ddr_retry_software_sequence(umctl2_base);
+ debug("%s: DFI alert error interrupt ",
+ __func__);
+ debug("is set\n");
+
+ if (ret)
+ return ret;
+ }
+
+ /*
+			 * Check whether the fatal parity error interrupt is set.
+ */
+ value = readl(umctl2_base + DDR4_CRCPARSTAT_OFFSET) &
+ DDR4_CRCPARSTAT_DFI_ALERT_ERR_FATL_INT;
+ if (value) {
+ printf("%s: Fatal parity error ",
+ __func__);
+ printf("interrupt is set, Hang it!!\n");
+ hang();
+ }
+ }
+
+ value = readl(umctl2_base + DDR4_CRCPARSTAT_OFFSET) &
+ DDR4_CRCPARSTAT_CMD_IN_ERR_WINDOW;
+
+ udelay(1);
+ WATCHDOG_RESET();
+ }
+
+ return 0;
+}
+
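+/*
+ * Quiesce the controller so quasi-dynamic group 3 registers can be
+ * reprogrammed: stop AXI port traffic, block the HIF, drain the CAM/data
+ * pipelines and, for DDR4 with retry enabled, wait for the retry procedure
+ * to complete.
+ */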
+static int enable_quasi_dynamic_reg_grp3(phys_addr_t umctl2_base,
+ enum ddr_type umctl2_type)
+{
+ u32 i, value, backup;
+ int ret = 0;
+
+ /* Disable input traffic per port */
+ clrbits_le32(umctl2_base + DDR4_PCTRL0_OFFSET, DDR4_PCTRL0_PORT_EN);
+
+ /* Polling AXI port until idle */
+ ret = wait_for_bit_le32((const void *)(umctl2_base +
+ DDR4_PSTAT_OFFSET),
+ DDR4_PSTAT_WR_PORT_BUSY_0 |
+ DDR4_PSTAT_RD_PORT_BUSY_0, false,
+ TIMEOUT_200MS, false);
+ if (ret) {
+ debug("%s: Timeout while waiting for", __func__);
+ debug(" controller idle\n");
+ return ret;
+ }
+
+ /* Backup user setting */
+ backup = readl(umctl2_base + DDR4_DBG1_OFFSET);
+
+ /* Disable input traffic to the controller */
+ setbits_le32(umctl2_base + DDR4_DBG1_OFFSET, DDR4_DBG1_DIS_HIF);
+
+ /*
+	 * Ensure the CAM/data pipelines are empty.
+	 * Poll until the empty status bits are set, checking at least
+	 * twice, with a 200 ms timeout for each poll.
+ */
+ for (i = 0; i < 2; i++) {
+ ret = wait_for_bit_le32((const void *)(umctl2_base +
+ DDR4_DBGCAM_OFFSET),
+ DDR4_DBGCAM_WR_DATA_PIPELINE_EMPTY |
+ DDR4_DBGCAM_RD_DATA_PIPELINE_EMPTY |
+ DDR4_DBGCAM_DBG_WR_Q_EMPTY |
+ DDR4_DBGCAM_DBG_RD_Q_EMPTY, true,
+ TIMEOUT_200MS, false);
+ if (ret) {
+ debug("%s: loop(%u): Timeout while waiting for",
+ __func__, i + 1);
+ debug(" CAM/data pipelines are empty\n");
+
+ goto out;
+ }
+ }
+
+ if (umctl2_type == DDRTYPE_DDR4) {
+		/* Check whether DDR4 retry is enabled */
+ value = readl(umctl2_base + DDR4_CRCPARCTL1_OFFSET) &
+ DDR4_CRCPARCTL1_CRC_PARITY_RETRY_ENABLE;
+
+ if (value) {
+ debug("%s: DDR4 retry is enabled\n", __func__);
+
+ ret = ensure_retry_procedure_complete(umctl2_base);
+ if (ret) {
+ debug("%s: Timeout while waiting for",
+ __func__);
+ debug(" retry procedure complete\n");
+
+ goto out;
+ }
+ }
+ }
+
+ debug("%s: Quasi-dynamic group 3 registers are enabled\n", __func__);
+
+out:
+ /* Restore user setting */
+ writel(backup, umctl2_base + DDR4_DBG1_OFFSET);
+
+ return ret;
+}
+
+static enum ddr_type get_ddr_type(phys_addr_t ddr_type_location)
+{
+ u32 ddr_type_magic = readl(ddr_type_location);
+
+ if (ddr_type_magic == SOC64_HANDOFF_DDR_UMCTL2_DDR4_TYPE)
+ return DDRTYPE_DDR4;
+
+ if (ddr_type_magic == SOC64_HANDOFF_DDR_UMCTL2_LPDDR4_0_TYPE)
+ return DDRTYPE_LPDDR4_0;
+
+ if (ddr_type_magic == SOC64_HANDOFF_DDR_UMCTL2_LPDDR4_1_TYPE)
+ return DDRTYPE_LPDDR4_1;
+
+ return DDRTYPE_UNKNOWN;
+}
+
+static void use_lpddr4_interleaving(bool set)
+{
+ if (set) {
+ printf("Starting LPDDR4 interleaving configuration ...\n");
+ setbits_le32(FPGA2SDRAM_MGR_MAIN_SIDEBANDMGR_FLAGOUTSET0,
+ BIT(5));
+ } else {
+ printf("Starting LPDDR4 non-interleaving configuration ...\n");
+ clrbits_le32(FPGA2SDRAM_MGR_MAIN_SIDEBANDMGR_FLAGOUTSET0,
+ BIT(5));
+ }
+}
+
+static void use_ddr4(enum ddr_type type)
+{
+ if (type == DDRTYPE_DDR4) {
+ printf("Starting DDR4 configuration ...\n");
+ setbits_le32(socfpga_get_sysmgr_addr() + SYSMGR_SOC64_DDR_MODE,
+ SYSMGR_SOC64_DDR_MODE_MSK);
+ } else if (type == DDRTYPE_LPDDR4_0) {
+ printf("Starting LPDDR4 configuration ...\n");
+ clrbits_le32(socfpga_get_sysmgr_addr() + SYSMGR_SOC64_DDR_MODE,
+ SYSMGR_SOC64_DDR_MODE_MSK);
+
+ use_lpddr4_interleaving(false);
+ }
+}
+
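+/*
+ * Use the built-in scrubber to write zeros across the whole SDRAM (which
+ * also initializes the ECC), then restore the caller's scrubber settings.
+ */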
+static int scrubber_ddr_config(phys_addr_t umctl2_base,
+ enum ddr_type umctl2_type)
+{
+ u32 backup[9];
+ int ret;
+
+	/* Reset to default value to prevent the scrubber stopping due to low power */
+ writel(0, umctl2_base + DDR4_PWRCTL_OFFSET);
+
+ /* Backup user settings */
+ backup[0] = readl(umctl2_base + DDR4_SBRCTL_OFFSET);
+ backup[1] = readl(umctl2_base + DDR4_SBRWDATA0_OFFSET);
+ backup[2] = readl(umctl2_base + DDR4_SBRSTART0_OFFSET);
+ if (umctl2_type == DDRTYPE_DDR4) {
+ backup[3] = readl(umctl2_base + DDR4_SBRWDATA1_OFFSET);
+ backup[4] = readl(umctl2_base + DDR4_SBRSTART1_OFFSET);
+ }
+ backup[5] = readl(umctl2_base + DDR4_SBRRANGE0_OFFSET);
+ backup[6] = readl(umctl2_base + DDR4_SBRRANGE1_OFFSET);
+ backup[7] = readl(umctl2_base + DDR4_ECCCFG0_OFFSET);
+ backup[8] = readl(umctl2_base + DDR4_ECCCFG1_OFFSET);
+
+ if (umctl2_type != DDRTYPE_DDR4) {
+		/* Lock the ECC region to ensure it is not being accessed */
+ setbits_le32(umctl2_base + DDR4_ECCCFG1_OFFSET,
+ LPDDR4_ECCCFG1_ECC_REGIONS_PARITY_LOCK);
+ }
+ /* Disable input traffic per port */
+ clrbits_le32(umctl2_base + DDR4_PCTRL0_OFFSET, DDR4_PCTRL0_PORT_EN);
+ /* Disables scrubber */
+ clrbits_le32(umctl2_base + DDR4_SBRCTL_OFFSET, DDR4_SBRCTL_SCRUB_EN);
+	/* Poll until all scrub write data has been sent */
+ ret = wait_for_bit_le32((const void *)(umctl2_base +
+ DDR4_SBRSTAT_OFFSET), DDR4_SBRSTAT_SCRUB_BUSY,
+ false, TIMEOUT_5000MS, false);
+ if (ret) {
+ debug("%s: Timeout while waiting for", __func__);
+ debug(" sending all scrub data\n");
+ return ret;
+ }
+
+ /* LPDDR4 supports inline ECC only */
+ if (umctl2_type != DDRTYPE_DDR4) {
+ /*
+		 * Set all regions as protected; this is required for the
+		 * scrubber to initialize the whole of LPDDR4 except the
+		 * ECC region.
+ */
+ writel(((ONE_EIGHT <<
+ LPDDR4_ECCCFG0_ECC_REGION_MAP_GRANU_SHIFT) |
+ (ALL_PROTECTED << LPDDR4_ECCCFG0_ECC_REGION_MAP_SHIFT)),
+ umctl2_base + DDR4_ECCCFG0_OFFSET);
+ }
+
+ /* Scrub_burst = 1, scrub_mode = 1(performs writes) */
+ writel(DDR4_SBRCTL_SCRUB_BURST_1 | DDR4_SBRCTL_SCRUB_WRITE,
+ umctl2_base + DDR4_SBRCTL_OFFSET);
+
+ /* Zeroing whole DDR */
+ writel(0, umctl2_base + DDR4_SBRWDATA0_OFFSET);
+ writel(0, umctl2_base + DDR4_SBRSTART0_OFFSET);
+ if (umctl2_type == DDRTYPE_DDR4) {
+ writel(0, umctl2_base + DDR4_SBRWDATA1_OFFSET);
+ writel(0, umctl2_base + DDR4_SBRSTART1_OFFSET);
+ }
+ writel(0, umctl2_base + DDR4_SBRRANGE0_OFFSET);
+ writel(0, umctl2_base + DDR4_SBRRANGE1_OFFSET);
+
+ /* Enables scrubber */
+ setbits_le32(umctl2_base + DDR4_SBRCTL_OFFSET, DDR4_SBRCTL_SCRUB_EN);
+	/* Poll until all scrub write commands have been sent */
+ ret = wait_for_bit_le32((const void *)(umctl2_base +
+ DDR4_SBRSTAT_OFFSET), DDR4_SBRSTAT_SCRUB_DONE,
+ true, TIMEOUT_5000MS, false);
+ if (ret) {
+ debug("%s: Timeout while waiting for", __func__);
+ debug(" sending all scrub commands\n");
+ return ret;
+ }
+
+	/* Poll until all scrub write data has been sent */
+ ret = wait_for_bit_le32((const void *)(umctl2_base +
+ DDR4_SBRSTAT_OFFSET), DDR4_SBRSTAT_SCRUB_BUSY,
+ false, TIMEOUT_5000MS, false);
+ if (ret) {
+ printf("%s: Timeout while waiting for", __func__);
+ printf(" sending all scrub data\n");
+ return ret;
+ }
+
+ /* Disables scrubber */
+ clrbits_le32(umctl2_base + DDR4_SBRCTL_OFFSET, DDR4_SBRCTL_SCRUB_EN);
+
+ /* Restore user settings */
+ writel(backup[0], umctl2_base + DDR4_SBRCTL_OFFSET);
+ writel(backup[1], umctl2_base + DDR4_SBRWDATA0_OFFSET);
+ writel(backup[2], umctl2_base + DDR4_SBRSTART0_OFFSET);
+ if (umctl2_type == DDRTYPE_DDR4) {
+ writel(backup[3], umctl2_base + DDR4_SBRWDATA1_OFFSET);
+ writel(backup[4], umctl2_base + DDR4_SBRSTART1_OFFSET);
+ }
+ writel(backup[5], umctl2_base + DDR4_SBRRANGE0_OFFSET);
+ writel(backup[6], umctl2_base + DDR4_SBRRANGE1_OFFSET);
+ writel(backup[7], umctl2_base + DDR4_ECCCFG0_OFFSET);
+ writel(backup[8], umctl2_base + DDR4_ECCCFG1_OFFSET);
+
+ /* Enables ECC scrub on scrubber */
+ if (!(readl(umctl2_base + DDR4_SBRCTL_OFFSET) &
+ DDR4_SBRCTL_SCRUB_WRITE)) {
+ /* Enables scrubber */
+ setbits_le32(umctl2_base + DDR4_SBRCTL_OFFSET,
+ DDR4_SBRCTL_SCRUB_EN);
+ }
+
+ return 0;
+}
+
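+/*
+ * Apply a handoff table of (offset, value) pairs to the block at @base.
+ * PHY offsets are halfword indices and are doubled into byte offsets
+ * before the 16-bit write.
+ */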
+static void handoff_process(struct ddr_handoff *ddr_handoff_info,
+ phys_addr_t handoff_base, size_t length,
+ phys_addr_t base)
+{
+ u32 handoff_table[length];
+ u32 i, value = 0;
+
+ /* Execute configuration handoff */
+ socfpga_handoff_read((void *)handoff_base, handoff_table, length);
+
+ for (i = 0; i < length; i = i + 2) {
+ debug("%s: wr = 0x%08x ", __func__, handoff_table[i + 1]);
+ if (ddr_handoff_info && base == ddr_handoff_info->phy_base) {
+ /*
+			 * Convert the PHY odd (halfword) offset to an even
+			 * byte offset supported by the ARM processor.
+ */
+ value = handoff_table[i] << 1;
+
+ writew(handoff_table[i + 1],
+ (uintptr_t)(value + base));
+ debug("rd = 0x%08x ",
+ readw((uintptr_t)(value + base)));
+ debug("PHY offset: 0x%08x ", handoff_table[i + 1]);
+ } else {
+ value = handoff_table[i];
+ writel(handoff_table[i + 1], (uintptr_t)(value +
+ base));
+ debug("rd = 0x%08x ",
+ readl((uintptr_t)(value + base)));
+ }
+
+ debug("Absolute addr: 0x%08llx, APB offset: 0x%08x\n",
+ value + base, value);
+ }
+}
+
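+/*
+ * Program the controller from its handoff table while it is quiesced:
+ * DQ traffic blocked, SDRAM in self-refresh, quasi-dynamic programming
+ * enabled. The original PWRCTL (and INIT0 for LPDDR4) settings are saved
+ * in user_backup so they can be restored once DDR is up and running.
+ */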
+static int init_umctl2(phys_addr_t umctl2_handoff_base,
+ phys_addr_t umctl2_base, enum ddr_type umctl2_type,
+ size_t umctl2_handoff_length,
+ u32 *user_backup)
+{
+ int ret;
+
+ if (umctl2_type == DDRTYPE_DDR4)
+ printf("Initializing DDR4 controller ...\n");
+ else if (umctl2_type == DDRTYPE_LPDDR4_0)
+ printf("Initializing LPDDR4_0 controller ...\n");
+ else if (umctl2_type == DDRTYPE_LPDDR4_1)
+ printf("Initializing LPDDR4_1 controller ...\n");
+
+ /* Prevent controller from issuing read/write to SDRAM */
+ setbits_le32(umctl2_base + DDR4_DBG1_OFFSET, DDR4_DBG1_DISDQ);
+
+ /* Put SDRAM into self-refresh */
+ setbits_le32(umctl2_base + DDR4_PWRCTL_OFFSET, DDR4_PWRCTL_SELFREF_EN);
+
+	/* Enable quasi-dynamic programming of the controller registers */
+ clrbits_le32(umctl2_base + DDR4_SWCTL_OFFSET, DDR4_SWCTL_SW_DONE);
+
+ /* Ensure the controller is in initialization mode */
+ ret = wait_for_bit_le32((const void *)(umctl2_base + DDR4_STAT_OFFSET),
+ DDR4_STAT_OPERATING_MODE, false, TIMEOUT_200MS,
+ false);
+ if (ret) {
+ debug("%s: Timeout while waiting for", __func__);
+ debug(" init operating mode\n");
+ return ret;
+ }
+
+ debug("%s: UMCTL2 handoff base address = 0x%p table length = 0x%08x\n",
+ __func__, (u32 *)umctl2_handoff_base,
+ (u32)umctl2_handoff_length);
+
+ handoff_process(NULL, umctl2_handoff_base, umctl2_handoff_length,
+ umctl2_base);
+
+	/* Back up user settings, restore after DDR is up and running */
+ *user_backup = readl(umctl2_base + DDR4_PWRCTL_OFFSET);
+
+	/* Disable self-refresh */
+ clrbits_le32(umctl2_base + DDR4_PWRCTL_OFFSET, DDR4_PWRCTL_SELFREF_EN);
+
+ if (umctl2_type == DDRTYPE_LPDDR4_0 ||
+ umctl2_type == DDRTYPE_LPDDR4_1) {
+ /* Setting selfref_sw to 1, based on lpddr4 requirement */
+ setbits_le32(umctl2_base + DDR4_PWRCTL_OFFSET,
+ DDR4_PWRCTL_SELFREF_SW);
+
+		/* Back up user settings, restore after DDR is up and running */
+ user_backup++;
+ *user_backup = readl(umctl2_base + DDR4_INIT0_OFFSET) &
+ DDR4_INIT0_SKIP_RAM_INIT;
+
+ /*
+ * Setting INIT0.skip_dram_init to 0x3, based on lpddr4
+ * requirement
+ */
+ setbits_le32(umctl2_base + DDR4_INIT0_OFFSET,
+ DDR4_INIT0_SKIP_RAM_INIT);
+ }
+
+ /* Complete quasi-dynamic register programming */
+ setbits_le32(umctl2_base + DDR4_SWCTL_OFFSET, DDR4_SWCTL_SW_DONE);
+
+	/* Allow the controller to issue read/write to SDRAM again */
+ clrbits_le32(umctl2_base + DDR4_DBG1_OFFSET, DDR4_DBG1_DISDQ);
+
+ return 0;
+}
+
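+/*
+ * Prepare the controller for the PHY handoff: disable auto refresh when
+ * DDR4 retry is off, clear the low-power enables, and mask
+ * dfi_init_complete under quasi-dynamic programming.
+ */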
+static int phy_pre_handoff_config(phys_addr_t umctl2_base,
+ enum ddr_type umctl2_type)
+{
+ int ret;
+ u32 value;
+
+ if (umctl2_type == DDRTYPE_DDR4) {
+		/* Check whether DDR4 retry is enabled */
+ value = readl(umctl2_base + DDR4_CRCPARCTL1_OFFSET) &
+ DDR4_CRCPARCTL1_CRC_PARITY_RETRY_ENABLE;
+
+ if (value) {
+ debug("%s: DDR4 retry is enabled\n", __func__);
+ debug("%s: Disable auto refresh is not supported\n",
+ __func__);
+ } else {
+ /* Disable auto refresh */
+ setbits_le32(umctl2_base + DDR4_RFSHCTL3_OFFSET,
+ DDR4_RFSHCTL3_DIS_AUTO_REFRESH);
+ }
+ }
+
+	/* Disable selfref_en & powerdown_en, never disable dfi dram clk */
+ clrbits_le32(umctl2_base + DDR4_PWRCTL_OFFSET,
+ DDR4_PWRCTL_EN_DFI_DRAM_CLK_DISABLE |
+ DDR4_PWRCTL_POWERDOWN_EN | DDR4_PWRCTL_SELFREF_EN);
+
+	/* Enable quasi-dynamic programming of the controller registers */
+ clrbits_le32(umctl2_base + DDR4_SWCTL_OFFSET, DDR4_SWCTL_SW_DONE);
+
+ ret = enable_quasi_dynamic_reg_grp3(umctl2_base, umctl2_type);
+ if (ret)
+ return ret;
+
+ /* Masking dfi init complete */
+ clrbits_le32(umctl2_base + DDR4_DFIMISC_OFFSET,
+ DDR4_DFIMISC_DFI_INIT_COMPLETE_EN);
+
+ /* Complete quasi-dynamic register programming */
+ setbits_le32(umctl2_base + DDR4_SWCTL_OFFSET, DDR4_SWCTL_SW_DONE);
+
+ /* Polling programming done */
+ ret = wait_for_bit_le32((const void *)(umctl2_base +
+ DDR4_SWSTAT_OFFSET), DDR4_SWSTAT_SW_DONE_ACK,
+ true, TIMEOUT_200MS, false);
+ if (ret) {
+ debug("%s: Timeout while waiting for", __func__);
+ debug(" programming done\n");
+ }
+
+ return ret;
+}
+
+static int init_phy(struct ddr_handoff *ddr_handoff_info)
+{
+ int ret;
+
+ printf("Initializing DDR PHY ...\n");
+
+ if (ddr_handoff_info->cntlr_t == DDRTYPE_DDR4 ||
+ ddr_handoff_info->cntlr_t == DDRTYPE_LPDDR4_0) {
+ ret = phy_pre_handoff_config(ddr_handoff_info->cntlr_base,
+ ddr_handoff_info->cntlr_t);
+ if (ret)
+ return ret;
+ }
+
+ if (ddr_handoff_info->cntlr2_t == DDRTYPE_LPDDR4_1) {
+ ret = phy_pre_handoff_config
+ (ddr_handoff_info->cntlr2_base,
+ ddr_handoff_info->cntlr2_t);
+ if (ret)
+ return ret;
+ }
+
+ /* Execute PHY configuration handoff */
+ handoff_process(ddr_handoff_info, ddr_handoff_info->phy_handoff_base,
+ ddr_handoff_info->phy_handoff_length,
+ ddr_handoff_info->phy_base);
+
+ printf("DDR PHY configuration is completed\n");
+
+ return 0;
+}
+
+static void phy_init_engine(struct ddr_handoff *handoff)
+{
+ printf("Load PHY Init Engine ...\n");
+
+ /* Execute PIE production code handoff */
+ handoff_process(handoff, handoff->phy_engine_handoff_base,
+ handoff->phy_engine_handoff_length, handoff->phy_base);
+
+ printf("End of loading PHY Init Engine\n");
+}
+
+int populate_ddr_handoff(struct ddr_handoff *handoff)
+{
+ phys_addr_t next_section_header;
+
+ /* DDR handoff */
+ handoff->mem_reset_base = SOC64_HANDOFF_DDR_MEMRESET_BASE;
+ debug("%s: DDR memory reset base = 0x%x\n", __func__,
+ (u32)handoff->mem_reset_base);
+ debug("%s: DDR memory reset address = 0x%x\n", __func__,
+ readl(handoff->mem_reset_base));
+
+ /* Beginning of DDR controller handoff */
+ handoff->cntlr_handoff_base = SOC64_HANDOFF_DDR_UMCTL2_SECTION;
+ debug("%s: cntlr handoff base = 0x%x\n", __func__,
+ (u32)handoff->cntlr_handoff_base);
+
+ /* Get 1st DDR type */
+ handoff->cntlr_t = get_ddr_type(handoff->cntlr_handoff_base +
+ SOC64_HANDOFF_DDR_UMCTL2_TYPE_OFFSET);
+ if (handoff->cntlr_t == DDRTYPE_LPDDR4_1 ||
+ handoff->cntlr_t == DDRTYPE_UNKNOWN) {
+ debug("%s: Wrong DDR handoff format, the 1st DDR ", __func__);
+ debug("type must be DDR4 or LPDDR4_0\n");
+ return -ENOEXEC;
+ }
+
+ /* 1st cntlr base physical address */
+ handoff->cntlr_base = readl(handoff->cntlr_handoff_base +
+ SOC64_HANDOFF_DDR_UMCTL2_BASE_ADDR_OFFSET);
+ debug("%s: cntlr base = 0x%x\n", __func__, (u32)handoff->cntlr_base);
+
+ /* Get the total length of DDR cntlr handoff section */
+ handoff->cntlr_total_length = readl(handoff->cntlr_handoff_base +
+ SOC64_HANDOFF_OFFSET_LENGTH);
+ debug("%s: Umctl2 total length in byte = 0x%x\n", __func__,
+ (u32)handoff->cntlr_total_length);
+
+ /* Get the length of user setting data in DDR cntlr handoff section */
+ handoff->cntlr_handoff_length = socfpga_get_handoff_size((void *)
+ handoff->cntlr_handoff_base);
+ debug("%s: Umctl2 handoff length in word(32-bit) = 0x%x\n", __func__,
+ (u32)handoff->cntlr_handoff_length);
+
+ /* Wrong format on user setting data */
+ if (handoff->cntlr_handoff_length < 0) {
+ debug("%s: Wrong format on user setting data\n", __func__);
+ return -ENOEXEC;
+ }
+
+ /* Get the next handoff section address */
+ next_section_header = handoff->cntlr_handoff_base +
+ handoff->cntlr_total_length;
+ debug("%s: Next handoff section header location = 0x%llx\n", __func__,
+ next_section_header);
+
+ /*
+	 * Check whether the next handoff section is a controller or PHY
+	 * section, and adapt the subsequent handling accordingly
+ */
+ if (readl(next_section_header) == SOC64_HANDOFF_DDR_UMCTL2_MAGIC) {
+ /* Get the next cntlr handoff section address */
+ handoff->cntlr2_handoff_base = next_section_header;
+ debug("%s: umctl2 2nd handoff base = 0x%x\n", __func__,
+ (u32)handoff->cntlr2_handoff_base);
+
+ /* Get 2nd DDR type */
+ handoff->cntlr2_t = get_ddr_type(handoff->cntlr2_handoff_base +
+ SOC64_HANDOFF_DDR_UMCTL2_TYPE_OFFSET);
+ if (handoff->cntlr2_t == DDRTYPE_LPDDR4_0 ||
+ handoff->cntlr2_t == DDRTYPE_UNKNOWN) {
+ debug("%s: Wrong DDR handoff format, the 2nd DDR ",
+ __func__);
+ debug("type must be LPDDR4_1\n");
+ return -ENOEXEC;
+ }
+
+ /* 2nd umctl2 base physical address */
+ handoff->cntlr2_base =
+ readl(handoff->cntlr2_handoff_base +
+ SOC64_HANDOFF_DDR_UMCTL2_BASE_ADDR_OFFSET);
+ debug("%s: cntlr2 base = 0x%x\n", __func__,
+ (u32)handoff->cntlr2_base);
+
+ /* Get the total length of 2nd DDR umctl2 handoff section */
+ handoff->cntlr2_total_length =
+ readl(handoff->cntlr2_handoff_base +
+ SOC64_HANDOFF_OFFSET_LENGTH);
+ debug("%s: Umctl2_2nd total length in byte = 0x%x\n", __func__,
+ (u32)handoff->cntlr2_total_length);
+
+ /*
+ * Get the length of user setting data in DDR umctl2 handoff
+ * section
+ */
+ handoff->cntlr2_handoff_length =
+ socfpga_get_handoff_size((void *)
+ handoff->cntlr2_handoff_base);
+ debug("%s: cntlr2 handoff length in word(32-bit) = 0x%x\n",
+ __func__,
+ (u32)handoff->cntlr2_handoff_length);
+
+ /* Wrong format on user setting data */
+ if (handoff->cntlr2_handoff_length < 0) {
+ debug("%s: Wrong format on umctl2 user setting data\n",
+ __func__);
+ return -ENOEXEC;
+ }
+
+ /* Get the next handoff section address */
+ next_section_header = handoff->cntlr2_handoff_base +
+ handoff->cntlr2_total_length;
+ debug("%s: Next handoff section header location = 0x%llx\n",
+ __func__, next_section_header);
+ }
+
+	/* Check whether the next handoff section is PHY */
+ if (readl(next_section_header) == SOC64_HANDOFF_DDR_PHY_MAGIC) {
+ /* DDR PHY handoff */
+ handoff->phy_handoff_base = next_section_header;
+ debug("%s: PHY handoff base = 0x%x\n", __func__,
+ (u32)handoff->phy_handoff_base);
+
+ /* PHY base physical address */
+ handoff->phy_base = readl(handoff->phy_handoff_base +
+ SOC64_HANDOFF_DDR_PHY_BASE_OFFSET);
+ debug("%s: PHY base = 0x%x\n", __func__,
+ (u32)handoff->phy_base);
+
+ /* Get the total length of PHY handoff section */
+ handoff->phy_total_length = readl(handoff->phy_handoff_base +
+ SOC64_HANDOFF_OFFSET_LENGTH);
+ debug("%s: PHY total length in byte = 0x%x\n", __func__,
+ (u32)handoff->phy_total_length);
+
+ /*
+ * Get the length of user setting data in DDR PHY handoff
+ * section
+ */
+ handoff->phy_handoff_length = socfpga_get_handoff_size((void *)
+ handoff->phy_handoff_base);
+ debug("%s: PHY handoff length in word(32-bit) = 0x%x\n",
+ __func__, (u32)handoff->phy_handoff_length);
+
+ /* Wrong format on PHY user setting data */
+ if (handoff->phy_handoff_length < 0) {
+ debug("%s: Wrong format on PHY user setting data\n",
+ __func__);
+ return -ENOEXEC;
+ }
+
+ /* Get the next handoff section address */
+ next_section_header = handoff->phy_handoff_base +
+ handoff->phy_total_length;
+ debug("%s: Next handoff section header location = 0x%llx\n",
+ __func__, next_section_header);
+ } else {
+ debug("%s: Wrong format for DDR handoff, expect PHY",
+ __func__);
+ debug(" handoff section after umctl2 handoff section\n");
+ return -ENOEXEC;
+ }
+
+	/* Check whether the next handoff section is the PHY init engine */
+ if (readl(next_section_header) ==
+ SOC64_HANDOFF_DDR_PHY_INIT_ENGINE_MAGIC) {
+ /* DDR PHY Engine handoff */
+ handoff->phy_engine_handoff_base = next_section_header;
+ debug("%s: PHY init engine handoff base = 0x%x\n", __func__,
+ (u32)handoff->phy_engine_handoff_base);
+
+ /* Get the total length of PHY init engine handoff section */
+ handoff->phy_engine_total_length =
+ readl(handoff->phy_engine_handoff_base +
+ SOC64_HANDOFF_OFFSET_LENGTH);
+ debug("%s: PHY engine total length in byte = 0x%x\n", __func__,
+ (u32)handoff->phy_engine_total_length);
+
+ /*
+ * Get the length of user setting data in DDR PHY init engine
+ * handoff section
+ */
+ handoff->phy_engine_handoff_length =
+ socfpga_get_handoff_size((void *)
+ handoff->phy_engine_handoff_base);
+ debug("%s: PHY engine handoff length in word(32-bit) = 0x%x\n",
+ __func__, (u32)handoff->phy_engine_handoff_length);
+
+ /* Wrong format on PHY init engine setting data */
+ if (handoff->phy_engine_handoff_length < 0) {
+ debug("%s: Wrong format on PHY init engine ",
+ __func__);
+ debug("user setting data\n");
+ return -ENOEXEC;
+ }
+ } else {
+ debug("%s: Wrong format for DDR handoff, expect PHY",
+ __func__);
+ debug(" init engine handoff section after PHY handoff\n");
+ debug(" section\n");
+ return -ENOEXEC;
+ }
+
+ handoff->train_imem_base = handoff->phy_base +
+ DDR_PHY_TRAIN_IMEM_OFFSET;
+ debug("%s: PHY train IMEM base = 0x%x\n",
+ __func__, (u32)handoff->train_imem_base);
+
+ handoff->train_dmem_base = handoff->phy_base +
+ DDR_PHY_TRAIN_DMEM_OFFSET;
+ debug("%s: PHY train DMEM base = 0x%x\n",
+ __func__, (u32)handoff->train_dmem_base);
+
+ handoff->train_imem_length = SOC64_HANDOFF_DDR_TRAIN_IMEM_LENGTH;
+ debug("%s: PHY train IMEM length = 0x%x\n",
+ __func__, (u32)handoff->train_imem_length);
+
+ handoff->train_dmem_length = SOC64_HANDOFF_DDR_TRAIN_DMEM_LENGTH;
+ debug("%s: PHY train DMEM length = 0x%x\n",
+ __func__, (u32)handoff->train_dmem_length);
+
+ return 0;
+}
+
+int enable_ddr_clock(struct udevice *dev)
+{
+ struct clk *ddr_clk;
+ int ret;
+
+ /* Enable clock before init DDR */
+ ddr_clk = devm_clk_get(dev, "mem_clk");
+ if (!IS_ERR(ddr_clk)) {
+ ret = clk_enable(ddr_clk);
+ if (ret) {
+ printf("%s: Failed to enable DDR clock\n", __func__);
+ return ret;
+ }
+ } else {
+ ret = PTR_ERR(ddr_clk);
+ debug("%s: Failed to get DDR clock from dts\n", __func__);
+ return ret;
+ }
+
+ printf("%s: DDR clock is enabled\n", __func__);
+
+ return 0;
+}
+
+static int ddr_start_dfi_init(phys_addr_t umctl2_base,
+ enum ddr_type umctl2_type)
+{
+ int ret;
+
+ debug("%s: Start DFI init\n", __func__);
+
+	/* Enable quasi-dynamic programming of controller registers */
+ clrbits_le32(umctl2_base + DDR4_SWCTL_OFFSET, DDR4_SWCTL_SW_DONE);
+
+ ret = enable_quasi_dynamic_reg_grp3(umctl2_base, umctl2_type);
+ if (ret)
+ return ret;
+
+ /* Start DFI init sequence */
+ setbits_le32(umctl2_base + DDR4_DFIMISC_OFFSET,
+ DDR4_DFIMISC_DFI_INIT_START);
+
+ /* Complete quasi-dynamic register programming */
+ setbits_le32(umctl2_base + DDR4_SWCTL_OFFSET, DDR4_SWCTL_SW_DONE);
+
+ /* Polling programming done */
+ ret = wait_for_bit_le32((const void *)(umctl2_base +
+ DDR4_SWSTAT_OFFSET),
+ DDR4_SWSTAT_SW_DONE_ACK, true,
+ TIMEOUT_200MS, false);
+ if (ret) {
+ debug("%s: Timeout while waiting for", __func__);
+ debug(" programming done\n");
+ }
+
+ return ret;
+}
+
+static int ddr_check_dfi_init_complete(phys_addr_t umctl2_base,
+ enum ddr_type umctl2_type)
+{
+ int ret;
+
+ /* Polling DFI init complete */
+ ret = wait_for_bit_le32((const void *)(umctl2_base +
+ DDR4_DFISTAT_OFFSET),
+ DDR4_DFI_INIT_COMPLETE, true,
+ TIMEOUT_200MS, false);
+ if (ret) {
+ debug("%s: Timeout while waiting for", __func__);
+ debug(" DFI init done\n");
+ return ret;
+ }
+
+ debug("%s: DFI init completed.\n", __func__);
+
+	/* Enable quasi-dynamic programming of controller registers */
+ clrbits_le32(umctl2_base + DDR4_SWCTL_OFFSET, DDR4_SWCTL_SW_DONE);
+
+ ret = enable_quasi_dynamic_reg_grp3(umctl2_base, umctl2_type);
+ if (ret)
+ return ret;
+
+ /* Stop DFI init sequence */
+ clrbits_le32(umctl2_base + DDR4_DFIMISC_OFFSET,
+ DDR4_DFIMISC_DFI_INIT_START);
+
+ /* Complete quasi-dynamic register programming */
+ setbits_le32(umctl2_base + DDR4_SWCTL_OFFSET, DDR4_SWCTL_SW_DONE);
+
+ /* Polling programming done */
+ ret = wait_for_bit_le32((const void *)(umctl2_base +
+ DDR4_SWSTAT_OFFSET),
+ DDR4_SWSTAT_SW_DONE_ACK, true,
+ TIMEOUT_200MS, false);
+ if (ret) {
+ debug("%s: Timeout while waiting for", __func__);
+ debug(" programming done\n");
+ return ret;
+ }
+
+	debug("%s: DDR programming done\n", __func__);
+
+ return ret;
+}
+
+static int ddr_trigger_sdram_init(phys_addr_t umctl2_base,
+ enum ddr_type umctl2_type)
+{
+ int ret;
+
+	/* Enable quasi-dynamic programming of controller registers */
+ clrbits_le32(umctl2_base + DDR4_SWCTL_OFFSET, DDR4_SWCTL_SW_DONE);
+
+ ret = enable_quasi_dynamic_reg_grp3(umctl2_base, umctl2_type);
+ if (ret)
+ return ret;
+
+ /* Unmasking dfi init complete */
+ setbits_le32(umctl2_base + DDR4_DFIMISC_OFFSET,
+ DDR4_DFIMISC_DFI_INIT_COMPLETE_EN);
+
+ /* Software exit from self-refresh */
+ clrbits_le32(umctl2_base + DDR4_PWRCTL_OFFSET, DDR4_PWRCTL_SELFREF_SW);
+
+ /* Complete quasi-dynamic register programming */
+ setbits_le32(umctl2_base + DDR4_SWCTL_OFFSET, DDR4_SWCTL_SW_DONE);
+
+ /* Polling programming done */
+ ret = wait_for_bit_le32((const void *)(umctl2_base +
+ DDR4_SWSTAT_OFFSET),
+ DDR4_SWSTAT_SW_DONE_ACK, true,
+ TIMEOUT_200MS, false);
+ if (ret) {
+ debug("%s: Timeout while waiting for", __func__);
+ debug(" programming done\n");
+ return ret;
+ }
+
+	debug("%s: DDR programming done\n", __func__);
+ return ret;
+}
+
+static int ddr_post_handoff_config(phys_addr_t umctl2_base,
+ enum ddr_type umctl2_type)
+{
+ int ret = 0;
+ u32 value;
+ u32 start = get_timer(0);
+
+ do {
+ if (get_timer(start) > TIMEOUT_200MS) {
+ debug("%s: Timeout while waiting for",
+ __func__);
+ debug(" DDR enters normal operating mode\n");
+ return -ETIMEDOUT;
+ }
+
+ udelay(1);
+ WATCHDOG_RESET();
+
+		/* Poll until SDRAM enters normal operating mode */
+ value = readl(umctl2_base + DDR4_STAT_OFFSET) &
+ DDR4_STAT_OPERATING_MODE;
+ } while (value != OPM_NORMAL);
+
+ printf("DDR entered normal operating mode\n");
+
+ /* Enabling auto refresh */
+ clrbits_le32(umctl2_base + DDR4_RFSHCTL3_OFFSET,
+ DDR4_RFSHCTL3_DIS_AUTO_REFRESH);
+
+	/* Check whether ECC is enabled */
+ value = readl(umctl2_base + DDR4_ECCCFG0_OFFSET) & DDR4_ECC_MODE;
+ if (value) {
+ printf("ECC is enabled\n");
+ ret = scrubber_ddr_config(umctl2_base, umctl2_type);
+ if (ret)
+ printf("Failed to enable ECC\n");
+ }
+
+ return ret;
+}
+
+static int configure_training_firmware(struct ddr_handoff *ddr_handoff_info,
+ const void *train_imem,
+ const void *train_dmem)
+{
+ int ret = 0;
+
+ printf("Configuring training firmware ...\n");
+
+ /* Reset SDRAM */
+ writew(DDR_PHY_PROTECT_MEMRESET,
+ (uintptr_t)(ddr_handoff_info->phy_base +
+ DDR_PHY_MEMRESETL_OFFSET));
+
+ /* Enable access to the PHY configuration registers */
+ clrbits_le16(ddr_handoff_info->phy_base + DDR_PHY_APBONLY0_OFFSET,
+ DDR_PHY_MICROCONTMUXSEL);
+
+ /* Copy train IMEM bin */
+ memcpy((void *)ddr_handoff_info->train_imem_base, train_imem,
+ ddr_handoff_info->train_imem_length);
+
+ ret = memcmp((void *)ddr_handoff_info->train_imem_base, train_imem,
+ ddr_handoff_info->train_imem_length);
+ if (ret) {
+ debug("%s: Failed to copy train IMEM binary\n", __func__);
+ /* Isolate the APB access from internal CSRs */
+ setbits_le16(ddr_handoff_info->phy_base +
+ DDR_PHY_APBONLY0_OFFSET, DDR_PHY_MICROCONTMUXSEL);
+ return ret;
+ }
+
+ memcpy((void *)ddr_handoff_info->train_dmem_base, train_dmem,
+ ddr_handoff_info->train_dmem_length);
+
+ ret = memcmp((void *)ddr_handoff_info->train_dmem_base, train_dmem,
+ ddr_handoff_info->train_dmem_length);
+ if (ret)
+ debug("%s: Failed to copy train DMEM binary\n", __func__);
+
+ /* Isolate the APB access from internal CSRs */
+ setbits_le16(ddr_handoff_info->phy_base + DDR_PHY_APBONLY0_OFFSET,
+ DDR_PHY_MICROCONTMUXSEL);
+
+ return ret;
+}
+
+static void calibrating_sdram(struct ddr_handoff *ddr_handoff_info)
+{
+ /* Init mailbox protocol - set 1 to DCTWRITEPROT[0] */
+ setbits_le16(ddr_handoff_info->phy_base + DDR_PHY_DCTWRITEPROT_OFFSET,
+ DDR_PHY_DCTWRITEPROT);
+
+ /* Init mailbox protocol - set 1 to UCTWRITEPROT[0] */
+ setbits_le16(ddr_handoff_info->phy_base + DDR_PHY_UCTWRITEPROT_OFFSET,
+ DDR_PHY_UCTWRITEPROT);
+
+	/* Reset and stall the ARC processor */
+ setbits_le16(ddr_handoff_info->phy_base + DDR_PHY_MICRORESET_OFFSET,
+ DDR_PHY_MICRORESET_RESET | DDR_PHY_MICRORESET_STALL);
+
+ /* Release ARC processor */
+ clrbits_le16(ddr_handoff_info->phy_base + DDR_PHY_MICRORESET_OFFSET,
+ DDR_PHY_MICRORESET_RESET);
+
+ /* Starting PHY firmware execution */
+ clrbits_le16(ddr_handoff_info->phy_base + DDR_PHY_MICRORESET_OFFSET,
+ DDR_PHY_MICRORESET_STALL);
+}
+
+static int get_mail(struct ddr_handoff *handoff, enum message_mode mode,
+ u32 *message_id)
+{
+ int ret;
+
+ /* Polling major messages from PMU */
+ ret = wait_for_bit_le16((const void *)(handoff->phy_base +
+ DDR_PHY_UCTSHADOWREGS_OFFSET),
+ DDR_PHY_UCTSHADOWREGS_UCTWRITEPROTESHADOW,
+ false, TIMEOUT_200MS, false);
+ if (ret) {
+ debug("%s: Timeout while waiting for",
+ __func__);
+ debug(" major messages from PMU\n");
+ return ret;
+ }
+
+ *message_id = readw((uintptr_t)(handoff->phy_base +
+ DDR_PHY_UCTWRITEONLYSHADOW_OFFSET));
+
+ if (mode == STREAMING_MESSAGE)
+ *message_id |= readw((uintptr_t)((handoff->phy_base +
+ DDR_PHY_UCTDATWRITEONLYSHADOW_OFFSET))) <<
+ SZ_16;
+
+ /* Ack the receipt of the major message */
+ clrbits_le16(handoff->phy_base + DDR_PHY_DCTWRITEPROT_OFFSET,
+ DDR_PHY_DCTWRITEPROT);
+
+ ret = wait_for_bit_le16((const void *)(handoff->phy_base +
+ DDR_PHY_UCTSHADOWREGS_OFFSET),
+ DDR_PHY_UCTSHADOWREGS_UCTWRITEPROTESHADOW,
+ true, TIMEOUT_200MS, false);
+ if (ret) {
+ debug("%s: Timeout while waiting for",
+ __func__);
+		debug(" acknowledgment of the major message\n");
+ return ret;
+ }
+
+ /* Complete protocol */
+ setbits_le16(handoff->phy_base + DDR_PHY_DCTWRITEPROT_OFFSET,
+ DDR_PHY_DCTWRITEPROT);
+
+ return ret;
+}
+
+static int get_mail_streaming(struct ddr_handoff *handoff,
+ enum message_mode mode, u32 *index)
+{
+ int ret;
+
+ *index = readw((uintptr_t)(handoff->phy_base +
+ DDR_PHY_UCTWRITEONLYSHADOW_OFFSET));
+
+ if (mode == STREAMING_MESSAGE)
+ *index |= readw((uintptr_t)((handoff->phy_base +
+ DDR_PHY_UCTDATWRITEONLYSHADOW_OFFSET))) <<
+ SZ_16;
+
+ /* Ack the receipt of the major message */
+ clrbits_le16(handoff->phy_base + DDR_PHY_DCTWRITEPROT_OFFSET,
+ DDR_PHY_DCTWRITEPROT);
+
+ ret = wait_for_bit_le16((const void *)(handoff->phy_base +
+ DDR_PHY_UCTSHADOWREGS_OFFSET),
+ DDR_PHY_UCTSHADOWREGS_UCTWRITEPROTESHADOW,
+ true, TIMEOUT_200MS, false);
+ if (ret) {
+ debug("%s: Timeout while waiting for",
+ __func__);
+		debug(" acknowledgment of the major message\n");
+ return ret;
+ }
+
+ /* Complete protocol */
+ setbits_le16(handoff->phy_base + DDR_PHY_DCTWRITEPROT_OFFSET,
+ DDR_PHY_DCTWRITEPROT);
+
+ return 0;
+}
+
+static int decode_streaming_message(struct ddr_handoff *ddr_handoff_info,
+ u32 *streaming_index)
+{
+ int i = 0, ret;
+ u32 temp;
+
+ temp = *streaming_index;
+
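+	/*
+	 * The low halfword of the streaming message holds the argument
+	 * count; read that many follow-up mails from the PMU mailbox.
+	 */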
+ while (i < GET_LOWHW_DATA(temp)) {
+ ret = get_mail(ddr_handoff_info, STREAMING_MESSAGE,
+ streaming_index);
+ if (ret)
+ return ret;
+
+ printf("args[%d]: 0x%x ", i, *streaming_index);
+ i++;
+ }
+
+ return 0;
+}
+
+static int poll_for_training_complete(struct ddr_handoff *ddr_handoff_info)
+{
+ int ret;
+ u32 message_id = 0;
+ u32 streaming_index = 0;
+
+ do {
+ ret = get_mail(ddr_handoff_info, MAJOR_MESSAGE, &message_id);
+ if (ret)
+ return ret;
+
+		printf("Major message id = 0x%x\n", message_id);
+
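+		/*
+		 * A streaming message means the PMU has additional arguments
+		 * queued; fetch and print them before polling for the next
+		 * major message.
+		 */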
+ if (message_id == FW_STREAMING_MSG_ID) {
+ ret = get_mail_streaming(ddr_handoff_info,
+ STREAMING_MESSAGE,
+ &streaming_index);
+ if (ret)
+ return ret;
+
+			printf("streaming index 0x%x : ", streaming_index);
+
+ decode_streaming_message(ddr_handoff_info,
+ &streaming_index);
+
+ printf("\n");
+ }
+ } while ((message_id != FW_TRAINING_COMPLETED_STAT) &&
+ (message_id != FW_TRAINING_FAILED_STAT));
+
+ if (message_id == FW_TRAINING_COMPLETED_STAT) {
+ printf("DDR firmware training completed\n");
+ } else if (message_id == FW_TRAINING_FAILED_STAT) {
+ printf("DDR firmware training failed\n");
+ hang();
+ }
+
+ return 0;
+}
+
+static void enable_phy_clk_for_csr_access(struct ddr_handoff *handoff,
+ bool enable)
+{
+ if (enable) {
+ /* Enable PHY clk */
+ setbits_le16((uintptr_t)(handoff->phy_base +
+ DDR_PHY_UCCLKHCLKENABLES_OFFSET),
+ DDR_PHY_UCCLKHCLKENABLES_UCCLKEN |
+ DDR_PHY_UCCLKHCLKENABLES_HCLKEN);
+ } else {
+ /* Disable PHY clk */
+ clrbits_le16((uintptr_t)(handoff->phy_base +
+ DDR_PHY_UCCLKHCLKENABLES_OFFSET),
+ DDR_PHY_UCCLKHCLKENABLES_UCCLKEN |
+ DDR_PHY_UCCLKHCLKENABLES_HCLKEN);
+ }
+}
+
+/* helper function for updating train result to umctl2 RANKCTL register */
+static void set_cal_res_to_rankctrl(u32 reg_addr, u16 update_value,
+ u32 mask, u32 msb_mask, u32 shift)
+{
+ u32 reg, value;
+
+ reg = readl((uintptr_t)reg_addr);
+
+ debug("max value divided by 2 is 0x%x\n", update_value);
+	debug("umctl2 register 0x%x value is 0x%x before ", reg_addr, reg);
+ debug("update with train result\n");
+
+ value = (reg & mask) >> shift;
+
+ value += update_value + 3;
+
+	/* If the value is greater than 0xF, set diff_rank_wr_gap_msb */
+ if (value > 0xF)
+ setbits_le32((u32 *)(uintptr_t)reg_addr, msb_mask);
+ else
+ clrbits_le32((u32 *)(uintptr_t)reg_addr, msb_mask);
+
+ reg = readl((uintptr_t)reg_addr);
+
+ value = (value << shift) & mask;
+
+ /* update register */
+ writel((reg & (~mask)) | value, (uintptr_t)reg_addr);
+
+ reg = readl((uintptr_t)reg_addr);
+	debug("umctl2 register 0x%x value is 0x%x after ", reg_addr, reg);
+ debug("update with train result\n");
+}
+
+/* helper function for updating train result to register */
+static void set_cal_res_to_reg(u32 reg_addr, u16 update_value, u32 mask,
+ u32 shift)
+{
+ u32 reg, value;
+
+ reg = readl((uintptr_t)reg_addr);
+
+ debug("max value divided by 2 is 0x%x\n", update_value);
+	debug("umctl2 register 0x%x value is 0x%x before ", reg_addr, reg);
+ debug("update with train result\n");
+
+ value = (reg & mask) >> shift;
+
+ value = ((value + update_value + 3) << shift) & mask;
+
+ /* update register */
+ writel((reg & (~mask)) | value, (uintptr_t)reg_addr);
+
+ reg = readl((uintptr_t)reg_addr);
+	debug("umctl2 register 0x%x value is 0x%x after ", reg_addr, reg);
+ debug("update with train result\n");
+}
+
+static u16 get_max_txdqsdlytg0_ux_p0(struct ddr_handoff *handoff, u32 reg,
+ u8 numdbyte, u16 upd_val)
+{
+ u32 b_addr;
+ u16 val;
+ u8 byte;
+
+ /* Getting max value from DBYTEx TxDqsDlyTg0_ux_p0 */
+ for (byte = 0; byte < numdbyte; byte++) {
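+		/*
+		 * Each DBYTE instance has its own copy of the training result
+		 * CSRs; the per-byte address stride is 1 << 13, hence the
+		 * shift below.
+		 */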
+ b_addr = byte << 13;
+
+ /* TxDqsDlyTg0[9:6] is the coarse delay */
+ val = (readw((uintptr_t)(handoff->phy_base +
+ reg + b_addr)) &
+ DDR_PHY_TXDQDLYTG0_COARSE_DELAY) >>
+ DDR_PHY_TXDQDLYTG0_COARSE_DELAY_SHIFT;
+
+ upd_val = max(val, upd_val);
+ }
+
+ return upd_val;
+}
+
+static int set_cal_res_to_umctl2(struct ddr_handoff *handoff,
+ phys_addr_t umctl2_base,
+ enum ddr_type umctl2_type)
+{
+ int ret;
+ u8 numdbyte = 0x8;
+ u16 upd_val, val;
+ u32 dramtmg2_reg_addr, rankctl_reg_addr, reg_addr;
+
+	/* Enable quasi-dynamic programming of the controller registers */
+ clrbits_le32(umctl2_base + DDR4_SWCTL_OFFSET, DDR4_SWCTL_SW_DONE);
+
+ ret = enable_quasi_dynamic_reg_grp3(umctl2_base, umctl2_type);
+ if (ret)
+ return ret;
+
+ /* Enable access to the PHY configuration registers */
+ clrbits_le16(handoff->phy_base + DDR_PHY_APBONLY0_OFFSET,
+ DDR_PHY_MICROCONTMUXSEL);
+
+ if (umctl2_type == DDRTYPE_DDR4) {
+ val = GET_HIGHB_DATA(readw((uintptr_t)(handoff->phy_base +
+ DMEM_MB_CDD_WW_1_0_OFFSET)));
+
+ upd_val = GET_LOWB_DATA(readw((uintptr_t)(handoff->phy_base +
+ DMEM_MB_CDD_WW_0_1_OFFSET)));
+ } else if (umctl2_type == DDRTYPE_LPDDR4_0) {
+ val = GET_LOWB_DATA(readw((uintptr_t)(handoff->phy_base +
+ DMEM_MB_CDD_CHA_WW_1_0_OFFSET)));
+
+ upd_val = GET_HIGHB_DATA(readw((uintptr_t)(handoff->phy_base +
+ DMEM_MB_CDD_CHA_WW_0_1_OFFSET)));
+ } else if (umctl2_type == DDRTYPE_LPDDR4_1) {
+ val = GET_HIGHB_DATA(readw((uintptr_t)(handoff->phy_base +
+ DMEM_MB_CDD_CHB_WW_1_0_OFFSET)));
+
+ upd_val = GET_LOWB_DATA(readw((uintptr_t)(handoff->phy_base +
+ DMEM_MB_CDD_CHB_WW_0_1_OFFSET)));
+ }
+
+ upd_val = max(val, upd_val);
+ debug("max value is 0x%x\n", upd_val);
+
+	/* Dividing by two is required when running in freq ratio 1:2 */
+ if (!(readl(umctl2_base + DDR4_MSTR_OFFSET) & DDR4_FREQ_RATIO))
+ upd_val = DIV_ROUND_CLOSEST(upd_val, 2);
+
+ debug("Update train value to umctl2 RANKCTL.diff_rank_wr_gap\n");
+ rankctl_reg_addr = umctl2_base + DDR4_RANKCTL_OFFSET;
+ /* Update train value to umctl2 RANKCTL.diff_rank_wr_gap */
+ set_cal_res_to_rankctrl(rankctl_reg_addr, upd_val,
+ DDR4_RANKCTL_DIFF_RANK_WR_GAP,
+ DDR4_RANKCTL_DIFF_RANK_WR_GAP_MSB,
+ DDR4_RANKCTL_DIFF_RANK_WR_GAP_SHIFT);
+
+ debug("Update train value to umctl2 DRAMTMG2.W2RD\n");
+ dramtmg2_reg_addr = umctl2_base + DDR4_DRAMTMG2_OFFSET;
+ /* Update train value to umctl2 dramtmg2.wr2rd */
+ set_cal_res_to_reg(dramtmg2_reg_addr, upd_val, DDR4_DRAMTMG2_WR2RD, 0);
+
+ if (umctl2_type == DDRTYPE_DDR4) {
+ debug("Update train value to umctl2 DRAMTMG9.W2RD_S\n");
+ reg_addr = umctl2_base + DDR4_DRAMTMG9_OFFSET;
+ /* Update train value to umctl2 dramtmg9.wr2rd_s */
+ set_cal_res_to_reg(reg_addr, upd_val, DDR4_DRAMTMG9_W2RD_S, 0);
+ }
+
+ if (umctl2_type == DDRTYPE_DDR4) {
+ val = GET_HIGHB_DATA(readw((uintptr_t)(handoff->phy_base +
+ DMEM_MB_CDD_RR_1_0_OFFSET)));
+
+ upd_val = GET_LOWB_DATA(readw((uintptr_t)(handoff->phy_base +
+ DMEM_MB_CDD_RR_0_1_OFFSET)));
+ } else if (umctl2_type == DDRTYPE_LPDDR4_0) {
+ val = GET_LOWB_DATA(readw((uintptr_t)(handoff->phy_base +
+ DMEM_MB_CDD_CHA_RR_1_0_OFFSET)));
+
+ upd_val = GET_HIGHB_DATA(readw((uintptr_t)(handoff->phy_base +
+ DMEM_MB_CDD_CHA_RR_0_1_OFFSET)));
+ } else if (umctl2_type == DDRTYPE_LPDDR4_1) {
+ val = GET_HIGHB_DATA(readw((uintptr_t)(handoff->phy_base +
+ DMEM_MB_CDD_CHB_RR_1_0_OFFSET)));
+
+ upd_val = GET_LOWB_DATA(readw((uintptr_t)(handoff->phy_base +
+ DMEM_MB_CDD_CHB_RR_0_1_OFFSET)));
+ }
+
+ upd_val = max(val, upd_val);
+ debug("max value is 0x%x\n", upd_val);
+
+	/* Dividing by two is required when running in freq ratio 1:2 */
+ if (!(readl(umctl2_base + DDR4_MSTR_OFFSET) & DDR4_FREQ_RATIO))
+ upd_val = DIV_ROUND_CLOSEST(upd_val, 2);
+
+ debug("Update train value to umctl2 RANKCTL.diff_rank_rd_gap\n");
+ /* Update train value to umctl2 RANKCTL.diff_rank_rd_gap */
+ set_cal_res_to_rankctrl(rankctl_reg_addr, upd_val,
+ DDR4_RANKCTL_DIFF_RANK_RD_GAP,
+ DDR4_RANKCTL_DIFF_RANK_RD_GAP_MSB,
+ DDR4_RANKCTL_DIFF_RANK_RD_GAP_SHIFT);
+
+ if (umctl2_type == DDRTYPE_DDR4) {
+ val = GET_HIGHB_DATA(readw((uintptr_t)(handoff->phy_base +
+ DMEM_MB_CDD_RW_1_1_OFFSET)));
+
+ upd_val = GET_LOWB_DATA(readw((uintptr_t)(handoff->phy_base +
+ DMEM_MB_CDD_RW_1_0_OFFSET)));
+
+ upd_val = max(val, upd_val);
+
+ val = GET_HIGHB_DATA(readw((uintptr_t)(handoff->phy_base +
+ DMEM_MB_CDD_RW_0_1_OFFSET)));
+
+ upd_val = max(val, upd_val);
+
+ val = GET_LOWB_DATA(readw((uintptr_t)(handoff->phy_base +
+ DMEM_MB_CDD_RW_0_0_OFFSET)));
+
+ upd_val = max(val, upd_val);
+ } else if (umctl2_type == DDRTYPE_LPDDR4_0) {
+ val = GET_LOWB_DATA(readw((uintptr_t)(handoff->phy_base +
+ DMEM_MB_CDD_CHA_RW_1_1_OFFSET)));
+
+ upd_val = GET_HIGHB_DATA(readw((uintptr_t)(handoff->phy_base +
+ DMEM_MB_CDD_CHA_RW_1_0_OFFSET)));
+
+ upd_val = max(val, upd_val);
+
+ val = GET_LOWB_DATA(readw((uintptr_t)(handoff->phy_base +
+ DMEM_MB_CDD_CHA_RW_0_1_OFFSET)));
+
+ upd_val = max(val, upd_val);
+
+ val = GET_HIGHB_DATA(readw((uintptr_t)(handoff->phy_base +
+ DMEM_MB_CDD_CHA_RW_0_0_OFFSET)));
+
+ upd_val = max(val, upd_val);
+ } else if (umctl2_type == DDRTYPE_LPDDR4_1) {
+ val = GET_HIGHB_DATA(readw((uintptr_t)(handoff->phy_base +
+ DMEM_MB_CDD_CHB_RW_1_1_OFFSET)));
+
+ upd_val = GET_LOWB_DATA(readw((uintptr_t)(handoff->phy_base +
+ DMEM_MB_CDD_CHB_RW_1_0_OFFSET)));
+
+ upd_val = max(val, upd_val);
+
+ val = GET_HIGHB_DATA(readw((uintptr_t)(handoff->phy_base +
+ DMEM_MB_CDD_CHB_RW_0_1_OFFSET)));
+
+ upd_val = max(val, upd_val);
+
+ val = GET_LOWB_DATA(readw((uintptr_t)(handoff->phy_base +
+ DMEM_MB_CDD_CHB_RW_0_0_OFFSET)));
+
+ upd_val = max(val, upd_val);
+ }
+
+ debug("max value is 0x%x\n", upd_val);
+
+	/* Dividing by two is required when running in freq ratio 1:2 */
+ if (!(readl(umctl2_base + DDR4_MSTR_OFFSET) & DDR4_FREQ_RATIO))
+ upd_val = DIV_ROUND_CLOSEST(upd_val, 2);
+
+ debug("Update train value to umctl2 dramtmg2.rd2wr\n");
+ /* Update train value to umctl2 dramtmg2.rd2wr */
+ set_cal_res_to_reg(dramtmg2_reg_addr, upd_val, DDR4_DRAMTMG2_RD2WR,
+ DDR4_DRAMTMG2_RD2WR_SHIFT);
+
+	/* Check whether ECC is enabled; LPDDR4 uses inline ECC */
+ val = readl(umctl2_base + DDR4_ECCCFG0_OFFSET) & DDR4_ECC_MODE;
+ if (val && umctl2_type == DDRTYPE_DDR4)
+ numdbyte = 0x9;
+
+ upd_val = 0;
+
+ /* Getting max value from DBYTEx TxDqsDlyTg0_u0_p0 */
+ upd_val = get_max_txdqsdlytg0_ux_p0(handoff,
+ DDR_PHY_DBYTE0_TXDQDLYTG0_U0_P0,
+ numdbyte, upd_val);
+
+ /* Getting max value from DBYTEx TxDqsDlyTg0_u1_p0 */
+ upd_val = get_max_txdqsdlytg0_ux_p0(handoff,
+ DDR_PHY_DBYTE0_TXDQDLYTG0_U1_P0,
+ numdbyte, upd_val);
+
+ debug("TxDqsDlyTg0 max value is 0x%x\n", upd_val);
+
+	/* Dividing by two is required when running in freq ratio 1:2 */
+ if (!(readl(umctl2_base + DDR4_MSTR_OFFSET) & DDR4_FREQ_RATIO))
+ upd_val = DIV_ROUND_CLOSEST(upd_val, 2);
+
+ reg_addr = umctl2_base + DDR4_DFITMG1_OFFSET;
+ /* Update train value to umctl2 dfitmg1.dfi_wrdata_delay */
+ set_cal_res_to_reg(reg_addr, upd_val, DDR4_DFITMG1_DFI_T_WRDATA_DELAY,
+ DDR4_DFITMG1_DFI_T_WRDATA_SHIFT);
+
+ /* Complete quasi-dynamic register programming */
+ setbits_le32(umctl2_base + DDR4_SWCTL_OFFSET, DDR4_SWCTL_SW_DONE);
+
+ /* Polling programming done */
+ ret = wait_for_bit_le32((const void *)(umctl2_base +
+ DDR4_SWSTAT_OFFSET), DDR4_SWSTAT_SW_DONE_ACK,
+ true, TIMEOUT_200MS, false);
+ if (ret) {
+ debug("%s: Timeout while waiting for", __func__);
+ debug(" programming done\n");
+ }
+
+ /* Isolate the APB access from internal CSRs */
+ setbits_le16(handoff->phy_base + DDR_PHY_APBONLY0_OFFSET,
+ DDR_PHY_MICROCONTMUXSEL);
+
+ return ret;
+}
+
+static int update_training_result(struct ddr_handoff *ddr_handoff_info)
+{
+ int ret = 0;
+
+ /* Updating training result to first DDR controller */
+ if (ddr_handoff_info->cntlr_t == DDRTYPE_DDR4 ||
+ ddr_handoff_info->cntlr_t == DDRTYPE_LPDDR4_0) {
+ ret = set_cal_res_to_umctl2(ddr_handoff_info,
+ ddr_handoff_info->cntlr_base,
+ ddr_handoff_info->cntlr_t);
+ if (ret) {
+ debug("%s: Failed to update train result to ",
+ __func__);
+ debug("first DDR controller\n");
+ return ret;
+ }
+ }
+
+ /* Updating training result to 2nd DDR controller */
+ if (ddr_handoff_info->cntlr2_t == DDRTYPE_LPDDR4_1) {
+ ret = set_cal_res_to_umctl2(ddr_handoff_info,
+ ddr_handoff_info->cntlr2_base,
+ ddr_handoff_info->cntlr2_t);
+ if (ret) {
+ debug("%s: Failed to update train result to ",
+ __func__);
+ debug("2nd DDR controller\n");
+ }
+ }
+
+ return ret;
+}
+
+static int start_ddr_calibration(struct ddr_handoff *ddr_handoff_info)
+{
+ int ret;
+
+ /* Implement 1D training firmware */
+ ret = configure_training_firmware(ddr_handoff_info,
+ (const void *)SOC64_HANDOFF_DDR_TRAIN_IMEM_1D_SECTION,
+ (const void *)SOC64_HANDOFF_DDR_TRAIN_DMEM_1D_SECTION);
+ if (ret) {
+ debug("%s: Failed to configure 1D training firmware\n",
+ __func__);
+ return ret;
+ }
+
+ calibrating_sdram(ddr_handoff_info);
+
+ ret = poll_for_training_complete(ddr_handoff_info);
+ if (ret) {
+ debug("%s: Failed to get FW training completed\n",
+ __func__);
+ return ret;
+ }
+
+ /* Updating training result to DDR controller */
+ ret = update_training_result(ddr_handoff_info);
+ if (ret)
+ return ret;
+
+ /* Implement 2D training firmware */
+ ret = configure_training_firmware(ddr_handoff_info,
+ (const void *)SOC64_HANDOFF_DDR_TRAIN_IMEM_2D_SECTION,
+ (const void *)SOC64_HANDOFF_DDR_TRAIN_DMEM_2D_SECTION);
+ if (ret) {
+		debug("%s: Failed to configure 2D training firmware\n",
+		      __func__);
+ return ret;
+ }
+
+ calibrating_sdram(ddr_handoff_info);
+
+ ret = poll_for_training_complete(ddr_handoff_info);
+ if (ret)
+ debug("%s: Failed to get FW training completed\n",
+ __func__);
+
+ return ret;
+}
+
+static int init_controller(struct ddr_handoff *ddr_handoff_info,
+ u32 *user_backup, u32 *user_backup_2nd)
+{
+ int ret = 0;
+
+ if (ddr_handoff_info->cntlr_t == DDRTYPE_DDR4 ||
+ ddr_handoff_info->cntlr_t == DDRTYPE_LPDDR4_0) {
+ /* Initialize 1st DDR controller */
+ ret = init_umctl2(ddr_handoff_info->cntlr_handoff_base,
+ ddr_handoff_info->cntlr_base,
+ ddr_handoff_info->cntlr_t,
+ ddr_handoff_info->cntlr_handoff_length,
+ user_backup);
+ if (ret) {
+			debug("%s: Failed to initialize first controller\n",
+ __func__);
+ return ret;
+ }
+ }
+
+ if (ddr_handoff_info->cntlr2_t == DDRTYPE_LPDDR4_1) {
+ /* Initialize 2nd DDR controller */
+ ret = init_umctl2(ddr_handoff_info->cntlr2_handoff_base,
+ ddr_handoff_info->cntlr2_base,
+ ddr_handoff_info->cntlr2_t,
+ ddr_handoff_info->cntlr2_handoff_length,
+ user_backup_2nd);
+ if (ret)
+			debug("%s: Failed to initialize 2nd controller\n",
+ __func__);
+ }
+
+ return ret;
+}
+
+static int dfi_init(struct ddr_handoff *ddr_handoff_info)
+{
+ int ret;
+
+ ret = ddr_start_dfi_init(ddr_handoff_info->cntlr_base,
+ ddr_handoff_info->cntlr_t);
+ if (ret)
+ return ret;
+
+ if (ddr_handoff_info->cntlr2_t == DDRTYPE_LPDDR4_1)
+ ret = ddr_start_dfi_init(ddr_handoff_info->cntlr2_base,
+ ddr_handoff_info->cntlr2_t);
+
+ return ret;
+}
+
+static int check_dfi_init(struct ddr_handoff *handoff)
+{
+ int ret;
+
+ ret = ddr_check_dfi_init_complete(handoff->cntlr_base,
+ handoff->cntlr_t);
+ if (ret)
+ return ret;
+
+ if (handoff->cntlr2_t == DDRTYPE_LPDDR4_1)
+ ret = ddr_check_dfi_init_complete(handoff->cntlr2_base,
+ handoff->cntlr2_t);
+
+ return ret;
+}
+
+static int trigger_sdram_init(struct ddr_handoff *handoff)
+{
+ int ret;
+
+ ret = ddr_trigger_sdram_init(handoff->cntlr_base,
+ handoff->cntlr_t);
+ if (ret)
+ return ret;
+
+ if (handoff->cntlr2_t == DDRTYPE_LPDDR4_1)
+ ret = ddr_trigger_sdram_init(handoff->cntlr2_base,
+ handoff->cntlr2_t);
+
+ return ret;
+}
+
+static int ddr_post_config(struct ddr_handoff *handoff)
+{
+ int ret;
+
+ ret = ddr_post_handoff_config(handoff->cntlr_base,
+ handoff->cntlr_t);
+ if (ret)
+ return ret;
+
+ if (handoff->cntlr2_t == DDRTYPE_LPDDR4_1)
+ ret = ddr_post_handoff_config(handoff->cntlr2_base,
+ handoff->cntlr2_t);
+
+ return ret;
+}
+
+static bool is_ddr_retention_enabled(u32 boot_scratch_cold0_reg)
+{
+ return boot_scratch_cold0_reg &
+ ALT_SYSMGR_SCRATCH_REG_0_DDR_RETENTION_MASK;
+}
+
+static bool is_ddr_bitstream_sha_matching(u32 boot_scratch_cold0_reg)
+{
+ return boot_scratch_cold0_reg & ALT_SYSMGR_SCRATCH_REG_0_DDR_SHA_MASK;
+}
+
+static enum reset_type get_reset_type(u32 boot_scratch_cold0_reg)
+{
+ return (boot_scratch_cold0_reg &
+ ALT_SYSMGR_SCRATCH_REG_0_DDR_RESET_TYPE_MASK) >>
+ ALT_SYSMGR_SCRATCH_REG_0_DDR_RESET_TYPE_SHIFT;
+}
+
+void reset_type_debug_print(u32 boot_scratch_cold0_reg)
+{
+ switch (get_reset_type(boot_scratch_cold0_reg)) {
+ case POR_RESET:
+ debug("%s: POR is triggered\n", __func__);
+ break;
+ case WARM_RESET:
+ debug("%s: Warm reset is triggered\n", __func__);
+ break;
+ case COLD_RESET:
+ debug("%s: Cold reset is triggered\n", __func__);
+ break;
+ default:
+ debug("%s: Invalid reset type\n", __func__);
+ }
+}
+
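+/*
+ * DDR init is skipped on warm reset, and on cold reset when DDR retention
+ * is enabled and the DDR bitstream SHA matches; a POR always requires a
+ * full DDR init.
+ */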
+bool is_ddr_init(void)
+{
+ u32 reg = readl(socfpga_get_sysmgr_addr() +
+ SYSMGR_SOC64_BOOT_SCRATCH_COLD0);
+
+ reset_type_debug_print(reg);
+
+ if (get_reset_type(reg) == POR_RESET) {
+ debug("%s: DDR init is required\n", __func__);
+ return true;
+ }
+
+ if (get_reset_type(reg) == WARM_RESET) {
+ debug("%s: DDR init is skipped\n", __func__);
+ return false;
+ }
+
+ if (get_reset_type(reg) == COLD_RESET) {
+ if (is_ddr_retention_enabled(reg) &&
+ is_ddr_bitstream_sha_matching(reg)) {
+ debug("%s: DDR retention bit is set\n", __func__);
+			debug("%s: Matching DDR bitstream\n", __func__);
+ debug("%s: DDR init is skipped\n", __func__);
+ return false;
+ }
+ }
+
+ debug("%s: DDR init is required\n", __func__);
+ return true;
+}
+
+int sdram_mmr_init_full(struct udevice *dev)
+{
+ u32 user_backup[2], user_backup_2nd[2];
+ int ret;
+ struct bd_info bd;
+ struct ddr_handoff ddr_handoff_info;
+ struct altera_sdram_priv *priv = dev_get_priv(dev);
+
+ printf("Checking SDRAM configuration in progress ...\n");
+ ret = populate_ddr_handoff(&ddr_handoff_info);
+ if (ret) {
+ debug("%s: Failed to populate DDR handoff\n",
+ __func__);
+ return ret;
+ }
+
+ /* Set the MPFE NoC mux to correct DDR controller type */
+ use_ddr4(ddr_handoff_info.cntlr_t);
+
+ if (is_ddr_init()) {
+ printf("SDRAM init in progress ...\n");
+
+ /*
+		 * Poll for reset complete; it must be high to ensure the DDR
+		 * subsystem is in a complete reset state before initializing
+		 * the DDR clock and DDR controller
+ */
+ ret = wait_for_bit_le32((const void *)((uintptr_t)(readl
+ (ddr_handoff_info.mem_reset_base) +
+ MEM_RST_MGR_STATUS)),
+ MEM_RST_MGR_STATUS_RESET_COMPLETE,
+ true, TIMEOUT_200MS, false);
+ if (ret) {
+ debug("%s: Timeout while waiting for", __func__);
+ debug(" reset complete done\n");
+ return ret;
+ }
+
+ ret = enable_ddr_clock(dev);
+ if (ret)
+ return ret;
+
+ ret = init_controller(&ddr_handoff_info, user_backup,
+ user_backup_2nd);
+ if (ret) {
+			debug("%s: Failed to initialize DDR controller\n",
+ __func__);
+ return ret;
+ }
+
+ /* Release the controller from reset */
+ setbits_le32((uintptr_t)
+ (readl(ddr_handoff_info.mem_reset_base) +
+ MEM_RST_MGR_STATUS), MEM_RST_MGR_STATUS_AXI_RST |
+ MEM_RST_MGR_STATUS_CONTROLLER_RST |
+ MEM_RST_MGR_STATUS_RESET_COMPLETE);
+
+ printf("DDR controller configuration is completed\n");
+
+ /* Initialize DDR PHY */
+ ret = init_phy(&ddr_handoff_info);
+ if (ret) {
+			debug("%s: Failed to initialize DDR PHY\n", __func__);
+ return ret;
+ }
+
+ enable_phy_clk_for_csr_access(&ddr_handoff_info, true);
+
+ ret = start_ddr_calibration(&ddr_handoff_info);
+ if (ret) {
+ debug("%s: Failed to calibrate DDR\n", __func__);
+ return ret;
+ }
+
+ enable_phy_clk_for_csr_access(&ddr_handoff_info, false);
+
+		/* Reset the ARC processor when not in use, for security purposes */
+ setbits_le16(ddr_handoff_info.phy_base +
+ DDR_PHY_MICRORESET_OFFSET,
+ DDR_PHY_MICRORESET_RESET);
+
+ /* DDR freq set to support DDR4-3200 */
+ phy_init_engine(&ddr_handoff_info);
+
+ ret = dfi_init(&ddr_handoff_info);
+ if (ret)
+ return ret;
+
+ ret = check_dfi_init(&ddr_handoff_info);
+ if (ret)
+ return ret;
+
+ ret = trigger_sdram_init(&ddr_handoff_info);
+ if (ret)
+ return ret;
+
+ ret = ddr_post_config(&ddr_handoff_info);
+ if (ret)
+ return ret;
+
+ /* Restore user settings */
+ writel(user_backup[0], ddr_handoff_info.cntlr_base +
+ DDR4_PWRCTL_OFFSET);
+
+ if (ddr_handoff_info.cntlr2_t == DDRTYPE_LPDDR4_0)
+ setbits_le32(ddr_handoff_info.cntlr_base +
+ DDR4_INIT0_OFFSET, user_backup[1]);
+
+ if (ddr_handoff_info.cntlr2_t == DDRTYPE_LPDDR4_1) {
+ /* Restore user settings */
+ writel(user_backup_2nd[0],
+ ddr_handoff_info.cntlr2_base +
+ DDR4_PWRCTL_OFFSET);
+
+ setbits_le32(ddr_handoff_info.cntlr2_base +
+ DDR4_INIT0_OFFSET, user_backup_2nd[1]);
+ }
+
+ /* Enable input traffic per port */
+ setbits_le32(ddr_handoff_info.cntlr_base + DDR4_PCTRL0_OFFSET,
+ DDR4_PCTRL0_PORT_EN);
+
+ if (ddr_handoff_info.cntlr2_t == DDRTYPE_LPDDR4_1) {
+ /* Enable input traffic per port */
+ setbits_le32(ddr_handoff_info.cntlr2_base +
+ DDR4_PCTRL0_OFFSET, DDR4_PCTRL0_PORT_EN);
+ }
+
+ printf("DDR init success\n");
+ }
+
+ /* Get bank configuration from devicetree */
+ ret = fdtdec_decode_ram_size(gd->fdt_blob, NULL, 0, NULL,
+ (phys_size_t *)&gd->ram_size, &bd);
+ if (ret) {
+ debug("%s: Failed to decode memory node\n", __func__);
+ return -1;
+ }
+
+ printf("DDR: %lld MiB\n", gd->ram_size >> 20);
+
+ priv->info.base = bd.bi_dram[0].start;
+ priv->info.size = gd->ram_size;
+
+ sdram_size_check(&bd);
+
+ sdram_set_firewall(&bd);
+
+ return 0;
+}
diff --git a/drivers/ddr/altera/sdram_soc64.c b/drivers/ddr/altera/sdram_soc64.c
index a08f0953e55..d6baac24106 100644
--- a/drivers/ddr/altera/sdram_soc64.c
+++ b/drivers/ddr/altera/sdram_soc64.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0
/*
- * Copyright (C) 2016-2019 Intel Corporation <www.intel.com>
+ * Copyright (C) 2016-2021 Intel Corporation <www.intel.com>
*
*/
@@ -100,12 +100,14 @@ int emif_reset(struct altera_sdram_plat *plat)
return 0;
}
+#if !IS_ENABLED(CONFIG_TARGET_SOCFPGA_N5X)
int poll_hmc_clock_status(void)
{
return wait_for_bit_le32((const void *)(socfpga_get_sysmgr_addr() +
SYSMGR_SOC64_HMC_CLK),
SYSMGR_HMC_CLK_STATUS_MSK, true, 1000, false);
}
+#endif
void sdram_clear_mem(phys_addr_t addr, phys_size_t size)
{
@@ -182,6 +184,7 @@ void sdram_size_check(struct bd_info *bd)
phys_size_t total_ram_check = 0;
phys_size_t ram_check = 0;
phys_addr_t start = 0;
+ phys_size_t size, remaining_size;
int bank;
/* Sanity check ensure correct SDRAM size specified */
@@ -189,10 +192,27 @@ void sdram_size_check(struct bd_info *bd)
for (bank = 0; bank < CONFIG_NR_DRAM_BANKS; bank++) {
start = bd->bi_dram[bank].start;
+ remaining_size = bd->bi_dram[bank].size;
while (ram_check < bd->bi_dram[bank].size) {
- ram_check += get_ram_size((void *)(start + ram_check),
- (phys_size_t)SZ_1G);
+ size = min((phys_addr_t)SZ_1G,
+ (phys_addr_t)remaining_size);
+
+ /*
+			 * Ensure the size is a power of two; this is a
+			 * requirement for get_ram_size() / the memory test
+ */
+ if (size != 0 && ((size & (size - 1)) == 0)) {
+ ram_check += get_ram_size((void *)
+ (start + ram_check), size);
+ remaining_size = bd->bi_dram[bank].size -
+ ram_check;
+ } else {
+ puts("DDR: Memory test requires SDRAM size ");
+				puts("to be a power of two!\n");
+ hang();
+ }
}
+
total_ram_check += ram_check;
ram_check = 0;
}
@@ -231,11 +251,78 @@ phys_size_t sdram_calculate_size(struct altera_sdram_plat *plat)
return size;
}
+void sdram_set_firewall(struct bd_info *bd)
+{
+ u32 i;
+ phys_size_t value;
+ u32 lower, upper;
+
+ for (i = 0; i < CONFIG_NR_DRAM_BANKS; i++) {
+ if (!bd->bi_dram[i].size)
+ continue;
+
+ value = bd->bi_dram[i].start;
+
+ /* Keep first 1MB of SDRAM memory region as secure region when
+ * using ATF flow, where the ATF code is located.
+ */
+ if (IS_ENABLED(CONFIG_SPL_ATF) && i == 0)
+ value += SZ_1M;
+
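+		/*
+		 * Each DRAM bank programs its own firewall region; the
+		 * i * 4 * sizeof(u32) stride below steps to the next bank's
+		 * set of four 32-bit region registers (base, base ext,
+		 * limit, limit ext).
+		 */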
+ /* Setting non-secure MPU region base and base extended */
+ lower = lower_32_bits(value);
+ upper = upper_32_bits(value);
+ FW_MPU_DDR_SCR_WRITEL(lower,
+ FW_MPU_DDR_SCR_MPUREGION0ADDR_BASE +
+ (i * 4 * sizeof(u32)));
+ FW_MPU_DDR_SCR_WRITEL(upper & 0xff,
+ FW_MPU_DDR_SCR_MPUREGION0ADDR_BASEEXT +
+ (i * 4 * sizeof(u32)));
+
+ /* Setting non-secure Non-MPU region base and base extended */
+ FW_MPU_DDR_SCR_WRITEL(lower,
+ FW_MPU_DDR_SCR_NONMPUREGION0ADDR_BASE +
+ (i * 4 * sizeof(u32)));
+ FW_MPU_DDR_SCR_WRITEL(upper & 0xff,
+ FW_MPU_DDR_SCR_NONMPUREGION0ADDR_BASEEXT +
+ (i * 4 * sizeof(u32)));
+
+		/* Setting non-secure MPU limit and limit extended */
+ value = bd->bi_dram[i].start + bd->bi_dram[i].size - 1;
+
+ lower = lower_32_bits(value);
+ upper = upper_32_bits(value);
+
+ FW_MPU_DDR_SCR_WRITEL(lower,
+ FW_MPU_DDR_SCR_MPUREGION0ADDR_LIMIT +
+ (i * 4 * sizeof(u32)));
+ FW_MPU_DDR_SCR_WRITEL(upper & 0xff,
+ FW_MPU_DDR_SCR_MPUREGION0ADDR_LIMITEXT +
+ (i * 4 * sizeof(u32)));
+
+		/* Setting non-secure Non-MPU limit and limit extended */
+ FW_MPU_DDR_SCR_WRITEL(lower,
+ FW_MPU_DDR_SCR_NONMPUREGION0ADDR_LIMIT +
+ (i * 4 * sizeof(u32)));
+ FW_MPU_DDR_SCR_WRITEL(upper & 0xff,
+ FW_MPU_DDR_SCR_NONMPUREGION0ADDR_LIMITEXT +
+ (i * 4 * sizeof(u32)));
+
+ FW_MPU_DDR_SCR_WRITEL(BIT(i) | BIT(i + 8),
+ FW_MPU_DDR_SCR_EN_SET);
+ }
+}
+
static int altera_sdram_of_to_plat(struct udevice *dev)
{
struct altera_sdram_plat *plat = dev_get_plat(dev);
fdt_addr_t addr;
+	/* These register settings are part of the DDR handoff in the bitstream */
+#if IS_ENABLED(CONFIG_TARGET_SOCFPGA_N5X)
+ return 0;
+#endif
+
addr = dev_read_addr_index(dev, 0);
if (addr == FDT_ADDR_T_NONE)
return -EINVAL;
@@ -296,6 +383,7 @@ static struct ram_ops altera_sdram_ops = {
static const struct udevice_id altera_sdram_ids[] = {
{ .compatible = "altr,sdr-ctl-s10" },
{ .compatible = "intel,sdr-ctl-agilex" },
+ { .compatible = "intel,sdr-ctl-n5x" },
{ /* sentinel */ }
};
diff --git a/drivers/ddr/altera/sdram_soc64.h b/drivers/ddr/altera/sdram_soc64.h
index 8af0afc4100..7460f8c220d 100644
--- a/drivers/ddr/altera/sdram_soc64.h
+++ b/drivers/ddr/altera/sdram_soc64.h
@@ -180,6 +180,7 @@ int emif_reset(struct altera_sdram_plat *plat);
int poll_hmc_clock_status(void);
void sdram_clear_mem(phys_addr_t addr, phys_size_t size);
void sdram_init_ecc_bits(struct bd_info *bd);
+void sdram_set_firewall(struct bd_info *bd);
void sdram_size_check(struct bd_info *bd);
phys_size_t sdram_calculate_size(struct altera_sdram_plat *plat);
int sdram_mmr_init_full(struct udevice *dev);
diff --git a/drivers/ddr/imx/Kconfig b/drivers/ddr/imx/Kconfig
index 7e06fb2f7d2..179f34530d7 100644
--- a/drivers/ddr/imx/Kconfig
+++ b/drivers/ddr/imx/Kconfig
@@ -1 +1,2 @@
source "drivers/ddr/imx/imx8m/Kconfig"
+source "drivers/ddr/imx/imx8ulp/Kconfig"
diff --git a/drivers/ddr/imx/imx8ulp/Kconfig b/drivers/ddr/imx/imx8ulp/Kconfig
new file mode 100644
index 00000000000..e56062a1d03
--- /dev/null
+++ b/drivers/ddr/imx/imx8ulp/Kconfig
@@ -0,0 +1,11 @@
+menu "i.MX8ULP DDR controllers"
+ depends on ARCH_IMX8ULP
+
+config IMX8ULP_DRAM
+	bool "imx8ulp dram"
+
+config IMX8ULP_DRAM_PHY_PLL_BYPASS
+	bool "Enable the DDR PHY PLL bypass mode, so the PHY clock comes from DDR_CLK"
+ depends on IMX8ULP_DRAM
+
+endmenu
diff --git a/drivers/ddr/imx/imx8ulp/Makefile b/drivers/ddr/imx/imx8ulp/Makefile
new file mode 100644
index 00000000000..7f44a92180f
--- /dev/null
+++ b/drivers/ddr/imx/imx8ulp/Makefile
@@ -0,0 +1,9 @@
+#
+# Copyright 2021 NXP
+#
+# SPDX-License-Identifier: GPL-2.0+
+#
+
+ifdef CONFIG_SPL_BUILD
+obj-$(CONFIG_IMX8ULP_DRAM) += ddr_init.o
+endif
diff --git a/drivers/ddr/imx/imx8ulp/ddr_init.c b/drivers/ddr/imx/imx8ulp/ddr_init.c
new file mode 100644
index 00000000000..16aaf56103d
--- /dev/null
+++ b/drivers/ddr/imx/imx8ulp/ddr_init.c
@@ -0,0 +1,217 @@
+// SPDX-License-Identifier: GPL-2.0+ OR MIT
+/*
+ * Copyright 2021 NXP
+ */
+#include <common.h>
+#include <asm/io.h>
+#include <asm/arch/clock.h>
+#include <asm/arch/ddr.h>
+#include <asm/arch/imx-regs.h>
+
+#define DENALI_CTL_00 (DDR_CTL_BASE_ADDR + 4 * 0)
+#define CTL_START 0x1
+
+#define DENALI_CTL_03 (DDR_CTL_BASE_ADDR + 4 * 3)
+#define DENALI_CTL_197 (DDR_CTL_BASE_ADDR + 4 * 197)
+#define DENALI_CTL_250 (DDR_CTL_BASE_ADDR + 4 * 250)
+#define DENALI_CTL_251 (DDR_CTL_BASE_ADDR + 4 * 251)
+#define DENALI_CTL_266 (DDR_CTL_BASE_ADDR + 4 * 266)
+#define DFI_INIT_COMPLETE 0x2
+
+#define DENALI_CTL_614 (DDR_CTL_BASE_ADDR + 4 * 614)
+#define DENALI_CTL_615 (DDR_CTL_BASE_ADDR + 4 * 615)
+
+#define DENALI_PI_00 (DDR_PI_BASE_ADDR + 4 * 0)
+#define PI_START 0x1
+
+#define DENALI_PI_04 (DDR_PI_BASE_ADDR + 4 * 4)
+#define DENALI_PI_11 (DDR_PI_BASE_ADDR + 4 * 11)
+#define DENALI_PI_12 (DDR_PI_BASE_ADDR + 4 * 12)
+#define DENALI_CTL_23 (DDR_CTL_BASE_ADDR + 4 * 23)
+#define DENALI_CTL_25 (DDR_CTL_BASE_ADDR + 4 * 25)
+
+#define DENALI_PHY_1624 (DDR_PHY_BASE_ADDR + 4 * 1624)
+#define DENALI_PHY_1537 (DDR_PHY_BASE_ADDR + 4 * 1537)
+#define PHY_FREQ_SEL_MULTICAST_EN(X) ((X) << 8)
+#define PHY_FREQ_SEL_INDEX(X) ((X) << 16)
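+/*
+ * With multicast enabled, a PHY register write updates the copies for all
+ * frequency points; with it disabled, PHY_FREQ_SEL_INDEX selects a single
+ * frequency point to program.
+ */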
+
+#define DENALI_PHY_1547 (DDR_PHY_BASE_ADDR + 4 * 1547)
+#define DENALI_PHY_1555 (DDR_PHY_BASE_ADDR + 4 * 1555)
+#define DENALI_PHY_1564 (DDR_PHY_BASE_ADDR + 4 * 1564)
+#define DENALI_PHY_1565 (DDR_PHY_BASE_ADDR + 4 * 1565)
+
+static void ddr_enable_pll_bypass(void)
+{
+ u32 reg_val;
+
+ /* PI_INIT_LVL_EN=0x0 (DENALI_PI_04) */
+ reg_val = readl(DENALI_PI_04) & ~0x1;
+ writel(reg_val, DENALI_PI_04);
+
+ /* PI_FREQ_MAP=0x1 (DENALI_PI_12) */
+ writel(0x1, DENALI_PI_12);
+
+ /* PI_INIT_WORK_FREQ=0x0 (DENALI_PI_11) */
+ reg_val = readl(DENALI_PI_11) & ~(0x1f << 8);
+ writel(reg_val, DENALI_PI_11);
+
+ /* DFIBUS_FREQ_INIT=0x0 (DENALI_CTL_23) */
+ reg_val = readl(DENALI_CTL_23) & ~(0x3 << 24);
+ writel(reg_val, DENALI_CTL_23);
+
+ /* PHY_LP4_BOOT_DISABLE=0x0 (DENALI_PHY_1547) */
+ reg_val = readl(DENALI_PHY_1547) & ~(0x1 << 8);
+ writel(reg_val, DENALI_PHY_1547);
+
+ /* PHY_PLL_BYPASS=0x1 (DENALI_PHY_1624) */
+ reg_val = readl(DENALI_PHY_1624) | 0x1;
+ writel(reg_val, DENALI_PHY_1624);
+
+ /* PHY_LP4_BOOT_PLL_BYPASS to 0x1 (DENALI_PHY_1555) */
+ reg_val = readl(DENALI_PHY_1555) | 0x1;
+ writel(reg_val, DENALI_PHY_1555);
+
+ /* FREQ_CHANGE_TYPE_F0 = 0x0/FREQ_CHANGE_TYPE_F1 = 0x1/FREQ_CHANGE_TYPE_F2 = 0x2 */
+ reg_val = 0x020100;
+ writel(reg_val, DENALI_CTL_25);
+}
+
+int ddr_calibration(unsigned int fsp_table[3])
+{
+ u32 reg_val;
+ u32 int_status_init, phy_freq_req, phy_freq_type;
+ u32 lock_0, lock_1, lock_2;
+ u32 freq_chg_pt, freq_chg_cnt;
+
+ if (IS_ENABLED(CONFIG_IMX8ULP_DRAM_PHY_PLL_BYPASS)) {
+ ddr_enable_pll_bypass();
+ freq_chg_cnt = 0;
+ freq_chg_pt = 0;
+ } else {
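+		/*
+		 * Derive from the controller configuration how many PHY
+		 * frequency change requests are expected (freq_chg_cnt) and
+		 * at which frequency point they are counted (freq_chg_pt)
+		 * before DFI init completion is polled.
+		 */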
+ reg_val = readl(DENALI_CTL_250);
+ if (((reg_val >> 16) & 0x3) == 1)
+ freq_chg_cnt = 2;
+ else
+ freq_chg_cnt = 3;
+
+ reg_val = readl(DENALI_PI_12);
+ if (reg_val == 0x3) {
+ freq_chg_pt = 1;
+ } else if (reg_val == 0x7) {
+ freq_chg_pt = 2;
+ } else {
+			printf("frequency map (0x%x) is wrong, please check!\r\n", reg_val);
+ return -1;
+ }
+ }
+
+ /* Assert PI_START parameter and then assert START parameter in Controller. */
+ reg_val = readl(DENALI_PI_00) | PI_START;
+ writel(reg_val, DENALI_PI_00);
+
+ reg_val = readl(DENALI_CTL_00) | CTL_START;
+ writel(reg_val, DENALI_CTL_00);
+
+ /* Poll for init_done_bit in Controller interrupt status register (INT_STATUS_INIT) */
+ do {
+ if (!freq_chg_cnt) {
+ int_status_init = (readl(DENALI_CTL_266) >> 8) & 0xff;
+ /* DDR subsystem is ready for traffic. */
+ if (int_status_init & DFI_INIT_COMPLETE) {
+ debug("complete\n");
+ break;
+ }
+ }
+
+ /*
+		 * During leveling, the PHY requests a frequency change and the
+		 * SoC clock logic must provide the requested frequency.
+		 * Poll SIM LPDDR_CTRL2 bit phy_freq_chg_req until it reads 1'b1.
+ */
+ reg_val = readl(AVD_SIM_LPDDR_CTRL2);
+ phy_freq_req = (reg_val >> 7) & 0x1;
+
+ if (phy_freq_req) {
+ phy_freq_type = reg_val & 0x1F;
+ if (phy_freq_type == 0x00) {
+ debug("Poll for freq_chg_req on SIM register and change to F0 frequency.\n");
+ set_ddr_clk(fsp_table[phy_freq_type] >> 1);
+
+ /* Write 1'b1 at LPDDR_CTRL2 bit phy_freq_cfg_ack */
+ reg_val = readl(AVD_SIM_LPDDR_CTRL2);
+ writel(reg_val | (0x1 << 6), AVD_SIM_LPDDR_CTRL2);
+ } else if (phy_freq_type == 0x01) {
+ debug("Poll for freq_chg_req on SIM register and change to F1 frequency.\n");
+ set_ddr_clk(fsp_table[phy_freq_type] >> 1);
+
+ /* Write 1'b1 at LPDDR_CTRL2 bit phy_freq_cfg_ack */
+ reg_val = readl(AVD_SIM_LPDDR_CTRL2);
+ writel(reg_val | (0x1 << 6), AVD_SIM_LPDDR_CTRL2);
+ if (freq_chg_pt == 1)
+ freq_chg_cnt--;
+ } else if (phy_freq_type == 0x02) {
+ debug("Poll for freq_chg_req on SIM register and change to F2 frequency.\n");
+ set_ddr_clk(fsp_table[phy_freq_type] >> 1);
+
+ /* Write 1'b1 at LPDDR_CTRL2 bit phy_freq_cfg_ack */
+ reg_val = readl(AVD_SIM_LPDDR_CTRL2);
+ writel(reg_val | (0x1 << 6), AVD_SIM_LPDDR_CTRL2);
+ if (freq_chg_pt == 2)
+ freq_chg_cnt--;
+ }
+ reg_val = readl(AVD_SIM_LPDDR_CTRL2);
+ }
+ } while (1);
+
+ /* Check PLL lock status */
+ lock_0 = readl(DENALI_PHY_1564) & 0xffff;
+ lock_1 = (readl(DENALI_PHY_1564) >> 16) & 0xffff;
+ lock_2 = readl(DENALI_PHY_1565) & 0xffff;
+
+ if ((lock_0 & 0x3) != 0x3 || (lock_1 & 0x3) != 0x3 || (lock_2 & 0x3) != 0x3) {
+ debug("De-Skew PLL failed to lock\n");
+ debug("lock_0=0x%x, lock_1=0x%x, lock_2=0x%x\n", lock_0, lock_1, lock_2);
+ return -1;
+ }
+
+ debug("De-Skew PLL is locked and ready\n");
+ return 0;
+}
+
+int ddr_init(struct dram_timing_info2 *dram_timing)
+{
+ int i;
+
+ if (IS_ENABLED(CONFIG_IMX8ULP_DRAM_PHY_PLL_BYPASS)) {
+ /* Use PLL bypass for boot freq */
+		/* Since the PLL can't generate the doubled frequency, the DDR clock must generate it. */
+ set_ddr_clk(dram_timing->fsp_table[0]); /* Set to boot freq */
+ setbits_le32(AVD_SIM_BASE_ADDR, 0x1); /* SIM_DDR_CTRL_DIV2_EN */
+ } else {
+ set_ddr_clk(dram_timing->fsp_table[0] >> 1); /* Set to boot freq */
+ clrbits_le32(AVD_SIM_BASE_ADDR, 0x1); /* SIM_DDR_CTRL_DIV2_EN */
+ }
+
+ /* Initialize CTL registers */
+ for (i = 0; i < dram_timing->ctl_cfg_num; i++)
+ writel(dram_timing->ctl_cfg[i].val, (ulong)dram_timing->ctl_cfg[i].reg);
+
+ /* Initialize PI registers */
+ for (i = 0; i < dram_timing->pi_cfg_num; i++)
+ writel(dram_timing->pi_cfg[i].val, (ulong)dram_timing->pi_cfg[i].reg);
+
+	/* Write PHY registers for all 3 frequency points (48MHz/384MHz/528MHz): f1_index=0 */
+ writel(PHY_FREQ_SEL_MULTICAST_EN(1) | PHY_FREQ_SEL_INDEX(0), DENALI_PHY_1537);
+ for (i = 0; i < dram_timing->phy_f1_cfg_num; i++)
+ writel(dram_timing->phy_f1_cfg[i].val, (ulong)dram_timing->phy_f1_cfg[i].reg);
+
+	/* Write PHY registers for frequency point 2 (528MHz): f2_index=1 */
+ writel(PHY_FREQ_SEL_MULTICAST_EN(0) | PHY_FREQ_SEL_INDEX(1), DENALI_PHY_1537);
+ for (i = 0; i < dram_timing->phy_f2_cfg_num; i++)
+ writel(dram_timing->phy_f2_cfg[i].val, (ulong)dram_timing->phy_f2_cfg[i].reg);
+
+ /* Re-enable MULTICAST mode */
+ writel(PHY_FREQ_SEL_MULTICAST_EN(1) | PHY_FREQ_SEL_INDEX(0), DENALI_PHY_1537);
+
+ return ddr_calibration(dram_timing->fsp_table);
+}
diff --git a/drivers/fastboot/fb_mmc.c b/drivers/fastboot/fb_mmc.c
index 2f3837e5591..cbb3f7b1dea 100644
--- a/drivers/fastboot/fb_mmc.c
+++ b/drivers/fastboot/fb_mmc.c
@@ -512,7 +512,7 @@ void fastboot_mmc_flash_write(const char *cmd, void *download_buffer,
u32 download_bytes, char *response)
{
struct blk_desc *dev_desc;
- struct disk_partition info;
+ struct disk_partition info = {0};
#ifdef CONFIG_FASTBOOT_MMC_BOOT_SUPPORT
if (strcmp(cmd, CONFIG_FASTBOOT_MMC_BOOT1_NAME) == 0) {
@@ -525,19 +525,14 @@ void fastboot_mmc_flash_write(const char *cmd, void *download_buffer,
if (strcmp(cmd, CONFIG_FASTBOOT_MMC_BOOT2_NAME) == 0) {
dev_desc = fastboot_mmc_get_dev(response);
if (dev_desc)
- fb_mmc_boot_ops(dev_desc, download_buffer, 1,
+ fb_mmc_boot_ops(dev_desc, download_buffer, 2,
download_bytes, response);
return;
}
#endif
#if CONFIG_IS_ENABLED(EFI_PARTITION)
-#ifndef CONFIG_FASTBOOT_MMC_USER_SUPPORT
if (strcmp(cmd, CONFIG_FASTBOOT_GPT_NAME) == 0) {
-#else
- if (strcmp(cmd, CONFIG_FASTBOOT_GPT_NAME) == 0 ||
- strcmp(cmd, CONFIG_FASTBOOT_MMC_USER_NAME) == 0) {
-#endif
dev_desc = fastboot_mmc_get_dev(response);
if (!dev_desc)
return;
@@ -599,7 +594,20 @@ void fastboot_mmc_flash_write(const char *cmd, void *download_buffer,
}
#endif
- if (fastboot_mmc_get_part_info(cmd, &dev_desc, &info, response) < 0)
+#if CONFIG_IS_ENABLED(FASTBOOT_MMC_USER_SUPPORT)
+ if (strcmp(cmd, CONFIG_FASTBOOT_MMC_USER_NAME) == 0) {
+ dev_desc = fastboot_mmc_get_dev(response);
+ if (!dev_desc)
+ return;
+
+ strlcpy((char *)&info.name, cmd, sizeof(info.name));
+ info.size = dev_desc->lba;
+ info.blksz = dev_desc->blksz;
+ }
+#endif
+
+ if (!info.name[0] &&
+ fastboot_mmc_get_part_info(cmd, &dev_desc, &info, response) < 0)
return;
if (is_sparse_image(download_buffer)) {
@@ -655,7 +663,7 @@ void fastboot_mmc_erase(const char *cmd, char *response)
/* erase EMMC boot2 */
dev_desc = fastboot_mmc_get_dev(response);
if (dev_desc)
- fb_mmc_boot_ops(dev_desc, NULL, 1, 0, response);
+ fb_mmc_boot_ops(dev_desc, NULL, 2, 0, response);
return;
}
#endif
diff --git a/drivers/gpio/Kconfig b/drivers/gpio/Kconfig
index 09695f6c2b0..4a89c1a62b7 100644
--- a/drivers/gpio/Kconfig
+++ b/drivers/gpio/Kconfig
@@ -179,6 +179,16 @@ config LPC32XX_GPIO
help
Support for the LPC32XX GPIO driver.
+config MCP230XX_GPIO
+ bool "MCP230XX GPIO driver"
+ depends on DM
+ help
+ Support for Microchip's MCP230XX I2C connected GPIO devices.
+ The following chips are supported:
+ - MCP23008
+ - MCP23017
+ - MCP23018
+
config MSCC_SGPIO
bool "Microsemi Serial GPIO driver"
depends on DM_GPIO && SOC_VCOREIII
diff --git a/drivers/gpio/Makefile b/drivers/gpio/Makefile
index 16b09fb1b5b..58f4704f6bc 100644
--- a/drivers/gpio/Makefile
+++ b/drivers/gpio/Makefile
@@ -24,6 +24,7 @@ obj-$(CONFIG_KIRKWOOD_GPIO) += kw_gpio.o
obj-$(CONFIG_KONA_GPIO) += kona_gpio.o
obj-$(CONFIG_MARVELL_GPIO) += mvgpio.o
obj-$(CONFIG_MARVELL_MFP) += mvmfp.o
+obj-$(CONFIG_MCP230XX_GPIO) += mcp230xx_gpio.o
obj-$(CONFIG_MXC_GPIO) += mxc_gpio.o
obj-$(CONFIG_MXS_GPIO) += mxs_gpio.o
obj-$(CONFIG_PCA953X) += pca953x.o
diff --git a/drivers/gpio/mcp230xx_gpio.c b/drivers/gpio/mcp230xx_gpio.c
new file mode 100644
index 00000000000..9f02fd42b35
--- /dev/null
+++ b/drivers/gpio/mcp230xx_gpio.c
@@ -0,0 +1,235 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2021, Collabora Ltd.
+ * Copyright (C) 2021, General Electric Company
+ * Author(s): Sebastian Reichel <sebastian.reichel@collabora.com>
+ */
+
+#define LOG_CATEGORY UCLASS_GPIO
+
+#include <common.h>
+#include <errno.h>
+#include <dm.h>
+#include <i2c.h>
+#include <asm/gpio.h>
+#include <dm/device_compat.h>
+#include <dt-bindings/gpio/gpio.h>
+
+enum mcp230xx_type {
+ UNKNOWN = 0,
+ MCP23008,
+ MCP23017,
+ MCP23018,
+};
+
+#define MCP230XX_IODIR 0x00
+#define MCP230XX_GPPU 0x06
+#define MCP230XX_GPIO 0x09
+#define MCP230XX_OLAT 0x0a
+
+#define BANKSIZE 8
+
+static int mcp230xx_read(struct udevice *dev, uint reg, uint offset)
+{
+ struct gpio_dev_priv *uc_priv = dev_get_uclass_priv(dev);
+ int bank = offset / BANKSIZE;
+ int mask = 1 << (offset % BANKSIZE);
+ int shift = (uc_priv->gpio_count / BANKSIZE) - 1;
+ int ret;
+
+ ret = dm_i2c_reg_read(dev, (reg << shift) | bank);
+ if (ret < 0)
+ return ret;
+
+ return !!(ret & mask);
+}
+
+static int mcp230xx_write(struct udevice *dev, uint reg, uint offset, bool val)
+{
+ struct gpio_dev_priv *uc_priv = dev_get_uclass_priv(dev);
+ int bank = offset / BANKSIZE;
+ int mask = 1 << (offset % BANKSIZE);
+ int shift = (uc_priv->gpio_count / BANKSIZE) - 1;
+
+ return dm_i2c_reg_clrset(dev, (reg << shift) | bank, mask, val ? mask : 0);
+}
+
+static int mcp230xx_get_value(struct udevice *dev, uint offset)
+{
+ int ret;
+
+ ret = mcp230xx_read(dev, MCP230XX_GPIO, offset);
+ if (ret < 0) {
+ dev_err(dev, "%s error: %d\n", __func__, ret);
+ return ret;
+ }
+
+ return ret;
+}
+
+static int mcp230xx_set_value(struct udevice *dev, uint offset, int val)
+{
+ int ret;
+
+ ret = mcp230xx_write(dev, MCP230XX_GPIO, offset, val);
+ if (ret < 0) {
+ dev_err(dev, "%s error: %d\n", __func__, ret);
+ return ret;
+ }
+
+ return ret;
+}
+
+static int mcp230xx_get_flags(struct udevice *dev, unsigned int offset,
+ ulong *flags)
+{
+ int direction, pullup;
+
+ pullup = mcp230xx_read(dev, MCP230XX_GPPU, offset);
+ if (pullup < 0) {
+ dev_err(dev, "%s error: %d\n", __func__, pullup);
+ return pullup;
+ }
+
+ direction = mcp230xx_read(dev, MCP230XX_IODIR, offset);
+ if (direction < 0) {
+ dev_err(dev, "%s error: %d\n", __func__, direction);
+ return direction;
+ }
+
+ *flags = direction ? GPIOD_IS_IN : GPIOD_IS_OUT;
+
+ if (pullup)
+ *flags |= GPIOD_PULL_UP;
+
+ return 0;
+}
+
+static int mcp230xx_set_flags(struct udevice *dev, uint offset, ulong flags)
+{
+ bool input = !(flags & (GPIOD_IS_OUT | GPIOD_IS_OUT_ACTIVE));
+ bool pullup = flags & GPIOD_PULL_UP;
+ ulong supported_mask;
+ int ret;
+
+ /* Note: active-low is ignored (handled by core) */
+ supported_mask = GPIOD_ACTIVE_LOW | GPIOD_MASK_DIR | GPIOD_PULL_UP;
+ if (flags & ~supported_mask) {
+ dev_err(dev, "%s unsupported flag(s): %lx\n", __func__, flags);
+ return -EINVAL;
+ }
+
+ ret = mcp230xx_write(dev, MCP230XX_OLAT, offset, !!(flags & GPIOD_IS_OUT_ACTIVE));
+ if (ret) {
+ dev_err(dev, "%s failed to setup output latch: %d\n", __func__, ret);
+ return ret;
+ }
+
+ ret = mcp230xx_write(dev, MCP230XX_GPPU, offset, pullup);
+ if (ret) {
+ dev_err(dev, "%s failed to setup pull-up: %d\n", __func__, ret);
+ return ret;
+ }
+
+ ret = mcp230xx_write(dev, MCP230XX_IODIR, offset, input);
+ if (ret) {
+ dev_err(dev, "%s failed to setup direction: %d\n", __func__, ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+static int mcp230xx_direction_input(struct udevice *dev, uint offset)
+{
+ return mcp230xx_set_flags(dev, offset, GPIOD_IS_IN);
+}
+
+static int mcp230xx_direction_output(struct udevice *dev, uint offset, int val)
+{
+ int ret = mcp230xx_set_value(dev, offset, val);
+ if (ret < 0) {
+ dev_err(dev, "%s error: %d\n", __func__, ret);
+ return ret;
+ }
+ return mcp230xx_set_flags(dev, offset, GPIOD_IS_OUT);
+}
+
+static int mcp230xx_get_function(struct udevice *dev, uint offset)
+{
+ int ret;
+
+ ret = mcp230xx_read(dev, MCP230XX_IODIR, offset);
+ if (ret < 0) {
+ dev_err(dev, "%s error: %d\n", __func__, ret);
+ return ret;
+ }
+
+ return ret ? GPIOF_INPUT : GPIOF_OUTPUT;
+}
+
+static const struct dm_gpio_ops mcp230xx_ops = {
+ .direction_input = mcp230xx_direction_input,
+ .direction_output = mcp230xx_direction_output,
+ .get_value = mcp230xx_get_value,
+ .set_value = mcp230xx_set_value,
+ .get_function = mcp230xx_get_function,
+ .set_flags = mcp230xx_set_flags,
+ .get_flags = mcp230xx_get_flags,
+};
+
+static int mcp230xx_probe(struct udevice *dev)
+{
+ struct gpio_dev_priv *uc_priv = dev_get_uclass_priv(dev);
+ char name[32], label[8], *str;
+ int addr, gpio_count, size;
+ const u8 *tmp;
+
+ switch (dev_get_driver_data(dev)) {
+ case MCP23008:
+ gpio_count = 8;
+ break;
+ case MCP23017:
+ case MCP23018:
+ gpio_count = 16;
+ break;
+ default:
+ return -ENODEV;
+ }
+
+ addr = dev_read_addr(dev);
+ tmp = dev_read_prop(dev, "label", &size);
+ if (tmp) {
+ memcpy(label, tmp, sizeof(label) - 1);
+ label[sizeof(label) - 1] = '\0';
+ snprintf(name, sizeof(name), "%s@%x_", label, addr);
+ } else {
+ snprintf(name, sizeof(name), "gpio@%x_", addr);
+ }
+
+ str = strdup(name);
+ if (!str)
+ return -ENOMEM;
+
+ uc_priv->bank_name = str;
+ uc_priv->gpio_count = gpio_count;
+
+ dev_dbg(dev, "%s is ready\n", str);
+
+ return 0;
+}
+
+static const struct udevice_id mcp230xx_ids[] = {
+ { .compatible = "microchip,mcp23008", .data = MCP23008, },
+ { .compatible = "microchip,mcp23017", .data = MCP23017, },
+ { .compatible = "microchip,mcp23018", .data = MCP23018, },
+ { }
+};
+
+U_BOOT_DRIVER(mcp230xx) = {
+ .name = "mcp230xx",
+ .id = UCLASS_GPIO,
+ .ops = &mcp230xx_ops,
+ .probe = mcp230xx_probe,
+ .of_match = mcp230xx_ids,
+};
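For reference, a minimal sketch (not part of the patch) of driving one expander pin through the DM GPIO uclass once this driver has probed. The I2C address 0x20, and therefore the bank name "gpio@20_" built by mcp230xx_probe() above, are assumptions for illustration; the dm_gpio_* calls are the standard U-Boot API.

/* Editor's sketch: toggle pin 3 of an MCP230xx that probed at I2C address 0x20. */
#include <common.h>
#include <dm.h>
#include <asm/gpio.h>

static int example_toggle_expander_pin(void)
{
	struct gpio_desc desc;
	int ret;

	/* bank name from mcp230xx_probe() plus the pin offset */
	ret = dm_gpio_lookup_name("gpio@20_3", &desc);
	if (ret)
		return ret;

	ret = dm_gpio_request(&desc, "demo");
	if (ret)
		return ret;

	dm_gpio_set_dir_flags(&desc, GPIOD_IS_OUT | GPIOD_IS_OUT_ACTIVE);
	return dm_gpio_set_value(&desc, 0);
}

Since the bank name is registered with the uclass, the interactive "gpio" shell command can exercise the same pins.
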
diff --git a/drivers/i2c/i2c-gpio.c b/drivers/i2c/i2c-gpio.c
index cf8f8f40359..1aedad5c8ed 100644
--- a/drivers/i2c/i2c-gpio.c
+++ b/drivers/i2c/i2c-gpio.c
@@ -336,8 +336,17 @@ static int i2c_gpio_of_to_plat(struct udevice *dev)
struct i2c_gpio_bus *bus = dev_get_priv(dev);
int ret;
+ /* "gpios" is deprecated and replaced by "sda-gpios" + "scl-gpios". */
ret = gpio_request_list_by_name(dev, "gpios", bus->gpios,
ARRAY_SIZE(bus->gpios), 0);
+ if (ret == -ENOENT) {
+ ret = gpio_request_by_name(dev, "sda-gpios", 0,
+ &bus->gpios[PIN_SDA], 0);
+ if (ret < 0)
+ goto error;
+ ret = gpio_request_by_name(dev, "scl-gpios", 0,
+ &bus->gpios[PIN_SCL], 0);
+ }
if (ret < 0)
goto error;
diff --git a/drivers/i2c/i2c-uclass.c b/drivers/i2c/i2c-uclass.c
index 04c88503a2f..db1c9d94624 100644
--- a/drivers/i2c/i2c-uclass.c
+++ b/drivers/i2c/i2c-uclass.c
@@ -247,6 +247,21 @@ int dm_i2c_reg_write(struct udevice *dev, uint offset, uint value)
return dm_i2c_write(dev, offset, &val, 1);
}
+int dm_i2c_reg_clrset(struct udevice *dev, uint offset, u32 clr, u32 set)
+{
+ uint8_t val;
+ int ret;
+
+ ret = dm_i2c_read(dev, offset, &val, 1);
+ if (ret < 0)
+ return ret;
+
+ val &= ~clr;
+ val |= set;
+
+ return dm_i2c_write(dev, offset, &val, 1);
+}
+
/**
* i2c_probe_chip() - probe for a chip on a bus
*
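The new dm_i2c_reg_clrset() above is a plain 8-bit read-modify-write on a child device's register; a minimal caller sketch (the register offset and bit positions are made up for illustration):

/* Editor's sketch: clear bit 0 and set bit 2 of a made-up control
 * register at offset 0x01 on the I2C child device "dev". */
static int example_update_ctrl(struct udevice *dev)
{
	return dm_i2c_reg_clrset(dev, 0x01, BIT(0) | BIT(2), BIT(2));
}

The mcp230xx GPIO driver added above uses this helper for all of its register writes.
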
diff --git a/drivers/misc/Makefile b/drivers/misc/Makefile
index 0c67d43a5d4..b64cd2a4de9 100644
--- a/drivers/misc/Makefile
+++ b/drivers/misc/Makefile
@@ -46,6 +46,7 @@ obj-$(CONFIG_SANDBOX) += irq_sandbox.o
obj-$(CONFIG_$(SPL_)I2C_EEPROM) += i2c_eeprom.o
obj-$(CONFIG_IHS_FPGA) += ihs_fpga.o
obj-$(CONFIG_IMX8) += imx8/
+obj-$(CONFIG_IMX8ULP) += imx8ulp/
obj-$(CONFIG_LED_STATUS) += status_led.o
obj-$(CONFIG_LED_STATUS_GPIO) += gpio_led.o
obj-$(CONFIG_MPC83XX_SERDES) += mpc83xx_serdes.o
diff --git a/drivers/misc/imx8ulp/Makefile b/drivers/misc/imx8ulp/Makefile
new file mode 100644
index 00000000000..927cc552163
--- /dev/null
+++ b/drivers/misc/imx8ulp/Makefile
@@ -0,0 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0+
+
+obj-y += s400_api.o imx8ulp_mu.o
+obj-$(CONFIG_CMD_FUSE) += fuse.o
diff --git a/drivers/misc/imx8ulp/fuse.c b/drivers/misc/imx8ulp/fuse.c
new file mode 100644
index 00000000000..d1feb62ab59
--- /dev/null
+++ b/drivers/misc/imx8ulp/fuse.c
@@ -0,0 +1,198 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright 2020 NXP
+ */
+
+#include <common.h>
+#include <console.h>
+#include <errno.h>
+#include <fuse.h>
+#include <asm/arch/sys_proto.h>
+#include <asm/arch/imx-regs.h>
+#include <env.h>
+#include <asm/arch/s400_api.h>
+#include <asm/global_data.h>
+
+DECLARE_GLOBAL_DATA_PTR;
+
+#define FUSE_BANKS 64
+#define WORDS_PER_BANKS 8
+
+struct fsb_map_entry {
+ s32 fuse_bank;
+ u32 fuse_words;
+ bool redundancy;
+};
+
+struct s400_map_entry {
+ s32 fuse_bank;
+ u32 fuse_words;
+ u32 fuse_offset;
+ u32 s400_index;
+};
+
+struct fsb_map_entry fsb_mapping_table[] = {
+ { 3, 8 },
+ { 4, 8 },
+ { 5, 8 },
+ { 6, 8 },
+ { -1, 48 }, /* Reserve 48 words */
+ { 8, 4, true },
+ { 24, 4, true },
+ { 26, 4, true },
+ { 27, 4, true },
+ { 28, 8 },
+ { 29, 8 },
+ { 30, 8 },
+ { 31, 8 },
+ { 37, 8 },
+ { 38, 8 },
+ { 39, 8 },
+ { 40, 8 },
+ { 41, 8 },
+ { 42, 8 },
+ { 43, 8 },
+ { 44, 8 },
+ { 45, 8 },
+ { 46, 8 },
+};
+
+struct s400_map_entry s400_api_mapping_table[] = {
+ { 1, 8 }, /* LOCK */
+ { 2, 8 }, /* ECID */
+ { 7, 4, 0, 1 }, /* OTP_UNIQ_ID */
+ { 23, 1, 4, 2 }, /* OTFAD */
+};
+
+static s32 map_fsb_fuse_index(u32 bank, u32 word, bool *redundancy)
+{
+ s32 size = ARRAY_SIZE(fsb_mapping_table);
+ s32 i, word_pos = 0;
+
+	/* Map the fuse from the OCOTP fuse map to the FSB */
+ for (i = 0; i < size; i++) {
+ if (fsb_mapping_table[i].fuse_bank != -1 &&
+ fsb_mapping_table[i].fuse_bank == bank) {
+ break;
+ }
+
+ word_pos += fsb_mapping_table[i].fuse_words;
+ }
+
+ if (i == size)
+ return -1; /* Failed to find */
+
+ if (fsb_mapping_table[i].redundancy) {
+ *redundancy = true;
+ return (word >> 1) + word_pos;
+ }
+
+ *redundancy = false;
+ return word + word_pos;
+}
+
+static s32 map_s400_fuse_index(u32 bank, u32 word)
+{
+ s32 size = ARRAY_SIZE(s400_api_mapping_table);
+ s32 i;
+
+	/* Map the fuse from the OCOTP fuse map to the S400 API index */
+ for (i = 0; i < size; i++) {
+ if (s400_api_mapping_table[i].fuse_bank != -1 &&
+ s400_api_mapping_table[i].fuse_bank == bank) {
+ if (word >= s400_api_mapping_table[i].fuse_offset &&
+ word < (s400_api_mapping_table[i].fuse_offset +
+ s400_api_mapping_table[i].fuse_words))
+ break;
+ }
+ }
+
+ if (i == size)
+ return -1; /* Failed to find */
+
+ if (s400_api_mapping_table[i].s400_index != 0)
+ return s400_api_mapping_table[i].s400_index;
+
+ return s400_api_mapping_table[i].fuse_bank * 8 + word;
+}
+
+int fuse_sense(u32 bank, u32 word, u32 *val)
+{
+ s32 word_index;
+ bool redundancy;
+
+ if (bank >= FUSE_BANKS || word >= WORDS_PER_BANKS || !val)
+ return -EINVAL;
+
+ word_index = map_fsb_fuse_index(bank, word, &redundancy);
+ if (word_index >= 0) {
+ *val = readl((ulong)FSB_BASE_ADDR + 0x800 + (word_index << 2));
+ if (redundancy)
+ *val = (*val >> ((word % 2) * 16)) & 0xFFFF;
+
+ return 0;
+ }
+
+ word_index = map_s400_fuse_index(bank, word);
+ if (word_index >= 0) {
+ u32 data[4];
+ u32 res, size = 4;
+ int ret;
+
+		/* Only the UID (OTP_UNIQ_ID) returns 4 words */
+ if (word_index != 1)
+ size = 1;
+
+ ret = ahab_read_common_fuse(word_index, data, size, &res);
+ if (ret) {
+ printf("ahab read fuse failed %d, 0x%x\n", ret, res);
+ return ret;
+ }
+
+ if (word_index == 1) {
+ *val = data[word]; /* UID */
+ } else if (word_index == 2) {
+ /*
+ * OTFAD 3 bits as follow:
+			 * OTFAD uses 3 bits, as follows:
+ * bit 1: OTFAD_DISABLE_OVERRIDE
+ * bit 2: KEY_BLOB_EN
+ */
+ *val = data[0] << 3;
+ } else {
+ *val = data[0];
+ }
+
+ return 0;
+ }
+
+ return -ENOENT;
+}
+
+int fuse_read(u32 bank, u32 word, u32 *val)
+{
+ return fuse_sense(bank, word, val);
+}
+
+int fuse_prog(u32 bank, u32 word, u32 val)
+{
+ u32 res;
+ int ret;
+
+ if (bank >= FUSE_BANKS || word >= WORDS_PER_BANKS || !val)
+ return -EINVAL;
+
+ ret = ahab_write_fuse((bank * 8 + word), val, false, &res);
+ if (ret) {
+ printf("ahab write fuse failed %d, 0x%x\n", ret, res);
+ return ret;
+ }
+
+ return 0;
+}
+
+int fuse_override(u32 bank, u32 word, u32 val)
+{
+	printf("Overriding fuses is not supported on i.MX8ULP in U-Boot\n");
+ return -EPERM;
+}
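A minimal sketch of consuming this through the generic fuse interface; bank 3, word 0 is simply one index that falls inside the FSB-shadowed range of fsb_mapping_table above:

/* Editor's sketch: read an FSB-shadowed fuse word on i.MX8ULP. */
static void example_dump_fuse(void)
{
	u32 val;

	if (!fuse_read(3, 0, &val))
		printf("fuse bank 3 word 0 = 0x%08x\n", val);
}

Words that are not shadowed in the FSB fall through to map_s400_fuse_index() and are fetched through the S400 API over the messaging unit instead.
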
diff --git a/drivers/misc/imx8ulp/imx8ulp_mu.c b/drivers/misc/imx8ulp/imx8ulp_mu.c
new file mode 100644
index 00000000000..333ebdf5765
--- /dev/null
+++ b/drivers/misc/imx8ulp/imx8ulp_mu.c
@@ -0,0 +1,234 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright 2020 NXP
+ */
+
+#include <common.h>
+#include <asm/io.h>
+#include <dm.h>
+#include <dm/lists.h>
+#include <dm/root.h>
+#include <dm/device-internal.h>
+#include <asm/arch/s400_api.h>
+#include <asm/arch/imx-regs.h>
+#include <linux/iopoll.h>
+#include <misc.h>
+
+DECLARE_GLOBAL_DATA_PTR;
+
+struct imx8ulp_mu {
+ struct mu_type *base;
+};
+
+#define MU_SR_TE0_MASK BIT(0)
+#define MU_SR_RF0_MASK BIT(0)
+#define MU_TR_COUNT 4
+#define MU_RR_COUNT 4
+
+void mu_hal_init(ulong base)
+{
+ struct mu_type *mu_base = (struct mu_type *)base;
+
+ writel(0, &mu_base->tcr);
+ writel(0, &mu_base->rcr);
+}
+
+int mu_hal_sendmsg(ulong base, u32 reg_index, u32 msg)
+{
+ struct mu_type *mu_base = (struct mu_type *)base;
+ u32 mask = MU_SR_TE0_MASK << reg_index;
+ u32 val;
+ int ret;
+
+ assert(reg_index < MU_TR_COUNT);
+
+ debug("sendmsg sr 0x%x\n", readl(&mu_base->sr));
+
+	/* Wait for the TX register to become empty. */
+ ret = readl_poll_timeout(&mu_base->tsr, val, val & mask, 10000);
+ if (ret < 0) {
+ debug("%s timeout\n", __func__);
+ return -ETIMEDOUT;
+ }
+
+ debug("tr[%d] 0x%x\n", reg_index, msg);
+
+ writel(msg, &mu_base->tr[reg_index]);
+
+ return 0;
+}
+
+int mu_hal_receivemsg(ulong base, u32 reg_index, u32 *msg)
+{
+ struct mu_type *mu_base = (struct mu_type *)base;
+ u32 mask = MU_SR_RF0_MASK << reg_index;
+ u32 val;
+ int ret;
+
+	assert(reg_index < MU_RR_COUNT);
+
+ debug("receivemsg sr 0x%x\n", readl(&mu_base->sr));
+
+	/* Wait for the RX register to become full. */
+ ret = readl_poll_timeout(&mu_base->rsr, val, val & mask, 10000);
+ if (ret < 0) {
+ debug("%s timeout\n", __func__);
+ return -ETIMEDOUT;
+ }
+
+ *msg = readl(&mu_base->rr[reg_index]);
+
+ debug("rr[%d] 0x%x\n", reg_index, *msg);
+
+ return 0;
+}
+
+static int imx8ulp_mu_read(struct mu_type *base, void *data)
+{
+ struct imx8ulp_s400_msg *msg = (struct imx8ulp_s400_msg *)data;
+ int ret;
+ u8 count = 0;
+
+ if (!msg)
+ return -EINVAL;
+
+ /* Read first word */
+ ret = mu_hal_receivemsg((ulong)base, 0, (u32 *)msg);
+ if (ret)
+ return ret;
+ count++;
+
+ /* Check size */
+ if (msg->size > S400_MAX_MSG) {
+ *((u32 *)msg) = 0;
+ return -EINVAL;
+ }
+
+ /* Read remaining words */
+ while (count < msg->size) {
+ ret = mu_hal_receivemsg((ulong)base, count % MU_RR_COUNT,
+ &msg->data[count - 1]);
+ if (ret)
+ return ret;
+ count++;
+ }
+
+ return 0;
+}
+
+static int imx8ulp_mu_write(struct mu_type *base, void *data)
+{
+ struct imx8ulp_s400_msg *msg = (struct imx8ulp_s400_msg *)data;
+ int ret;
+ u8 count = 0;
+
+ if (!msg)
+ return -EINVAL;
+
+ /* Check size */
+ if (msg->size > S400_MAX_MSG)
+ return -EINVAL;
+
+ /* Write first word */
+ ret = mu_hal_sendmsg((ulong)base, 0, *((u32 *)msg));
+ if (ret)
+ return ret;
+ count++;
+
+ /* Write remaining words */
+ while (count < msg->size) {
+ ret = mu_hal_sendmsg((ulong)base, count % MU_TR_COUNT,
+ msg->data[count - 1]);
+ if (ret)
+ return ret;
+ count++;
+ }
+
+ return 0;
+}
+
+/*
+ * Note: the function prototype uses msgid as the 2nd parameter; here
+ * it is reused as a no_resp flag.
+ */
+static int imx8ulp_mu_call(struct udevice *dev, int no_resp, void *tx_msg,
+ int tx_size, void *rx_msg, int rx_size)
+{
+ struct imx8ulp_mu *priv = dev_get_priv(dev);
+ u32 result;
+ int ret;
+
+	/* tx_msg and rx_msg are expected to point to the same buffer */
+ if (rx_msg && tx_msg != rx_msg)
+ printf("tx_msg %p, rx_msg %p\n", tx_msg, rx_msg);
+
+ ret = imx8ulp_mu_write(priv->base, tx_msg);
+ if (ret)
+ return ret;
+ if (!no_resp) {
+ ret = imx8ulp_mu_read(priv->base, rx_msg);
+ if (ret)
+ return ret;
+ }
+
+ result = ((struct imx8ulp_s400_msg *)rx_msg)->data[0];
+ if ((result & 0xff) == 0xd6)
+ return 0;
+
+ return -EIO;
+}
+
+static int imx8ulp_mu_probe(struct udevice *dev)
+{
+ struct imx8ulp_mu *priv = dev_get_priv(dev);
+ fdt_addr_t addr;
+
+ debug("%s(dev=%p) (priv=%p)\n", __func__, dev, priv);
+
+ addr = devfdt_get_addr(dev);
+ if (addr == FDT_ADDR_T_NONE)
+ return -EINVAL;
+
+ priv->base = (struct mu_type *)addr;
+
+ debug("mu base 0x%lx\n", (ulong)priv->base);
+
+	/* U-Boot runs the MU in polled mode, so disable the TX/RX interrupts */
+ mu_hal_init((ulong)priv->base);
+
+ gd->arch.s400_dev = dev;
+
+ return 0;
+}
+
+static int imx8ulp_mu_remove(struct udevice *dev)
+{
+ return 0;
+}
+
+static int imx8ulp_mu_bind(struct udevice *dev)
+{
+ debug("%s(dev=%p)\n", __func__, dev);
+
+ return 0;
+}
+
+static struct misc_ops imx8ulp_mu_ops = {
+ .call = imx8ulp_mu_call,
+};
+
+static const struct udevice_id imx8ulp_mu_ids[] = {
+ { .compatible = "fsl,imx8ulp-mu" },
+ { }
+};
+
+U_BOOT_DRIVER(imx8ulp_mu) = {
+ .name = "imx8ulp_mu",
+ .id = UCLASS_MISC,
+ .of_match = imx8ulp_mu_ids,
+ .probe = imx8ulp_mu_probe,
+ .bind = imx8ulp_mu_bind,
+ .remove = imx8ulp_mu_remove,
+ .ops = &imx8ulp_mu_ops,
+ .priv_auto = sizeof(struct imx8ulp_mu),
+};
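The MU is exposed as a misc uclass device: a request is an imx8ulp_s400_msg whose first 32-bit word packs version/tag/size/command, the remaining words are spread round-robin across the four TR registers (and read back from the four RR registers), and success is indicated by 0xd6 in the low byte of the first response data word. A minimal caller sketch, mirroring the helpers in s400_api.c below; the command chosen here is only an example, and gd->arch.s400_dev is assumed to have been set by the probe above:

/* Editor's sketch: one AHAB request issued directly through the MU. */
static int example_mu_call(void)
{
	struct imx8ulp_s400_msg msg;
	int size = sizeof(struct imx8ulp_s400_msg);

	msg.version = AHAB_VERSION;
	msg.tag = AHAB_CMD_TAG;
	msg.size = 2;				/* header word + 1 payload word */
	msg.command = AHAB_RELEASE_RDC_REQ_CID;
	msg.data[0] = (0x74 << 8) | 0;		/* RDC release for core 0 */

	return misc_call(gd->arch.s400_dev, false, &msg, size, &msg, size);
}
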
diff --git a/drivers/misc/imx8ulp/s400_api.c b/drivers/misc/imx8ulp/s400_api.c
new file mode 100644
index 00000000000..d76a95febe7
--- /dev/null
+++ b/drivers/misc/imx8ulp/s400_api.c
@@ -0,0 +1,244 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright 2020 NXP
+ *
+ */
+
+#include <common.h>
+#include <hang.h>
+#include <malloc.h>
+#include <asm/io.h>
+#include <dm.h>
+#include <asm/arch/s400_api.h>
+#include <misc.h>
+
+DECLARE_GLOBAL_DATA_PTR;
+
+int ahab_release_rdc(u8 core_id, bool xrdc, u32 *response)
+{
+ struct udevice *dev = gd->arch.s400_dev;
+ int size = sizeof(struct imx8ulp_s400_msg);
+ struct imx8ulp_s400_msg msg;
+ int ret;
+
+ if (!dev) {
+ printf("s400 dev is not initialized\n");
+ return -ENODEV;
+ }
+
+ msg.version = AHAB_VERSION;
+ msg.tag = AHAB_CMD_TAG;
+ msg.size = 2;
+ msg.command = AHAB_RELEASE_RDC_REQ_CID;
+ if (xrdc)
+ msg.data[0] = (0x78 << 8) | core_id;
+ else
+ msg.data[0] = (0x74 << 8) | core_id;
+
+ ret = misc_call(dev, false, &msg, size, &msg, size);
+ if (ret)
+ printf("Error: %s: ret %d, core id %u, response 0x%x\n",
+ __func__, ret, core_id, msg.data[0]);
+
+ if (response)
+ *response = msg.data[0];
+
+ return ret;
+}
+
+int ahab_auth_oem_ctnr(ulong ctnr_addr, u32 *response)
+{
+ struct udevice *dev = gd->arch.s400_dev;
+ int size = sizeof(struct imx8ulp_s400_msg);
+ struct imx8ulp_s400_msg msg;
+ int ret;
+
+ if (!dev) {
+ printf("s400 dev is not initialized\n");
+ return -ENODEV;
+ }
+
+ msg.version = AHAB_VERSION;
+ msg.tag = AHAB_CMD_TAG;
+ msg.size = 3;
+ msg.command = AHAB_AUTH_OEM_CTNR_CID;
+ msg.data[0] = upper_32_bits(ctnr_addr);
+ msg.data[1] = lower_32_bits(ctnr_addr);
+
+ ret = misc_call(dev, false, &msg, size, &msg, size);
+ if (ret)
+ printf("Error: %s: ret %d, cntr_addr 0x%lx, response 0x%x\n",
+ __func__, ret, ctnr_addr, msg.data[0]);
+
+ if (response)
+ *response = msg.data[0];
+
+ return ret;
+}
+
+int ahab_release_container(u32 *response)
+{
+ struct udevice *dev = gd->arch.s400_dev;
+ int size = sizeof(struct imx8ulp_s400_msg);
+ struct imx8ulp_s400_msg msg;
+ int ret;
+
+ if (!dev) {
+ printf("s400 dev is not initialized\n");
+ return -ENODEV;
+ }
+
+ msg.version = AHAB_VERSION;
+ msg.tag = AHAB_CMD_TAG;
+ msg.size = 1;
+ msg.command = AHAB_RELEASE_CTNR_CID;
+
+ ret = misc_call(dev, false, &msg, size, &msg, size);
+ if (ret)
+ printf("Error: %s: ret %d, response 0x%x\n",
+ __func__, ret, msg.data[0]);
+
+ if (response)
+ *response = msg.data[0];
+
+ return ret;
+}
+
+int ahab_verify_image(u32 img_id, u32 *response)
+{
+ struct udevice *dev = gd->arch.s400_dev;
+ int size = sizeof(struct imx8ulp_s400_msg);
+ struct imx8ulp_s400_msg msg;
+ int ret;
+
+ if (!dev) {
+ printf("s400 dev is not initialized\n");
+ return -ENODEV;
+ }
+
+ msg.version = AHAB_VERSION;
+ msg.tag = AHAB_CMD_TAG;
+ msg.size = 2;
+ msg.command = AHAB_VERIFY_IMG_CID;
+ msg.data[0] = 1 << img_id;
+
+ ret = misc_call(dev, false, &msg, size, &msg, size);
+ if (ret)
+ printf("Error: %s: ret %d, img_id %u, response 0x%x\n",
+ __func__, ret, img_id, msg.data[0]);
+
+ if (response)
+ *response = msg.data[0];
+
+ return ret;
+}
+
+int ahab_forward_lifecycle(u16 life_cycle, u32 *response)
+{
+ struct udevice *dev = gd->arch.s400_dev;
+ int size = sizeof(struct imx8ulp_s400_msg);
+ struct imx8ulp_s400_msg msg;
+ int ret;
+
+ if (!dev) {
+ printf("s400 dev is not initialized\n");
+ return -ENODEV;
+ }
+
+ msg.version = AHAB_VERSION;
+ msg.tag = AHAB_CMD_TAG;
+ msg.size = 2;
+ msg.command = AHAB_FWD_LIFECYCLE_UP_REQ_CID;
+ msg.data[0] = life_cycle;
+
+ ret = misc_call(dev, false, &msg, size, &msg, size);
+ if (ret)
+ printf("Error: %s: ret %d, life_cycle 0x%x, response 0x%x\n",
+ __func__, ret, life_cycle, msg.data[0]);
+
+ if (response)
+ *response = msg.data[0];
+
+ return ret;
+}
+
+int ahab_read_common_fuse(u16 fuse_id, u32 *fuse_words, u32 fuse_num, u32 *response)
+{
+ struct udevice *dev = gd->arch.s400_dev;
+ int size = sizeof(struct imx8ulp_s400_msg);
+ struct imx8ulp_s400_msg msg;
+ int ret;
+
+ if (!dev) {
+ printf("s400 dev is not initialized\n");
+ return -ENODEV;
+ }
+
+ if (!fuse_words) {
+ printf("Invalid parameters for fuse read\n");
+ return -EINVAL;
+ }
+
+ if ((fuse_id != 1 && fuse_num != 1) ||
+ (fuse_id == 1 && fuse_num != 4)) {
+ printf("Invalid fuse number parameter\n");
+ return -EINVAL;
+ }
+
+ msg.version = AHAB_VERSION;
+ msg.tag = AHAB_CMD_TAG;
+ msg.size = 2;
+ msg.command = AHAB_READ_FUSE_REQ_CID;
+ msg.data[0] = fuse_id;
+
+ ret = misc_call(dev, false, &msg, size, &msg, size);
+ if (ret)
+ printf("Error: %s: ret %d, fuse_id 0x%x, response 0x%x\n",
+ __func__, ret, fuse_id, msg.data[0]);
+
+ if (response)
+ *response = msg.data[0];
+
+ fuse_words[0] = msg.data[1];
+ if (fuse_id == 1) {
+ /* OTP_UNIQ_ID */
+ fuse_words[1] = msg.data[2];
+ fuse_words[2] = msg.data[3];
+ fuse_words[3] = msg.data[4];
+ }
+
+ return ret;
+}
+
+int ahab_write_fuse(u16 fuse_id, u32 fuse_val, bool lock, u32 *response)
+{
+ struct udevice *dev = gd->arch.s400_dev;
+ int size = sizeof(struct imx8ulp_s400_msg);
+ struct imx8ulp_s400_msg msg;
+ int ret;
+
+ if (!dev) {
+ printf("s400 dev is not initialized\n");
+ return -ENODEV;
+ }
+
+ msg.version = AHAB_VERSION;
+ msg.tag = AHAB_CMD_TAG;
+ msg.size = 3;
+ msg.command = AHAB_WRITE_FUSE_REQ_CID;
+ msg.data[0] = (32 << 16) | (fuse_id << 5);
+ if (lock)
+ msg.data[0] |= (1 << 31);
+
+ msg.data[1] = fuse_val;
+
+ ret = misc_call(dev, false, &msg, size, &msg, size);
+ if (ret)
+ printf("Error: %s: ret %d, fuse_id 0x%x, response 0x%x\n",
+ __func__, ret, fuse_id, msg.data[0]);
+
+ if (response)
+ *response = msg.data[0];
+
+ return ret;
+}
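A usage sketch for the API above: reading the 4-word OTP unique ID, the one fuse_id for which fuse_num must be 4:

/* Editor's sketch: print the OTP_UNIQ_ID via the S400 fuse read API. */
static void example_print_uid(void)
{
	u32 uid[4], resp;

	if (!ahab_read_common_fuse(1, uid, 4, &resp))
		printf("UID: %08x %08x %08x %08x\n",
		       uid[0], uid[1], uid[2], uid[3]);
}
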
diff --git a/drivers/mmc/Kconfig b/drivers/mmc/Kconfig
index 717ce5a62f4..1569e8c44ac 100644
--- a/drivers/mmc/Kconfig
+++ b/drivers/mmc/Kconfig
@@ -832,7 +832,7 @@ config FSL_ESDHC_IMX
config FSL_USDHC
bool "Freescale/NXP i.MX uSDHC controller support"
- depends on MX6 || MX7 ||ARCH_MX7ULP || IMX8 || IMX8M || IMXRT
+	depends on MX6 || MX7 || ARCH_MX7ULP || IMX8 || IMX8M || IMX8ULP || IMXRT
select FSL_ESDHC_IMX
help
This enables the Ultra Secured Digital Host Controller enhancements
diff --git a/drivers/mmc/fsl_esdhc_imx.c b/drivers/mmc/fsl_esdhc_imx.c
index 465d935daf6..aabf39535f9 100644
--- a/drivers/mmc/fsl_esdhc_imx.c
+++ b/drivers/mmc/fsl_esdhc_imx.c
@@ -291,7 +291,8 @@ static int esdhc_setup_data(struct fsl_esdhc_priv *priv, struct mmc *mmc,
{
int timeout;
struct fsl_esdhc *regs = priv->esdhc_regs;
-#if defined(CONFIG_S32V234) || defined(CONFIG_IMX8) || defined(CONFIG_IMX8M)
+#if defined(CONFIG_S32V234) || defined(CONFIG_IMX8) || defined(CONFIG_IMX8M) || \
+ defined(CONFIG_IMX8ULP)
dma_addr_t addr;
#endif
uint wml_value;
@@ -304,7 +305,8 @@ static int esdhc_setup_data(struct fsl_esdhc_priv *priv, struct mmc *mmc,
esdhc_clrsetbits32(&regs->wml, WML_RD_WML_MASK, wml_value);
#ifndef CONFIG_SYS_FSL_ESDHC_USE_PIO
-#if defined(CONFIG_S32V234) || defined(CONFIG_IMX8) || defined(CONFIG_IMX8M)
+#if defined(CONFIG_S32V234) || defined(CONFIG_IMX8) || defined(CONFIG_IMX8M) || \
+ defined(CONFIG_IMX8ULP)
addr = virt_to_phys((void *)(data->dest));
if (upper_32_bits(addr))
printf("Error found for upper 32 bits\n");
@@ -341,7 +343,8 @@ static int esdhc_setup_data(struct fsl_esdhc_priv *priv, struct mmc *mmc,
esdhc_clrsetbits32(&regs->wml, WML_WR_WML_MASK,
wml_value << 16);
#ifndef CONFIG_SYS_FSL_ESDHC_USE_PIO
-#if defined(CONFIG_S32V234) || defined(CONFIG_IMX8) || defined(CONFIG_IMX8M)
+#if defined(CONFIG_S32V234) || defined(CONFIG_IMX8) || defined(CONFIG_IMX8M) || \
+ defined(CONFIG_IMX8ULP)
addr = virt_to_phys((void *)(data->src));
if (upper_32_bits(addr))
printf("Error found for upper 32 bits\n");
@@ -406,7 +409,8 @@ static void check_and_invalidate_dcache_range
unsigned end = 0;
unsigned size = roundup(ARCH_DMA_MINALIGN,
data->blocks*data->blocksize);
-#if defined(CONFIG_S32V234) || defined(CONFIG_IMX8) || defined(CONFIG_IMX8M)
+#if defined(CONFIG_S32V234) || defined(CONFIG_IMX8) || defined(CONFIG_IMX8M) || \
+ defined(CONFIG_IMX8ULP)
dma_addr_t addr;
addr = virt_to_phys((void *)(data->dest));
diff --git a/drivers/mmc/rockchip_sdhci.c b/drivers/mmc/rockchip_sdhci.c
index d95f8b2a15b..1ac00587d44 100644
--- a/drivers/mmc/rockchip_sdhci.c
+++ b/drivers/mmc/rockchip_sdhci.c
@@ -6,90 +6,421 @@
*/
#include <common.h>
+#include <clk.h>
#include <dm.h>
+#include <dm/ofnode.h>
#include <dt-structs.h>
+#include <linux/delay.h>
#include <linux/err.h>
#include <linux/libfdt.h>
+#include <linux/iopoll.h>
#include <malloc.h>
#include <mapmem.h>
+#include "mmc_private.h"
#include <sdhci.h>
-#include <clk.h>
+#include <syscon.h>
+#include <asm/arch-rockchip/clock.h>
+#include <asm/arch-rockchip/hardware.h>
/* 400KHz is max freq for card ID etc. Use that as min */
#define EMMC_MIN_FREQ 400000
+#define KHz (1000)
+#define MHz (1000 * KHz)
+#define SDHCI_TUNING_LOOP_COUNT 40
+
+#define PHYCTRL_CALDONE_MASK 0x1
+#define PHYCTRL_CALDONE_SHIFT 0x6
+#define PHYCTRL_CALDONE_DONE 0x1
+#define PHYCTRL_DLLRDY_MASK 0x1
+#define PHYCTRL_DLLRDY_SHIFT 0x5
+#define PHYCTRL_DLLRDY_DONE 0x1
+#define PHYCTRL_FREQSEL_200M 0x0
+#define PHYCTRL_FREQSEL_50M 0x1
+#define PHYCTRL_FREQSEL_100M 0x2
+#define PHYCTRL_FREQSEL_150M 0x3
+#define PHYCTRL_DLL_LOCK_WO_TMOUT(x) \
+ ((((x) >> PHYCTRL_DLLRDY_SHIFT) & PHYCTRL_DLLRDY_MASK) ==\
+ PHYCTRL_DLLRDY_DONE)
+
+/* Rockchip specific Registers */
+#define DWCMSHC_EMMC_DLL_CTRL 0x800
+#define DWCMSHC_EMMC_DLL_CTRL_RESET BIT(1)
+#define DWCMSHC_EMMC_DLL_RXCLK 0x804
+#define DWCMSHC_EMMC_DLL_TXCLK 0x808
+#define DWCMSHC_EMMC_DLL_STRBIN 0x80c
+#define DWCMSHC_EMMC_DLL_STATUS0 0x840
+#define DWCMSHC_EMMC_DLL_STATUS1 0x844
+#define DWCMSHC_EMMC_DLL_START BIT(0)
+#define DWCMSHC_EMMC_DLL_RXCLK_SRCSEL 29
+#define DWCMSHC_EMMC_DLL_START_POINT 16
+#define DWCMSHC_EMMC_DLL_START_DEFAULT 5
+#define DWCMSHC_EMMC_DLL_INC_VALUE 2
+#define DWCMSHC_EMMC_DLL_INC 8
+#define DWCMSHC_EMMC_DLL_DLYENA BIT(27)
+#define DLL_TXCLK_TAPNUM_DEFAULT 0x10
+#define DLL_STRBIN_TAPNUM_DEFAULT 0x3
+#define DLL_TXCLK_TAPNUM_FROM_SW BIT(24)
+#define DWCMSHC_EMMC_DLL_LOCKED BIT(8)
+#define DWCMSHC_EMMC_DLL_TIMEOUT BIT(9)
+#define DLL_RXCLK_NO_INVERTER 1
+#define DLL_RXCLK_INVERTER 0
+#define DWCMSHC_ENHANCED_STROBE BIT(8)
+#define DLL_LOCK_WO_TMOUT(x) \
+ ((((x) & DWCMSHC_EMMC_DLL_LOCKED) == DWCMSHC_EMMC_DLL_LOCKED) && \
+ (((x) & DWCMSHC_EMMC_DLL_TIMEOUT) == 0))
+#define ROCKCHIP_MAX_CLKS 3
struct rockchip_sdhc_plat {
-#if CONFIG_IS_ENABLED(OF_PLATDATA)
- struct dtd_rockchip_rk3399_sdhci_5_1 dtplat;
-#endif
struct mmc_config cfg;
struct mmc mmc;
};
+struct rockchip_emmc_phy {
+ u32 emmcphy_con[7];
+ u32 reserved;
+ u32 emmcphy_status;
+};
+
struct rockchip_sdhc {
struct sdhci_host host;
+ struct udevice *dev;
void *base;
+ struct rockchip_emmc_phy *phy;
+ struct clk emmc_clk;
+};
+
+struct sdhci_data {
+ int (*emmc_set_clock)(struct sdhci_host *host, unsigned int clock);
+ int (*emmc_phy_init)(struct udevice *dev);
+ int (*get_phy)(struct udevice *dev);
+};
+
+static int rk3399_emmc_phy_init(struct udevice *dev)
+{
+ return 0;
+}
+
+static void rk3399_emmc_phy_power_on(struct rockchip_emmc_phy *phy, u32 clock)
+{
+ u32 caldone, dllrdy, freqsel;
+
+ writel(RK_CLRSETBITS(7 << 4, 0), &phy->emmcphy_con[6]);
+ writel(RK_CLRSETBITS(1 << 11, 1 << 11), &phy->emmcphy_con[0]);
+ writel(RK_CLRSETBITS(0xf << 7, 6 << 7), &phy->emmcphy_con[0]);
+
+ /*
+ * According to the user manual, calpad calibration
+ * cycle takes more than 2us without the minimal recommended
+ * value, so we may need a little margin here
+ */
+ udelay(3);
+ writel(RK_CLRSETBITS(1, 1), &phy->emmcphy_con[6]);
+
+ /*
+	 * According to the user manual, the driver should wait 5 us for
+	 * calpad busy trimming, but 5 us does not seem to be enough for
+	 * caldone in all cases.
+ */
+ udelay(500);
+ caldone = readl(&phy->emmcphy_status);
+ caldone = (caldone >> PHYCTRL_CALDONE_SHIFT) & PHYCTRL_CALDONE_MASK;
+ if (caldone != PHYCTRL_CALDONE_DONE) {
+ printf("%s: caldone timeout.\n", __func__);
+ return;
+ }
+
+ /* Set the frequency of the DLL operation */
+ if (clock < 75 * MHz)
+ freqsel = PHYCTRL_FREQSEL_50M;
+ else if (clock < 125 * MHz)
+ freqsel = PHYCTRL_FREQSEL_100M;
+ else if (clock < 175 * MHz)
+ freqsel = PHYCTRL_FREQSEL_150M;
+ else
+ freqsel = PHYCTRL_FREQSEL_200M;
+
+	/* Program the selected DLL frequency */
+ writel(RK_CLRSETBITS(3 << 12, freqsel << 12), &phy->emmcphy_con[0]);
+ writel(RK_CLRSETBITS(1 << 1, 1 << 1), &phy->emmcphy_con[6]);
+
+ read_poll_timeout(readl, &phy->emmcphy_status, dllrdy,
+ PHYCTRL_DLL_LOCK_WO_TMOUT(dllrdy), 1, 5000);
+}
+
+static void rk3399_emmc_phy_power_off(struct rockchip_emmc_phy *phy)
+{
+ writel(RK_CLRSETBITS(1, 0), &phy->emmcphy_con[6]);
+ writel(RK_CLRSETBITS(1 << 1, 0), &phy->emmcphy_con[6]);
+}
+
+static int rk3399_emmc_get_phy(struct udevice *dev)
+{
+ struct rockchip_sdhc *priv = dev_get_priv(dev);
+ ofnode phy_node;
+ void *grf_base;
+ u32 grf_phy_offset, phandle;
+
+ phandle = dev_read_u32_default(dev, "phys", 0);
+ phy_node = ofnode_get_by_phandle(phandle);
+ if (!ofnode_valid(phy_node)) {
+		debug("emmc phy device not found\n");
+ return -ENODEV;
+ }
+
+ grf_base = syscon_get_first_range(ROCKCHIP_SYSCON_GRF);
+ if (grf_base < 0) {
+		printf("%s: failed to get syscon GRF\n", __func__);
+ return -ENODEV;
+ }
+ grf_phy_offset = ofnode_read_u32_default(phy_node, "reg", 0);
+
+ priv->phy = (struct rockchip_emmc_phy *)(grf_base + grf_phy_offset);
+
+ return 0;
+}
+
+static int rk3399_sdhci_emmc_set_clock(struct sdhci_host *host, unsigned int clock)
+{
+ struct rockchip_sdhc *priv = container_of(host, struct rockchip_sdhc, host);
+ int cycle_phy = host->clock != clock && clock > EMMC_MIN_FREQ;
+
+ if (cycle_phy)
+ rk3399_emmc_phy_power_off(priv->phy);
+
+ sdhci_set_clock(host->mmc, clock);
+
+ if (cycle_phy)
+ rk3399_emmc_phy_power_on(priv->phy, clock);
+
+ return 0;
+}
+
+static int rk3568_emmc_phy_init(struct udevice *dev)
+{
+ struct rockchip_sdhc *prv = dev_get_priv(dev);
+ struct sdhci_host *host = &prv->host;
+ u32 extra;
+
+ extra = DLL_RXCLK_NO_INVERTER << DWCMSHC_EMMC_DLL_RXCLK_SRCSEL;
+ sdhci_writel(host, extra, DWCMSHC_EMMC_DLL_RXCLK);
+
+ return 0;
+}
+
+static int rk3568_sdhci_emmc_set_clock(struct sdhci_host *host, unsigned int clock)
+{
+ struct rockchip_sdhc *priv = container_of(host, struct rockchip_sdhc, host);
+ int val, ret;
+ u32 extra;
+
+ if (clock > host->max_clk)
+ clock = host->max_clk;
+ if (clock)
+ clk_set_rate(&priv->emmc_clk, clock);
+
+ sdhci_set_clock(host->mmc, clock);
+
+ if (clock >= 100 * MHz) {
+ /* reset DLL */
+ sdhci_writel(host, DWCMSHC_EMMC_DLL_CTRL_RESET, DWCMSHC_EMMC_DLL_CTRL);
+ udelay(1);
+ sdhci_writel(host, 0, DWCMSHC_EMMC_DLL_CTRL);
+
+ /* Init DLL settings */
+ extra = DWCMSHC_EMMC_DLL_START_DEFAULT << DWCMSHC_EMMC_DLL_START_POINT |
+ DWCMSHC_EMMC_DLL_INC_VALUE << DWCMSHC_EMMC_DLL_INC |
+ DWCMSHC_EMMC_DLL_START;
+ sdhci_writel(host, extra, DWCMSHC_EMMC_DLL_CTRL);
+
+ ret = read_poll_timeout(readl, host->ioaddr + DWCMSHC_EMMC_DLL_STATUS0,
+ val, DLL_LOCK_WO_TMOUT(val), 1, 500);
+ if (ret)
+ return ret;
+
+ extra = DWCMSHC_EMMC_DLL_DLYENA |
+ DLL_RXCLK_NO_INVERTER << DWCMSHC_EMMC_DLL_RXCLK_SRCSEL;
+ sdhci_writel(host, extra, DWCMSHC_EMMC_DLL_RXCLK);
+
+ extra = DWCMSHC_EMMC_DLL_DLYENA |
+ DLL_TXCLK_TAPNUM_DEFAULT |
+ DLL_TXCLK_TAPNUM_FROM_SW;
+ sdhci_writel(host, extra, DWCMSHC_EMMC_DLL_TXCLK);
+
+ extra = DWCMSHC_EMMC_DLL_DLYENA |
+ DLL_STRBIN_TAPNUM_DEFAULT;
+ sdhci_writel(host, extra, DWCMSHC_EMMC_DLL_STRBIN);
+ } else {
+ /* reset the clock phase when the frequency is lower than 100MHz */
+ sdhci_writel(host, 0, DWCMSHC_EMMC_DLL_CTRL);
+ extra = DLL_RXCLK_NO_INVERTER << DWCMSHC_EMMC_DLL_RXCLK_SRCSEL;
+ sdhci_writel(host, extra, DWCMSHC_EMMC_DLL_RXCLK);
+ sdhci_writel(host, 0, DWCMSHC_EMMC_DLL_TXCLK);
+ sdhci_writel(host, 0, DWCMSHC_EMMC_DLL_STRBIN);
+ }
+
+ return 0;
+}
+
+static int rk3568_emmc_get_phy(struct udevice *dev)
+{
+ return 0;
+}
+
+static int rockchip_sdhci_set_ios_post(struct sdhci_host *host)
+{
+ struct rockchip_sdhc *priv = container_of(host, struct rockchip_sdhc, host);
+ struct sdhci_data *data = (struct sdhci_data *)dev_get_driver_data(priv->dev);
+ struct mmc *mmc = host->mmc;
+ uint clock = mmc->tran_speed;
+ u32 reg;
+
+ if (!clock)
+ clock = mmc->clock;
+
+ if (data->emmc_set_clock)
+ data->emmc_set_clock(host, clock);
+
+ if (mmc->selected_mode == MMC_HS_400 || mmc->selected_mode == MMC_HS_400_ES) {
+ reg = sdhci_readw(host, SDHCI_HOST_CONTROL2);
+ reg &= ~SDHCI_CTRL_UHS_MASK;
+ reg |= SDHCI_CTRL_HS400;
+ sdhci_writew(host, reg, SDHCI_HOST_CONTROL2);
+ } else {
+ sdhci_set_uhs_timing(host);
+ }
+
+ return 0;
+}
+
+static int rockchip_sdhci_execute_tuning(struct mmc *mmc, u8 opcode)
+{
+ struct sdhci_host *host = dev_get_priv(mmc->dev);
+ char tuning_loop_counter = SDHCI_TUNING_LOOP_COUNT;
+ struct mmc_cmd cmd;
+ u32 ctrl, blk_size;
+ int ret = 0;
+
+ ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2);
+ ctrl |= SDHCI_CTRL_EXEC_TUNING;
+ sdhci_writew(host, ctrl, SDHCI_HOST_CONTROL2);
+
+ sdhci_writel(host, SDHCI_INT_DATA_AVAIL, SDHCI_INT_ENABLE);
+ sdhci_writel(host, SDHCI_INT_DATA_AVAIL, SDHCI_SIGNAL_ENABLE);
+
+ blk_size = SDHCI_MAKE_BLKSZ(SDHCI_DEFAULT_BOUNDARY_ARG, 64);
+ if (opcode == MMC_CMD_SEND_TUNING_BLOCK_HS200 && host->mmc->bus_width == 8)
+ blk_size = SDHCI_MAKE_BLKSZ(SDHCI_DEFAULT_BOUNDARY_ARG, 128);
+ sdhci_writew(host, blk_size, SDHCI_BLOCK_SIZE);
+ sdhci_writew(host, SDHCI_TRNS_READ, SDHCI_TRANSFER_MODE);
+
+ cmd.cmdidx = opcode;
+ cmd.resp_type = MMC_RSP_R1;
+ cmd.cmdarg = 0;
+
+ do {
+ if (tuning_loop_counter-- == 0)
+ break;
+
+ mmc_send_cmd(mmc, &cmd, NULL);
+
+ if (opcode == MMC_CMD_SEND_TUNING_BLOCK)
+ /*
+ * For tuning command, do not do busy loop. As tuning
+ * is happening (CLK-DATA latching for setup/hold time
+ * requirements), give time to complete
+ */
+ udelay(1);
+
+ ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2);
+ } while (ctrl & SDHCI_CTRL_EXEC_TUNING);
+
+ if (!(ctrl & SDHCI_CTRL_TUNED_CLK)) {
+		printf("%s: tuning failed\n", __func__);
+ ret = -EIO;
+ }
+
+ if (tuning_loop_counter < 0) {
+ ctrl &= ~SDHCI_CTRL_TUNED_CLK;
+ sdhci_writel(host, ctrl, SDHCI_HOST_CONTROL2);
+ }
+
+ /* Enable only interrupts served by the SD controller */
+ sdhci_writel(host, SDHCI_INT_DATA_MASK | SDHCI_INT_CMD_MASK, SDHCI_INT_ENABLE);
+ /* Mask all sdhci interrupt sources */
+ sdhci_writel(host, 0x0, SDHCI_SIGNAL_ENABLE);
+
+ return ret;
+}
+
+static struct sdhci_ops rockchip_sdhci_ops = {
+ .set_ios_post = rockchip_sdhci_set_ios_post,
+ .platform_execute_tuning = &rockchip_sdhci_execute_tuning,
};
-static int arasan_sdhci_probe(struct udevice *dev)
+static int rockchip_sdhci_probe(struct udevice *dev)
{
+ struct sdhci_data *data = (struct sdhci_data *)dev_get_driver_data(dev);
struct mmc_uclass_priv *upriv = dev_get_uclass_priv(dev);
struct rockchip_sdhc_plat *plat = dev_get_plat(dev);
struct rockchip_sdhc *prv = dev_get_priv(dev);
+ struct mmc_config *cfg = &plat->cfg;
struct sdhci_host *host = &prv->host;
- int max_frequency, ret;
struct clk clk;
+ int ret;
-#if CONFIG_IS_ENABLED(OF_PLATDATA)
- struct dtd_rockchip_rk3399_sdhci_5_1 *dtplat = &plat->dtplat;
-
- host->name = dev->name;
- host->ioaddr = map_sysmem(dtplat->reg[0], dtplat->reg[1]);
- max_frequency = dtplat->max_frequency;
- ret = clk_get_by_driver_info(dev, dtplat->clocks, &clk);
-#else
- max_frequency = dev_read_u32_default(dev, "max-frequency", 0);
+ host->max_clk = cfg->f_max;
ret = clk_get_by_index(dev, 0, &clk);
-#endif
if (!ret) {
- ret = clk_set_rate(&clk, max_frequency);
+ ret = clk_set_rate(&clk, host->max_clk);
if (IS_ERR_VALUE(ret))
printf("%s clk set rate fail!\n", __func__);
} else {
printf("%s fail to get clk\n", __func__);
}
+ prv->emmc_clk = clk;
+ prv->dev = dev;
+
+ if (data->get_phy) {
+ ret = data->get_phy(dev);
+ if (ret)
+ return ret;
+ }
+
+ if (data->emmc_phy_init) {
+ ret = data->emmc_phy_init(dev);
+ if (ret)
+ return ret;
+ }
+
+ host->ops = &rockchip_sdhci_ops;
host->quirks = SDHCI_QUIRK_WAIT_SEND_CMD;
- host->max_clk = max_frequency;
- /*
- * The sdhci-driver only supports 4bit and 8bit, as sdhci_setup_cfg
- * doesn't allow us to clear MMC_MODE_4BIT. Consequently, we don't
- * check for other bus-width values.
- */
- if (host->bus_width == 8)
- host->host_caps |= MMC_MODE_8BIT;
host->mmc = &plat->mmc;
host->mmc->priv = &prv->host;
host->mmc->dev = dev;
upriv->mmc = host->mmc;
- ret = sdhci_setup_cfg(&plat->cfg, host, 0, EMMC_MIN_FREQ);
+ ret = sdhci_setup_cfg(cfg, host, cfg->f_max, EMMC_MIN_FREQ);
if (ret)
return ret;
return sdhci_probe(dev);
}
-static int arasan_sdhci_of_to_plat(struct udevice *dev)
+static int rockchip_sdhci_of_to_plat(struct udevice *dev)
{
-#if !CONFIG_IS_ENABLED(OF_PLATDATA)
+ struct rockchip_sdhc_plat *plat = dev_get_plat(dev);
struct sdhci_host *host = dev_get_priv(dev);
+ struct mmc_config *cfg = &plat->cfg;
+ int ret;
host->name = dev->name;
host->ioaddr = dev_read_addr_ptr(dev);
- host->bus_width = dev_read_u32_default(dev, "bus-width", 4);
-#endif
+
+ ret = mmc_of_parse(dev, cfg);
+ if (ret)
+ return ret;
return 0;
}
@@ -101,19 +432,38 @@ static int rockchip_sdhci_bind(struct udevice *dev)
return sdhci_bind(dev, &plat->mmc, &plat->cfg);
}
-static const struct udevice_id arasan_sdhci_ids[] = {
- { .compatible = "arasan,sdhci-5.1" },
+static const struct sdhci_data rk3399_data = {
+ .emmc_set_clock = rk3399_sdhci_emmc_set_clock,
+ .get_phy = rk3399_emmc_get_phy,
+ .emmc_phy_init = rk3399_emmc_phy_init,
+};
+
+static const struct sdhci_data rk3568_data = {
+ .emmc_set_clock = rk3568_sdhci_emmc_set_clock,
+ .get_phy = rk3568_emmc_get_phy,
+ .emmc_phy_init = rk3568_emmc_phy_init,
+};
+
+static const struct udevice_id sdhci_ids[] = {
+ {
+ .compatible = "arasan,sdhci-5.1",
+ .data = (ulong)&rk3399_data,
+ },
+ {
+ .compatible = "rockchip,rk3568-dwcmshc",
+ .data = (ulong)&rk3568_data,
+ },
{ }
};
U_BOOT_DRIVER(arasan_sdhci_drv) = {
- .name = "rockchip_rk3399_sdhci_5_1",
+ .name = "rockchip_sdhci_5_1",
.id = UCLASS_MMC,
- .of_match = arasan_sdhci_ids,
- .of_to_plat = arasan_sdhci_of_to_plat,
+ .of_match = sdhci_ids,
+ .of_to_plat = rockchip_sdhci_of_to_plat,
.ops = &sdhci_ops,
.bind = rockchip_sdhci_bind,
- .probe = arasan_sdhci_probe,
+ .probe = rockchip_sdhci_probe,
.priv_auto = sizeof(struct rockchip_sdhc),
.plat_auto = sizeof(struct rockchip_sdhc_plat),
};
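With the per-SoC differences factored into struct sdhci_data and selected through the compatible entry's .data field, hooking up a further SoC reduces to a sketch like the one below; every rk9999 name is an invented placeholder:

/* Editor's sketch: a hypothetical additional SoC for this driver. */
static int rk9999_emmc_phy_init(struct udevice *dev)
{
	return 0;	/* nothing to set up on this imaginary SoC */
}

static int rk9999_emmc_get_phy(struct udevice *dev)
{
	return 0;	/* no separate GRF-mapped PHY block */
}

static const struct sdhci_data rk9999_data = {
	.emmc_set_clock = rk3568_sdhci_emmc_set_clock,	/* reuse the DWCMSHC DLL path */
	.get_phy = rk9999_emmc_get_phy,
	.emmc_phy_init = rk9999_emmc_phy_init,
};

/* ...plus one more entry in sdhci_ids[]:
 * { .compatible = "rockchip,rk9999-dwcmshc", .data = (ulong)&rk9999_data },
 */
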
diff --git a/drivers/mtd/Kconfig b/drivers/mtd/Kconfig
index ad50c5e870e..b303fabe0f0 100644
--- a/drivers/mtd/Kconfig
+++ b/drivers/mtd/Kconfig
@@ -42,6 +42,7 @@ config FLASH_CFI_DRIVER
config CFI_FLASH
bool "Enable Driver Model for CFI Flash driver"
depends on DM_MTD
+ select FLASH_CFI_DRIVER
help
The Common Flash Interface specification was developed by Intel,
AMD and other flash manufactures. It provides a universal method
diff --git a/drivers/mtd/spi/Kconfig b/drivers/mtd/spi/Kconfig
index 1b2ef37e92d..b2291f72905 100644
--- a/drivers/mtd/spi/Kconfig
+++ b/drivers/mtd/spi/Kconfig
@@ -204,6 +204,12 @@ config SPI_FLASH_XMC
Add support for various XMC (Wuhan Xinxin Semiconductor
Manufacturing Corp.) SPI flash chips (XM25xxx)
+config SPI_FLASH_XTX
+ bool "XTX SPI flash support"
+ help
+ Add support for various XTX (XTX Technology Limited)
+ SPI flash chips (XT25xxx).
+
endif
config SPI_FLASH_USE_4K_SECTORS
diff --git a/drivers/mtd/spi/spi-nor-ids.c b/drivers/mtd/spi/spi-nor-ids.c
index cb3a08872d6..4aef1ddd6e2 100644
--- a/drivers/mtd/spi/spi-nor-ids.c
+++ b/drivers/mtd/spi/spi-nor-ids.c
@@ -362,5 +362,9 @@ const struct flash_info spi_nor_ids[] = {
{ INFO("XM25QH64C", 0x204017, 0, 64 * 1024, 128, SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
{ INFO("XM25QH128A", 0x207018, 0, 64 * 1024, 256, SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
#endif
+#ifdef CONFIG_SPI_FLASH_XTX
+ /* XTX Technology (Shenzhen) Limited */
+ { INFO("xt25f128b", 0x0b4018, 0, 64 * 1024, 256, SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
+#endif
{ },
};
diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig
index 8b10148579e..32f2708dc33 100644
--- a/drivers/net/Kconfig
+++ b/drivers/net/Kconfig
@@ -331,7 +331,7 @@ config FEC_MXC_MDIO_BASE
config FEC_MXC
bool "FEC Ethernet controller"
- depends on MX28 || MX5 || MX6 || MX7 || IMX8 || IMX8M || VF610
+ depends on MX28 || MX5 || MX6 || MX7 || IMX8 || IMX8M || IMX8ULP || VF610
help
This driver supports the 10/100 Fast Ethernet controller for
NXP i.MX processors.
diff --git a/drivers/net/fec_mxc.c b/drivers/net/fec_mxc.c
index db2cdaf684b..9bb42e5ca90 100644
--- a/drivers/net/fec_mxc.c
+++ b/drivers/net/fec_mxc.c
@@ -631,7 +631,7 @@ static int fec_init(struct eth_device *dev, struct bd_info *bd)
writel(0x00000000, &fec->eth->gaddr2);
/* Do not access reserved register */
- if (!is_mx6ul() && !is_mx6ull() && !is_imx8() && !is_imx8m()) {
+ if (!is_mx6ul() && !is_mx6ull() && !is_imx8() && !is_imx8m() && !is_imx8ulp()) {
/* clear MIB RAM */
for (i = mib_ptr; i <= mib_ptr + 0xfc; i += 4)
writel(0, i);
diff --git a/drivers/net/phy/Kconfig b/drivers/net/phy/Kconfig
index 405bf767530..64d5ddf2385 100644
--- a/drivers/net/phy/Kconfig
+++ b/drivers/net/phy/Kconfig
@@ -131,6 +131,16 @@ config SYS_CORTINA_FW_IN_SPIFLASH
endchoice
+config CORTINA_FW_ADDR
+ hex "Cortina Firmware Address"
+ depends on PHY_CORTINA && !SYS_CORTINA_NO_FW_UPLOAD
+ default 0x0
+
+config CORTINA_FW_LENGTH
+ hex "Cortina Firmware Length"
+ depends on PHY_CORTINA && !SYS_CORTINA_NO_FW_UPLOAD
+ default 0x40000
+
config PHY_CORTINA_ACCESS
bool "Cortina Access Ethernet PHYs support"
default y
diff --git a/drivers/net/phy/cortina.c b/drivers/net/phy/cortina.c
index b381a431fd9..2ac02952450 100644
--- a/drivers/net/phy/cortina.c
+++ b/drivers/net/phy/cortina.c
@@ -17,12 +17,11 @@
#include <linux/err.h>
#include <phy.h>
#include <cortina.h>
-#ifdef CONFIG_SYS_CORTINA_FW_IN_NAND
#include <nand.h>
-#elif defined(CONFIG_SYS_CORTINA_FW_IN_SPIFLASH)
#include <spi_flash.h>
-#elif defined(CONFIG_SYS_CORTINA_FW_IN_MMC)
#include <mmc.h>
+#ifdef CONFIG_ARM64
+#include <asm/arch/cpu.h>
#endif
#ifndef CONFIG_PHYLIB_10G
@@ -124,6 +123,11 @@ struct cortina_reg_config cortina_reg_cfg[] = {
{VILLA_LINE_SDS_COMMON_STX0_TX_OUTPUT_CTRLB, 0xc01E},
};
+__weak ulong *cs4340_get_fw_addr(void)
+{
+ return (ulong *)CONFIG_CORTINA_FW_ADDR;
+}
+
void cs4340_upload_firmware(struct phy_device *phydev)
{
char line_temp[0x50] = {0};
@@ -132,22 +136,76 @@ void cs4340_upload_firmware(struct phy_device *phydev)
int i, line_cnt = 0, column_cnt = 0;
struct cortina_reg_config fw_temp;
char *addr = NULL;
+ ulong cortina_fw_addr = (ulong)cs4340_get_fw_addr();
+
+#ifdef CONFIG_TFABOOT
+ enum boot_src src = get_boot_src();
+
+ if (src == BOOT_SOURCE_IFC_NOR) {
+ addr = (char *)cortina_fw_addr;
+ } else if (src == BOOT_SOURCE_IFC_NAND) {
+ int ret;
+ size_t fw_length = CONFIG_CORTINA_FW_LENGTH;
+
+ addr = malloc(CONFIG_CORTINA_FW_LENGTH);
+ ret = nand_read(get_nand_dev_by_index(0),
+ (loff_t)cortina_fw_addr, &fw_length, (u_char *)addr);
+ if (ret == -EUCLEAN) {
+ printf("NAND read of Cortina firmware at 0x%lx failed %d\n",
+ cortina_fw_addr, ret);
+ }
+ } else if (src == BOOT_SOURCE_QSPI_NOR) {
+ int ret;
+ struct spi_flash *ucode_flash;
+ addr = malloc(CONFIG_CORTINA_FW_LENGTH);
+ ucode_flash = spi_flash_probe(CONFIG_ENV_SPI_BUS, CONFIG_ENV_SPI_CS,
+ CONFIG_ENV_SPI_MAX_HZ, CONFIG_ENV_SPI_MODE);
+ if (!ucode_flash) {
+ puts("SF: probe for Cortina ucode failed\n");
+ } else {
+ ret = spi_flash_read(ucode_flash, cortina_fw_addr,
+ CONFIG_CORTINA_FW_LENGTH, addr);
+ if (ret)
+ puts("SF: read for Cortina ucode failed\n");
+ spi_flash_free(ucode_flash);
+ }
+ } else if (src == BOOT_SOURCE_SD_MMC) {
+ int dev = CONFIG_SYS_MMC_ENV_DEV;
+ u32 cnt = CONFIG_CORTINA_FW_LENGTH / 512;
+ u32 blk = cortina_fw_addr / 512;
+ struct mmc *mmc = find_mmc_device(CONFIG_SYS_MMC_ENV_DEV);
+
+ if (!mmc) {
+ puts("Failed to find MMC device for Cortina ucode\n");
+ } else {
+ addr = malloc(CONFIG_CORTINA_FW_LENGTH);
+ printf("MMC read: dev # %u, block # %u, count %u ...\n",
+ dev, blk, cnt);
+ mmc_init(mmc);
+#ifdef CONFIG_BLK
+ (void)blk_dread(mmc_get_blk_desc(mmc), blk, cnt, addr);
+#else
+ (void)mmc->block_dev.block_read(&mmc->block_dev, blk, cnt, addr);
+#endif
+ }
+ }
+#else /* CONFIG_TFABOOT */
#if defined(CONFIG_SYS_CORTINA_FW_IN_NOR) || \
defined(CONFIG_SYS_CORTINA_FW_IN_REMOTE)
- addr = (char *)CONFIG_CORTINA_FW_ADDR;
+ addr = (char *)cortina_fw_addr;
#elif defined(CONFIG_SYS_CORTINA_FW_IN_NAND)
int ret;
size_t fw_length = CONFIG_CORTINA_FW_LENGTH;
addr = malloc(CONFIG_CORTINA_FW_LENGTH);
ret = nand_read(get_nand_dev_by_index(0),
- (loff_t)CONFIG_CORTINA_FW_ADDR,
+ (loff_t)cortina_fw_addr,
&fw_length, (u_char *)addr);
if (ret == -EUCLEAN) {
- printf("NAND read of Cortina firmware at 0x%x failed %d\n",
- CONFIG_CORTINA_FW_ADDR, ret);
+ printf("NAND read of Cortina firmware at 0x%lx failed %d\n",
+ cortina_fw_addr, ret);
}
#elif defined(CONFIG_SYS_CORTINA_FW_IN_SPIFLASH)
int ret;
@@ -159,7 +217,7 @@ void cs4340_upload_firmware(struct phy_device *phydev)
if (!ucode_flash) {
puts("SF: probe for Cortina ucode failed\n");
} else {
- ret = spi_flash_read(ucode_flash, CONFIG_CORTINA_FW_ADDR,
+ ret = spi_flash_read(ucode_flash, cortina_fw_addr,
CONFIG_CORTINA_FW_LENGTH, addr);
if (ret)
puts("SF: read for Cortina ucode failed\n");
@@ -168,7 +226,7 @@ void cs4340_upload_firmware(struct phy_device *phydev)
#elif defined(CONFIG_SYS_CORTINA_FW_IN_MMC)
int dev = CONFIG_SYS_MMC_ENV_DEV;
u32 cnt = CONFIG_CORTINA_FW_LENGTH / 512;
- u32 blk = CONFIG_CORTINA_FW_ADDR / 512;
+ u32 blk = cortina_fw_addr / 512;
struct mmc *mmc = find_mmc_device(CONFIG_SYS_MMC_ENV_DEV);
if (!mmc) {
@@ -187,6 +245,7 @@ void cs4340_upload_firmware(struct phy_device *phydev)
#endif
}
#endif
+#endif
while (*addr != 'Q') {
i = 0;
@@ -195,7 +254,7 @@ void cs4340_upload_firmware(struct phy_device *phydev)
line_temp[i++] = *addr++;
if (0x50 < i) {
printf("Not found Cortina PHY ucode at 0x%p\n",
- (char *)CONFIG_CORTINA_FW_ADDR);
+ (char *)cortina_fw_addr);
return;
}
}
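Because cs4340_get_fw_addr() above is declared weak, a board that stages the Cortina firmware somewhere other than CONFIG_CORTINA_FW_ADDR can override it; a minimal sketch with a placeholder address:

/* Editor's sketch: board-level override of the weak firmware locator. */
ulong *cs4340_get_fw_addr(void)
{
	return (ulong *)0x81000000;	/* placeholder: wherever the board stages it */
}
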
diff --git a/drivers/pci/Makefile b/drivers/pci/Makefile
index 83d7a4e403c..bdfdec98a08 100644
--- a/drivers/pci/Makefile
+++ b/drivers/pci/Makefile
@@ -19,7 +19,6 @@ obj-$(CONFIG_PCI_GT64120) += pci_gt64120.o
obj-$(CONFIG_PCI_MPC85XX) += pci_mpc85xx.o
obj-$(CONFIG_PCI_MSC01) += pci_msc01.o
obj-$(CONFIG_PCIE_IMX) += pcie_imx.o
-obj-$(CONFIG_FTPCI100) += pci_ftpci100.o
obj-$(CONFIG_PCI_MVEBU) += pci_mvebu.o
obj-$(CONFIG_PCI_RCAR_GEN2) += pci-rcar-gen2.o
obj-$(CONFIG_PCI_RCAR_GEN3) += pci-rcar-gen3.o
diff --git a/drivers/pci/pci-aardvark.c b/drivers/pci/pci-aardvark.c
index 1b9bae7cca7..815b26162f1 100644
--- a/drivers/pci/pci-aardvark.c
+++ b/drivers/pci/pci-aardvark.c
@@ -177,7 +177,6 @@
#define LINK_MAX_RETRIES 10
#define LINK_WAIT_TIMEOUT 100000
-#define CFG_RD_UR_VAL 0xFFFFFFFF
#define CFG_RD_CRS_VAL 0xFFFF0001
/**
@@ -263,12 +262,12 @@ static int pcie_advk_wait_pio(struct pcie_advk *pcie)
* pcie_advk_check_pio_status() - Validate PIO status and get the read result
*
* @pcie: Pointer to the PCI bus
- * @read: Read from or write to configuration space - true(read) false(write)
- * @read_val: Pointer to the read result, only valid when read is true
+ * @allow_crs: Only for read requests, if CRS response is allowed
+ * @read_val: Pointer to the read result
*
*/
static int pcie_advk_check_pio_status(struct pcie_advk *pcie,
- bool read,
+ bool allow_crs,
uint *read_val)
{
uint reg;
@@ -286,22 +285,16 @@ static int pcie_advk_check_pio_status(struct pcie_advk *pcie,
break;
}
/* Get the read result */
- if (read)
+ if (read_val)
*read_val = advk_readl(pcie, PIO_RD_DATA);
/* No error */
strcomp_status = NULL;
break;
case PIO_COMPLETION_STATUS_UR:
- if (read) {
- /* For reading, UR is not an error status. */
- *read_val = CFG_RD_UR_VAL;
- strcomp_status = NULL;
- } else {
- strcomp_status = "UR";
- }
+ strcomp_status = "UR";
break;
case PIO_COMPLETION_STATUS_CRS:
- if (read) {
+ if (allow_crs && read_val) {
/* For reading, CRS is not an error status. */
*read_val = CFG_RD_CRS_VAL;
strcomp_status = NULL;
@@ -352,6 +345,7 @@ static int pcie_advk_read_config(const struct udevice *bus, pci_dev_t bdf,
enum pci_size_t size)
{
struct pcie_advk *pcie = dev_get_priv(bus);
+ bool allow_crs;
uint reg;
int ret;
@@ -364,13 +358,17 @@ static int pcie_advk_read_config(const struct udevice *bus, pci_dev_t bdf,
return 0;
}
+ allow_crs = (offset == PCI_VENDOR_ID) && (size == 4);
+
if (advk_readl(pcie, PIO_START)) {
dev_err(pcie->dev,
"Previous PIO read/write transfer is still running\n");
- if (offset != PCI_VENDOR_ID)
- return -EINVAL;
- *valuep = CFG_RD_CRS_VAL;
- return 0;
+ if (allow_crs) {
+ *valuep = CFG_RD_CRS_VAL;
+ return 0;
+ }
+ *valuep = pci_get_ff(size);
+ return -EINVAL;
}
/* Program the control register */
@@ -392,16 +390,20 @@ static int pcie_advk_read_config(const struct udevice *bus, pci_dev_t bdf,
advk_writel(pcie, 1, PIO_START);
if (!pcie_advk_wait_pio(pcie)) {
- if (offset != PCI_VENDOR_ID)
- return -EINVAL;
- *valuep = CFG_RD_CRS_VAL;
- return 0;
+ if (allow_crs) {
+ *valuep = CFG_RD_CRS_VAL;
+ return 0;
+ }
+ *valuep = pci_get_ff(size);
+ return -EINVAL;
}
/* Check PIO status and get the read result */
- ret = pcie_advk_check_pio_status(pcie, true, &reg);
- if (ret)
+ ret = pcie_advk_check_pio_status(pcie, allow_crs, &reg);
+ if (ret) {
+ *valuep = pci_get_ff(size);
return ret;
+ }
dev_dbg(pcie->dev, "(addr,size,val)=(0x%04x, %d, 0x%08x)\n",
offset, size, reg);
@@ -511,9 +513,7 @@ static int pcie_advk_write_config(struct udevice *bus, pci_dev_t bdf,
}
/* Check PIO status */
- pcie_advk_check_pio_status(pcie, false, &reg);
-
- return 0;
+ return pcie_advk_check_pio_status(pcie, false, NULL);
}
/**
diff --git a/drivers/pci/pci_common.c b/drivers/pci/pci_common.c
index 5231b69dc9a..02a71da30fa 100644
--- a/drivers/pci/pci_common.c
+++ b/drivers/pci/pci_common.c
@@ -99,7 +99,7 @@ __weak int pci_skip_dev(struct pci_controller *hose, pci_dev_t dev)
return 0;
}
-#if !defined(CONFIG_DM_PCI) || defined(CONFIG_DM_PCI_COMPAT)
+#if defined(CONFIG_DM_PCI_COMPAT)
/* Get a virtual address associated with a BAR region */
void *pci_map_bar(pci_dev_t pdev, int bar, int flags)
{
@@ -361,4 +361,4 @@ pci_dev_t pci_find_class(uint find_class, int index)
return -ENODEV;
}
-#endif /* !CONFIG_DM_PCI || CONFIG_DM_PCI_COMPAT */
+#endif /* CONFIG_DM_PCI_COMPAT */
diff --git a/drivers/pci/pci_ftpci100.c b/drivers/pci/pci_ftpci100.c
deleted file mode 100644
index 32fac878a67..00000000000
--- a/drivers/pci/pci_ftpci100.c
+++ /dev/null
@@ -1,319 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0+
-/*
- * Faraday FTPCI100 PCI Bridge Controller Device Driver Implementation
- *
- * Copyright (C) 2011 Andes Technology Corporation
- * Gavin Guo, Andes Technology Corporation <gavinguo@andestech.com>
- * Macpaul Lin, Andes Technology Corporation <macpaul@andestech.com>
- */
-#include <common.h>
-#include <init.h>
-#include <log.h>
-#include <malloc.h>
-#include <pci.h>
-
-#include <faraday/ftpci100.h>
-
-#include <asm/io.h>
-#include <asm/types.h> /* u32, u16.... used by pci.h */
-
-struct ftpci100_data {
- unsigned int reg_base;
- unsigned int io_base;
- unsigned int mem_base;
- unsigned int mmio_base;
- unsigned int ndevs;
-};
-
-static struct pci_config devs[FTPCI100_MAX_FUNCTIONS];
-static struct pci_controller local_hose;
-
-static void setup_pci_bar(unsigned int bus, unsigned int dev, unsigned func,
- unsigned char header, struct ftpci100_data *priv)
-{
- struct pci_controller *hose = (struct pci_controller *)&local_hose;
- unsigned int i, tmp32, bar_no, iovsmem = 1;
- pci_dev_t dev_nu;
-
- /* A device is present, add an entry to the array */
- devs[priv->ndevs].bus = bus;
- devs[priv->ndevs].dev = dev;
- devs[priv->ndevs].func = func;
-
- dev_nu = PCI_BDF(bus, dev, func);
-
- if ((header & 0x7f) == 0x01)
- /* PCI-PCI Bridge */
- bar_no = 2;
- else
- bar_no = 6;
-
- /* Allocate address spaces by configuring BARs */
- for (i = 0; i < bar_no; i++) {
- pci_hose_write_config_dword(hose, dev_nu,
- PCI_BASE_ADDRESS_0 + i * 4, 0xffffffff);
- pci_hose_read_config_dword(hose, dev_nu,
- PCI_BASE_ADDRESS_0 + i * 4, &tmp32);
-
- if (tmp32 == 0x0)
- continue;
-
- /* IO space */
- if (tmp32 & 0x1) {
- iovsmem = 0;
- unsigned int size_mask = ~(tmp32 & 0xfffffffc);
-
- if (priv->io_base & size_mask)
- priv->io_base = (priv->io_base & ~size_mask) + \
- size_mask + 1;
-
- devs[priv->ndevs].bar[i].addr = priv->io_base;
- devs[priv->ndevs].bar[i].size = size_mask + 1;
-
- pci_hose_write_config_dword(hose, dev_nu,
- PCI_BASE_ADDRESS_0 + i * 4,
- priv->io_base);
-
- debug("Allocated IO address 0x%X-" \
- "0x%X for Bus %d, Device %d, Function %d\n",
- priv->io_base,
- priv->io_base + size_mask, bus, dev, func);
-
- priv->io_base += size_mask + 1;
- } else {
- /* Memory space */
- unsigned int is_64bit = ((tmp32 & 0x6) == 0x4);
- unsigned int is_pref = tmp32 & 0x8;
- unsigned int size_mask = ~(tmp32 & 0xfffffff0);
- unsigned int alloc_base;
- unsigned int *addr_mem_base;
-
- if (is_pref)
- addr_mem_base = &priv->mem_base;
- else
- addr_mem_base = &priv->mmio_base;
-
- alloc_base = *addr_mem_base;
-
- if (alloc_base & size_mask)
- alloc_base = (alloc_base & ~size_mask) \
- + size_mask + 1;
-
- pci_hose_write_config_dword(hose, dev_nu,
- PCI_BASE_ADDRESS_0 + i * 4, alloc_base);
-
- debug("Allocated %s address 0x%X-" \
- "0x%X for Bus %d, Device %d, Function %d\n",
- is_pref ? "MEM" : "MMIO", alloc_base,
- alloc_base + size_mask, bus, dev, func);
-
- devs[priv->ndevs].bar[i].addr = alloc_base;
- devs[priv->ndevs].bar[i].size = size_mask + 1;
-
- debug("BAR address BAR size\n");
- debug("%010x %08d\n",
- devs[priv->ndevs].bar[0].addr,
- devs[priv->ndevs].bar[0].size);
-
- alloc_base += size_mask + 1;
- *addr_mem_base = alloc_base;
-
- if (is_64bit) {
- i++;
- pci_hose_write_config_dword(hose, dev_nu,
- PCI_BASE_ADDRESS_0 + i * 4, 0x0);
- }
- }
- }
-
- /* Enable Bus Master, Memory Space, and IO Space */
- pci_hose_read_config_dword(hose, dev_nu, PCI_CACHE_LINE_SIZE, &tmp32);
- pci_hose_write_config_dword(hose, dev_nu, PCI_CACHE_LINE_SIZE, 0x08);
- pci_hose_read_config_dword(hose, dev_nu, PCI_CACHE_LINE_SIZE, &tmp32);
-
- pci_hose_read_config_dword(hose, dev_nu, PCI_COMMAND, &tmp32);
-
- tmp32 &= 0xffff;
-
- if (iovsmem == 0)
- tmp32 |= 0x5;
- else
- tmp32 |= 0x6;
-
- pci_hose_write_config_dword(hose, dev_nu, PCI_COMMAND, tmp32);
-}
-
-static void pci_bus_scan(struct ftpci100_data *priv)
-{
- struct pci_controller *hose = (struct pci_controller *)&local_hose;
- unsigned int bus, dev, func;
- pci_dev_t dev_nu;
- unsigned int data32;
- unsigned int tmp;
- unsigned char header;
- unsigned char int_pin;
- unsigned int niobars;
- unsigned int nmbars;
-
- priv->ndevs = 1;
-
- nmbars = 0;
- niobars = 0;
-
- for (bus = 0; bus < MAX_BUS_NUM; bus++)
- for (dev = 0; dev < MAX_DEV_NUM; dev++)
- for (func = 0; func < MAX_FUN_NUM; func++) {
- dev_nu = PCI_BDF(bus, dev, func);
- pci_hose_read_config_dword(hose, dev_nu,
- PCI_VENDOR_ID, &data32);
-
- /*
- * some broken boards return 0 or ~0,
- * if a slot is empty.
- */
- if (data32 == 0xffffffff ||
- data32 == 0x00000000 ||
- data32 == 0x0000ffff ||
- data32 == 0xffff0000)
- continue;
-
- pci_hose_read_config_dword(hose, dev_nu,
- PCI_HEADER_TYPE, &tmp);
- header = (unsigned char)tmp;
- setup_pci_bar(bus, dev, func, header, priv);
-
- devs[priv->ndevs].v_id = (u16)(data32 & \
- 0x0000ffff);
-
- devs[priv->ndevs].d_id = (u16)((data32 & \
- 0xffff0000) >> 16);
-
- /* Figure out what INTX# line the card uses */
- pci_hose_read_config_byte(hose, dev_nu,
- PCI_INTERRUPT_PIN, &int_pin);
-
- /* assign the appropriate irq line */
- if (int_pin > PCI_IRQ_LINES) {
- printf("more irq lines than expect\n");
- } else if (int_pin != 0) {
- /* This device uses an interrupt line */
- devs[priv->ndevs].pin = int_pin;
- }
-
- pci_hose_read_config_dword(hose, dev_nu,
- PCI_CLASS_DEVICE, &data32);
-
- debug("%06d %03d %03d " \
- "%04d %08x %08x " \
- "%03d %08x %06d %08x\n",
- priv->ndevs, devs[priv->ndevs].bus,
- devs[priv->ndevs].dev,
- devs[priv->ndevs].func,
- devs[priv->ndevs].d_id,
- devs[priv->ndevs].v_id,
- devs[priv->ndevs].pin,
- devs[priv->ndevs].bar[0].addr,
- devs[priv->ndevs].bar[0].size,
- data32 >> 8);
-
- priv->ndevs++;
- }
-}
-
-static void ftpci_preinit(struct ftpci100_data *priv)
-{
- struct ftpci100_ahbc *ftpci100;
- struct pci_controller *hose = (struct pci_controller *)&local_hose;
- u32 pci_config_addr;
- u32 pci_config_data;
-
- priv->reg_base = CONFIG_FTPCI100_BASE;
- priv->io_base = CONFIG_FTPCI100_BASE + CONFIG_FTPCI100_IO_SIZE;
- priv->mmio_base = CONFIG_FTPCI100_MEM_BASE;
- priv->mem_base = CONFIG_FTPCI100_MEM_BASE + CONFIG_FTPCI100_MEM_SIZE;
-
- ftpci100 = (struct ftpci100_ahbc *)priv->reg_base;
-
- pci_config_addr = (u32) &ftpci100->conf;
- pci_config_data = (u32) &ftpci100->data;
-
- /* print device name */
- printf("FTPCI100\n");
-
- /* dump basic configuration */
- debug("%s: Config addr is %08X, data port is %08X\n",
- __func__, pci_config_addr, pci_config_data);
-
- /* PCI memory space */
- pci_set_region(hose->regions + 0,
- CONFIG_PCI_MEM_BUS,
- CONFIG_PCI_MEM_PHYS,
- CONFIG_PCI_MEM_SIZE,
- PCI_REGION_MEM);
- hose->region_count++;
-
- /* PCI IO space */
- pci_set_region(hose->regions + 1,
- CONFIG_PCI_IO_BUS,
- CONFIG_PCI_IO_PHYS,
- CONFIG_PCI_IO_SIZE,
- PCI_REGION_IO);
- hose->region_count++;
-
-#if defined(CONFIG_PCI_SYS_BUS)
- /* PCI System Memory space */
- pci_set_region(hose->regions + 2,
- CONFIG_PCI_SYS_BUS,
- CONFIG_PCI_SYS_PHYS,
- CONFIG_PCI_SYS_SIZE,
- PCI_REGION_MEM | PCI_REGION_SYS_MEMORY);
- hose->region_count++;
-#endif
-
- /* setup indirect read/write function */
- pci_setup_indirect(hose, pci_config_addr, pci_config_data);
-
- /* register hose */
- pci_register_hose(hose);
-}
-
-void pci_ftpci_init(void)
-{
- struct ftpci100_data *priv = NULL;
- struct pci_controller *hose = (struct pci_controller *)&local_hose;
- pci_dev_t bridge_num;
-
- struct pci_device_id bridge_ids[] = {
- {FTPCI100_BRIDGE_VENDORID, FTPCI100_BRIDGE_DEVICEID},
- {0, 0}
- };
-
- priv = malloc(sizeof(struct ftpci100_data));
-
- if (!priv) {
- printf("%s(): failed to malloc priv\n", __func__);
- return;
- }
-
- memset(priv, 0, sizeof(struct ftpci100_data));
-
- ftpci_preinit(priv);
-
- debug("Device bus dev func deviceID vendorID pin address" \
- " size class\n");
-
- pci_bus_scan(priv);
-
- /*
- * Setup the PCI Bridge Window to 1GB,
- * it will cause USB OHCI Host controller Unrecoverable Error
- * if it is not set.
- */
- bridge_num = pci_find_devices(bridge_ids, 0);
- if (bridge_num == -1) {
- printf("PCI Bridge not found\n");
- return;
- }
- pci_hose_write_config_dword(hose, bridge_num, PCI_MEM_BASE_SIZE1,
- FTPCI100_BASE_ADR_SIZE(1024));
-}
diff --git a/drivers/pci/pci_gt64120.c b/drivers/pci/pci_gt64120.c
index e57fedf036e..153c65b119a 100644
--- a/drivers/pci/pci_gt64120.c
+++ b/drivers/pci/pci_gt64120.c
@@ -114,69 +114,6 @@ static int gt_config_access(struct gt64120_pci_controller *gt,
return 0;
}
-#if !IS_ENABLED(CONFIG_DM_PCI)
-static int gt_read_config_dword(struct pci_controller *hose, pci_dev_t dev,
- int where, u32 *value)
-{
- struct gt64120_pci_controller *gt = hose_to_gt64120(hose);
-
- *value = 0xffffffff;
- return gt_config_access(gt, PCI_ACCESS_READ, dev, where, value);
-}
-
-static int gt_write_config_dword(struct pci_controller *hose, pci_dev_t dev,
- int where, u32 value)
-{
- struct gt64120_pci_controller *gt = hose_to_gt64120(hose);
- u32 data = value;
-
- return gt_config_access(gt, PCI_ACCESS_WRITE, dev, where, &data);
-}
-
-void gt64120_pci_init(void *regs, unsigned long sys_bus, unsigned long sys_phys,
- unsigned long sys_size, unsigned long mem_bus,
- unsigned long mem_phys, unsigned long mem_size,
- unsigned long io_bus, unsigned long io_phys,
- unsigned long io_size)
-{
- static struct gt64120_pci_controller global_gt;
- struct gt64120_pci_controller *gt;
- struct pci_controller *hose;
-
- gt = &global_gt;
- gt->regs = regs;
-
- hose = &gt->hose;
-
- hose->first_busno = 0;
- hose->last_busno = 0;
-
- /* System memory space */
- pci_set_region(&hose->regions[0], sys_bus, sys_phys, sys_size,
- PCI_REGION_MEM | PCI_REGION_SYS_MEMORY);
-
- /* PCI memory space */
- pci_set_region(&hose->regions[1], mem_bus, mem_phys, mem_size,
- PCI_REGION_MEM);
-
- /* PCI I/O space */
- pci_set_region(&hose->regions[2], io_bus, io_phys, io_size,
- PCI_REGION_IO);
-
- hose->region_count = 3;
-
- pci_set_ops(hose,
- pci_hose_read_config_byte_via_dword,
- pci_hose_read_config_word_via_dword,
- gt_read_config_dword,
- pci_hose_write_config_byte_via_dword,
- pci_hose_write_config_word_via_dword,
- gt_write_config_dword);
-
- pci_register_hose(hose);
- hose->last_busno = pci_hose_scan(hose);
-}
-#else
static int gt64120_pci_read_config(const struct udevice *dev, pci_dev_t bdf,
uint where, ulong *val,
enum pci_size_t size)
@@ -246,4 +183,3 @@ U_BOOT_DRIVER(gt64120_pci) = {
.probe = gt64120_pci_probe,
.priv_auto = sizeof(struct gt64120_pci_controller),
};
-#endif
diff --git a/drivers/pci/pci_msc01.c b/drivers/pci/pci_msc01.c
index c17da475d01..2f1b688fc32 100644
--- a/drivers/pci/pci_msc01.c
+++ b/drivers/pci/pci_msc01.c
@@ -62,69 +62,6 @@ static int msc01_config_access(struct msc01_pci_controller *msc01,
return 0;
}
-#if !IS_ENABLED(CONFIG_DM_PCI)
-static int msc01_read_config_dword(struct pci_controller *hose, pci_dev_t dev,
- int where, u32 *value)
-{
- struct msc01_pci_controller *msc01 = hose_to_msc01(hose);
-
- *value = 0xffffffff;
- return msc01_config_access(msc01, PCI_ACCESS_READ, dev, where, value);
-}
-
-static int msc01_write_config_dword(struct pci_controller *hose, pci_dev_t dev,
- int where, u32 value)
-{
- struct msc01_pci_controller *gt = hose_to_msc01(hose);
- u32 data = value;
-
- return msc01_config_access(gt, PCI_ACCESS_WRITE, dev, where, &data);
-}
-
-void msc01_pci_init(void *base, unsigned long sys_bus, unsigned long sys_phys,
- unsigned long sys_size, unsigned long mem_bus,
- unsigned long mem_phys, unsigned long mem_size,
- unsigned long io_bus, unsigned long io_phys,
- unsigned long io_size)
-{
- static struct msc01_pci_controller global_msc01;
- struct msc01_pci_controller *msc01;
- struct pci_controller *hose;
-
- msc01 = &global_msc01;
- msc01->base = base;
-
- hose = &msc01->hose;
-
- hose->first_busno = 0;
- hose->last_busno = 0;
-
- /* System memory space */
- pci_set_region(&hose->regions[0], sys_bus, sys_phys, sys_size,
- PCI_REGION_MEM | PCI_REGION_SYS_MEMORY);
-
- /* PCI memory space */
- pci_set_region(&hose->regions[1], mem_bus, mem_phys, mem_size,
- PCI_REGION_MEM);
-
- /* PCI I/O space */
- pci_set_region(&hose->regions[2], io_bus, io_phys, io_size,
- PCI_REGION_IO);
-
- hose->region_count = 3;
-
- pci_set_ops(hose,
- pci_hose_read_config_byte_via_dword,
- pci_hose_read_config_word_via_dword,
- msc01_read_config_dword,
- pci_hose_write_config_byte_via_dword,
- pci_hose_write_config_word_via_dword,
- msc01_write_config_dword);
-
- pci_register_hose(hose);
- hose->last_busno = pci_hose_scan(hose);
-}
-#else
static int msc01_pci_read_config(const struct udevice *dev, pci_dev_t bdf,
uint where, ulong *val, enum pci_size_t size)
{
@@ -192,4 +129,3 @@ U_BOOT_DRIVER(msc01_pci) = {
.probe = msc01_pci_probe,
.priv_auto = sizeof(struct msc01_pci_controller),
};
-#endif
diff --git a/drivers/pci/pcie_imx.c b/drivers/pci/pcie_imx.c
index 7b46fdb89a3..756166fd3ea 100644
--- a/drivers/pci/pcie_imx.c
+++ b/drivers/pci/pcie_imx.c
@@ -681,86 +681,6 @@ static int imx_pcie_link_up(struct imx_pcie_priv *priv)
return 0;
}
-#if !CONFIG_IS_ENABLED(DM_PCI)
-static struct imx_pcie_priv imx_pcie_priv = {
- .dbi_base = (void __iomem *)MX6_DBI_ADDR,
- .cfg_base = (void __iomem *)MX6_ROOT_ADDR,
-};
-
-static struct imx_pcie_priv *priv = &imx_pcie_priv;
-
-static int imx_pcie_read_config(struct pci_controller *hose, pci_dev_t d,
- int where, u32 *val)
-{
- struct imx_pcie_priv *priv = hose->priv_data;
-
- return imx_pcie_read_cfg(priv, d, where, val);
-}
-
-static int imx_pcie_write_config(struct pci_controller *hose, pci_dev_t d,
- int where, u32 val)
-{
- struct imx_pcie_priv *priv = hose->priv_data;
-
- return imx_pcie_write_cfg(priv, d, where, val);
-}
-
-void imx_pcie_init(void)
-{
- /* Static instance of the controller. */
- static struct pci_controller pcc;
- struct pci_controller *hose = &pcc;
- int ret;
-
- memset(&pcc, 0, sizeof(pcc));
-
- hose->priv_data = priv;
-
- /* PCI I/O space */
- pci_set_region(&hose->regions[0],
- MX6_IO_ADDR, MX6_IO_ADDR,
- MX6_IO_SIZE, PCI_REGION_IO);
-
- /* PCI memory space */
- pci_set_region(&hose->regions[1],
- MX6_MEM_ADDR, MX6_MEM_ADDR,
- MX6_MEM_SIZE, PCI_REGION_MEM);
-
- /* System memory space */
- pci_set_region(&hose->regions[2],
- MMDC0_ARB_BASE_ADDR, MMDC0_ARB_BASE_ADDR,
- 0xefffffff, PCI_REGION_MEM | PCI_REGION_SYS_MEMORY);
-
- hose->region_count = 3;
-
- pci_set_ops(hose,
- pci_hose_read_config_byte_via_dword,
- pci_hose_read_config_word_via_dword,
- imx_pcie_read_config,
- pci_hose_write_config_byte_via_dword,
- pci_hose_write_config_word_via_dword,
- imx_pcie_write_config);
-
- /* Start the controller. */
- ret = imx_pcie_link_up(priv);
-
- if (!ret) {
- pci_register_hose(hose);
- hose->last_busno = pci_hose_scan(hose);
- }
-}
-
-void imx_pcie_remove(void)
-{
- imx6_pcie_assert_core_reset(priv, true);
-}
-
-/* Probe function. */
-void pci_init_board(void)
-{
- imx_pcie_init();
-}
-#else
static int imx_pcie_dm_read_config(const struct udevice *dev, pci_dev_t bdf,
uint offset, ulong *value,
enum pci_size_t size)
@@ -852,4 +772,3 @@ U_BOOT_DRIVER(imx_pcie) = {
.priv_auto = sizeof(struct imx_pcie_priv),
.flags = DM_FLAG_OS_PREPARE,
};
-#endif
diff --git a/drivers/pinctrl/nxp/Kconfig b/drivers/pinctrl/nxp/Kconfig
index ec55351e61e..4fb0916a376 100644
--- a/drivers/pinctrl/nxp/Kconfig
+++ b/drivers/pinctrl/nxp/Kconfig
@@ -60,6 +60,20 @@ config PINCTRL_IMX7ULP
only parses the 'fsl,pins' property and configure related
registers.
+config PINCTRL_IMX8ULP
+ bool "IMX8ULP pinctrl driver"
+ depends on ARCH_IMX8ULP && PINCTRL_FULL
+ select DEVRES
+ select PINCTRL_IMX
+ help
+ Say Y here to enable the imx8ulp pinctrl driver
+
+ This provides a simple pinctrl driver for the i.MX8ULP SoC family.
+ This feature depends on device tree configuration. This driver
+ differs from the Linux one: it is a simple implementation that only
+ parses the 'fsl,pins' property and configures the related
+ registers.
+
config PINCTRL_IMX8
bool "IMX8 pinctrl driver"
depends on ARCH_IMX8 && PINCTRL_FULL
diff --git a/drivers/pinctrl/nxp/Makefile b/drivers/pinctrl/nxp/Makefile
index 066ca75b65f..f2fe0d8efa6 100644
--- a/drivers/pinctrl/nxp/Makefile
+++ b/drivers/pinctrl/nxp/Makefile
@@ -3,6 +3,7 @@ obj-$(CONFIG_PINCTRL_IMX5) += pinctrl-imx5.o
obj-$(CONFIG_PINCTRL_IMX6) += pinctrl-imx6.o
obj-$(CONFIG_PINCTRL_IMX7) += pinctrl-imx7.o
obj-$(CONFIG_PINCTRL_IMX7ULP) += pinctrl-imx7ulp.o
+obj-$(CONFIG_PINCTRL_IMX8ULP) += pinctrl-imx8ulp.o
obj-$(CONFIG_PINCTRL_IMX_SCU) += pinctrl-scu.o
obj-$(CONFIG_PINCTRL_IMX8) += pinctrl-imx8.o
obj-$(CONFIG_PINCTRL_IMX8M) += pinctrl-imx8m.o
diff --git a/drivers/pinctrl/nxp/pinctrl-imx8ulp.c b/drivers/pinctrl/nxp/pinctrl-imx8ulp.c
new file mode 100644
index 00000000000..3f15f1dd450
--- /dev/null
+++ b/drivers/pinctrl/nxp/pinctrl-imx8ulp.c
@@ -0,0 +1,44 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Copyright 2020 NXP
+ *
+ */
+
+#include <common.h>
+#include <dm.h>
+#include <dm/pinctrl.h>
+
+#include "pinctrl-imx.h"
+
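+/*
+ * The two IOMUXC instances (iomuxc0 and iomuxc1) have the same register
+ * layout, so both descriptors carry identical flags.
+ */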
+static struct imx_pinctrl_soc_info imx8ulp_pinctrl_soc_info0 = {
+ .flags = ZERO_OFFSET_VALID | SHARE_MUX_CONF_REG | CONFIG_IBE_OBE,
+};
+
+static struct imx_pinctrl_soc_info imx8ulp_pinctrl_soc_info1 = {
+ .flags = ZERO_OFFSET_VALID | SHARE_MUX_CONF_REG | CONFIG_IBE_OBE,
+};
+
+static int imx8ulp_pinctrl_probe(struct udevice *dev)
+{
+ struct imx_pinctrl_soc_info *info =
+ (struct imx_pinctrl_soc_info *)dev_get_driver_data(dev);
+
+ return imx_pinctrl_probe(dev, info);
+}
+
+static const struct udevice_id imx8ulp_pinctrl_match[] = {
+ { .compatible = "fsl,imx8ulp-iomuxc0", .data = (ulong)&imx8ulp_pinctrl_soc_info0 },
+ { .compatible = "fsl,imx8ulp-iomuxc1", .data = (ulong)&imx8ulp_pinctrl_soc_info1 },
+ { /* sentinel */ }
+};
+
+U_BOOT_DRIVER(imx8ulp_pinctrl) = {
+ .name = "imx8ulp-pinctrl",
+ .id = UCLASS_PINCTRL,
+ .of_match = of_match_ptr(imx8ulp_pinctrl_match),
+ .probe = imx8ulp_pinctrl_probe,
+ .remove = imx_pinctrl_remove,
+ .priv_auto = sizeof(struct imx_pinctrl_priv),
+ .ops = &imx_pinctrl_ops,
+ .flags = DM_FLAG_PRE_RELOC,
+};
diff --git a/drivers/scsi/scsi.c b/drivers/scsi/scsi.c
index ce69750c7ff..d93d2419285 100644
--- a/drivers/scsi/scsi.c
+++ b/drivers/scsi/scsi.c
@@ -284,7 +284,6 @@ void scsi_init(void)
*/
for (i = 0; i < ARRAY_SIZE(scsi_device_list); i++) {
/* get PCI Device ID */
-#ifdef CONFIG_DM_PCI
struct udevice *dev;
int ret;
@@ -294,11 +293,6 @@ void scsi_init(void)
busdevfunc = dm_pci_get_bdf(dev);
break;
}
-#else
- busdevfunc = pci_find_device(scsi_device_list[i].vendor,
- scsi_device_list[i].device,
- 0);
-#endif
if (busdevfunc != -1)
break;
}
diff --git a/drivers/spi/Kconfig b/drivers/spi/Kconfig
index 5c2a60a2142..e12699bec7e 100644
--- a/drivers/spi/Kconfig
+++ b/drivers/spi/Kconfig
@@ -319,6 +319,14 @@ config RENESAS_RPC_SPI
on Renesas RCar Gen3 SoCs. This uses driver model and requires a
device tree binding to operate.
+config ROCKCHIP_SFC
+ bool "Rockchip SFC Driver"
+ help
+ Enable the Rockchip SFC driver for SPI NOR flash. This device is
+ a limited-purpose SPI controller for driving NOR flash on certain
+ Rockchip SoCs. This uses driver model and requires a device tree
+ binding to operate.
+
config ROCKCHIP_SPI
bool "Rockchip SPI driver"
help
diff --git a/drivers/spi/Makefile b/drivers/spi/Makefile
index 216e72c60f3..d2f24bccefd 100644
--- a/drivers/spi/Makefile
+++ b/drivers/spi/Makefile
@@ -54,6 +54,7 @@ obj-$(CONFIG_PL022_SPI) += pl022_spi.o
obj-$(CONFIG_SPI_QUP) += spi-qup.o
obj-$(CONFIG_SPI_MXIC) += spi-mxic.o
obj-$(CONFIG_RENESAS_RPC_SPI) += renesas_rpc_spi.o
+obj-$(CONFIG_ROCKCHIP_SFC) += rockchip_sfc.o
obj-$(CONFIG_ROCKCHIP_SPI) += rk_spi.o
obj-$(CONFIG_SANDBOX_SPI) += sandbox_spi.o
obj-$(CONFIG_SPI_SIFIVE) += spi-sifive.o
diff --git a/drivers/spi/nxp_fspi.c b/drivers/spi/nxp_fspi.c
index 6c5bad4c2ca..bba7a330e0c 100644
--- a/drivers/spi/nxp_fspi.c
+++ b/drivers/spi/nxp_fspi.c
@@ -428,7 +428,7 @@ static bool nxp_fspi_supports_op(struct spi_slave *slave,
op->data.nbytes > f->devtype_data->txfifo)
return false;
- return true;
+ return spi_mem_default_supports_op(slave, op);
}
/* Instead of busy looping invoke readl_poll_sleep_timeout functionality. */
diff --git a/drivers/spi/rockchip_sfc.c b/drivers/spi/rockchip_sfc.c
new file mode 100644
index 00000000000..4e2b861f224
--- /dev/null
+++ b/drivers/spi/rockchip_sfc.c
@@ -0,0 +1,646 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Rockchip Serial Flash Controller Driver
+ *
+ * Copyright (c) 2017-2021, Rockchip Inc.
+ * Author: Shawn Lin <shawn.lin@rock-chips.com>
+ * Chris Morgan <macromorgan@hotmail.com>
+ * Jon Lin <Jon.lin@rock-chips.com>
+ */
+
+#include <asm/io.h>
+#include <bouncebuf.h>
+#include <clk.h>
+#include <dm.h>
+#include <linux/bitops.h>
+#include <linux/delay.h>
+#include <linux/iopoll.h>
+#include <spi.h>
+#include <spi-mem.h>
+
+/* System control */
+#define SFC_CTRL 0x0
+#define SFC_CTRL_PHASE_SEL_NEGETIVE BIT(1)
+#define SFC_CTRL_CMD_BITS_SHIFT 8
+#define SFC_CTRL_ADDR_BITS_SHIFT 10
+#define SFC_CTRL_DATA_BITS_SHIFT 12
+
+/* Interrupt mask */
+#define SFC_IMR 0x4
+#define SFC_IMR_RX_FULL BIT(0)
+#define SFC_IMR_RX_UFLOW BIT(1)
+#define SFC_IMR_TX_OFLOW BIT(2)
+#define SFC_IMR_TX_EMPTY BIT(3)
+#define SFC_IMR_TRAN_FINISH BIT(4)
+#define SFC_IMR_BUS_ERR BIT(5)
+#define SFC_IMR_NSPI_ERR BIT(6)
+#define SFC_IMR_DMA BIT(7)
+
+/* Interrupt clear */
+#define SFC_ICLR 0x8
+#define SFC_ICLR_RX_FULL BIT(0)
+#define SFC_ICLR_RX_UFLOW BIT(1)
+#define SFC_ICLR_TX_OFLOW BIT(2)
+#define SFC_ICLR_TX_EMPTY BIT(3)
+#define SFC_ICLR_TRAN_FINISH BIT(4)
+#define SFC_ICLR_BUS_ERR BIT(5)
+#define SFC_ICLR_NSPI_ERR BIT(6)
+#define SFC_ICLR_DMA BIT(7)
+
+/* FIFO threshold level */
+#define SFC_FTLR 0xc
+#define SFC_FTLR_TX_SHIFT 0
+#define SFC_FTLR_TX_MASK 0x1f
+#define SFC_FTLR_RX_SHIFT 8
+#define SFC_FTLR_RX_MASK 0x1f
+
+/* Reset FSM and FIFO */
+#define SFC_RCVR 0x10
+#define SFC_RCVR_RESET BIT(0)
+
+/* Enhanced mode */
+#define SFC_AX 0x14
+
+/* Address Bit number */
+#define SFC_ABIT 0x18
+
+/* Interrupt status */
+#define SFC_ISR 0x1c
+#define SFC_ISR_RX_FULL_SHIFT BIT(0)
+#define SFC_ISR_RX_UFLOW_SHIFT BIT(1)
+#define SFC_ISR_TX_OFLOW_SHIFT BIT(2)
+#define SFC_ISR_TX_EMPTY_SHIFT BIT(3)
+#define SFC_ISR_TX_FINISH_SHIFT BIT(4)
+#define SFC_ISR_BUS_ERR_SHIFT BIT(5)
+#define SFC_ISR_NSPI_ERR_SHIFT BIT(6)
+#define SFC_ISR_DMA_SHIFT BIT(7)
+
+/* FIFO status */
+#define SFC_FSR 0x20
+#define SFC_FSR_TX_IS_FULL BIT(0)
+#define SFC_FSR_TX_IS_EMPTY BIT(1)
+#define SFC_FSR_RX_IS_EMPTY BIT(2)
+#define SFC_FSR_RX_IS_FULL BIT(3)
+#define SFC_FSR_TXLV_MASK GENMASK(12, 8)
+#define SFC_FSR_TXLV_SHIFT 8
+#define SFC_FSR_RXLV_MASK GENMASK(20, 16)
+#define SFC_FSR_RXLV_SHIFT 16
+
+/* FSM status */
+#define SFC_SR 0x24
+#define SFC_SR_IS_IDLE 0x0
+#define SFC_SR_IS_BUSY 0x1
+
+/* Raw interrupt status */
+#define SFC_RISR 0x28
+#define SFC_RISR_RX_FULL BIT(0)
+#define SFC_RISR_RX_UNDERFLOW BIT(1)
+#define SFC_RISR_TX_OVERFLOW BIT(2)
+#define SFC_RISR_TX_EMPTY BIT(3)
+#define SFC_RISR_TRAN_FINISH BIT(4)
+#define SFC_RISR_BUS_ERR BIT(5)
+#define SFC_RISR_NSPI_ERR BIT(6)
+#define SFC_RISR_DMA BIT(7)
+
+/* Version */
+#define SFC_VER 0x2C
+#define SFC_VER_3 0x3
+#define SFC_VER_4 0x4
+#define SFC_VER_5 0x5
+
+/* Delay line controller register */
+#define SFC_DLL_CTRL0 0x3C
+#define SFC_DLL_CTRL0_SCLK_SMP_DLL BIT(15)
+#define SFC_DLL_CTRL0_DLL_MAX_VER4 0xFFU
+#define SFC_DLL_CTRL0_DLL_MAX_VER5 0x1FFU
+
+/* Master trigger */
+#define SFC_DMA_TRIGGER 0x80
+
+/* Src or Dst addr for master */
+#define SFC_DMA_ADDR 0x84
+
+/* Length control register extension 32GB */
+#define SFC_LEN_CTRL 0x88
+#define SFC_LEN_CTRL_TRB_SEL 1
+#define SFC_LEN_EXT 0x8C
+
+/* Command */
+#define SFC_CMD 0x100
+#define SFC_CMD_IDX_SHIFT 0
+#define SFC_CMD_DUMMY_SHIFT 8
+#define SFC_CMD_DIR_SHIFT 12
+#define SFC_CMD_DIR_RD 0
+#define SFC_CMD_DIR_WR 1
+#define SFC_CMD_ADDR_SHIFT 14
+#define SFC_CMD_ADDR_0BITS 0
+#define SFC_CMD_ADDR_24BITS 1
+#define SFC_CMD_ADDR_32BITS 2
+#define SFC_CMD_ADDR_XBITS 3
+#define SFC_CMD_TRAN_BYTES_SHIFT 16
+#define SFC_CMD_CS_SHIFT 30
+
+/* Address */
+#define SFC_ADDR 0x104
+
+/* Data */
+#define SFC_DATA 0x108
+
+/* The controller and its documentation report support for up to 4 CS
+ * devices (0-3); however, only a single CS (CS 0) has been tested due
+ * to the configuration of the available hardware.
+ */
+#define SFC_MAX_CHIPSELECT_NUM 4
+
+/* The SFC can transfer at most 16KB - 1 in a single transfer, so
+ * we cap it at 15.5KB here for alignment.
+ */
+#define SFC_MAX_IOSIZE_VER3 (512 * 31)
+
+#define SFC_MAX_IOSIZE_VER4 (0xFFFFFFFFU)
+
+/* DMA is only enabled for large data transmission */
+#define SFC_DMA_TRANS_THRETHOLD (0x40)
+
+/* The maximum clock values from the datasheet suggest keeping the clock
+ * under 150MHz. No minimum or typical value is suggested, but the U-Boot
+ * BSP driver uses a minimum of 10MHz and a default of 80MHz, which seems
+ * reasonable.
+ */
+#define SFC_MIN_SPEED_HZ (10 * 1000 * 1000)
+#define SFC_DEFAULT_SPEED_HZ (80 * 1000 * 1000)
+#define SFC_MAX_SPEED_HZ (150 * 1000 * 1000)
+
+struct rockchip_sfc {
+ void __iomem *regbase;
+ struct clk hclk;
+ struct clk clk;
+ u32 max_freq;
+ u32 speed;
+ bool use_dma;
+ u32 max_iosize;
+ u16 version;
+};
+
+static int rockchip_sfc_reset(struct rockchip_sfc *sfc)
+{
+ int err;
+ u32 status;
+
+ writel(SFC_RCVR_RESET, sfc->regbase + SFC_RCVR);
+
+ err = readl_poll_timeout(sfc->regbase + SFC_RCVR, status,
+ !(status & SFC_RCVR_RESET),
+ 1000000);
+ if (err)
+ printf("SFC reset never finished\n");
+
+ /* Still need to clear the masked interrupt from RISR */
+ writel(0xFFFFFFFF, sfc->regbase + SFC_ICLR);
+
+ debug("reset\n");
+
+ return err;
+}
+
+static u16 rockchip_sfc_get_version(struct rockchip_sfc *sfc)
+{
+ return (u16)(readl(sfc->regbase + SFC_VER) & 0xffff);
+}
+
+static u32 rockchip_sfc_get_max_iosize(struct rockchip_sfc *sfc)
+{
+ if (rockchip_sfc_get_version(sfc) >= SFC_VER_4)
+ return SFC_MAX_IOSIZE_VER4;
+
+ return SFC_MAX_IOSIZE_VER3;
+}
+
+static int rockchip_sfc_init(struct rockchip_sfc *sfc)
+{
+ writel(0, sfc->regbase + SFC_CTRL);
+ if (rockchip_sfc_get_version(sfc) >= SFC_VER_4)
+ writel(SFC_LEN_CTRL_TRB_SEL, sfc->regbase + SFC_LEN_CTRL);
+
+ return 0;
+}
+
+static int rockchip_sfc_ofdata_to_platdata(struct udevice *bus)
+{
+ struct rockchip_sfc *sfc = dev_get_plat(bus);
+
+ sfc->regbase = dev_read_addr_ptr(bus);
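+ /* The optional "sfc-no-dma" DT property forces FIFO (PIO) transfers */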
+ if (ofnode_read_bool(dev_ofnode(bus), "sfc-no-dma"))
+ sfc->use_dma = false;
+ else
+ sfc->use_dma = true;
+
+#if CONFIG_IS_ENABLED(CLK)
+ int ret;
+
+ ret = clk_get_by_index(bus, 0, &sfc->clk);
+ if (ret < 0) {
+ printf("Could not get clock for %s: %d\n", bus->name, ret);
+ return ret;
+ }
+
+ ret = clk_get_by_index(bus, 1, &sfc->hclk);
+ if (ret < 0) {
+ printf("Could not get ahb clock for %s: %d\n", bus->name, ret);
+ return ret;
+ }
+#endif
+
+ return 0;
+}
+
+static int rockchip_sfc_probe(struct udevice *bus)
+{
+ struct rockchip_sfc *sfc = dev_get_plat(bus);
+ int ret;
+
+#if CONFIG_IS_ENABLED(CLK)
+ ret = clk_enable(&sfc->hclk);
+ if (ret)
+ debug("Enable ahb clock fail %s: %d\n", bus->name, ret);
+
+ ret = clk_enable(&sfc->clk);
+ if (ret)
+ debug("Enable clock fail for %s: %d\n", bus->name, ret);
+
+ ret = clk_set_rate(&sfc->clk, SFC_DEFAULT_SPEED_HZ);
+ if (ret)
+ debug("Could not set sfc clock for %s: %d\n", bus->name, ret);
+#endif
+
+ ret = rockchip_sfc_init(sfc);
+ if (ret)
+ goto err_init;
+
+ sfc->max_iosize = rockchip_sfc_get_max_iosize(sfc);
+ sfc->version = rockchip_sfc_get_version(sfc);
+ sfc->speed = SFC_DEFAULT_SPEED_HZ;
+
+ return 0;
+
+err_init:
+#if CONFIG_IS_ENABLED(CLK)
+ clk_disable(&sfc->clk);
+ clk_disable(&sfc->hclk);
+#endif
+
+ return ret;
+}
+
+static inline int rockchip_sfc_get_fifo_level(struct rockchip_sfc *sfc, int wr)
+{
+ u32 fsr = readl(sfc->regbase + SFC_FSR);
+ int level;
+
+ if (wr)
+ level = (fsr & SFC_FSR_TXLV_MASK) >> SFC_FSR_TXLV_SHIFT;
+ else
+ level = (fsr & SFC_FSR_RXLV_MASK) >> SFC_FSR_RXLV_SHIFT;
+
+ return level;
+}
+
+static int rockchip_sfc_wait_fifo_ready(struct rockchip_sfc *sfc, int wr, u32 timeout)
+{
+ unsigned long tbase = get_timer(0);
+ int level;
+
+ while (!(level = rockchip_sfc_get_fifo_level(sfc, wr))) {
+ if (get_timer(tbase) > timeout) {
+ debug("%s fifo timeout\n", wr ? "write" : "read");
+ return -ETIMEDOUT;
+ }
+ udelay(1);
+ }
+
+ return level;
+}
+
+static void rockchip_sfc_adjust_op_work(struct spi_mem_op *op)
+{
+ if (unlikely(op->dummy.nbytes && !op->addr.nbytes)) {
+ /*
+ * The SFC cannot output DUMMY cycles right after the CMD cycles, so
+ * treat them as ADDR cycles instead.
+ */
+ op->addr.nbytes = op->dummy.nbytes;
+ op->addr.buswidth = op->dummy.buswidth;
+ op->addr.val = 0xFFFFFFFFF;
+
+ op->dummy.nbytes = 0;
+ }
+}
+
+static int rockchip_sfc_wait_for_dma_finished(struct rockchip_sfc *sfc, int timeout)
+{
+ unsigned long tbase;
+
+ /* Wait for the DMA interrupt status */
+ tbase = get_timer(0);
+ while (!(readl(sfc->regbase + SFC_RISR) & SFC_RISR_DMA)) {
+ if (get_timer(tbase) > timeout) {
+ printf("dma timeout\n");
+ rockchip_sfc_reset(sfc);
+
+ return -ETIMEDOUT;
+ }
+
+ udelay(1);
+ }
+
+ writel(0xFFFFFFFF, sfc->regbase + SFC_ICLR);
+
+ return 0;
+}
+
+static int rockchip_sfc_xfer_setup(struct rockchip_sfc *sfc,
+ struct spi_slave *mem,
+ const struct spi_mem_op *op,
+ u32 len)
+{
+ struct dm_spi_slave_plat *plat = dev_get_parent_plat(mem->dev);
+ u32 ctrl = 0, cmd = 0;
+
+ /* set CMD */
+ cmd = op->cmd.opcode;
+ ctrl |= ((op->cmd.buswidth >> 1) << SFC_CTRL_CMD_BITS_SHIFT);
+
+ /* set ADDR */
+ if (op->addr.nbytes) {
+ if (op->addr.nbytes == 4) {
+ cmd |= SFC_CMD_ADDR_32BITS << SFC_CMD_ADDR_SHIFT;
+ } else if (op->addr.nbytes == 3) {
+ cmd |= SFC_CMD_ADDR_24BITS << SFC_CMD_ADDR_SHIFT;
+ } else {
+ cmd |= SFC_CMD_ADDR_XBITS << SFC_CMD_ADDR_SHIFT;
+ writel(op->addr.nbytes * 8 - 1, sfc->regbase + SFC_ABIT);
+ }
+
+ ctrl |= ((op->addr.buswidth >> 1) << SFC_CTRL_ADDR_BITS_SHIFT);
+ }
+
+ /* set DUMMY */
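+ /*
+ * The DUMMY field is programmed in clock cycles; each dummy byte
+ * takes 8 / buswidth cycles on the bus.
+ */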
+ if (op->dummy.nbytes) {
+ if (op->dummy.buswidth == 4)
+ cmd |= op->dummy.nbytes * 2 << SFC_CMD_DUMMY_SHIFT;
+ else if (op->dummy.buswidth == 2)
+ cmd |= op->dummy.nbytes * 4 << SFC_CMD_DUMMY_SHIFT;
+ else
+ cmd |= op->dummy.nbytes * 8 << SFC_CMD_DUMMY_SHIFT;
+ }
+
+ /* set DATA */
+ if (sfc->version >= SFC_VER_4) /* Clear it if no data to transfer */
+ writel(len, sfc->regbase + SFC_LEN_EXT);
+ else
+ cmd |= len << SFC_CMD_TRAN_BYTES_SHIFT;
+ if (len) {
+ if (op->data.dir == SPI_MEM_DATA_OUT)
+ cmd |= SFC_CMD_DIR_WR << SFC_CMD_DIR_SHIFT;
+
+ ctrl |= ((op->data.buswidth >> 1) << SFC_CTRL_DATA_BITS_SHIFT);
+ }
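+ /* An op with an address phase but no data phase is issued as a write */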
+ if (!len && op->addr.nbytes)
+ cmd |= SFC_CMD_DIR_WR << SFC_CMD_DIR_SHIFT;
+
+ /* set the Controller */
+ ctrl |= SFC_CTRL_PHASE_SEL_NEGETIVE;
+ cmd |= plat->cs << SFC_CMD_CS_SHIFT;
+
+ debug("addr.nbytes=%x(x%d) dummy.nbytes=%x(x%d)\n",
+ op->addr.nbytes, op->addr.buswidth,
+ op->dummy.nbytes, op->dummy.buswidth);
+ debug("ctrl=%x cmd=%x addr=%llx len=%x\n",
+ ctrl, cmd, op->addr.val, len);
+
+ writel(ctrl, sfc->regbase + SFC_CTRL);
+ writel(cmd, sfc->regbase + SFC_CMD);
+ if (op->addr.nbytes)
+ writel(op->addr.val, sfc->regbase + SFC_ADDR);
+
+ return 0;
+}
+
+static int rockchip_sfc_write_fifo(struct rockchip_sfc *sfc, const u8 *buf, int len)
+{
+ u8 bytes = len & 0x3;
+ u32 dwords;
+ int tx_level;
+ u32 write_words;
+ u32 tmp = 0;
+
+ dwords = len >> 2;
+ while (dwords) {
+ tx_level = rockchip_sfc_wait_fifo_ready(sfc, SFC_CMD_DIR_WR, 1000);
+ if (tx_level < 0)
+ return tx_level;
+ write_words = min_t(u32, tx_level, dwords);
+ writesl(sfc->regbase + SFC_DATA, buf, write_words);
+ buf += write_words << 2;
+ dwords -= write_words;
+ }
+
+ /* write the remaining non-word-aligned bytes */
+ if (bytes) {
+ tx_level = rockchip_sfc_wait_fifo_ready(sfc, SFC_CMD_DIR_WR, 1000);
+ if (tx_level < 0)
+ return tx_level;
+ memcpy(&tmp, buf, bytes);
+ writel(tmp, sfc->regbase + SFC_DATA);
+ }
+
+ return len;
+}
+
+static int rockchip_sfc_read_fifo(struct rockchip_sfc *sfc, u8 *buf, int len)
+{
+ u8 bytes = len & 0x3;
+ u32 dwords;
+ u8 read_words;
+ int rx_level;
+ int tmp;
+
+ /* word aligned access only */
+ dwords = len >> 2;
+ while (dwords) {
+ rx_level = rockchip_sfc_wait_fifo_ready(sfc, SFC_CMD_DIR_RD, 1000);
+ if (rx_level < 0)
+ return rx_level;
+ read_words = min_t(u32, rx_level, dwords);
+ readsl(sfc->regbase + SFC_DATA, buf, read_words);
+ buf += read_words << 2;
+ dwords -= read_words;
+ }
+
+ /* read the remaining non-word-aligned bytes */
+ if (bytes) {
+ rx_level = rockchip_sfc_wait_fifo_ready(sfc, SFC_CMD_DIR_RD, 1000);
+ if (rx_level < 0)
+ return rx_level;
+ tmp = readl(sfc->regbase + SFC_DATA);
+ memcpy(buf, &tmp, bytes);
+ }
+
+ return len;
+}
+
+static int rockchip_sfc_fifo_transfer_dma(struct rockchip_sfc *sfc, dma_addr_t dma_buf, size_t len)
+{
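+ /* Clear any stale interrupts, program the DMA address and start the master */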
+ writel(0xFFFFFFFF, sfc->regbase + SFC_ICLR);
+ writel((u32)dma_buf, sfc->regbase + SFC_DMA_ADDR);
+ writel(0x1, sfc->regbase + SFC_DMA_TRIGGER);
+
+ return len;
+}
+
+static int rockchip_sfc_xfer_data_poll(struct rockchip_sfc *sfc,
+ const struct spi_mem_op *op, u32 len)
+{
+ debug("xfer_poll len=%x\n", len);
+
+ if (op->data.dir == SPI_MEM_DATA_OUT)
+ return rockchip_sfc_write_fifo(sfc, op->data.buf.out, len);
+ else
+ return rockchip_sfc_read_fifo(sfc, op->data.buf.in, len);
+}
+
+static int rockchip_sfc_xfer_data_dma(struct rockchip_sfc *sfc,
+ const struct spi_mem_op *op, u32 len)
+{
+ struct bounce_buffer bb;
+ unsigned int bb_flags;
+ void *dma_buf;
+ int ret;
+
+ debug("xfer_dma len=%x\n", len);
+
+ if (op->data.dir == SPI_MEM_DATA_OUT) {
+ dma_buf = (void *)op->data.buf.out;
+ bb_flags = GEN_BB_READ;
+ } else {
+ dma_buf = (void *)op->data.buf.in;
+ bb_flags = GEN_BB_WRITE;
+ }
+
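+ /*
+ * Stage the transfer through a bounce buffer so the DMA engine always
+ * operates on a properly aligned buffer.
+ */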
+ ret = bounce_buffer_start(&bb, dma_buf, len, bb_flags);
+ if (ret)
+ return ret;
+
+ ret = rockchip_sfc_fifo_transfer_dma(sfc, (dma_addr_t)bb.bounce_buffer, len);
+ rockchip_sfc_wait_for_dma_finished(sfc, len * 10);
+ bounce_buffer_stop(&bb);
+
+ return ret;
+}
+
+static int rockchip_sfc_xfer_done(struct rockchip_sfc *sfc, u32 timeout_us)
+{
+ unsigned long tbase = get_timer(0);
+ int ret = 0;
+ u32 timeout = timeout_us;
+
+ while (readl(sfc->regbase + SFC_SR) & SFC_SR_IS_BUSY) {
+ if (get_timer(tbase) > timeout) {
+ printf("wait sfc idle timeout\n");
+ rockchip_sfc_reset(sfc);
+
+ return -ETIMEDOUT;
+ }
+
+ udelay(1);
+ }
+
+ return ret;
+}
+
+static int rockchip_sfc_exec_op(struct spi_slave *mem,
+ const struct spi_mem_op *op)
+{
+ struct rockchip_sfc *sfc = dev_get_plat(mem->dev->parent);
+ u32 len = min_t(u32, op->data.nbytes, sfc->max_iosize);
+ int ret;
+
+#if CONFIG_IS_ENABLED(CLK)
+ if (unlikely(mem->max_hz != sfc->speed)) {
+ ret = clk_set_rate(&sfc->clk, clamp(mem->max_hz, (uint)SFC_MIN_SPEED_HZ,
+ (uint)SFC_MAX_SPEED_HZ));
+ if (ret < 0) {
+ printf("set_freq=%dHz failed, check whether the CRU supports this rate\n",
+ mem->max_hz);
+ return ret;
+ }
+
+ sfc->max_freq = mem->max_hz;
+ sfc->speed = mem->max_hz;
+ debug("set_freq=%dHz real_freq=%dHz\n", sfc->max_freq, sfc->speed);
+ }
+#endif
+
+ rockchip_sfc_adjust_op_work((struct spi_mem_op *)op);
+
+ rockchip_sfc_xfer_setup(sfc, mem, op, len);
+ if (len) {
+ if (likely(sfc->use_dma) && !(len & 0x3) && len >= SFC_DMA_TRANS_THRETHOLD)
+ ret = rockchip_sfc_xfer_data_dma(sfc, op, len);
+ else
+ ret = rockchip_sfc_xfer_data_poll(sfc, op, len);
+
+ if (ret != len) {
+ printf("xfer data failed ret %d dir %d\n", ret, op->data.dir);
+
+ return -EIO;
+ }
+ }
+
+ return rockchip_sfc_xfer_done(sfc, 100000);
+}
+
+static int rockchip_sfc_adjust_op_size(struct spi_slave *mem, struct spi_mem_op *op)
+{
+ struct rockchip_sfc *sfc = dev_get_plat(mem->dev->parent);
+
+ op->data.nbytes = min(op->data.nbytes, sfc->max_iosize);
+ return 0;
+}
+
+static int rockchip_sfc_set_speed(struct udevice *bus, uint speed)
+{
+ /* The clock rate is set up later, for each transfer */
+ return 0;
+}
+
+static int rockchip_sfc_set_mode(struct udevice *bus, uint mode)
+{
+ return 0;
+}
+
+static const struct spi_controller_mem_ops rockchip_sfc_mem_ops = {
+ .adjust_op_size = rockchip_sfc_adjust_op_size,
+ .exec_op = rockchip_sfc_exec_op,
+};
+
+static const struct dm_spi_ops rockchip_sfc_ops = {
+ .mem_ops = &rockchip_sfc_mem_ops,
+ .set_speed = rockchip_sfc_set_speed,
+ .set_mode = rockchip_sfc_set_mode,
+};
+
+static const struct udevice_id rockchip_sfc_ids[] = {
+ { .compatible = "rockchip,sfc"},
+ {},
+};
+
+U_BOOT_DRIVER(rockchip_sfc_driver) = {
+ .name = "rockchip_sfc",
+ .id = UCLASS_SPI,
+ .of_match = rockchip_sfc_ids,
+ .ops = &rockchip_sfc_ops,
+ .of_to_plat = rockchip_sfc_ofdata_to_platdata,
+ .plat_auto = sizeof(struct rockchip_sfc),
+ .probe = rockchip_sfc_probe,
+};
diff --git a/drivers/usb/host/ohci-hcd.c b/drivers/usb/host/ohci-hcd.c
index c62d8feecce..fedf0db9c7e 100644
--- a/drivers/usb/host/ohci-hcd.c
+++ b/drivers/usb/host/ohci-hcd.c
@@ -54,7 +54,7 @@
#if defined(CONFIG_CPU_ARM920T) || \
defined(CONFIG_PCI_OHCI) || \
- defined(CONFIG_DM_PCI) || \
+ defined(CONFIG_PCI) || \
defined(CONFIG_SYS_OHCI_USE_NPS)
# define OHCI_USE_NPS /* force NoPowerSwitching mode */
#endif
diff --git a/drivers/video/Kconfig b/drivers/video/Kconfig
index b69ffcae4b2..8b940d70eb2 100644
--- a/drivers/video/Kconfig
+++ b/drivers/video/Kconfig
@@ -470,7 +470,6 @@ config VIDEO_LCD_TDO_TL070WSH30
config VIDEO_LCD_HITACHI_TX18D42VM
bool "Hitachi tx18d42vm LVDS LCD panel support"
- depends on VIDEO
default n
---help---
Support for Hitachi tx18d42vm LVDS LCD panels, these panels have a
@@ -854,6 +853,18 @@ config VIDEO_DT_SIMPLEFB
The video output is initialized by U-Boot, and kept by the
kernel.
+config VIDEO_MCDE_SIMPLE
+ bool "Simple driver for ST-Ericsson MCDE with preconfigured display"
+ depends on DM_VIDEO
+ help
+ Enables a simple display driver for ST-Ericsson MCDE
+ (Multichannel Display Engine), which reads the configuration from
+ the MCDE registers.
+
+ This driver assumes that the display hardware has been initialized
+ before U-Boot starts, and U-Boot will simply render to the
+ pre-allocated frame buffer surface.
+
config OSD
bool "Enable OSD support"
depends on DM
diff --git a/drivers/video/Makefile b/drivers/video/Makefile
index 1c534a6f9ad..7ae0ab2b35c 100644
--- a/drivers/video/Makefile
+++ b/drivers/video/Makefile
@@ -58,6 +58,7 @@ obj-$(CONFIG_VIDEO_LCD_ORISETECH_OTM8009A) += orisetech_otm8009a.o
obj-$(CONFIG_VIDEO_LCD_RAYDIUM_RM68200) += raydium-rm68200.o
obj-$(CONFIG_VIDEO_LCD_SSD2828) += ssd2828.o
obj-$(CONFIG_VIDEO_LCD_TDO_TL070WSH30) += tdo-tl070wsh30.o
+obj-$(CONFIG_VIDEO_MCDE_SIMPLE) += mcde_simple.o
obj-${CONFIG_VIDEO_MESON} += meson/
obj-${CONFIG_VIDEO_MIPI_DSI} += mipi_dsi.o
obj-$(CONFIG_VIDEO_MVEBU) += mvebu_lcd.o
diff --git a/drivers/video/mcde_simple.c b/drivers/video/mcde_simple.c
new file mode 100644
index 00000000000..0924ceee309
--- /dev/null
+++ b/drivers/video/mcde_simple.c
@@ -0,0 +1,141 @@
+// SPDX-License-Identifier: GPL-2.0+
+/* Copyright (C) 2019 Stephan Gerhold */
+
+#include <common.h>
+#include <dm.h>
+#include <log.h>
+#include <video.h>
+#include <asm/io.h>
+#include <linux/bitfield.h>
+#include <linux/iopoll.h>
+
+#define MCDE_EXTSRC0A0 0x200
+#define MCDE_EXTSRC0CONF 0x20C
+#define MCDE_EXTSRC0CONF_BPP GENMASK(11, 8)
+#define MCDE_OVL0CONF 0x404
+#define MCDE_OVL0CONF_PPL GENMASK(10, 0)
+#define MCDE_OVL0CONF_LPF GENMASK(26, 16)
+#define MCDE_CHNL0SYNCHMOD 0x608
+#define MCDE_CHNL0SYNCHMOD_SRC_SYNCH GENMASK(1, 0)
+#define MCDE_CHNL0SYNCHSW 0x60C
+#define MCDE_CHNL0SYNCHSW_SW_TRIG BIT(0)
+#define MCDE_CRA0 0x800
+#define MCDE_CRA0_FLOEN BIT(0)
+
+#define MCDE_FLOW_COMPLETION_TIMEOUT 200000 /* us */
+
+enum mcde_bpp {
+ MCDE_EXTSRC0CONF_BPP_1BPP_PAL,
+ MCDE_EXTSRC0CONF_BPP_2BPP_PAL,
+ MCDE_EXTSRC0CONF_BPP_4BPP_PAL,
+ MCDE_EXTSRC0CONF_BPP_8BPP_PAL,
+ MCDE_EXTSRC0CONF_BPP_RGB444,
+ MCDE_EXTSRC0CONF_BPP_ARGB4444,
+ MCDE_EXTSRC0CONF_BPP_IRGB1555,
+ MCDE_EXTSRC0CONF_BPP_RGB565,
+ MCDE_EXTSRC0CONF_BPP_RGB888,
+ MCDE_EXTSRC0CONF_BPP_XRGB8888,
+ MCDE_EXTSRC0CONF_BPP_ARGB8888,
+ MCDE_EXTSRC0CONF_BPP_YCBCR422,
+};
+
+enum mcde_src_synch {
+ MCDE_CHNL0SYNCHMOD_SRC_SYNCH_HARDWARE,
+ MCDE_CHNL0SYNCHMOD_SRC_SYNCH_NO_SYNCH,
+ MCDE_CHNL0SYNCHMOD_SRC_SYNCH_SOFTWARE,
+};
+
+struct mcde_simple_priv {
+ fdt_addr_t base;
+ enum mcde_src_synch src_synch;
+};
+
+static int mcde_simple_probe(struct udevice *dev)
+{
+ struct mcde_simple_priv *priv = dev_get_priv(dev);
+ struct video_uc_plat *plat = dev_get_uclass_plat(dev);
+ struct video_priv *uc_priv = dev_get_uclass_priv(dev);
+ u32 val;
+
+ priv->base = dev_read_addr(dev);
+ if (priv->base == FDT_ADDR_T_NONE)
+ return -EINVAL;
+
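+ /* EXTSRC0A0 holds the frame buffer address set up by the earlier boot stage */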
+ plat->base = readl(priv->base + MCDE_EXTSRC0A0);
+ if (!plat->base)
+ return -ENODEV;
+
+ val = readl(priv->base + MCDE_OVL0CONF);
+ uc_priv->xsize = FIELD_GET(MCDE_OVL0CONF_PPL, val);
+ uc_priv->ysize = FIELD_GET(MCDE_OVL0CONF_LPF, val);
+ uc_priv->rot = 0;
+
+ val = readl(priv->base + MCDE_EXTSRC0CONF);
+ switch (FIELD_GET(MCDE_EXTSRC0CONF_BPP, val)) {
+ case MCDE_EXTSRC0CONF_BPP_RGB565:
+ uc_priv->bpix = VIDEO_BPP16;
+ break;
+ case MCDE_EXTSRC0CONF_BPP_XRGB8888:
+ case MCDE_EXTSRC0CONF_BPP_ARGB8888:
+ uc_priv->bpix = VIDEO_BPP32;
+ break;
+ default:
+ printf("unsupported format: %#x\n", val);
+ return -EINVAL;
+ }
+
+ val = readl(priv->base + MCDE_CHNL0SYNCHMOD);
+ priv->src_synch = FIELD_GET(MCDE_CHNL0SYNCHMOD_SRC_SYNCH, val);
+
+ plat->size = uc_priv->xsize * uc_priv->ysize * VNBYTES(uc_priv->bpix);
+ debug("MCDE base: %#lx, xsize: %d, ysize: %d, bpp: %d\n",
+ plat->base, uc_priv->xsize, uc_priv->ysize, VNBITS(uc_priv->bpix));
+
+ video_set_flush_dcache(dev, true);
+ return 0;
+}
+
+static int mcde_simple_video_sync(struct udevice *dev)
+{
+ struct mcde_simple_priv *priv = dev_get_priv(dev);
+ unsigned int val;
+
+ if (priv->src_synch != MCDE_CHNL0SYNCHMOD_SRC_SYNCH_SOFTWARE)
+ return 0;
+
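+ /*
+ * In software sync mode a frame is only pushed to the display when
+ * explicitly triggered: enable the flow, trigger once, then disable
+ * the flow again and wait for it to stop.
+ */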
+ /* Enable flow */
+ val = readl(priv->base + MCDE_CRA0);
+ val |= MCDE_CRA0_FLOEN;
+ writel(val, priv->base + MCDE_CRA0);
+
+ /* Trigger a software sync */
+ writel(MCDE_CHNL0SYNCHSW_SW_TRIG, priv->base + MCDE_CHNL0SYNCHSW);
+
+ /* Disable flow */
+ val = readl(priv->base + MCDE_CRA0);
+ val &= ~MCDE_CRA0_FLOEN;
+ writel(val, priv->base + MCDE_CRA0);
+
+ /* Wait for completion */
+ return readl_poll_timeout(priv->base + MCDE_CRA0, val,
+ !(val & MCDE_CRA0_FLOEN),
+ MCDE_FLOW_COMPLETION_TIMEOUT);
+}
+
+static struct video_ops mcde_simple_ops = {
+ .video_sync = mcde_simple_video_sync,
+};
+
+static const struct udevice_id mcde_simple_ids[] = {
+ { .compatible = "ste,mcde" },
+ { }
+};
+
+U_BOOT_DRIVER(mcde_simple) = {
+ .name = "mcde_simple",
+ .id = UCLASS_VIDEO,
+ .ops = &mcde_simple_ops,
+ .of_match = mcde_simple_ids,
+ .probe = mcde_simple_probe,
+ .priv_auto = sizeof(struct mcde_simple_priv),
+};