Diffstat (limited to 'drivers/ufs')
-rw-r--r-- | drivers/ufs/Kconfig | 59
-rw-r--r-- | drivers/ufs/Makefile | 12
-rw-r--r-- | drivers/ufs/cdns-platform.c | 124
-rw-r--r-- | drivers/ufs/ti-j721e-ufs.c | 73
-rw-r--r-- | drivers/ufs/ufs-amd-versal2.c | 463
-rw-r--r-- | drivers/ufs/ufs-pci.c | 44
-rw-r--r-- | drivers/ufs/ufs-qcom.c | 670
-rw-r--r-- | drivers/ufs/ufs-qcom.h | 147
-rw-r--r-- | drivers/ufs/ufs-renesas.c | 412
-rw-r--r-- | drivers/ufs/ufs-uclass.c | 17
-rw-r--r-- | drivers/ufs/ufs.c | 2078
-rw-r--r-- | drivers/ufs/ufs.h | 787
-rw-r--r-- | drivers/ufs/ufshcd-dwc.c | 133
-rw-r--r-- | drivers/ufs/ufshcd-dwc.h | 69
-rw-r--r-- | drivers/ufs/ufshci-dwc.h | 32
-rw-r--r-- | drivers/ufs/ufshci.h | 469
-rw-r--r-- | drivers/ufs/unipro.h | 305
17 files changed, 5894 insertions(+), 0 deletions(-)
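
All of the platform drivers added by this patch follow the same device-model pattern: .bind creates a child SCSI host via ufs_scsi_bind(), and .probe passes vendor-specific ops to ufshcd_probe(). As a minimal sketch of how a caller then drives the resulting stack (illustrative only, not part of this patch; it assumes U-Boot's existing ufs_probe() helper from <ufs.h> and scsi_scan() from <scsi.h>, and the function name board_start_ufs is hypothetical):

#include <scsi.h>
#include <ufs.h>

/*
 * Hypothetical helper: probe every bound UCLASS_UFS controller, then
 * rescan SCSI so the LUNs exposed through ufs_scsi_bind() show up as
 * block devices (the same flow as the "ufs init" and "scsi scan"
 * shell commands).
 */
static int board_start_ufs(void)
{
	int ret;

	ret = ufs_probe();
	if (ret)
		return ret;

	return scsi_scan(true);
}
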
diff --git a/drivers/ufs/Kconfig b/drivers/ufs/Kconfig
new file mode 100644
index 00000000000..b08ca08b07c
--- /dev/null
+++ b/drivers/ufs/Kconfig
@@ -0,0 +1,59 @@
+menu "UFS Host Controller Support"
+
+config UFS
+	bool "Support UFS controllers"
+	depends on SCSI
+	select CHARSET
+	help
+	  This selects support for Universal Flash Storage (UFS).
+	  Say Y here if you want UFS support.
+
+config CADENCE_UFS
+	bool "Cadence platform driver for UFS"
+	depends on UFS
+	help
+	  This selects the platform driver for the Cadence UFS host
+	  controller present on TI's J721e devices.
+
+config UFS_PCI
+	bool "PCI bus based UFS Controller support"
+	depends on PCI && UFS
+	help
+	  This selects the PCI UFS Host Controller Interface. Select this if
+	  you have a UFS Host Controller with a PCI Interface.
+
+	  If you have a controller with this interface, say Y here.
+
+	  If unsure, say N.
+
+config QCOM_UFS
+	bool "Qualcomm Host Controller driver for UFS"
+	depends on UFS && ARCH_SNAPDRAGON
+	help
+	  This selects the platform driver for the UFS host
+	  controller present on Qualcomm Snapdragon SoCs.
+
+config TI_J721E_UFS
+	bool "Glue Layer driver for UFS on TI J721E devices"
+	help
+	  This selects the glue layer driver for the Cadence controller
+	  present on TI's J721E devices.
+
+config UFS_RENESAS
+	bool "Renesas specific hooks to UFS controller platform driver"
+	depends on UFS
+	select BOUNCE_BUFFER
+	help
+	  This selects the Renesas specific additions to the UFSHCD platform
+	  driver. The UFS host on Renesas needs some vendor specific
+	  configuration before accessing the hardware.
+
+config UFS_AMD_VERSAL2
+	bool "AMD Versal Gen 2 UFS controller platform driver"
+	depends on UFS && ZYNQMP_FIRMWARE
+	help
+	  This selects the AMD specific additions to the UFSHCD platform
+	  driver. The UFS host on AMD needs some vendor specific configuration
+	  before accessing the hardware.
+ +endmenu diff --git a/drivers/ufs/Makefile b/drivers/ufs/Makefile new file mode 100644 index 00000000000..2a378e45111 --- /dev/null +++ b/drivers/ufs/Makefile @@ -0,0 +1,12 @@ +# SPDX-License-Identifier: GPL-2.0 +# +# Copyright (C) 2019 Texas Instruments Incorporated - https://www.ti.com +# + +obj-$(CONFIG_UFS) += ufs.o ufs-uclass.o +obj-$(CONFIG_CADENCE_UFS) += cdns-platform.o +obj-$(CONFIG_QCOM_UFS) += ufs-qcom.o +obj-$(CONFIG_TI_J721E_UFS) += ti-j721e-ufs.o +obj-$(CONFIG_UFS_PCI) += ufs-pci.o +obj-$(CONFIG_UFS_RENESAS) += ufs-renesas.o +obj-$(CONFIG_UFS_AMD_VERSAL2) += ufs-amd-versal2.o ufshcd-dwc.o diff --git a/drivers/ufs/cdns-platform.c b/drivers/ufs/cdns-platform.c new file mode 100644 index 00000000000..510a6a6aa5d --- /dev/null +++ b/drivers/ufs/cdns-platform.c @@ -0,0 +1,124 @@ +// SPDX-License-Identifier: GPL-2.0+ +/** + * cdns-platform.c - Platform driver for Cadence UFSHCI device + * + * Copyright (C) 2019 Texas Instruments Incorporated - https://www.ti.com + */ + +#include <clk.h> +#include <dm.h> +#include <ufs.h> +#include <asm/io.h> +#include <dm/device_compat.h> +#include <linux/bitops.h> +#include <linux/err.h> +#include <linux/time.h> + +#include "ufs.h" + +#define CDNS_UFS_REG_HCLKDIV 0xFC +#define CDNS_UFS_REG_PHY_XCFGD1 0x113C + +static int cdns_ufs_link_startup_notify(struct ufs_hba *hba, + enum ufs_notify_change_status status) +{ + hba->quirks |= UFSHCD_QUIRK_BROKEN_LCC; + switch (status) { + case PRE_CHANGE: + return ufshcd_dme_set(hba, + UIC_ARG_MIB(PA_LOCAL_TX_LCC_ENABLE), + 0); + case POST_CHANGE: + ; + } + + return 0; +} + +static int cdns_ufs_set_hclkdiv(struct ufs_hba *hba) +{ + struct clk clk; + unsigned long core_clk_rate = 0; + u32 core_clk_div = 0; + int ret; + + ret = clk_get_by_name(hba->dev, "core_clk", &clk); + if (ret) { + dev_err(hba->dev, "failed to get core_clk clock\n"); + return ret; + } + + core_clk_rate = clk_get_rate(&clk); + if (IS_ERR_VALUE(core_clk_rate)) { + dev_err(hba->dev, "%s: unable to find core_clk rate\n", + __func__); + return core_clk_rate; + } + + core_clk_div = core_clk_rate / USEC_PER_SEC; + ufshcd_writel(hba, core_clk_div, CDNS_UFS_REG_HCLKDIV); + + return 0; +} + +static int cdns_ufs_hce_enable_notify(struct ufs_hba *hba, + enum ufs_notify_change_status status) +{ + switch (status) { + case PRE_CHANGE: + return cdns_ufs_set_hclkdiv(hba); + case POST_CHANGE: + ; + } + + return 0; +} + +static int cdns_ufs_init(struct ufs_hba *hba) +{ + u32 data; + + /* Increase RX_Advanced_Min_ActivateTime_Capability */ + data = ufshcd_readl(hba, CDNS_UFS_REG_PHY_XCFGD1); + data |= BIT(24); + ufshcd_writel(hba, data, CDNS_UFS_REG_PHY_XCFGD1); + + return 0; +} + +static struct ufs_hba_ops cdns_pltfm_hba_ops = { + .init = cdns_ufs_init, + .hce_enable_notify = cdns_ufs_hce_enable_notify, + .link_startup_notify = cdns_ufs_link_startup_notify, +}; + +static int cdns_ufs_pltfm_probe(struct udevice *dev) +{ + int err = ufshcd_probe(dev, &cdns_pltfm_hba_ops); + if (err) + dev_err(dev, "ufshcd_probe() failed %d\n", err); + + return err; +} + +static int cdns_ufs_pltfm_bind(struct udevice *dev) +{ + struct udevice *scsi_dev; + + return ufs_scsi_bind(dev, &scsi_dev); +} + +static const struct udevice_id cdns_ufs_pltfm_ids[] = { + { + .compatible = "cdns,ufshc-m31-16nm", + }, + {}, +}; + +U_BOOT_DRIVER(cdns_ufs_pltfm) = { + .name = "cdns-ufs-pltfm", + .id = UCLASS_UFS, + .of_match = cdns_ufs_pltfm_ids, + .probe = cdns_ufs_pltfm_probe, + .bind = cdns_ufs_pltfm_bind, +}; diff --git a/drivers/ufs/ti-j721e-ufs.c b/drivers/ufs/ti-j721e-ufs.c new file mode 
100644 index 00000000000..c5c08610ffd --- /dev/null +++ b/drivers/ufs/ti-j721e-ufs.c @@ -0,0 +1,73 @@ +// SPDX-License-Identifier: GPL-2.0+ +/* + * Copyright (C) 2019 Texas Instruments Incorporated - https://www.ti.com/ + */ + +#include <asm/io.h> +#include <clk.h> +#include <dm.h> +#include <dm/device_compat.h> +#include <linux/bitops.h> +#include <linux/err.h> + +#define UFS_SS_CTRL 0x4 +#define UFS_SS_RST_N_PCS BIT(0) +#define UFS_SS_CLK_26MHZ BIT(4) + +static int ti_j721e_ufs_probe(struct udevice *dev) +{ + void __iomem *base; + unsigned int clock; + struct clk clk; + u32 reg = 0; + int ret; + + ret = clk_get_by_index(dev, 0, &clk); + if (ret) { + dev_err(dev, "failed to get M-PHY clock\n"); + return ret; + } + + clock = clk_get_rate(&clk); + if (IS_ERR_VALUE(clock)) { + dev_err(dev, "failed to get rate\n"); + return ret; + } + + base = dev_remap_addr_index(dev, 0); + + if (clock == 26000000) + reg |= UFS_SS_CLK_26MHZ; + /* Take UFS slave device out of reset */ + reg |= UFS_SS_RST_N_PCS; + writel(reg, base + UFS_SS_CTRL); + + return 0; +} + +static int ti_j721e_ufs_remove(struct udevice *dev) +{ + void __iomem *base = dev_remap_addr_index(dev, 0); + u32 reg = readl(base + UFS_SS_CTRL); + + reg &= ~UFS_SS_RST_N_PCS; + writel(reg, base + UFS_SS_CTRL); + + return 0; +} + +static const struct udevice_id ti_j721e_ufs_ids[] = { + { + .compatible = "ti,j721e-ufs", + }, + {}, +}; + +U_BOOT_DRIVER(ti_j721e_ufs) = { + .name = "ti-j721e-ufs", + .id = UCLASS_MISC, + .of_match = ti_j721e_ufs_ids, + .probe = ti_j721e_ufs_probe, + .remove = ti_j721e_ufs_remove, + .flags = DM_FLAG_OS_PREPARE, +}; diff --git a/drivers/ufs/ufs-amd-versal2.c b/drivers/ufs/ufs-amd-versal2.c new file mode 100644 index 00000000000..1c5ed538370 --- /dev/null +++ b/drivers/ufs/ufs-amd-versal2.c @@ -0,0 +1,463 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (C) 2024 Advanced Micro Devices, Inc. 
+ */ + +#include <clk.h> +#include <dm.h> +#include <ufs.h> +#include <asm/io.h> +#include <dm/device_compat.h> +#include <zynqmp_firmware.h> +#include <linux/bitops.h> +#include <linux/delay.h> +#include <linux/err.h> +#include <linux/time.h> +#include <reset.h> + +#include "ufs.h" +#include "ufshcd-dwc.h" +#include "ufshci-dwc.h" + +#define SRAM_CSR_INIT_DONE_MASK BIT(0) +#define SRAM_CSR_EXT_LD_DONE_MASK BIT(1) +#define SRAM_CSR_BYPASS_MASK BIT(2) + +#define MPHY_FAST_RX_AFE_CAL BIT(2) +#define MPHY_FW_CALIB_CFG_VAL BIT(8) + +#define TX_RX_CFG_RDY_MASK GENMASK(3, 0) + +#define TIMEOUT_MICROSEC 1000000L + +struct ufs_versal2_priv { + struct ufs_hba *hba; + struct reset_ctl *rstc; + struct reset_ctl *rstphy; + u32 phy_mode; + u32 host_clk; + u8 attcompval0; + u8 attcompval1; + u8 ctlecompval0; + u8 ctlecompval1; +}; + +static int ufs_versal2_phy_reg_write(struct ufs_hba *hba, u32 addr, u32 val) +{ + static struct ufshcd_dme_attr_val phy_write_attrs[] = { + { UIC_ARG_MIB(CBCREGADDRLSB), 0, DME_LOCAL }, + { UIC_ARG_MIB(CBCREGADDRMSB), 0, DME_LOCAL }, + { UIC_ARG_MIB(CBCREGWRLSB), 0, DME_LOCAL }, + { UIC_ARG_MIB(CBCREGWRMSB), 0, DME_LOCAL }, + { UIC_ARG_MIB(CBCREGRDWRSEL), 1, DME_LOCAL }, + { UIC_ARG_MIB(VS_MPHYCFGUPDT), 1, DME_LOCAL } + }; + + phy_write_attrs[0].mib_val = (u8)addr; + phy_write_attrs[1].mib_val = (u8)(addr >> 8); + phy_write_attrs[2].mib_val = (u8)val; + phy_write_attrs[3].mib_val = (u8)(val >> 8); + + return ufshcd_dwc_dme_set_attrs(hba, phy_write_attrs, ARRAY_SIZE(phy_write_attrs)); +} + +static int ufs_versal2_phy_reg_read(struct ufs_hba *hba, u32 addr, u32 *val) +{ + u32 mib_val; + int ret; + static struct ufshcd_dme_attr_val phy_read_attrs[] = { + { UIC_ARG_MIB(CBCREGADDRLSB), 0, DME_LOCAL }, + { UIC_ARG_MIB(CBCREGADDRMSB), 0, DME_LOCAL }, + { UIC_ARG_MIB(CBCREGRDWRSEL), 0, DME_LOCAL }, + { UIC_ARG_MIB(VS_MPHYCFGUPDT), 1, DME_LOCAL } + }; + + phy_read_attrs[0].mib_val = (u8)addr; + phy_read_attrs[1].mib_val = (u8)(addr >> 8); + + ret = ufshcd_dwc_dme_set_attrs(hba, phy_read_attrs, ARRAY_SIZE(phy_read_attrs)); + if (ret) + return ret; + + ret = ufshcd_dme_get(hba, UIC_ARG_MIB(CBCREGRDLSB), &mib_val); + if (ret) + return ret; + + *val = mib_val; + ret = ufshcd_dme_get(hba, UIC_ARG_MIB(CBCREGRDMSB), &mib_val); + if (ret) + return ret; + + *val |= (mib_val << 8); + + return 0; +} + +static int ufs_versal2_enable_phy(struct ufs_hba *hba) +{ + u32 offset, reg; + int ret; + + ret = ufshcd_dme_set(hba, UIC_ARG_MIB(VS_MPHYDISABLE), 0); + if (ret) + return ret; + + ret = ufshcd_dme_set(hba, UIC_ARG_MIB(VS_MPHYCFGUPDT), 1); + if (ret) + return ret; + + /* Check Tx/Rx FSM states */ + for (offset = 0; offset < 2; offset++) { + u32 time_left, mibsel; + + time_left = TIMEOUT_MICROSEC; + mibsel = UIC_ARG_MIB_SEL(MTX_FSM_STATE, UIC_ARG_MPHY_TX_GEN_SEL_INDEX(offset)); + do { + ret = ufshcd_dme_get(hba, mibsel, ®); + if (ret) + return ret; + + if (reg == TX_STATE_HIBERN8 || reg == TX_STATE_SLEEP || + reg == TX_STATE_LSBURST) + break; + + time_left--; + mdelay(5); + } while (time_left); + + if (!time_left) { + dev_err(hba->dev, "Invalid Tx FSM state.\n"); + return -ETIMEDOUT; + } + + time_left = TIMEOUT_MICROSEC; + mibsel = UIC_ARG_MIB_SEL(MRX_FSM_STATE, UIC_ARG_MPHY_RX_GEN_SEL_INDEX(offset)); + do { + ret = ufshcd_dme_get(hba, mibsel, ®); + if (ret) + return ret; + + if (reg == RX_STATE_HIBERN8 || reg == RX_STATE_SLEEP || + reg == RX_STATE_LSBURST) + break; + + time_left--; + mdelay(5); + } while (time_left); + + if (!time_left) { + dev_err(hba->dev, "Invalid Rx FSM state.\n"); + return 
-ETIMEDOUT; + } + } + + return 0; +} + +static int ufs_versal2_setup_phy(struct ufs_hba *hba) +{ + struct ufs_versal2_priv *priv = dev_get_priv(hba->dev); + int ret; + u32 reg; + + /* Bypass RX-AFE offset calibrations (ATT/CTLE) */ + ret = ufs_versal2_phy_reg_read(hba, FAST_FLAGS(0), ®); + if (ret) + return ret; + + reg |= MPHY_FAST_RX_AFE_CAL; + ret = ufs_versal2_phy_reg_write(hba, FAST_FLAGS(0), reg); + if (ret) + return ret; + + ret = ufs_versal2_phy_reg_read(hba, FAST_FLAGS(1), ®); + if (ret) + return ret; + + reg |= MPHY_FAST_RX_AFE_CAL; + ret = ufs_versal2_phy_reg_write(hba, FAST_FLAGS(1), reg); + if (ret) + return ret; + + /* Program ATT and CTLE compensation values */ + if (priv->attcompval0) { + ret = ufs_versal2_phy_reg_write(hba, RX_AFE_ATT_IDAC(0), priv->attcompval0); + if (ret) + return ret; + } + + if (priv->attcompval1) { + ret = ufs_versal2_phy_reg_write(hba, RX_AFE_ATT_IDAC(1), priv->attcompval1); + if (ret) + return ret; + } + + if (priv->ctlecompval0) { + ret = ufs_versal2_phy_reg_write(hba, RX_AFE_CTLE_IDAC(0), priv->ctlecompval0); + if (ret) + return ret; + } + + if (priv->ctlecompval1) { + ret = ufs_versal2_phy_reg_write(hba, RX_AFE_CTLE_IDAC(1), priv->ctlecompval1); + if (ret) + return ret; + } + + ret = ufs_versal2_phy_reg_read(hba, FW_CALIB_CCFG(0), ®); + if (ret) + return ret; + + reg |= MPHY_FW_CALIB_CFG_VAL; + ret = ufs_versal2_phy_reg_write(hba, FW_CALIB_CCFG(0), reg); + if (ret) + return ret; + + ret = ufs_versal2_phy_reg_read(hba, FW_CALIB_CCFG(1), ®); + if (ret) + return ret; + + reg |= MPHY_FW_CALIB_CFG_VAL; + return ufs_versal2_phy_reg_write(hba, FW_CALIB_CCFG(1), reg); +} + +static int ufs_versal2_phy_init(struct ufs_hba *hba) +{ + struct ufs_versal2_priv *priv = dev_get_priv(hba->dev); + u32 reg, time_left; + int ret; + static const struct ufshcd_dme_attr_val rmmi_attrs[] = { + { UIC_ARG_MIB(CBREFCLKCTRL2), CBREFREFCLK_GATE_OVR_EN, DME_LOCAL }, + { UIC_ARG_MIB(CBCRCTRL), 1, DME_LOCAL }, + { UIC_ARG_MIB(CBC10DIRECTCONF2), 1, DME_LOCAL }, + { UIC_ARG_MIB(VS_MPHYCFGUPDT), 1, DME_LOCAL } + }; + + /* Wait for Tx/Rx config_rdy */ + time_left = TIMEOUT_MICROSEC; + do { + time_left--; + ret = zynqmp_pm_ufs_get_txrx_cfgrdy(®); + if (ret) + return ret; + + reg &= TX_RX_CFG_RDY_MASK; + if (!reg) + break; + + mdelay(5); + } while (time_left); + + if (!time_left) { + dev_err(hba->dev, "Tx/Rx configuration signal busy.\n"); + return -ETIMEDOUT; + } + + ret = ufshcd_dwc_dme_set_attrs(hba, rmmi_attrs, ARRAY_SIZE(rmmi_attrs)); + if (ret) + return ret; + + /* DeAssert PHY reset */ + ret = reset_deassert(priv->rstphy); + if (ret) { + dev_err(hba->dev, "ufsphy reset deassert failed\n"); + return ret; + } + + /* Wait for SRAM init done */ + time_left = TIMEOUT_MICROSEC; + do { + time_left--; + ret = zynqmp_pm_ufs_sram_csr_read(®); + if (ret) + return ret; + + reg &= SRAM_CSR_INIT_DONE_MASK; + if (reg) + break; + + mdelay(5); + } while (time_left); + + if (!time_left) { + dev_err(hba->dev, "SRAM initialization failed.\n"); + return -ETIMEDOUT; + } + + ret = ufs_versal2_setup_phy(hba); + if (ret) + return ret; + + return ufs_versal2_enable_phy(hba); +} + +static int ufs_versal2_init(struct ufs_hba *hba) +{ + struct ufs_versal2_priv *priv = dev_get_priv(hba->dev); + struct clk clk; + unsigned long core_clk_rate = 0; + u32 cal; + int ret = 0; + + priv->phy_mode = UFSHCD_DWC_PHY_MODE_ROM; + + ret = clk_get_by_name(hba->dev, "core_clk", &clk); + if (ret) { + dev_err(hba->dev, "failed to get core_clk clock\n"); + return ret; + } + + core_clk_rate = clk_get_rate(&clk); + if 
(IS_ERR_VALUE(core_clk_rate)) { + dev_err(hba->dev, "%s: unable to find core_clk rate\n", + __func__); + return core_clk_rate; + } + priv->host_clk = core_clk_rate; + + priv->rstc = devm_reset_control_get(hba->dev, "ufshc-rst"); + if (IS_ERR(priv->rstc)) { + dev_err(hba->dev, "failed to get reset ctl: ufshc-rst\n"); + return PTR_ERR(priv->rstc); + } + priv->rstphy = devm_reset_control_get(hba->dev, "ufsphy-rst"); + if (IS_ERR(priv->rstphy)) { + dev_err(hba->dev, "failed to get reset ctl: ufsphy-rst\n"); + return PTR_ERR(priv->rstphy); + } + + ret = zynqmp_pm_ufs_cal_reg(&cal); + if (ret) + return ret; + + priv->attcompval0 = (u8)cal; + priv->attcompval1 = (u8)(cal >> 8); + priv->ctlecompval0 = (u8)(cal >> 16); + priv->ctlecompval1 = (u8)(cal >> 24); + + return ret; +} + +static int ufs_versal2_hce_enable_notify(struct ufs_hba *hba, + enum ufs_notify_change_status status) +{ + struct ufs_versal2_priv *priv = dev_get_priv(hba->dev); + u32 sram_csr; + int ret; + + switch (status) { + case PRE_CHANGE: + /* Assert RST_UFS Reset for UFS block in PMX_IOU */ + ret = reset_assert(priv->rstc); + if (ret) { + dev_err(hba->dev, "ufshc reset assert failed, err = %d\n", ret); + return ret; + } + + /* Assert PHY reset */ + ret = reset_assert(priv->rstphy); + if (ret) { + dev_err(hba->dev, "ufsphy reset assert failed, err = %d\n", ret); + return ret; + } + + ret = zynqmp_pm_ufs_sram_csr_read(&sram_csr); + if (ret) + return ret; + + if (!priv->phy_mode) { + sram_csr &= ~SRAM_CSR_EXT_LD_DONE_MASK; + sram_csr |= SRAM_CSR_BYPASS_MASK; + } else { + dev_err(hba->dev, "Invalid phy-mode %d.\n", priv->phy_mode); + return -EINVAL; + } + + ret = zynqmp_pm_ufs_sram_csr_write(&sram_csr); + if (ret) + return ret; + + /* De Assert RST_UFS Reset for UFS block in PMX_IOU */ + ret = reset_deassert(priv->rstc); + if (ret) + dev_err(hba->dev, "ufshc reset deassert failed, err = %d\n", ret); + + break; + case POST_CHANGE: + ret = ufs_versal2_phy_init(hba); + if (ret) + dev_err(hba->dev, "Phy init failed (%d)\n", ret); + + break; + default: + ret = -EINVAL; + break; + } + + return ret; +} + +static int ufs_versal2_link_startup_notify(struct ufs_hba *hba, + enum ufs_notify_change_status status) +{ + struct ufs_versal2_priv *priv = dev_get_priv(hba->dev); + int ret = 0; + + switch (status) { + case PRE_CHANGE: + if (priv->host_clk) { + u32 core_clk_div = priv->host_clk / TIMEOUT_MICROSEC; + + ufshcd_writel(hba, core_clk_div, DWC_UFS_REG_HCLKDIV); + } + break; + case POST_CHANGE: + ret = ufshcd_dwc_link_startup_notify(hba, status); + break; + default: + ret = -EINVAL; + break; + } + + return ret; +} + +static struct ufs_hba_ops ufs_versal2_hba_ops = { + .init = ufs_versal2_init, + .link_startup_notify = ufs_versal2_link_startup_notify, + .hce_enable_notify = ufs_versal2_hce_enable_notify, +}; + +static int ufs_versal2_probe(struct udevice *dev) +{ + int ret; + + /* Perform generic probe */ + ret = ufshcd_probe(dev, &ufs_versal2_hba_ops); + if (ret) + dev_err(dev, "ufshcd_probe() failed %d\n", ret); + + return ret; +} + +static int ufs_versal2_bind(struct udevice *dev) +{ + struct udevice *scsi_dev; + + return ufs_scsi_bind(dev, &scsi_dev); +} + +static const struct udevice_id ufs_versal2_ids[] = { + { + .compatible = "amd,versal2-ufs", + }, + {}, +}; + +U_BOOT_DRIVER(ufs_versal2_pltfm) = { + .name = "ufs-versal2-pltfm", + .id = UCLASS_UFS, + .of_match = ufs_versal2_ids, + .probe = ufs_versal2_probe, + .bind = ufs_versal2_bind, +}; diff --git a/drivers/ufs/ufs-pci.c b/drivers/ufs/ufs-pci.c new file mode 100644 index 
00000000000..871f3f50f5c --- /dev/null +++ b/drivers/ufs/ufs-pci.c @@ -0,0 +1,44 @@ +// SPDX-License-Identifier: GPL-2.0+ +/* + * Copyright (C) 2023 tinylab.org + * Author: Bin Meng <bmeng@tinylab.org> + */ + +#include <dm.h> +#include <errno.h> +#include <pci.h> +#include <ufs.h> +#include <dm/device_compat.h> +#include "ufs.h" + +static int ufs_pci_bind(struct udevice *dev) +{ + struct udevice *scsi_dev; + + return ufs_scsi_bind(dev, &scsi_dev); +} + +static int ufs_pci_probe(struct udevice *dev) +{ + int err; + + err = ufshcd_probe(dev, NULL); + if (err) + dev_err(dev, "%s failed (ret=%d)\n", __func__, err); + + return err; +} + +U_BOOT_DRIVER(ufs_pci) = { + .name = "ufs_pci", + .id = UCLASS_UFS, + .bind = ufs_pci_bind, + .probe = ufs_pci_probe, +}; + +static struct pci_device_id ufs_supported[] = { + { PCI_DEVICE(PCI_VENDOR_ID_REDHAT, PCI_DEVICE_ID_REDHAT_UFS) }, + {}, +}; + +U_BOOT_PCI_DEVICE(ufs_pci, ufs_supported); diff --git a/drivers/ufs/ufs-qcom.c b/drivers/ufs/ufs-qcom.c new file mode 100644 index 00000000000..843585726c7 --- /dev/null +++ b/drivers/ufs/ufs-qcom.c @@ -0,0 +1,670 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (c) 2013-2016, Linux Foundation. All rights reserved. + * Copyright (C) 2023-2024 Linaro Limited + * Authors: + * - Bhupesh Sharma <bhupesh.sharma@linaro.org> + * - Neil Armstrong <neil.armstrong@linaro.org> + * + * Based on Linux driver + */ + +#include <asm/io.h> +#include <clk.h> +#include <dm.h> +#include <dm/device_compat.h> +#include <generic-phy.h> +#include <ufs.h> +#include <asm/gpio.h> + +#include <linux/bitops.h> +#include <linux/delay.h> +#include <linux/err.h> + +#include "ufs.h" +#include "ufs-qcom.h" + +#define ceil(freq, div) ((freq) % (div) == 0 ? ((freq) / (div)) : ((freq) / (div) + 1)) + +static void ufs_qcom_dev_ref_clk_ctrl(struct ufs_hba *hba, bool enable); + +static int ufs_qcom_enable_clks(struct ufs_qcom_priv *priv) +{ + int err; + + if (priv->is_clks_enabled) + return 0; + + err = clk_enable_bulk(&priv->clks); + if (err) + return err; + + priv->is_clks_enabled = true; + + return 0; +} + +static int ufs_qcom_init_clks(struct ufs_qcom_priv *priv) +{ + int err; + struct udevice *dev = priv->hba->dev; + + err = clk_get_bulk(dev, &priv->clks); + if (err) + return err; + + return 0; +} + +static int ufs_qcom_check_hibern8(struct ufs_hba *hba) +{ + int err, retry_count = 50; + u32 tx_fsm_val = 0; + + do { + err = ufshcd_dme_get(hba, + UIC_ARG_MIB_SEL(MPHY_TX_FSM_STATE, + UIC_ARG_MPHY_TX_GEN_SEL_INDEX(0)), + &tx_fsm_val); + if (err || tx_fsm_val == TX_FSM_HIBERN8) + break; + + /* max. 
200us */
+		udelay(200);
+		retry_count--;
+	} while (retry_count != 0);
+
+	/* Check the state again */
+	err = ufshcd_dme_get(hba,
+			     UIC_ARG_MIB_SEL(MPHY_TX_FSM_STATE,
+					     UIC_ARG_MPHY_TX_GEN_SEL_INDEX(0)),
+			     &tx_fsm_val);
+
+	if (err) {
+		dev_err(hba->dev, "%s: unable to get TX_FSM_STATE, err %d\n",
+			__func__, err);
+	} else if (tx_fsm_val != TX_FSM_HIBERN8) {
+		err = tx_fsm_val;
+		dev_err(hba->dev, "%s: invalid TX_FSM_STATE = %d\n",
+			__func__, err);
+	}
+
+	return err;
+}
+
+static void ufs_qcom_select_unipro_mode(struct ufs_qcom_priv *priv)
+{
+	ufshcd_rmwl(priv->hba, QUNIPRO_SEL, QUNIPRO_SEL, REG_UFS_CFG1);
+
+	if (priv->hw_ver.major >= 0x05)
+		ufshcd_rmwl(priv->hba, QUNIPRO_G4_SEL, 0, REG_UFS_CFG0);
+}
+
+/*
+ * ufs_qcom_reset - reset host controller and PHY
+ */
+static int ufs_qcom_reset(struct ufs_hba *hba)
+{
+	struct ufs_qcom_priv *priv = dev_get_priv(hba->dev);
+	int ret;
+
+	ret = reset_assert(&priv->core_reset);
+	if (ret) {
+		dev_err(hba->dev, "%s: core_reset assert failed, err = %d\n",
+			__func__, ret);
+		return ret;
+	}
+
+	/*
+	 * The hardware requirement for delay between assert/deassert
+	 * is at least 3-4 sleep clock (32.7KHz) cycles, which comes to
+	 * ~125us (4/32768). To be on the safe side add 200us delay.
+	 */
+	udelay(210);
+
+	ret = reset_deassert(&priv->core_reset);
+	if (ret)
+		dev_err(hba->dev, "%s: core_reset deassert failed, err = %d\n",
+			__func__, ret);
+
+	udelay(1100);
+
+	return 0;
+}
+
+/**
+ * ufs_qcom_advertise_quirks - advertise the known QCOM UFS controller quirks
+ * @hba: host controller instance
+ *
+ * The QCOM UFS host controller may have some non-standard behaviours
+ * (quirks) beyond what the UFSHCI specification defines. Advertise all
+ * such quirks to the standard UFS host controller driver so that it
+ * takes them into account.
+ */
+static void ufs_qcom_advertise_quirks(struct ufs_hba *hba)
+{
+	struct ufs_qcom_priv *priv = dev_get_priv(hba->dev);
+
+	if (priv->hw_ver.major == 0x2)
+		hba->quirks |= UFSHCD_QUIRK_BROKEN_UFS_HCI_VERSION;
+
+	if (priv->hw_ver.major > 0x3)
+		hba->quirks |= UFSHCD_QUIRK_REINIT_AFTER_MAX_GEAR_SWITCH;
+}
+
+/**
+ * ufs_qcom_setup_clocks - enable/disable clocks
+ * @hba: host controller instance
+ * @on: If true, enable clocks, else disable them.
+ * @status: PRE_CHANGE or POST_CHANGE notify
+ *
+ * Returns 0 on success, non-zero on failure.
+ */
+static int ufs_qcom_setup_clocks(struct ufs_hba *hba, bool on,
+				 enum ufs_notify_change_status status)
+{
+	switch (status) {
+	case PRE_CHANGE:
+		if (!on)
+			/* disable device ref_clk */
+			ufs_qcom_dev_ref_clk_ctrl(hba, false);
+		break;
+	case POST_CHANGE:
+		if (on)
+			/* enable the device ref clock for HS mode */
+			ufs_qcom_dev_ref_clk_ctrl(hba, true);
+		break;
+	}
+
+	return 0;
+}
+
+static u32 ufs_qcom_get_hs_gear(struct ufs_hba *hba)
+{
+	struct ufs_qcom_priv *priv = dev_get_priv(hba->dev);
+
+	/*
+	 * TOFIX: v4 controllers *should* be able to support HS Gear 4
+	 * but so far pwr_mode switch is failing on v4 controllers and HS Gear 4.
+	 * Only enable HS Gear > 3 for controllers of major version 5 and later.
+ */ + if (priv->hw_ver.major > 0x4) + return UFS_QCOM_MAX_GEAR(ufshcd_readl(hba, REG_UFS_PARAM0)); + + /* Default is HS-G3 */ + return UFS_HS_G3; +} + +static int ufs_get_max_pwr_mode(struct ufs_hba *hba, + struct ufs_pwr_mode_info *max_pwr_info) +{ + struct ufs_qcom_priv *priv = dev_get_priv(hba->dev); + u32 max_gear = ufs_qcom_get_hs_gear(hba); + + max_pwr_info->info.gear_rx = min(max_pwr_info->info.gear_rx, max_gear); + /* Qualcomm UFS only support symmetric Gear */ + max_pwr_info->info.gear_tx = max_pwr_info->info.gear_rx; + + if (priv->hw_ver.major >= 0x4 && max_pwr_info->info.gear_rx > UFS_HS_G3) + ufshcd_dme_set(hba, + UIC_ARG_MIB(PA_TXHSADAPTTYPE), + PA_INITIAL_ADAPT); + + dev_info(hba->dev, "Max HS Gear: %d\n", max_pwr_info->info.gear_rx); + + return 0; +} + +static int ufs_qcom_power_up_sequence(struct ufs_hba *hba) +{ + struct ufs_qcom_priv *priv = dev_get_priv(hba->dev); + struct phy phy; + int ret; + + /* Reset UFS Host Controller and PHY */ + ret = ufs_qcom_reset(hba); + if (ret) + dev_warn(hba->dev, "%s: host reset returned %d\n", + __func__, ret); + + /* get phy */ + ret = generic_phy_get_by_name(hba->dev, "ufsphy", &phy); + if (ret) { + dev_warn(hba->dev, "%s: Unable to get QMP ufs phy, ret = %d\n", + __func__, ret); + return ret; + } + + /* phy initialization */ + ret = generic_phy_init(&phy); + if (ret) { + dev_err(hba->dev, "%s: phy init failed, ret = %d\n", + __func__, ret); + return ret; + } + + /* power on phy */ + ret = generic_phy_power_on(&phy); + if (ret) { + dev_err(hba->dev, "%s: phy power on failed, ret = %d\n", + __func__, ret); + goto out_disable_phy; + } + + ufs_qcom_select_unipro_mode(priv); + + return 0; + +out_disable_phy: + generic_phy_exit(&phy); + + return ret; +} + +/* + * The UTP controller has a number of internal clock gating cells (CGCs). + * Internal hardware sub-modules within the UTP controller control the CGCs. + * Hardware CGCs disable the clock to inactivate UTP sub-modules not involved + * in a specific operation, UTP controller CGCs are by default disabled and + * this function enables them (after every UFS link startup) to save some power + * leakage. + */ +static void ufs_qcom_enable_hw_clk_gating(struct ufs_hba *hba) +{ + ufshcd_rmwl(hba, REG_UFS_CFG2_CGC_EN_ALL, REG_UFS_CFG2_CGC_EN_ALL, + REG_UFS_CFG2); + + /* Ensure that HW clock gating is enabled before next operations */ + ufshcd_readl(hba, REG_UFS_CFG2); +} + +static int ufs_qcom_hce_enable_notify(struct ufs_hba *hba, + enum ufs_notify_change_status status) +{ + struct ufs_qcom_priv *priv = dev_get_priv(hba->dev); + int err; + + switch (status) { + case PRE_CHANGE: + ufs_qcom_power_up_sequence(hba); + /* + * The PHY PLL output is the source of tx/rx lane symbol + * clocks, hence, enable the lane clocks only after PHY + * is initialized. 
+ */ + err = ufs_qcom_enable_clks(priv); + break; + case POST_CHANGE: + /* check if UFS PHY moved from DISABLED to HIBERN8 */ + err = ufs_qcom_check_hibern8(hba); + ufs_qcom_enable_hw_clk_gating(hba); + break; + default: + dev_err(hba->dev, "%s: invalid status %d\n", __func__, status); + err = -EINVAL; + break; + } + + return err; +} + +/* Look for the maximum core_clk_unipro clock value */ +static u32 ufs_qcom_get_core_clk_unipro_max_freq(struct ufs_hba *hba) +{ + struct ufs_qcom_priv *priv = dev_get_priv(hba->dev); + ofnode node = dev_ofnode(priv->hba->dev); + struct ofnode_phandle_args opp_table; + int pos, ret; + u32 clk = 0; + + /* Get core_clk_unipro clock index */ + pos = ofnode_stringlist_search(node, "clock-names", "core_clk_unipro"); + if (pos < 0) + goto fallback; + + /* Try parsing the opps */ + if (!ofnode_parse_phandle_with_args(node, "required-opps", + NULL, 0, 0, &opp_table) && + ofnode_device_is_compatible(opp_table.node, "operating-points-v2")) { + ofnode opp_node; + + ofnode_for_each_subnode(opp_node, opp_table.node) { + u64 opp_clk; + /* opp-hw contains the OPP frequency */ + ret = ofnode_read_u64_index(opp_node, "opp-hz", pos, &opp_clk); + if (ret) + continue; + + /* We don't handle larger clock values, ignore */ + if (opp_clk > U32_MAX) + continue; + + /* Only keep the largest value */ + if (opp_clk > clk) + clk = opp_clk; + } + + /* If we get a valid clock, return it or check legacy*/ + if (clk) + return clk; + } + + /* Legacy freq-table-hz has a pair of u32 per clocks entry, min then max */ + if (!ofnode_read_u32_index(node, "freq-table-hz", pos * 2 + 1, &clk) && + clk > 0) + return clk; + +fallback: + /* default for backwards compatibility */ + return UNIPRO_CORE_CLK_FREQ_150_MHZ * 1000 * 1000; +}; + +static int ufs_qcom_set_clk_40ns_cycles(struct ufs_hba *hba, + u32 cycles_in_1us) +{ + struct ufs_qcom_priv *priv = dev_get_priv(hba->dev); + u32 cycles_in_40ns; + int err; + u32 reg; + + /* + * UFS host controller V4.0.0 onwards needs to program + * PA_VS_CORE_CLK_40NS_CYCLES attribute per programmed + * frequency of unipro core clk of UFS host controller. + */ + if (priv->hw_ver.major < 4) + return 0; + + /* + * Generic formulae for cycles_in_40ns = (freq_unipro/25) is not + * applicable for all frequencies. For ex: ceil(37.5 MHz/25) will + * be 2 and ceil(403 MHZ/25) will be 17 whereas Hardware + * specification expect to be 16. Hence use exact hardware spec + * mandated value for cycles_in_40ns instead of calculating using + * generic formulae. 
+ */ + switch (cycles_in_1us) { + case UNIPRO_CORE_CLK_FREQ_403_MHZ: + cycles_in_40ns = 16; + break; + case UNIPRO_CORE_CLK_FREQ_300_MHZ: + cycles_in_40ns = 12; + break; + case UNIPRO_CORE_CLK_FREQ_201_5_MHZ: + cycles_in_40ns = 8; + break; + case UNIPRO_CORE_CLK_FREQ_150_MHZ: + cycles_in_40ns = 6; + break; + case UNIPRO_CORE_CLK_FREQ_100_MHZ: + cycles_in_40ns = 4; + break; + case UNIPRO_CORE_CLK_FREQ_75_MHZ: + cycles_in_40ns = 3; + break; + case UNIPRO_CORE_CLK_FREQ_37_5_MHZ: + cycles_in_40ns = 2; + break; + default: + dev_err(hba->dev, "UNIPRO clk freq %u MHz not supported\n", + cycles_in_1us); + return -EINVAL; + } + + err = ufshcd_dme_get(hba, UIC_ARG_MIB(PA_VS_CORE_CLK_40NS_CYCLES), ®); + if (err) + return err; + + reg &= ~PA_VS_CORE_CLK_40NS_CYCLES_MASK; + reg |= cycles_in_40ns; + + return ufshcd_dme_set(hba, UIC_ARG_MIB(PA_VS_CORE_CLK_40NS_CYCLES), reg); +} + +static int ufs_qcom_set_core_clk_ctrl(struct ufs_hba *hba) +{ + struct ufs_qcom_priv *priv = dev_get_priv(hba->dev); + u32 core_clk_ctrl_reg; + u32 cycles_in_1us; + int err; + + cycles_in_1us = ceil(ufs_qcom_get_core_clk_unipro_max_freq(hba), + (1000 * 1000)); + err = ufshcd_dme_get(hba, + UIC_ARG_MIB(DME_VS_CORE_CLK_CTRL), + &core_clk_ctrl_reg); + if (err) + return err; + + /* Bit mask is different for UFS host controller V4.0.0 onwards */ + if (priv->hw_ver.major >= 4) { + core_clk_ctrl_reg &= ~CLK_1US_CYCLES_MASK_V4; + core_clk_ctrl_reg |= FIELD_PREP(CLK_1US_CYCLES_MASK_V4, cycles_in_1us); + } else { + core_clk_ctrl_reg &= ~CLK_1US_CYCLES_MASK; + core_clk_ctrl_reg |= FIELD_PREP(CLK_1US_CYCLES_MASK, cycles_in_1us); + } + + /* Clear CORE_CLK_DIV_EN */ + core_clk_ctrl_reg &= ~DME_VS_CORE_CLK_CTRL_CORE_CLK_DIV_EN_BIT; + + err = ufshcd_dme_set(hba, + UIC_ARG_MIB(DME_VS_CORE_CLK_CTRL), + core_clk_ctrl_reg); + if (err) + return err; + + /* Configure unipro core clk 40ns attribute */ + return ufs_qcom_set_clk_40ns_cycles(hba, cycles_in_1us); +} + +static u32 ufs_qcom_get_local_unipro_ver(struct ufs_hba *hba) +{ + /* HCI version 1.0 and 1.1 supports UniPro 1.41 */ + switch (hba->version) { + case UFSHCI_VERSION_10: + case UFSHCI_VERSION_11: + return UFS_UNIPRO_VER_1_41; + + case UFSHCI_VERSION_20: + case UFSHCI_VERSION_21: + default: + return UFS_UNIPRO_VER_1_6; + } +} + +static int ufs_qcom_link_startup_notify(struct ufs_hba *hba, + enum ufs_notify_change_status status) +{ + int err = 0; + + switch (status) { + case PRE_CHANGE: + err = ufs_qcom_set_core_clk_ctrl(hba); + if (err) + dev_err(hba->dev, "cfg core clk ctrl failed\n"); + /* + * Some UFS devices (and may be host) have issues if LCC is + * enabled. So we are setting PA_Local_TX_LCC_Enable to 0 + * before link startup which will make sure that both host + * and device TX LCC are disabled once link startup is + * completed. + */ + if (ufs_qcom_get_local_unipro_ver(hba) != UFS_UNIPRO_VER_1_41) + err = ufshcd_dme_set(hba, UIC_ARG_MIB(PA_LOCAL_TX_LCC_ENABLE), 0); + + break; + default: + break; + } + + return err; +} + +static void ufs_qcom_dev_ref_clk_ctrl(struct ufs_hba *hba, bool enable) +{ + struct ufs_qcom_priv *priv = dev_get_priv(hba->dev); + + if (enable ^ priv->is_dev_ref_clk_enabled) { + u32 temp = readl_relaxed(hba->mmio_base + REG_UFS_CFG1); + + if (enable) + temp |= BIT(26); + else + temp &= ~BIT(26); + + /* + * If we are here to disable this clock it might be immediately + * after entering into hibern8 in which case we need to make + * sure that device ref_clk is active for specific time after + * hibern8 enter. 
+ */ + if (!enable) + udelay(10); + + writel_relaxed(temp, hba->mmio_base + REG_UFS_CFG1); + + /* + * Make sure the write to ref_clk reaches the destination and + * not stored in a Write Buffer (WB). + */ + readl(hba->mmio_base + REG_UFS_CFG1); + + /* + * If we call hibern8 exit after this, we need to make sure that + * device ref_clk is stable for at least 1us before the hibern8 + * exit command. + */ + if (enable) + udelay(1); + + priv->is_dev_ref_clk_enabled = enable; + } +} + +/** + * ufs_qcom_init - bind phy with controller + * @hba: host controller instance + * + * Powers up PHY enabling clocks and regulators. + * + * Returns -EPROBE_DEFER if binding fails, returns negative error + * on phy power up failure and returns zero on success. + */ +static int ufs_qcom_init(struct ufs_hba *hba) +{ + struct ufs_qcom_priv *priv = dev_get_priv(hba->dev); + int err; + + priv->hba = hba; + + /* setup clocks */ + ufs_qcom_setup_clocks(hba, true, PRE_CHANGE); + + if (priv->hw_ver.major >= 0x4) + ufshcd_dme_set(hba, + UIC_ARG_MIB(PA_TXHSADAPTTYPE), + PA_NO_ADAPT); + + ufs_qcom_setup_clocks(hba, true, POST_CHANGE); + + ufs_qcom_get_controller_revision(hba, &priv->hw_ver.major, + &priv->hw_ver.minor, + &priv->hw_ver.step); + dev_info(hba->dev, "Qcom UFS HC version: %d.%d.%d\n", + priv->hw_ver.major, + priv->hw_ver.minor, + priv->hw_ver.step); + + err = ufs_qcom_init_clks(priv); + if (err) { + dev_err(hba->dev, "failed to initialize clocks, err:%d\n", err); + return err; + } + + ufs_qcom_advertise_quirks(hba); + ufs_qcom_setup_clocks(hba, true, POST_CHANGE); + + return 0; +} + +/** + * ufs_qcom_device_reset() - toggle the (optional) device reset line + * @hba: per-adapter instance + * + * Toggles the (optional) reset line to reset the attached device. + */ +static int ufs_qcom_device_reset(struct ufs_hba *hba) +{ + struct ufs_qcom_priv *priv = dev_get_priv(hba->dev); + + if (!dm_gpio_is_valid(&priv->reset)) + return 0; + + /* + * The UFS device shall detect reset pulses of 1us, sleep for 10us to + * be on the safe side. 
+ */ + dm_gpio_set_value(&priv->reset, true); + udelay(10); + + dm_gpio_set_value(&priv->reset, false); + udelay(10); + + return 0; +} + +static struct ufs_hba_ops ufs_qcom_hba_ops = { + .init = ufs_qcom_init, + .get_max_pwr_mode = ufs_get_max_pwr_mode, + .hce_enable_notify = ufs_qcom_hce_enable_notify, + .link_startup_notify = ufs_qcom_link_startup_notify, + .device_reset = ufs_qcom_device_reset, +}; + +static int ufs_qcom_probe(struct udevice *dev) +{ + struct ufs_qcom_priv *priv = dev_get_priv(dev); + int ret; + + /* get resets */ + ret = reset_get_by_name(dev, "rst", &priv->core_reset); + if (ret) { + dev_err(dev, "failed to get reset, ret:%d\n", ret); + return ret; + } + + ret = gpio_request_by_name(dev, "reset-gpios", 0, &priv->reset, GPIOD_IS_OUT); + if (ret) { + dev_err(dev, "Warning: cannot get reset GPIO\n"); + } + + ret = ufshcd_probe(dev, &ufs_qcom_hba_ops); + if (ret) { + dev_err(dev, "ufshcd_probe() failed, ret:%d\n", ret); + return ret; + } + + return 0; +} + +static int ufs_qcom_bind(struct udevice *dev) +{ + struct udevice *scsi_dev; + + return ufs_scsi_bind(dev, &scsi_dev); +} + +static const struct udevice_id ufs_qcom_ids[] = { + { .compatible = "qcom,ufshc" }, + {}, +}; + +U_BOOT_DRIVER(qcom_ufshcd) = { + .name = "qcom-ufshcd", + .id = UCLASS_UFS, + .of_match = ufs_qcom_ids, + .probe = ufs_qcom_probe, + .bind = ufs_qcom_bind, + .priv_auto = sizeof(struct ufs_qcom_priv), +}; diff --git a/drivers/ufs/ufs-qcom.h b/drivers/ufs/ufs-qcom.h new file mode 100644 index 00000000000..de957ae60f3 --- /dev/null +++ b/drivers/ufs/ufs-qcom.h @@ -0,0 +1,147 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* Copyright (c) 2013-2015, The Linux Foundation. All rights reserved. + */ + +#ifndef UFS_QCOM_H_ +#define UFS_QCOM_H_ + +#include <reset.h> +#include <linux/bitfield.h> + +#define MPHY_TX_FSM_STATE 0x41 +#define TX_FSM_HIBERN8 0x1 +#define DEFAULT_CLK_RATE_HZ 1000000 + +#define UFS_HW_VER_MAJOR_MASK GENMASK(31, 28) +#define UFS_HW_VER_MINOR_MASK GENMASK(27, 16) +#define UFS_HW_VER_STEP_MASK GENMASK(15, 0) + +/* QCOM UFS host controller vendor specific registers */ +enum { + REG_UFS_SYS1CLK_1US = 0xC0, + REG_UFS_TX_SYMBOL_CLK_NS_US = 0xC4, + REG_UFS_LOCAL_PORT_ID_REG = 0xC8, + REG_UFS_PA_ERR_CODE = 0xCC, + /* On older UFS revisions, this register is called "RETRY_TIMER_REG" */ + REG_UFS_PARAM0 = 0xD0, + /* On older UFS revisions, this register is called "REG_UFS_PA_LINK_STARTUP_TIMER" */ + REG_UFS_CFG0 = 0xD8, + REG_UFS_CFG1 = 0xDC, + REG_UFS_CFG2 = 0xE0, + REG_UFS_HW_VERSION = 0xE4, + + UFS_TEST_BUS = 0xE8, + UFS_TEST_BUS_CTRL_0 = 0xEC, + UFS_TEST_BUS_CTRL_1 = 0xF0, + UFS_TEST_BUS_CTRL_2 = 0xF4, + UFS_UNIPRO_CFG = 0xF8, + + /* + * QCOM UFS host controller vendor specific registers + * added in HW Version 3.0.0 + */ + UFS_AH8_CFG = 0xFC, + + REG_UFS_CFG3 = 0x271C, +}; + +/* bit definitions for REG_UFS_CFG0 register */ +#define QUNIPRO_G4_SEL BIT(5) + +/* bit definitions for REG_UFS_CFG1 register */ +#define QUNIPRO_SEL BIT(0) +#define UFS_PHY_SOFT_RESET BIT(1) +#define UTP_DBG_RAMS_EN BIT(17) +#define TEST_BUS_EN BIT(18) +#define TEST_BUS_SEL GENMASK(22, 19) +#define UFS_REG_TEST_BUS_EN BIT(30) + +#define UFS_PHY_RESET_ENABLE 1 +#define UFS_PHY_RESET_DISABLE 0 + +/* bit definitions for REG_UFS_CFG2 register */ +#define UAWM_HW_CGC_EN BIT(0) +#define UARM_HW_CGC_EN BIT(1) +#define TXUC_HW_CGC_EN BIT(2) +#define RXUC_HW_CGC_EN BIT(3) +#define DFC_HW_CGC_EN BIT(4) +#define TRLUT_HW_CGC_EN BIT(5) +#define TMRLUT_HW_CGC_EN BIT(6) +#define OCSC_HW_CGC_EN BIT(7) + +/* bit definitions for 
REG_UFS_PARAM0 */ +#define MAX_HS_GEAR_MASK GENMASK(6, 4) +#define UFS_QCOM_MAX_GEAR(x) FIELD_GET(MAX_HS_GEAR_MASK, (x)) + +/* bit definition for UFS_UFS_TEST_BUS_CTRL_n */ +#define TEST_BUS_SUB_SEL_MASK GENMASK(4, 0) /* All XXX_SEL fields are 5 bits wide */ + +#define REG_UFS_CFG2_CGC_EN_ALL (UAWM_HW_CGC_EN | UARM_HW_CGC_EN |\ + TXUC_HW_CGC_EN | RXUC_HW_CGC_EN |\ + DFC_HW_CGC_EN | TRLUT_HW_CGC_EN |\ + TMRLUT_HW_CGC_EN | OCSC_HW_CGC_EN) + +/* bit offset */ +#define OFFSET_CLK_NS_REG 0xa + +/* bit masks */ +#define MASK_TX_SYMBOL_CLK_1US_REG GENMASK(9, 0) +#define MASK_CLK_NS_REG GENMASK(23, 10) + +/* QUniPro Vendor specific attributes */ +#define PA_VS_CONFIG_REG1 0x9000 +#define DME_VS_CORE_CLK_CTRL 0xD002 +/* bit and mask definitions for DME_VS_CORE_CLK_CTRL attribute */ +#define CLK_1US_CYCLES_MASK_V4 GENMASK(27, 16) +#define CLK_1US_CYCLES_MASK GENMASK(7, 0) +#define DME_VS_CORE_CLK_CTRL_CORE_CLK_DIV_EN_BIT BIT(8) +#define PA_VS_CORE_CLK_40NS_CYCLES 0x9007 +#define PA_VS_CORE_CLK_40NS_CYCLES_MASK GENMASK(6, 0) + +/* QCOM UFS host controller core clk frequencies */ +#define UNIPRO_CORE_CLK_FREQ_37_5_MHZ 38 +#define UNIPRO_CORE_CLK_FREQ_75_MHZ 75 +#define UNIPRO_CORE_CLK_FREQ_100_MHZ 100 +#define UNIPRO_CORE_CLK_FREQ_150_MHZ 150 +#define UNIPRO_CORE_CLK_FREQ_300_MHZ 300 +#define UNIPRO_CORE_CLK_FREQ_201_5_MHZ 202 +#define UNIPRO_CORE_CLK_FREQ_403_MHZ 403 + +static inline void +ufs_qcom_get_controller_revision(struct ufs_hba *hba, + u8 *major, u16 *minor, u16 *step) +{ + u32 ver = ufshcd_readl(hba, REG_UFS_HW_VERSION); + + *major = FIELD_GET(UFS_HW_VER_MAJOR_MASK, ver); + *minor = FIELD_GET(UFS_HW_VER_MINOR_MASK, ver); + *step = FIELD_GET(UFS_HW_VER_STEP_MASK, ver); +}; + +/* Host controller hardware version: major.minor.step */ +struct ufs_hw_version { + u16 step; + u16 minor; + u8 major; +}; + +struct gpio_desc; + +struct ufs_qcom_priv { + struct phy *generic_phy; + struct ufs_hba *hba; + + struct clk_bulk clks; + bool is_clks_enabled; + + struct ufs_hw_version hw_ver; + + /* Reset control of HCI */ + struct reset_ctl core_reset; + + struct gpio_desc reset; + + bool is_dev_ref_clk_enabled; +}; + +#endif /* UFS_QCOM_H_ */ diff --git a/drivers/ufs/ufs-renesas.c b/drivers/ufs/ufs-renesas.c new file mode 100644 index 00000000000..ae05bdc8102 --- /dev/null +++ b/drivers/ufs/ufs-renesas.c @@ -0,0 +1,412 @@ +// SPDX-License-Identifier: GPL-2.0 OR MIT +/* + * Renesas UFS host controller driver + * + * Copyright (C) 2022 Renesas Electronics Corporation + */ + +#include <clk.h> +#include <dm.h> +#include <ufs.h> +#include <asm/io.h> +#include <dm/device_compat.h> +#include <linux/bitops.h> +#include <linux/delay.h> +#include <linux/err.h> +#include <linux/bug.h> +#include <linux/iopoll.h> + +#include "ufs.h" + +struct ufs_renesas_priv { + struct clk_bulk clks; + bool initialized; /* The hardware needs initialization once */ +}; + +enum { + SET_PHY_INDEX_LO = 0, + SET_PHY_INDEX_HI, + TIMER_INDEX, + MAX_INDEX +}; + +enum ufs_renesas_init_param_mode { + MODE_RESTORE, + MODE_SET, + MODE_SAVE, + MODE_POLL, + MODE_WAIT, + MODE_WRITE, +}; + +#define PARAM_RESTORE(_reg, _index) \ + { .mode = MODE_RESTORE, .reg = _reg, .index = _index } +#define PARAM_SET(_index, _set) \ + { .mode = MODE_SET, .index = _index, .u.set = _set } +#define PARAM_SAVE(_reg, _mask, _index) \ + { .mode = MODE_SAVE, .reg = _reg, .mask = (u32)(_mask), \ + .index = _index } +#define PARAM_POLL(_reg, _expected, _mask) \ + { .mode = MODE_POLL, .reg = _reg, .u.expected = _expected, \ + .mask = (u32)(_mask) } +#define 
PARAM_WAIT(_delay_us) \ + { .mode = MODE_WAIT, .u.delay_us = _delay_us } + +#define PARAM_WRITE(_reg, _val) \ + { .mode = MODE_WRITE, .reg = _reg, .u.val = _val } + +#define PARAM_WRITE_D0_D4(_d0, _d4) \ + PARAM_WRITE(0xd0, _d0), PARAM_WRITE(0xd4, _d4) + +#define PARAM_WRITE_800_80C_POLL(_addr, _data_800) \ + PARAM_WRITE_D0_D4(0x0000080c, 0x00000100), \ + PARAM_WRITE_D0_D4(0x00000800, ((_data_800) << 16) | BIT(8) | (_addr)), \ + PARAM_WRITE(0xd0, 0x0000080c), \ + PARAM_POLL(0xd4, BIT(8), BIT(8)) + +#define PARAM_RESTORE_800_80C_POLL(_index) \ + PARAM_WRITE_D0_D4(0x0000080c, 0x00000100), \ + PARAM_WRITE(0xd0, 0x00000800), \ + PARAM_RESTORE(0xd4, (_index)), \ + PARAM_WRITE(0xd0, 0x0000080c), \ + PARAM_POLL(0xd4, BIT(8), BIT(8)) + +#define PARAM_WRITE_804_80C_POLL(_addr, _data_804) \ + PARAM_WRITE_D0_D4(0x0000080c, 0x00000100), \ + PARAM_WRITE_D0_D4(0x00000804, ((_data_804) << 16) | BIT(8) | (_addr)), \ + PARAM_WRITE(0xd0, 0x0000080c), \ + PARAM_POLL(0xd4, BIT(8), BIT(8)) + +#define PARAM_WRITE_828_82C_POLL(_data_828) \ + PARAM_WRITE_D0_D4(0x0000082c, 0x0f000000), \ + PARAM_WRITE_D0_D4(0x00000828, _data_828), \ + PARAM_WRITE(0xd0, 0x0000082c), \ + PARAM_POLL(0xd4, _data_828, _data_828) + +#define PARAM_WRITE_PHY(_addr16, _data16) \ + PARAM_WRITE(0xf0, 1), \ + PARAM_WRITE_800_80C_POLL(0x16, (_addr16) & 0xff), \ + PARAM_WRITE_800_80C_POLL(0x17, ((_addr16) >> 8) & 0xff), \ + PARAM_WRITE_800_80C_POLL(0x18, (_data16) & 0xff), \ + PARAM_WRITE_800_80C_POLL(0x19, ((_data16) >> 8) & 0xff), \ + PARAM_WRITE_800_80C_POLL(0x1c, 0x01), \ + PARAM_WRITE_828_82C_POLL(0x0f000000), \ + PARAM_WRITE(0xf0, 0) + +#define PARAM_SET_PHY(_addr16, _data16) \ + PARAM_WRITE(0xf0, 1), \ + PARAM_WRITE_800_80C_POLL(0x16, (_addr16) & 0xff), \ + PARAM_WRITE_800_80C_POLL(0x17, ((_addr16) >> 8) & 0xff), \ + PARAM_WRITE_800_80C_POLL(0x1c, 0x01), \ + PARAM_WRITE_828_82C_POLL(0x0f000000), \ + PARAM_WRITE_804_80C_POLL(0x1a, 0), \ + PARAM_WRITE(0xd0, 0x00000808), \ + PARAM_SAVE(0xd4, 0xff, SET_PHY_INDEX_LO), \ + PARAM_WRITE_804_80C_POLL(0x1b, 0), \ + PARAM_WRITE(0xd0, 0x00000808), \ + PARAM_SAVE(0xd4, 0xff, SET_PHY_INDEX_HI), \ + PARAM_WRITE_828_82C_POLL(0x0f000000), \ + PARAM_WRITE(0xf0, 0), \ + PARAM_WRITE(0xf0, 1), \ + PARAM_WRITE_800_80C_POLL(0x16, (_addr16) & 0xff), \ + PARAM_WRITE_800_80C_POLL(0x17, ((_addr16) >> 8) & 0xff), \ + PARAM_SET(SET_PHY_INDEX_LO, (((_data16) & 0xff) << 16) | BIT(8) | 0x18), \ + PARAM_RESTORE_800_80C_POLL(SET_PHY_INDEX_LO), \ + PARAM_SET(SET_PHY_INDEX_HI, ((((_data16) >> 8) & 0xff) << 16) | BIT(8) | 0x19), \ + PARAM_RESTORE_800_80C_POLL(SET_PHY_INDEX_HI), \ + PARAM_WRITE_800_80C_POLL(0x1c, 0x01), \ + PARAM_WRITE_828_82C_POLL(0x0f000000), \ + PARAM_WRITE(0xf0, 0) + +#define PARAM_INDIRECT_WRITE(_gpio, _addr, _data_800) \ + PARAM_WRITE(0xf0, _gpio), \ + PARAM_WRITE_800_80C_POLL((_addr), _data_800), \ + PARAM_WRITE_828_82C_POLL(0x0f000000), \ + PARAM_WRITE(0xf0, 0) + +#define PARAM_INDIRECT_POLL(_gpio, _addr, _expected, _mask) \ + PARAM_WRITE(0xf0, _gpio), \ + PARAM_WRITE_800_80C_POLL((_addr), 0), \ + PARAM_WRITE(0xd0, 0x00000808), \ + PARAM_POLL(0xd4, (_expected), (_mask)), \ + PARAM_WRITE(0xf0, 0) + +struct ufs_renesas_init_param { + enum ufs_renesas_init_param_mode mode; + u32 reg; + union { + u32 expected; + u32 delay_us; + u32 set; + u32 val; + } u; + u32 mask; + u32 index; +}; + +/* This setting is for SERIES B */ +static const struct ufs_renesas_init_param ufs_param[] = { + PARAM_WRITE(0xc0, 0x49425308), + PARAM_WRITE_D0_D4(0x00000104, 0x00000002), + PARAM_WAIT(1), + PARAM_WRITE_D0_D4(0x00000828, 
0x00000200), + PARAM_WAIT(1), + PARAM_WRITE_D0_D4(0x00000828, 0x00000000), + PARAM_WRITE_D0_D4(0x00000104, 0x00000001), + PARAM_WRITE_D0_D4(0x00000940, 0x00000001), + PARAM_WAIT(1), + PARAM_WRITE_D0_D4(0x00000940, 0x00000000), + + PARAM_WRITE(0xc0, 0x49425308), + PARAM_WRITE(0xc0, 0x41584901), + + PARAM_WRITE_D0_D4(0x0000080c, 0x00000100), + PARAM_WRITE_D0_D4(0x00000804, 0x00000000), + PARAM_WRITE(0xd0, 0x0000080c), + PARAM_POLL(0xd4, BIT(8), BIT(8)), + + PARAM_WRITE(REG_CONTROLLER_ENABLE, 0x00000001), + + PARAM_WRITE(0xd0, 0x00000804), + PARAM_POLL(0xd4, BIT(8) | BIT(6) | BIT(0), BIT(8) | BIT(6) | BIT(0)), + + PARAM_WRITE(0xd0, 0x00000d00), + PARAM_SAVE(0xd4, 0x0000ffff, TIMER_INDEX), + PARAM_WRITE(0xd4, 0x00000000), + PARAM_WRITE_D0_D4(0x0000082c, 0x0f000000), + PARAM_WRITE_D0_D4(0x00000828, 0x08000000), + PARAM_WRITE(0xd0, 0x0000082c), + PARAM_POLL(0xd4, BIT(27), BIT(27)), + PARAM_WRITE(0xd0, 0x00000d2c), + PARAM_POLL(0xd4, BIT(0), BIT(0)), + + /* phy setup */ + PARAM_INDIRECT_WRITE(1, 0x01, 0x001f), + PARAM_INDIRECT_WRITE(7, 0x5d, 0x0014), + PARAM_INDIRECT_WRITE(7, 0x5e, 0x0014), + PARAM_INDIRECT_WRITE(7, 0x0d, 0x0003), + PARAM_INDIRECT_WRITE(7, 0x0e, 0x0007), + PARAM_INDIRECT_WRITE(7, 0x5f, 0x0003), + PARAM_INDIRECT_WRITE(7, 0x60, 0x0003), + PARAM_INDIRECT_WRITE(7, 0x5b, 0x00a6), + PARAM_INDIRECT_WRITE(7, 0x5c, 0x0003), + + PARAM_INDIRECT_POLL(7, 0x3c, 0, BIT(7)), + PARAM_INDIRECT_POLL(7, 0x4c, 0, BIT(4)), + + PARAM_INDIRECT_WRITE(1, 0x32, 0x0080), + PARAM_INDIRECT_WRITE(1, 0x1f, 0x0001), + PARAM_INDIRECT_WRITE(0, 0x2c, 0x0001), + PARAM_INDIRECT_WRITE(0, 0x32, 0x0087), + + PARAM_INDIRECT_WRITE(1, 0x4d, 0x0061), + PARAM_INDIRECT_WRITE(4, 0x9b, 0x0009), + PARAM_INDIRECT_WRITE(4, 0xa6, 0x0005), + PARAM_INDIRECT_WRITE(4, 0xa5, 0x0058), + PARAM_INDIRECT_WRITE(1, 0x39, 0x0027), + PARAM_INDIRECT_WRITE(1, 0x47, 0x004c), + + PARAM_INDIRECT_WRITE(7, 0x0d, 0x0002), + PARAM_INDIRECT_WRITE(7, 0x0e, 0x0007), + + PARAM_WRITE_PHY(0x0028, 0x0061), + PARAM_WRITE_PHY(0x4014, 0x0061), + PARAM_SET_PHY(0x401c, BIT(2)), + PARAM_WRITE_PHY(0x4000, 0x0000), + PARAM_WRITE_PHY(0x4001, 0x0000), + + PARAM_WRITE_PHY(0x10ae, 0x0001), + PARAM_WRITE_PHY(0x10ad, 0x0000), + PARAM_WRITE_PHY(0x10af, 0x0001), + PARAM_WRITE_PHY(0x10b6, 0x0001), + PARAM_WRITE_PHY(0x10ae, 0x0000), + + PARAM_WRITE_PHY(0x10ae, 0x0001), + PARAM_WRITE_PHY(0x10ad, 0x0000), + PARAM_WRITE_PHY(0x10af, 0x0002), + PARAM_WRITE_PHY(0x10b6, 0x0001), + PARAM_WRITE_PHY(0x10ae, 0x0000), + + PARAM_WRITE_PHY(0x10ae, 0x0001), + PARAM_WRITE_PHY(0x10ad, 0x0080), + PARAM_WRITE_PHY(0x10af, 0x0000), + PARAM_WRITE_PHY(0x10b6, 0x0001), + PARAM_WRITE_PHY(0x10ae, 0x0000), + + PARAM_WRITE_PHY(0x10ae, 0x0001), + PARAM_WRITE_PHY(0x10ad, 0x0080), + PARAM_WRITE_PHY(0x10af, 0x001a), + PARAM_WRITE_PHY(0x10b6, 0x0001), + PARAM_WRITE_PHY(0x10ae, 0x0000), + + PARAM_INDIRECT_WRITE(7, 0x70, 0x0016), + PARAM_INDIRECT_WRITE(7, 0x71, 0x0016), + PARAM_INDIRECT_WRITE(7, 0x72, 0x0014), + PARAM_INDIRECT_WRITE(7, 0x73, 0x0014), + PARAM_INDIRECT_WRITE(7, 0x74, 0x0000), + PARAM_INDIRECT_WRITE(7, 0x75, 0x0000), + PARAM_INDIRECT_WRITE(7, 0x76, 0x0010), + PARAM_INDIRECT_WRITE(7, 0x77, 0x0010), + PARAM_INDIRECT_WRITE(7, 0x78, 0x00ff), + PARAM_INDIRECT_WRITE(7, 0x79, 0x0000), + + PARAM_INDIRECT_WRITE(7, 0x19, 0x0007), + + PARAM_INDIRECT_WRITE(7, 0x1a, 0x0007), + + PARAM_INDIRECT_WRITE(7, 0x24, 0x000c), + + PARAM_INDIRECT_WRITE(7, 0x25, 0x000c), + + PARAM_INDIRECT_WRITE(7, 0x62, 0x0000), + PARAM_INDIRECT_WRITE(7, 0x63, 0x0000), + PARAM_INDIRECT_WRITE(7, 0x5d, 0x0014), + PARAM_INDIRECT_WRITE(7, 
0x5e, 0x0017), + PARAM_INDIRECT_WRITE(7, 0x5d, 0x0004), + PARAM_INDIRECT_WRITE(7, 0x5e, 0x0017), + PARAM_INDIRECT_POLL(7, 0x55, 0, BIT(6)), + PARAM_INDIRECT_POLL(7, 0x41, 0, BIT(7)), + /* end of phy setup */ + + PARAM_WRITE(0xf0, 0), + PARAM_WRITE(0xd0, 0x00000d00), + PARAM_RESTORE(0xd4, TIMER_INDEX), +}; + +static void ufs_renesas_reg_control(struct ufs_hba *hba, + const struct ufs_renesas_init_param *p) +{ + static u32 save[MAX_INDEX]; + int ret; + u32 val; + + WARN_ON(p->index >= MAX_INDEX); + + switch (p->mode) { + case MODE_RESTORE: + ufshcd_writel(hba, save[p->index], p->reg); + break; + case MODE_SET: + save[p->index] |= p->u.set; + break; + case MODE_SAVE: + save[p->index] = ufshcd_readl(hba, p->reg) & p->mask; + break; + case MODE_POLL: + ret = readl_poll_timeout(hba->mmio_base + p->reg, val, + (val & p->mask) == p->u.expected, + 10000); + if (ret) + dev_err(hba->dev, "%s: poll failed %d (%08x, %08x, %08x)\n", + __func__, ret, val, p->mask, p->u.expected); + break; + case MODE_WAIT: + if (p->u.delay_us > 1000) + mdelay(DIV_ROUND_UP(p->u.delay_us, 1000)); + else + udelay(p->u.delay_us); + break; + case MODE_WRITE: + ufshcd_writel(hba, p->u.val, p->reg); + break; + default: + break; + } +} + +static void ufs_renesas_pre_init(struct ufs_hba *hba) +{ + const struct ufs_renesas_init_param *p = ufs_param; + unsigned int i; + + for (i = 0; i < ARRAY_SIZE(ufs_param); i++) + ufs_renesas_reg_control(hba, &p[i]); +} + +static int ufs_renesas_hce_enable_notify(struct ufs_hba *hba, + enum ufs_notify_change_status status) +{ + struct ufs_renesas_priv *priv = dev_get_priv(hba->dev); + + if (priv->initialized) + return 0; + + if (status == PRE_CHANGE) + ufs_renesas_pre_init(hba); + + priv->initialized = true; + + return 0; +} + +static int ufs_renesas_init(struct ufs_hba *hba) +{ + hba->quirks |= UFSHCD_QUIRK_BROKEN_64BIT_ADDRESS | UFSHCD_QUIRK_HIBERN_FASTAUTO; + + return 0; +} + +static struct ufs_hba_ops ufs_renesas_vops = { + .init = ufs_renesas_init, + .hce_enable_notify = ufs_renesas_hce_enable_notify, +}; + +static int ufs_renesas_pltfm_bind(struct udevice *dev) +{ + struct udevice *scsi_dev; + + return ufs_scsi_bind(dev, &scsi_dev); +} + +static int ufs_renesas_pltfm_probe(struct udevice *dev) +{ + struct ufs_renesas_priv *priv = dev_get_priv(dev); + int err; + + err = clk_get_bulk(dev, &priv->clks); + if (err < 0) + return err; + + err = clk_enable_bulk(&priv->clks); + if (err) + goto err_clk_enable; + + err = ufshcd_probe(dev, &ufs_renesas_vops); + if (err) { + dev_err(dev, "ufshcd_probe() failed %d\n", err); + goto err_ufshcd_probe; + } + + return 0; + +err_ufshcd_probe: + clk_disable_bulk(&priv->clks); +err_clk_enable: + clk_release_bulk(&priv->clks); + return err; +} + +static int ufs_renesas_pltfm_remove(struct udevice *dev) +{ + struct ufs_renesas_priv *priv = dev_get_priv(dev); + + clk_disable_bulk(&priv->clks); + clk_release_bulk(&priv->clks); + + return 0; +} + +static const struct udevice_id ufs_renesas_pltfm_ids[] = { + { .compatible = "renesas,r8a779f0-ufs" }, + { /* sentinel */ } +}; + +U_BOOT_DRIVER(ufs_renesas) = { + .name = "ufs-renesas", + .id = UCLASS_UFS, + .of_match = ufs_renesas_pltfm_ids, + .bind = ufs_renesas_pltfm_bind, + .probe = ufs_renesas_pltfm_probe, + .remove = ufs_renesas_pltfm_remove, + .priv_auto = sizeof(struct ufs_renesas_priv), +}; diff --git a/drivers/ufs/ufs-uclass.c b/drivers/ufs/ufs-uclass.c new file mode 100644 index 00000000000..334bfcfa06a --- /dev/null +++ b/drivers/ufs/ufs-uclass.c @@ -0,0 +1,17 @@ +// SPDX-License-Identifier: GPL-2.0 +/** + * 
ufs-uclass.c - Universal Flash Storage (UFS) Uclass driver + * + * Copyright (C) 2019 Texas Instruments Incorporated - https://www.ti.com + */ + +#define LOG_CATEGORY UCLASS_UFS + +#include "ufs.h" +#include <dm.h> + +UCLASS_DRIVER(ufs) = { + .id = UCLASS_UFS, + .name = "ufs", + .per_device_auto = sizeof(struct ufs_hba), +}; diff --git a/drivers/ufs/ufs.c b/drivers/ufs/ufs.c new file mode 100644 index 00000000000..91f6ad3bfef --- /dev/null +++ b/drivers/ufs/ufs.c @@ -0,0 +1,2078 @@ +// SPDX-License-Identifier: GPL-2.0+ +/** + * ufs.c - Universal Flash Storage (UFS) driver + * + * Taken from Linux Kernel v5.2 (drivers/scsi/ufs/ufshcd.c) and ported + * to u-boot. + * + * Copyright (C) 2019 Texas Instruments Incorporated - https://www.ti.com + */ + +#include <bouncebuf.h> +#include <charset.h> +#include <dm.h> +#include <log.h> +#include <dm/device_compat.h> +#include <dm/devres.h> +#include <dm/lists.h> +#include <dm/device-internal.h> +#include <malloc.h> +#include <hexdump.h> +#include <scsi.h> +#include <ufs.h> +#include <asm/io.h> +#include <asm/dma-mapping.h> +#include <linux/bitops.h> +#include <linux/delay.h> +#include <linux/dma-mapping.h> + +#include "ufs.h" + +#define UFSHCD_ENABLE_INTRS (UTP_TRANSFER_REQ_COMPL |\ + UTP_TASK_REQ_COMPL |\ + UFSHCD_ERROR_MASK) +/* maximum number of link-startup retries */ +#define DME_LINKSTARTUP_RETRIES 3 + +/* maximum number of retries for a general UIC command */ +#define UFS_UIC_COMMAND_RETRIES 3 + +/* Query request retries */ +#define QUERY_REQ_RETRIES 3 +/* Query request timeout */ +#define QUERY_REQ_TIMEOUT 1500 /* 1.5 seconds */ + +/* maximum timeout in ms for a general UIC command */ +#define UFS_UIC_CMD_TIMEOUT 1000 +/* NOP OUT retries waiting for NOP IN response */ +#define NOP_OUT_RETRIES 10 +/* Timeout after 30 msecs if NOP OUT hangs without response */ +#define NOP_OUT_TIMEOUT 30 /* msecs */ + +/* Only use one Task Tag for all requests */ +#define TASK_TAG 0 + +/* Expose the flag value from utp_upiu_query.value */ +#define MASK_QUERY_UPIU_FLAG_LOC 0xFF + +#define MAX_PRDT_ENTRY 262144 + +/* maximum bytes per request */ +#define UFS_MAX_BYTES (128 * 256 * 1024) + +static inline bool ufshcd_is_hba_active(struct ufs_hba *hba); +static inline void ufshcd_hba_stop(struct ufs_hba *hba); +static int ufshcd_hba_enable(struct ufs_hba *hba); + +/* + * ufshcd_wait_for_register - wait for register value to change + */ +static int ufshcd_wait_for_register(struct ufs_hba *hba, u32 reg, u32 mask, + u32 val, unsigned long timeout_ms) +{ + int err = 0; + unsigned long start = get_timer(0); + + /* ignore bits that we don't intend to wait on */ + val = val & mask; + + while ((ufshcd_readl(hba, reg) & mask) != val) { + if (get_timer(start) > timeout_ms) { + if ((ufshcd_readl(hba, reg) & mask) != val) + err = -ETIMEDOUT; + break; + } + } + + return err; +} + +/** + * ufshcd_init_pwr_info - setting the POR (power on reset) + * values in hba power info + */ +static void ufshcd_init_pwr_info(struct ufs_hba *hba) +{ + hba->pwr_info.gear_rx = UFS_PWM_G1; + hba->pwr_info.gear_tx = UFS_PWM_G1; + hba->pwr_info.lane_rx = 1; + hba->pwr_info.lane_tx = 1; + hba->pwr_info.pwr_rx = SLOWAUTO_MODE; + hba->pwr_info.pwr_tx = SLOWAUTO_MODE; + hba->pwr_info.hs_rate = 0; +} + +/** + * ufshcd_print_pwr_info - print power params as saved in hba + * power info + */ +static void ufshcd_print_pwr_info(struct ufs_hba *hba) +{ + static const char * const names[] = { + "INVALID MODE", + "FAST MODE", + "SLOW_MODE", + "INVALID MODE", + "FASTAUTO_MODE", + "SLOWAUTO_MODE", + "INVALID 
MODE", + }; + + dev_err(hba->dev, "[RX, TX]: gear=[%d, %d], lane[%d, %d], pwr[%s, %s], rate = %d\n", + hba->pwr_info.gear_rx, hba->pwr_info.gear_tx, + hba->pwr_info.lane_rx, hba->pwr_info.lane_tx, + names[hba->pwr_info.pwr_rx], + names[hba->pwr_info.pwr_tx], + hba->pwr_info.hs_rate); +} + +static void ufshcd_device_reset(struct ufs_hba *hba) +{ + ufshcd_vops_device_reset(hba); +} + +/** + * ufshcd_ready_for_uic_cmd - Check if controller is ready + * to accept UIC commands + */ +static inline bool ufshcd_ready_for_uic_cmd(struct ufs_hba *hba) +{ + if (ufshcd_readl(hba, REG_CONTROLLER_STATUS) & UIC_COMMAND_READY) + return true; + else + return false; +} + +/** + * ufshcd_get_uic_cmd_result - Get the UIC command result + */ +static inline int ufshcd_get_uic_cmd_result(struct ufs_hba *hba) +{ + return ufshcd_readl(hba, REG_UIC_COMMAND_ARG_2) & + MASK_UIC_COMMAND_RESULT; +} + +/** + * ufshcd_get_dme_attr_val - Get the value of attribute returned by UIC command + */ +static inline u32 ufshcd_get_dme_attr_val(struct ufs_hba *hba) +{ + return ufshcd_readl(hba, REG_UIC_COMMAND_ARG_3); +} + +/** + * ufshcd_is_device_present - Check if any device connected to + * the host controller + */ +static inline bool ufshcd_is_device_present(struct ufs_hba *hba) +{ + return (ufshcd_readl(hba, REG_CONTROLLER_STATUS) & + DEVICE_PRESENT) ? true : false; +} + +/** + * ufshcd_send_uic_cmd - UFS Interconnect layer command API + * + */ +static int ufshcd_send_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd) +{ + unsigned long start = 0; + u32 intr_status; + u32 enabled_intr_status; + + if (!ufshcd_ready_for_uic_cmd(hba)) { + dev_err(hba->dev, + "Controller not ready to accept UIC commands\n"); + return -EIO; + } + + debug("sending uic command:%d\n", uic_cmd->command); + + /* Write Args */ + ufshcd_writel(hba, uic_cmd->argument1, REG_UIC_COMMAND_ARG_1); + ufshcd_writel(hba, uic_cmd->argument2, REG_UIC_COMMAND_ARG_2); + ufshcd_writel(hba, uic_cmd->argument3, REG_UIC_COMMAND_ARG_3); + + /* Write UIC Cmd */ + ufshcd_writel(hba, uic_cmd->command & COMMAND_OPCODE_MASK, + REG_UIC_COMMAND); + + start = get_timer(0); + do { + intr_status = ufshcd_readl(hba, REG_INTERRUPT_STATUS); + enabled_intr_status = intr_status & hba->intr_mask; + ufshcd_writel(hba, intr_status, REG_INTERRUPT_STATUS); + + if (get_timer(start) > UFS_UIC_CMD_TIMEOUT) { + dev_err(hba->dev, + "Timedout waiting for UIC response\n"); + + return -ETIMEDOUT; + } + + if (enabled_intr_status & UFSHCD_ERROR_MASK) { + dev_err(hba->dev, "Error in status:%08x\n", + enabled_intr_status); + + return -1; + } + } while (!(enabled_intr_status & UFSHCD_UIC_MASK)); + + uic_cmd->argument2 = ufshcd_get_uic_cmd_result(hba); + uic_cmd->argument3 = ufshcd_get_dme_attr_val(hba); + + debug("Sent successfully\n"); + + return 0; +} + +/** + * ufshcd_dme_set_attr - UIC command for DME_SET, DME_PEER_SET + * + */ +int ufshcd_dme_set_attr(struct ufs_hba *hba, u32 attr_sel, u8 attr_set, + u32 mib_val, u8 peer) +{ + struct uic_command uic_cmd = {0}; + static const char *const action[] = { + "dme-set", + "dme-peer-set" + }; + const char *set = action[!!peer]; + int ret; + int retries = UFS_UIC_COMMAND_RETRIES; + + uic_cmd.command = peer ? 
+ UIC_CMD_DME_PEER_SET : UIC_CMD_DME_SET; + uic_cmd.argument1 = attr_sel; + uic_cmd.argument2 = UIC_ARG_ATTR_TYPE(attr_set); + uic_cmd.argument3 = mib_val; + + do { + /* for peer attributes we retry upon failure */ + ret = ufshcd_send_uic_cmd(hba, &uic_cmd); + if (ret) + dev_dbg(hba->dev, "%s: attr-id 0x%x val 0x%x error code %d\n", + set, UIC_GET_ATTR_ID(attr_sel), mib_val, ret); + } while (ret && peer && --retries); + + if (ret) + dev_err(hba->dev, "%s: attr-id 0x%x val 0x%x failed %d retries\n", + set, UIC_GET_ATTR_ID(attr_sel), mib_val, + UFS_UIC_COMMAND_RETRIES - retries); + + return ret; +} + +/** + * ufshcd_dme_get_attr - UIC command for DME_GET, DME_PEER_GET + * + */ +int ufshcd_dme_get_attr(struct ufs_hba *hba, u32 attr_sel, + u32 *mib_val, u8 peer) +{ + struct uic_command uic_cmd = {0}; + static const char *const action[] = { + "dme-get", + "dme-peer-get" + }; + const char *get = action[!!peer]; + int ret; + int retries = UFS_UIC_COMMAND_RETRIES; + + uic_cmd.command = peer ? + UIC_CMD_DME_PEER_GET : UIC_CMD_DME_GET; + uic_cmd.argument1 = attr_sel; + + do { + /* for peer attributes we retry upon failure */ + ret = ufshcd_send_uic_cmd(hba, &uic_cmd); + if (ret) + dev_dbg(hba->dev, "%s: attr-id 0x%x error code %d\n", + get, UIC_GET_ATTR_ID(attr_sel), ret); + } while (ret && peer && --retries); + + if (ret) + dev_err(hba->dev, "%s: attr-id 0x%x failed %d retries\n", + get, UIC_GET_ATTR_ID(attr_sel), + UFS_UIC_COMMAND_RETRIES - retries); + + if (mib_val && !ret) + *mib_val = uic_cmd.argument3; + + return ret; +} + +static int ufshcd_disable_tx_lcc(struct ufs_hba *hba, bool peer) +{ + u32 tx_lanes, i, err = 0; + + if (!peer) + ufshcd_dme_get(hba, UIC_ARG_MIB(PA_CONNECTEDTXDATALANES), + &tx_lanes); + else + ufshcd_dme_peer_get(hba, UIC_ARG_MIB(PA_CONNECTEDTXDATALANES), + &tx_lanes); + for (i = 0; i < tx_lanes; i++) { + unsigned int val = UIC_ARG_MIB_SEL(TX_LCC_ENABLE, + UIC_ARG_MPHY_TX_GEN_SEL_INDEX(i)); + if (!peer) + err = ufshcd_dme_set(hba, val, 0); + else + err = ufshcd_dme_peer_set(hba, val, 0); + if (err) { + dev_err(hba->dev, "%s: TX LCC Disable failed, peer = %d, lane = %d, err = %d\n", + __func__, peer, i, err); + break; + } + } + + return err; +} + +static inline int ufshcd_disable_device_tx_lcc(struct ufs_hba *hba) +{ + return ufshcd_disable_tx_lcc(hba, true); +} + +/** + * ufshcd_dme_link_startup - Notify Unipro to perform link startup + * + */ +static int ufshcd_dme_link_startup(struct ufs_hba *hba) +{ + struct uic_command uic_cmd = {0}; + int ret; + + uic_cmd.command = UIC_CMD_DME_LINK_STARTUP; + + ret = ufshcd_send_uic_cmd(hba, &uic_cmd); + if (ret) + dev_dbg(hba->dev, + "dme-link-startup: error code %d\n", ret); + return ret; +} + +/** + * ufshcd_disable_intr_aggr - Disables interrupt aggregation. 
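The dme-set/dme-get helpers above all funnel into ufshcd_send_uic_cmd() with the attribute packed into the three UIC argument registers. Below is a minimal standalone sketch of that packing; the real macros live in ufshci.h and unipro.h, so the definitions here are restatements for illustration and the PA_PWRMODE value is an assumption quoted from unipro.h.

#include <stdio.h>
#include <stdint.h>

/* Mirrors of the UIC argument-packing macros (assumed from ufshci.h) */
#define UIC_ARG_MIB_SEL(attr, sel)	((((attr) & 0xFFFF) << 16) | ((sel) & 0xFFFF))
#define UIC_ARG_MIB(attr)		UIC_ARG_MIB_SEL(attr, 0)
#define UIC_ARG_ATTR_TYPE(t)		(((t) & 0xFF) << 16)
#define UIC_GET_ATTR_ID(v)		(((v) >> 16) & 0xFFFF)

#define PA_PWRMODE 0x1571		/* attribute ID, assumed from unipro.h */

int main(void)
{
	uint32_t arg1 = UIC_ARG_MIB(PA_PWRMODE);	/* attr ID | GenSelectorIndex */
	uint32_t arg2 = UIC_ARG_ATTR_TYPE(0);		/* ATTR_SET_NOR */
	uint32_t arg3 = 0x11;				/* MIB value to write */

	printf("ARG1=0x%08x ARG2=0x%08x ARG3=0x%08x (attr 0x%x)\n",
	       arg1, arg2, arg3, UIC_GET_ATTR_ID(arg1));
	return 0;
}

A dme-peer-set differs only in the opcode written to REG_UIC_COMMAND; the argument packing is identical, which is why the retry loop above can share one uic_command for both variants.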
+ * + */ +static inline void ufshcd_disable_intr_aggr(struct ufs_hba *hba) +{ + ufshcd_writel(hba, 0, REG_UTP_TRANSFER_REQ_INT_AGG_CONTROL); +} + +/** + * ufshcd_get_lists_status - Check UCRDY, UTRLRDY and UTMRLRDY + */ +static inline int ufshcd_get_lists_status(u32 reg) +{ + return !((reg & UFSHCD_STATUS_READY) == UFSHCD_STATUS_READY); +} + +/** + * ufshcd_enable_run_stop_reg - Enable run-stop registers, + * When run-stop registers are set to 1, it indicates the + * host controller that it can process the requests + */ +static void ufshcd_enable_run_stop_reg(struct ufs_hba *hba) +{ + ufshcd_writel(hba, UTP_TASK_REQ_LIST_RUN_STOP_BIT, + REG_UTP_TASK_REQ_LIST_RUN_STOP); + ufshcd_writel(hba, UTP_TRANSFER_REQ_LIST_RUN_STOP_BIT, + REG_UTP_TRANSFER_REQ_LIST_RUN_STOP); +} + +/** + * ufshcd_enable_intr - enable interrupts + */ +static void ufshcd_enable_intr(struct ufs_hba *hba, u32 intrs) +{ + u32 set = ufshcd_readl(hba, REG_INTERRUPT_ENABLE); + u32 rw; + + if (hba->version == UFSHCI_VERSION_10) { + rw = set & INTERRUPT_MASK_RW_VER_10; + set = rw | ((set ^ intrs) & intrs); + } else { + set |= intrs; + } + + ufshcd_writel(hba, set, REG_INTERRUPT_ENABLE); + + hba->intr_mask = set; +} + +/** + * ufshcd_make_hba_operational - Make UFS controller operational + * + * To bring UFS host controller to operational state, + * 1. Enable required interrupts + * 2. Configure interrupt aggregation + * 3. Program UTRL and UTMRL base address + * 4. Configure run-stop-registers + * + */ +static int ufshcd_make_hba_operational(struct ufs_hba *hba) +{ + int err = 0; + u32 reg; + + /* Enable required interrupts */ + ufshcd_enable_intr(hba, UFSHCD_ENABLE_INTRS); + + /* Disable interrupt aggregation */ + ufshcd_disable_intr_aggr(hba); + + /* Configure UTRL and UTMRL base address registers */ + ufshcd_writel(hba, lower_32_bits((dma_addr_t)hba->utrdl), + REG_UTP_TRANSFER_REQ_LIST_BASE_L); + ufshcd_writel(hba, upper_32_bits((dma_addr_t)hba->utrdl), + REG_UTP_TRANSFER_REQ_LIST_BASE_H); + ufshcd_writel(hba, lower_32_bits((dma_addr_t)hba->utmrdl), + REG_UTP_TASK_REQ_LIST_BASE_L); + ufshcd_writel(hba, upper_32_bits((dma_addr_t)hba->utmrdl), + REG_UTP_TASK_REQ_LIST_BASE_H); + + /* + * Make sure base address and interrupt setup are updated before + * enabling the run/stop registers below. + */ + wmb(); + + /* + * UCRDY, UTMRLDY and UTRLRDY bits must be 1 + */ + reg = ufshcd_readl(hba, REG_CONTROLLER_STATUS); + if (!(ufshcd_get_lists_status(reg))) { + ufshcd_enable_run_stop_reg(hba); + } else { + dev_err(hba->dev, + "Host controller not ready to process requests\n"); + err = -EIO; + goto out; + } + +out: + return err; +} + +/** + * ufshcd_link_startup - Initialize unipro link startup + */ +static int ufshcd_link_startup(struct ufs_hba *hba) +{ + int ret; + int retries = DME_LINKSTARTUP_RETRIES; + + do { + ufshcd_ops_link_startup_notify(hba, PRE_CHANGE); + + ret = ufshcd_dme_link_startup(hba); + + /* check if device is detected by inter-connect layer */ + if (!ret && !ufshcd_is_device_present(hba)) { + dev_err(hba->dev, "%s: Device not present\n", __func__); + ret = -ENXIO; + goto out; + } + + /* + * DME link lost indication is only received when link is up, + * but we can't be sure if the link is up until link startup + * succeeds. So reset the local Uni-Pro and try again. + */ + if (ret && ufshcd_hba_enable(hba)) + goto out; + } while (ret && retries--); + + if (ret) + /* failed to get the link up... 
retire */ + goto out; + + /* Mark that link is up in PWM-G1, 1-lane, SLOW-AUTO mode */ + ufshcd_init_pwr_info(hba); + + if (hba->quirks & UFSHCD_QUIRK_BROKEN_LCC) { + ret = ufshcd_disable_device_tx_lcc(hba); + if (ret) + goto out; + } + + /* Include any host controller configuration via UIC commands */ + ret = ufshcd_ops_link_startup_notify(hba, POST_CHANGE); + if (ret) + goto out; + + /* Clear UECPA once due to LINERESET has happened during LINK_STARTUP */ + ufshcd_readl(hba, REG_UIC_ERROR_CODE_PHY_ADAPTER_LAYER); + ret = ufshcd_make_hba_operational(hba); +out: + if (ret) + dev_err(hba->dev, "link startup failed %d\n", ret); + + return ret; +} + +/** + * ufshcd_hba_stop - Send controller to reset state + */ +static inline void ufshcd_hba_stop(struct ufs_hba *hba) +{ + int err; + + ufshcd_writel(hba, CONTROLLER_DISABLE, REG_CONTROLLER_ENABLE); + err = ufshcd_wait_for_register(hba, REG_CONTROLLER_ENABLE, + CONTROLLER_ENABLE, CONTROLLER_DISABLE, + 10); + if (err) + dev_err(hba->dev, "%s: Controller disable failed\n", __func__); +} + +/** + * ufshcd_is_hba_active - Get controller state + */ +static inline bool ufshcd_is_hba_active(struct ufs_hba *hba) +{ + return (ufshcd_readl(hba, REG_CONTROLLER_ENABLE) & CONTROLLER_ENABLE) + ? false : true; +} + +/** + * ufshcd_hba_start - Start controller initialization sequence + */ +static inline void ufshcd_hba_start(struct ufs_hba *hba) +{ + ufshcd_writel(hba, CONTROLLER_ENABLE, REG_CONTROLLER_ENABLE); +} + +/** + * ufshcd_hba_enable - initialize the controller + */ +static int ufshcd_hba_enable(struct ufs_hba *hba) +{ + int retry; + + if (!ufshcd_is_hba_active(hba)) + /* change controller state to "reset state" */ + ufshcd_hba_stop(hba); + + ufshcd_ops_hce_enable_notify(hba, PRE_CHANGE); + + /* start controller initialization sequence */ + ufshcd_hba_start(hba); + + /* + * To initialize a UFS host controller HCE bit must be set to 1. + * During initialization the HCE bit value changes from 1->0->1. + * When the host controller completes initialization sequence + * it sets the value of HCE bit to 1. The same HCE bit is read back + * to check if the controller has completed initialization sequence. + * So without this delay the value HCE = 1, set in the previous + * instruction might be read back. + * This delay can be changed based on the controller. 
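The comment above describes the 1 -> 0 -> 1 transition of the HCE bit. Here is a compilable sketch of the same handshake with the register accessors and delay stubbed out so the control flow stands alone; the _stub names and the way the stub models hardware completion are inventions of this example, not driver API.

#include <stdio.h>
#include <stdint.h>

#define CONTROLLER_ENABLE	0x1

static uint32_t fake_hce;	/* stands in for REG_CONTROLLER_ENABLE */

static void writel_stub(uint32_t v)	{ fake_hce = v; }
static uint32_t readl_stub(void)	{ return fake_hce; }
static void mdelay_stub(int ms)
{
	(void)ms;
	fake_hce = CONTROLLER_ENABLE;	/* model the hardware finishing init */
}

static int hba_enable_sketch(void)
{
	int retry = 10;

	writel_stub(CONTROLLER_ENABLE);	/* HCE goes 1 -> 0 -> 1 in real HW */
	mdelay_stub(1);			/* let the 1 -> 0 edge happen first */

	/* wait until the controller reports HCE = 1 again */
	while (!(readl_stub() & CONTROLLER_ENABLE)) {
		if (!retry--)
			return -5;	/* -EIO: controller never came up */
		mdelay_stub(5);
	}
	return 0;
}

int main(void) { printf("enable: %d\n", hba_enable_sketch()); return 0; }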
+ */ + mdelay(1); + + /* wait for the host controller to complete initialization */ + retry = 10; + while (ufshcd_is_hba_active(hba)) { + if (retry) { + retry--; + } else { + dev_err(hba->dev, "Controller enable failed\n"); + return -EIO; + } + mdelay(5); + } + + /* enable UIC related interrupts */ + ufshcd_enable_intr(hba, UFSHCD_UIC_MASK); + + ufshcd_ops_hce_enable_notify(hba, POST_CHANGE); + + return 0; +} + +/** + * ufshcd_host_memory_configure - configure local reference block with + * memory offsets + */ +static void ufshcd_host_memory_configure(struct ufs_hba *hba) +{ + struct utp_transfer_req_desc *utrdlp; + dma_addr_t cmd_desc_dma_addr; + u16 response_offset; + u16 prdt_offset; + + utrdlp = hba->utrdl; + cmd_desc_dma_addr = (dma_addr_t)hba->ucdl; + + utrdlp->command_desc_base_addr_lo = + cpu_to_le32(lower_32_bits(cmd_desc_dma_addr)); + utrdlp->command_desc_base_addr_hi = + cpu_to_le32(upper_32_bits(cmd_desc_dma_addr)); + + response_offset = offsetof(struct utp_transfer_cmd_desc, response_upiu); + prdt_offset = offsetof(struct utp_transfer_cmd_desc, prd_table); + + utrdlp->response_upiu_offset = cpu_to_le16(response_offset >> 2); + utrdlp->prd_table_offset = cpu_to_le16(prdt_offset >> 2); + utrdlp->response_upiu_length = cpu_to_le16(ALIGNED_UPIU_SIZE >> 2); + + hba->ucd_req_ptr = (struct utp_upiu_req *)hba->ucdl; + hba->ucd_rsp_ptr = + (struct utp_upiu_rsp *)&hba->ucdl->response_upiu; + hba->ucd_prdt_ptr = + (struct ufshcd_sg_entry *)&hba->ucdl->prd_table; +} + +/** + * ufshcd_memory_alloc - allocate memory for host memory space data structures + */ +static int ufshcd_memory_alloc(struct ufs_hba *hba) +{ + /* Allocate one Transfer Request Descriptor + * Should be aligned to 1k boundary. + */ + hba->utrdl = memalign(1024, + ALIGN(sizeof(struct utp_transfer_req_desc), + ARCH_DMA_MINALIGN)); + if (!hba->utrdl) { + dev_err(hba->dev, "Transfer Descriptor memory allocation failed\n"); + return -ENOMEM; + } + + /* Allocate one Command Descriptor + * Should be aligned to 1k boundary. + */ + hba->ucdl = memalign(1024, + ALIGN(sizeof(struct utp_transfer_cmd_desc), + ARCH_DMA_MINALIGN)); + if (!hba->ucdl) { + dev_err(hba->dev, "Command descriptor memory allocation failed\n"); + return -ENOMEM; + } + + return 0; +} + +/** + * ufshcd_get_intr_mask - Get the interrupt bit mask + */ +static inline u32 ufshcd_get_intr_mask(struct ufs_hba *hba) +{ + u32 intr_mask = 0; + + switch (hba->version) { + case UFSHCI_VERSION_10: + intr_mask = INTERRUPT_MASK_ALL_VER_10; + break; + case UFSHCI_VERSION_11: + case UFSHCI_VERSION_20: + intr_mask = INTERRUPT_MASK_ALL_VER_11; + break; + case UFSHCI_VERSION_21: + default: + intr_mask = INTERRUPT_MASK_ALL_VER_21; + break; + } + + return intr_mask; +} + +/** + * ufshcd_get_ufs_version - Get the UFS version supported by the HBA + */ +static inline u32 ufshcd_get_ufs_version(struct ufs_hba *hba) +{ + return ufshcd_readl(hba, REG_UFS_VERSION); +} + +/** + * ufshcd_get_upmcrs - Get the power mode change request status + */ +static inline u8 ufshcd_get_upmcrs(struct ufs_hba *hba) +{ + return (ufshcd_readl(hba, REG_CONTROLLER_STATUS) >> 8) & 0x7; +} + +/** + * ufshcd_cache_flush - Flush cache + * + * Flush cache in aligned address..address+size range. 
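Both cache helpers below round the buffer out to whole ARCH_DMA_MINALIGN units before calling the arch cache ops, since dcache maintenance only works on full cache lines. A standalone sketch of just that rounding follows; the 64-byte line size is an assumption for illustration, the real constant comes from the architecture headers.

#include <stdio.h>
#include <stdint.h>

#define ARCH_DMA_MINALIGN	64	/* illustrative; arch-dependent */
#define ALIGN(x, a)		(((x) + (a) - 1) & ~((uintptr_t)(a) - 1))

int main(void)
{
	uintptr_t addr = 0x1000f, size = 0x22;
	uintptr_t start = addr & ~((uintptr_t)ARCH_DMA_MINALIGN - 1);
	uintptr_t end = ALIGN(addr + size, ARCH_DMA_MINALIGN);

	/* flush/invalidate would then cover [start, end) */
	printf("buffer [0x%lx, 0x%lx) -> maintained range [0x%lx, 0x%lx)\n",
	       (unsigned long)addr, (unsigned long)(addr + size),
	       (unsigned long)start, (unsigned long)end);
	return 0;
}

The widening is also why a DMA buffer must not share a cache line with unrelated live data: an invalidate of the rounded range would discard the neighbouring bytes.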
+ */
+static void ufshcd_cache_flush(void *addr, unsigned long size)
+{
+	uintptr_t start_addr = (uintptr_t)addr & ~(ARCH_DMA_MINALIGN - 1);
+	uintptr_t end_addr = ALIGN((uintptr_t)addr + size, ARCH_DMA_MINALIGN);
+
+	flush_dcache_range(start_addr, end_addr);
+}
+
+/**
+ * ufshcd_cache_invalidate - Invalidate cache
+ *
+ * Invalidate cache in aligned address..address+size range.
+ */
+static void ufshcd_cache_invalidate(void *addr, unsigned long size)
+{
+	uintptr_t start_addr = (uintptr_t)addr & ~(ARCH_DMA_MINALIGN - 1);
+	uintptr_t end_addr = ALIGN((uintptr_t)addr + size, ARCH_DMA_MINALIGN);
+
+	invalidate_dcache_range(start_addr, end_addr);
+}
+
+/**
+ * ufshcd_prepare_req_desc_hdr() - Fill the request header
+ * descriptor according to the request type
+ */
+static void ufshcd_prepare_req_desc_hdr(struct ufs_hba *hba,
+					u32 *upiu_flags,
+					enum dma_data_direction cmd_dir)
+{
+	struct utp_transfer_req_desc *req_desc = hba->utrdl;
+	u32 data_direction;
+	u32 dword_0;
+
+	if (cmd_dir == DMA_FROM_DEVICE) {
+		data_direction = UTP_DEVICE_TO_HOST;
+		*upiu_flags = UPIU_CMD_FLAGS_READ;
+	} else if (cmd_dir == DMA_TO_DEVICE) {
+		data_direction = UTP_HOST_TO_DEVICE;
+		*upiu_flags = UPIU_CMD_FLAGS_WRITE;
+	} else {
+		data_direction = UTP_NO_DATA_TRANSFER;
+		*upiu_flags = UPIU_CMD_FLAGS_NONE;
+	}
+
+	dword_0 = data_direction | (0x1 << UPIU_COMMAND_TYPE_OFFSET);
+
+	/* Enable Interrupt for command */
+	dword_0 |= UTP_REQ_DESC_INT_CMD;
+
+	/* Transfer request descriptor header fields */
+	req_desc->header.dword_0 = cpu_to_le32(dword_0);
+	/* dword_1 is reserved, hence it is set to 0 */
+	req_desc->header.dword_1 = 0;
+	/*
+	 * assigning invalid value for command status. Controller
+	 * updates OCS on command completion, with the command
+	 * status
+	 */
+	req_desc->header.dword_2 =
+		cpu_to_le32(OCS_INVALID_COMMAND_STATUS);
+	/* dword_3 is reserved, hence it is set to 0 */
+	req_desc->header.dword_3 = 0;
+
+	req_desc->prd_table_length = 0;
+
+	ufshcd_cache_flush(req_desc, sizeof(*req_desc));
+}
+
+static void ufshcd_prepare_utp_query_req_upiu(struct ufs_hba *hba,
+					      u32 upiu_flags)
+{
+	struct utp_upiu_req *ucd_req_ptr = hba->ucd_req_ptr;
+	struct ufs_query *query = &hba->dev_cmd.query;
+	u16 len = be16_to_cpu(query->request.upiu_req.length);
+
+	/* Query request header */
+	ucd_req_ptr->header.dword_0 =
+		UPIU_HEADER_DWORD(UPIU_TRANSACTION_QUERY_REQ,
+				  upiu_flags, 0, TASK_TAG);
+	ucd_req_ptr->header.dword_1 =
+		UPIU_HEADER_DWORD(0, query->request.query_func,
+				  0, 0);
+
+	/* Data segment length is only needed for WRITE_DESC */
+	if (query->request.upiu_req.opcode == UPIU_QUERY_OPCODE_WRITE_DESC)
+		ucd_req_ptr->header.dword_2 =
+			UPIU_HEADER_DWORD(0, 0, (len >> 8), (u8)len);
+	else
+		ucd_req_ptr->header.dword_2 = 0;
+
+	/* Copy the Query Request buffer as is */
+	memcpy(&ucd_req_ptr->qr, &query->request.upiu_req, QUERY_OSF_SIZE);
+
+	/* Copy the Descriptor */
+	if (query->request.upiu_req.opcode == UPIU_QUERY_OPCODE_WRITE_DESC) {
+		memcpy(ucd_req_ptr + 1, query->descriptor, len);
+		ufshcd_cache_flush(ucd_req_ptr, 2 * sizeof(*ucd_req_ptr));
+	} else {
+		ufshcd_cache_flush(ucd_req_ptr, sizeof(*ucd_req_ptr));
+	}
+
+	memset(hba->ucd_rsp_ptr, 0, sizeof(struct utp_upiu_rsp));
+	ufshcd_cache_flush(hba->ucd_rsp_ptr, sizeof(*hba->ucd_rsp_ptr));
+}
+
+static inline void ufshcd_prepare_utp_nop_upiu(struct ufs_hba *hba)
+{
+	struct utp_upiu_req *ucd_req_ptr = hba->ucd_req_ptr;
+
+	memset(ucd_req_ptr, 0, sizeof(struct utp_upiu_req));
+
+	/* command descriptor fields */
+	ucd_req_ptr->header.dword_0 =
+
UPIU_HEADER_DWORD(UPIU_TRANSACTION_NOP_OUT, 0, 0, TASK_TAG); + /* clear rest of the fields of basic header */ + ucd_req_ptr->header.dword_1 = 0; + ucd_req_ptr->header.dword_2 = 0; + + memset(hba->ucd_rsp_ptr, 0, sizeof(struct utp_upiu_rsp)); + + ufshcd_cache_flush(ucd_req_ptr, sizeof(*ucd_req_ptr)); + ufshcd_cache_flush(hba->ucd_rsp_ptr, sizeof(*hba->ucd_rsp_ptr)); +} + +/** + * ufshcd_comp_devman_upiu - UFS Protocol Information Unit(UPIU) + * for Device Management Purposes + */ +static int ufshcd_comp_devman_upiu(struct ufs_hba *hba, + enum dev_cmd_type cmd_type) +{ + u32 upiu_flags; + int ret = 0; + + hba->dev_cmd.type = cmd_type; + + ufshcd_prepare_req_desc_hdr(hba, &upiu_flags, DMA_NONE); + switch (cmd_type) { + case DEV_CMD_TYPE_QUERY: + ufshcd_prepare_utp_query_req_upiu(hba, upiu_flags); + break; + case DEV_CMD_TYPE_NOP: + ufshcd_prepare_utp_nop_upiu(hba); + break; + default: + ret = -EINVAL; + } + + return ret; +} + +static int ufshcd_send_command(struct ufs_hba *hba, unsigned int task_tag) +{ + unsigned long start; + u32 intr_status; + u32 enabled_intr_status; + + ufshcd_writel(hba, 1 << task_tag, REG_UTP_TRANSFER_REQ_DOOR_BELL); + + /* Make sure doorbell reg is updated before reading interrupt status */ + wmb(); + + start = get_timer(0); + do { + intr_status = ufshcd_readl(hba, REG_INTERRUPT_STATUS); + enabled_intr_status = intr_status & hba->intr_mask; + ufshcd_writel(hba, intr_status, REG_INTERRUPT_STATUS); + + if (get_timer(start) > QUERY_REQ_TIMEOUT) { + dev_err(hba->dev, + "Timedout waiting for UTP response\n"); + + return -ETIMEDOUT; + } + + if (enabled_intr_status & UFSHCD_ERROR_MASK) { + dev_err(hba->dev, "Error in status:%08x\n", + enabled_intr_status); + + return -1; + } + } while (!(enabled_intr_status & UTP_TRANSFER_REQ_COMPL)); + + return 0; +} + +/** + * ufshcd_get_req_rsp - returns the TR response transaction type + */ +static inline int ufshcd_get_req_rsp(struct utp_upiu_rsp *ucd_rsp_ptr) +{ + ufshcd_cache_invalidate(ucd_rsp_ptr, sizeof(*ucd_rsp_ptr)); + + return be32_to_cpu(ucd_rsp_ptr->header.dword_0) >> 24; +} + +/** + * ufshcd_get_tr_ocs - Get the UTRD Overall Command Status + * + */ +static inline int ufshcd_get_tr_ocs(struct ufs_hba *hba) +{ + struct utp_transfer_req_desc *req_desc = hba->utrdl; + + ufshcd_cache_invalidate(req_desc, sizeof(*req_desc)); + + return le32_to_cpu(req_desc->header.dword_2) & MASK_OCS; +} + +static inline int ufshcd_get_rsp_upiu_result(struct utp_upiu_rsp *ucd_rsp_ptr) +{ + return be32_to_cpu(ucd_rsp_ptr->header.dword_1) & MASK_RSP_UPIU_RESULT; +} + +static int ufshcd_check_query_response(struct ufs_hba *hba) +{ + struct ufs_query_res *query_res = &hba->dev_cmd.query.response; + + /* Get the UPIU response */ + query_res->response = ufshcd_get_rsp_upiu_result(hba->ucd_rsp_ptr) >> + UPIU_RSP_CODE_OFFSET; + return query_res->response; +} + +/** + * ufshcd_copy_query_response() - Copy the Query Response and the data + * descriptor + */ +static int ufshcd_copy_query_response(struct ufs_hba *hba) +{ + struct ufs_query_res *query_res = &hba->dev_cmd.query.response; + + memcpy(&query_res->upiu_res, &hba->ucd_rsp_ptr->qr, QUERY_OSF_SIZE); + + /* Get the descriptor */ + if (hba->dev_cmd.query.descriptor && + hba->ucd_rsp_ptr->qr.opcode == UPIU_QUERY_OPCODE_READ_DESC) { + u8 *descp = (u8 *)hba->ucd_rsp_ptr + + GENERAL_UPIU_REQUEST_SIZE; + u16 resp_len; + u16 buf_len; + + /* data segment length */ + resp_len = be32_to_cpu(hba->ucd_rsp_ptr->header.dword_2) & + MASK_QUERY_DATA_SEG_LEN; + buf_len = + 
be16_to_cpu(hba->dev_cmd.query.request.upiu_req.length); + if (likely(buf_len >= resp_len)) { + memcpy(hba->dev_cmd.query.descriptor, descp, resp_len); + } else { + dev_warn(hba->dev, + "%s: Response size is bigger than buffer\n", + __func__); + return -EINVAL; + } + } + + return 0; +} + +/** + * ufshcd_exec_dev_cmd - API for sending device management requests + */ +static int ufshcd_exec_dev_cmd(struct ufs_hba *hba, enum dev_cmd_type cmd_type, + int timeout) +{ + int err; + int resp; + + err = ufshcd_comp_devman_upiu(hba, cmd_type); + if (err) + return err; + + err = ufshcd_send_command(hba, TASK_TAG); + if (err) + return err; + + err = ufshcd_get_tr_ocs(hba); + if (err) { + dev_err(hba->dev, "Error in OCS:%d\n", err); + return -EINVAL; + } + + resp = ufshcd_get_req_rsp(hba->ucd_rsp_ptr); + switch (resp) { + case UPIU_TRANSACTION_NOP_IN: + break; + case UPIU_TRANSACTION_QUERY_RSP: + err = ufshcd_check_query_response(hba); + if (!err) + err = ufshcd_copy_query_response(hba); + break; + case UPIU_TRANSACTION_REJECT_UPIU: + /* TODO: handle Reject UPIU Response */ + err = -EPERM; + dev_err(hba->dev, "%s: Reject UPIU not fully implemented\n", + __func__); + break; + default: + err = -EINVAL; + dev_err(hba->dev, "%s: Invalid device management cmd response: %x\n", + __func__, resp); + } + + return err; +} + +/** + * ufshcd_init_query() - init the query response and request parameters + */ +static inline void ufshcd_init_query(struct ufs_hba *hba, + struct ufs_query_req **request, + struct ufs_query_res **response, + enum query_opcode opcode, + u8 idn, u8 index, u8 selector) +{ + *request = &hba->dev_cmd.query.request; + *response = &hba->dev_cmd.query.response; + memset(*request, 0, sizeof(struct ufs_query_req)); + memset(*response, 0, sizeof(struct ufs_query_res)); + (*request)->upiu_req.opcode = opcode; + (*request)->upiu_req.idn = idn; + (*request)->upiu_req.index = index; + (*request)->upiu_req.selector = selector; +} + +/** + * ufshcd_query_flag() - API function for sending flag query requests + */ +static int ufshcd_query_flag(struct ufs_hba *hba, enum query_opcode opcode, + enum flag_idn idn, bool *flag_res) +{ + struct ufs_query_req *request = NULL; + struct ufs_query_res *response = NULL; + int err, index = 0, selector = 0; + int timeout = QUERY_REQ_TIMEOUT; + + ufshcd_init_query(hba, &request, &response, opcode, idn, index, + selector); + + switch (opcode) { + case UPIU_QUERY_OPCODE_SET_FLAG: + case UPIU_QUERY_OPCODE_CLEAR_FLAG: + case UPIU_QUERY_OPCODE_TOGGLE_FLAG: + request->query_func = UPIU_QUERY_FUNC_STANDARD_WRITE_REQUEST; + break; + case UPIU_QUERY_OPCODE_READ_FLAG: + request->query_func = UPIU_QUERY_FUNC_STANDARD_READ_REQUEST; + if (!flag_res) { + /* No dummy reads */ + dev_err(hba->dev, "%s: Invalid argument for read request\n", + __func__); + err = -EINVAL; + goto out; + } + break; + default: + dev_err(hba->dev, + "%s: Expected query flag opcode but got = %d\n", + __func__, opcode); + err = -EINVAL; + goto out; + } + + err = ufshcd_exec_dev_cmd(hba, DEV_CMD_TYPE_QUERY, timeout); + + if (err) { + dev_err(hba->dev, + "%s: Sending flag query for idn %d failed, err = %d\n", + __func__, idn, err); + goto out; + } + + if (flag_res) + *flag_res = (be32_to_cpu(response->upiu_res.value) & + MASK_QUERY_UPIU_FLAG_LOC) & 0x1; + +out: + return err; +} + +static int ufshcd_query_flag_retry(struct ufs_hba *hba, + enum query_opcode opcode, + enum flag_idn idn, bool *flag_res) +{ + int ret; + int retries; + + for (retries = 0; retries < QUERY_REQ_RETRIES; retries++) { + ret = 
ufshcd_query_flag(hba, opcode, idn, flag_res);
+		if (ret)
+			dev_dbg(hba->dev,
+				"%s: failed with error %d, retries %d\n",
+				__func__, ret, retries);
+		else
+			break;
+	}
+
+	if (ret)
+		dev_err(hba->dev,
+			"%s: query flag, opcode %d, idn %d, failed with error %d after %d retries\n",
+			__func__, opcode, idn, ret, retries);
+	return ret;
+}
+
+static int __ufshcd_query_descriptor(struct ufs_hba *hba,
+				     enum query_opcode opcode,
+				     enum desc_idn idn, u8 index, u8 selector,
+				     u8 *desc_buf, int *buf_len)
+{
+	struct ufs_query_req *request = NULL;
+	struct ufs_query_res *response = NULL;
+	int err;
+
+	if (!desc_buf) {
+		dev_err(hba->dev, "%s: descriptor buffer required for opcode 0x%x\n",
+			__func__, opcode);
+		err = -EINVAL;
+		goto out;
+	}
+
+	if (*buf_len < QUERY_DESC_MIN_SIZE || *buf_len > QUERY_DESC_MAX_SIZE) {
+		dev_err(hba->dev, "%s: descriptor buffer size (%d) is out of range\n",
+			__func__, *buf_len);
+		err = -EINVAL;
+		goto out;
+	}
+
+	ufshcd_init_query(hba, &request, &response, opcode, idn, index,
+			  selector);
+	hba->dev_cmd.query.descriptor = desc_buf;
+	request->upiu_req.length = cpu_to_be16(*buf_len);
+
+	switch (opcode) {
+	case UPIU_QUERY_OPCODE_WRITE_DESC:
+		request->query_func = UPIU_QUERY_FUNC_STANDARD_WRITE_REQUEST;
+		break;
+	case UPIU_QUERY_OPCODE_READ_DESC:
+		request->query_func = UPIU_QUERY_FUNC_STANDARD_READ_REQUEST;
+		break;
+	default:
+		dev_err(hba->dev, "%s: Expected query descriptor opcode but got = 0x%.2x\n",
+			__func__, opcode);
+		err = -EINVAL;
+		goto out;
+	}
+
+	err = ufshcd_exec_dev_cmd(hba, DEV_CMD_TYPE_QUERY, QUERY_REQ_TIMEOUT);
+
+	if (err) {
+		dev_err(hba->dev, "%s: opcode 0x%.2x for idn %d failed, index %d, err = %d\n",
+			__func__, opcode, idn, index, err);
+		goto out;
+	}
+
+	hba->dev_cmd.query.descriptor = NULL;
+	*buf_len = be16_to_cpu(response->upiu_res.length);
+
+out:
+	return err;
+}
+
+/**
+ * ufshcd_query_descriptor_retry - API function for sending descriptor requests
+ */
+static int ufshcd_query_descriptor_retry(struct ufs_hba *hba, enum query_opcode opcode,
+					 enum desc_idn idn, u8 index, u8 selector,
+					 u8 *desc_buf, int *buf_len)
+{
+	int err;
+	int retries;
+
+	for (retries = QUERY_REQ_RETRIES; retries > 0; retries--) {
+		err = __ufshcd_query_descriptor(hba, opcode, idn, index,
+						selector, desc_buf, buf_len);
+		if (!err || err == -EINVAL)
+			break;
+	}
+
+	return err;
+}
+
+/**
+ * ufshcd_read_desc_length - read the specified descriptor length from header
+ */
+static int ufshcd_read_desc_length(struct ufs_hba *hba, enum desc_idn desc_id,
+				   int desc_index, int *desc_length)
+{
+	int ret;
+	u8 header[QUERY_DESC_HDR_SIZE];
+	int header_len = QUERY_DESC_HDR_SIZE;
+
+	if (desc_id >= QUERY_DESC_IDN_MAX)
+		return -EINVAL;
+
+	ret = ufshcd_query_descriptor_retry(hba, UPIU_QUERY_OPCODE_READ_DESC,
+					    desc_id, desc_index, 0, header,
+					    &header_len);
+
+	if (ret) {
+		dev_err(hba->dev, "%s: Failed to get descriptor header id %d\n",
+			__func__, desc_id);
+		return ret;
+	} else if (desc_id != header[QUERY_DESC_DESC_TYPE_OFFSET]) {
+		dev_warn(hba->dev, "%s: descriptor header id %d and desc_id %d mismatch\n",
+			 __func__, header[QUERY_DESC_DESC_TYPE_OFFSET],
+			 desc_id);
+		ret = -EINVAL;
+	}
+
+	*desc_length = header[QUERY_DESC_LENGTH_OFFSET];
+
+	return ret;
+}
+
+static void ufshcd_init_desc_sizes(struct ufs_hba *hba)
+{
+	int err;
+
+	err = ufshcd_read_desc_length(hba, QUERY_DESC_IDN_DEVICE, 0,
+				      &hba->desc_size.dev_desc);
+	if (err)
+		hba->desc_size.dev_desc = QUERY_DESC_DEVICE_DEF_SIZE;
+
+	err = ufshcd_read_desc_length(hba, QUERY_DESC_IDN_POWER, 0,
+
&hba->desc_size.pwr_desc); + if (err) + hba->desc_size.pwr_desc = QUERY_DESC_POWER_DEF_SIZE; + + err = ufshcd_read_desc_length(hba, QUERY_DESC_IDN_INTERCONNECT, 0, + &hba->desc_size.interc_desc); + if (err) + hba->desc_size.interc_desc = QUERY_DESC_INTERCONNECT_DEF_SIZE; + + err = ufshcd_read_desc_length(hba, QUERY_DESC_IDN_CONFIGURATION, 0, + &hba->desc_size.conf_desc); + if (err) + hba->desc_size.conf_desc = QUERY_DESC_CONFIGURATION_DEF_SIZE; + + err = ufshcd_read_desc_length(hba, QUERY_DESC_IDN_UNIT, 0, + &hba->desc_size.unit_desc); + if (err) + hba->desc_size.unit_desc = QUERY_DESC_UNIT_DEF_SIZE; + + err = ufshcd_read_desc_length(hba, QUERY_DESC_IDN_GEOMETRY, 0, + &hba->desc_size.geom_desc); + if (err) + hba->desc_size.geom_desc = QUERY_DESC_GEOMETRY_DEF_SIZE; + + err = ufshcd_read_desc_length(hba, QUERY_DESC_IDN_HEALTH, 0, + &hba->desc_size.hlth_desc); + if (err) + hba->desc_size.hlth_desc = QUERY_DESC_HEALTH_DEF_SIZE; +} + +/** + * ufshcd_map_desc_id_to_length - map descriptor IDN to its length + * + */ +static int ufshcd_map_desc_id_to_length(struct ufs_hba *hba, enum desc_idn desc_id, + int *desc_len) +{ + switch (desc_id) { + case QUERY_DESC_IDN_DEVICE: + *desc_len = hba->desc_size.dev_desc; + break; + case QUERY_DESC_IDN_POWER: + *desc_len = hba->desc_size.pwr_desc; + break; + case QUERY_DESC_IDN_GEOMETRY: + *desc_len = hba->desc_size.geom_desc; + break; + case QUERY_DESC_IDN_CONFIGURATION: + *desc_len = hba->desc_size.conf_desc; + break; + case QUERY_DESC_IDN_UNIT: + *desc_len = hba->desc_size.unit_desc; + break; + case QUERY_DESC_IDN_INTERCONNECT: + *desc_len = hba->desc_size.interc_desc; + break; + case QUERY_DESC_IDN_STRING: + *desc_len = QUERY_DESC_MAX_SIZE; + break; + case QUERY_DESC_IDN_HEALTH: + *desc_len = hba->desc_size.hlth_desc; + break; + case QUERY_DESC_IDN_RFU_0: + case QUERY_DESC_IDN_RFU_1: + *desc_len = 0; + break; + default: + *desc_len = 0; + return -EINVAL; + } + return 0; +} + +/** + * ufshcd_read_desc_param - read the specified descriptor parameter + * + */ +static int ufshcd_read_desc_param(struct ufs_hba *hba, enum desc_idn desc_id, + int desc_index, u8 param_offset, + u8 *param_read_buf, u8 param_size) +{ + int ret; + u8 *desc_buf; + int buff_len; + bool is_kmalloc = true; + + /* Safety check */ + if (desc_id >= QUERY_DESC_IDN_MAX || !param_size) + return -EINVAL; + + /* Get the max length of descriptor from structure filled up at probe + * time. + */ + ret = ufshcd_map_desc_id_to_length(hba, desc_id, &buff_len); + + /* Sanity checks */ + if (ret || !buff_len) { + dev_err(hba->dev, "%s: Failed to get full descriptor length\n", + __func__); + return ret; + } + + /* Check whether we need temp memory */ + if (param_offset != 0 || param_size < buff_len) { + desc_buf = kmalloc(buff_len, GFP_KERNEL); + if (!desc_buf) + return -ENOMEM; + } else { + desc_buf = param_read_buf; + is_kmalloc = false; + } + + /* Request for full descriptor */ + ret = ufshcd_query_descriptor_retry(hba, UPIU_QUERY_OPCODE_READ_DESC, + desc_id, desc_index, 0, desc_buf, + &buff_len); + + if (ret) { + dev_err(hba->dev, "%s: Failed reading descriptor. 
desc_id %d, desc_index %d, param_offset %d, ret %d\n",
+			__func__, desc_id, desc_index, param_offset, ret);
+		goto out;
+	}
+
+	/* Sanity check */
+	if (desc_buf[QUERY_DESC_DESC_TYPE_OFFSET] != desc_id) {
+		dev_err(hba->dev, "%s: invalid desc_id %d in descriptor header\n",
+			__func__, desc_buf[QUERY_DESC_DESC_TYPE_OFFSET]);
+		ret = -EINVAL;
+		goto out;
+	}
+
+	/* Check whether we will not copy more data than available */
+	if (is_kmalloc && param_size > buff_len)
+		param_size = buff_len;
+
+	if (is_kmalloc)
+		memcpy(param_read_buf, &desc_buf[param_offset], param_size);
+out:
+	if (is_kmalloc)
+		kfree(desc_buf);
+	return ret;
+}
+
+/* replace non-printable or non-ASCII characters with spaces */
+static inline void ufshcd_remove_non_printable(uint8_t *val)
+{
+	if (!val)
+		return;
+
+	if (*val < 0x20 || *val > 0x7e)
+		*val = ' ';
+}
+
+/**
+ * ufshcd_uic_pwr_ctrl - executes UIC commands (which affect the link power
+ * state) and waits for them to take effect.
+ *
+ */
+static int ufshcd_uic_pwr_ctrl(struct ufs_hba *hba, struct uic_command *cmd)
+{
+	unsigned long start = 0;
+	u8 status;
+	int ret;
+
+	ret = ufshcd_send_uic_cmd(hba, cmd);
+	if (ret) {
+		dev_err(hba->dev,
+			"pwr ctrl cmd 0x%x with mode 0x%x uic error %d\n",
+			cmd->command, cmd->argument3, ret);
+
+		return ret;
+	}
+
+	start = get_timer(0);
+	do {
+		status = ufshcd_get_upmcrs(hba);
+		if (get_timer(start) > UFS_UIC_CMD_TIMEOUT) {
+			dev_err(hba->dev,
+				"pwr ctrl cmd 0x%x failed, host upmcrs:0x%x\n",
+				cmd->command, status);
+			ret = (status != PWR_OK) ? status : -1;
+			break;
+		}
+	} while (status != PWR_LOCAL);
+
+	return ret;
+}
+
+/**
+ * ufshcd_uic_change_pwr_mode - Perform the UIC power mode change
+ * using DME_SET primitives.
+ */
+static int ufshcd_uic_change_pwr_mode(struct ufs_hba *hba, u8 mode)
+{
+	struct uic_command uic_cmd = {0};
+	int ret;
+
+	uic_cmd.command = UIC_CMD_DME_SET;
+	uic_cmd.argument1 = UIC_ARG_MIB(PA_PWRMODE);
+	uic_cmd.argument3 = mode;
+	ret = ufshcd_uic_pwr_ctrl(hba, &uic_cmd);
+
+	return ret;
+}
+
+static
+void ufshcd_prepare_utp_scsi_cmd_upiu(struct ufs_hba *hba,
+				      struct scsi_cmd *pccb, u32 upiu_flags)
+{
+	struct utp_upiu_req *ucd_req_ptr = hba->ucd_req_ptr;
+	unsigned int cdb_len;
+
+	/* command descriptor fields */
+	ucd_req_ptr->header.dword_0 =
+		UPIU_HEADER_DWORD(UPIU_TRANSACTION_COMMAND, upiu_flags,
+				  pccb->lun, TASK_TAG);
+	ucd_req_ptr->header.dword_1 =
+		UPIU_HEADER_DWORD(UPIU_COMMAND_SET_TYPE_SCSI, 0, 0, 0);
+
+	/* Total EHS length and Data segment length will be zero */
+	ucd_req_ptr->header.dword_2 = 0;
+
+	ucd_req_ptr->sc.exp_data_transfer_len = cpu_to_be32(pccb->datalen);
+
+	cdb_len = min_t(unsigned short, pccb->cmdlen, UFS_CDB_SIZE);
+	memset(ucd_req_ptr->sc.cdb, 0, UFS_CDB_SIZE);
+	memcpy(ucd_req_ptr->sc.cdb, pccb->cmd, cdb_len);
+
+	memset(hba->ucd_rsp_ptr, 0, sizeof(struct utp_upiu_rsp));
+	ufshcd_cache_flush(ucd_req_ptr, sizeof(*ucd_req_ptr));
+	ufshcd_cache_flush(hba->ucd_rsp_ptr, sizeof(*hba->ucd_rsp_ptr));
+}
+
+static inline void prepare_prdt_desc(struct ufshcd_sg_entry *entry,
+				     unsigned char *buf, ulong len)
+{
+	entry->size = cpu_to_le32(len) | GENMASK(1, 0);
+	entry->base_addr = cpu_to_le32(lower_32_bits((unsigned long)buf));
+	entry->upper_addr = cpu_to_le32(upper_32_bits((unsigned long)buf));
+}
+
+static void prepare_prdt_table(struct ufs_hba *hba, struct scsi_cmd *pccb)
+{
+	struct utp_transfer_req_desc *req_desc = hba->utrdl;
+	struct ufshcd_sg_entry *prd_table = hba->ucd_prdt_ptr;
+	ulong datalen = pccb->datalen;
+	int table_length;
+	u8 *buf;
+	int i;
+
+	if
(!datalen) { + req_desc->prd_table_length = 0; + ufshcd_cache_flush(req_desc, sizeof(*req_desc)); + return; + } + + table_length = DIV_ROUND_UP(pccb->datalen, MAX_PRDT_ENTRY); + buf = pccb->pdata; + i = table_length; + while (--i) { + prepare_prdt_desc(&prd_table[table_length - i - 1], buf, + MAX_PRDT_ENTRY - 1); + buf += MAX_PRDT_ENTRY; + datalen -= MAX_PRDT_ENTRY; + } + + prepare_prdt_desc(&prd_table[table_length - i - 1], buf, datalen - 1); + + req_desc->prd_table_length = table_length; + ufshcd_cache_flush(prd_table, sizeof(*prd_table) * table_length); + ufshcd_cache_flush(req_desc, sizeof(*req_desc)); +} + +static int ufs_scsi_exec(struct udevice *scsi_dev, struct scsi_cmd *pccb) +{ + struct ufs_hba *hba = dev_get_uclass_priv(scsi_dev->parent); + u32 upiu_flags; + int ocs, result = 0; + u8 scsi_status; + + ufshcd_prepare_req_desc_hdr(hba, &upiu_flags, pccb->dma_dir); + ufshcd_prepare_utp_scsi_cmd_upiu(hba, pccb, upiu_flags); + prepare_prdt_table(hba, pccb); + + ufshcd_cache_flush(pccb->pdata, pccb->datalen); + + ufshcd_send_command(hba, TASK_TAG); + + ufshcd_cache_invalidate(pccb->pdata, pccb->datalen); + + ocs = ufshcd_get_tr_ocs(hba); + switch (ocs) { + case OCS_SUCCESS: + result = ufshcd_get_req_rsp(hba->ucd_rsp_ptr); + switch (result) { + case UPIU_TRANSACTION_RESPONSE: + result = ufshcd_get_rsp_upiu_result(hba->ucd_rsp_ptr); + + scsi_status = result & MASK_SCSI_STATUS; + if (scsi_status) + return -EINVAL; + + break; + case UPIU_TRANSACTION_REJECT_UPIU: + /* TODO: handle Reject UPIU Response */ + dev_err(hba->dev, + "Reject UPIU not fully implemented\n"); + return -EINVAL; + default: + dev_err(hba->dev, + "Unexpected request response code = %x\n", + result); + return -EINVAL; + } + break; + default: + dev_err(hba->dev, "OCS error from controller = %x\n", ocs); + return -EINVAL; + } + + return 0; +} + +static inline int ufshcd_read_desc(struct ufs_hba *hba, enum desc_idn desc_id, + int desc_index, u8 *buf, u32 size) +{ + return ufshcd_read_desc_param(hba, desc_id, desc_index, 0, buf, size); +} + +static int ufshcd_read_device_desc(struct ufs_hba *hba, u8 *buf, u32 size) +{ + return ufshcd_read_desc(hba, QUERY_DESC_IDN_DEVICE, 0, buf, size); +} + +/** + * ufshcd_read_string_desc - read string descriptor + * + */ +static int ufshcd_read_string_desc(struct ufs_hba *hba, int desc_index, + u8 *buf, u32 size, bool ascii) +{ + int err = 0; + + err = ufshcd_read_desc(hba, QUERY_DESC_IDN_STRING, desc_index, buf, + size); + + if (err) { + dev_err(hba->dev, "%s: reading String Desc failed after %d retries. 
err = %d\n", + __func__, QUERY_REQ_RETRIES, err); + goto out; + } + + if (ascii) { + int desc_len; + int ascii_len; + int i; + u8 *buff_ascii; + + desc_len = buf[0]; + /* remove header and divide by 2 to move from UTF16 to UTF8 */ + ascii_len = (desc_len - QUERY_DESC_HDR_SIZE) / 2 + 1; + if (size < ascii_len + QUERY_DESC_HDR_SIZE) { + dev_err(hba->dev, "%s: buffer allocated size is too small\n", + __func__); + err = -ENOMEM; + goto out; + } + + buff_ascii = kmalloc(ascii_len, GFP_KERNEL); + if (!buff_ascii) { + err = -ENOMEM; + goto out; + } + + /* + * the descriptor contains string in UTF16 format + * we need to convert to utf-8 so it can be displayed + */ + utf16_to_utf8(buff_ascii, + (uint16_t *)&buf[QUERY_DESC_HDR_SIZE], ascii_len); + + /* replace non-printable or non-ASCII characters with spaces */ + for (i = 0; i < ascii_len; i++) + ufshcd_remove_non_printable(&buff_ascii[i]); + + memset(buf + QUERY_DESC_HDR_SIZE, 0, + size - QUERY_DESC_HDR_SIZE); + memcpy(buf + QUERY_DESC_HDR_SIZE, buff_ascii, ascii_len); + buf[QUERY_DESC_LENGTH_OFFSET] = ascii_len + QUERY_DESC_HDR_SIZE; + kfree(buff_ascii); + } +out: + return err; +} + +static int ufs_get_device_desc(struct ufs_hba *hba, + struct ufs_dev_desc *dev_desc) +{ + int err; + size_t buff_len; + u8 model_index; + u8 *desc_buf; + + buff_len = max_t(size_t, hba->desc_size.dev_desc, + QUERY_DESC_MAX_SIZE + 1); + desc_buf = kmalloc(buff_len, GFP_KERNEL); + if (!desc_buf) { + err = -ENOMEM; + goto out; + } + + err = ufshcd_read_device_desc(hba, desc_buf, hba->desc_size.dev_desc); + if (err) { + dev_err(hba->dev, "%s: Failed reading Device Desc. err = %d\n", + __func__, err); + goto out; + } + + /* + * getting vendor (manufacturerID) and Bank Index in big endian + * format + */ + dev_desc->wmanufacturerid = desc_buf[DEVICE_DESC_PARAM_MANF_ID] << 8 | + desc_buf[DEVICE_DESC_PARAM_MANF_ID + 1]; + + model_index = desc_buf[DEVICE_DESC_PARAM_PRDCT_NAME]; + + /* Zero-pad entire buffer for string termination. */ + memset(desc_buf, 0, buff_len); + + err = ufshcd_read_string_desc(hba, model_index, desc_buf, + QUERY_DESC_MAX_SIZE, true/*ASCII*/); + if (err) { + dev_err(hba->dev, "%s: Failed reading Product Name. err = %d\n", + __func__, err); + goto out; + } + + desc_buf[QUERY_DESC_MAX_SIZE] = '\0'; + strlcpy(dev_desc->model, (char *)(desc_buf + QUERY_DESC_HDR_SIZE), + min_t(u8, desc_buf[QUERY_DESC_LENGTH_OFFSET], + MAX_MODEL_LEN)); + + /* Null terminate the model string */ + dev_desc->model[MAX_MODEL_LEN] = '\0'; + +out: + kfree(desc_buf); + return err; +} + +/** + * ufshcd_get_max_pwr_mode - reads the max power mode negotiated with device + */ +static int ufshcd_get_max_pwr_mode(struct ufs_hba *hba) +{ + struct ufs_pa_layer_attr *pwr_info = &hba->max_pwr_info.info; + + if (hba->max_pwr_info.is_valid) + return 0; + + if (hba->quirks & UFSHCD_QUIRK_HIBERN_FASTAUTO) { + pwr_info->pwr_tx = FASTAUTO_MODE; + pwr_info->pwr_rx = FASTAUTO_MODE; + } else { + pwr_info->pwr_tx = FAST_MODE; + pwr_info->pwr_rx = FAST_MODE; + } + pwr_info->hs_rate = PA_HS_MODE_B; + + /* Get the connected lane count */ + ufshcd_dme_get(hba, UIC_ARG_MIB(PA_CONNECTEDRXDATALANES), + &pwr_info->lane_rx); + ufshcd_dme_get(hba, UIC_ARG_MIB(PA_CONNECTEDTXDATALANES), + &pwr_info->lane_tx); + + if (!pwr_info->lane_rx || !pwr_info->lane_tx) { + dev_err(hba->dev, "%s: invalid connected lanes value. rx=%d, tx=%d\n", + __func__, pwr_info->lane_rx, pwr_info->lane_tx); + return -EINVAL; + } + + /* + * First, get the maximum gears of HS speed. 
+ * If a zero value, it means there is no HSGEAR capability. + * Then, get the maximum gears of PWM speed. + */ + ufshcd_dme_get(hba, UIC_ARG_MIB(PA_MAXRXHSGEAR), &pwr_info->gear_rx); + if (!pwr_info->gear_rx) { + ufshcd_dme_get(hba, UIC_ARG_MIB(PA_MAXRXPWMGEAR), + &pwr_info->gear_rx); + if (!pwr_info->gear_rx) { + dev_err(hba->dev, "%s: invalid max pwm rx gear read = %d\n", + __func__, pwr_info->gear_rx); + return -EINVAL; + } + pwr_info->pwr_rx = SLOW_MODE; + } + + ufshcd_dme_peer_get(hba, UIC_ARG_MIB(PA_MAXRXHSGEAR), + &pwr_info->gear_tx); + if (!pwr_info->gear_tx) { + ufshcd_dme_peer_get(hba, UIC_ARG_MIB(PA_MAXRXPWMGEAR), + &pwr_info->gear_tx); + if (!pwr_info->gear_tx) { + dev_err(hba->dev, "%s: invalid max pwm tx gear read = %d\n", + __func__, pwr_info->gear_tx); + return -EINVAL; + } + pwr_info->pwr_tx = SLOW_MODE; + } + + hba->max_pwr_info.is_valid = true; + return ufshcd_ops_get_max_pwr_mode(hba, &hba->max_pwr_info); +} + +static int ufshcd_change_power_mode(struct ufs_hba *hba, + struct ufs_pa_layer_attr *pwr_mode) +{ + int ret; + + /* if already configured to the requested pwr_mode */ + if (pwr_mode->gear_rx == hba->pwr_info.gear_rx && + pwr_mode->gear_tx == hba->pwr_info.gear_tx && + pwr_mode->lane_rx == hba->pwr_info.lane_rx && + pwr_mode->lane_tx == hba->pwr_info.lane_tx && + pwr_mode->pwr_rx == hba->pwr_info.pwr_rx && + pwr_mode->pwr_tx == hba->pwr_info.pwr_tx && + pwr_mode->hs_rate == hba->pwr_info.hs_rate) { + dev_dbg(hba->dev, "%s: power already configured\n", __func__); + return 0; + } + + /* + * Configure attributes for power mode change with below. + * - PA_RXGEAR, PA_ACTIVERXDATALANES, PA_RXTERMINATION, + * - PA_TXGEAR, PA_ACTIVETXDATALANES, PA_TXTERMINATION, + * - PA_HSSERIES + */ + ufshcd_dme_set(hba, UIC_ARG_MIB(PA_RXGEAR), pwr_mode->gear_rx); + ufshcd_dme_set(hba, UIC_ARG_MIB(PA_ACTIVERXDATALANES), + pwr_mode->lane_rx); + if (pwr_mode->pwr_rx == FASTAUTO_MODE || pwr_mode->pwr_rx == FAST_MODE) + ufshcd_dme_set(hba, UIC_ARG_MIB(PA_RXTERMINATION), TRUE); + else + ufshcd_dme_set(hba, UIC_ARG_MIB(PA_RXTERMINATION), FALSE); + + ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TXGEAR), pwr_mode->gear_tx); + ufshcd_dme_set(hba, UIC_ARG_MIB(PA_ACTIVETXDATALANES), + pwr_mode->lane_tx); + if (pwr_mode->pwr_tx == FASTAUTO_MODE || pwr_mode->pwr_tx == FAST_MODE) + ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TXTERMINATION), TRUE); + else + ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TXTERMINATION), FALSE); + + if (pwr_mode->pwr_rx == FASTAUTO_MODE || + pwr_mode->pwr_tx == FASTAUTO_MODE || + pwr_mode->pwr_rx == FAST_MODE || + pwr_mode->pwr_tx == FAST_MODE) + ufshcd_dme_set(hba, UIC_ARG_MIB(PA_HSSERIES), + pwr_mode->hs_rate); + + ret = ufshcd_uic_change_pwr_mode(hba, pwr_mode->pwr_rx << 4 | + pwr_mode->pwr_tx); + + if (ret) { + dev_err(hba->dev, + "%s: power mode change failed %d\n", __func__, ret); + + return ret; + } + + /* Copy new Power Mode to power info */ + memcpy(&hba->pwr_info, pwr_mode, sizeof(struct ufs_pa_layer_attr)); + + return ret; +} + +/** + * ufshcd_verify_dev_init() - Verify device initialization + * + */ +static int ufshcd_verify_dev_init(struct ufs_hba *hba) +{ + int retries; + int err; + + for (retries = NOP_OUT_RETRIES; retries > 0; retries--) { + err = ufshcd_exec_dev_cmd(hba, DEV_CMD_TYPE_NOP, + NOP_OUT_TIMEOUT); + if (!err || err == -ETIMEDOUT) + break; + + dev_dbg(hba->dev, "%s: error %d retrying\n", __func__, err); + } + + if (err) + dev_err(hba->dev, "%s: NOP OUT failed %d\n", __func__, err); + + return err; +} + +/** + * ufshcd_complete_dev_init() - checks device readiness + */ 
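Setting fDeviceInit kicks off the device's own initialization; the function below then polls the same flag until the device clears it. A compilable sketch of that set-then-poll shape with the query helpers stubbed out follows; the _stub names and the three-read completion model are inventions of this example, not driver API.

#include <stdbool.h>
#include <stdio.h>

static int polls_left = 3;	/* model: device clears the flag on the 3rd read */

static int set_flag_stub(void)		{ return 0; }
static int read_flag_stub(bool *res)	{ *res = --polls_left > 0; return 0; }

static int complete_dev_init_sketch(void)
{
	bool flag_res = true;
	int i, err = set_flag_stub();

	/* poll for max. 1000 iterations for fDeviceInit to clear */
	for (i = 0; i < 1000 && !err && flag_res; i++)
		err = read_flag_stub(&flag_res);

	if (!err && flag_res)
		err = -5;	/* device never cleared fDeviceInit */
	return err;
}

int main(void) { printf("dev init: %d\n", complete_dev_init_sketch()); return 0; }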
+static int ufshcd_complete_dev_init(struct ufs_hba *hba) +{ + int i; + int err; + bool flag_res = 1; + + err = ufshcd_query_flag_retry(hba, UPIU_QUERY_OPCODE_SET_FLAG, + QUERY_FLAG_IDN_FDEVICEINIT, NULL); + if (err) { + dev_err(hba->dev, + "%s setting fDeviceInit flag failed with error %d\n", + __func__, err); + goto out; + } + + /* poll for max. 1000 iterations for fDeviceInit flag to clear */ + for (i = 0; i < 1000 && !err && flag_res; i++) + err = ufshcd_query_flag_retry(hba, UPIU_QUERY_OPCODE_READ_FLAG, + QUERY_FLAG_IDN_FDEVICEINIT, + &flag_res); + + if (err) + dev_err(hba->dev, + "%s reading fDeviceInit flag failed with error %d\n", + __func__, err); + else if (flag_res) + dev_err(hba->dev, + "%s fDeviceInit was not cleared by the device\n", + __func__); + +out: + return err; +} + +static void ufshcd_def_desc_sizes(struct ufs_hba *hba) +{ + hba->desc_size.dev_desc = QUERY_DESC_DEVICE_DEF_SIZE; + hba->desc_size.pwr_desc = QUERY_DESC_POWER_DEF_SIZE; + hba->desc_size.interc_desc = QUERY_DESC_INTERCONNECT_DEF_SIZE; + hba->desc_size.conf_desc = QUERY_DESC_CONFIGURATION_DEF_SIZE; + hba->desc_size.unit_desc = QUERY_DESC_UNIT_DEF_SIZE; + hba->desc_size.geom_desc = QUERY_DESC_GEOMETRY_DEF_SIZE; + hba->desc_size.hlth_desc = QUERY_DESC_HEALTH_DEF_SIZE; +} + +static int ufs_start(struct ufs_hba *hba) +{ + struct ufs_dev_desc card = {0}; + int ret; + + ret = ufshcd_link_startup(hba); + if (ret) + return ret; + + ret = ufshcd_verify_dev_init(hba); + if (ret) + return ret; + + ret = ufshcd_complete_dev_init(hba); + if (ret) + return ret; + + /* Init check for device descriptor sizes */ + ufshcd_init_desc_sizes(hba); + + ret = ufs_get_device_desc(hba, &card); + if (ret) { + dev_err(hba->dev, "%s: Failed getting device info. err = %d\n", + __func__, ret); + + return ret; + } + + if (ufshcd_get_max_pwr_mode(hba)) { + dev_err(hba->dev, + "%s: Failed getting max supported power mode\n", + __func__); + } else { + ret = ufshcd_change_power_mode(hba, &hba->max_pwr_info.info); + if (ret) { + dev_err(hba->dev, "%s: Failed setting power mode, err = %d\n", + __func__, ret); + + return ret; + } + + debug("UFS Device %s is up!\n", hba->dev->name); + ufshcd_print_pwr_info(hba); + } + + return 0; +} + +int ufshcd_probe(struct udevice *ufs_dev, struct ufs_hba_ops *hba_ops) +{ + struct ufs_hba *hba = dev_get_uclass_priv(ufs_dev); + struct scsi_plat *scsi_plat; + struct udevice *scsi_dev; + void __iomem *mmio_base; + int err; + + device_find_first_child(ufs_dev, &scsi_dev); + if (!scsi_dev) + return -ENODEV; + + scsi_plat = dev_get_uclass_plat(scsi_dev); + scsi_plat->max_id = UFSHCD_MAX_ID; + scsi_plat->max_lun = UFS_MAX_LUNS; + scsi_plat->max_bytes_per_req = UFS_MAX_BYTES; + + hba->dev = ufs_dev; + hba->ops = hba_ops; + + if (device_is_on_pci_bus(ufs_dev)) { + mmio_base = dm_pci_map_bar(ufs_dev, PCI_BASE_ADDRESS_0, 0, 0, + PCI_REGION_TYPE, PCI_REGION_MEM); + } else { + mmio_base = dev_read_addr_ptr(ufs_dev); + } + hba->mmio_base = mmio_base; + + /* Set descriptor lengths to specification defaults */ + ufshcd_def_desc_sizes(hba); + + ufshcd_ops_init(hba); + + /* Read capabilities registers */ + hba->capabilities = ufshcd_readl(hba, REG_CONTROLLER_CAPABILITIES); + if (hba->quirks & UFSHCD_QUIRK_BROKEN_64BIT_ADDRESS) + hba->capabilities &= ~MASK_64_ADDRESSING_SUPPORT; + + /* Get UFS version supported by the controller */ + hba->version = ufshcd_get_ufs_version(hba); + if (hba->version != UFSHCI_VERSION_10 && + hba->version != UFSHCI_VERSION_11 && + hba->version != UFSHCI_VERSION_20 && + hba->version != 
UFSHCI_VERSION_21 && + hba->version != UFSHCI_VERSION_30 && + hba->version != UFSHCI_VERSION_31 && + hba->version != UFSHCI_VERSION_40) + dev_err(hba->dev, "invalid UFS version 0x%x\n", + hba->version); + + /* Get Interrupt bit mask per version */ + hba->intr_mask = ufshcd_get_intr_mask(hba); + + /* Allocate memory for host memory space */ + err = ufshcd_memory_alloc(hba); + if (err) { + dev_err(hba->dev, "Memory allocation failed\n"); + return err; + } + + /* Configure Local data structures */ + ufshcd_host_memory_configure(hba); + + /* + * In order to avoid any spurious interrupt immediately after + * registering UFS controller interrupt handler, clear any pending UFS + * interrupt status and disable all the UFS interrupts. + */ + ufshcd_writel(hba, ufshcd_readl(hba, REG_INTERRUPT_STATUS), + REG_INTERRUPT_STATUS); + ufshcd_writel(hba, 0, REG_INTERRUPT_ENABLE); + + mb(); /* flush previous writes */ + + /* Reset the attached device */ + ufshcd_device_reset(hba); + + err = ufshcd_hba_enable(hba); + if (err) { + dev_err(hba->dev, "Host controller enable failed\n"); + return err; + } + + err = ufs_start(hba); + if (err) + return err; + + return 0; +} + +int ufs_scsi_bind(struct udevice *ufs_dev, struct udevice **scsi_devp) +{ + int ret = device_bind_driver(ufs_dev, "ufs_scsi", "ufs_scsi", + scsi_devp); + + return ret; +} + +#if IS_ENABLED(CONFIG_BOUNCE_BUFFER) +static int ufs_scsi_buffer_aligned(struct udevice *scsi_dev, struct bounce_buffer *state) +{ +#ifdef CONFIG_PHYS_64BIT + struct ufs_hba *hba = dev_get_uclass_priv(scsi_dev->parent); + uintptr_t ubuf = (uintptr_t)state->user_buffer; + size_t len = state->len_aligned; + + /* Check if below 32bit boundary */ + if ((hba->quirks & UFSHCD_QUIRK_BROKEN_64BIT_ADDRESS) && + ((ubuf >> 32) || (ubuf + len) >> 32)) { + dev_dbg(scsi_dev, "Buffer above 32bit boundary %lx-%lx\n", + ubuf, ubuf + len); + return 0; + } +#endif + return 1; +} +#endif /* CONFIG_BOUNCE_BUFFER */ + +static struct scsi_ops ufs_ops = { + .exec = ufs_scsi_exec, +#if IS_ENABLED(CONFIG_BOUNCE_BUFFER) + .buffer_aligned = ufs_scsi_buffer_aligned, +#endif /* CONFIG_BOUNCE_BUFFER */ +}; + +int ufs_probe_dev(int index) +{ + struct udevice *dev; + + return uclass_get_device(UCLASS_UFS, index, &dev); +} + +int ufs_probe(void) +{ + struct udevice *dev; + int ret, i; + + for (i = 0;; i++) { + ret = uclass_get_device(UCLASS_UFS, i, &dev); + if (ret == -ENODEV) + break; + } + + return 0; +} + +U_BOOT_DRIVER(ufs_scsi) = { + .id = UCLASS_SCSI, + .name = "ufs_scsi", + .ops = &ufs_ops, +}; diff --git a/drivers/ufs/ufs.h b/drivers/ufs/ufs.h new file mode 100644 index 00000000000..53137fae3a8 --- /dev/null +++ b/drivers/ufs/ufs.h @@ -0,0 +1,787 @@ +/* SPDX-License-Identifier: GPL-2.0+ */ +#ifndef __UFS_H +#define __UFS_H + +#include <linux/types.h> +#include <asm/io.h> +#include "ufshci.h" +#include "unipro.h" + +struct udevice; + +#define UFS_CDB_SIZE 16 +#define UPIU_TRANSACTION_UIC_CMD 0x1F +#define UIC_CMD_SIZE (sizeof(u32) * 4) +#define RESPONSE_UPIU_SENSE_DATA_LENGTH 18 +#define UFS_MAX_LUNS 0x7F + + +/* UFS device power modes */ +enum ufs_dev_pwr_mode { + UFS_ACTIVE_PWR_MODE = 1, + UFS_SLEEP_PWR_MODE = 2, + UFS_POWERDOWN_PWR_MODE = 3, +}; + +enum ufs_notify_change_status { + PRE_CHANGE, + POST_CHANGE, +}; + +struct ufs_pa_layer_attr { + u32 gear_rx; + u32 gear_tx; + u32 lane_rx; + u32 lane_tx; + u32 pwr_rx; + u32 pwr_tx; + u32 hs_rate; +}; + +struct ufs_pwr_mode_info { + bool is_valid; + struct ufs_pa_layer_attr info; +}; + +enum ufs_desc_def_size { + QUERY_DESC_DEVICE_DEF_SIZE = 0x40, + 
QUERY_DESC_CONFIGURATION_DEF_SIZE = 0x90, + QUERY_DESC_UNIT_DEF_SIZE = 0x23, + QUERY_DESC_INTERCONNECT_DEF_SIZE = 0x06, + QUERY_DESC_GEOMETRY_DEF_SIZE = 0x48, + QUERY_DESC_POWER_DEF_SIZE = 0x62, + QUERY_DESC_HEALTH_DEF_SIZE = 0x25, +}; + +struct ufs_desc_size { + int dev_desc; + int pwr_desc; + int geom_desc; + int interc_desc; + int unit_desc; + int conf_desc; + int hlth_desc; +}; + +/* + * Request Descriptor Definitions + */ + +/* Transfer request command type */ +enum { + UTP_CMD_TYPE_SCSI = 0x0, + UTP_CMD_TYPE_UFS = 0x1, + UTP_CMD_TYPE_DEV_MANAGE = 0x2, +}; + +/* UTP Transfer Request Command Offset */ +#define UPIU_COMMAND_TYPE_OFFSET 28 + +/* Offset of the response code in the UPIU header */ +#define UPIU_RSP_CODE_OFFSET 8 + +#define GENERAL_UPIU_REQUEST_SIZE (sizeof(struct utp_upiu_req)) +#define QUERY_DESC_MAX_SIZE 255 +#define QUERY_DESC_MIN_SIZE 2 +#define QUERY_DESC_HDR_SIZE 2 +#define QUERY_OSF_SIZE (GENERAL_UPIU_REQUEST_SIZE - \ + (sizeof(struct utp_upiu_header))) +#define RESPONSE_UPIU_SENSE_DATA_LENGTH 18 +#define UPIU_HEADER_DWORD(byte3, byte2, byte1, byte0)\ + cpu_to_be32(((byte3) << 24) | ((byte2) << 16) |\ + ((byte1) << 8) | (byte0)) +/* + * UFS Protocol Information Unit related definitions + */ + +/* Task management functions */ +enum { + UFS_ABORT_TASK = 0x01, + UFS_ABORT_TASK_SET = 0x02, + UFS_CLEAR_TASK_SET = 0x04, + UFS_LOGICAL_RESET = 0x08, + UFS_QUERY_TASK = 0x80, + UFS_QUERY_TASK_SET = 0x81, +}; + +/* UTP UPIU Transaction Codes Initiator to Target */ +enum { + UPIU_TRANSACTION_NOP_OUT = 0x00, + UPIU_TRANSACTION_COMMAND = 0x01, + UPIU_TRANSACTION_DATA_OUT = 0x02, + UPIU_TRANSACTION_TASK_REQ = 0x04, + UPIU_TRANSACTION_QUERY_REQ = 0x16, +}; + +/* UTP UPIU Transaction Codes Target to Initiator */ +enum { + UPIU_TRANSACTION_NOP_IN = 0x20, + UPIU_TRANSACTION_RESPONSE = 0x21, + UPIU_TRANSACTION_DATA_IN = 0x22, + UPIU_TRANSACTION_TASK_RSP = 0x24, + UPIU_TRANSACTION_READY_XFER = 0x31, + UPIU_TRANSACTION_QUERY_RSP = 0x36, + UPIU_TRANSACTION_REJECT_UPIU = 0x3F, +}; + +/* UPIU Read/Write flags */ +enum { + UPIU_CMD_FLAGS_NONE = 0x00, + UPIU_CMD_FLAGS_WRITE = 0x20, + UPIU_CMD_FLAGS_READ = 0x40, +}; + +/* UPIU Task Attributes */ +enum { + UPIU_TASK_ATTR_SIMPLE = 0x00, + UPIU_TASK_ATTR_ORDERED = 0x01, + UPIU_TASK_ATTR_HEADQ = 0x02, + UPIU_TASK_ATTR_ACA = 0x03, +}; + +/* UPIU Query request function */ +enum { + UPIU_QUERY_FUNC_STANDARD_READ_REQUEST = 0x01, + UPIU_QUERY_FUNC_STANDARD_WRITE_REQUEST = 0x81, +}; + +/* Offset of the response code in the UPIU header */ +#define UPIU_RSP_CODE_OFFSET 8 + +enum { + MASK_SCSI_STATUS = 0xFF, + MASK_TASK_RESPONSE = 0xFF00, + MASK_RSP_UPIU_RESULT = 0xFFFF, + MASK_QUERY_DATA_SEG_LEN = 0xFFFF, + MASK_RSP_UPIU_DATA_SEG_LEN = 0xFFFF, + MASK_RSP_EXCEPTION_EVENT = 0x10000, + MASK_TM_SERVICE_RESP = 0xFF, + MASK_TM_FUNC = 0xFF, +}; + +/* UTP QUERY Transaction Specific Fields OpCode */ +enum query_opcode { + UPIU_QUERY_OPCODE_NOP = 0x0, + UPIU_QUERY_OPCODE_READ_DESC = 0x1, + UPIU_QUERY_OPCODE_WRITE_DESC = 0x2, + UPIU_QUERY_OPCODE_READ_ATTR = 0x3, + UPIU_QUERY_OPCODE_WRITE_ATTR = 0x4, + UPIU_QUERY_OPCODE_READ_FLAG = 0x5, + UPIU_QUERY_OPCODE_SET_FLAG = 0x6, + UPIU_QUERY_OPCODE_CLEAR_FLAG = 0x7, + UPIU_QUERY_OPCODE_TOGGLE_FLAG = 0x8, +}; + +/* Query response result code */ +enum { + QUERY_RESULT_SUCCESS = 0x00, + QUERY_RESULT_NOT_READABLE = 0xF6, + QUERY_RESULT_NOT_WRITEABLE = 0xF7, + QUERY_RESULT_ALREADY_WRITTEN = 0xF8, + QUERY_RESULT_INVALID_LENGTH = 0xF9, + QUERY_RESULT_INVALID_VALUE = 0xFA, + QUERY_RESULT_INVALID_SELECTOR = 0xFB, + 
QUERY_RESULT_INVALID_INDEX = 0xFC, + QUERY_RESULT_INVALID_IDN = 0xFD, + QUERY_RESULT_INVALID_OPCODE = 0xFE, + QUERY_RESULT_GENERAL_FAILURE = 0xFF, +}; + +enum { + UPIU_COMMAND_SET_TYPE_SCSI = 0x0, + UPIU_COMMAND_SET_TYPE_UFS = 0x1, + UPIU_COMMAND_SET_TYPE_QUERY = 0x2, +}; + +/* Flag idn for Query Requests*/ +enum flag_idn { + QUERY_FLAG_IDN_FDEVICEINIT = 0x01, + QUERY_FLAG_IDN_PERMANENT_WPE = 0x02, + QUERY_FLAG_IDN_PWR_ON_WPE = 0x03, + QUERY_FLAG_IDN_BKOPS_EN = 0x04, + QUERY_FLAG_IDN_LIFE_SPAN_MODE_ENABLE = 0x05, + QUERY_FLAG_IDN_PURGE_ENABLE = 0x06, + QUERY_FLAG_IDN_RESERVED2 = 0x07, + QUERY_FLAG_IDN_FPHYRESOURCEREMOVAL = 0x08, + QUERY_FLAG_IDN_BUSY_RTC = 0x09, + QUERY_FLAG_IDN_RESERVED3 = 0x0A, + QUERY_FLAG_IDN_PERMANENTLY_DISABLE_FW_UPDATE = 0x0B, +}; + +/* Attribute idn for Query requests */ +enum attr_idn { + QUERY_ATTR_IDN_BOOT_LU_EN = 0x00, + QUERY_ATTR_IDN_RESERVED = 0x01, + QUERY_ATTR_IDN_POWER_MODE = 0x02, + QUERY_ATTR_IDN_ACTIVE_ICC_LVL = 0x03, + QUERY_ATTR_IDN_OOO_DATA_EN = 0x04, + QUERY_ATTR_IDN_BKOPS_STATUS = 0x05, + QUERY_ATTR_IDN_PURGE_STATUS = 0x06, + QUERY_ATTR_IDN_MAX_DATA_IN = 0x07, + QUERY_ATTR_IDN_MAX_DATA_OUT = 0x08, + QUERY_ATTR_IDN_DYN_CAP_NEEDED = 0x09, + QUERY_ATTR_IDN_REF_CLK_FREQ = 0x0A, + QUERY_ATTR_IDN_CONF_DESC_LOCK = 0x0B, + QUERY_ATTR_IDN_MAX_NUM_OF_RTT = 0x0C, + QUERY_ATTR_IDN_EE_CONTROL = 0x0D, + QUERY_ATTR_IDN_EE_STATUS = 0x0E, + QUERY_ATTR_IDN_SECONDS_PASSED = 0x0F, + QUERY_ATTR_IDN_CNTX_CONF = 0x10, + QUERY_ATTR_IDN_CORR_PRG_BLK_NUM = 0x11, + QUERY_ATTR_IDN_RESERVED2 = 0x12, + QUERY_ATTR_IDN_RESERVED3 = 0x13, + QUERY_ATTR_IDN_FFU_STATUS = 0x14, + QUERY_ATTR_IDN_PSA_STATE = 0x15, + QUERY_ATTR_IDN_PSA_DATA_SIZE = 0x16, +}; + +/* Descriptor idn for Query requests */ +enum desc_idn { + QUERY_DESC_IDN_DEVICE = 0x0, + QUERY_DESC_IDN_CONFIGURATION = 0x1, + QUERY_DESC_IDN_UNIT = 0x2, + QUERY_DESC_IDN_RFU_0 = 0x3, + QUERY_DESC_IDN_INTERCONNECT = 0x4, + QUERY_DESC_IDN_STRING = 0x5, + QUERY_DESC_IDN_RFU_1 = 0x6, + QUERY_DESC_IDN_GEOMETRY = 0x7, + QUERY_DESC_IDN_POWER = 0x8, + QUERY_DESC_IDN_HEALTH = 0x9, + QUERY_DESC_IDN_MAX, +}; + +enum desc_header_offset { + QUERY_DESC_LENGTH_OFFSET = 0x00, + QUERY_DESC_DESC_TYPE_OFFSET = 0x01, +}; + +/** + * struct utp_upiu_query - upiu request buffer structure for + * query request. 
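The query fields documented here travel in the transaction-specific area of the UPIU, while the first header word is packed with the UPIU_HEADER_DWORD() macro defined earlier in this header. A standalone sketch of that packing for a query request; byte-order conversion is left out, the real macro additionally wraps the value in cpu_to_be32().

#include <stdio.h>
#include <stdint.h>

#define UPIU_TRANSACTION_QUERY_REQ	0x16
#define TASK_TAG			0	/* single outstanding request */

/* Mirror of UPIU_HEADER_DWORD() without the cpu_to_be32() conversion */
static uint32_t upiu_header_dword(uint8_t b3, uint8_t b2, uint8_t b1, uint8_t b0)
{
	return ((uint32_t)b3 << 24) | ((uint32_t)b2 << 16) |
	       ((uint32_t)b1 << 8) | b0;
}

int main(void)
{
	/* transaction type, flags, LUN, task tag - as ufs.c builds dword_0 */
	uint32_t dword_0 = upiu_header_dword(UPIU_TRANSACTION_QUERY_REQ,
					     0, 0, TASK_TAG);

	printf("query UPIU dword_0 = 0x%08x\n", dword_0);
	return 0;
}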
+ * @opcode: command to perform B-0 + * @idn: a value that indicates the particular type of data B-1 + * @index: Index to further identify data B-2 + * @selector: Index to further identify data B-3 + * @reserved_osf: spec reserved field B-4,5 + * @length: number of descriptor bytes to read/write B-6,7 + * @value: Attribute value to be written DW-5 + * @reserved: spec reserved DW-6,7 + */ +struct utp_upiu_query { + __u8 opcode; + __u8 idn; + __u8 index; + __u8 selector; + __be16 reserved_osf; + __be16 length; + __be32 value; + __be32 reserved[2]; +}; + +/** + * struct utp_upiu_cmd - Command UPIU structure + * @data_transfer_len: Data Transfer Length DW-3 + * @cdb: Command Descriptor Block CDB DW-4 to DW-7 + */ +struct utp_upiu_cmd { + __be32 exp_data_transfer_len; + u8 cdb[UFS_CDB_SIZE]; +}; + +/** + * struct utp_upiu_req - general upiu request structure + * @header:UPIU header structure DW-0 to DW-2 + * @sc: fields structure for scsi command DW-3 to DW-7 + * @qr: fields structure for query request DW-3 to DW-7 + */ +struct utp_upiu_req { + struct utp_upiu_header header; + union { + struct utp_upiu_cmd sc; + struct utp_upiu_query qr; + struct utp_upiu_query tr; + /* use utp_upiu_query to host the 4 dwords of uic command */ + struct utp_upiu_query uc; + }; +}; + +/** + * struct utp_cmd_rsp - Response UPIU structure + * @residual_transfer_count: Residual transfer count DW-3 + * @reserved: Reserved double words DW-4 to DW-7 + * @sense_data_len: Sense data length DW-8 U16 + * @sense_data: Sense data field DW-8 to DW-12 + */ +struct utp_cmd_rsp { + __be32 residual_transfer_count; + __be32 reserved[4]; + __be16 sense_data_len; + u8 sense_data[RESPONSE_UPIU_SENSE_DATA_LENGTH]; +}; + +/** + * struct utp_upiu_rsp - general upiu response structure + * @header: UPIU header structure DW-0 to DW-2 + * @sr: fields structure for scsi command DW-3 to DW-12 + * @qr: fields structure for query request DW-3 to DW-7 + */ +struct utp_upiu_rsp { + struct utp_upiu_header header; + union { + struct utp_cmd_rsp sr; + struct utp_upiu_query qr; + }; +}; + +#define MAX_MODEL_LEN 16 +/** + * ufs_dev_desc - ufs device details from the device descriptor + * + * @wmanufacturerid: card details + * @model: card model + */ +struct ufs_dev_desc { + u16 wmanufacturerid; + char model[MAX_MODEL_LEN + 1]; +}; + +/* Device descriptor parameters offsets in bytes*/ +enum device_desc_param { + DEVICE_DESC_PARAM_LEN = 0x0, + DEVICE_DESC_PARAM_TYPE = 0x1, + DEVICE_DESC_PARAM_DEVICE_TYPE = 0x2, + DEVICE_DESC_PARAM_DEVICE_CLASS = 0x3, + DEVICE_DESC_PARAM_DEVICE_SUB_CLASS = 0x4, + DEVICE_DESC_PARAM_PRTCL = 0x5, + DEVICE_DESC_PARAM_NUM_LU = 0x6, + DEVICE_DESC_PARAM_NUM_WLU = 0x7, + DEVICE_DESC_PARAM_BOOT_ENBL = 0x8, + DEVICE_DESC_PARAM_DESC_ACCSS_ENBL = 0x9, + DEVICE_DESC_PARAM_INIT_PWR_MODE = 0xA, + DEVICE_DESC_PARAM_HIGH_PR_LUN = 0xB, + DEVICE_DESC_PARAM_SEC_RMV_TYPE = 0xC, + DEVICE_DESC_PARAM_SEC_LU = 0xD, + DEVICE_DESC_PARAM_BKOP_TERM_LT = 0xE, + DEVICE_DESC_PARAM_ACTVE_ICC_LVL = 0xF, + DEVICE_DESC_PARAM_SPEC_VER = 0x10, + DEVICE_DESC_PARAM_MANF_DATE = 0x12, + DEVICE_DESC_PARAM_MANF_NAME = 0x14, + DEVICE_DESC_PARAM_PRDCT_NAME = 0x15, + DEVICE_DESC_PARAM_SN = 0x16, + DEVICE_DESC_PARAM_OEM_ID = 0x17, + DEVICE_DESC_PARAM_MANF_ID = 0x18, + DEVICE_DESC_PARAM_UD_OFFSET = 0x1A, + DEVICE_DESC_PARAM_UD_LEN = 0x1B, + DEVICE_DESC_PARAM_RTT_CAP = 0x1C, + DEVICE_DESC_PARAM_FRQ_RTC = 0x1D, + DEVICE_DESC_PARAM_UFS_FEAT = 0x1F, + DEVICE_DESC_PARAM_FFU_TMT = 0x20, + DEVICE_DESC_PARAM_Q_DPTH = 0x21, + DEVICE_DESC_PARAM_DEV_VER = 0x22, + 
DEVICE_DESC_PARAM_NUM_SEC_WPA = 0x24, + DEVICE_DESC_PARAM_PSA_MAX_DATA = 0x25, + DEVICE_DESC_PARAM_PSA_TMT = 0x29, + DEVICE_DESC_PARAM_PRDCT_REV = 0x2A, +}; + +struct ufs_hba; + +enum { + UFSHCD_MAX_CHANNEL = 0, + UFSHCD_MAX_ID = 1, +}; + +enum dev_cmd_type { + DEV_CMD_TYPE_NOP = 0x0, + DEV_CMD_TYPE_QUERY = 0x1, +}; + +/** + * struct uic_command - UIC command structure + * @command: UIC command + * @argument1: UIC command argument 1 + * @argument2: UIC command argument 2 + * @argument3: UIC command argument 3 + * @cmd_active: Indicate if UIC command is outstanding + * @result: UIC command result + * @done: UIC command completion + */ +struct uic_command { + u32 command; + u32 argument1; + u32 argument2; + u32 argument3; + int cmd_active; + int result; +}; + +/* Host <-> Device UniPro Link state */ +enum uic_link_state { + UIC_LINK_OFF_STATE = 0, /* Link powered down or disabled */ + UIC_LINK_ACTIVE_STATE = 1, /* Link is in Fast/Slow/Sleep state */ + UIC_LINK_HIBERN8_STATE = 2, /* Link is in Hibernate state */ +}; + +/* UIC command interfaces for DME primitives */ +#define DME_LOCAL 0 +#define DME_PEER 1 +#define ATTR_SET_NOR 0 /* NORMAL */ +#define ATTR_SET_ST 1 /* STATIC */ + +int ufshcd_dme_set_attr(struct ufs_hba *hba, u32 attr_sel, + u8 attr_set, u32 mib_val, u8 peer); +int ufshcd_dme_get_attr(struct ufs_hba *hba, u32 attr_sel, + u32 *mib_val, u8 peer); + +static inline int ufshcd_dme_set(struct ufs_hba *hba, u32 attr_sel, + u32 mib_val) +{ + return ufshcd_dme_set_attr(hba, attr_sel, ATTR_SET_NOR, + mib_val, DME_LOCAL); +} + +static inline int ufshcd_dme_get(struct ufs_hba *hba, + u32 attr_sel, u32 *mib_val) +{ + return ufshcd_dme_get_attr(hba, attr_sel, mib_val, DME_LOCAL); +} + +static inline int ufshcd_dme_peer_get(struct ufs_hba *hba, + u32 attr_sel, u32 *mib_val) +{ + return ufshcd_dme_get_attr(hba, attr_sel, mib_val, DME_PEER); +} + +static inline int ufshcd_dme_peer_set(struct ufs_hba *hba, u32 attr_sel, + u32 mib_val) +{ + return ufshcd_dme_set_attr(hba, attr_sel, ATTR_SET_NOR, + mib_val, DME_PEER); +} + +/** + * struct ufs_query_req - parameters for building a query request + * @query_func: UPIU header query function + * @upiu_req: the query request data + */ +struct ufs_query_req { + u8 query_func; + struct utp_upiu_query upiu_req; +}; + +/** + * struct ufs_query_resp - UPIU QUERY + * @response: device response code + * @upiu_res: query response data + */ +struct ufs_query_res { + u8 response; + struct utp_upiu_query upiu_res; +}; + +/** + * struct ufs_query - holds relevant data structures for query request + * @request: request upiu and function + * @descriptor: buffer for sending/receiving descriptor + * @response: response upiu and response + */ +struct ufs_query { + struct ufs_query_req request; + u8 *descriptor; + struct ufs_query_res response; +}; + +/** + * struct ufs_dev_cmd - all assosiated fields with device management commands + * @type: device management command type - Query, NOP OUT + * @tag_wq: wait queue until free command slot is available + */ +struct ufs_dev_cmd { + enum dev_cmd_type type; + struct ufs_query query; +}; + +struct ufs_hba_ops { + int (*init)(struct ufs_hba *hba); + int (*get_max_pwr_mode)(struct ufs_hba *hba, + struct ufs_pwr_mode_info *max_pwr_info); + int (*hce_enable_notify)(struct ufs_hba *hba, + enum ufs_notify_change_status); + int (*link_startup_notify)(struct ufs_hba *hba, + enum ufs_notify_change_status); + int (*phy_initialization)(struct ufs_hba *hba); + int (*device_reset)(struct ufs_hba *hba); +}; + +enum ufshcd_quirks { + /* 
Interrupt aggregation support is broken */ + UFSHCD_QUIRK_BROKEN_INTR_AGGR = 1 << 0, + + /* + * delay before each dme command is required as the unipro + * layer has shown instabilities + */ + UFSHCD_QUIRK_DELAY_BEFORE_DME_CMDS = 1 << 1, + + /* + * If UFS host controller is having issue in processing LCC (Line + * Control Command) coming from device then enable this quirk. + * When this quirk is enabled, host controller driver should disable + * the LCC transmission on UFS device (by clearing TX_LCC_ENABLE + * attribute of device to 0). + */ + UFSHCD_QUIRK_BROKEN_LCC = 1 << 2, + + /* + * The attribute PA_RXHSUNTERMCAP specifies whether or not the + * inbound Link supports unterminated line in HS mode. Setting this + * attribute to 1 fixes moving to HS gear. + */ + UFSHCD_QUIRK_BROKEN_PA_RXHSUNTERMCAP = 1 << 3, + + /* + * This quirk needs to be enabled if the host controller only allows + * accessing the peer dme attributes in AUTO mode (FAST AUTO or + * SLOW AUTO). + */ + UFSHCD_QUIRK_DME_PEER_ACCESS_AUTO_MODE = 1 << 4, + + /* + * This quirk needs to be enabled if the host controller doesn't + * advertise the correct version in UFS_VER register. If this quirk + * is enabled, standard UFS host driver will call the vendor specific + * ops (get_ufs_hci_version) to get the correct version. + */ + UFSHCD_QUIRK_BROKEN_UFS_HCI_VERSION = 1 << 5, + + /* + * Clear handling for transfer/task request list is just opposite. + */ + UFSHCI_QUIRK_BROKEN_REQ_LIST_CLR = 1 << 6, + + /* + * This quirk needs to be enabled if host controller doesn't allow + * that the interrupt aggregation timer and counter are reset by s/w. + */ + UFSHCI_QUIRK_SKIP_RESET_INTR_AGGR = 1 << 7, + + /* + * This quirks needs to be enabled if host controller cannot be + * enabled via HCE register. + */ + UFSHCI_QUIRK_BROKEN_HCE = 1 << 8, + + /* + * This quirk needs to be enabled if the host controller regards + * resolution of the values of PRDTO and PRDTL in UTRD as byte. + */ + UFSHCD_QUIRK_PRDT_BYTE_GRAN = 1 << 9, + + /* + * This quirk needs to be enabled if the host controller reports + * OCS FATAL ERROR with device error through sense data + */ + UFSHCD_QUIRK_BROKEN_OCS_FATAL_ERROR = 1 << 10, + + /* + * This quirk needs to be enabled if the host controller has + * auto-hibernate capability but it doesn't work. + */ + UFSHCD_QUIRK_BROKEN_AUTO_HIBERN8 = 1 << 11, + + /* + * This quirk needs to disable manual flush for write booster + */ + UFSHCI_QUIRK_SKIP_MANUAL_WB_FLUSH_CTRL = 1 << 12, + + /* + * This quirk needs to disable unipro timeout values + * before power mode change + */ + UFSHCD_QUIRK_SKIP_DEF_UNIPRO_TIMEOUT_SETTING = 1 << 13, + + /* + * This quirk needs to be enabled if the host controller does not + * support UIC command + */ + UFSHCD_QUIRK_BROKEN_UIC_CMD = 1 << 15, + + /* + * This quirk needs to be enabled if the host controller cannot + * support physical host configuration. + */ + UFSHCD_QUIRK_SKIP_PH_CONFIGURATION = 1 << 16, + + /* + * This quirk needs to be enabled if the host controller has + * 64-bit addressing supported capability but it doesn't work. + */ + UFSHCD_QUIRK_BROKEN_64BIT_ADDRESS = 1 << 17, + + /* + * This quirk needs to be enabled if the host controller has + * auto-hibernate capability but it's FASTAUTO only. + */ + UFSHCD_QUIRK_HIBERN_FASTAUTO = 1 << 18, + + /* + * This quirk needs to be enabled if the host controller needs + * to reinit the device after switching to maximum gear. 
+ */ + UFSHCD_QUIRK_REINIT_AFTER_MAX_GEAR_SWITCH = 1 << 19, + + /* + * Some host raises interrupt (per queue) in addition to + * CQES (traditional) when ESI is disabled. + * Enable this quirk will disable CQES and use per queue interrupt. + */ + UFSHCD_QUIRK_MCQ_BROKEN_INTR = 1 << 20, + + /* + * Some host does not implement SQ Run Time Command (SQRTC) register + * thus need this quirk to skip related flow. + */ + UFSHCD_QUIRK_MCQ_BROKEN_RTC = 1 << 21, + + /* + * This quirk needs to be enabled if the host controller supports inline + * encryption but it needs to initialize the crypto capabilities in a + * nonstandard way and/or needs to override blk_crypto_ll_ops. If + * enabled, the standard code won't initialize the blk_crypto_profile; + * ufs_hba_variant_ops::init() must do it instead. + */ + UFSHCD_QUIRK_CUSTOM_CRYPTO_PROFILE = 1 << 22, + + /* + * This quirk needs to be enabled if the host controller supports inline + * encryption but does not support the CRYPTO_GENERAL_ENABLE bit, i.e. + * host controller initialization fails if that bit is set. + */ + UFSHCD_QUIRK_BROKEN_CRYPTO_ENABLE = 1 << 23, + + /* + * This quirk needs to be enabled if the host controller driver copies + * cryptographic keys into the PRDT in order to send them to hardware, + * and therefore the PRDT should be zeroized after each request (as per + * the standard best practice for managing keys). + */ + UFSHCD_QUIRK_KEYS_IN_PRDT = 1 << 24, + + /* + * This quirk indicates that the controller reports the value 1 (not + * supported) in the Legacy Single DoorBell Support (LSDBS) bit of the + * Controller Capabilities register although it supports the legacy + * single doorbell mode. + */ + UFSHCD_QUIRK_BROKEN_LSDBS_CAP = 1 << 25, +}; + +struct ufs_hba { + struct udevice *dev; + void __iomem *mmio_base; + struct ufs_hba_ops *ops; + struct ufs_desc_size desc_size; + u32 capabilities; + u32 version; + u32 intr_mask; + enum ufshcd_quirks quirks; + + /* Virtual memory reference */ + struct utp_transfer_cmd_desc *ucdl; + struct utp_transfer_req_desc *utrdl; + /* TODO: Add Task Manegement Support */ + struct utp_task_req_desc *utmrdl; + + struct utp_upiu_req *ucd_req_ptr; + struct utp_upiu_rsp *ucd_rsp_ptr; + struct ufshcd_sg_entry *ucd_prdt_ptr; + + /* Power Mode information */ + enum ufs_dev_pwr_mode curr_dev_pwr_mode; + struct ufs_pa_layer_attr pwr_info; + struct ufs_pwr_mode_info max_pwr_info; + + struct ufs_dev_cmd dev_cmd; +}; + +static inline int ufshcd_ops_init(struct ufs_hba *hba) +{ + if (hba->ops && hba->ops->init) + return hba->ops->init(hba); + + return 0; +} + +static inline int ufshcd_ops_get_max_pwr_mode(struct ufs_hba *hba, + struct ufs_pwr_mode_info *max_pwr_info) +{ + if (hba->ops && hba->ops->get_max_pwr_mode) + return hba->ops->get_max_pwr_mode(hba, max_pwr_info); + + return 0; +} + +static inline int ufshcd_ops_hce_enable_notify(struct ufs_hba *hba, + bool status) +{ + if (hba->ops && hba->ops->hce_enable_notify) + return hba->ops->hce_enable_notify(hba, status); + + return 0; +} + +static inline int ufshcd_ops_link_startup_notify(struct ufs_hba *hba, + bool status) +{ + if (hba->ops && hba->ops->link_startup_notify) + return hba->ops->link_startup_notify(hba, status); + + return 0; +} + +static inline int ufshcd_vops_device_reset(struct ufs_hba *hba) +{ + if (hba->ops && hba->ops->device_reset) + return hba->ops->device_reset(hba); + + return 0; +} + +/* Interrupt disable masks */ +enum { + /* Interrupt disable mask for UFSHCI v1.0 */ + INTERRUPT_MASK_ALL_VER_10 = 0x30FFF, + INTERRUPT_MASK_RW_VER_10 = 
0x30000, + + /* Interrupt disable mask for UFSHCI v1.1 */ + INTERRUPT_MASK_ALL_VER_11 = 0x31FFF, + + /* Interrupt disable mask for UFSHCI v2.1 */ + INTERRUPT_MASK_ALL_VER_21 = 0x71FFF, +}; + +#define ufshcd_writel(hba, val, reg) \ + writel((val), (hba)->mmio_base + (reg)) +#define ufshcd_readl(hba, reg) \ + readl((hba)->mmio_base + (reg)) + +/** + * ufshcd_rmwl - perform read/modify/write for a controller register + * @hba: per adapter instance + * @mask: mask to apply on read value + * @val: actual value to write + * @reg: register address + */ +static inline void ufshcd_rmwl(struct ufs_hba *hba, u32 mask, u32 val, u32 reg) +{ + u32 tmp; + + tmp = ufshcd_readl(hba, reg); + tmp &= ~mask; + tmp |= (val & mask); + ufshcd_writel(hba, tmp, reg); +} + +int ufshcd_probe(struct udevice *dev, struct ufs_hba_ops *hba_ops); + +#endif diff --git a/drivers/ufs/ufshcd-dwc.c b/drivers/ufs/ufshcd-dwc.c new file mode 100644 index 00000000000..3f62e59a060 --- /dev/null +++ b/drivers/ufs/ufshcd-dwc.c @@ -0,0 +1,133 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * UFS Host driver for Synopsys Designware Core + * + * Copyright (C) 2015-2016 Synopsys, Inc. (www.synopsys.com) + * + */ +#include <clk.h> +#include <dm.h> +#include <ufs.h> +#include <asm/io.h> +#include <dm/device_compat.h> +#include <linux/bitops.h> +#include <linux/delay.h> +#include <linux/err.h> +#include <linux/time.h> + +#include "ufs.h" +#include "ufshci-dwc.h" +#include "ufshcd-dwc.h" + +int ufshcd_dwc_dme_set_attrs(struct ufs_hba *hba, + const struct ufshcd_dme_attr_val *v, int n) +{ + int ret = 0; + int attr_node = 0; + + for (attr_node = 0; attr_node < n; attr_node++) { + ret = ufshcd_dme_set_attr(hba, v[attr_node].attr_sel, + ATTR_SET_NOR, v[attr_node].mib_val, v[attr_node].peer); + if (ret) + return ret; + } + + return 0; +} + +/** + * ufshcd_dwc_program_clk_div() - program clock divider. + * @hba: Private Structure pointer + * @divider_val: clock divider value to be programmed + * + */ +static void ufshcd_dwc_program_clk_div(struct ufs_hba *hba, u32 divider_val) +{ + ufshcd_writel(hba, divider_val, DWC_UFS_REG_HCLKDIV); +} + +/** + * ufshcd_dwc_link_is_up() - check if link is up. + * @hba: private structure pointer + * + * Return: 0 on success, non-zero value on failure. + */ +static int ufshcd_dwc_link_is_up(struct ufs_hba *hba) +{ + int dme_result = 0; + + ufshcd_dme_get(hba, UIC_ARG_MIB(VS_POWERSTATE), &dme_result); + + if (dme_result == UFSHCD_LINK_IS_UP) + return 0; + + return 1; +} + +/** + * ufshcd_dwc_connection_setup() - configure unipro attributes. + * @hba: pointer to drivers private data + * + * This function configures both the local side (host) and the peer side + * (device) unipro attributes to establish the connection to application/ + * cport. + * This function is not required if the hardware is properly configured to + * have this connection setup on reset. But invoking this function does no + * harm and should be fine even working with any ufs device. + * + * Return: 0 on success non-zero value on failure. 
+ */ +static int ufshcd_dwc_connection_setup(struct ufs_hba *hba) +{ + static const struct ufshcd_dme_attr_val setup_attrs[] = { + { UIC_ARG_MIB(T_CONNECTIONSTATE), 0, DME_LOCAL }, + { UIC_ARG_MIB(N_DEVICEID), 0, DME_LOCAL }, + { UIC_ARG_MIB(N_DEVICEID_VALID), 0, DME_LOCAL }, + { UIC_ARG_MIB(T_PEERDEVICEID), 1, DME_LOCAL }, + { UIC_ARG_MIB(T_PEERCPORTID), 0, DME_LOCAL }, + { UIC_ARG_MIB(T_TRAFFICCLASS), 0, DME_LOCAL }, + { UIC_ARG_MIB(T_CPORTFLAGS), 0x6, DME_LOCAL }, + { UIC_ARG_MIB(T_CPORTMODE), 1, DME_LOCAL }, + { UIC_ARG_MIB(T_CONNECTIONSTATE), 1, DME_LOCAL }, + { UIC_ARG_MIB(T_CONNECTIONSTATE), 0, DME_PEER }, + { UIC_ARG_MIB(N_DEVICEID), 1, DME_PEER }, + { UIC_ARG_MIB(N_DEVICEID_VALID), 1, DME_PEER }, + { UIC_ARG_MIB(T_PEERDEVICEID), 1, DME_PEER }, + { UIC_ARG_MIB(T_PEERCPORTID), 0, DME_PEER }, + { UIC_ARG_MIB(T_TRAFFICCLASS), 0, DME_PEER }, + { UIC_ARG_MIB(T_CPORTFLAGS), 0x6, DME_PEER }, + { UIC_ARG_MIB(T_CPORTMODE), 1, DME_PEER }, + { UIC_ARG_MIB(T_CONNECTIONSTATE), 1, DME_PEER } + }; + return ufshcd_dwc_dme_set_attrs(hba, setup_attrs, ARRAY_SIZE(setup_attrs)); +} + +/** + * ufshcd_dwc_link_startup_notify() - program clock divider. + * @hba: private structure pointer + * @status: Callback notify status + * + * Return: 0 on success, non-zero value on failure. + */ +int ufshcd_dwc_link_startup_notify(struct ufs_hba *hba, + enum ufs_notify_change_status status) +{ + int err = 0; + + if (status == PRE_CHANGE) { + ufshcd_dwc_program_clk_div(hba, DWC_UFS_REG_HCLKDIV_DIV_125); + } else { /* POST_CHANGE */ + err = ufshcd_dwc_link_is_up(hba); + if (err) { + dev_err(hba->dev, "Link is not up\n"); + return err; + } + + err = ufshcd_dwc_connection_setup(hba); + if (err) + dev_err(hba->dev, "Connection setup failed (%d)\n", + err); + } + + return err; +} diff --git a/drivers/ufs/ufshcd-dwc.h b/drivers/ufs/ufshcd-dwc.h new file mode 100644 index 00000000000..fc1bcca8ccb --- /dev/null +++ b/drivers/ufs/ufshcd-dwc.h @@ -0,0 +1,69 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * UFS Host driver for Synopsys Designware Core + * + * Copyright (C) 2015-2016 Synopsys, Inc. 
(www.synopsys.com) + * + * Authors: Joao Pinto <jpinto@synopsys.com> + */ + +#ifndef _UFSHCD_DWC_H +#define _UFSHCD_DWC_H + +/* PHY modes */ +#define UFSHCD_DWC_PHY_MODE_ROM 0 + +/* RMMI Attributes */ +#define CBREFCLKCTRL2 0x8132 +#define CBCRCTRL 0x811F +#define CBC10DIRECTCONF2 0x810E +#define CBCREGADDRLSB 0x8116 +#define CBCREGADDRMSB 0x8117 +#define CBCREGWRLSB 0x8118 +#define CBCREGWRMSB 0x8119 +#define CBCREGRDLSB 0x811A +#define CBCREGRDMSB 0x811B +#define CBCREGRDWRSEL 0x811C + +#define CBREFREFCLK_GATE_OVR_EN BIT(7) + +/* M-PHY Attributes */ +#define MTX_FSM_STATE 0x41 +#define MRX_FSM_STATE 0xC1 + +/* M-PHY registers */ +#define FAST_FLAGS(n) (0x401C + ((n) * 0x100)) +#define RX_AFE_ATT_IDAC(n) (0x4000 + ((n) * 0x100)) +#define RX_AFE_CTLE_IDAC(n) (0x4001 + ((n) * 0x100)) +#define FW_CALIB_CCFG(n) (0x404D + ((n) * 0x100)) + +/* Tx/Rx FSM state */ +enum rx_fsm_state { + RX_STATE_DISABLED = 0, + RX_STATE_HIBERN8 = 1, + RX_STATE_SLEEP = 2, + RX_STATE_STALL = 3, + RX_STATE_LSBURST = 4, + RX_STATE_HSBURST = 5, +}; + +enum tx_fsm_state { + TX_STATE_DISABLED = 0, + TX_STATE_HIBERN8 = 1, + TX_STATE_SLEEP = 2, + TX_STATE_STALL = 3, + TX_STATE_LSBURST = 4, + TX_STATE_HSBURST = 5, +}; + +struct ufshcd_dme_attr_val { + u32 attr_sel; + u32 mib_val; + u8 peer; +}; + +int ufshcd_dwc_link_startup_notify(struct ufs_hba *hba, + enum ufs_notify_change_status status); +int ufshcd_dwc_dme_set_attrs(struct ufs_hba *hba, + const struct ufshcd_dme_attr_val *v, int n); +#endif /* End of Header */ diff --git a/drivers/ufs/ufshci-dwc.h b/drivers/ufs/ufshci-dwc.h new file mode 100644 index 00000000000..9e24c230c64 --- /dev/null +++ b/drivers/ufs/ufshci-dwc.h @@ -0,0 +1,32 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * UFS Host driver for Synopsys Designware Core + * + * Copyright (C) 2015-2016 Synopsys, Inc. 
(www.synopsys.com) + * + * Authors: Joao Pinto <jpinto@synopsys.com> + */ + +#ifndef _UFSHCI_DWC_H +#define _UFSHCI_DWC_H + +/* DWC HC UFSHCI specific Registers */ +enum dwc_specific_registers { + DWC_UFS_REG_HCLKDIV = 0xFC, +}; + +/* Clock Divider Values: Hex equivalent of frequency in MHz */ +enum clk_div_values { + DWC_UFS_REG_HCLKDIV_DIV_62_5 = 0x3e, + DWC_UFS_REG_HCLKDIV_DIV_125 = 0x7d, + DWC_UFS_REG_HCLKDIV_DIV_200 = 0xc8, +}; + +/* Selector Index */ +enum selector_index { + SELIND_LN0_TX = 0x00, + SELIND_LN1_TX = 0x01, + SELIND_LN0_RX = 0x04, + SELIND_LN1_RX = 0x05, +}; +#endif diff --git a/drivers/ufs/ufshci.h b/drivers/ufs/ufshci.h new file mode 100644 index 00000000000..90cbf87a3a4 --- /dev/null +++ b/drivers/ufs/ufshci.h @@ -0,0 +1,469 @@ +/* SPDX-License-Identifier: GPL-2.0+ */ +#ifndef __UFSHCI_H +#define __UFSHCI_H + +enum { + TASK_REQ_UPIU_SIZE_DWORDS = 8, + TASK_RSP_UPIU_SIZE_DWORDS = 8, + ALIGNED_UPIU_SIZE = 512, +}; + +/* UFSHCI Registers */ +enum { + REG_CONTROLLER_CAPABILITIES = 0x00, + REG_MCQCAP = 0x04, + REG_UFS_VERSION = 0x08, + REG_EXT_CONTROLLER_CAPABILITIES = 0x0C, + REG_CONTROLLER_PID = 0x10, + REG_CONTROLLER_MID = 0x14, + REG_AUTO_HIBERNATE_IDLE_TIMER = 0x18, + REG_INTERRUPT_STATUS = 0x20, + REG_INTERRUPT_ENABLE = 0x24, + REG_CONTROLLER_STATUS = 0x30, + REG_CONTROLLER_ENABLE = 0x34, + REG_UIC_ERROR_CODE_PHY_ADAPTER_LAYER = 0x38, + REG_UIC_ERROR_CODE_DATA_LINK_LAYER = 0x3C, + REG_UIC_ERROR_CODE_NETWORK_LAYER = 0x40, + REG_UIC_ERROR_CODE_TRANSPORT_LAYER = 0x44, + REG_UIC_ERROR_CODE_DME = 0x48, + REG_UTP_TRANSFER_REQ_INT_AGG_CONTROL = 0x4C, + REG_UTP_TRANSFER_REQ_LIST_BASE_L = 0x50, + REG_UTP_TRANSFER_REQ_LIST_BASE_H = 0x54, + REG_UTP_TRANSFER_REQ_DOOR_BELL = 0x58, + REG_UTP_TRANSFER_REQ_LIST_CLEAR = 0x5C, + REG_UTP_TRANSFER_REQ_LIST_RUN_STOP = 0x60, + REG_UTP_TASK_REQ_LIST_BASE_L = 0x70, + REG_UTP_TASK_REQ_LIST_BASE_H = 0x74, + REG_UTP_TASK_REQ_DOOR_BELL = 0x78, + REG_UTP_TASK_REQ_LIST_CLEAR = 0x7C, + REG_UTP_TASK_REQ_LIST_RUN_STOP = 0x80, + REG_UIC_COMMAND = 0x90, + REG_UIC_COMMAND_ARG_1 = 0x94, + REG_UIC_COMMAND_ARG_2 = 0x98, + REG_UIC_COMMAND_ARG_3 = 0x9C, + + UFSHCI_REG_SPACE_SIZE = 0xA0, + + REG_UFS_CCAP = 0x100, + REG_UFS_CRYPTOCAP = 0x104, + + REG_UFS_MEM_CFG = 0x300, + REG_UFS_MCQ_CFG = 0x380, + REG_UFS_ESILBA = 0x384, + REG_UFS_ESIUBA = 0x388, + UFSHCI_CRYPTO_REG_SPACE_SIZE = 0x400, +}; + +/* Controller capability masks */ +enum { + MASK_TRANSFER_REQUESTS_SLOTS_SDB = 0x0000001F, + MASK_TRANSFER_REQUESTS_SLOTS_MCQ = 0x000000FF, + MASK_NUMBER_OUTSTANDING_RTT = 0x0000FF00, + MASK_TASK_MANAGEMENT_REQUEST_SLOTS = 0x00070000, + MASK_EHSLUTRD_SUPPORTED = 0x00400000, + MASK_AUTO_HIBERN8_SUPPORT = 0x00800000, + MASK_64_ADDRESSING_SUPPORT = 0x01000000, + MASK_OUT_OF_ORDER_DATA_DELIVERY_SUPPORT = 0x02000000, + MASK_UIC_DME_TEST_MODE_SUPPORT = 0x04000000, + MASK_CRYPTO_SUPPORT = 0x10000000, + MASK_LSDB_SUPPORT = 0x20000000, + MASK_MCQ_SUPPORT = 0x40000000, +}; + +/* MCQ capability mask */ +enum { + MASK_EXT_IID_SUPPORT = 0x00000400, +}; + +enum { + REG_SQATTR = 0x0, + REG_SQLBA = 0x4, + REG_SQUBA = 0x8, + REG_SQDAO = 0xC, + REG_SQISAO = 0x10, + + REG_CQATTR = 0x20, + REG_CQLBA = 0x24, + REG_CQUBA = 0x28, + REG_CQDAO = 0x2C, + REG_CQISAO = 0x30, +}; + +enum { + REG_SQHP = 0x0, + REG_SQTP = 0x4, + REG_SQRTC = 0x8, + REG_SQCTI = 0xC, + REG_SQRTS = 0x10, +}; + +enum { + REG_CQHP = 0x0, + REG_CQTP = 0x4, +}; + +enum { + REG_CQIS = 0x0, + REG_CQIE = 0x4, +}; + +enum { + SQ_START = 0x0, + SQ_STOP = 0x1, + SQ_ICU = 0x2, +}; + +enum { + SQ_STS = 0x1, + SQ_CUS = 0x2, +}; + 
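As a usage note for the capability fields above: a host driver reads REG_CONTROLLER_CAPABILITIES once at probe time and decodes it with these masks. The helper below is an illustrative sketch, not part of this patch; it assumes the struct ufs_hba and the ufshcd_readl() accessor declared in ufs.h, and follows the UFSHCI convention that the slot count fields are stored zero-based.

static void ufs_show_capabilities(struct ufs_hba *hba)
{
	/* One raw read; individual fields are extracted with the masks above */
	u32 cap = ufshcd_readl(hba, REG_CONTROLLER_CAPABILITIES);
	/* NUTRS (legacy single doorbell mode) is stored as "slots - 1" */
	int nutrs = (cap & MASK_TRANSFER_REQUESTS_SLOTS_SDB) + 1;

	printf("UFS: %d transfer request slots\n", nutrs);
	printf("UFS: 64-bit addressing %ssupported\n",
	       (cap & MASK_64_ADDRESSING_SUPPORT) ? "" : "not ");
	printf("UFS: MCQ %ssupported\n",
	       (cap & MASK_MCQ_SUPPORT) ? "" : "not ");
}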
+#define SQ_ICU_ERR_CODE_MASK GENMASK(7, 4) +#define UFS_MASK(mask, offset) ((mask) << (offset)) + +/* UFS Version 08h */ +#define MINOR_VERSION_NUM_MASK UFS_MASK(0xFFFF, 0) +#define MAJOR_VERSION_NUM_MASK UFS_MASK(0xFFFF, 16) + +/* Controller UFSHCI version */ +enum { + UFSHCI_VERSION_10 = 0x00010000, /* 1.0 */ + UFSHCI_VERSION_11 = 0x00010100, /* 1.1 */ + UFSHCI_VERSION_20 = 0x00000200, /* 2.0 */ + UFSHCI_VERSION_21 = 0x00000210, /* 2.1 */ + UFSHCI_VERSION_30 = 0x00000300, /* 3.0 */ + UFSHCI_VERSION_31 = 0x00000310, /* 3.1 */ + UFSHCI_VERSION_40 = 0x00000400, /* 4.0 */ +}; + +/* + * IS - Interrupt Status - 20h + */ +#define UTP_TRANSFER_REQ_COMPL 0x1 +#define UIC_DME_END_PT_RESET 0x2 +#define UIC_ERROR 0x4 +#define UIC_TEST_MODE 0x8 +#define UIC_POWER_MODE 0x10 +#define UIC_HIBERNATE_EXIT 0x20 +#define UIC_HIBERNATE_ENTER 0x40 +#define UIC_LINK_LOST 0x80 +#define UIC_LINK_STARTUP 0x100 +#define UTP_TASK_REQ_COMPL 0x200 +#define UIC_COMMAND_COMPL 0x400 +#define DEVICE_FATAL_ERROR 0x800 +#define CONTROLLER_FATAL_ERROR 0x10000 +#define SYSTEM_BUS_FATAL_ERROR 0x20000 +#define CRYPTO_ENGINE_FATAL_ERROR 0x40000 +#define MCQ_CQ_EVENT_STATUS 0x100000 + +#define UFSHCD_UIC_HIBERN8_MASK (UIC_HIBERNATE_ENTER |\ + UIC_HIBERNATE_EXIT) + +#define UFSHCD_UIC_PWR_MASK (UFSHCD_UIC_HIBERN8_MASK |\ + UIC_POWER_MODE) + +#define UFSHCD_UIC_MASK (UIC_COMMAND_COMPL | UFSHCD_UIC_PWR_MASK) + +#define UFSHCD_ERROR_MASK (UIC_ERROR | INT_FATAL_ERRORS) + +#define INT_FATAL_ERRORS (DEVICE_FATAL_ERROR |\ + CONTROLLER_FATAL_ERROR |\ + SYSTEM_BUS_FATAL_ERROR |\ + CRYPTO_ENGINE_FATAL_ERROR |\ + UIC_LINK_LOST) + +/* HCS - Host Controller Status 30h */ +#define DEVICE_PRESENT 0x1 +#define UTP_TRANSFER_REQ_LIST_READY 0x2 +#define UTP_TASK_REQ_LIST_READY 0x4 +#define UIC_COMMAND_READY 0x8 +#define HOST_ERROR_INDICATOR 0x10 +#define DEVICE_ERROR_INDICATOR 0x20 +#define UIC_POWER_MODE_CHANGE_REQ_STATUS_MASK UFS_MASK(0x7, 8) + +#define UFSHCD_STATUS_READY (UTP_TRANSFER_REQ_LIST_READY |\ + UTP_TASK_REQ_LIST_READY |\ + UIC_COMMAND_READY) + +enum { + PWR_OK = 0x0, + PWR_LOCAL = 0x01, + PWR_REMOTE = 0x02, + PWR_BUSY = 0x03, + PWR_ERROR_CAP = 0x04, + PWR_FATAL_ERROR = 0x05, +}; + +/* HCE - Host Controller Enable 34h */ +#define CONTROLLER_ENABLE 0x1 +#define CONTROLLER_DISABLE 0x0 +#define CRYPTO_GENERAL_ENABLE 0x2 + +/* UECPA - Host UIC Error Code PHY Adapter Layer 38h */ +#define UIC_PHY_ADAPTER_LAYER_ERROR 0x80000000 +#define UIC_PHY_ADAPTER_LAYER_ERROR_CODE_MASK 0x1F +#define UIC_PHY_ADAPTER_LAYER_LANE_ERR_MASK 0xF +#define UIC_PHY_ADAPTER_LAYER_GENERIC_ERROR 0x10 + +/* UECDL - Host UIC Error Code Data Link Layer 3Ch */ +#define UIC_DATA_LINK_LAYER_ERROR 0x80000000 +#define UIC_DATA_LINK_LAYER_ERROR_CODE_MASK 0xFFFF +#define UIC_DATA_LINK_LAYER_ERROR_TCX_REP_TIMER_EXP 0x2 +#define UIC_DATA_LINK_LAYER_ERROR_AFCX_REQ_TIMER_EXP 0x4 +#define UIC_DATA_LINK_LAYER_ERROR_FCX_PRO_TIMER_EXP 0x8 +#define UIC_DATA_LINK_LAYER_ERROR_RX_BUF_OF 0x20 +#define UIC_DATA_LINK_LAYER_ERROR_PA_INIT 0x2000 +#define UIC_DATA_LINK_LAYER_ERROR_NAC_RECEIVED 0x0001 +#define UIC_DATA_LINK_LAYER_ERROR_TCx_REPLAY_TIMEOUT 0x0002 + +/* UECN - Host UIC Error Code Network Layer 40h */ +#define UIC_NETWORK_LAYER_ERROR 0x80000000 +#define UIC_NETWORK_LAYER_ERROR_CODE_MASK 0x7 +#define UIC_NETWORK_UNSUPPORTED_HEADER_TYPE 0x1 +#define UIC_NETWORK_BAD_DEVICEID_ENC 0x2 +#define UIC_NETWORK_LHDR_TRAP_PACKET_DROPPING 0x4 + +/* UECT - Host UIC Error Code Transport Layer 44h */ +#define UIC_TRANSPORT_LAYER_ERROR 0x80000000 +#define UIC_TRANSPORT_LAYER_ERROR_CODE_MASK 0x7F 
+#define UIC_TRANSPORT_UNSUPPORTED_HEADER_TYPE 0x1 +#define UIC_TRANSPORT_UNKNOWN_CPORTID 0x2 +#define UIC_TRANSPORT_NO_CONNECTION_RX 0x4 +#define UIC_TRANSPORT_CONTROLLED_SEGMENT_DROPPING 0x8 +#define UIC_TRANSPORT_BAD_TC 0x10 +#define UIC_TRANSPORT_E2E_CREDIT_OVERFOW 0x20 +#define UIC_TRANSPORT_SAFETY_VALUE_DROPPING 0x40 + +/* UECDME - Host UIC Error Code DME 48h */ +#define UIC_DME_ERROR 0x80000000 +#define UIC_DME_ERROR_CODE_MASK 0x1 + +/* UTRIACR - Interrupt Aggregation control register - 0x4Ch */ +#define INT_AGGR_TIMEOUT_VAL_MASK 0xFF +#define INT_AGGR_COUNTER_THRESHOLD_MASK UFS_MASK(0x1F, 8) +#define INT_AGGR_COUNTER_AND_TIMER_RESET 0x10000 +#define INT_AGGR_STATUS_BIT 0x100000 +#define INT_AGGR_PARAM_WRITE 0x1000000 +#define INT_AGGR_ENABLE 0x80000000 + +/* UTRLRSR - UTP Transfer Request Run-Stop Register 60h */ +#define UTP_TRANSFER_REQ_LIST_RUN_STOP_BIT 0x1 + +/* UTMRLRSR - UTP Task Management Request Run-Stop Register 80h */ +#define UTP_TASK_REQ_LIST_RUN_STOP_BIT 0x1 + +/* REG_UFS_MEM_CFG - Global Config Registers 300h */ +#define MCQ_MODE_SELECT BIT(0) + +/* CQISy - CQ y Interrupt Status Register */ +#define UFSHCD_MCQ_CQIS_TAIL_ENT_PUSH_STS 0x1 + +/* UICCMD - UIC Command */ +#define COMMAND_OPCODE_MASK 0xFF +#define GEN_SELECTOR_INDEX_MASK 0xFFFF + +#define MIB_ATTRIBUTE_MASK UFS_MASK(0xFFFF, 16) +#define RESET_LEVEL 0xFF + +#define ATTR_SET_TYPE_MASK UFS_MASK(0xFF, 16) +#define CFG_RESULT_CODE_MASK 0xFF +#define GENERIC_ERROR_CODE_MASK 0xFF + +/* GenSelectorIndex calculation macros for M-PHY attributes */ +#define UIC_ARG_MPHY_TX_GEN_SEL_INDEX(lane) (lane) +#define UIC_ARG_MPHY_RX_GEN_SEL_INDEX(lane) (PA_MAXDATALANES + (lane)) + +#define UIC_ARG_MIB_SEL(attr, sel) ((((attr) & 0xFFFF) << 16) |\ + ((sel) & 0xFFFF)) +#define UIC_ARG_MIB(attr) UIC_ARG_MIB_SEL(attr, 0) +#define UIC_ARG_ATTR_TYPE(t) (((t) & 0xFF) << 16) +#define UIC_GET_ATTR_ID(v) (((v) >> 16) & 0xFFFF) + +/* Link Status*/ +enum link_status { + UFSHCD_LINK_IS_DOWN = 1, + UFSHCD_LINK_IS_UP = 2, +}; + +/* UIC Commands */ +enum uic_cmd_dme { + UIC_CMD_DME_GET = 0x01, + UIC_CMD_DME_SET = 0x02, + UIC_CMD_DME_PEER_GET = 0x03, + UIC_CMD_DME_PEER_SET = 0x04, + UIC_CMD_DME_POWERON = 0x10, + UIC_CMD_DME_POWEROFF = 0x11, + UIC_CMD_DME_ENABLE = 0x12, + UIC_CMD_DME_RESET = 0x14, + UIC_CMD_DME_END_PT_RST = 0x15, + UIC_CMD_DME_LINK_STARTUP = 0x16, + UIC_CMD_DME_HIBER_ENTER = 0x17, + UIC_CMD_DME_HIBER_EXIT = 0x18, + UIC_CMD_DME_TEST_MODE = 0x1A, +}; + +/* UIC Config result code / Generic error code */ +enum { + UIC_CMD_RESULT_SUCCESS = 0x00, + UIC_CMD_RESULT_INVALID_ATTR = 0x01, + UIC_CMD_RESULT_FAILURE = 0x01, + UIC_CMD_RESULT_INVALID_ATTR_VALUE = 0x02, + UIC_CMD_RESULT_READ_ONLY_ATTR = 0x03, + UIC_CMD_RESULT_WRITE_ONLY_ATTR = 0x04, + UIC_CMD_RESULT_BAD_INDEX = 0x05, + UIC_CMD_RESULT_LOCKED_ATTR = 0x06, + UIC_CMD_RESULT_BAD_TEST_FEATURE_INDEX = 0x07, + UIC_CMD_RESULT_PEER_COMM_FAILURE = 0x08, + UIC_CMD_RESULT_BUSY = 0x09, + UIC_CMD_RESULT_DME_FAILURE = 0x0A, +}; + +#define MASK_UIC_COMMAND_RESULT 0xFF + +#define INT_AGGR_COUNTER_THLD_VAL(c) (((c) & 0x1F) << 8) +#define INT_AGGR_TIMEOUT_VAL(t) (((t) & 0xFF) << 0) + +/* + * Request Descriptor Definitions + */ + +/* To accommodate UFS2.0 required Command type */ +enum { + UTP_CMD_TYPE_UFS_STORAGE = 0x1, +}; + +enum { + UTP_SCSI_COMMAND = 0x00000000, + UTP_REQ_DESC_INT_CMD = 0x01000000, + UTP_NATIVE_UFS_COMMAND = 0x10000000, + UTP_DEVICE_MANAGEMENT_FUNCTION = 0x20000000, +}; + +/* UTP Transfer Request Data Direction (DD) */ +enum utp_data_direction { + UTP_NO_DATA_TRANSFER = 0, + 
UTP_HOST_TO_DEVICE	= 1,
+	UTP_DEVICE_TO_HOST	= 2,
+};
+
+/* Overall command status values */
+enum utp_ocs {
+	OCS_SUCCESS			= 0x0,
+	OCS_INVALID_CMD_TABLE_ATTR	= 0x1,
+	OCS_INVALID_PRDT_ATTR		= 0x2,
+	OCS_MISMATCH_DATA_BUF_SIZE	= 0x3,
+	OCS_MISMATCH_RESP_UPIU_SIZE	= 0x4,
+	OCS_PEER_COMM_FAILURE		= 0x5,
+	OCS_ABORTED			= 0x6,
+	OCS_FATAL_ERROR			= 0x7,
+	OCS_DEVICE_FATAL_ERROR		= 0x8,
+	OCS_INVALID_CRYPTO_CONFIG	= 0x9,
+	OCS_GENERAL_CRYPTO_ERROR	= 0xA,
+	OCS_INVALID_COMMAND_STATUS	= 0x0F,
+};
+
+enum {
+	MASK_OCS	= 0x0F,
+};
+
+/* The maximum length of the data byte count field in the PRDT is 256KB */
+#define PRDT_DATA_BYTE_COUNT_MAX	SZ_256K
+/* The granularity of the data byte count field in the PRDT is 32-bit */
+#define PRDT_DATA_BYTE_COUNT_PAD	4
+
+struct ufshcd_sg_entry {
+	__le32 base_addr;
+	__le32 upper_addr;
+	__le32 reserved;
+	__le32 size;
+};
+
+#define MAX_BUFF	128
+/**
+ * struct utp_transfer_cmd_desc - UFS Command Descriptor structure
+ * @command_upiu: Command UPIU Frame address
+ * @response_upiu: Response UPIU Frame address
+ * @prd_table: Physical Region Descriptor
+ */
+struct utp_transfer_cmd_desc {
+	u8 command_upiu[ALIGNED_UPIU_SIZE];
+	u8 response_upiu[ALIGNED_UPIU_SIZE];
+	struct ufshcd_sg_entry prd_table[MAX_BUFF];
+};
+
+/**
+ * struct request_desc_header - Descriptor Header common to both UTRD and UTMRD
+ * @dword_0: Descriptor Header DW0
+ * @dword_1: Descriptor Header DW1
+ * @dword_2: Descriptor Header DW2
+ * @dword_3: Descriptor Header DW3
+ */
+struct request_desc_header {
+	__le32 dword_0;
+	__le32 dword_1;
+	__le32 dword_2;
+	__le32 dword_3;
+};
+
+/**
+ * struct utp_transfer_req_desc - UTRD structure
+ * @header: UTRD header DW-0 to DW-3
+ * @command_desc_base_addr_lo: UCD base address low DW-4
+ * @command_desc_base_addr_hi: UCD base address high DW-5
+ * @response_upiu_length: response UPIU length DW-6
+ * @response_upiu_offset: response UPIU offset DW-6
+ * @prd_table_length: Physical region descriptor length DW-7
+ * @prd_table_offset: Physical region descriptor offset DW-7
+ */
+struct utp_transfer_req_desc {
+	/* DW 0-3 */
+	struct request_desc_header header;
+
+	/* DW 4-5 */
+	__le32 command_desc_base_addr_lo;
+	__le32 command_desc_base_addr_hi;
+
+	/* DW 6 */
+	__le16 response_upiu_length;
+	__le16 response_upiu_offset;
+
+	/* DW 7 */
+	__le16 prd_table_length;
+	__le16 prd_table_offset;
+};
+
+/**
+ * struct utp_upiu_header - UPIU header structure
+ * @dword_0: UPIU header DW-0
+ * @dword_1: UPIU header DW-1
+ * @dword_2: UPIU header DW-2
+ */
+struct utp_upiu_header {
+	__be32 dword_0;
+	__be32 dword_1;
+	__be32 dword_2;
+};
+
+/*
+ * UTMRD structure.
+ */ +struct utp_task_req_desc { + /* DW 0-3 */ + struct request_desc_header header; + + /* DW 4-11 - Task request UPIU structure */ + struct utp_upiu_header req_header; + __be32 input_param1; + __be32 input_param2; + __be32 input_param3; + __be32 __reserved1[2]; + + /* DW 12-19 - Task Management Response UPIU structure */ + struct utp_upiu_header rsp_header; + __be32 output_param1; + __be32 output_param2; + __be32 __reserved2[3]; +}; + +#endif diff --git a/drivers/ufs/unipro.h b/drivers/ufs/unipro.h new file mode 100644 index 00000000000..56833602b77 --- /dev/null +++ b/drivers/ufs/unipro.h @@ -0,0 +1,305 @@ +/* SPDX-License-Identifier: GPL-2.0+ */ +#ifndef _UNIPRO_H_ +#define _UNIPRO_H_ + +/* + * M-TX Configuration Attributes + */ +#define TX_HIBERN8TIME_CAPABILITY 0x000F +#define TX_MODE 0x0021 +#define TX_HSRATE_SERIES 0x0022 +#define TX_HSGEAR 0x0023 +#define TX_PWMGEAR 0x0024 +#define TX_AMPLITUDE 0x0025 +#define TX_HS_SLEWRATE 0x0026 +#define TX_SYNC_SOURCE 0x0027 +#define TX_HS_SYNC_LENGTH 0x0028 +#define TX_HS_PREPARE_LENGTH 0x0029 +#define TX_LS_PREPARE_LENGTH 0x002A +#define TX_HIBERN8_CONTROL 0x002B +#define TX_LCC_ENABLE 0x002C +#define TX_PWM_BURST_CLOSURE_EXTENSION 0x002D +#define TX_BYPASS_8B10B_ENABLE 0x002E +#define TX_DRIVER_POLARITY 0x002F +#define TX_HS_UNTERMINATED_LINE_DRIVE_ENABLE 0x0030 +#define TX_LS_TERMINATED_LINE_DRIVE_ENABLE 0x0031 +#define TX_LCC_SEQUENCER 0x0032 +#define TX_MIN_ACTIVATETIME 0x0033 +#define TX_PWM_G6_G7_SYNC_LENGTH 0x0034 +#define TX_REFCLKFREQ 0x00EB +#define TX_CFGCLKFREQVAL 0x00EC +#define CFGEXTRATTR 0x00F0 +#define DITHERCTRL2 0x00F1 + +/* + * M-RX Configuration Attributes + */ +#define RX_HS_G1_SYNC_LENGTH_CAP 0x008B +#define RX_HS_G1_PREP_LENGTH_CAP 0x008C +#define RX_MIN_ACTIVATETIME_CAPABILITY 0x008F +#define RX_HIBERN8TIME_CAPABILITY 0x0092 +#define RX_HS_G2_SYNC_LENGTH_CAP 0x0094 +#define RX_HS_G3_SYNC_LENGTH_CAP 0x0095 +#define RX_HS_G2_PREP_LENGTH_CAP 0x0096 +#define RX_HS_G3_PREP_LENGTH_CAP 0x0097 +#define RX_ADV_GRANULARITY_CAP 0x0098 +#define RX_HIBERN8TIME_CAP 0x0092 +#define RX_ADV_HIBERN8TIME_CAP 0x0099 +#define RX_ADV_MIN_ACTIVATETIME_CAP 0x009A +#define RX_MODE 0x00A1 +#define RX_HSRATE_SERIES 0x00A2 +#define RX_HSGEAR 0x00A3 +#define RX_PWMGEAR 0x00A4 +#define RX_LS_TERMINATED_ENABLE 0x00A5 +#define RX_HS_UNTERMINATED_ENABLE 0x00A6 +#define RX_ENTER_HIBERN8 0x00A7 +#define RX_BYPASS_8B10B_ENABLE 0x00A8 +#define RX_TERMINATION_FORCE_ENABLE 0x00A9 +#define RXCALCTRL 0x00B4 +#define RXSQCTRL 0x00B5 +#define CFGRXCDR8 0x00BA +#define CFGRXOVR8 0x00BD +#define CFGRXOVR6 0x00BF +#define RXDIRECTCTRL2 0x00C7 +#define CFGRXOVR4 0x00E9 +#define RX_REFCLKFREQ 0x00EB +#define RX_CFGCLKFREQVAL 0x00EC +#define CFGWIDEINLN 0x00F0 +#define ENARXDIRECTCFG4 0x00F2 +#define ENARXDIRECTCFG3 0x00F3 +#define ENARXDIRECTCFG2 0x00F4 + +#define is_mphy_tx_attr(attr) ((attr) < RX_MODE) +#define RX_ADV_FINE_GRAN_STEP(x) ((((x) & 0x3) << 1) | 0x1) +#define SYNC_LEN_FINE(x) ((x) & 0x3F) +#define SYNC_LEN_COARSE(x) ((1 << 6) | ((x) & 0x3F)) +#define PREP_LEN(x) ((x) & 0xF) + +#define RX_MIN_ACTIVATETIME_UNIT_US 100 +#define HIBERN8TIME_UNIT_US 100 + +/* + * Common Block Attributes + */ +#define TX_GLOBALHIBERNATE UNIPRO_CB_OFFSET(0x002B) +#define REFCLKMODE UNIPRO_CB_OFFSET(0x00BF) +#define DIRECTCTRL19 UNIPRO_CB_OFFSET(0x00CD) +#define DIRECTCTRL10 UNIPRO_CB_OFFSET(0x00E6) +#define CDIRECTCTRL6 UNIPRO_CB_OFFSET(0x00EA) +#define RTOBSERVESELECT UNIPRO_CB_OFFSET(0x00F0) +#define CBDIVFACTOR UNIPRO_CB_OFFSET(0x00F1) +#define CBDCOCTRL5 
UNIPRO_CB_OFFSET(0x00F3) +#define CBPRGPLL2 UNIPRO_CB_OFFSET(0x00F8) +#define CBPRGTUNING UNIPRO_CB_OFFSET(0x00FB) + +#define UNIPRO_CB_OFFSET(x) (0x8000 | (x)) + +/* + * PHY Adapter attributes + */ +#define PA_PHY_TYPE 0x1500 +#define PA_AVAILTXDATALANES 0x1520 +#define PA_MAXTXSPEEDFAST 0x1521 +#define PA_MAXTXSPEEDSLOW 0x1522 +#define PA_MAXRXSPEEDFAST 0x1541 +#define PA_MAXRXSPEEDSLOW 0x1542 +#define PA_TXLINKSTARTUPHS 0x1544 +#define PA_AVAILRXDATALANES 0x1540 +#define PA_MINRXTRAILINGCLOCKS 0x1543 +#define PA_LOCAL_TX_LCC_ENABLE 0x155E +#define PA_ACTIVETXDATALANES 0x1560 +#define PA_CONNECTEDTXDATALANES 0x1561 +#define PA_TXFORCECLOCK 0x1562 +#define PA_TXPWRMODE 0x1563 +#define PA_TXTRAILINGCLOCKS 0x1564 +#define PA_TXSPEEDFAST 0x1565 +#define PA_TXSPEEDSLOW 0x1566 +#define PA_TXPWRSTATUS 0x1567 +#define PA_TXGEAR 0x1568 +#define PA_TXTERMINATION 0x1569 +#define PA_HSSERIES 0x156A +#define PA_LEGACYDPHYESCDL 0x1570 +#define PA_PWRMODE 0x1571 +#define PA_ACTIVERXDATALANES 0x1580 +#define PA_CONNECTEDRXDATALANES 0x1581 +#define PA_RXPWRSTATUS 0x1582 +#define PA_RXGEAR 0x1583 +#define PA_RXTERMINATION 0x1584 +#define PA_MAXRXPWMGEAR 0x1586 +#define PA_MAXRXHSGEAR 0x1587 +#define PA_PACPREQTIMEOUT 0x1590 +#define PA_PACPREQEOBTIMEOUT 0x1591 +#define PA_REMOTEVERINFO 0x15A0 +#define PA_LOGICALLANEMAP 0x15A1 +#define PA_SLEEPNOCONFIGTIME 0x15A2 +#define PA_STALLNOCONFIGTIME 0x15A3 +#define PA_SAVECONFIGTIME 0x15A4 +#define PA_RXHSUNTERMCAP 0x15A5 +#define PA_RXLSTERMCAP 0x15A6 +#define PA_HIBERN8TIME 0x15A7 +#define PA_LOCALVERINFO 0x15A9 +#define PA_GRANULARITY 0x15AA +#define PA_TACTIVATE 0x15A8 +#define PA_PWRMODEUSERDATA0 0x15B0 +#define PA_PWRMODEUSERDATA1 0x15B1 +#define PA_PWRMODEUSERDATA2 0x15B2 +#define PA_PWRMODEUSERDATA3 0x15B3 +#define PA_PWRMODEUSERDATA4 0x15B4 +#define PA_PWRMODEUSERDATA5 0x15B5 +#define PA_PWRMODEUSERDATA6 0x15B6 +#define PA_PWRMODEUSERDATA7 0x15B7 +#define PA_PWRMODEUSERDATA8 0x15B8 +#define PA_PWRMODEUSERDATA9 0x15B9 +#define PA_PWRMODEUSERDATA10 0x15BA +#define PA_PWRMODEUSERDATA11 0x15BB +#define PA_PACPFRAMECOUNT 0x15C0 +#define PA_PACPERRORCOUNT 0x15C1 +#define PA_PHYTESTCONTROL 0x15C2 +#define PA_TXHSADAPTTYPE 0x15D4 + +/* Adapt type for PA_TXHSADAPTTYPE attribute */ +#define PA_REFRESH_ADAPT 0x00 +#define PA_INITIAL_ADAPT 0x01 +#define PA_NO_ADAPT 0x03 + +#define PA_TACTIVATE_TIME_UNIT_US 10 +#define PA_HIBERN8_TIME_UNIT_US 100 + +/*Other attributes*/ +#define VS_POWERSTATE 0xD083 +#define VS_MPHYCFGUPDT 0xD085 +#define VS_DEBUGOMC 0xD09E +#define VS_MPHYDISABLE 0xD0C1 + +#define PA_GRANULARITY_MIN_VAL 1 +#define PA_GRANULARITY_MAX_VAL 6 + +/* PHY Adapter Protocol Constants */ +#define PA_MAXDATALANES 4 + +/* PA power modes */ +enum ufs_pa_pwr_mode { + FAST_MODE = 1, + SLOW_MODE = 2, + FASTAUTO_MODE = 4, + SLOWAUTO_MODE = 5, + UNCHANGED = 7, +}; + +#define PWRMODE_MASK 0xF +#define PWRMODE_RX_OFFSET 4 + +/* PA TX/RX Frequency Series */ +enum ufs_hs_gear_rate { + PA_HS_MODE_A = 1, + PA_HS_MODE_B = 2, +}; + +enum ufs_pwm_gear_tag { + UFS_PWM_DONT_CHANGE, /* Don't change Gear */ + UFS_PWM_G1, /* PWM Gear 1 (default for reset) */ + UFS_PWM_G2, /* PWM Gear 2 */ + UFS_PWM_G3, /* PWM Gear 3 */ + UFS_PWM_G4, /* PWM Gear 4 */ + UFS_PWM_G5, /* PWM Gear 5 */ + UFS_PWM_G6, /* PWM Gear 6 */ + UFS_PWM_G7, /* PWM Gear 7 */ +}; + +enum ufs_hs_gear_tag { + UFS_HS_DONT_CHANGE, /* Don't change Gear */ + UFS_HS_G1, /* HS Gear 1 (default for reset) */ + UFS_HS_G2, /* HS Gear 2 */ + UFS_HS_G3, /* HS Gear 3 */ + UFS_HS_G4, /* HS Gear 4 */ + UFS_HS_G5 /* HS Gear 5 */ +}; 
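To show how the PA attributes, gear values and mode encodings above fit together, here is a minimal sketch of a power mode change. It is not part of this patch and is deliberately abbreviated: a complete sequence would also program PA_TXTERMINATION, PA_RXTERMINATION and the active lane counts. It assumes the ufshcd_dme_set() helper from ufs.h and the UIC_ARG_MIB() macro from ufshci.h.

static int ufs_set_hs_gear3_rate_b(struct ufs_hba *hba)
{
	int ret;

	/* Select HS Gear 3 and rate series B in both directions */
	ret = ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TXGEAR), UFS_HS_G3);
	if (ret)
		return ret;
	ret = ufshcd_dme_set(hba, UIC_ARG_MIB(PA_RXGEAR), UFS_HS_G3);
	if (ret)
		return ret;
	ret = ufshcd_dme_set(hba, UIC_ARG_MIB(PA_HSSERIES), PA_HS_MODE_B);
	if (ret)
		return ret;

	/* PA_PWRMODE packs the RX mode into bits 7:4, the TX mode into 3:0 */
	return ufshcd_dme_set(hba, UIC_ARG_MIB(PA_PWRMODE),
			      (FAST_MODE << PWRMODE_RX_OFFSET) | FAST_MODE);
}

Writing PA_PWRMODE is what actually triggers the mode change; completion is then signalled through the UIC power mode interrupt and the PWR_LOCAL/PWR_REMOTE status codes defined in ufshci.h.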
+ +enum ufs_lanes { + UFS_LANE_DONT_CHANGE, /* Don't change Lane */ + UFS_LANE_1, /* Lane 1 (default for reset) */ + UFS_LANE_2, /* Lane 2 */ +}; + +enum ufs_unipro_ver { + UFS_UNIPRO_VER_RESERVED = 0, + UFS_UNIPRO_VER_1_40 = 1, /* UniPro version 1.40 */ + UFS_UNIPRO_VER_1_41 = 2, /* UniPro version 1.41 */ + UFS_UNIPRO_VER_1_6 = 3, /* UniPro version 1.6 */ + UFS_UNIPRO_VER_1_61 = 4, /* UniPro version 1.61 */ + UFS_UNIPRO_VER_1_8 = 5, /* UniPro version 1.8 */ + UFS_UNIPRO_VER_MAX = 6, /* UniPro unsupported version */ + /* UniPro version field mask in PA_LOCALVERINFO */ + UFS_UNIPRO_VER_MASK = 0xF, +}; + +/* + * Data Link Layer Attributes + */ +#define DL_TXPREEMPTIONCAP 0x2000 +#define DL_TC0TXMAXSDUSIZE 0x2001 +#define DL_TC0RXINITCREDITVAL 0x2002 +#define DL_TC1TXMAXSDUSIZE 0x2003 +#define DL_TC1RXINITCREDITVAL 0x2004 +#define DL_TC0TXBUFFERSIZE 0x2005 +#define DL_TC1TXBUFFERSIZE 0x2006 +#define DL_TC0TXFCTHRESHOLD 0x2040 +#define DL_FC0PROTTIMEOUTVAL 0x2041 +#define DL_TC0REPLAYTIMEOUTVAL 0x2042 +#define DL_AFC0REQTIMEOUTVAL 0x2043 +#define DL_AFC0CREDITTHRESHOLD 0x2044 +#define DL_TC0OUTACKTHRESHOLD 0x2045 +#define DL_PEERTC0PRESENT 0x2046 +#define DL_PEERTC0RXINITCREVAL 0x2047 +#define DL_TC1TXFCTHRESHOLD 0x2060 +#define DL_FC1PROTTIMEOUTVAL 0x2061 +#define DL_TC1REPLAYTIMEOUTVAL 0x2062 +#define DL_AFC1REQTIMEOUTVAL 0x2063 +#define DL_AFC1CREDITTHRESHOLD 0x2064 +#define DL_TC1OUTACKTHRESHOLD 0x2065 +#define DL_PEERTC1PRESENT 0x2066 +#define DL_PEERTC1RXINITCREVAL 0x2067 + +/* + * Network Layer Attributes + */ +#define N_DEVICEID 0x3000 +#define N_DEVICEID_VALID 0x3001 +#define N_TC0TXMAXSDUSIZE 0x3020 +#define N_TC1TXMAXSDUSIZE 0x3021 + +/* + * Transport Layer Attributes + */ +#define T_NUMCPORTS 0x4000 +#define T_NUMTESTFEATURES 0x4001 +#define T_CONNECTIONSTATE 0x4020 +#define T_PEERDEVICEID 0x4021 +#define T_PEERCPORTID 0x4022 +#define T_TRAFFICCLASS 0x4023 +#define T_PROTOCOLID 0x4024 +#define T_CPORTFLAGS 0x4025 +#define T_TXTOKENVALUE 0x4026 +#define T_RXTOKENVALUE 0x4027 +#define T_LOCALBUFFERSPACE 0x4028 +#define T_PEERBUFFERSPACE 0x4029 +#define T_CREDITSTOSEND 0x402A +#define T_CPORTMODE 0x402B +#define T_TC0TXMAXSDUSIZE 0x4060 +#define T_TC1TXMAXSDUSIZE 0x4061 + +#ifdef FALSE +#undef FALSE +#endif + +#ifdef TRUE +#undef TRUE +#endif + +/* Boolean attribute values */ +enum { + FALSE = 0, + TRUE, +}; + +#endif /* _UNIPRO_H_ */ |
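As a final illustration of how these UniPro attribute definitions are consumed, below is a minimal sketch, not part of this patch, that reads back the locally supported UniPro version. It assumes the ufshcd_dme_get() helper from ufs.h and the UIC_ARG_MIB() macro from ufshci.h; the peer's version could be fetched the same way via ufshcd_dme_peer_get() or the PA_REMOTEVERINFO attribute.

static enum ufs_unipro_ver ufs_get_local_unipro_ver(struct ufs_hba *hba)
{
	u32 ver = 0;

	if (ufshcd_dme_get(hba, UIC_ARG_MIB(PA_LOCALVERINFO), &ver))
		return UFS_UNIPRO_VER_RESERVED;

	/* Only the low bits of PA_LOCALVERINFO carry the version field */
	ver &= UFS_UNIPRO_VER_MASK;
	return ver < UFS_UNIPRO_VER_MAX ? ver : UFS_UNIPRO_VER_MAX;
}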