Diffstat (limited to 'drivers/ufs/ufs.c')
-rw-r--r--  drivers/ufs/ufs.c  111
1 file changed, 69 insertions(+), 42 deletions(-)
diff --git a/drivers/ufs/ufs.c b/drivers/ufs/ufs.c
index be64bf971f1..f7d8c40c448 100644
--- a/drivers/ufs/ufs.c
+++ b/drivers/ufs/ufs.c
@@ -125,6 +125,11 @@ static void ufshcd_print_pwr_info(struct ufs_hba *hba)
hba->pwr_info.hs_rate);
}
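+/* Reset the attached UFS device via the controller driver's vendor-specific callback, if one is provided */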
+static void ufshcd_device_reset(struct ufs_hba *hba)
+{
+ ufshcd_vops_device_reset(hba);
+}
+
/**
* ufshcd_ready_for_uic_cmd - Check if controller is ready
* to accept UIC commands
@@ -433,6 +438,12 @@ static int ufshcd_make_hba_operational(struct ufs_hba *hba)
REG_UTP_TASK_REQ_LIST_BASE_H);
/*
+ * Make sure base address and interrupt setup are updated before
+ * enabling the run/stop registers below.
+ */
+ wmb();
+
+ /*
* UCRDY, UTMRLDY and UTRLRDY bits must be 1
*/
reg = ufshcd_readl(hba, REG_CONTROLLER_STATUS);
@@ -456,9 +467,7 @@ static int ufshcd_link_startup(struct ufs_hba *hba)
{
int ret;
int retries = DME_LINKSTARTUP_RETRIES;
- bool link_startup_again = true;
-link_startup:
do {
ufshcd_ops_link_startup_notify(hba, PRE_CHANGE);
@@ -484,12 +493,6 @@ link_startup:
/* failed to get the link up... retire */
goto out;
- if (link_startup_again) {
- link_startup_again = false;
- retries = DME_LINKSTARTUP_RETRIES;
- goto link_startup;
- }
-
/* Mark that link is up in PWM-G1, 1-lane, SLOW-AUTO mode */
ufshcd_init_pwr_info(hba);
@@ -504,6 +507,8 @@ link_startup:
if (ret)
goto out;
+ /* Clear UECPA once, in case a LINERESET occurred during LINK_STARTUP */
+ ufshcd_readl(hba, REG_UIC_ERROR_CODE_PHY_ADAPTER_LAYER);
ret = ufshcd_make_hba_operational(hba);
out:
if (ret)
@@ -633,7 +638,9 @@ static int ufshcd_memory_alloc(struct ufs_hba *hba)
/* Allocate one Transfer Request Descriptor
* Should be aligned to 1k boundary.
*/
- hba->utrdl = memalign(1024, sizeof(struct utp_transfer_req_desc));
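+ /* Pad the size to ARCH_DMA_MINALIGN so cache maintenance on the descriptor cannot touch adjacent data */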
+ hba->utrdl = memalign(1024,
+ ALIGN(sizeof(struct utp_transfer_req_desc),
+ ARCH_DMA_MINALIGN));
if (!hba->utrdl) {
dev_err(hba->dev, "Transfer Descriptor memory allocation failed\n");
return -ENOMEM;
@@ -642,7 +649,9 @@ static int ufshcd_memory_alloc(struct ufs_hba *hba)
/* Allocate one Command Descriptor
* Should be aligned to 1k boundary.
*/
- hba->ucdl = memalign(1024, sizeof(struct utp_transfer_cmd_desc));
+ hba->ucdl = memalign(1024,
+ ALIGN(sizeof(struct utp_transfer_cmd_desc),
+ ARCH_DMA_MINALIGN));
if (!hba->ucdl) {
dev_err(hba->dev, "Command descriptor memory allocation failed\n");
return -ENOMEM;
@@ -692,18 +701,29 @@ static inline u8 ufshcd_get_upmcrs(struct ufs_hba *hba)
}
/**
- * ufshcd_cache_flush_and_invalidate - Flush and invalidate cache
+ * ufshcd_cache_flush - Flush cache
+ *
+ * Flush cache in aligned address..address+size range.
+ */
+static void ufshcd_cache_flush(void *addr, unsigned long size)
+{
+ uintptr_t start_addr = (uintptr_t)addr & ~(ARCH_DMA_MINALIGN - 1);
+ uintptr_t end_addr = ALIGN((uintptr_t)addr + size, ARCH_DMA_MINALIGN);
+
+ flush_dcache_range(start_addr, end_addr);
+}
+
+/**
+ * ufshcd_cache_invalidate - Invalidate cache
*
- * Flush and invalidate cache in aligned address..address+size range.
- * The invalidation is in place to avoid stale data in cache.
+ * Invalidate cache in aligned address..address+size range.
*/
-static void ufshcd_cache_flush_and_invalidate(void *addr, unsigned long size)
+static void ufshcd_cache_invalidate(void *addr, unsigned long size)
{
- uintptr_t aaddr = (uintptr_t)addr & ~(ARCH_DMA_MINALIGN - 1);
- unsigned long asize = ALIGN(size, ARCH_DMA_MINALIGN);
+ uintptr_t start_addr = (uintptr_t)addr & ~(ARCH_DMA_MINALIGN - 1);
+ uintptr_t end_addr = ALIGN((uintptr_t)addr + size, ARCH_DMA_MINALIGN);
- flush_dcache_range(aaddr, aaddr + asize);
- invalidate_dcache_range(aaddr, aaddr + asize);
+ invalidate_dcache_range(start_addr, end_addr);
}
/**
@@ -750,7 +770,7 @@ static void ufshcd_prepare_req_desc_hdr(struct ufs_hba *hba,
req_desc->prd_table_length = 0;
- ufshcd_cache_flush_and_invalidate(req_desc, sizeof(*req_desc));
+ ufshcd_cache_flush(req_desc, sizeof(*req_desc));
}
static void ufshcd_prepare_utp_query_req_upiu(struct ufs_hba *hba,
@@ -781,13 +801,13 @@ static void ufshcd_prepare_utp_query_req_upiu(struct ufs_hba *hba,
/* Copy the Descriptor */
if (query->request.upiu_req.opcode == UPIU_QUERY_OPCODE_WRITE_DESC) {
memcpy(ucd_req_ptr + 1, query->descriptor, len);
- ufshcd_cache_flush_and_invalidate(ucd_req_ptr, 2 * sizeof(*ucd_req_ptr));
+ ufshcd_cache_flush(ucd_req_ptr, 2 * sizeof(*ucd_req_ptr));
} else {
- ufshcd_cache_flush_and_invalidate(ucd_req_ptr, sizeof(*ucd_req_ptr));
+ ufshcd_cache_flush(ucd_req_ptr, sizeof(*ucd_req_ptr));
}
memset(hba->ucd_rsp_ptr, 0, sizeof(struct utp_upiu_rsp));
- ufshcd_cache_flush_and_invalidate(hba->ucd_rsp_ptr, sizeof(*hba->ucd_rsp_ptr));
+ ufshcd_cache_flush(hba->ucd_rsp_ptr, sizeof(*hba->ucd_rsp_ptr));
}
static inline void ufshcd_prepare_utp_nop_upiu(struct ufs_hba *hba)
@@ -805,8 +825,8 @@ static inline void ufshcd_prepare_utp_nop_upiu(struct ufs_hba *hba)
memset(hba->ucd_rsp_ptr, 0, sizeof(struct utp_upiu_rsp));
- ufshcd_cache_flush_and_invalidate(ucd_req_ptr, sizeof(*ucd_req_ptr));
- ufshcd_cache_flush_and_invalidate(hba->ucd_rsp_ptr, sizeof(*hba->ucd_rsp_ptr));
+ ufshcd_cache_flush(ucd_req_ptr, sizeof(*ucd_req_ptr));
+ ufshcd_cache_flush(hba->ucd_rsp_ptr, sizeof(*hba->ucd_rsp_ptr));
}
/**
@@ -844,6 +864,9 @@ static int ufshcd_send_command(struct ufs_hba *hba, unsigned int task_tag)
ufshcd_writel(hba, 1 << task_tag, REG_UTP_TRANSFER_REQ_DOOR_BELL);
+ /* Make sure doorbell reg is updated before reading interrupt status */
+ wmb();
+
start = get_timer(0);
do {
intr_status = ufshcd_readl(hba, REG_INTERRUPT_STATUS);
@@ -873,6 +896,8 @@ static int ufshcd_send_command(struct ufs_hba *hba, unsigned int task_tag)
*/
static inline int ufshcd_get_req_rsp(struct utp_upiu_rsp *ucd_rsp_ptr)
{
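+ /* The response UPIU is DMA-written by the controller; invalidate so the CPU reads fresh data */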
+ ufshcd_cache_invalidate(ucd_rsp_ptr, sizeof(*ucd_rsp_ptr));
+
return be32_to_cpu(ucd_rsp_ptr->header.dword_0) >> 24;
}
@@ -884,6 +909,8 @@ static inline int ufshcd_get_tr_ocs(struct ufs_hba *hba)
{
struct utp_transfer_req_desc *req_desc = hba->utrdl;
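+ /* The OCS field is updated by the controller; invalidate the descriptor before reading it */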
+ ufshcd_cache_invalidate(req_desc, sizeof(*req_desc));
+
return le32_to_cpu(req_desc->header.dword_2) & MASK_OCS;
}
@@ -1433,8 +1460,8 @@ void ufshcd_prepare_utp_scsi_cmd_upiu(struct ufs_hba *hba,
memcpy(ucd_req_ptr->sc.cdb, pccb->cmd, cdb_len);
memset(hba->ucd_rsp_ptr, 0, sizeof(struct utp_upiu_rsp));
- ufshcd_cache_flush_and_invalidate(ucd_req_ptr, sizeof(*ucd_req_ptr));
- ufshcd_cache_flush_and_invalidate(hba->ucd_rsp_ptr, sizeof(*hba->ucd_rsp_ptr));
+ ufshcd_cache_flush(ucd_req_ptr, sizeof(*ucd_req_ptr));
+ ufshcd_cache_flush(hba->ucd_rsp_ptr, sizeof(*hba->ucd_rsp_ptr));
}
static inline void prepare_prdt_desc(struct ufshcd_sg_entry *entry,
@@ -1449,7 +1476,6 @@ static void prepare_prdt_table(struct ufs_hba *hba, struct scsi_cmd *pccb)
{
struct utp_transfer_req_desc *req_desc = hba->utrdl;
struct ufshcd_sg_entry *prd_table = hba->ucd_prdt_ptr;
- uintptr_t aaddr = (uintptr_t)(pccb->pdata) & ~(ARCH_DMA_MINALIGN - 1);
ulong datalen = pccb->datalen;
int table_length;
u8 *buf;
@@ -1457,19 +1483,10 @@ static void prepare_prdt_table(struct ufs_hba *hba, struct scsi_cmd *pccb)
if (!datalen) {
req_desc->prd_table_length = 0;
- ufshcd_cache_flush_and_invalidate(req_desc, sizeof(*req_desc));
+ ufshcd_cache_flush(req_desc, sizeof(*req_desc));
return;
}
- if (pccb->dma_dir == DMA_TO_DEVICE) { /* Write to device */
- flush_dcache_range(aaddr, aaddr +
- ALIGN(datalen, ARCH_DMA_MINALIGN));
- }
-
- /* In any case, invalidate cache to avoid stale data in it. */
- invalidate_dcache_range(aaddr, aaddr +
- ALIGN(datalen, ARCH_DMA_MINALIGN));
-
table_length = DIV_ROUND_UP(pccb->datalen, MAX_PRDT_ENTRY);
buf = pccb->pdata;
i = table_length;
@@ -1483,8 +1500,8 @@ static void prepare_prdt_table(struct ufs_hba *hba, struct scsi_cmd *pccb)
prepare_prdt_desc(&prd_table[table_length - i - 1], buf, datalen - 1);
req_desc->prd_table_length = table_length;
- ufshcd_cache_flush_and_invalidate(prd_table, sizeof(*prd_table) * table_length);
- ufshcd_cache_flush_and_invalidate(req_desc, sizeof(*req_desc));
+ ufshcd_cache_flush(prd_table, sizeof(*prd_table) * table_length);
+ ufshcd_cache_flush(req_desc, sizeof(*req_desc));
}
static int ufs_scsi_exec(struct udevice *scsi_dev, struct scsi_cmd *pccb)
@@ -1498,8 +1515,12 @@ static int ufs_scsi_exec(struct udevice *scsi_dev, struct scsi_cmd *pccb)
ufshcd_prepare_utp_scsi_cmd_upiu(hba, pccb, upiu_flags);
prepare_prdt_table(hba, pccb);
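+ /* Flush the data buffer from the CPU cache before the DMA transfer starts */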
+ ufshcd_cache_flush(pccb->pdata, pccb->datalen);
+
ufshcd_send_command(hba, TASK_TAG);
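+ /* Invalidate the data buffer after completion so the CPU sees data written by the device */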
+ ufshcd_cache_invalidate(pccb->pdata, pccb->datalen);
+
ocs = ufshcd_get_tr_ocs(hba);
switch (ocs) {
case OCS_SUCCESS:
@@ -1723,7 +1744,7 @@ static int ufshcd_get_max_pwr_mode(struct ufs_hba *hba)
}
hba->max_pwr_info.is_valid = true;
- return 0;
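+ /* Let the controller driver adjust max_pwr_info through its vendor hook, if it implements one */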
+ return ufshcd_ops_get_max_pwr_mode(hba, &hba->max_pwr_info);
}
static int ufshcd_change_power_mode(struct ufs_hba *hba,
@@ -1901,7 +1922,7 @@ int ufs_start(struct ufs_hba *hba)
return ret;
}
- printf("Device at %s up at:", hba->dev->name);
+ debug("UFS Device %s is up!\n", hba->dev->name);
ufshcd_print_pwr_info(hba);
}
@@ -1953,7 +1974,8 @@ int ufshcd_probe(struct udevice *ufs_dev, struct ufs_hba_ops *hba_ops)
hba->version != UFSHCI_VERSION_20 &&
hba->version != UFSHCI_VERSION_21 &&
hba->version != UFSHCI_VERSION_30 &&
- hba->version != UFSHCI_VERSION_31)
+ hba->version != UFSHCI_VERSION_31 &&
+ hba->version != UFSHCI_VERSION_40)
dev_err(hba->dev, "invalid UFS version 0x%x\n",
hba->version);
@@ -1979,6 +2001,11 @@ int ufshcd_probe(struct udevice *ufs_dev, struct ufs_hba_ops *hba_ops)
REG_INTERRUPT_STATUS);
ufshcd_writel(hba, 0, REG_INTERRUPT_ENABLE);
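+ /* Ensure the interrupt status and enable writes have taken effect before resetting the device */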
+ mb();
+
+ /* Reset the attached device */
+ ufshcd_device_reset(hba);
+
err = ufshcd_hba_enable(hba);
if (err) {
dev_err(hba->dev, "Host controller enable failed\n");