Diffstat (limited to 'drivers/scsi/ufs/ufshcd.c')
-rw-r--r-- | drivers/scsi/ufs/ufshcd.c | 409
1 file changed, 232 insertions(+), 177 deletions(-)
diff --git a/drivers/scsi/ufs/ufshcd.c b/drivers/scsi/ufs/ufshcd.c
index b888117f4ecd..b81eebc7e2df 100644
--- a/drivers/scsi/ufs/ufshcd.c
+++ b/drivers/scsi/ufs/ufshcd.c
@@ -239,7 +239,7 @@ static struct ufs_dev_fix ufs_fixups[] = {
 	END_FIX
 };
 
-static void ufshcd_tmc_handler(struct ufs_hba *hba);
+static irqreturn_t ufshcd_tmc_handler(struct ufs_hba *hba);
 static void ufshcd_async_scan(void *data, async_cookie_t cookie);
 static int ufshcd_reset_and_restore(struct ufs_hba *hba);
 static int ufshcd_eh_host_reset_handler(struct scsi_cmnd *cmd);
@@ -496,8 +496,8 @@ static void ufshcd_print_tmrs(struct ufs_hba *hba, unsigned long bitmap)
 static void ufshcd_print_host_state(struct ufs_hba *hba)
 {
 	dev_err(hba->dev, "UFS Host state=%d\n", hba->ufshcd_state);
-	dev_err(hba->dev, "lrb in use=0x%lx, outstanding reqs=0x%lx tasks=0x%lx\n",
-		hba->lrb_in_use, hba->outstanding_reqs, hba->outstanding_tasks);
+	dev_err(hba->dev, "outstanding reqs=0x%lx tasks=0x%lx\n",
+		hba->outstanding_reqs, hba->outstanding_tasks);
 	dev_err(hba->dev, "saved_err=0x%x, saved_uic_err=0x%x\n",
 		hba->saved_err, hba->saved_uic_err);
 	dev_err(hba->dev, "Device power mode=%d, UIC link state=%d\n",
@@ -645,40 +645,6 @@ static inline int ufshcd_get_tr_ocs(struct ufshcd_lrb *lrbp)
 }
 
 /**
- * ufshcd_get_tm_free_slot - get a free slot for task management request
- * @hba: per adapter instance
- * @free_slot: pointer to variable with available slot value
- *
- * Get a free tag and lock it until ufshcd_put_tm_slot() is called.
- * Returns 0 if free slot is not available, else return 1 with tag value
- * in @free_slot.
- */
-static bool ufshcd_get_tm_free_slot(struct ufs_hba *hba, int *free_slot)
-{
-	int tag;
-	bool ret = false;
-
-	if (!free_slot)
-		goto out;
-
-	do {
-		tag = find_first_zero_bit(&hba->tm_slots_in_use, hba->nutmrs);
-		if (tag >= hba->nutmrs)
-			goto out;
-	} while (test_and_set_bit_lock(tag, &hba->tm_slots_in_use));
-
-	*free_slot = tag;
-	ret = true;
-out:
-	return ret;
-}
-
-static inline void ufshcd_put_tm_slot(struct ufs_hba *hba, int slot)
-{
-	clear_bit_unlock(slot, &hba->tm_slots_in_use);
-}
-
-/**
  * ufshcd_utrl_clear - Clear a bit in UTRLCLR register
  * @hba: per adapter instance
  * @pos: position of the bit to be cleared
@@ -1279,6 +1245,24 @@ out:
 	return ret;
 }
 
+static bool ufshcd_is_busy(struct request *req, void *priv, bool reserved)
+{
+	int *busy = priv;
+
+	WARN_ON_ONCE(reserved);
+	(*busy)++;
+	return false;
+}
+
+/* Whether or not any tag is in use by a request that is in progress. */
+static bool ufshcd_any_tag_in_use(struct ufs_hba *hba)
+{
+	struct request_queue *q = hba->cmd_queue;
+	int busy = 0;
+
+	blk_mq_tagset_busy_iter(q->tag_set, ufshcd_is_busy, &busy);
+	return busy;
+}
 
 static int ufshcd_devfreq_get_dev_status(struct device *dev,
 		struct devfreq_dev_status *stat)
@@ -1633,7 +1617,7 @@ static void ufshcd_gate_work(struct work_struct *work)
 
 	if (hba->clk_gating.active_reqs
 		|| hba->ufshcd_state != UFSHCD_STATE_OPERATIONAL
-		|| hba->lrb_in_use || hba->outstanding_tasks
+		|| ufshcd_any_tag_in_use(hba) || hba->outstanding_tasks
 		|| hba->active_uic_cmd || hba->uic_async_done)
 		goto rel_lock;
 
@@ -1687,7 +1671,7 @@ static void __ufshcd_release(struct ufs_hba *hba)
 
 	if (hba->clk_gating.active_reqs || hba->clk_gating.is_suspended
 		|| hba->ufshcd_state != UFSHCD_STATE_OPERATIONAL
-		|| hba->lrb_in_use || hba->outstanding_tasks
+		|| ufshcd_any_tag_in_use(hba) || hba->outstanding_tasks
 		|| hba->active_uic_cmd || hba->uic_async_done
 		|| ufshcd_eh_in_progress(hba))
 		return;
@@ -2457,22 +2441,9 @@ static int ufshcd_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd)
 
 	hba->req_abort_count = 0;
 
-	/* acquire the tag to make sure device cmds don't use it */
-	if (test_and_set_bit_lock(tag, &hba->lrb_in_use)) {
-		/*
-		 * Dev manage command in progress, requeue the command.
-		 * Requeuing the command helps in cases where the request *may*
-		 * find different tag instead of waiting for dev manage command
-		 * completion.
-		 */
-		err = SCSI_MLQUEUE_HOST_BUSY;
-		goto out;
-	}
-
 	err = ufshcd_hold(hba, true);
 	if (err) {
 		err = SCSI_MLQUEUE_HOST_BUSY;
-		clear_bit_unlock(tag, &hba->lrb_in_use);
 		goto out;
 	}
 	WARN_ON(hba->clk_gating.state != CLKS_ON);
@@ -2494,7 +2465,6 @@ static int ufshcd_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd)
 	if (err) {
 		ufshcd_release(hba);
 		lrbp->cmd = NULL;
-		clear_bit_unlock(tag, &hba->lrb_in_use);
 		goto out;
 	}
 	/* Make sure descriptors are ready before ringing the doorbell */
@@ -2642,44 +2612,6 @@ static int ufshcd_wait_for_dev_cmd(struct ufs_hba *hba,
 }
 
 /**
- * ufshcd_get_dev_cmd_tag - Get device management command tag
- * @hba: per-adapter instance
- * @tag_out: pointer to variable with available slot value
- *
- * Get a free slot and lock it until device management command
- * completes.
- *
- * Returns false if free slot is unavailable for locking, else
- * return true with tag value in @tag.
- */
-static bool ufshcd_get_dev_cmd_tag(struct ufs_hba *hba, int *tag_out)
-{
-	int tag;
-	bool ret = false;
-	unsigned long tmp;
-
-	if (!tag_out)
-		goto out;
-
-	do {
-		tmp = ~hba->lrb_in_use;
-		tag = find_last_bit(&tmp, hba->nutrs);
-		if (tag >= hba->nutrs)
-			goto out;
-	} while (test_and_set_bit_lock(tag, &hba->lrb_in_use));
-
-	*tag_out = tag;
-	ret = true;
-out:
-	return ret;
-}
-
-static inline void ufshcd_put_dev_cmd_tag(struct ufs_hba *hba, int tag)
-{
-	clear_bit_unlock(tag, &hba->lrb_in_use);
-}
-
-/**
  * ufshcd_exec_dev_cmd - API for sending device management requests
  * @hba: UFS hba
  * @cmd_type: specifies the type (NOP, Query...)
@@ -2691,6 +2623,8 @@ static inline void ufshcd_put_dev_cmd_tag(struct ufs_hba *hba, int tag)
 static int ufshcd_exec_dev_cmd(struct ufs_hba *hba,
 		enum dev_cmd_type cmd_type, int timeout)
 {
+	struct request_queue *q = hba->cmd_queue;
+	struct request *req;
 	struct ufshcd_lrb *lrbp;
 	int err;
 	int tag;
@@ -2704,7 +2638,11 @@ static int ufshcd_exec_dev_cmd(struct ufs_hba *hba,
 	 * Even though we use wait_event() which sleeps indefinitely,
 	 * the maximum wait time is bounded by SCSI request timeout.
 	 */
-	wait_event(hba->dev_cmd.tag_wq, ufshcd_get_dev_cmd_tag(hba, &tag));
+	req = blk_get_request(q, REQ_OP_DRV_OUT, 0);
+	if (IS_ERR(req))
+		return PTR_ERR(req);
+	tag = req->tag;
+	WARN_ON_ONCE(!ufshcd_valid_tag(hba, tag));
 
 	init_completion(&wait);
 	lrbp = &hba->lrb[tag];
@@ -2729,8 +2667,7 @@ static int ufshcd_exec_dev_cmd(struct ufs_hba *hba,
 			err ? "query_complete_err" : "query_complete");
 
 out_put_tag:
-	ufshcd_put_dev_cmd_tag(hba, tag);
-	wake_up(&hba->dev_cmd.tag_wq);
+	blk_put_request(req);
 	up_read(&hba->clk_scaling_lock);
 	return err;
 }
@@ -4815,19 +4752,29 @@ ufshcd_transfer_rsp_status(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
  * ufshcd_uic_cmd_compl - handle completion of uic command
  * @hba: per adapter instance
  * @intr_status: interrupt status generated by the controller
+ *
+ * Returns
+ *  IRQ_HANDLED - If interrupt is valid
+ *  IRQ_NONE    - If invalid interrupt
  */
-static void ufshcd_uic_cmd_compl(struct ufs_hba *hba, u32 intr_status)
+static irqreturn_t ufshcd_uic_cmd_compl(struct ufs_hba *hba, u32 intr_status)
 {
+	irqreturn_t retval = IRQ_NONE;
+
 	if ((intr_status & UIC_COMMAND_COMPL) && hba->active_uic_cmd) {
 		hba->active_uic_cmd->argument2 |=
 			ufshcd_get_uic_cmd_result(hba);
 		hba->active_uic_cmd->argument3 =
 			ufshcd_get_dme_attr_val(hba);
 		complete(&hba->active_uic_cmd->done);
+		retval = IRQ_HANDLED;
 	}
 
-	if ((intr_status & UFSHCD_UIC_PWR_MASK) && hba->uic_async_done)
+	if ((intr_status & UFSHCD_UIC_PWR_MASK) && hba->uic_async_done) {
 		complete(hba->uic_async_done);
+		retval = IRQ_HANDLED;
+	}
+
+	return retval;
 }
 
 /**
@@ -4853,7 +4800,6 @@ static void __ufshcd_transfer_req_compl(struct ufs_hba *hba,
 			cmd->result = result;
 			/* Mark completed command as NULL in LRB */
 			lrbp->cmd = NULL;
-			clear_bit_unlock(index, &hba->lrb_in_use);
 			/* Do not touch lrbp after scsi done */
 			cmd->scsi_done(cmd);
 			__ufshcd_release(hba);
@@ -4875,16 +4821,17 @@ static void __ufshcd_transfer_req_compl(struct ufs_hba *hba,
 	hba->outstanding_reqs ^= completed_reqs;
 
 	ufshcd_clk_scaling_update_busy(hba);
-
-	/* we might have free'd some tags above */
-	wake_up(&hba->dev_cmd.tag_wq);
 }
 
 /**
  * ufshcd_transfer_req_compl - handle SCSI and query command completion
  * @hba: per adapter instance
+ *
+ * Returns
+ *  IRQ_HANDLED - If interrupt is valid
+ *  IRQ_NONE    - If invalid interrupt
  */
-static void ufshcd_transfer_req_compl(struct ufs_hba *hba)
+static irqreturn_t ufshcd_transfer_req_compl(struct ufs_hba *hba)
 {
 	unsigned long completed_reqs;
 	u32 tr_doorbell;
@@ -4903,7 +4850,12 @@ static void ufshcd_transfer_req_compl(struct ufs_hba *hba)
 	tr_doorbell = ufshcd_readl(hba, REG_UTP_TRANSFER_REQ_DOOR_BELL);
 	completed_reqs = tr_doorbell ^ hba->outstanding_reqs;
 
-	__ufshcd_transfer_req_compl(hba, completed_reqs);
+	if (completed_reqs) {
+		__ufshcd_transfer_req_compl(hba, completed_reqs);
+		return IRQ_HANDLED;
+	} else {
+		return IRQ_NONE;
+	}
 }
 
 /**
@@ -5424,61 +5376,77 @@ out:
 /**
  * ufshcd_update_uic_error - check and set fatal UIC error flags.
  * @hba: per-adapter instance
+ *
+ * Returns
+ *  IRQ_HANDLED - If interrupt is valid
+ *  IRQ_NONE    - If invalid interrupt
  */
-static void ufshcd_update_uic_error(struct ufs_hba *hba)
+static irqreturn_t ufshcd_update_uic_error(struct ufs_hba *hba)
 {
 	u32 reg;
+	irqreturn_t retval = IRQ_NONE;
 
 	/* PHY layer lane error */
 	reg = ufshcd_readl(hba, REG_UIC_ERROR_CODE_PHY_ADAPTER_LAYER);
 	/* Ignore LINERESET indication, as this is not an error */
 	if ((reg & UIC_PHY_ADAPTER_LAYER_ERROR) &&
-			(reg & UIC_PHY_ADAPTER_LAYER_LANE_ERR_MASK)) {
+	    (reg & UIC_PHY_ADAPTER_LAYER_LANE_ERR_MASK)) {
 		/*
 		 * To know whether this error is fatal or not, DB timeout
 		 * must be checked but this error is handled separately.
 		 */
 		dev_dbg(hba->dev, "%s: UIC Lane error reported\n", __func__);
 		ufshcd_update_reg_hist(&hba->ufs_stats.pa_err, reg);
+		retval |= IRQ_HANDLED;
 	}
 
 	/* PA_INIT_ERROR is fatal and needs UIC reset */
 	reg = ufshcd_readl(hba, REG_UIC_ERROR_CODE_DATA_LINK_LAYER);
-	if (reg)
+	if ((reg & UIC_DATA_LINK_LAYER_ERROR) &&
+	    (reg & UIC_DATA_LINK_LAYER_ERROR_CODE_MASK)) {
 		ufshcd_update_reg_hist(&hba->ufs_stats.dl_err, reg);
 
-	if (reg & UIC_DATA_LINK_LAYER_ERROR_PA_INIT)
-		hba->uic_error |= UFSHCD_UIC_DL_PA_INIT_ERROR;
-	else if (hba->dev_quirks &
-		   UFS_DEVICE_QUIRK_RECOVERY_FROM_DL_NAC_ERRORS) {
-		if (reg & UIC_DATA_LINK_LAYER_ERROR_NAC_RECEIVED)
-			hba->uic_error |=
-				UFSHCD_UIC_DL_NAC_RECEIVED_ERROR;
-		else if (reg & UIC_DATA_LINK_LAYER_ERROR_TCx_REPLAY_TIMEOUT)
-			hba->uic_error |= UFSHCD_UIC_DL_TCx_REPLAY_ERROR;
+		if (reg & UIC_DATA_LINK_LAYER_ERROR_PA_INIT)
+			hba->uic_error |= UFSHCD_UIC_DL_PA_INIT_ERROR;
+		else if (hba->dev_quirks &
+				UFS_DEVICE_QUIRK_RECOVERY_FROM_DL_NAC_ERRORS) {
+			if (reg & UIC_DATA_LINK_LAYER_ERROR_NAC_RECEIVED)
+				hba->uic_error |=
+					UFSHCD_UIC_DL_NAC_RECEIVED_ERROR;
+			else if (reg & UIC_DATA_LINK_LAYER_ERROR_TCx_REPLAY_TIMEOUT)
+				hba->uic_error |= UFSHCD_UIC_DL_TCx_REPLAY_ERROR;
+		}
+		retval |= IRQ_HANDLED;
 	}
 
 	/* UIC NL/TL/DME errors needs software retry */
 	reg = ufshcd_readl(hba, REG_UIC_ERROR_CODE_NETWORK_LAYER);
-	if (reg) {
+	if ((reg & UIC_NETWORK_LAYER_ERROR) &&
+	    (reg & UIC_NETWORK_LAYER_ERROR_CODE_MASK)) {
 		ufshcd_update_reg_hist(&hba->ufs_stats.nl_err, reg);
 		hba->uic_error |= UFSHCD_UIC_NL_ERROR;
+		retval |= IRQ_HANDLED;
 	}
 
 	reg = ufshcd_readl(hba, REG_UIC_ERROR_CODE_TRANSPORT_LAYER);
-	if (reg) {
+	if ((reg & UIC_TRANSPORT_LAYER_ERROR) &&
+	    (reg & UIC_TRANSPORT_LAYER_ERROR_CODE_MASK)) {
 		ufshcd_update_reg_hist(&hba->ufs_stats.tl_err, reg);
 		hba->uic_error |= UFSHCD_UIC_TL_ERROR;
+		retval |= IRQ_HANDLED;
 	}
 
 	reg = ufshcd_readl(hba, REG_UIC_ERROR_CODE_DME);
-	if (reg) {
+	if ((reg & UIC_DME_ERROR) &&
+	    (reg & UIC_DME_ERROR_CODE_MASK)) {
 		ufshcd_update_reg_hist(&hba->ufs_stats.dme_err, reg);
 		hba->uic_error |= UFSHCD_UIC_DME_ERROR;
+		retval |= IRQ_HANDLED;
 	}
 
 	dev_dbg(hba->dev, "%s: UIC error flags = 0x%08x\n",
 			__func__, hba->uic_error);
+	return retval;
 }
 
 static bool ufshcd_is_auto_hibern8_error(struct ufs_hba *hba,
@@ -5502,10 +5470,15 @@ static bool ufshcd_is_auto_hibern8_error(struct ufs_hba *hba,
 /**
  * ufshcd_check_errors - Check for errors that need s/w attention
  * @hba: per-adapter instance
+ *
+ * Returns
+ *  IRQ_HANDLED - If interrupt is valid
+ *  IRQ_NONE    - If invalid interrupt
  */
-static void ufshcd_check_errors(struct ufs_hba *hba)
+static irqreturn_t ufshcd_check_errors(struct ufs_hba *hba)
 {
 	bool queue_eh_work = false;
+	irqreturn_t retval = IRQ_NONE;
 
 	if (hba->errors & INT_FATAL_ERRORS) {
 		ufshcd_update_reg_hist(&hba->ufs_stats.fatal_err, hba->errors);
@@ -5514,7 +5487,7 @@ static void ufshcd_check_errors(struct ufs_hba *hba)
 
 	if (hba->errors & UIC_ERROR) {
 		hba->uic_error = 0;
-		ufshcd_update_uic_error(hba);
+		retval = ufshcd_update_uic_error(hba);
 		if (hba->uic_error)
 			queue_eh_work = true;
 	}
@@ -5562,6 +5535,7 @@ static void ufshcd_check_errors(struct ufs_hba *hba)
 			}
 			schedule_work(&hba->eh_work);
 		}
+		retval |= IRQ_HANDLED;
 	}
 	/*
 	 * if (!queue_eh_work) -
@@ -5569,44 +5543,81 @@ static void ufshcd_check_errors(struct ufs_hba *hba)
 	 * itself without s/w intervention or errors that will be
 	 * handled by the SCSI core layer.
 	 */
+	return retval;
+}
+
+struct ctm_info {
+	struct ufs_hba	*hba;
+	unsigned long	pending;
+	unsigned int	ncpl;
+};
+
+static bool ufshcd_compl_tm(struct request *req, void *priv, bool reserved)
+{
+	struct ctm_info *const ci = priv;
+	struct completion *c;
+
+	WARN_ON_ONCE(reserved);
+	if (test_bit(req->tag, &ci->pending))
+		return true;
+	ci->ncpl++;
+	c = req->end_io_data;
+	if (c)
+		complete(c);
+	return true;
 }
 
 /**
  * ufshcd_tmc_handler - handle task management function completion
  * @hba: per adapter instance
+ *
+ * Returns
+ *  IRQ_HANDLED - If interrupt is valid
+ *  IRQ_NONE    - If invalid interrupt
  */
-static void ufshcd_tmc_handler(struct ufs_hba *hba)
+static irqreturn_t ufshcd_tmc_handler(struct ufs_hba *hba)
 {
-	u32 tm_doorbell;
+	struct request_queue *q = hba->tmf_queue;
+	struct ctm_info ci = {
+		.hba	 = hba,
+		.pending = ufshcd_readl(hba, REG_UTP_TASK_REQ_DOOR_BELL),
+	};
 
-	tm_doorbell = ufshcd_readl(hba, REG_UTP_TASK_REQ_DOOR_BELL);
-	hba->tm_condition = tm_doorbell ^ hba->outstanding_tasks;
-	wake_up(&hba->tm_wq);
+	blk_mq_tagset_busy_iter(q->tag_set, ufshcd_compl_tm, &ci);
+	return ci.ncpl ? IRQ_HANDLED : IRQ_NONE;
 }
 
 /**
  * ufshcd_sl_intr - Interrupt service routine
  * @hba: per adapter instance
  * @intr_status: contains interrupts generated by the controller
+ *
+ * Returns
+ *  IRQ_HANDLED - If interrupt is valid
+ *  IRQ_NONE    - If invalid interrupt
  */
-static void ufshcd_sl_intr(struct ufs_hba *hba, u32 intr_status)
+static irqreturn_t ufshcd_sl_intr(struct ufs_hba *hba, u32 intr_status)
 {
+	irqreturn_t retval = IRQ_NONE;
+
 	hba->errors = UFSHCD_ERROR_MASK & intr_status;
 
 	if (ufshcd_is_auto_hibern8_error(hba, intr_status))
 		hba->errors |= (UFSHCD_UIC_HIBERN8_MASK & intr_status);
 
 	if (hba->errors)
-		ufshcd_check_errors(hba);
+		retval |= ufshcd_check_errors(hba);
 
 	if (intr_status & UFSHCD_UIC_MASK)
-		ufshcd_uic_cmd_compl(hba, intr_status);
+		retval |= ufshcd_uic_cmd_compl(hba, intr_status);
 
 	if (intr_status & UTP_TASK_REQ_COMPL)
-		ufshcd_tmc_handler(hba);
+		retval |= ufshcd_tmc_handler(hba);
 
 	if (intr_status & UTP_TRANSFER_REQ_COMPL)
-		ufshcd_transfer_req_compl(hba);
+		retval |= ufshcd_transfer_req_compl(hba);
+
+	return retval;
 }
 
 /**
@@ -5614,8 +5625,9 @@ static void ufshcd_sl_intr(struct ufs_hba *hba, u32 intr_status)
  * @irq: irq number
  * @__hba: pointer to adapter instance
  *
- * Returns IRQ_HANDLED - If interrupt is valid
- *		IRQ_NONE - If invalid interrupt
+ * Returns
+ *  IRQ_HANDLED - If interrupt is valid
+ *  IRQ_NONE    - If invalid interrupt
  */
 static irqreturn_t ufshcd_intr(int irq, void *__hba)
 {
@@ -5638,14 +5650,18 @@ static irqreturn_t ufshcd_intr(int irq, void *__hba)
 			intr_status & ufshcd_readl(hba, REG_INTERRUPT_ENABLE);
 		if (intr_status)
 			ufshcd_writel(hba, intr_status, REG_INTERRUPT_STATUS);
-		if (enabled_intr_status) {
-			ufshcd_sl_intr(hba, enabled_intr_status);
-			retval = IRQ_HANDLED;
-		}
+		if (enabled_intr_status)
+			retval |= ufshcd_sl_intr(hba, enabled_intr_status);
 
 		intr_status = ufshcd_readl(hba, REG_INTERRUPT_STATUS);
 	}
 
+	if (retval == IRQ_NONE) {
+		dev_err(hba->dev, "%s: Unhandled interrupt 0x%08x\n",
+					__func__, intr_status);
+		ufshcd_dump_regs(hba, 0, UFSHCI_REG_SPACE_SIZE, "host_regs: ");
+	}
+
 	spin_unlock(hba->host->host_lock);
 	return retval;
 }
@@ -5674,33 +5690,36 @@ out:
 static int __ufshcd_issue_tm_cmd(struct ufs_hba *hba,
 		struct utp_task_req_desc *treq, u8 tm_function)
 {
+	struct request_queue *q = hba->tmf_queue;
 	struct Scsi_Host *host = hba->host;
+	DECLARE_COMPLETION_ONSTACK(wait);
+	struct request *req;
 	unsigned long flags;
-	int free_slot, task_tag, err;
+	int task_tag, err;
 
 	/*
-	 * Get free slot, sleep if slots are unavailable.
-	 * Even though we use wait_event() which sleeps indefinitely,
-	 * the maximum wait time is bounded by %TM_CMD_TIMEOUT.
+	 * blk_get_request() is used here only to get a free tag.
 	 */
-	wait_event(hba->tm_tag_wq, ufshcd_get_tm_free_slot(hba, &free_slot));
+	req = blk_get_request(q, REQ_OP_DRV_OUT, BLK_MQ_REQ_RESERVED);
+	req->end_io_data = &wait;
 	ufshcd_hold(hba, false);
 
 	spin_lock_irqsave(host->host_lock, flags);
-	task_tag = hba->nutrs + free_slot;
+	blk_mq_start_request(req);
+	task_tag = req->tag;
 
 	treq->req_header.dword_0 |= cpu_to_be32(task_tag);
 
-	memcpy(hba->utmrdl_base_addr + free_slot, treq, sizeof(*treq));
-	ufshcd_vops_setup_task_mgmt(hba, free_slot, tm_function);
+	memcpy(hba->utmrdl_base_addr + task_tag, treq, sizeof(*treq));
+	ufshcd_vops_setup_task_mgmt(hba, task_tag, tm_function);
 
 	/* send command to the controller */
-	__set_bit(free_slot, &hba->outstanding_tasks);
+	__set_bit(task_tag, &hba->outstanding_tasks);
 
 	/* Make sure descriptors are ready before ringing the task doorbell */
 	wmb();
 
-	ufshcd_writel(hba, 1 << free_slot, REG_UTP_TASK_REQ_DOOR_BELL);
+	ufshcd_writel(hba, 1 << task_tag, REG_UTP_TASK_REQ_DOOR_BELL);
 	/* Make sure that doorbell is committed immediately */
 	wmb();
@@ -5709,33 +5728,35 @@ static int __ufshcd_issue_tm_cmd(struct ufs_hba *hba,
 
 	ufshcd_add_tm_upiu_trace(hba, task_tag, "tm_send");
 
 	/* wait until the task management command is completed */
-	err = wait_event_timeout(hba->tm_wq,
-			test_bit(free_slot, &hba->tm_condition),
+	err = wait_for_completion_io_timeout(&wait,
 			msecs_to_jiffies(TM_CMD_TIMEOUT));
 	if (!err) {
+		/*
+		 * Make sure that ufshcd_compl_tm() does not trigger a
+		 * use-after-free.
+		 */
+		req->end_io_data = NULL;
 		ufshcd_add_tm_upiu_trace(hba, task_tag, "tm_complete_err");
 		dev_err(hba->dev, "%s: task management cmd 0x%.2x timed-out\n",
 				__func__, tm_function);
-		if (ufshcd_clear_tm_cmd(hba, free_slot))
-			dev_WARN(hba->dev, "%s: unable clear tm cmd (slot %d) after timeout\n",
-					__func__, free_slot);
+		if (ufshcd_clear_tm_cmd(hba, task_tag))
+			dev_WARN(hba->dev, "%s: unable to clear tm cmd (slot %d) after timeout\n",
+					__func__, task_tag);
 		err = -ETIMEDOUT;
 	} else {
 		err = 0;
-		memcpy(treq, hba->utmrdl_base_addr + free_slot, sizeof(*treq));
+		memcpy(treq, hba->utmrdl_base_addr + task_tag, sizeof(*treq));
 
 		ufshcd_add_tm_upiu_trace(hba, task_tag, "tm_complete");
 	}
 
 	spin_lock_irqsave(hba->host->host_lock, flags);
-	__clear_bit(free_slot, &hba->outstanding_tasks);
+	__clear_bit(task_tag, &hba->outstanding_tasks);
 	spin_unlock_irqrestore(hba->host->host_lock, flags);
 
-	clear_bit(free_slot, &hba->tm_condition);
-	ufshcd_put_tm_slot(hba, free_slot);
-	wake_up(&hba->tm_tag_wq);
-
 	ufshcd_release(hba);
+	blk_put_request(req);
+
 	return err;
 }
@@ -5809,6 +5830,8 @@ static int ufshcd_issue_devman_upiu_cmd(struct ufs_hba *hba,
 					int cmd_type,
 					enum query_opcode desc_op)
 {
+	struct request_queue *q = hba->cmd_queue;
+	struct request *req;
 	struct ufshcd_lrb *lrbp;
 	int err = 0;
 	int tag;
@@ -5818,7 +5841,11 @@ static int ufshcd_issue_devman_upiu_cmd(struct ufs_hba *hba,
 
 	down_read(&hba->clk_scaling_lock);
 
-	wait_event(hba->dev_cmd.tag_wq, ufshcd_get_dev_cmd_tag(hba, &tag));
+	req = blk_get_request(q, REQ_OP_DRV_OUT, 0);
+	if (IS_ERR(req))
+		return PTR_ERR(req);
+	tag = req->tag;
+	WARN_ON_ONCE(!ufshcd_valid_tag(hba, tag));
 
 	init_completion(&wait);
 	lrbp = &hba->lrb[tag];
@@ -5892,8 +5919,7 @@ static int ufshcd_issue_devman_upiu_cmd(struct ufs_hba *hba,
 		}
 	}
 
-	ufshcd_put_dev_cmd_tag(hba, tag);
-	wake_up(&hba->dev_cmd.tag_wq);
+	blk_put_request(req);
 	up_read(&hba->clk_scaling_lock);
 	return err;
 }
@@ -5980,19 +6006,16 @@ static int ufshcd_eh_device_reset_handler(struct scsi_cmnd *cmd)
 {
 	struct Scsi_Host *host;
 	struct ufs_hba *hba;
-	unsigned int tag;
 	u32 pos;
 	int err;
-	u8 resp = 0xF;
-	struct ufshcd_lrb *lrbp;
+	u8 resp = 0xF, lun;
 	unsigned long flags;
 
 	host = cmd->device->host;
 	hba = shost_priv(host);
-	tag = cmd->request->tag;
-	lrbp = &hba->lrb[tag];
 
-	err = ufshcd_issue_tm_cmd(hba, lrbp->lun, 0, UFS_LOGICAL_RESET, &resp);
+	lun = ufshcd_scsi_to_upiu_lun(cmd->device->lun);
+	err = ufshcd_issue_tm_cmd(hba, lun, 0, UFS_LOGICAL_RESET, &resp);
 	if (err || resp != UPIU_TASK_MANAGEMENT_FUNC_COMPL) {
 		if (!err)
 			err = resp;
@@ -6001,7 +6024,7 @@ static int ufshcd_eh_device_reset_handler(struct scsi_cmnd *cmd)
 
 	/* clear the commands that were pending for corresponding LUN */
 	for_each_set_bit(pos, &hba->outstanding_reqs, hba->nutrs) {
-		if (hba->lrb[pos].lun == lrbp->lun) {
+		if (hba->lrb[pos].lun == lun) {
 			err = ufshcd_clear_cmd(hba, pos);
 			if (err)
 				break;
@@ -6189,9 +6212,6 @@ cleanup:
 	hba->lrb[tag].cmd = NULL;
 	spin_unlock_irqrestore(host->host_lock, flags);
 
-	clear_bit_unlock(tag, &hba->lrb_in_use);
-	wake_up(&hba->dev_cmd.tag_wq);
-
 out:
 	if (!err) {
 		err = SUCCESS;
@@ -8187,6 +8207,9 @@ void ufshcd_remove(struct ufs_hba *hba)
 {
 	ufs_bsg_remove(hba);
 	ufs_sysfs_remove_nodes(hba->dev);
+	blk_cleanup_queue(hba->tmf_queue);
+	blk_mq_free_tag_set(&hba->tmf_tag_set);
+	blk_cleanup_queue(hba->cmd_queue);
 	scsi_remove_host(hba->host);
 	/* disable interrupts */
 	ufshcd_disable_intr(hba, hba->intr_mask);
@@ -8265,6 +8288,18 @@ out_error:
 }
 EXPORT_SYMBOL(ufshcd_alloc_host);
 
+/* This function exists because blk_mq_alloc_tag_set() requires this. */
+static blk_status_t ufshcd_queue_tmf(struct blk_mq_hw_ctx *hctx,
+				     const struct blk_mq_queue_data *qd)
+{
+	WARN_ON_ONCE(true);
+	return BLK_STS_NOTSUPP;
+}
+
+static const struct blk_mq_ops ufshcd_tmf_ops = {
+	.queue_rq = ufshcd_queue_tmf,
+};
+
 /**
  * ufshcd_init - Driver initialization routine
  * @hba: per-adapter instance
@@ -8334,10 +8369,6 @@ int ufshcd_init(struct ufs_hba *hba, void __iomem *mmio_base, unsigned int irq)
 
 	hba->max_pwr_info.is_valid = false;
 
-	/* Initailize wait queue for task management */
-	init_waitqueue_head(&hba->tm_wq);
-	init_waitqueue_head(&hba->tm_tag_wq);
-
 	/* Initialize work queues */
 	INIT_WORK(&hba->eh_work, ufshcd_err_handler);
 	INIT_WORK(&hba->eeh_work, ufshcd_exception_event_handler);
@@ -8350,9 +8381,6 @@ int ufshcd_init(struct ufs_hba *hba, void __iomem *mmio_base, unsigned int irq)
 
 	init_rwsem(&hba->clk_scaling_lock);
 
-	/* Initialize device management tag acquire wait queue */
-	init_waitqueue_head(&hba->dev_cmd.tag_wq);
-
 	ufshcd_init_clk_gating(hba);
 
 	ufshcd_init_clk_scaling(hba);
@@ -8386,6 +8414,27 @@ int ufshcd_init(struct ufs_hba *hba, void __iomem *mmio_base, unsigned int irq)
 		goto exit_gating;
 	}
 
+	hba->cmd_queue = blk_mq_init_queue(&hba->host->tag_set);
+	if (IS_ERR(hba->cmd_queue)) {
+		err = PTR_ERR(hba->cmd_queue);
+		goto out_remove_scsi_host;
+	}
+
+	hba->tmf_tag_set = (struct blk_mq_tag_set) {
+		.nr_hw_queues	= 1,
+		.queue_depth	= hba->nutmrs,
+		.ops		= &ufshcd_tmf_ops,
+		.flags		= BLK_MQ_F_NO_SCHED,
+	};
+	err = blk_mq_alloc_tag_set(&hba->tmf_tag_set);
+	if (err < 0)
+		goto free_cmd_queue;
+	hba->tmf_queue = blk_mq_init_queue(&hba->tmf_tag_set);
+	if (IS_ERR(hba->tmf_queue)) {
+		err = PTR_ERR(hba->tmf_queue);
+		goto free_tmf_tag_set;
+	}
+
 	/* Reset the attached device */
 	ufshcd_vops_device_reset(hba);
 
@@ -8395,7 +8444,7 @@ int ufshcd_init(struct ufs_hba *hba, void __iomem *mmio_base, unsigned int irq)
 		dev_err(hba->dev, "Host controller enable failed\n");
 		ufshcd_print_host_regs(hba);
 		ufshcd_print_host_state(hba);
-		goto out_remove_scsi_host;
+		goto free_tmf_queue;
 	}
 
 	/*
@@ -8432,6 +8481,12 @@ int ufshcd_init(struct ufs_hba *hba, void __iomem *mmio_base, unsigned int irq)
 
 	return 0;
 
+free_tmf_queue:
+	blk_cleanup_queue(hba->tmf_queue);
+free_tmf_tag_set:
+	blk_mq_free_tag_set(&hba->tmf_tag_set);
+free_cmd_queue:
+	blk_cleanup_queue(hba->cmd_queue);
 out_remove_scsi_host:
 	scsi_remove_host(hba->host);
 exit_gating:
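
The central idea of this patch is to stop hand-rolling tag allocation with the lrb_in_use/tm_slots_in_use bitmaps and wait queues and instead let the block layer hand out tags via blk_get_request()/blk_put_request() on a dedicated request queue. The snippet below is a minimal sketch of that pattern, not the driver's literal code: the function name and the "build and issue" placeholder are illustrative, while blk_get_request(), blk_put_request() and REQ_OP_DRV_OUT are the real block-layer API used by the patch in this kernel generation.

/*
 * Sketch: allocate a free tag from a request queue that shares the SCSI
 * host's tag set (as hba->cmd_queue does after this patch), use it to
 * index the per-slot descriptors, then release it. Error paths trimmed.
 */
#include <linux/blkdev.h>
#include <linux/blk-mq.h>
#include <linux/err.h>

static int example_exec_with_block_layer_tag(struct request_queue *q)
{
	struct request *req;
	int tag;

	/* With flags == 0 this sleeps until a tag becomes available. */
	req = blk_get_request(q, REQ_OP_DRV_OUT, 0);
	if (IS_ERR(req))
		return PTR_ERR(req);

	tag = req->tag;		/* guaranteed unique among outstanding requests */

	/* ... build the command for @tag, ring the doorbell, wait ... */

	blk_put_request(req);	/* release the tag */
	return 0;
}

Because device management commands and SCSI commands now draw tags from the same tag set, the earlier requeue-on-conflict path in ufshcd_queuecommand() and the dev_cmd.tag_wq wait queue become unnecessary.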
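For task management functions the patch goes one step further: a tiny private tag set (tmf_tag_set) whose queue_rq callback is never meant to run, a completion stashed in req->end_io_data, and an interrupt handler that walks the started requests with blk_mq_tagset_busy_iter(). The following is a hedged sketch of that completion path under assumed names (example_compl_fn, example_complete_all); it omits the doorbell check that the real ufshcd_compl_tm() performs to skip tags still pending in hardware.

/*
 * Sketch: signal waiters from interrupt context by iterating over all
 * in-flight requests of a tag set and completing the completion each
 * submitter stored in req->end_io_data.
 */
#include <linux/blk-mq.h>
#include <linux/completion.h>

static bool example_compl_fn(struct request *req, void *priv, bool reserved)
{
	struct completion *c = req->end_io_data;

	/* The driver additionally skips tags whose doorbell bit is still set. */
	if (c)
		complete(c);
	return true;	/* keep iterating over the remaining busy tags */
}

/* Called from the ISR once the controller reports TMF completion. */
static void example_complete_all(struct blk_mq_tag_set *tag_set)
{
	blk_mq_tagset_busy_iter(tag_set, example_compl_fn, NULL);
}

This is why __ufshcd_issue_tm_cmd() clears req->end_io_data on timeout: once the submitter gives up waiting, the iterator must not complete() a stack variable that has already gone out of scope.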