author     Keith Busch <keith.busch@intel.com>      2017-11-07 15:13:12 -0700
committer  Jens Axboe <axboe@kernel.dk>             2017-11-10 19:53:25 -0700
commit     ad22c355b707a8d8d48e282aadc01c0b0604b2e9
tree       14337d9cd3461d7b675ce3ef034f2a57e446f445
parent     08e1507544839b98fc3732aea935e70ed9c209ec
nvme: remove handling of multiple AEN requests
The driver can track only one AEN request at a time, so this patch
removes the handling for multiple ones.
Reviewed-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: James Smart <james.smart@broadcom.com>
Signed-off-by: Keith Busch <keith.busch@intel.com>
Signed-off-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
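
For context, the effect of the patch is that the core keeps exactly one AEN
command outstanding and rearms it from the completion path, instead of
counting submissions down from NVME_NR_AEN_COMMANDS and passing an index to
each transport. Below is a minimal userspace sketch of that control flow;
the fake_* names and stand-in types are illustrative only, not kernel APIs,
and in the kernel the rearm is a deferred queue_work() rather than the
direct call shown here.

    #include <stdbool.h>
    #include <stdio.h>

    /* Illustrative stand-in for struct nvme_ctrl; not the kernel structure. */
    struct fake_ctrl {
            int aens_submitted;
    };

    /* Models the single ->submit_async_event() op: one AEN, no index. */
    static void fake_submit_async_event(struct fake_ctrl *ctrl)
    {
            ctrl->aens_submitted++;
            printf("AEN #%d submitted\n", ctrl->aens_submitted);
    }

    /* Models nvme_async_event_work(): submit exactly one AEN per run. */
    static void fake_async_event_work(struct fake_ctrl *ctrl)
    {
            fake_submit_async_event(ctrl);
    }

    /* Models the completion path: on success, handle the event and rearm.
     * In the kernel the rearm is queue_work(), which runs the work item
     * later instead of recursing as this sketch does. */
    static void fake_complete_async_event(struct fake_ctrl *ctrl, bool success)
    {
            if (!success)
                    return;         /* aborted/failed AEN: do not resubmit */
            /* ...decode and act on the event result here... */
            fake_async_event_work(ctrl);
    }

    int main(void)
    {
            struct fake_ctrl ctrl = { 0 };

            fake_async_event_work(&ctrl);            /* initial arm at init */
            fake_complete_async_event(&ctrl, true);  /* event arrives, rearm */
            fake_complete_async_event(&ctrl, false); /* failure: cycle stops */
            return 0;
    }

With only a single tracked request there is no event_limit counter to protect
with the controller lock, which is why the spin_lock_irq()/while loop in
nvme_async_event_work() below collapses to a single call.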
-rw-r--r--   drivers/nvme/host/core.c   | 28
-rw-r--r--   drivers/nvme/host/fc.c     |  9
-rw-r--r--   drivers/nvme/host/nvme.h   |  3
-rw-r--r--   drivers/nvme/host/pci.c    |  4
-rw-r--r--   drivers/nvme/host/rdma.c   |  5
-rw-r--r--   drivers/nvme/target/loop.c |  2

6 files changed, 11 insertions, 40 deletions
diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c
index 315087dfcd8d..dedbf12847b6 100644
--- a/drivers/nvme/host/core.c
+++ b/drivers/nvme/host/core.c
@@ -2670,15 +2670,7 @@ static void nvme_async_event_work(struct work_struct *work)
         struct nvme_ctrl *ctrl = container_of(work, struct nvme_ctrl,
                         async_event_work);
 
-        spin_lock_irq(&ctrl->lock);
-        while (ctrl->state == NVME_CTRL_LIVE && ctrl->event_limit > 0) {
-                int aer_idx = --ctrl->event_limit;
-
-                spin_unlock_irq(&ctrl->lock);
-                ctrl->ops->submit_async_event(ctrl, aer_idx);
-                spin_lock_irq(&ctrl->lock);
-        }
-        spin_unlock_irq(&ctrl->lock);
+        ctrl->ops->submit_async_event(ctrl);
 }
 
 static bool nvme_ctrl_pp_status(struct nvme_ctrl *ctrl)
@@ -2745,22 +2737,8 @@ void nvme_complete_async_event(struct nvme_ctrl *ctrl, __le16 status,
                 union nvme_result *res)
 {
         u32 result = le32_to_cpu(res->u32);
-        bool done = true;
 
-        switch (le16_to_cpu(status) >> 1) {
-        case NVME_SC_SUCCESS:
-                done = false;
-                /*FALLTHRU*/
-        case NVME_SC_ABORT_REQ:
-                ++ctrl->event_limit;
-                if (ctrl->state == NVME_CTRL_LIVE)
-                        queue_work(nvme_wq, &ctrl->async_event_work);
-                break;
-        default:
-                break;
-        }
-
-        if (done)
+        if (le16_to_cpu(status) >> 1 != NVME_SC_SUCCESS)
                 return;
 
         switch (result & 0xff07) {
@@ -2774,12 +2752,12 @@ void nvme_complete_async_event(struct nvme_ctrl *ctrl, __le16 status,
         default:
                 dev_warn(ctrl->device, "async event result %08x\n", result);
         }
+        queue_work(nvme_wq, &ctrl->async_event_work);
 }
 EXPORT_SYMBOL_GPL(nvme_complete_async_event);
 
 void nvme_queue_async_events(struct nvme_ctrl *ctrl)
 {
-        ctrl->event_limit = NVME_NR_AEN_COMMANDS;
         queue_work(nvme_wq, &ctrl->async_event_work);
 }
 EXPORT_SYMBOL_GPL(nvme_queue_async_events);
diff --git a/drivers/nvme/host/fc.c b/drivers/nvme/host/fc.c
index 3aa595029192..6eb460b117d6 100644
--- a/drivers/nvme/host/fc.c
+++ b/drivers/nvme/host/fc.c
@@ -2382,7 +2382,7 @@ nvme_fc_poll(struct blk_mq_hw_ctx *hctx, unsigned int tag)
 }
 
 static void
-nvme_fc_submit_async_event(struct nvme_ctrl *arg, int aer_idx)
+nvme_fc_submit_async_event(struct nvme_ctrl *arg)
 {
         struct nvme_fc_ctrl *ctrl = to_fc_ctrl(arg);
         struct nvme_fc_fcp_op *aen_op;
@@ -2390,9 +2390,6 @@ nvme_fc_submit_async_event(struct nvme_ctrl *arg, int aer_idx)
         bool terminating = false;
         blk_status_t ret;
 
-        if (aer_idx > NVME_NR_AEN_COMMANDS)
-                return;
-
         spin_lock_irqsave(&ctrl->lock, flags);
         if (ctrl->flags & FCCTRL_TERMIO)
                 terminating = true;
@@ -2401,13 +2398,13 @@ nvme_fc_submit_async_event(struct nvme_ctrl *arg, int aer_idx)
         if (terminating)
                 return;
 
-        aen_op = &ctrl->aen_ops[aer_idx];
+        aen_op = &ctrl->aen_ops[0];
 
         ret = nvme_fc_start_fcp_op(ctrl, aen_op->queue, aen_op, 0,
                                         NVMEFC_FCP_NODATA);
         if (ret)
                 dev_err(ctrl->ctrl.device,
-                        "failed async event work [%d]\n", aer_idx);
+                        "failed async event work\n");
 }
 
 static void
diff --git a/drivers/nvme/host/nvme.h b/drivers/nvme/host/nvme.h
index a6d750cfa6b2..b55c97ecea31 100644
--- a/drivers/nvme/host/nvme.h
+++ b/drivers/nvme/host/nvme.h
@@ -162,7 +162,6 @@ struct nvme_ctrl {
         u16 nssa;
         u16 nr_streams;
         atomic_t abort_limit;
-        u8 event_limit;
         u8 vwc;
         u32 vs;
         u32 sgls;
@@ -237,7 +236,7 @@ struct nvme_ctrl_ops {
         int (*reg_write32)(struct nvme_ctrl *ctrl, u32 off, u32 val);
         int (*reg_read64)(struct nvme_ctrl *ctrl, u32 off, u64 *val);
         void (*free_ctrl)(struct nvme_ctrl *ctrl);
-        void (*submit_async_event)(struct nvme_ctrl *ctrl, int aer_idx);
+        void (*submit_async_event)(struct nvme_ctrl *ctrl);
         void (*delete_ctrl)(struct nvme_ctrl *ctrl);
         int (*get_address)(struct nvme_ctrl *ctrl, char *buf, int size);
         int (*reinit_request)(void *data, struct request *rq);
diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c
index c3dfd84feef7..429d56f1a19e 100644
--- a/drivers/nvme/host/pci.c
+++ b/drivers/nvme/host/pci.c
@@ -1043,7 +1043,7 @@ static int nvme_poll(struct blk_mq_hw_ctx *hctx, unsigned int tag)
         return __nvme_poll(nvmeq, tag);
 }
 
-static void nvme_pci_submit_async_event(struct nvme_ctrl *ctrl, int aer_idx)
+static void nvme_pci_submit_async_event(struct nvme_ctrl *ctrl)
 {
         struct nvme_dev *dev = to_nvme_dev(ctrl);
         struct nvme_queue *nvmeq = dev->queues[0];
@@ -1051,7 +1051,7 @@ static void nvme_pci_submit_async_event(struct nvme_ctrl *ctrl, int aer_idx)
 
         memset(&c, 0, sizeof(c));
         c.common.opcode = nvme_admin_async_event;
-        c.common.command_id = NVME_AQ_BLK_MQ_DEPTH + aer_idx;
+        c.common.command_id = NVME_AQ_BLK_MQ_DEPTH;
 
         spin_lock_irq(&nvmeq->q_lock);
         __nvme_submit_cmd(nvmeq, &c);
diff --git a/drivers/nvme/host/rdma.c b/drivers/nvme/host/rdma.c
index 008791bbdfb3..c8d854474a5b 100644
--- a/drivers/nvme/host/rdma.c
+++ b/drivers/nvme/host/rdma.c
@@ -1293,7 +1293,7 @@ static struct blk_mq_tags *nvme_rdma_tagset(struct nvme_rdma_queue *queue)
         return queue->ctrl->tag_set.tags[queue_idx - 1];
 }
 
-static void nvme_rdma_submit_async_event(struct nvme_ctrl *arg, int aer_idx)
+static void nvme_rdma_submit_async_event(struct nvme_ctrl *arg)
 {
         struct nvme_rdma_ctrl *ctrl = to_rdma_ctrl(arg);
         struct nvme_rdma_queue *queue = &ctrl->queues[0];
@@ -1303,9 +1303,6 @@ static void nvme_rdma_submit_async_event(struct nvme_ctrl *arg, int aer_idx)
         struct ib_sge sge;
         int ret;
 
-        if (WARN_ON_ONCE(aer_idx != 0))
-                return;
-
         ib_dma_sync_single_for_cpu(dev, sqe->dma, sizeof(*cmd), DMA_TO_DEVICE);
 
         memset(cmd, 0, sizeof(*cmd));
diff --git a/drivers/nvme/target/loop.c b/drivers/nvme/target/loop.c
index 7258b796f209..f40e70eb4a38 100644
--- a/drivers/nvme/target/loop.c
+++ b/drivers/nvme/target/loop.c
@@ -184,7 +184,7 @@ static blk_status_t nvme_loop_queue_rq(struct blk_mq_hw_ctx *hctx,
         return BLK_STS_OK;
 }
 
-static void nvme_loop_submit_async_event(struct nvme_ctrl *arg, int aer_idx)
+static void nvme_loop_submit_async_event(struct nvme_ctrl *arg)
 {
         struct nvme_loop_ctrl *ctrl = to_loop_ctrl(arg);
         struct nvme_loop_queue *queue = &ctrl->queues[0];