diff options
| author | Keith Busch <kbusch@kernel.org> | 2025-09-03 12:33:17 -0700 |
|---|---|---|
| committer | Jens Axboe <axboe@kernel.dk> | 2025-09-09 10:33:35 -0600 |
| commit | d57447ffb5fadffdba920f2fb933296fb6c5ff57 (patch) | |
| tree | dbd4a5c9aab4476d8e7420cae87090c767354775 /drivers | |
| parent | 05ceea5d3ec9a1b1d6858ffd4739fdb0ed1b8eaf (diff) | |
blk-mq-dma: bring back p2p request flags
We only need to consider data and metadata dma mapping types separately.
The request and bio integrity payload have enough flag bits to
internally track the mapping type for each. Use these so the caller
doesn't need to track them, and provide separate request and integrity
helpers to the common code. This will make it easier to scale new
mappings, like the proposed MMIO attribute, without burdening the caller
to track such things.
Reviewed-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Martin K. Petersen <martin.petersen@oracle.com>
Reviewed-by: Leon Romanovsky <leonro@nvidia.com>
Signed-off-by: Keith Busch <kbusch@kernel.org>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
Diffstat (limited to 'drivers')
| -rw-r--r-- | drivers/nvme/host/pci.c | 21 |
1 file changed, 4 insertions(+), 17 deletions(-)
diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c index d8a9dee55de3..28e203b894eb 100644 --- a/drivers/nvme/host/pci.c +++ b/drivers/nvme/host/pci.c @@ -260,12 +260,6 @@ enum nvme_iod_flags { /* single segment dma mapping */ IOD_SINGLE_SEGMENT = 1U << 2, - /* DMA mapped with PCI_P2PDMA_MAP_BUS_ADDR */ - IOD_P2P_BUS_ADDR = 1U << 3, - - /* Metadata DMA mapped with PCI_P2PDMA_MAP_BUS_ADDR */ - IOD_META_P2P_BUS_ADDR = 1U << 4, - /* Metadata using non-coalesced MPTR */ IOD_SINGLE_META_SEGMENT = 1U << 5, }; @@ -737,9 +731,8 @@ static void nvme_unmap_metadata(struct request *req) return; } - if (!blk_rq_dma_unmap(req, dma_dev, &iod->meta_dma_state, - iod->meta_total_len, - iod->flags & IOD_META_P2P_BUS_ADDR)) { + if (!blk_rq_integrity_dma_unmap(req, dma_dev, &iod->meta_dma_state, + iod->meta_total_len)) { if (nvme_pci_cmd_use_meta_sgl(&iod->cmd)) nvme_free_sgls(req, sge, &sge[1]); else @@ -766,8 +759,7 @@ static void nvme_unmap_data(struct request *req) return; } - if (!blk_rq_dma_unmap(req, dma_dev, &iod->dma_state, iod->total_len, - iod->flags & IOD_P2P_BUS_ADDR)) { + if (!blk_rq_dma_unmap(req, dma_dev, &iod->dma_state, iod->total_len)) { if (nvme_pci_cmd_use_sgl(&iod->cmd)) nvme_free_sgls(req, iod->descriptors[0], &iod->cmd.common.dptr.sgl); @@ -1043,9 +1035,6 @@ static blk_status_t nvme_map_data(struct request *req) if (!blk_rq_dma_map_iter_start(req, dev->dev, &iod->dma_state, &iter)) return iter.status; - if (iter.p2pdma.map == PCI_P2PDMA_MAP_BUS_ADDR) - iod->flags |= IOD_P2P_BUS_ADDR; - if (use_sgl == SGL_FORCED || (use_sgl == SGL_SUPPORTED && (sgl_threshold && nvme_pci_avg_seg_size(req) >= sgl_threshold))) @@ -1068,9 +1057,7 @@ static blk_status_t nvme_pci_setup_meta_sgls(struct request *req) &iod->meta_dma_state, &iter)) return iter.status; - if (iter.p2pdma.map == PCI_P2PDMA_MAP_BUS_ADDR) - iod->flags |= IOD_META_P2P_BUS_ADDR; - else if (blk_rq_dma_map_coalesce(&iod->meta_dma_state)) + if (blk_rq_dma_map_coalesce(&iod->meta_dma_state)) entries = 1; 
/* |
