Diffstat (limited to 'drivers')
-rw-r--r--  drivers/virtio/virtio_ring.c | 211
-rw-r--r--  drivers/virtio/virtio_vdpa.c |   3
2 files changed, 172 insertions, 42 deletions
diff --git a/drivers/virtio/virtio_ring.c b/drivers/virtio/virtio_ring.c
index 46515b017ccb..f91a432b3e53 100644
--- a/drivers/virtio/virtio_ring.c
+++ b/drivers/virtio/virtio_ring.c
@@ -297,8 +297,14 @@ size_t virtio_max_dma_size(const struct virtio_device *vdev)
{
size_t max_segment_size = SIZE_MAX;
- if (vring_use_map_api(vdev))
- max_segment_size = dma_max_mapping_size(vdev->dev.parent);
+ if (vring_use_map_api(vdev)) {
+ if (vdev->map) {
+ max_segment_size =
+ vdev->map->max_mapping_size(vdev->vmap);
+ } else
+ max_segment_size =
+ dma_max_mapping_size(vdev->dev.parent);
+ }
return max_segment_size;
}
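With the hunk above, virtio_max_dma_size() consults the device's mapping ops (when vdev->map is set) instead of always asking the DMA layer. A minimal usage sketch, assuming a hypothetical driver context and a block-layer style limit setter; nothing below is from the patch except virtio_max_dma_size() itself:

static void example_seg_limit(struct virtio_device *vdev,
			      struct request_queue *q)
{
	size_t max_size = virtio_max_dma_size(vdev);

	/* Never advertise segments larger than one mapping can cover. */
	blk_queue_max_segment_size(q, min_t(size_t, max_size, U32_MAX));
}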
@@ -309,8 +315,8 @@ static void *vring_alloc_queue(struct virtio_device *vdev, size_t size,
union virtio_map map)
{
if (vring_use_map_api(vdev)) {
- return dma_alloc_coherent(map.dma_dev, size,
- map_handle, flag);
+ return virtqueue_map_alloc_coherent(vdev, map, size,
+ map_handle, flag);
} else {
void *queue = alloc_pages_exact(PAGE_ALIGN(size), flag);
@@ -343,7 +349,8 @@ static void vring_free_queue(struct virtio_device *vdev, size_t size,
union virtio_map map)
{
if (vring_use_map_api(vdev))
- dma_free_coherent(map.dma_dev, size, queue, map_handle);
+ virtqueue_map_free_coherent(vdev, map, size,
+ queue, map_handle);
else
free_pages_exact(queue, PAGE_ALIGN(size));
}
@@ -358,6 +365,20 @@ static struct device *vring_dma_dev(const struct vring_virtqueue *vq)
return vq->map.dma_dev;
}
+static int vring_mapping_error(const struct vring_virtqueue *vq,
+ dma_addr_t addr)
+{
+ struct virtio_device *vdev = vq->vq.vdev;
+
+ if (!vq->use_map_api)
+ return 0;
+
+ if (vdev->map)
+ return vdev->map->mapping_error(vq->map, addr);
+ else
+ return dma_mapping_error(vring_dma_dev(vq), addr);
+}
+
/* Map one sg entry. */
static int vring_map_one_sg(const struct vring_virtqueue *vq, struct scatterlist *sg,
enum dma_data_direction direction, dma_addr_t *addr,
@@ -387,11 +408,11 @@ static int vring_map_one_sg(const struct vring_virtqueue *vq, struct scatterlist
* the way it expects (we don't guarantee that the scatterlist
* will exist for the lifetime of the mapping).
*/
- *addr = dma_map_page(vring_dma_dev(vq),
- sg_page(sg), sg->offset, sg->length,
- direction);
+ *addr = virtqueue_map_page_attrs(&vq->vq, sg_page(sg),
+ sg->offset, sg->length,
+ direction, 0);
- if (dma_mapping_error(vring_dma_dev(vq), *addr))
+ if (vring_mapping_error(vq, *addr))
return -ENOMEM;
return 0;
@@ -408,15 +429,6 @@ static dma_addr_t vring_map_single(const struct vring_virtqueue *vq,
size, direction, 0);
}
-static int vring_mapping_error(const struct vring_virtqueue *vq,
- dma_addr_t addr)
-{
- if (!vq->use_map_api)
- return 0;
-
- return dma_mapping_error(vring_dma_dev(vq), addr);
-}
-
static void virtqueue_init(struct vring_virtqueue *vq, u32 num)
{
vq->vq.num_free = num;
@@ -453,11 +465,12 @@ static unsigned int vring_unmap_one_split(const struct vring_virtqueue *vq,
} else if (!vring_need_unmap_buffer(vq, extra))
goto out;
- dma_unmap_page(vring_dma_dev(vq),
- extra->addr,
- extra->len,
- (flags & VRING_DESC_F_WRITE) ?
- DMA_FROM_DEVICE : DMA_TO_DEVICE);
+ virtqueue_unmap_page_attrs(&vq->vq,
+ extra->addr,
+ extra->len,
+ (flags & VRING_DESC_F_WRITE) ?
+ DMA_FROM_DEVICE : DMA_TO_DEVICE,
+ 0);
out:
return extra->next;
@@ -1271,10 +1284,11 @@ static void vring_unmap_extra_packed(const struct vring_virtqueue *vq,
} else if (!vring_need_unmap_buffer(vq, extra))
return;
- dma_unmap_page(vring_dma_dev(vq),
- extra->addr, extra->len,
- (flags & VRING_DESC_F_WRITE) ?
- DMA_FROM_DEVICE : DMA_TO_DEVICE);
+ virtqueue_unmap_page_attrs(&vq->vq,
+ extra->addr, extra->len,
+ (flags & VRING_DESC_F_WRITE) ?
+ DMA_FROM_DEVICE : DMA_TO_DEVICE,
+ 0);
}
static struct vring_packed_desc *alloc_indirect_packed(unsigned int total_sg,
@@ -2433,7 +2447,7 @@ struct device *virtqueue_dma_dev(struct virtqueue *_vq)
{
struct vring_virtqueue *vq = to_vvq(_vq);
- if (vq->use_map_api)
+ if (vq->use_map_api && !_vq->vdev->map)
return vq->map.dma_dev;
else
return NULL;
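Note the behavioral change here: once a device supplies its own mapping ops, virtqueue_dma_dev() returns NULL, so callers can no longer hand a struct device to the raw DMA API. A hedged sketch of the caller-side fallback this implies, using the sync wrapper introduced later in this patch:

static void example_sync_for_cpu(struct virtqueue *vq, dma_addr_t addr,
				 size_t len)
{
	struct device *dev = virtqueue_dma_dev(vq);

	/* No struct device means mapping goes through vdev->map;
	 * use the virtqueue_map_*() wrappers instead.
	 */
	if (dev)
		dma_sync_single_for_cpu(dev, addr, len, DMA_FROM_DEVICE);
	else
		virtqueue_map_sync_single_range_for_cpu(vq, addr, 0, len,
							DMA_FROM_DEVICE);
}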
@@ -3124,6 +3138,107 @@ const struct vring *virtqueue_get_vring(const struct virtqueue *vq)
EXPORT_SYMBOL_GPL(virtqueue_get_vring);
/**
+ * virtqueue_map_alloc_coherent - alloc coherent mapping
+ * @vdev: the virtio device we are talking to
+ * @map: metadata for performing mapping
+ * @size: the size of the buffer
+ * @map_handle: the pointer to the mapped address
+ * @gfp: allocation flag (GFP_XXX)
+ *
+ * return virtual address or NULL on error
+ */
+void *virtqueue_map_alloc_coherent(struct virtio_device *vdev,
+ union virtio_map map,
+ size_t size, dma_addr_t *map_handle,
+ gfp_t gfp)
+{
+ if (vdev->map)
+ return vdev->map->alloc(map, size,
+ map_handle, gfp);
+ else
+ return dma_alloc_coherent(map.dma_dev, size,
+ map_handle, gfp);
+}
+EXPORT_SYMBOL_GPL(virtqueue_map_alloc_coherent);
+
+/**
+ * virtqueue_map_free_coherent - free coherent mapping
+ * @vdev: the virtio device we are talking to
+ * @map: metadata for performing mapping
+ * @size: the size of the buffer
+ * @vaddr: the virtual address of the buffer to free
+ * @map_handle: the mapped address that needs to be freed
+ */
+void virtqueue_map_free_coherent(struct virtio_device *vdev,
+ union virtio_map map, size_t size, void *vaddr,
+ dma_addr_t map_handle)
+{
+ if (vdev->map)
+ vdev->map->free(map, size, vaddr,
+ map_handle, 0);
+ else
+ dma_free_coherent(map.dma_dev, size, vaddr, map_handle);
+}
+EXPORT_SYMBOL_GPL(virtqueue_map_free_coherent);
+
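Taken together, the two helpers above give transports one alloc/free pair that either routes to the device's mapping ops or falls back to the DMA API, mirroring what vring_alloc_queue()/vring_free_queue() now do. A minimal sketch, assuming "map" is the union virtio_map token the transport already holds:

static int example_alloc_region(struct virtio_device *vdev,
				union virtio_map map)
{
	dma_addr_t handle;
	void *ring;

	/* One page of coherent memory, visible to CPU and device. */
	ring = virtqueue_map_alloc_coherent(vdev, map, PAGE_SIZE,
					    &handle, GFP_KERNEL);
	if (!ring)
		return -ENOMEM;

	/* ... program "handle" into the device, use "ring" from the CPU ... */

	virtqueue_map_free_coherent(vdev, map, PAGE_SIZE, ring, handle);
	return 0;
}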
+/**
+ * virtqueue_map_page_attrs - map a page to the device
+ * @_vq: the virtqueue we are talking to
+ * @page: the page that will be mapped by the device
+ * @offset: the offset in the page for a buffer
+ * @size: the buffer size
+ * @dir: mapping direction
+ * @attrs: mapping attributes
+ *
+ * Returns the mapped address. The caller should check it with virtqueue_map_mapping_error().
+ */
+dma_addr_t virtqueue_map_page_attrs(const struct virtqueue *_vq,
+ struct page *page,
+ unsigned long offset,
+ size_t size,
+ enum dma_data_direction dir,
+ unsigned long attrs)
+{
+ const struct vring_virtqueue *vq = to_vvq(_vq);
+ struct virtio_device *vdev = _vq->vdev;
+
+ if (vdev->map)
+ return vdev->map->map_page(vq->map,
+ page, offset, size,
+ dir, attrs);
+
+ return dma_map_page_attrs(vring_dma_dev(vq),
+ page, offset, size,
+ dir, attrs);
+}
+EXPORT_SYMBOL_GPL(virtqueue_map_page_attrs);
+
+/**
+ * virtqueue_unmap_page_attrs - unmap a page mapped to the device
+ * @_vq: the virtqueue we are talking to
+ * @map_handle: the mapped address
+ * @size: the buffer size
+ * @dir: mapping direction
+ * @attrs: unmapping attributes
+ */
+void virtqueue_unmap_page_attrs(const struct virtqueue *_vq,
+ dma_addr_t map_handle,
+ size_t size, enum dma_data_direction dir,
+ unsigned long attrs)
+{
+ const struct vring_virtqueue *vq = to_vvq(_vq);
+ struct virtio_device *vdev = _vq->vdev;
+
+ if (vdev->map)
+ vdev->map->unmap_page(vq->map,
+ map_handle, size, dir, attrs);
+ else
+ dma_unmap_page_attrs(vring_dma_dev(vq), map_handle,
+ size, dir, attrs);
+}
+EXPORT_SYMBOL_GPL(virtqueue_unmap_page_attrs);
+
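The map/unmap pair above is what the split and packed ring paths now call in place of dma_map_page()/dma_unmap_page(). A hedged usage sketch; the error check uses virtqueue_map_mapping_error(), the checker this file actually exports:

static int example_map_page(struct virtqueue *vq, struct page *page)
{
	dma_addr_t addr;

	addr = virtqueue_map_page_attrs(vq, page, 0, PAGE_SIZE,
					DMA_TO_DEVICE, 0);
	if (virtqueue_map_mapping_error(vq, addr))
		return -ENOMEM;

	/* ... the device reads from the page ... */

	virtqueue_unmap_page_attrs(vq, addr, PAGE_SIZE, DMA_TO_DEVICE, 0);
	return 0;
}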
+/**
* virtqueue_map_single_attrs - map DMA for _vq
* @_vq: the struct virtqueue we're talking about.
* @ptr: the pointer to the buffer to map
@@ -3134,7 +3249,7 @@ EXPORT_SYMBOL_GPL(virtqueue_get_vring);
* The caller calls this to do dma mapping in advance. The DMA address can be
* passed to this _vq when it is in pre-mapped mode.
*
- * return DMA address. Caller should check that by virtqueue_mapping_error().
+ * return mapped address. Caller should check that by virtqueue_map_mapping_error().
*/
dma_addr_t virtqueue_map_single_attrs(const struct virtqueue *_vq, void *ptr,
size_t size,
@@ -3153,8 +3268,8 @@ dma_addr_t virtqueue_map_single_attrs(const struct virtqueue *_vq, void *ptr,
"rejecting DMA map of vmalloc memory\n"))
return DMA_MAPPING_ERROR;
- return dma_map_page_attrs(vring_dma_dev(vq), virt_to_page(ptr),
- offset_in_page(ptr), size, dir, attrs);
+ return virtqueue_map_page_attrs(&vq->vq, virt_to_page(ptr),
+ offset_in_page(ptr), size, dir, attrs);
}
EXPORT_SYMBOL_GPL(virtqueue_map_single_attrs);
@@ -3179,12 +3294,12 @@ void virtqueue_unmap_single_attrs(const struct virtqueue *_vq,
if (!vq->use_map_api)
return;
- dma_unmap_page_attrs(vring_dma_dev(vq), addr, size, dir, attrs);
+ virtqueue_unmap_page_attrs(_vq, addr, size, dir, attrs);
}
EXPORT_SYMBOL_GPL(virtqueue_unmap_single_attrs);
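For pre-mapped mode, a driver maps its own buffer up front and hands the resulting address to the virtqueue, as the comment above describes. A minimal sketch of that flow:

static int example_premap(struct virtqueue *vq, void *buf, size_t len)
{
	dma_addr_t addr;

	addr = virtqueue_map_single_attrs(vq, buf, len, DMA_FROM_DEVICE, 0);
	if (virtqueue_map_mapping_error(vq, addr))
		return -ENOMEM;

	/* ... post "addr" to the virtqueue in pre-mapped mode ... */

	virtqueue_unmap_single_attrs(vq, addr, len, DMA_FROM_DEVICE, 0);
	return 0;
}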
/**
- * virtqueue_map_mapping_error - check dma address
+ * virtqueue_map_mapping_error - check map address
* @_vq: the struct virtqueue we're talking about.
* @addr: DMA address
*
@@ -3194,10 +3309,7 @@ int virtqueue_map_mapping_error(const struct virtqueue *_vq, dma_addr_t addr)
{
const struct vring_virtqueue *vq = to_vvq(_vq);
- if (!vq->use_map_api)
- return 0;
-
- return dma_mapping_error(vring_dma_dev(vq), addr);
+ return vring_mapping_error(vq, addr);
}
EXPORT_SYMBOL_GPL(virtqueue_map_mapping_error);
@@ -3214,11 +3326,15 @@ EXPORT_SYMBOL_GPL(virtqueue_map_mapping_error);
bool virtqueue_map_need_sync(const struct virtqueue *_vq, dma_addr_t addr)
{
const struct vring_virtqueue *vq = to_vvq(_vq);
+ struct virtio_device *vdev = _vq->vdev;
if (!vq->use_map_api)
return false;
- return dma_need_sync(vring_dma_dev(vq), addr);
+ if (vdev->map)
+ return vdev->map->need_sync(vq->map, addr);
+ else
+ return dma_need_sync(vring_dma_dev(vq), addr);
}
EXPORT_SYMBOL_GPL(virtqueue_map_need_sync);
@@ -3240,12 +3356,17 @@ void virtqueue_map_sync_single_range_for_cpu(const struct virtqueue *_vq,
enum dma_data_direction dir)
{
const struct vring_virtqueue *vq = to_vvq(_vq);
- struct device *dev = vring_dma_dev(vq);
+ struct virtio_device *vdev = _vq->vdev;
if (!vq->use_map_api)
return;
- dma_sync_single_range_for_cpu(dev, addr, offset, size, dir);
+ if (vdev->map)
+ vdev->map->sync_single_for_cpu(vq->map,
+ addr + offset, size, dir);
+ else
+ dma_sync_single_range_for_cpu(vring_dma_dev(vq),
+ addr, offset, size, dir);
}
EXPORT_SYMBOL_GPL(virtqueue_map_sync_single_range_for_cpu);
@@ -3266,12 +3387,18 @@ void virtqueue_map_sync_single_range_for_device(const struct virtqueue *_vq,
enum dma_data_direction dir)
{
const struct vring_virtqueue *vq = to_vvq(_vq);
- struct device *dev = vring_dma_dev(vq);
+ struct virtio_device *vdev = _vq->vdev;
if (!vq->use_map_api)
return;
- dma_sync_single_range_for_device(dev, addr, offset, size, dir);
+ if (vdev->map)
+ vdev->map->sync_single_for_device(vq->map,
+ addr + offset,
+ size, dir);
+ else
+ dma_sync_single_range_for_device(vring_dma_dev(vq), addr,
+ offset, size, dir);
}
EXPORT_SYMBOL_GPL(virtqueue_map_sync_single_range_for_device);
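The sync helpers complete the picture for pre-mapped buffers: a driver should only issue syncs when the mapping layer requires them. A hedged sketch of the pattern for a receive buffer:

static void example_sync_rx(struct virtqueue *vq, dma_addr_t addr,
			    size_t len)
{
	if (!virtqueue_map_need_sync(vq, addr))
		return;

	/* Make the device's writes visible before the CPU reads. */
	virtqueue_map_sync_single_range_for_cpu(vq, addr, 0, len,
						DMA_FROM_DEVICE);

	/* ... consume the received data ... */

	/* Hand ownership back before reposting the buffer. */
	virtqueue_map_sync_single_range_for_device(vq, addr, 0, len,
						   DMA_FROM_DEVICE);
}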
diff --git a/drivers/virtio/virtio_vdpa.c b/drivers/virtio/virtio_vdpa.c
index dc557aa7c825..d4be689e3626 100644
--- a/drivers/virtio/virtio_vdpa.c
+++ b/drivers/virtio/virtio_vdpa.c
@@ -195,6 +195,9 @@ virtio_vdpa_setup_vq(struct virtio_device *vdev, unsigned int index,
goto error_new_virtqueue;
}
+ if (index == 0)
+ vdev->vmap = map;
+
vq->num_max = max_num;
/* Setup virtqueue callback */
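The ops table that vdev->map points to is defined outside this diff; the call sites above only imply its shape, and the virtio_vdpa hunk stashes the first virtqueue's mapping token in vdev->vmap so that device-level ops such as max_mapping_size() have a token to pass. A hedged reconstruction of that shape; the struct name and member layout are assumptions inferred purely from the call sites:

/* Hypothetical reconstruction; the real definition lives elsewhere
 * in the series.
 */
struct virtio_map_ops {
	void *(*alloc)(union virtio_map map, size_t size,
		       dma_addr_t *map_handle, gfp_t gfp);
	void (*free)(union virtio_map map, size_t size, void *vaddr,
		     dma_addr_t map_handle, unsigned long attrs);
	dma_addr_t (*map_page)(union virtio_map map, struct page *page,
			       unsigned long offset, size_t size,
			       enum dma_data_direction dir,
			       unsigned long attrs);
	void (*unmap_page)(union virtio_map map, dma_addr_t map_handle,
			   size_t size, enum dma_data_direction dir,
			   unsigned long attrs);
	int (*mapping_error)(union virtio_map map, dma_addr_t addr);
	bool (*need_sync)(union virtio_map map, dma_addr_t addr);
	void (*sync_single_for_cpu)(union virtio_map map, dma_addr_t addr,
				    size_t size, enum dma_data_direction dir);
	void (*sync_single_for_device)(union virtio_map map, dma_addr_t addr,
				       size_t size,
				       enum dma_data_direction dir);
	size_t (*max_mapping_size)(union virtio_map map);
};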