path: root/drivers/xen/grant-dma-ops.c
author     Linus Torvalds <torvalds@linux-foundation.org>   2022-10-12 14:39:38 -0700
committer  Linus Torvalds <torvalds@linux-foundation.org>   2022-10-12 14:39:38 -0700
commit     778ce723e93ee803ef5883619fe2391e00dbc209 (patch)
tree       861f03223f34780fecce597b45588118236874d7   /drivers/xen/grant-dma-ops.c
parent     1440f576022887004f719883acb094e7e0dd4944 (diff)
parent     7880672bdc975daa586e8256714d9906d30c615e (diff)
Merge tag 'for-linus-6.1-rc1-tag' of git://git.kernel.org/pub/scm/linux/kernel/git/xen/tip
Pull xen updates from Juergen Gross:

 - Some minor typo fixes

 - A fix of the Xen pcifront driver for supporting the device model to
   run in a Linux stub domain

 - A cleanup of the pcifront driver

 - A series to enable grant-based virtio with Xen on x86

 - A cleanup of Xen PV guests to distinguish between safe and faulting
   MSR accesses

 - Two fixes of the Xen gntdev driver

 - Two fixes of the new xen grant DMA driver

* tag 'for-linus-6.1-rc1-tag' of git://git.kernel.org/pub/scm/linux/kernel/git/xen/tip:
  xen: Kconfig: Fix spelling mistake "Maxmium" -> "Maximum"
  xen/pv: support selecting safe/unsafe msr accesses
  xen/pv: refactor msr access functions to support safe and unsafe accesses
  xen/pv: fix vendor checks for pmu emulation
  xen/pv: add fault recovery control to pmu msr accesses
  xen/virtio: enable grant based virtio on x86
  xen/virtio: use dom0 as default backend for CONFIG_XEN_VIRTIO_FORCE_GRANT
  xen/virtio: restructure xen grant dma setup
  xen/pcifront: move xenstore config scanning into sub-function
  xen/gntdev: Accommodate VMA splitting
  xen/gntdev: Prevent leaking grants
  xen/virtio: Fix potential deadlock when accessing xen_grant_dma_devices
  xen/virtio: Fix n_pages calculation in xen_grant_dma_map(unmap)_page()
  xen/xenbus: Fix spelling mistake "hardward" -> "hardware"
  xen-pcifront: Handle missed Connected state
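The "Fix n_pages calculation" entry above is easiest to see with a worked example: a mapping that starts at a non-zero offset inside its first page can cross one more page boundary than its size alone suggests. The following is a minimal userspace sketch (not kernel code; PAGE_SIZE and PFN_UP are re-defined locally, assuming 4 KiB pages, to mirror the kernel macros) of that arithmetic:

/*
 * Minimal userspace sketch -- not kernel code. PAGE_SIZE and PFN_UP are
 * re-defined here (assuming 4 KiB pages) to mirror the kernel macros.
 */
#include <stdio.h>

#define PAGE_SHIFT 12
#define PAGE_SIZE  (1UL << PAGE_SHIFT)
#define PFN_UP(x)  (((x) + PAGE_SIZE - 1) >> PAGE_SHIFT)

int main(void)
{
	unsigned long offset = PAGE_SIZE - 8;	/* mapping starts 8 bytes before a page boundary */
	unsigned long size = 16;		/* ...and runs 16 bytes, into the next page */

	printf("old: PFN_UP(size)          = %lu page(s)\n", PFN_UP(size));		/* prints 1 */
	printf("new: PFN_UP(offset + size) = %lu page(s)\n", PFN_UP(offset + size));	/* prints 2 */
	return 0;
}

With the offset just below a page boundary, PFN_UP(size) under-counts by one page, which is what the map/unmap hunks in the diff below correct.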
Diffstat (limited to 'drivers/xen/grant-dma-ops.c')
-rw-r--r--   drivers/xen/grant-dma-ops.c   112
1 file changed, 79 insertions(+), 33 deletions(-)
diff --git a/drivers/xen/grant-dma-ops.c b/drivers/xen/grant-dma-ops.c
index 8973fc1e9ccc..860f37c93af4 100644
--- a/drivers/xen/grant-dma-ops.c
+++ b/drivers/xen/grant-dma-ops.c
@@ -25,7 +25,7 @@ struct xen_grant_dma_data {
bool broken;
};
-static DEFINE_XARRAY(xen_grant_dma_devices);
+static DEFINE_XARRAY_FLAGS(xen_grant_dma_devices, XA_FLAGS_LOCK_IRQ);
#define XEN_GRANT_DMA_ADDR_OFF (1ULL << 63)
@@ -42,14 +42,29 @@ static inline grant_ref_t dma_to_grant(dma_addr_t dma)
static struct xen_grant_dma_data *find_xen_grant_dma_data(struct device *dev)
{
struct xen_grant_dma_data *data;
+ unsigned long flags;
- xa_lock(&xen_grant_dma_devices);
+ xa_lock_irqsave(&xen_grant_dma_devices, flags);
data = xa_load(&xen_grant_dma_devices, (unsigned long)dev);
- xa_unlock(&xen_grant_dma_devices);
+ xa_unlock_irqrestore(&xen_grant_dma_devices, flags);
return data;
}
+static int store_xen_grant_dma_data(struct device *dev,
+ struct xen_grant_dma_data *data)
+{
+ unsigned long flags;
+ int ret;
+
+ xa_lock_irqsave(&xen_grant_dma_devices, flags);
+ ret = xa_err(__xa_store(&xen_grant_dma_devices, (unsigned long)dev, data,
+ GFP_ATOMIC));
+ xa_unlock_irqrestore(&xen_grant_dma_devices, flags);
+
+ return ret;
+}
+
/*
* DMA ops for Xen frontends (e.g. virtio).
*
@@ -153,7 +168,7 @@ static dma_addr_t xen_grant_dma_map_page(struct device *dev, struct page *page,
unsigned long attrs)
{
struct xen_grant_dma_data *data;
- unsigned int i, n_pages = PFN_UP(size);
+ unsigned int i, n_pages = PFN_UP(offset + size);
grant_ref_t grant;
dma_addr_t dma_handle;
@@ -185,7 +200,8 @@ static void xen_grant_dma_unmap_page(struct device *dev, dma_addr_t dma_handle,
unsigned long attrs)
{
struct xen_grant_dma_data *data;
- unsigned int i, n_pages = PFN_UP(size);
+ unsigned long offset = dma_handle & (PAGE_SIZE - 1);
+ unsigned int i, n_pages = PFN_UP(offset + size);
grant_ref_t grant;
if (WARN_ON(dir == DMA_NONE))
@@ -273,72 +289,91 @@ static const struct dma_map_ops xen_grant_dma_ops = {
.dma_supported = xen_grant_dma_supported,
};
-bool xen_is_grant_dma_device(struct device *dev)
+static bool xen_is_dt_grant_dma_device(struct device *dev)
{
struct device_node *iommu_np;
bool has_iommu;
- /* XXX Handle only DT devices for now */
- if (!dev->of_node)
- return false;
-
iommu_np = of_parse_phandle(dev->of_node, "iommus", 0);
- has_iommu = iommu_np && of_device_is_compatible(iommu_np, "xen,grant-dma");
+ has_iommu = iommu_np &&
+ of_device_is_compatible(iommu_np, "xen,grant-dma");
of_node_put(iommu_np);
return has_iommu;
}
+bool xen_is_grant_dma_device(struct device *dev)
+{
+ /* XXX Handle only DT devices for now */
+ if (dev->of_node)
+ return xen_is_dt_grant_dma_device(dev);
+
+ return false;
+}
+
bool xen_virtio_mem_acc(struct virtio_device *dev)
{
- if (IS_ENABLED(CONFIG_XEN_VIRTIO_FORCE_GRANT))
+ if (IS_ENABLED(CONFIG_XEN_VIRTIO_FORCE_GRANT) || xen_pv_domain())
return true;
return xen_is_grant_dma_device(dev->dev.parent);
}
-void xen_grant_setup_dma_ops(struct device *dev)
+static int xen_dt_grant_init_backend_domid(struct device *dev,
+ struct xen_grant_dma_data *data)
{
- struct xen_grant_dma_data *data;
struct of_phandle_args iommu_spec;
- data = find_xen_grant_dma_data(dev);
- if (data) {
- dev_err(dev, "Xen grant DMA data is already created\n");
- return;
- }
-
- /* XXX ACPI device unsupported for now */
- if (!dev->of_node)
- goto err;
-
if (of_parse_phandle_with_args(dev->of_node, "iommus", "#iommu-cells",
0, &iommu_spec)) {
dev_err(dev, "Cannot parse iommus property\n");
- goto err;
+ return -ESRCH;
}
if (!of_device_is_compatible(iommu_spec.np, "xen,grant-dma") ||
iommu_spec.args_count != 1) {
dev_err(dev, "Incompatible IOMMU node\n");
of_node_put(iommu_spec.np);
- goto err;
+ return -ESRCH;
}
of_node_put(iommu_spec.np);
+ /*
+ * The endpoint ID here means the ID of the domain where the
+ * corresponding backend is running
+ */
+ data->backend_domid = iommu_spec.args[0];
+
+ return 0;
+}
+
+void xen_grant_setup_dma_ops(struct device *dev)
+{
+ struct xen_grant_dma_data *data;
+
+ data = find_xen_grant_dma_data(dev);
+ if (data) {
+ dev_err(dev, "Xen grant DMA data is already created\n");
+ return;
+ }
+
data = devm_kzalloc(dev, sizeof(*data), GFP_KERNEL);
if (!data)
goto err;
- /*
- * The endpoint ID here means the ID of the domain where the corresponding
- * backend is running
- */
- data->backend_domid = iommu_spec.args[0];
+ if (dev->of_node) {
+ if (xen_dt_grant_init_backend_domid(dev, data))
+ goto err;
+ } else if (IS_ENABLED(CONFIG_XEN_VIRTIO_FORCE_GRANT)) {
+ dev_info(dev, "Using dom0 as backend\n");
+ data->backend_domid = 0;
+ } else {
+ /* XXX ACPI device unsupported for now */
+ goto err;
+ }
- if (xa_err(xa_store(&xen_grant_dma_devices, (unsigned long)dev, data,
- GFP_KERNEL))) {
+ if (store_xen_grant_dma_data(dev, data)) {
dev_err(dev, "Cannot store Xen grant DMA data\n");
goto err;
}
@@ -348,9 +383,20 @@ void xen_grant_setup_dma_ops(struct device *dev)
return;
err:
+ devm_kfree(dev, data);
dev_err(dev, "Cannot set up Xen grant DMA ops, retain platform DMA ops\n");
}
+bool xen_virtio_restricted_mem_acc(struct virtio_device *dev)
+{
+ bool ret = xen_virtio_mem_acc(dev);
+
+ if (ret)
+ xen_grant_setup_dma_ops(dev->dev.parent);
+
+ return ret;
+}
+
MODULE_DESCRIPTION("Xen grant DMA-mapping layer");
MODULE_AUTHOR("Juergen Gross <jgross@suse.com>");
MODULE_LICENSE("GPL");
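The deadlock fix in this diff comes down to one pattern: declare the xarray with XA_FLAGS_LOCK_IRQ and perform stores under xa_lock_irqsave() using __xa_store(..., GFP_ATOMIC), so that a lookup from interrupt context cannot deadlock against a store holding the same lock. Below is a minimal, self-contained module sketch of that pattern; the names demo_xa, demo_store, demo_init and demo_exit are illustrative only, not part of the driver:

/*
 * Hypothetical kernel-module sketch of the IRQ-safe xarray pattern used by
 * the deadlock fix above. demo_* names are illustrative, not from
 * grant-dma-ops.c.
 */
#include <linux/module.h>
#include <linux/xarray.h>

/* XA_FLAGS_LOCK_IRQ makes the xarray's internal spinlock IRQ-safe. */
static DEFINE_XARRAY_FLAGS(demo_xa, XA_FLAGS_LOCK_IRQ);

static int demo_store(unsigned long index, void *entry)
{
	unsigned long flags;
	int ret;

	/*
	 * Interrupts are disabled while the xarray lock is held, so a reader
	 * running in interrupt context on this CPU cannot deadlock against
	 * this store; the store itself uses __xa_store() with GFP_ATOMIC
	 * because it runs under the lock.
	 */
	xa_lock_irqsave(&demo_xa, flags);
	ret = xa_err(__xa_store(&demo_xa, index, entry, GFP_ATOMIC));
	xa_unlock_irqrestore(&demo_xa, flags);

	return ret;
}

static int __init demo_init(void)
{
	static int value = 42;

	return demo_store(1, &value);
}

static void __exit demo_exit(void)
{
	xa_destroy(&demo_xa);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");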