-rw-r--r--  Documentation/devicetree/bindings/iommu/arm,smmu-v3.txt            3
-rw-r--r--  MAINTAINERS                                                        10
-rw-r--r--  drivers/iommu/arm-smmu-v3.c                                        60
-rw-r--r--  drivers/iommu/intel-iommu.c                                         9
-rw-r--r--  drivers/s390/Makefile                                               2
-rw-r--r--  drivers/s390/virtio/Makefile (renamed from drivers/s390/kvm/Makefile)            0
-rw-r--r--  drivers/s390/virtio/kvm_virtio.c (renamed from drivers/s390/kvm/kvm_virtio.c)    0
-rw-r--r--  drivers/s390/virtio/virtio_ccw.c (renamed from drivers/s390/kvm/virtio_ccw.c)    0
-rw-r--r--  drivers/scsi/virtio_scsi.c                                          4
-rw-r--r--  drivers/vhost/vhost.c                                              67
-rw-r--r--  fs/namespace.c                                                     42
-rw-r--r--  fs/pnode.h                                                          2
-rw-r--r--  include/uapi/linux/virtio_net.h                                    16
-rw-r--r--  include/uapi/linux/virtio_pci.h                                     6
-rw-r--r--  include/uapi/linux/virtio_ring.h                                    5
-rw-r--r--  net/9p/trans_virtio.c                                               1
16 files changed, 179 insertions(+), 48 deletions(-)
diff --git a/Documentation/devicetree/bindings/iommu/arm,smmu-v3.txt b/Documentation/devicetree/bindings/iommu/arm,smmu-v3.txt
index c03eec116872..3443e0f838df 100644
--- a/Documentation/devicetree/bindings/iommu/arm,smmu-v3.txt
+++ b/Documentation/devicetree/bindings/iommu/arm,smmu-v3.txt
@@ -35,3 +35,6 @@ the PCIe specification.
NOTE: this only applies to the SMMU itself, not
masters connected upstream of the SMMU.
+
+- hisilicon,broken-prefetch-cmd
+ : Avoid sending CMD_PREFETCH_* commands to the SMMU.
diff --git a/MAINTAINERS b/MAINTAINERS
index a2264167791a..2eb627252239 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -5899,7 +5899,6 @@ S: Supported
F: Documentation/s390/kvm.txt
F: arch/s390/include/asm/kvm*
F: arch/s390/kvm/
-F: drivers/s390/kvm/
KERNEL VIRTUAL MACHINE (KVM) FOR ARM
M: Christoffer Dall <christoffer.dall@linaro.org>
@@ -10896,6 +10895,15 @@ F: drivers/block/virtio_blk.c
F: include/linux/virtio_*.h
F: include/uapi/linux/virtio_*.h
+VIRTIO DRIVERS FOR S390
+M: Christian Borntraeger <borntraeger@de.ibm.com>
+M: Cornelia Huck <cornelia.huck@de.ibm.com>
+L: linux-s390@vger.kernel.org
+L: virtualization@lists.linux-foundation.org
+L: kvm@vger.kernel.org
+S: Supported
+F: drivers/s390/virtio/
+
VIRTIO GPU DRIVER
M: David Airlie <airlied@linux.ie>
M: Gerd Hoffmann <kraxel@redhat.com>
diff --git a/drivers/iommu/arm-smmu-v3.c b/drivers/iommu/arm-smmu-v3.c
index 8e9ec81ce4bb..da902baaa794 100644
--- a/drivers/iommu/arm-smmu-v3.c
+++ b/drivers/iommu/arm-smmu-v3.c
@@ -199,9 +199,10 @@
* Stream table.
*
* Linear: Enough to cover 1 << IDR1.SIDSIZE entries
- * 2lvl: 8k L1 entries, 256 lazy entries per table (each table covers a PCI bus)
+ * 2lvl: 128k L1 entries,
+ * 256 lazy entries per table (each table covers a PCI bus)
*/
-#define STRTAB_L1_SZ_SHIFT 16
+#define STRTAB_L1_SZ_SHIFT 20
#define STRTAB_SPLIT 8
#define STRTAB_L1_DESC_DWORDS 1
@@ -269,10 +270,10 @@
#define ARM64_TCR_TG0_SHIFT 14
#define ARM64_TCR_TG0_MASK 0x3UL
#define CTXDESC_CD_0_TCR_IRGN0_SHIFT 8
-#define ARM64_TCR_IRGN0_SHIFT 24
+#define ARM64_TCR_IRGN0_SHIFT 8
#define ARM64_TCR_IRGN0_MASK 0x3UL
#define CTXDESC_CD_0_TCR_ORGN0_SHIFT 10
-#define ARM64_TCR_ORGN0_SHIFT 26
+#define ARM64_TCR_ORGN0_SHIFT 10
#define ARM64_TCR_ORGN0_MASK 0x3UL
#define CTXDESC_CD_0_TCR_SH0_SHIFT 12
#define ARM64_TCR_SH0_SHIFT 12
@@ -542,6 +543,9 @@ struct arm_smmu_device {
#define ARM_SMMU_FEAT_HYP (1 << 12)
u32 features;
+#define ARM_SMMU_OPT_SKIP_PREFETCH (1 << 0)
+ u32 options;
+
struct arm_smmu_cmdq cmdq;
struct arm_smmu_evtq evtq;
struct arm_smmu_priq priq;
@@ -602,11 +606,35 @@ struct arm_smmu_domain {
static DEFINE_SPINLOCK(arm_smmu_devices_lock);
static LIST_HEAD(arm_smmu_devices);
+struct arm_smmu_option_prop {
+ u32 opt;
+ const char *prop;
+};
+
+static struct arm_smmu_option_prop arm_smmu_options[] = {
+ { ARM_SMMU_OPT_SKIP_PREFETCH, "hisilicon,broken-prefetch-cmd" },
+ { 0, NULL},
+};
+
static struct arm_smmu_domain *to_smmu_domain(struct iommu_domain *dom)
{
return container_of(dom, struct arm_smmu_domain, domain);
}
+static void parse_driver_options(struct arm_smmu_device *smmu)
+{
+ int i = 0;
+
+ do {
+ if (of_property_read_bool(smmu->dev->of_node,
+ arm_smmu_options[i].prop)) {
+ smmu->options |= arm_smmu_options[i].opt;
+ dev_notice(smmu->dev, "option %s\n",
+ arm_smmu_options[i].prop);
+ }
+ } while (arm_smmu_options[++i].opt);
+}
+
/* Low-level queue manipulation functions */
static bool queue_full(struct arm_smmu_queue *q)
{
@@ -1036,7 +1064,8 @@ static void arm_smmu_write_strtab_ent(struct arm_smmu_device *smmu, u32 sid,
arm_smmu_sync_ste_for_sid(smmu, sid);
/* It's likely that we'll want to use the new STE soon */
- arm_smmu_cmdq_issue_cmd(smmu, &prefetch_cmd);
+ if (!(smmu->options & ARM_SMMU_OPT_SKIP_PREFETCH))
+ arm_smmu_cmdq_issue_cmd(smmu, &prefetch_cmd);
}
static void arm_smmu_init_bypass_stes(u64 *strtab, unsigned int nent)
@@ -1064,7 +1093,7 @@ static int arm_smmu_init_l2_strtab(struct arm_smmu_device *smmu, u32 sid)
return 0;
size = 1 << (STRTAB_SPLIT + ilog2(STRTAB_STE_DWORDS) + 3);
- strtab = &cfg->strtab[sid >> STRTAB_SPLIT << STRTAB_L1_DESC_DWORDS];
+ strtab = &cfg->strtab[(sid >> STRTAB_SPLIT) * STRTAB_L1_DESC_DWORDS];
desc->span = STRTAB_SPLIT + 1;
desc->l2ptr = dma_zalloc_coherent(smmu->dev, size, &desc->l2ptr_dma,
@@ -2020,21 +2049,23 @@ static int arm_smmu_init_strtab_2lvl(struct arm_smmu_device *smmu)
{
void *strtab;
u64 reg;
- u32 size;
+ u32 size, l1size;
int ret;
struct arm_smmu_strtab_cfg *cfg = &smmu->strtab_cfg;
/* Calculate the L1 size, capped to the SIDSIZE */
size = STRTAB_L1_SZ_SHIFT - (ilog2(STRTAB_L1_DESC_DWORDS) + 3);
size = min(size, smmu->sid_bits - STRTAB_SPLIT);
- if (size + STRTAB_SPLIT < smmu->sid_bits)
+ cfg->num_l1_ents = 1 << size;
+
+ size += STRTAB_SPLIT;
+ if (size < smmu->sid_bits)
dev_warn(smmu->dev,
"2-level strtab only covers %u/%u bits of SID\n",
- size + STRTAB_SPLIT, smmu->sid_bits);
+ size, smmu->sid_bits);
- cfg->num_l1_ents = 1 << size;
- size = cfg->num_l1_ents * (STRTAB_L1_DESC_DWORDS << 3);
- strtab = dma_zalloc_coherent(smmu->dev, size, &cfg->strtab_dma,
+ l1size = cfg->num_l1_ents * (STRTAB_L1_DESC_DWORDS << 3);
+ strtab = dma_zalloc_coherent(smmu->dev, l1size, &cfg->strtab_dma,
GFP_KERNEL);
if (!strtab) {
dev_err(smmu->dev,
@@ -2055,8 +2086,7 @@ static int arm_smmu_init_strtab_2lvl(struct arm_smmu_device *smmu)
ret = arm_smmu_init_l1_strtab(smmu);
if (ret)
dma_free_coherent(smmu->dev,
- cfg->num_l1_ents *
- (STRTAB_L1_DESC_DWORDS << 3),
+ l1size,
strtab,
cfg->strtab_dma);
return ret;
@@ -2573,6 +2603,8 @@ static int arm_smmu_device_dt_probe(struct platform_device *pdev)
if (irq > 0)
smmu->gerr_irq = irq;
+ parse_driver_options(smmu);
+
/* Probe the h/w */
ret = arm_smmu_device_probe(smmu);
if (ret)
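
Note on the shift fix above: in the ARMv8 TCR_EL1 layout, IRGN0 sits at bits [9:8] and ORGN0 at bits [11:10]; the old values 24 and 26 pointed at the TTBR1 fields IRGN1/ORGN1, so the wrong cacheability attributes were being copied into the context descriptor. A minimal sketch of the extract-and-repack step those macros feed; the helper name is illustrative, not the driver's actual macro:

static inline u64 tcr_field_to_cd(u64 tcr, unsigned int tcr_shift, u64 mask,
				  unsigned int cd_shift)
{
	/* Pull one field out of a TCR_EL1-format value and place it at its
	 * position in context-descriptor word 0. */
	return ((tcr >> tcr_shift) & mask) << cd_shift;
}

/* With the corrected constants, IRGN0 becomes
 *   tcr_field_to_cd(tcr, ARM64_TCR_IRGN0_SHIFT, ARM64_TCR_IRGN0_MASK,
 *                   CTXDESC_CD_0_TCR_IRGN0_SHIFT);
 * i.e. bits [9:8] of the TCR land in bits [9:8] of the CD, instead of the
 * TTBR1 attributes that the old shift of 24 selected. */
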
diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c
index a98a7b27aca1..0649b94f5958 100644
--- a/drivers/iommu/intel-iommu.c
+++ b/drivers/iommu/intel-iommu.c
@@ -1830,8 +1830,9 @@ static int domain_init(struct dmar_domain *domain, int guest_width)
static void domain_exit(struct dmar_domain *domain)
{
+ struct dmar_drhd_unit *drhd;
+ struct intel_iommu *iommu;
struct page *freelist = NULL;
- int i;
/* Domain 0 is reserved, so dont process it */
if (!domain)
@@ -1851,8 +1852,10 @@ static void domain_exit(struct dmar_domain *domain)
/* clear attached or cached domains */
rcu_read_lock();
- for_each_set_bit(i, domain->iommu_bmp, g_num_of_iommus)
- iommu_detach_domain(domain, g_iommus[i]);
+ for_each_active_iommu(iommu, drhd)
+ if (domain_type_is_vm(domain) ||
+ test_bit(iommu->seq_id, domain->iommu_bmp))
+ iommu_detach_domain(domain, iommu);
rcu_read_unlock();
dma_free_pagelist(freelist);
diff --git a/drivers/s390/Makefile b/drivers/s390/Makefile
index 95bccfd3f169..e5225ad9c5b1 100644
--- a/drivers/s390/Makefile
+++ b/drivers/s390/Makefile
@@ -2,7 +2,7 @@
# Makefile for the S/390 specific device drivers
#
-obj-y += cio/ block/ char/ crypto/ net/ scsi/ kvm/
+obj-y += cio/ block/ char/ crypto/ net/ scsi/ virtio/
drivers-y += drivers/s390/built-in.o
diff --git a/drivers/s390/kvm/Makefile b/drivers/s390/virtio/Makefile
index 241891a57caf..241891a57caf 100644
--- a/drivers/s390/kvm/Makefile
+++ b/drivers/s390/virtio/Makefile
diff --git a/drivers/s390/kvm/kvm_virtio.c b/drivers/s390/virtio/kvm_virtio.c
index 53fb975c404b..53fb975c404b 100644
--- a/drivers/s390/kvm/kvm_virtio.c
+++ b/drivers/s390/virtio/kvm_virtio.c
diff --git a/drivers/s390/kvm/virtio_ccw.c b/drivers/s390/virtio/virtio_ccw.c
index f8d8fdb26b72..f8d8fdb26b72 100644
--- a/drivers/s390/kvm/virtio_ccw.c
+++ b/drivers/s390/virtio/virtio_ccw.c
diff --git a/drivers/scsi/virtio_scsi.c b/drivers/scsi/virtio_scsi.c
index 285f77544c36..7dbbb29d24c6 100644
--- a/drivers/scsi/virtio_scsi.c
+++ b/drivers/scsi/virtio_scsi.c
@@ -949,7 +949,7 @@ static int virtscsi_probe(struct virtio_device *vdev)
{
struct Scsi_Host *shost;
struct virtio_scsi *vscsi;
- int err, host_prot;
+ int err;
u32 sg_elems, num_targets;
u32 cmd_per_lun;
u32 num_queues;
@@ -1009,6 +1009,8 @@ static int virtscsi_probe(struct virtio_device *vdev)
#ifdef CONFIG_BLK_DEV_INTEGRITY
if (virtio_has_feature(vdev, VIRTIO_SCSI_F_T10_PI)) {
+ int host_prot;
+
host_prot = SHOST_DIF_TYPE1_PROTECTION | SHOST_DIF_TYPE2_PROTECTION |
SHOST_DIF_TYPE3_PROTECTION | SHOST_DIX_TYPE1_PROTECTION |
SHOST_DIX_TYPE2_PROTECTION | SHOST_DIX_TYPE3_PROTECTION;
diff --git a/drivers/vhost/vhost.c b/drivers/vhost/vhost.c
index 9e8e004bb1c3..a9fe859f43c8 100644
--- a/drivers/vhost/vhost.c
+++ b/drivers/vhost/vhost.c
@@ -22,14 +22,20 @@
#include <linux/file.h>
#include <linux/highmem.h>
#include <linux/slab.h>
+#include <linux/vmalloc.h>
#include <linux/kthread.h>
#include <linux/cgroup.h>
#include <linux/module.h>
+#include <linux/sort.h>
#include "vhost.h"
+static ushort max_mem_regions = 64;
+module_param(max_mem_regions, ushort, 0444);
+MODULE_PARM_DESC(max_mem_regions,
+ "Maximum number of memory regions in memory map. (default: 64)");
+
enum {
- VHOST_MEMORY_MAX_NREGIONS = 64,
VHOST_MEMORY_F_LOG = 0x1,
};
@@ -543,7 +549,7 @@ void vhost_dev_cleanup(struct vhost_dev *dev, bool locked)
fput(dev->log_file);
dev->log_file = NULL;
/* No one will access memory at this point */
- kfree(dev->memory);
+ kvfree(dev->memory);
dev->memory = NULL;
WARN_ON(!list_empty(&dev->work_list));
if (dev->worker) {
@@ -663,6 +669,28 @@ int vhost_vq_access_ok(struct vhost_virtqueue *vq)
}
EXPORT_SYMBOL_GPL(vhost_vq_access_ok);
+static int vhost_memory_reg_sort_cmp(const void *p1, const void *p2)
+{
+ const struct vhost_memory_region *r1 = p1, *r2 = p2;
+ if (r1->guest_phys_addr < r2->guest_phys_addr)
+ return 1;
+ if (r1->guest_phys_addr > r2->guest_phys_addr)
+ return -1;
+ return 0;
+}
+
+static void *vhost_kvzalloc(unsigned long size)
+{
+ void *n = kzalloc(size, GFP_KERNEL | __GFP_NOWARN | __GFP_REPEAT);
+
+ if (!n) {
+ n = vzalloc(size);
+ if (!n)
+ return ERR_PTR(-ENOMEM);
+ }
+ return n;
+}
+
static long vhost_set_memory(struct vhost_dev *d, struct vhost_memory __user *m)
{
struct vhost_memory mem, *newmem, *oldmem;
@@ -673,21 +701,23 @@ static long vhost_set_memory(struct vhost_dev *d, struct vhost_memory __user *m)
return -EFAULT;
if (mem.padding)
return -EOPNOTSUPP;
- if (mem.nregions > VHOST_MEMORY_MAX_NREGIONS)
+ if (mem.nregions > max_mem_regions)
return -E2BIG;
- newmem = kmalloc(size + mem.nregions * sizeof *m->regions, GFP_KERNEL);
+ newmem = vhost_kvzalloc(size + mem.nregions * sizeof(*m->regions));
if (!newmem)
return -ENOMEM;
memcpy(newmem, &mem, size);
if (copy_from_user(newmem->regions, m->regions,
mem.nregions * sizeof *m->regions)) {
- kfree(newmem);
+ kvfree(newmem);
return -EFAULT;
}
+ sort(newmem->regions, newmem->nregions, sizeof(*newmem->regions),
+ vhost_memory_reg_sort_cmp, NULL);
if (!memory_access_ok(d, newmem, 0)) {
- kfree(newmem);
+ kvfree(newmem);
return -EFAULT;
}
oldmem = d->memory;
@@ -699,7 +729,7 @@ static long vhost_set_memory(struct vhost_dev *d, struct vhost_memory __user *m)
d->vqs[i]->memory = newmem;
mutex_unlock(&d->vqs[i]->mutex);
}
- kfree(oldmem);
+ kvfree(oldmem);
return 0;
}
@@ -992,17 +1022,22 @@ EXPORT_SYMBOL_GPL(vhost_dev_ioctl);
static const struct vhost_memory_region *find_region(struct vhost_memory *mem,
__u64 addr, __u32 len)
{
- struct vhost_memory_region *reg;
- int i;
+ const struct vhost_memory_region *reg;
+ int start = 0, end = mem->nregions;
- /* linear search is not brilliant, but we really have on the order of 6
- * regions in practice */
- for (i = 0; i < mem->nregions; ++i) {
- reg = mem->regions + i;
- if (reg->guest_phys_addr <= addr &&
- reg->guest_phys_addr + reg->memory_size - 1 >= addr)
- return reg;
+ while (start < end) {
+ int slot = start + (end - start) / 2;
+ reg = mem->regions + slot;
+ if (addr >= reg->guest_phys_addr)
+ end = slot;
+ else
+ start = slot + 1;
}
+
+ reg = mem->regions + start;
+ if (addr >= reg->guest_phys_addr &&
+ reg->guest_phys_addr + reg->memory_size > addr)
+ return reg;
return NULL;
}
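
The comparator above returns 1 when r1's address is lower, so sort() leaves the regions ordered by descending guest_phys_addr; find_region() then runs a lower-bound binary search for the first region whose base is at or below addr and checks containment. A self-contained userspace sketch of the same invariant, using stand-in types rather than the vhost structures and with an explicit bounds check added for the standalone version:

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

struct region { uint64_t gpa, size; };        /* stand-in for vhost_memory_region */

static int cmp_desc(const void *p1, const void *p2)
{
	const struct region *r1 = p1, *r2 = p2;

	if (r1->gpa < r2->gpa) return 1;      /* descending by guest address */
	if (r1->gpa > r2->gpa) return -1;
	return 0;
}

static const struct region *find_region(const struct region *regs, int n,
					 uint64_t addr)
{
	int start = 0, end = n;

	while (start < end) {                 /* first slot with gpa <= addr */
		int slot = start + (end - start) / 2;

		if (addr >= regs[slot].gpa)
			end = slot;
		else
			start = slot + 1;
	}
	if (start < n && addr >= regs[start].gpa &&
	    addr < regs[start].gpa + regs[start].size)
		return &regs[start];
	return NULL;
}

int main(void)
{
	struct region regs[] = { { 0x0, 0x1000 }, { 0x4000, 0x2000 }, { 0x2000, 0x1000 } };

	qsort(regs, 3, sizeof(regs[0]), cmp_desc);
	printf("%s\n", find_region(regs, 3, 0x4800) ? "hit" : "miss"); /* prints "hit" */
	return 0;
}
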
diff --git a/fs/namespace.c b/fs/namespace.c
index c7cb8a526c05..2b8aa15fd6df 100644
--- a/fs/namespace.c
+++ b/fs/namespace.c
@@ -1361,6 +1361,36 @@ enum umount_tree_flags {
UMOUNT_PROPAGATE = 2,
UMOUNT_CONNECTED = 4,
};
+
+static bool disconnect_mount(struct mount *mnt, enum umount_tree_flags how)
+{
+ /* Leaving mounts connected is only valid for lazy umounts */
+ if (how & UMOUNT_SYNC)
+ return true;
+
+ /* A mount without a parent has nothing to be connected to */
+ if (!mnt_has_parent(mnt))
+ return true;
+
+ /* Because the reference counting rules change when mounts are
+ * unmounted and connected, umounted mounts may not be
+ * connected to mounted mounts.
+ */
+ if (!(mnt->mnt_parent->mnt.mnt_flags & MNT_UMOUNT))
+ return true;
+
+ /* Has it been requested that the mount remain connected? */
+ if (how & UMOUNT_CONNECTED)
+ return false;
+
+ /* Is the mount locked such that it needs to remain connected? */
+ if (IS_MNT_LOCKED(mnt))
+ return false;
+
+ /* By default disconnect the mount */
+ return true;
+}
+
/*
* mount_lock must be held
* namespace_sem must be held for write
@@ -1398,10 +1428,7 @@ static void umount_tree(struct mount *mnt, enum umount_tree_flags how)
if (how & UMOUNT_SYNC)
p->mnt.mnt_flags |= MNT_SYNC_UMOUNT;
- disconnect = !(((how & UMOUNT_CONNECTED) &&
- mnt_has_parent(p) &&
- (p->mnt_parent->mnt.mnt_flags & MNT_UMOUNT)) ||
- IS_MNT_LOCKED_AND_LAZY(p));
+ disconnect = disconnect_mount(p, how);
pin_insert_group(&p->mnt_umount, &p->mnt_parent->mnt,
disconnect ? &unmounted : NULL);
@@ -1538,11 +1565,8 @@ void __detach_mounts(struct dentry *dentry)
while (!hlist_empty(&mp->m_list)) {
mnt = hlist_entry(mp->m_list.first, struct mount, mnt_mp_list);
if (mnt->mnt.mnt_flags & MNT_UMOUNT) {
- struct mount *p, *tmp;
- list_for_each_entry_safe(p, tmp, &mnt->mnt_mounts, mnt_child) {
- hlist_add_head(&p->mnt_umount.s_list, &unmounted);
- umount_mnt(p);
- }
+ hlist_add_head(&mnt->mnt_umount.s_list, &unmounted);
+ umount_mnt(mnt);
}
else umount_tree(mnt, UMOUNT_CONNECTED);
}
diff --git a/fs/pnode.h b/fs/pnode.h
index 7114ce6e6b9e..0fcdbe7ca648 100644
--- a/fs/pnode.h
+++ b/fs/pnode.h
@@ -20,8 +20,6 @@
#define SET_MNT_MARK(m) ((m)->mnt.mnt_flags |= MNT_MARKED)
#define CLEAR_MNT_MARK(m) ((m)->mnt.mnt_flags &= ~MNT_MARKED)
#define IS_MNT_LOCKED(m) ((m)->mnt.mnt_flags & MNT_LOCKED)
-#define IS_MNT_LOCKED_AND_LAZY(m) \
- (((m)->mnt.mnt_flags & (MNT_LOCKED|MNT_SYNC_UMOUNT)) == MNT_LOCKED)
#define CL_EXPIRE 0x01
#define CL_SLAVE 0x02
diff --git a/include/uapi/linux/virtio_net.h b/include/uapi/linux/virtio_net.h
index 7bbee79ca293..ec32293a00db 100644
--- a/include/uapi/linux/virtio_net.h
+++ b/include/uapi/linux/virtio_net.h
@@ -34,6 +34,7 @@
/* The feature bitmap for virtio net */
#define VIRTIO_NET_F_CSUM 0 /* Host handles pkts w/ partial csum */
#define VIRTIO_NET_F_GUEST_CSUM 1 /* Guest handles pkts w/ partial csum */
+#define VIRTIO_NET_F_CTRL_GUEST_OFFLOADS 2 /* Dynamic offload configuration. */
#define VIRTIO_NET_F_MAC 5 /* Host has given MAC address. */
#define VIRTIO_NET_F_GUEST_TSO4 7 /* Guest can handle TSOv4 in. */
#define VIRTIO_NET_F_GUEST_TSO6 8 /* Guest can handle TSOv6 in. */
@@ -226,4 +227,19 @@ struct virtio_net_ctrl_mq {
#define VIRTIO_NET_CTRL_MQ_VQ_PAIRS_MIN 1
#define VIRTIO_NET_CTRL_MQ_VQ_PAIRS_MAX 0x8000
+/*
+ * Control network offloads
+ *
+ * Reconfigures the network offloads that Guest can handle.
+ *
+ * Available with the VIRTIO_NET_F_CTRL_GUEST_OFFLOADS feature bit.
+ *
+ * Command data format matches the feature bit mask exactly.
+ *
+ * See VIRTIO_NET_F_GUEST_* for the list of offloads
+ * that can be enabled/disabled.
+ */
+#define VIRTIO_NET_CTRL_GUEST_OFFLOADS 5
+#define VIRTIO_NET_CTRL_GUEST_OFFLOADS_SET 0
+
#endif /* _LINUX_VIRTIO_NET_H */
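
For reference, the VIRTIO_NET_CTRL_GUEST_OFFLOADS_SET payload is a 64-bit little-endian bitmap whose bit positions reuse the VIRTIO_NET_F_GUEST_* feature numbers. A hedged sketch of the buffers a driver would queue for the command; struct ctrl_hdr is a stand-in for the virtio_net_ctrl_hdr defined earlier in this header, and on the wire the three parts travel as separate descriptors rather than one contiguous struct:

#include <stdint.h>

struct ctrl_hdr { uint8_t class, cmd; };  /* stand-in for virtio_net_ctrl_hdr */

struct guest_offloads_cmd {
	struct ctrl_hdr hdr;     /* { VIRTIO_NET_CTRL_GUEST_OFFLOADS,
				  *   VIRTIO_NET_CTRL_GUEST_OFFLOADS_SET }    */
	uint64_t offloads;       /* e.g. (1ULL << VIRTIO_NET_F_GUEST_CSUM) |
				  *      (1ULL << VIRTIO_NET_F_GUEST_TSO4)    */
	uint8_t  ack;            /* written by the device: VIRTIO_NET_OK/ERR  */
};
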
diff --git a/include/uapi/linux/virtio_pci.h b/include/uapi/linux/virtio_pci.h
index 75301468359f..90007a1abcab 100644
--- a/include/uapi/linux/virtio_pci.h
+++ b/include/uapi/linux/virtio_pci.h
@@ -157,6 +157,12 @@ struct virtio_pci_common_cfg {
__le32 queue_used_hi; /* read-write */
};
+/* Fields in VIRTIO_PCI_CAP_PCI_CFG: */
+struct virtio_pci_cfg_cap {
+ struct virtio_pci_cap cap;
+ __u8 pci_cfg_data[4]; /* Data for BAR access. */
+};
+
/* Macro versions of offsets for the Old Timers! */
#define VIRTIO_PCI_CAP_VNDR 0
#define VIRTIO_PCI_CAP_NEXT 1
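
The new virtio_pci_cfg_cap backs VIRTIO_PCI_CAP_PCI_CFG: a driver programs bar, offset and length in the embedded virtio_pci_cap through ordinary PCI configuration-space writes, then moves up to four bytes at a time through pci_cfg_data, which gives it a window into any device BAR without mapping it. A hedged sketch of a 32-bit read through that window; the pci_cfg_* accessors and cap_off (the capability's config-space offset) are assumed placeholders, and big-endian hosts would additionally need cpu_to_le32 conversions:

#include <stdint.h>
#include <stddef.h>
#include <linux/virtio_pci.h>

/* Assumed platform primitives for PCI configuration-space access. */
extern void     pci_cfg_write8(unsigned int off, uint8_t val);
extern void     pci_cfg_write32(unsigned int off, uint32_t val);
extern uint32_t pci_cfg_read32(unsigned int off);

static uint32_t virtio_cfg_window_read32(unsigned int cap_off, uint8_t bar,
					  uint32_t offset)
{
	/* Select which BAR, which offset within it, and how many bytes. */
	pci_cfg_write8(cap_off + offsetof(struct virtio_pci_cfg_cap, cap.bar), bar);
	pci_cfg_write32(cap_off + offsetof(struct virtio_pci_cfg_cap, cap.offset), offset);
	pci_cfg_write32(cap_off + offsetof(struct virtio_pci_cfg_cap, cap.length), 4);

	/* The device mirrors the selected BAR bytes into pci_cfg_data. */
	return pci_cfg_read32(cap_off + offsetof(struct virtio_pci_cfg_cap, pci_cfg_data));
}
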
diff --git a/include/uapi/linux/virtio_ring.h b/include/uapi/linux/virtio_ring.h
index 915980ac68df..c07295969b7e 100644
--- a/include/uapi/linux/virtio_ring.h
+++ b/include/uapi/linux/virtio_ring.h
@@ -31,6 +31,9 @@
* SUCH DAMAGE.
*
* Copyright Rusty Russell IBM Corporation 2007. */
+#ifndef __KERNEL__
+#include <stdint.h>
+#endif
#include <linux/types.h>
#include <linux/virtio_types.h>
@@ -143,7 +146,7 @@ static inline void vring_init(struct vring *vr, unsigned int num, void *p,
vr->num = num;
vr->desc = p;
vr->avail = p + num*sizeof(struct vring_desc);
- vr->used = (void *)(((unsigned long)&vr->avail->ring[num] + sizeof(__virtio16)
+ vr->used = (void *)(((uintptr_t)&vr->avail->ring[num] + sizeof(__virtio16)
+ align-1) & ~(align - 1));
}
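
The cast change matters because this uapi header is also consumed by non-Linux userspace, where unsigned long can be narrower than a pointer (LLP64 environments); uintptr_t, supplied outside the kernel by the new <stdint.h> include, keeps the address arithmetic full-width. A small standalone sketch of the same layout arithmetic, computing where the used ring starts for the standard split-ring layout:

#include <stdint.h>
#include <stdio.h>

/* The descriptor table (16 bytes per entry) is followed by the available
 * ring (flags, idx, ring[num], used_event), and the used ring begins at the
 * next 'align' boundary; this mirrors what vring_init() computes. */
static uintptr_t used_offset(unsigned int num, unsigned long align)
{
	uintptr_t off = (uintptr_t)num * 16;   /* struct vring_desc is 16 bytes */

	off += 2 + 2 + 2 * num + 2;            /* avail: flags, idx, ring[], used_event */
	return (off + align - 1) & ~(align - 1);
}

int main(void)
{
	printf("used ring starts at %lu for num=256, align=4096\n",
	       (unsigned long)used_offset(256, 4096));
	return 0;
}
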
diff --git a/net/9p/trans_virtio.c b/net/9p/trans_virtio.c
index 9dd49ca67dbc..6e70ddb158b4 100644
--- a/net/9p/trans_virtio.c
+++ b/net/9p/trans_virtio.c
@@ -704,6 +704,7 @@ static void p9_virtio_remove(struct virtio_device *vdev)
mutex_unlock(&virtio_9p_lock);
+ vdev->config->reset(vdev);
vdev->config->del_vqs(vdev);
sysfs_remove_file(&(vdev->dev.kobj), &dev_attr_mount_tag.attr);
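
The added reset follows the ordering most virtio drivers use on remove: resetting the device first guarantees it can no longer touch the rings before del_vqs() frees them. A minimal sketch of that ordering; the function is illustrative, not a patch to trans_virtio.c:

static void example_virtio_remove(struct virtio_device *vdev)
{
	/* ... stop submitting requests and wait for in-flight ones ... */
	vdev->config->reset(vdev);      /* device stops DMA to the virtqueues */
	vdev->config->del_vqs(vdev);    /* now safe to tear the rings down */
}
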