summary refs log tree commit diff
path: root/virt
diff options
context:
space:
mode:
Diffstat (limited to 'virt')
-rw-r--r--	virt/kvm/binary_stats.c	2
-rw-r--r--	virt/kvm/guest_memfd.c	17
-rw-r--r--	virt/kvm/kvm_main.c	95
3 files changed, 57 insertions(+), 57 deletions(-)
diff --git a/virt/kvm/binary_stats.c b/virt/kvm/binary_stats.c
index eefca6c69f51..76ce697c773b 100644
--- a/virt/kvm/binary_stats.c
+++ b/virt/kvm/binary_stats.c
@@ -50,7 +50,7 @@
* Return: the number of bytes that has been successfully read
*/
ssize_t kvm_stats_read(char *id, const struct kvm_stats_header *header,
- const struct _kvm_stats_desc *desc,
+ const struct kvm_stats_desc *desc,
void *stats, size_t size_stats,
char __user *user_buffer, size_t size, loff_t *offset)
{
diff --git a/virt/kvm/guest_memfd.c b/virt/kvm/guest_memfd.c
index 017d84a7adf3..69c9d6d546b2 100644
--- a/virt/kvm/guest_memfd.c
+++ b/virt/kvm/guest_memfd.c
@@ -30,6 +30,7 @@ struct gmem_file {
struct gmem_inode {
struct shared_policy policy;
struct inode vfs_inode;
+ struct list_head gmem_file_list;
u64 flags;
};
@@ -39,8 +40,8 @@ static __always_inline struct gmem_inode *GMEM_I(struct inode *inode)
return container_of(inode, struct gmem_inode, vfs_inode);
}
-#define kvm_gmem_for_each_file(f, mapping) \
- list_for_each_entry(f, &(mapping)->i_private_list, entry)
+#define kvm_gmem_for_each_file(f, inode) \
+ list_for_each_entry(f, &GMEM_I(inode)->gmem_file_list, entry)
/**
* folio_file_pfn - like folio_file_page, but return a pfn.
@@ -126,14 +127,13 @@ static struct folio *kvm_gmem_get_folio(struct inode *inode, pgoff_t index)
* Fast-path: See if folio is already present in mapping to avoid
* policy_lookup.
*/
- folio = __filemap_get_folio(inode->i_mapping, index,
- FGP_LOCK | FGP_ACCESSED, 0);
+ folio = filemap_lock_folio(inode->i_mapping, index);
if (!IS_ERR(folio))
return folio;
policy = mpol_shared_policy_lookup(&GMEM_I(inode)->policy, index);
folio = __filemap_get_folio_mpol(inode->i_mapping, index,
- FGP_LOCK | FGP_ACCESSED | FGP_CREAT,
+ FGP_LOCK | FGP_CREAT,
mapping_gfp_mask(inode->i_mapping), policy);
mpol_cond_put(policy);
@@ -202,7 +202,7 @@ static void kvm_gmem_invalidate_begin(struct inode *inode, pgoff_t start,
attr_filter = kvm_gmem_get_invalidate_filter(inode);
- kvm_gmem_for_each_file(f, inode->i_mapping)
+ kvm_gmem_for_each_file(f, inode)
__kvm_gmem_invalidate_begin(f, start, end, attr_filter);
}
@@ -223,7 +223,7 @@ static void kvm_gmem_invalidate_end(struct inode *inode, pgoff_t start,
{
struct gmem_file *f;
- kvm_gmem_for_each_file(f, inode->i_mapping)
+ kvm_gmem_for_each_file(f, inode)
__kvm_gmem_invalidate_end(f, start, end);
}
@@ -609,7 +609,7 @@ static int __kvm_gmem_create(struct kvm *kvm, loff_t size, u64 flags)
kvm_get_kvm(kvm);
f->kvm = kvm;
xa_init(&f->bindings);
- list_add(&f->entry, &inode->i_mapping->i_private_list);
+ list_add(&f->entry, &GMEM_I(inode)->gmem_file_list);
fd_install(fd, file);
return fd;
@@ -945,6 +945,7 @@ static struct inode *kvm_gmem_alloc_inode(struct super_block *sb)
mpol_shared_policy_init(&gi->policy, NULL);
gi->flags = 0;
+ INIT_LIST_HEAD(&gi->gmem_file_list);
return &gi->vfs_inode;
}
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index 1bc1da66b4b0..89489996fbc1 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -41,7 +41,6 @@
#include <linux/spinlock.h>
#include <linux/compat.h>
#include <linux/srcu.h>
-#include <linux/hugetlb.h>
#include <linux/slab.h>
#include <linux/sort.h>
#include <linux/bsearch.h>
@@ -76,22 +75,22 @@ MODULE_DESCRIPTION("Kernel-based Virtual Machine (KVM) Hypervisor");
MODULE_LICENSE("GPL");
/* Architectures should define their poll value according to the halt latency */
-unsigned int halt_poll_ns = KVM_HALT_POLL_NS_DEFAULT;
+unsigned int __read_mostly halt_poll_ns = KVM_HALT_POLL_NS_DEFAULT;
module_param(halt_poll_ns, uint, 0644);
EXPORT_SYMBOL_FOR_KVM_INTERNAL(halt_poll_ns);
/* Default doubles per-vcpu halt_poll_ns. */
-unsigned int halt_poll_ns_grow = 2;
+unsigned int __read_mostly halt_poll_ns_grow = 2;
module_param(halt_poll_ns_grow, uint, 0644);
EXPORT_SYMBOL_FOR_KVM_INTERNAL(halt_poll_ns_grow);
/* The start value to grow halt_poll_ns from */
-unsigned int halt_poll_ns_grow_start = 10000; /* 10us */
+unsigned int __read_mostly halt_poll_ns_grow_start = 10000; /* 10us */
module_param(halt_poll_ns_grow_start, uint, 0644);
EXPORT_SYMBOL_FOR_KVM_INTERNAL(halt_poll_ns_grow_start);
/* Default halves per-vcpu halt_poll_ns. */
-unsigned int halt_poll_ns_shrink = 2;
+unsigned int __read_mostly halt_poll_ns_shrink = 2;
module_param(halt_poll_ns_shrink, uint, 0644);
EXPORT_SYMBOL_FOR_KVM_INTERNAL(halt_poll_ns_shrink);
@@ -99,7 +98,7 @@ EXPORT_SYMBOL_FOR_KVM_INTERNAL(halt_poll_ns_shrink);
* Allow direct access (from KVM or the CPU) without MMU notifier protection
* to unpinned pages.
*/
-static bool allow_unsafe_mappings;
+static bool __ro_after_init allow_unsafe_mappings;
module_param(allow_unsafe_mappings, bool, 0444);
/*
@@ -647,11 +646,9 @@ mmu_unlock:
return r;
}
-static __always_inline int kvm_age_hva_range(struct mmu_notifier *mn,
- unsigned long start,
- unsigned long end,
- gfn_handler_t handler,
- bool flush_on_ret)
+static __always_inline bool kvm_age_hva_range(struct mmu_notifier *mn,
+ unsigned long start, unsigned long end, gfn_handler_t handler,
+ bool flush_on_ret)
{
struct kvm *kvm = mmu_notifier_to_kvm(mn);
const struct kvm_mmu_notifier_range range = {
@@ -667,10 +664,8 @@ static __always_inline int kvm_age_hva_range(struct mmu_notifier *mn,
return kvm_handle_hva_range(kvm, &range).ret;
}
-static __always_inline int kvm_age_hva_range_no_flush(struct mmu_notifier *mn,
- unsigned long start,
- unsigned long end,
- gfn_handler_t handler)
+static __always_inline bool kvm_age_hva_range_no_flush(struct mmu_notifier *mn,
+ unsigned long start, unsigned long end, gfn_handler_t handler)
{
return kvm_age_hva_range(mn, start, end, handler, false);
}
@@ -830,10 +825,8 @@ static void kvm_mmu_notifier_invalidate_range_end(struct mmu_notifier *mn,
rcuwait_wake_up(&kvm->mn_memslots_update_rcuwait);
}
-static int kvm_mmu_notifier_clear_flush_young(struct mmu_notifier *mn,
- struct mm_struct *mm,
- unsigned long start,
- unsigned long end)
+static bool kvm_mmu_notifier_clear_flush_young(struct mmu_notifier *mn,
+ struct mm_struct *mm, unsigned long start, unsigned long end)
{
trace_kvm_age_hva(start, end);
@@ -841,10 +834,8 @@ static int kvm_mmu_notifier_clear_flush_young(struct mmu_notifier *mn,
!IS_ENABLED(CONFIG_KVM_ELIDE_TLB_FLUSH_IF_YOUNG));
}
-static int kvm_mmu_notifier_clear_young(struct mmu_notifier *mn,
- struct mm_struct *mm,
- unsigned long start,
- unsigned long end)
+static bool kvm_mmu_notifier_clear_young(struct mmu_notifier *mn,
+ struct mm_struct *mm, unsigned long start, unsigned long end)
{
trace_kvm_age_hva(start, end);
@@ -864,9 +855,8 @@ static int kvm_mmu_notifier_clear_young(struct mmu_notifier *mn,
return kvm_age_hva_range_no_flush(mn, start, end, kvm_age_gfn);
}
-static int kvm_mmu_notifier_test_young(struct mmu_notifier *mn,
- struct mm_struct *mm,
- unsigned long address)
+static bool kvm_mmu_notifier_test_young(struct mmu_notifier *mn,
+ struct mm_struct *mm, unsigned long address)
{
trace_kvm_test_age_hva(address);
@@ -973,9 +963,9 @@ static void kvm_free_memslots(struct kvm *kvm, struct kvm_memslots *slots)
kvm_free_memslot(kvm, memslot);
}
-static umode_t kvm_stats_debugfs_mode(const struct _kvm_stats_desc *pdesc)
+static umode_t kvm_stats_debugfs_mode(const struct kvm_stats_desc *desc)
{
- switch (pdesc->desc.flags & KVM_STATS_TYPE_MASK) {
+ switch (desc->flags & KVM_STATS_TYPE_MASK) {
case KVM_STATS_TYPE_INSTANT:
return 0444;
case KVM_STATS_TYPE_CUMULATIVE:
@@ -1010,7 +1000,7 @@ static int kvm_create_vm_debugfs(struct kvm *kvm, const char *fdname)
struct dentry *dent;
char dir_name[ITOA_MAX_LEN * 2];
struct kvm_stat_data *stat_data;
- const struct _kvm_stats_desc *pdesc;
+ const struct kvm_stats_desc *pdesc;
int i, ret = -ENOMEM;
int kvm_debugfs_num_entries = kvm_vm_stats_header.num_desc +
kvm_vcpu_stats_header.num_desc;
@@ -1102,6 +1092,9 @@ static inline struct kvm_io_bus *kvm_get_bus_for_destruction(struct kvm *kvm,
!refcount_read(&kvm->users_count));
}
+static int kvm_enable_virtualization(void);
+static void kvm_disable_virtualization(void);
+
static struct kvm *kvm_create_vm(unsigned long type, const char *fdname)
{
struct kvm *kvm = kvm_arch_alloc_vm();
@@ -5574,17 +5567,19 @@ static struct miscdevice kvm_dev = {
};
#ifdef CONFIG_KVM_GENERIC_HARDWARE_ENABLING
-bool enable_virt_at_load = true;
+bool __ro_after_init enable_virt_at_load = true;
module_param(enable_virt_at_load, bool, 0444);
EXPORT_SYMBOL_FOR_KVM_INTERNAL(enable_virt_at_load);
-__visible bool kvm_rebooting;
-EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_rebooting);
-
static DEFINE_PER_CPU(bool, virtualization_enabled);
static DEFINE_MUTEX(kvm_usage_lock);
static int kvm_usage_count;
+__weak void kvm_arch_shutdown(void)
+{
+
+}
+
__weak void kvm_arch_enable_virtualization(void)
{
@@ -5638,10 +5633,9 @@ static int kvm_offline_cpu(unsigned int cpu)
static void kvm_shutdown(void *data)
{
+ kvm_arch_shutdown();
+
/*
- * Disable hardware virtualization and set kvm_rebooting to indicate
- * that KVM has asynchronously disabled hardware virtualization, i.e.
- * that relevant errors and exceptions aren't entirely unexpected.
* Some flavors of hardware virtualization need to be disabled before
* transferring control to firmware (to perform shutdown/reboot), e.g.
* on x86, virtualization can block INIT interrupts, which are used by
@@ -5650,7 +5644,6 @@ static void kvm_shutdown(void *data)
* 100% comprehensive.
*/
pr_info("kvm: exiting hardware virtualization\n");
- kvm_rebooting = true;
on_each_cpu(kvm_disable_virtualization_cpu, NULL, 1);
}
@@ -5689,7 +5682,7 @@ static struct syscore kvm_syscore = {
.ops = &kvm_syscore_ops,
};
-int kvm_enable_virtualization(void)
+static int kvm_enable_virtualization(void)
{
int r;
@@ -5734,9 +5727,8 @@ err_cpuhp:
--kvm_usage_count;
return r;
}
-EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_enable_virtualization);
-void kvm_disable_virtualization(void)
+static void kvm_disable_virtualization(void)
{
guard(mutex)(&kvm_usage_lock);
@@ -5747,7 +5739,6 @@ void kvm_disable_virtualization(void)
cpuhp_remove_state(CPUHP_AP_KVM_ONLINE);
kvm_arch_disable_virtualization();
}
-EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_disable_virtualization);
static int kvm_init_virtualization(void)
{
@@ -5763,6 +5754,14 @@ static void kvm_uninit_virtualization(void)
kvm_disable_virtualization();
}
#else /* CONFIG_KVM_GENERIC_HARDWARE_ENABLING */
+static int kvm_enable_virtualization(void)
+{
+ return 0;
+}
+static void kvm_disable_virtualization(void)
+{
+
+}
static int kvm_init_virtualization(void)
{
return 0;
@@ -6171,11 +6170,11 @@ static int kvm_stat_data_get(void *data, u64 *val)
switch (stat_data->kind) {
case KVM_STAT_VM:
r = kvm_get_stat_per_vm(stat_data->kvm,
- stat_data->desc->desc.offset, val);
+ stat_data->desc->offset, val);
break;
case KVM_STAT_VCPU:
r = kvm_get_stat_per_vcpu(stat_data->kvm,
- stat_data->desc->desc.offset, val);
+ stat_data->desc->offset, val);
break;
}
@@ -6193,11 +6192,11 @@ static int kvm_stat_data_clear(void *data, u64 val)
switch (stat_data->kind) {
case KVM_STAT_VM:
r = kvm_clear_stat_per_vm(stat_data->kvm,
- stat_data->desc->desc.offset);
+ stat_data->desc->offset);
break;
case KVM_STAT_VCPU:
r = kvm_clear_stat_per_vcpu(stat_data->kvm,
- stat_data->desc->desc.offset);
+ stat_data->desc->offset);
break;
}
@@ -6345,7 +6344,7 @@ static void kvm_uevent_notify_change(unsigned int type, struct kvm *kvm)
static void kvm_init_debug(void)
{
const struct file_operations *fops;
- const struct _kvm_stats_desc *pdesc;
+ const struct kvm_stats_desc *pdesc;
int i;
kvm_debugfs_dir = debugfs_create_dir("kvm", NULL);
@@ -6358,7 +6357,7 @@ static void kvm_init_debug(void)
fops = &vm_stat_readonly_fops;
debugfs_create_file(pdesc->name, kvm_stats_debugfs_mode(pdesc),
kvm_debugfs_dir,
- (void *)(long)pdesc->desc.offset, fops);
+ (void *)(long)pdesc->offset, fops);
}
for (i = 0; i < kvm_vcpu_stats_header.num_desc; ++i) {
@@ -6369,7 +6368,7 @@ static void kvm_init_debug(void)
fops = &vcpu_stat_readonly_fops;
debugfs_create_file(pdesc->name, kvm_stats_debugfs_mode(pdesc),
kvm_debugfs_dir,
- (void *)(long)pdesc->desc.offset, fops);
+ (void *)(long)pdesc->offset, fops);
}
}