Diffstat (limited to 'include/asm-generic')
 -rw-r--r--  include/asm-generic/Kbuild               |  1
 -rw-r--r--  include/asm-generic/atomic64.h           |  2
 -rw-r--r--  include/asm-generic/audit_change_attr.h  |  3
 -rw-r--r--  include/asm-generic/audit_read.h         |  6
 -rw-r--r--  include/asm-generic/mshyperv.h           | 13
 -rw-r--r--  include/asm-generic/rqspinlock.h         |  4
 -rw-r--r--  include/asm-generic/tlb.h                | 84
 -rw-r--r--  include/asm-generic/topology.h           |  8
 -rw-r--r--  include/asm-generic/vmlinux.lds.h        |  3
9 files changed, 108 insertions(+), 16 deletions(-)
diff --git a/include/asm-generic/Kbuild b/include/asm-generic/Kbuild
index 295c94a3ccc1..9aff61e7b8f2 100644
--- a/include/asm-generic/Kbuild
+++ b/include/asm-generic/Kbuild
@@ -32,6 +32,7 @@ mandatory-y += irq_work.h
 mandatory-y += kdebug.h
 mandatory-y += kmap_size.h
 mandatory-y += kprobes.h
+mandatory-y += kvm_types.h
 mandatory-y += linkage.h
 mandatory-y += local.h
 mandatory-y += local64.h
diff --git a/include/asm-generic/atomic64.h b/include/asm-generic/atomic64.h
index 100d24b02e52..f22ccfc0df98 100644
--- a/include/asm-generic/atomic64.h
+++ b/include/asm-generic/atomic64.h
@@ -10,7 +10,7 @@
 #include <linux/types.h>
 
 typedef struct {
-	s64 counter;
+	s64 __aligned(sizeof(s64)) counter;
 } atomic64_t;
 
 #define ATOMIC64_INIT(i)	{ (i) }
diff --git a/include/asm-generic/audit_change_attr.h b/include/asm-generic/audit_change_attr.h
index cc840537885f..ddd90bbe40df 100644
--- a/include/asm-generic/audit_change_attr.h
+++ b/include/asm-generic/audit_change_attr.h
@@ -26,6 +26,9 @@ __NR_fremovexattr,
 __NR_fchownat,
 __NR_fchmodat,
 #endif
+#ifdef __NR_fchmodat2
+__NR_fchmodat2,
+#endif
 #ifdef __NR_chown32
 __NR_chown32,
 __NR_fchown32,
diff --git a/include/asm-generic/audit_read.h b/include/asm-generic/audit_read.h
index 7bb7b5a83ae2..fb9991f53fb6 100644
--- a/include/asm-generic/audit_read.h
+++ b/include/asm-generic/audit_read.h
@@ -4,9 +4,15 @@ __NR_readlink,
 #endif
 __NR_quotactl,
 __NR_listxattr,
+#ifdef __NR_listxattrat
+__NR_listxattrat,
+#endif
 __NR_llistxattr,
 __NR_flistxattr,
 __NR_getxattr,
+#ifdef __NR_getxattrat
+__NR_getxattrat,
+#endif
 __NR_lgetxattr,
 __NR_fgetxattr,
 #ifdef __NR_readlinkat
diff --git a/include/asm-generic/mshyperv.h b/include/asm-generic/mshyperv.h
index ecedab554c80..d37b68238c97 100644
--- a/include/asm-generic/mshyperv.h
+++ b/include/asm-generic/mshyperv.h
@@ -342,6 +342,9 @@ static inline bool hv_parent_partition(void)
 {
 	return hv_root_partition() || hv_l1vh_partition();
 }
+
+bool hv_result_needs_memory(u64 status);
+int hv_deposit_memory_node(int node, u64 partition_id, u64 status);
 int hv_call_deposit_pages(int node, u64 partition_id, u32 num_pages);
 int hv_call_add_logical_proc(int node, u32 lp_index, u32 acpi_id);
 int hv_call_create_vp(int node, u64 partition_id, u32 vp_index, u32 flags);
@@ -350,6 +353,11 @@ int hv_call_create_vp(int node, u64 partition_id, u32 vp_index, u32 flags);
 static inline bool hv_root_partition(void) { return false; }
 static inline bool hv_l1vh_partition(void) { return false; }
 static inline bool hv_parent_partition(void) { return false; }
+static inline bool hv_result_needs_memory(u64 status) { return false; }
+static inline int hv_deposit_memory_node(int node, u64 partition_id, u64 status)
+{
+	return -EOPNOTSUPP;
+}
 static inline int hv_call_deposit_pages(int node, u64 partition_id, u32 num_pages)
 {
 	return -EOPNOTSUPP;
@@ -364,6 +372,11 @@ static inline int hv_call_create_vp(int node, u64 partition_id, u32 vp_index, u32 flags)
 }
 #endif /* CONFIG_MSHV_ROOT */
 
+static inline int hv_deposit_memory(u64 partition_id, u64 status)
+{
+	return hv_deposit_memory_node(NUMA_NO_NODE, partition_id, status);
+}
+
 #if IS_ENABLED(CONFIG_HYPERV_VTL_MODE)
 u8 __init get_vtl(void);
 #else
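[Editor's note on the mshyperv.h hunks above: a root-partition hypercall can fail
because the hypervisor lacks backing memory for the target partition;
hv_result_needs_memory() detects that status, and hv_deposit_memory() (or the
_node variant) donates pages so the call can be retried. A minimal sketch of
such a retry loop, assuming a made-up HVCALL_DO_SOMETHING control code; the
error translation is simplified to -EIO.]

/*
 * Hypothetical retry loop, illustration only -- not part of this patch.
 * HVCALL_DO_SOMETHING is a made-up control code.
 */
static int hv_do_something(u64 partition_id, void *input, void *output)
{
	u64 status;
	int ret;

	for (;;) {
		status = hv_do_hypercall(HVCALL_DO_SOMETHING, input, output);
		if (hv_result_success(status))
			return 0;
		if (!hv_result_needs_memory(status))
			return -EIO; /* real code would translate the status */
		/*
		 * The hypervisor ran out of memory for this partition:
		 * deposit pages (NUMA_NO_NODE lets the helper pick a node)
		 * and retry the hypercall.
		 */
		ret = hv_deposit_memory(partition_id, status);
		if (ret)
			return ret;
	}
}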
diff --git a/include/asm-generic/rqspinlock.h b/include/asm-generic/rqspinlock.h
index 0f2dcbbfee2f..151d267a496b 100644
--- a/include/asm-generic/rqspinlock.h
+++ b/include/asm-generic/rqspinlock.h
@@ -28,7 +28,7 @@ struct rqspinlock {
  */
 struct bpf_res_spin_lock {
 	u32 val;
-};
+} __aligned(__alignof__(struct rqspinlock));
 
 struct qspinlock;
 #ifdef CONFIG_QUEUED_SPINLOCKS
@@ -191,7 +191,7 @@ static __always_inline int res_spin_lock(rqspinlock_t *lock)
 
 #else
 
-#define res_spin_lock(lock) resilient_tas_spin_lock(lock)
+#define res_spin_lock(lock) ({ grab_held_lock_entry(lock); resilient_tas_spin_lock(lock); })
 
 #endif /* CONFIG_QUEUED_SPINLOCKS */
diff --git a/include/asm-generic/tlb.h b/include/asm-generic/tlb.h
index 1fff717cae51..4aeac0c3d3f0 100644
--- a/include/asm-generic/tlb.h
+++ b/include/asm-generic/tlb.h
@@ -46,7 +46,8 @@
  *
  * The mmu_gather API consists of:
  *
- *  - tlb_gather_mmu() / tlb_gather_mmu_fullmm() / tlb_finish_mmu()
+ *  - tlb_gather_mmu() / tlb_gather_mmu_fullmm() / tlb_gather_mmu_vma() /
+ *    tlb_finish_mmu()
  *
  *    start and finish a mmu_gather
  *
@@ -212,7 +213,7 @@ struct mmu_table_batch {
 #define MAX_TABLE_BATCH		\
 	((PAGE_SIZE - sizeof(struct mmu_table_batch)) / sizeof(void *))
 
-#ifndef __HAVE_ARCH_TLB_REMOVE_TABLE
+#ifndef CONFIG_HAVE_ARCH_TLB_REMOVE_TABLE
 static inline void __tlb_remove_table(void *table)
 {
 	struct ptdesc *ptdesc = (struct ptdesc *)table;
@@ -286,8 +287,7 @@ struct mmu_gather_batch {
  */
 #define MAX_GATHER_BATCH_COUNT	(10000UL/MAX_GATHER_BATCH)
 
-extern bool __tlb_remove_page_size(struct mmu_gather *tlb, struct page *page,
-				   bool delay_rmap, int page_size);
+extern bool __tlb_remove_page_size(struct mmu_gather *tlb, struct page *page, int page_size);
 bool __tlb_remove_folio_pages(struct mmu_gather *tlb, struct page *page,
 			      unsigned int nr_pages, bool delay_rmap);
 
@@ -364,6 +364,20 @@ struct mmu_gather {
 	unsigned int		vma_huge : 1;
 	unsigned int		vma_pfn : 1;
 
+	/*
+	 * Did we unshare (unmap) any shared page tables? For now only
+	 * used for hugetlb PMD table sharing.
+	 */
+	unsigned int		unshared_tables : 1;
+
+	/*
+	 * Did we unshare any page tables such that they are now exclusive
+	 * and could get reused+modified by the new owner? When setting this
+	 * flag, "unshared_tables" will be set as well. For now only used
+	 * for hugetlb PMD table sharing.
+	 */
+	unsigned int		fully_unshared_tables : 1;
+
 	unsigned int		batch_count;
 
 #ifndef CONFIG_MMU_GATHER_NO_GATHER
@@ -400,6 +414,7 @@ static inline void __tlb_reset_range(struct mmu_gather *tlb)
 	tlb->cleared_pmds = 0;
 	tlb->cleared_puds = 0;
 	tlb->cleared_p4ds = 0;
+	tlb->unshared_tables = 0;
 	/*
 	 * Do not reset mmu_gather::vma_* fields here, we do not
	 * call into tlb_start_vma() again to set them if there is an
@@ -484,7 +499,7 @@ static inline void tlb_flush_mmu_tlbonly(struct mmu_gather *tlb)
 	 * these bits.
 	 */
 	if (!(tlb->freed_tables || tlb->cleared_ptes || tlb->cleared_pmds ||
-	      tlb->cleared_puds || tlb->cleared_p4ds))
+	      tlb->cleared_puds || tlb->cleared_p4ds || tlb->unshared_tables))
 		return;
 
 	tlb_flush(tlb);
@@ -494,7 +509,7 @@ static inline void tlb_flush_mmu_tlbonly(struct mmu_gather *tlb)
 static inline void tlb_remove_page_size(struct mmu_gather *tlb,
 					struct page *page, int page_size)
 {
-	if (__tlb_remove_page_size(tlb, page, false, page_size))
+	if (__tlb_remove_page_size(tlb, page, page_size))
 		tlb_flush_mmu(tlb);
 }
 
@@ -773,6 +788,63 @@ static inline bool huge_pmd_needs_flush(pmd_t oldpmd, pmd_t newpmd)
 }
 #endif
 
+#ifdef CONFIG_HUGETLB_PMD_PAGE_TABLE_SHARING
+static inline void tlb_unshare_pmd_ptdesc(struct mmu_gather *tlb,
+					  struct ptdesc *pt, unsigned long addr)
+{
+	/*
+	 * The caller must make sure that concurrent unsharing + exclusive
+	 * reuse is impossible until tlb_flush_unshared_tables() has been
+	 * called.
+	 */
+	VM_WARN_ON_ONCE(!ptdesc_pmd_is_shared(pt));
+	ptdesc_pmd_pts_dec(pt);
+
+	/* Clearing a PUD pointing at a PMD table with PMD leaves. */
+	tlb_flush_pmd_range(tlb, addr & PUD_MASK, PUD_SIZE);
+
+	/*
+	 * If the page table is now exclusively owned, we fully unshared
+	 * a page table.
+	 */
+	if (!ptdesc_pmd_is_shared(pt))
+		tlb->fully_unshared_tables = true;
+	tlb->unshared_tables = true;
+}
+
+static inline void tlb_flush_unshared_tables(struct mmu_gather *tlb)
+{
+	/*
+	 * As soon as the caller drops locks to allow for reuse of
+	 * previously-shared tables, these tables could get modified and
+	 * even reused outside of hugetlb context, so we have to make sure
+	 * that any page table walkers (incl. TLB, GUP-fast) are aware of
+	 * that change.
+	 *
+	 * Even if we are not fully unsharing a PMD table, we must
+	 * flush the TLB for the unsharer now.
+	 */
+	if (tlb->unshared_tables)
+		tlb_flush_mmu_tlbonly(tlb);
+
+	/*
+	 * Similarly, we must make sure that concurrent GUP-fast will not
+	 * walk previously-shared page tables that are getting modified+reused
+	 * elsewhere. So broadcast an IPI to wait for any concurrent GUP-fast.
+	 *
+	 * We only perform this when we are the last sharer of a page table,
+	 * as the IPI will reach all CPUs: any concurrent GUP-fast will have
+	 * completed afterwards.
+	 *
+	 * Note that on configs where tlb_remove_table_sync_one() is a NOP,
+	 * the expectation is that tlb_flush_mmu_tlbonly() will have issued
+	 * the required IPIs for us already.
+	 */
+	if (tlb->fully_unshared_tables) {
+		tlb_remove_table_sync_one();
+		tlb->fully_unshared_tables = false;
+	}
+}
+#endif /* CONFIG_HUGETLB_PMD_PAGE_TABLE_SHARING */
+
 #endif /* CONFIG_MMU */
 
 #endif /* _ASM_GENERIC__TLB_H */
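[Editor's note on the tlb.h hunks above: per the in-code comments, the intended
calling pattern is to unshare any number of PMD tables while holding the lock
that serializes sharing, then call tlb_flush_unshared_tables() once before that
lock is dropped. A rough sketch of that flow; pmd_table_of() is a hypothetical
lookup helper, not a kernel API.]

/*
 * Illustration only: pmd_table_of() is made up; a real caller (e.g. the
 * hugetlb unmap path) would already have the ptdesc at hand.
 */
static void unshare_pud_range(struct mmu_gather *tlb, struct mm_struct *mm,
			      unsigned long start, unsigned long end)
{
	unsigned long addr;

	/* Caller holds the lock preventing concurrent (un)sharing. */
	for (addr = start; addr < end; addr += PUD_SIZE) {
		struct ptdesc *pt = pmd_table_of(mm, addr); /* hypothetical */

		/* Drops one share reference and records the unshare. */
		tlb_unshare_pmd_ptdesc(tlb, pt, addr);
	}

	/*
	 * Before dropping the lock: flush the unsharer's TLB and IPI
	 * concurrent GUP-fast walkers for tables that became exclusive.
	 */
	tlb_flush_unshared_tables(tlb);
}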
diff --git a/include/asm-generic/topology.h b/include/asm-generic/topology.h
index 4dbe715be65b..9865ba48c5b1 100644
--- a/include/asm-generic/topology.h
+++ b/include/asm-generic/topology.h
@@ -45,11 +45,7 @@
 #endif
 
 #ifndef cpumask_of_node
-  #ifdef CONFIG_NUMA
-    #define cpumask_of_node(node)	((node) == 0 ? cpu_online_mask : cpu_none_mask)
-  #else
-    #define cpumask_of_node(node)	((void)(node), cpu_online_mask)
-  #endif
+#define cpumask_of_node(node)	((void)(node), cpu_online_mask)
 #endif
 
 #ifndef pcibus_to_node
 #define pcibus_to_node(bus)	((void)(bus), -1)
@@ -61,7 +57,7 @@
 				 cpumask_of_node(pcibus_to_node(bus)))
 #endif
 
-#endif /* CONFIG_NUMA */
+#endif /* !CONFIG_NUMA */
 
 #if !defined(CONFIG_NUMA) || !defined(CONFIG_HAVE_MEMORYLESS_NODES)
diff --git a/include/asm-generic/vmlinux.lds.h b/include/asm-generic/vmlinux.lds.h
index 8ca130af301f..eeb070f330bd 100644
--- a/include/asm-generic/vmlinux.lds.h
+++ b/include/asm-generic/vmlinux.lds.h
@@ -972,7 +972,8 @@
 #define RUNTIME_CONST_VARIABLES				\
 	RUNTIME_CONST(shift, d_hash_shift)		\
 	RUNTIME_CONST(ptr, dentry_hashtable)		\
-	RUNTIME_CONST(ptr, __dentry_cache)
+	RUNTIME_CONST(ptr, __dentry_cache)		\
+	RUNTIME_CONST(ptr, __names_cache)
 
 /* Alignment must be consistent with (kunit_suite *) in include/kunit/test.h */
 #define KUNIT_TABLE()						\
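[Editor's note on the atomic64.h hunk earlier in this diff: on several 32-bit
ABIs (i386 being the classic case) a bare s64 struct member is only 4-byte
aligned, while 64-bit atomic operations generally need natural alignment; the
added __aligned(sizeof(s64)) forces it. A standalone user-space demonstration
of the same effect, using plain C types as stand-ins:]

#include <stdio.h>

/* Old layout: the member gets whatever alignment the ABI assigns. */
typedef struct {
	long long counter;
} atomic64_old_t;

/* Patched layout: force natural (8-byte) alignment of the counter. */
typedef struct {
	long long __attribute__((aligned(sizeof(long long)))) counter;
} atomic64_new_t;

int main(void)
{
	/* On i386, the old type typically reports 4; the new one, 8. */
	printf("old: %zu, new: %zu\n",
	       _Alignof(atomic64_old_t), _Alignof(atomic64_new_t));
	return 0;
}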

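[Editor's note on the rqspinlock.h hunk: the change gives the BPF-visible
struct bpf_res_spin_lock the alignment of the real struct rqspinlock, so one
type can safely overlay the other. A compile-time check in the same spirit;
the struct bodies below are simplified stand-ins, not the kernel definitions:]

/* Simplified stand-ins; the real kernel types live in rqspinlock.h. */
struct rqspinlock {
	int val;
};

struct bpf_res_spin_lock {
	unsigned int val;
} __attribute__((aligned(__alignof__(struct rqspinlock))));

/* The mirror type must match the real lock in size and alignment,
 * otherwise casting between the two would be unsound. */
_Static_assert(sizeof(struct bpf_res_spin_lock) == sizeof(struct rqspinlock),
	       "size mismatch");
_Static_assert(_Alignof(struct bpf_res_spin_lock) == _Alignof(struct rqspinlock),
	       "alignment mismatch");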