| author | Xie Yuanbin <qq570070308@gmail.com> | 2026-02-17 00:49:48 +0800 |
|---|---|---|
| committer | Ingo Molnar <mingo@kernel.org> | 2026-03-06 06:21:27 +0100 |
| commit | 4b9ef32c57a68eb98c45835c2beaa77f8e51c5c4 (patch) | |
| tree | 2cccce37305f61b388233ff6c68002cce1d58a4d /arch | |
| parent | 12f8069115d5ff9d292c6b00c74e1984b01b6fc1 (diff) | |
x86/mm/tlb: Make enter_lazy_tlb() always inline on x86
enter_lazy_tlb() on x86 is short, and it is called during context
switching, which is a hot code path.
Make enter_lazy_tlb() always inline on x86 to optimize performance.
Suggested-by: Dave Hansen <dave.hansen@intel.com>
Signed-off-by: Xie Yuanbin <qq570070308@gmail.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Link: https://patch.msgid.link/20260216164950.147617-2-qq570070308@gmail.com
Diffstat (limited to 'arch')
| -rw-r--r-- | arch/x86/include/asm/mmu_context.h | 3 |
|---|---|---|
| -rw-r--r-- | arch/x86/include/asm/tlbflush.h | 26 |
| -rw-r--r-- | arch/x86/mm/tlb.c | 21 |
3 files changed, 26 insertions, 24 deletions
```diff
diff --git a/arch/x86/include/asm/mmu_context.h b/arch/x86/include/asm/mmu_context.h
index 1acafb1c6a93..ef5b507de34e 100644
--- a/arch/x86/include/asm/mmu_context.h
+++ b/arch/x86/include/asm/mmu_context.h
@@ -136,9 +136,6 @@ static inline void mm_reset_untag_mask(struct mm_struct *mm)
 }
 #endif
 
-#define enter_lazy_tlb enter_lazy_tlb
-extern void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk);
-
 extern void mm_init_global_asid(struct mm_struct *mm);
 extern void mm_free_global_asid(struct mm_struct *mm);
 
diff --git a/arch/x86/include/asm/tlbflush.h b/arch/x86/include/asm/tlbflush.h
index 5a3cdc439e38..0545fe75c3fa 100644
--- a/arch/x86/include/asm/tlbflush.h
+++ b/arch/x86/include/asm/tlbflush.h
@@ -172,6 +172,28 @@ struct tlb_state_shared {
 };
 DECLARE_PER_CPU_SHARED_ALIGNED(struct tlb_state_shared, cpu_tlbstate_shared);
 
+/*
+ * Please ignore the name of this function. It should be called
+ * switch_to_kernel_thread().
+ *
+ * enter_lazy_tlb() is a hint from the scheduler that we are entering a
+ * kernel thread or other context without an mm. Acceptable implementations
+ * include doing nothing whatsoever, switching to init_mm, or various clever
+ * lazy tricks to try to minimize TLB flushes.
+ *
+ * The scheduler reserves the right to call enter_lazy_tlb() several times
+ * in a row. It will notify us that we're going back to a real mm by
+ * calling switch_mm_irqs_off().
+ */
+#define enter_lazy_tlb enter_lazy_tlb
+static __always_inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
+{
+	if (this_cpu_read(cpu_tlbstate.loaded_mm) == &init_mm)
+		return;
+
+	this_cpu_write(cpu_tlbstate_shared.is_lazy, true);
+}
+
 bool nmi_uaccess_okay(void);
 #define nmi_uaccess_okay nmi_uaccess_okay
 
@@ -480,6 +502,10 @@ static inline void cpu_tlbstate_update_lam(unsigned long lam, u64 untag_mask)
 {
 }
 #endif
+#else /* !MODULE */
+#define enter_lazy_tlb enter_lazy_tlb
+extern void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
+	__compiletime_error("enter_lazy_tlb() should not be used in modules");
 #endif /* !MODULE */
 
 static inline void __native_tlb_flush_global(unsigned long cr4)
diff --git a/arch/x86/mm/tlb.c b/arch/x86/mm/tlb.c
index 621e09d049cb..af43d177087e 100644
--- a/arch/x86/mm/tlb.c
+++ b/arch/x86/mm/tlb.c
@@ -972,27 +972,6 @@ reload_tlb:
 }
 
 /*
- * Please ignore the name of this function. It should be called
- * switch_to_kernel_thread().
- *
- * enter_lazy_tlb() is a hint from the scheduler that we are entering a
- * kernel thread or other context without an mm. Acceptable implementations
- * include doing nothing whatsoever, switching to init_mm, or various clever
- * lazy tricks to try to minimize TLB flushes.
- *
- * The scheduler reserves the right to call enter_lazy_tlb() several times
- * in a row. It will notify us that we're going back to a real mm by
- * calling switch_mm_irqs_off().
- */
-void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
-{
-	if (this_cpu_read(cpu_tlbstate.loaded_mm) == &init_mm)
-		return;
-
-	this_cpu_write(cpu_tlbstate_shared.is_lazy, true);
-}
-
-/*
  * Using a temporary mm allows to set temporary mappings that are not accessible
  * by other CPUs. Such mappings are needed to perform sensitive memory writes
  * that override the kernel memory protections (e.g., W^X), without exposing the
```
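A detail worth noting in the hunk above is the `#define enter_lazy_tlb enter_lazy_tlb` line that moves along with the function: the generic MM headers only supply a no-op fallback when an architecture has not claimed the name as a macro, so keeping the macro next to the new inline definition is what makes the x86 version take effect. Below is a minimal, self-contained sketch of that override pattern; the stub `struct mm_struct`/`struct task_struct` types and the `printf()` body are illustrative stand-ins, not kernel code.

```c
/*
 * Standalone sketch of the "#define name name" override pattern used by
 * the patch. The stub types and printf body are assumptions for
 * illustration; in the kernel the fallback lives in a generic header
 * guarded by #ifndef enter_lazy_tlb.
 */
#include <stdio.h>

struct mm_struct { int id; };
struct task_struct { int pid; };

/* "arch header": define the macro and an always-inline implementation */
#define enter_lazy_tlb enter_lazy_tlb
static inline __attribute__((always_inline))
void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
{
	(void)tsk;
	printf("arch enter_lazy_tlb() for mm %d\n", mm->id);
}

/* "generic header": compiled only when no arch override claimed the name */
#ifndef enter_lazy_tlb
static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
{
	(void)mm; (void)tsk;	/* generic no-op fallback */
}
#endif

int main(void)
{
	struct mm_struct mm = { .id = 1 };
	struct task_struct tsk = { .pid = 42 };

	enter_lazy_tlb(&mm, &tsk);	/* resolves to the "arch" version */
	return 0;
}
```

The same mechanism explains why the old `#define`/`extern` pair is dropped from mmu_context.h: in a translation unit that includes both headers, an extern prototype followed by a static inline definition of the same name would not compile.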
