summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorUros Bizjak <ubizjak@gmail.com>2026-01-14 22:18:15 +0100
committerBorislav Petkov (AMD) <bp@alien8.de>2026-01-15 11:44:29 +0100
commit31911d3c394d6556a67ff63cf0093049ef6dcdd7 (patch)
tree023699fe35083bdd83395da2a4e0af73a14cded5
parentbaea32b242be8ff857cc27b910c6c325c24a7247 (diff)
x86/paravirt: Use XOR r32,r32 to clear register in pv_vcpu_is_preempted()
x86_64 zero extends 32bit operations, so for 64bit operands, XOR r32,r32 is functionally equal to XOR r64,r64, but avoids a REX prefix byte when legacy registers are used.

Signed-off-by: Uros Bizjak <ubizjak@gmail.com>
Signed-off-by: Borislav Petkov (AMD) <bp@alien8.de>
Reviewed-by: Juergen Gross <jgross@suse.com>
Acked-by: H. Peter Anvin <hpa@zytor.com>
Acked-by: Alexey Makhalov <alexey.makhalov@broadcom.com>
Link: https://patch.msgid.link/20260114211948.74774-2-ubizjak@gmail.com
-rw-r--r--arch/x86/include/asm/paravirt-spinlock.h2
1 files changed, 1 insertions, 1 deletions
diff --git a/arch/x86/include/asm/paravirt-spinlock.h b/arch/x86/include/asm/paravirt-spinlock.h
index 458b888aba84..7beffcb08ed6 100644
--- a/arch/x86/include/asm/paravirt-spinlock.h
+++ b/arch/x86/include/asm/paravirt-spinlock.h
@@ -45,7 +45,7 @@ static __always_inline void pv_queued_spin_unlock(struct qspinlock *lock)
static __always_inline bool pv_vcpu_is_preempted(long cpu)
{
return PVOP_ALT_CALLEE1(bool, pv_ops_lock, vcpu_is_preempted, cpu,
- "xor %%" _ASM_AX ", %%" _ASM_AX,
+ "xor %%eax, %%eax",
ALT_NOT(X86_FEATURE_VCPUPREEMPT));
}