summaryrefslogtreecommitdiff
path: root/arch/x86/kernel/entry_64.S
diff options
context:
space:
mode:
author    H. Peter Anvin <hpa@zytor.com>    2008-11-11 13:51:52 -0800
committer H. Peter Anvin <hpa@zytor.com>    2008-11-11 13:51:52 -0800
commit 939b787130bf22887a09d8fd2641a094dcef8c22 (patch)
tree 6bdd272bb742bf2916d35c04cb8a6dd24e2dd135 /arch/x86/kernel/entry_64.S
parent b7c6244f13d37592003b46e12500a90e9781ad9d (diff)
x86: 64 bits: shrink and align IRQ stubs
Move the IRQ stub generation to assembly to simplify it and for consistency with 32 bits. Doing it in a C file with asm() statements doesn't help clarity, and it prevents some optimizations. Shrink the IRQ stubs down to just over four bytes per (we fit seven into a 32-byte chunk.) This shrinks the total icache consumption of the IRQ stubs down to an even kilobyte, if all of them are in active use. The downside is that we end up with a double jump, which could have a negative effect on some pipelines. The double jump is always inside the same cacheline on any modern chips. To get the most effect, cache-align the IRQ stubs. This makes the 64-bit code match changes already done to the 32-bit code, and should open up irqinit*.c for unification. Signed-off-by: H. Peter Anvin <hpa@zytor.com>
Diffstat (limited to 'arch/x86/kernel/entry_64.S')
-rw-r--r--  arch/x86/kernel/entry_64.S | 48
1 file changed, 45 insertions(+), 3 deletions(-)
diff --git a/arch/x86/kernel/entry_64.S b/arch/x86/kernel/entry_64.S
index b86f332c96a6..9b2aeaac9a6b 100644
--- a/arch/x86/kernel/entry_64.S
+++ b/arch/x86/kernel/entry_64.S
@@ -627,6 +627,46 @@ END(stub_rt_sigreturn)
vector already pushed) */
#define XCPT_FRAME _frame ORIG_RAX
+/*
+ * Build the entry stubs and pointer table with some assembler magic.
+ * We pack 7 stubs into a single 32-byte chunk, which will fit in a
+ * single cache line on all modern x86 implementations.
+ */
+ .section .init.rodata,"a"
+ENTRY(interrupt)
+ .text
+ .p2align 5
+ .p2align CONFIG_X86_L1_CACHE_SHIFT
+ENTRY(irq_entries_start)
+ INTR_FRAME
+vector=FIRST_EXTERNAL_VECTOR
+.rept (NR_VECTORS-FIRST_EXTERNAL_VECTOR+6)/7
+ .balign 32
+ .rept 7
+ .if vector < NR_VECTORS
+ .if vector != FIRST_EXTERNAL_VECTOR
+ CFI_ADJUST_CFA_OFFSET -8
+ .endif
+1: pushq $(~vector+0x80) /* Note: always in signed byte range */
+ CFI_ADJUST_CFA_OFFSET 8
+ .if ((vector-FIRST_EXTERNAL_VECTOR)%7) != 6
+ jmp 2f
+ .endif
+ .previous
+ .quad 1b
+ .text
+vector=vector+1
+ .endif
+ .endr
+2: jmp common_interrupt
+.endr
+ CFI_ENDPROC
+END(irq_entries_start)
+
+.previous
+END(interrupt)
+.previous
+
/*
* Interrupt entry/exit.
*
@@ -635,11 +675,12 @@ END(stub_rt_sigreturn)
* Entry runs with interrupts off.
*/
-/* 0(%rsp): interrupt number */
+/* 0(%rsp): ~(interrupt number)+0x80 */
.macro interrupt func
+ addq $-0x80,(%rsp) /* Adjust vector to [-256,-1] range */
cld
SAVE_ARGS
- leaq -ARGOFFSET(%rsp),%rdi # arg1 for handler
+ leaq -ARGOFFSET(%rsp),%rdi /* arg1 for handler */
pushq %rbp
/*
* Save rbp twice: One is for marking the stack frame, as usual, and the
@@ -670,7 +711,8 @@ END(stub_rt_sigreturn)
call \func
.endm
-ENTRY(common_interrupt)
+ .p2align CONFIG_X86_L1_CACHE_SHIFT
+common_interrupt:
XCPT_FRAME
interrupt do_IRQ
/* 0(%rsp): oldrsp-ARGOFFSET */