Diffstat (limited to 'include/asm-x86_64')
-rw-r--r--  include/asm-x86_64/bitops.h             |  3
-rw-r--r--  include/asm-x86_64/bug.h                | 13
-rw-r--r--  include/asm-x86_64/desc.h               |  1
-rw-r--r--  include/asm-x86_64/emergency-restart.h  |  6
-rw-r--r--  include/asm-x86_64/ia32_unistd.h        |  7
-rw-r--r--  include/asm-x86_64/ipi.h                | 45
-rw-r--r--  include/asm-x86_64/irq.h                |  2
-rw-r--r--  include/asm-x86_64/msr.h                |  2
-rw-r--r--  include/asm-x86_64/pgtable.h            |  2
-rw-r--r--  include/asm-x86_64/smp.h                |  6
-rw-r--r--  include/asm-x86_64/system.h             |  7
-rw-r--r--  include/asm-x86_64/tlbflush.h           |  9
-rw-r--r--  include/asm-x86_64/unistd.h             | 10
13 files changed, 69 insertions, 44 deletions
diff --git a/include/asm-x86_64/bitops.h b/include/asm-x86_64/bitops.h
index a31bb99be53f..05a0d374404b 100644
--- a/include/asm-x86_64/bitops.h
+++ b/include/asm-x86_64/bitops.h
@@ -348,8 +348,7 @@ static inline int sched_find_first_bit(const unsigned long *b)
 		return __ffs(b[0]);
 	if (b[1])
 		return __ffs(b[1]) + 64;
-	if (b[2])
-		return __ffs(b[2]) + 128;
+	return __ffs(b[2]) + 128;
 }
 
 /**
diff --git a/include/asm-x86_64/bug.h b/include/asm-x86_64/bug.h
index 3d2a666a5dd5..eed785667289 100644
--- a/include/asm-x86_64/bug.h
+++ b/include/asm-x86_64/bug.h
@@ -8,17 +8,24 @@
  * this frame.
  */
 struct bug_frame {
-	unsigned char ud2[2];
+	unsigned char ud2[2];
+	unsigned char mov;
 	/* should use 32bit offset instead, but the assembler doesn't like it */
 	char *filename;
+	unsigned char ret;
 	unsigned short line;
 } __attribute__((packed));
 
 #ifdef CONFIG_BUG
 #define HAVE_ARCH_BUG
-#define BUG() \
-	asm volatile("ud2 ; .quad %c1 ; .short %c0" :: \
+/* We turn the bug frame into valid instructions to not confuse
+   the disassembler. Thanks to Jan Beulich & Suresh Siddha
+   for nice instruction selection.
+   The magic numbers generate mov $64bitimm,%eax ; ret $offset. */
+#define BUG() \
+	asm volatile( \
+	"ud2 ; .byte 0xa3 ; .quad %c1 ; .byte 0xc2 ; .short %c0" :: \
 		"i"(__LINE__), "i" (__stringify(__FILE__)))
 void out_of_line_bug(void);
 #else
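The bug.h change above packs __FILE__ and __LINE__ into bytes that also decode as valid instructions (a mov opcode byte before the 64-bit pointer, a ret imm16 opcode byte before the 16-bit line number), so a disassembler walking past the ud2 does not lose sync. The stand-alone C sketch below only mirrors the packed layout to show how the pieces add up to a 14-byte record on x86-64; it is an illustration, not kernel code.

/* Stand-alone illustration of the bug_frame layout from the hunk above.
 * It mirrors the packed struct to show the byte accounting
 * (2 + 1 + 8 + 1 + 2 = 14 on x86-64); it is not the kernel's decoder. */
#include <stdio.h>
#include <stddef.h>

struct bug_frame {
        unsigned char ud2[2];   /* 0x0f 0x0b */
        unsigned char mov;      /* 0xa3: mov opcode byte, so the quad decodes as its operand */
        char *filename;         /* 64-bit immediate: pointer to the __FILE__ string */
        unsigned char ret;      /* 0xc2: ret imm16 opcode byte */
        unsigned short line;    /* 16-bit immediate: __LINE__ */
} __attribute__((packed));

int main(void)
{
        printf("sizeof(struct bug_frame) = %zu\n", sizeof(struct bug_frame));
        printf("filename offset = %zu, line offset = %zu\n",
               offsetof(struct bug_frame, filename),
               offsetof(struct bug_frame, line));
        return 0;
}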
diff --git a/include/asm-x86_64/desc.h b/include/asm-x86_64/desc.h
index 6aefb9c0280d..c89b58bebee2 100644
--- a/include/asm-x86_64/desc.h
+++ b/include/asm-x86_64/desc.h
@@ -75,6 +75,7 @@ struct desc_ptr {
  */
 extern struct desc_struct default_ldt[];
 extern struct gate_struct idt_table[];
+extern struct desc_ptr cpu_gdt_descr[];
 
 static inline void _set_gate(void *adr, unsigned type, unsigned long func, unsigned dpl, unsigned ist)
 {
diff --git a/include/asm-x86_64/emergency-restart.h b/include/asm-x86_64/emergency-restart.h
new file mode 100644
index 000000000000..680c39563345
--- /dev/null
+++ b/include/asm-x86_64/emergency-restart.h
@@ -0,0 +1,6 @@
+#ifndef _ASM_EMERGENCY_RESTART_H
+#define _ASM_EMERGENCY_RESTART_H
+
+extern void machine_emergency_restart(void);
+
+#endif /* _ASM_EMERGENCY_RESTART_H */
diff --git a/include/asm-x86_64/ia32_unistd.h b/include/asm-x86_64/ia32_unistd.h
index f3b7111cf33d..d5166ec3868d 100644
--- a/include/asm-x86_64/ia32_unistd.h
+++ b/include/asm-x86_64/ia32_unistd.h
@@ -294,7 +294,12 @@
 #define __NR_ia32_add_key		286
 #define __NR_ia32_request_key		287
 #define __NR_ia32_keyctl		288
+#define __NR_ia32_ioprio_set		289
+#define __NR_ia32_ioprio_get		290
+#define __NR_ia32_inotify_init		291
+#define __NR_ia32_inotify_add_watch	292
+#define __NR_ia32_inotify_rm_watch	293
 
-#define IA32_NR_syscalls 290	/* must be > than biggest syscall! */
+#define IA32_NR_syscalls 294	/* must be > than biggest syscall! */
 
 #endif /* _ASM_X86_64_IA32_UNISTD_H_ */
diff --git a/include/asm-x86_64/ipi.h b/include/asm-x86_64/ipi.h
index d1841847ed89..5e166b9d3bde 100644
--- a/include/asm-x86_64/ipi.h
+++ b/include/asm-x86_64/ipi.h
@@ -82,30 +82,27 @@ static inline void send_IPI_mask_sequence(cpumask_t mask, int vector)
 	 */
 	local_irq_save(flags);
 
-	for (query_cpu = 0; query_cpu < NR_CPUS; ++query_cpu) {
-		if (cpu_isset(query_cpu, mask)) {
-
-			/*
-			 * Wait for idle.
-			 */
-			apic_wait_icr_idle();
-
-			/*
-			 * prepare target chip field
-			 */
-			cfg = __prepare_ICR2(x86_cpu_to_apicid[query_cpu]);
-			apic_write_around(APIC_ICR2, cfg);
-
-			/*
-			 * program the ICR
-			 */
-			cfg = __prepare_ICR(0, vector, APIC_DEST_PHYSICAL);
-
-			/*
-			 * Send the IPI. The write to APIC_ICR fires this off.
-			 */
-			apic_write_around(APIC_ICR, cfg);
-		}
+	for_each_cpu_mask(query_cpu, mask) {
+		/*
+		 * Wait for idle.
+		 */
+		apic_wait_icr_idle();
+
+		/*
+		 * prepare target chip field
+		 */
+		cfg = __prepare_ICR2(x86_cpu_to_apicid[query_cpu]);
+		apic_write_around(APIC_ICR2, cfg);
+
+		/*
+		 * program the ICR
+		 */
+		cfg = __prepare_ICR(0, vector, APIC_DEST_PHYSICAL);
+
+		/*
+		 * Send the IPI. The write to APIC_ICR fires this off.
+		 */
+		apic_write_around(APIC_ICR, cfg);
 	}
 	local_irq_restore(flags);
 }
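The ipi.h hunk above replaces a loop over every possible CPU index guarded by cpu_isset() with for_each_cpu_mask(), which visits only the CPUs actually set in the mask. The stand-alone sketch below models that iteration change with a plain 64-bit mask; it does not use the kernel's cpumask_t or the real for_each_cpu_mask macro, it just shows why the two loops target the same CPUs while the new one does work proportional to the number of targets rather than NR_CPUS.

/* User-space model of the iteration change in the ipi.h hunk above.
 * The 64-bit mask stands in for cpumask_t. */
#include <stdio.h>
#include <stdint.h>

static void send_ipi_model(uint64_t mask)
{
        /* Old style: scan every index and test membership. */
        for (int cpu = 0; cpu < 64; ++cpu)
                if (mask & (1ULL << cpu))
                        printf("old loop: IPI to cpu %d\n", cpu);

        /* New style: clear one set bit per iteration, so the body runs
         * exactly once per target CPU. */
        for (uint64_t m = mask; m; m &= m - 1)
                printf("new loop: IPI to cpu %d\n", __builtin_ctzll(m));
}

int main(void)
{
        send_ipi_model((1ULL << 0) | (1ULL << 3) | (1ULL << 17));
        return 0;
}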
diff --git a/include/asm-x86_64/irq.h b/include/asm-x86_64/irq.h
index eb3b7aa9eb9f..4482657777bb 100644
--- a/include/asm-x86_64/irq.h
+++ b/include/asm-x86_64/irq.h
@@ -57,4 +57,6 @@ int handle_IRQ_event(unsigned int, struct pt_regs *, struct irqaction *);
 extern void fixup_irqs(cpumask_t map);
 #endif
 
+#define __ARCH_HAS_DO_SOFTIRQ 1
+
 #endif /* _ASM_IRQ_H */
diff --git a/include/asm-x86_64/msr.h b/include/asm-x86_64/msr.h
index bc700232728d..ba15279a79d0 100644
--- a/include/asm-x86_64/msr.h
+++ b/include/asm-x86_64/msr.h
@@ -218,7 +218,7 @@ extern inline unsigned int cpuid_edx(unsigned int op)
 #define MSR_K7_PERFCTR3		0xC0010007
 #define MSR_K8_TOP_MEM1		0xC001001A
 #define MSR_K8_TOP_MEM2		0xC001001D
-#define MSR_K8_SYSCFG		0xC0000010
+#define MSR_K8_SYSCFG		0xC0010010
 
 /* K6 MSRs */
 #define MSR_K6_EFER		0xC0000080
diff --git a/include/asm-x86_64/pgtable.h b/include/asm-x86_64/pgtable.h
index 4eec176c3c39..4e167b5ea8f3 100644
--- a/include/asm-x86_64/pgtable.h
+++ b/include/asm-x86_64/pgtable.h
@@ -176,6 +176,8 @@ extern inline void pgd_clear (pgd_t * pgd)
 	(_PAGE_PRESENT | _PAGE_USER | _PAGE_ACCESSED | _PAGE_PCD)
 #define __PAGE_KERNEL_LARGE \
 	(__PAGE_KERNEL | _PAGE_PSE)
+#define __PAGE_KERNEL_LARGE_EXEC \
+	(__PAGE_KERNEL_EXEC | _PAGE_PSE)
 
 #define MAKE_GLOBAL(x) __pgprot((x) | _PAGE_GLOBAL)
diff --git a/include/asm-x86_64/smp.h b/include/asm-x86_64/smp.h
index aeb1b73e21e1..de8b57b2b62b 100644
--- a/include/asm-x86_64/smp.h
+++ b/include/asm-x86_64/smp.h
@@ -46,12 +46,12 @@ extern int pic_mode;
 extern void lock_ipi_call_lock(void);
 extern void unlock_ipi_call_lock(void);
 extern int smp_num_siblings;
-extern void smp_flush_tlb(void);
-extern void smp_message_irq(int cpl, void *dev_id, struct pt_regs *regs);
 extern void smp_send_reschedule(int cpu);
-extern void smp_invalidate_rcv(void);		/* Process an NMI */
 extern void zap_low_mappings(void);
 void smp_stop_cpu(void);
+extern int smp_call_function_single(int cpuid, void (*func) (void *info),
+				void *info, int retry, int wait);
+
 extern cpumask_t cpu_sibling_map[NR_CPUS];
 extern cpumask_t cpu_core_map[NR_CPUS];
 extern u8 phys_proc_id[NR_CPUS];
diff --git a/include/asm-x86_64/system.h b/include/asm-x86_64/system.h
index 76165736e43a..8606e170a7dc 100644
--- a/include/asm-x86_64/system.h
+++ b/include/asm-x86_64/system.h
@@ -116,12 +116,12 @@ struct alt_instr {
 /*
  * Alternative inline assembly with input.
  *
- * Pecularities:
+ * Peculiarities:
  * No memory clobber here.
  * Argument numbers start with 1.
  * Best is to use constraints that are fixed size (like (%1) ... "r")
  * If you use variable sized constraints like "m" or "g" in the
- * replacement maake sure to pad to the worst case length.
+ * replacement make sure to pad to the worst case length.
  */
 #define alternative_input(oldinstr, newinstr, feature, input...)	\
 	asm volatile ("661:\n\t" oldinstr "\n662:\n"			\
@@ -335,9 +335,6 @@ void cpu_idle_wait(void);
 void disable_hlt(void);
 void enable_hlt(void);
 
-#define HAVE_EAT_KEY
-void eat_key(void);
-
 extern unsigned long arch_align_stack(unsigned long sp);
 
 #endif
diff --git a/include/asm-x86_64/tlbflush.h b/include/asm-x86_64/tlbflush.h
index 061742382520..505b0cf906de 100644
--- a/include/asm-x86_64/tlbflush.h
+++ b/include/asm-x86_64/tlbflush.h
@@ -56,8 +56,9 @@ extern unsigned long pgkern_mask;
 * - flush_tlb_kernel_range(start, end) flushes a range of kernel pages
 * - flush_tlb_pgtables(mm, start, end) flushes a range of page tables
 *
- * ..but the x86_64 has somewhat limited tlb flushing capabilities,
- * and page-granular flushes are available only on i486 and up.
+ * x86-64 can only flush individual pages or full VMs. For a range flush
+ * we always do the full VM. Might be worth trying if for a small
+ * range a few INVLPGs in a row are a win.
 */
 
 #ifndef CONFIG_SMP
@@ -115,7 +116,9 @@ static inline void flush_tlb_range(struct vm_area_struct * vma, unsigned long st
 static inline void flush_tlb_pgtables(struct mm_struct *mm,
 				      unsigned long start, unsigned long end)
 {
-	/* x86_64 does not keep any page table caches in TLB */
+	/* x86_64 does not keep any page table caches in a software TLB.
+	   The CPUs do in their hardware TLBs, but they are handled
+	   by the normal TLB flushing algorithms. */
 }
 
 #endif /* _X8664_TLBFLUSH_H */
diff --git a/include/asm-x86_64/unistd.h b/include/asm-x86_64/unistd.h
index 6560439a83e4..11ba931cf82f 100644
--- a/include/asm-x86_64/unistd.h
+++ b/include/asm-x86_64/unistd.h
@@ -565,8 +565,14 @@ __SYSCALL(__NR_keyctl, sys_keyctl)
 __SYSCALL(__NR_ioprio_set, sys_ioprio_set)
 #define __NR_ioprio_get		252
 __SYSCALL(__NR_ioprio_get, sys_ioprio_get)
-
-#define __NR_syscall_max __NR_ioprio_get
+#define __NR_inotify_init	253
+__SYSCALL(__NR_inotify_init, sys_inotify_init)
+#define __NR_inotify_add_watch	254
+__SYSCALL(__NR_inotify_add_watch, sys_inotify_add_watch)
+#define __NR_inotify_rm_watch	255
+__SYSCALL(__NR_inotify_rm_watch, sys_inotify_rm_watch)
+
+#define __NR_syscall_max __NR_inotify_rm_watch
 
 #ifndef __NO_STUBS
 
 /* user-visible error numbers are in the range -1 - -4095 */
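The unistd.h hunk above assigns x86-64 syscall numbers 253-255 to the inotify calls and moves __NR_syscall_max accordingly (the ia32_unistd.h hunk earlier does the same for the 32-bit compat table). The sketch below exercises those numbers directly through syscall(2), using only the values visible in the patch; it assumes a kernel with inotify wired up, and the IN_MODIFY value matches <linux/inotify.h>. Real programs would use the <sys/inotify.h> wrappers rather than hard-coded numbers.

/* Sketch of calling the new x86-64 inotify syscall numbers from the hunk
 * above through the raw syscall(2) interface. Illustration only. */
#include <stdio.h>
#include <unistd.h>

#define NR_inotify_init       253  /* __NR_inotify_init in the patch */
#define NR_inotify_add_watch  254  /* __NR_inotify_add_watch */
#define NR_inotify_rm_watch   255  /* __NR_inotify_rm_watch */

#define IN_MODIFY 0x00000002       /* event mask bit from <linux/inotify.h> */

int main(void)
{
        long fd = syscall(NR_inotify_init);
        if (fd < 0) {
                perror("inotify_init");
                return 1;
        }
        long wd = syscall(NR_inotify_add_watch, fd, "/tmp", IN_MODIFY);
        if (wd < 0)
                perror("inotify_add_watch");
        else
                syscall(NR_inotify_rm_watch, fd, wd);
        return 0;
}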