Diffstat (limited to 'include/asm-i386')
-rw-r--r--   include/asm-i386/bugs.h                             |  2
-rw-r--r--   include/asm-i386/elf.h                              |  2
-rw-r--r--   include/asm-i386/hw_irq.h                           |  3
-rw-r--r--   include/asm-i386/hypertransport.h                   | 42
-rw-r--r--   include/asm-i386/io_apic.h                          | 42
-rw-r--r--   include/asm-i386/mach-default/irq_vectors_limits.h  |  5
-rw-r--r--   include/asm-i386/mach-summit/mach_apic.h            |  2
-rw-r--r--   include/asm-i386/mca_dma.h                          |  3
-rw-r--r--   include/asm-i386/msi.h                              | 23
-rw-r--r--   include/asm-i386/msidef.h                           | 47
-rw-r--r--   include/asm-i386/pgtable-2level.h                   |  1
-rw-r--r--   include/asm-i386/pgtable-3level.h                   | 16
-rw-r--r--   include/asm-i386/pgtable.h                          | 80
-rw-r--r--   include/asm-i386/ptrace.h                           |  3
-rw-r--r--   include/asm-i386/smp.h                              |  2
-rw-r--r--   include/asm-i386/spinlock.h                         |  4
-rw-r--r--   include/asm-i386/topology.h                         |  1
-rw-r--r--   include/asm-i386/unistd.h                           | 39
18 files changed, 186 insertions, 131 deletions
diff --git a/include/asm-i386/bugs.h b/include/asm-i386/bugs.h
index 2a9e4ee5904d..592ffeeda45e 100644
--- a/include/asm-i386/bugs.h
+++ b/include/asm-i386/bugs.h
@@ -189,6 +189,6 @@ static void __init check_bugs(void)
 	check_fpu();
 	check_hlt();
 	check_popad();
-	system_utsname.machine[1] = '0' + (boot_cpu_data.x86 > 6 ? 6 : boot_cpu_data.x86);
+	init_utsname()->machine[1] = '0' + (boot_cpu_data.x86 > 6 ? 6 : boot_cpu_data.x86);
 	alternative_instructions();
 }
diff --git a/include/asm-i386/elf.h b/include/asm-i386/elf.h
index db4344d9f73f..3a05436f31c0 100644
--- a/include/asm-i386/elf.h
+++ b/include/asm-i386/elf.h
@@ -112,7 +112,7 @@ typedef struct user_fxsr_struct elf_fpxregset_t;
    For the moment, we have only optimizations for the Intel generations,
    but that could change... */
 
-#define ELF_PLATFORM (system_utsname.machine)
+#define ELF_PLATFORM (utsname()->machine)
 
 #define SET_PERSONALITY(ex, ibcs2) do { } while (0)
 
diff --git a/include/asm-i386/hw_irq.h b/include/asm-i386/hw_irq.h
index 87e5a351d881..88f02a073561 100644
--- a/include/asm-i386/hw_irq.h
+++ b/include/asm-i386/hw_irq.h
@@ -17,8 +17,6 @@
 #include <asm/irq.h>
 #include <asm/sections.h>
 
-struct hw_interrupt_type;
-
 #define NMI_VECTOR 0x02
 
 /*
@@ -30,7 +28,6 @@ struct hw_interrupt_type;
 extern u8 irq_vector[NR_IRQ_VECTORS];
 #define IO_APIC_VECTOR(irq) (irq_vector[irq])
-#define AUTO_ASSIGN -1
 
 extern void (*interrupt[NR_IRQS])(void);
 
diff --git a/include/asm-i386/hypertransport.h b/include/asm-i386/hypertransport.h
new file mode 100644
index 000000000000..c16c6ff4bdd7
--- /dev/null
+++ b/include/asm-i386/hypertransport.h
@@ -0,0 +1,42 @@
+#ifndef ASM_HYPERTRANSPORT_H
+#define ASM_HYPERTRANSPORT_H
+
+/*
+ * Constants for x86 Hypertransport Interrupts.
+ */
+
+#define HT_IRQ_LOW_BASE			0xf8000000
+
+#define HT_IRQ_LOW_VECTOR_SHIFT		16
+#define HT_IRQ_LOW_VECTOR_MASK		0x00ff0000
+#define HT_IRQ_LOW_VECTOR(v)		(((v) << HT_IRQ_LOW_VECTOR_SHIFT) & HT_IRQ_LOW_VECTOR_MASK)
+
+#define HT_IRQ_LOW_DEST_ID_SHIFT	8
+#define HT_IRQ_LOW_DEST_ID_MASK		0x0000ff00
+#define HT_IRQ_LOW_DEST_ID(v)		(((v) << HT_IRQ_LOW_DEST_ID_SHIFT) & HT_IRQ_LOW_DEST_ID_MASK)
+
+#define HT_IRQ_LOW_DM_PHYSICAL		0x0000000
+#define HT_IRQ_LOW_DM_LOGICAL		0x0000040
+
+#define HT_IRQ_LOW_RQEOI_EDGE		0x0000000
+#define HT_IRQ_LOW_RQEOI_LEVEL		0x0000020
+
+
+#define HT_IRQ_LOW_MT_FIXED		0x0000000
+#define HT_IRQ_LOW_MT_ARBITRATED	0x0000004
+#define HT_IRQ_LOW_MT_SMI		0x0000008
+#define HT_IRQ_LOW_MT_NMI		0x000000c
+#define HT_IRQ_LOW_MT_INIT		0x0000010
+#define HT_IRQ_LOW_MT_STARTUP		0x0000014
+#define HT_IRQ_LOW_MT_EXTINT		0x0000018
+#define HT_IRQ_LOW_MT_LINT1		0x000008c
+#define HT_IRQ_LOW_MT_LINT0		0x0000098
+
+#define HT_IRQ_LOW_IRQ_MASKED		0x0000001
+
+
+#define HT_IRQ_HIGH_DEST_ID_SHIFT	0
+#define HT_IRQ_HIGH_DEST_ID_MASK	0x00ffffff
+#define HT_IRQ_HIGH_DEST_ID(v)		((((v) >> 8) << HT_IRQ_HIGH_DEST_ID_SHIFT) & HT_IRQ_HIGH_DEST_ID_MASK)
+
+#endif /* ASM_HYPERTRANSPORT_H */
diff --git a/include/asm-i386/io_apic.h b/include/asm-i386/io_apic.h
index 5d309275a1dc..276ea7e8144a 100644
--- a/include/asm-i386/io_apic.h
+++ b/include/asm-i386/io_apic.h
@@ -12,46 +12,6 @@
 
 #ifdef CONFIG_X86_IO_APIC
 
-#ifdef CONFIG_PCI_MSI
-static inline int use_pci_vector(void) {return 1;}
-static inline void disable_edge_ioapic_vector(unsigned int vector) { }
-static inline void mask_and_ack_level_ioapic_vector(unsigned int vector) { }
-static inline void end_edge_ioapic_vector (unsigned int vector) { }
-#define startup_level_ioapic	startup_level_ioapic_vector
-#define shutdown_level_ioapic	mask_IO_APIC_vector
-#define enable_level_ioapic	unmask_IO_APIC_vector
-#define disable_level_ioapic	mask_IO_APIC_vector
-#define mask_and_ack_level_ioapic	mask_and_ack_level_ioapic_vector
-#define end_level_ioapic	end_level_ioapic_vector
-#define set_ioapic_affinity	set_ioapic_affinity_vector
-
-#define startup_edge_ioapic	startup_edge_ioapic_vector
-#define shutdown_edge_ioapic	disable_edge_ioapic_vector
-#define enable_edge_ioapic	unmask_IO_APIC_vector
-#define disable_edge_ioapic	disable_edge_ioapic_vector
-#define ack_edge_ioapic	ack_edge_ioapic_vector
-#define end_edge_ioapic	end_edge_ioapic_vector
-#else
-static inline int use_pci_vector(void) {return 0;}
-static inline void disable_edge_ioapic_irq(unsigned int irq) { }
-static inline void mask_and_ack_level_ioapic_irq(unsigned int irq) { }
-static inline void end_edge_ioapic_irq (unsigned int irq) { }
-#define startup_level_ioapic	startup_level_ioapic_irq
-#define shutdown_level_ioapic	mask_IO_APIC_irq
-#define enable_level_ioapic	unmask_IO_APIC_irq
-#define disable_level_ioapic	mask_IO_APIC_irq
-#define mask_and_ack_level_ioapic	mask_and_ack_level_ioapic_irq
-#define end_level_ioapic	end_level_ioapic_irq
-#define set_ioapic_affinity	set_ioapic_affinity_irq
-
-#define startup_edge_ioapic	startup_edge_ioapic_irq
-#define shutdown_edge_ioapic	disable_edge_ioapic_irq
-#define enable_edge_ioapic	unmask_IO_APIC_irq
-#define disable_edge_ioapic	disable_edge_ioapic_irq
-#define ack_edge_ioapic	ack_edge_ioapic_irq
-#define end_edge_ioapic	end_edge_ioapic_irq
-#endif
-
 #define IO_APIC_BASE(idx) \
 		((volatile int *)(__fix_to_virt(FIX_IO_APIC_BASE_0 + idx) \
 		+ (mp_ioapics[idx].mpc_apicaddr & ~PAGE_MASK)))
@@ -219,6 +179,4 @@ extern int (*ioapic_renumber_irq)(int ioapic, int irq);
 static inline void disable_ioapic_setup(void) { }
 #endif
 
-extern int assign_irq_vector(int irq);
-
 #endif
diff --git a/include/asm-i386/mach-default/irq_vectors_limits.h b/include/asm-i386/mach-default/irq_vectors_limits.h
index b330026e6f7f..7f161e760be6 100644
--- a/include/asm-i386/mach-default/irq_vectors_limits.h
+++ b/include/asm-i386/mach-default/irq_vectors_limits.h
@@ -1,10 +1,6 @@
 #ifndef _ASM_IRQ_VECTORS_LIMITS_H
 #define _ASM_IRQ_VECTORS_LIMITS_H
 
-#ifdef CONFIG_PCI_MSI
-#define NR_IRQS FIRST_SYSTEM_VECTOR
-#define NR_IRQ_VECTORS NR_IRQS
-#else
 #ifdef CONFIG_X86_IO_APIC
 #define NR_IRQS 224
 # if (224 >= 32 * NR_CPUS)
@@ -16,6 +12,5 @@
 #define NR_IRQS 16
 #define NR_IRQ_VECTORS NR_IRQS
 #endif
-#endif
 
 #endif /* _ASM_IRQ_VECTORS_LIMITS_H */
diff --git a/include/asm-i386/mach-summit/mach_apic.h b/include/asm-i386/mach-summit/mach_apic.h
index 254a0fe01c6a..ef0671e5d5c5 100644
--- a/include/asm-i386/mach-summit/mach_apic.h
+++ b/include/asm-i386/mach-summit/mach_apic.h
@@ -88,7 +88,7 @@ static inline void clustered_apic_check(void)
 
 static inline int apicid_to_node(int logical_apicid)
 {
-	return apicid_2_node[logical_apicid];
+	return apicid_2_node[hard_smp_processor_id()];
 }
 
 /* Mapping from cpu number to logical apicid */
diff --git a/include/asm-i386/mca_dma.h b/include/asm-i386/mca_dma.h
index 4b3b526c5a3f..fbb1f3b71279 100644
--- a/include/asm-i386/mca_dma.h
+++ b/include/asm-i386/mca_dma.h
@@ -181,7 +181,7 @@ static __inline__ void mca_set_dma_io(unsigned int dmanr, unsigned int io_addr)
  *	@mode: mode to set
  *
  *	The DMA controller supports several modes.  The mode values you can
- *	set are :
+ *	set are-
  *
  *	%MCA_DMA_MODE_READ when reading from the DMA device.
  *
@@ -190,7 +190,6 @@ static __inline__ void mca_set_dma_io(unsigned int dmanr, unsigned int io_addr)
  *	%MCA_DMA_MODE_IO to do DMA to or from an I/O port.
  *
  *	%MCA_DMA_MODE_16 to do 16bit transfers.
- *
  */
 
 static __inline__ void mca_set_dma_mode(unsigned int dmanr, unsigned int mode)
diff --git a/include/asm-i386/msi.h b/include/asm-i386/msi.h
deleted file mode 100644
index b11c4b7dfaef..000000000000
--- a/include/asm-i386/msi.h
+++ /dev/null
@@ -1,23 +0,0 @@
-/*
- * Copyright (C) 2003-2004 Intel
- * Copyright (C) Tom Long Nguyen (tom.l.nguyen@intel.com)
- */
-
-#ifndef ASM_MSI_H
-#define ASM_MSI_H
-
-#include <asm/desc.h>
-#include <mach_apic.h>
-
-#define LAST_DEVICE_VECTOR	(FIRST_SYSTEM_VECTOR - 1)
-#define MSI_TARGET_CPU_SHIFT	12
-
-extern struct msi_ops msi_apic_ops;
-
-static inline int msi_arch_init(void)
-{
-	msi_register(&msi_apic_ops);
-	return 0;
-}
-
-#endif /* ASM_MSI_H */
diff --git a/include/asm-i386/msidef.h b/include/asm-i386/msidef.h
new file mode 100644
index 000000000000..5b8acddb70fb
--- /dev/null
+++ b/include/asm-i386/msidef.h
@@ -0,0 +1,47 @@
+#ifndef ASM_MSIDEF_H
+#define ASM_MSIDEF_H
+
+/*
+ * Constants for Intel APIC based MSI messages.
+ */
+
+/*
+ * Shifts for MSI data
+ */
+
+#define MSI_DATA_VECTOR_SHIFT		0
+#define MSI_DATA_VECTOR_MASK		0x000000ff
+#define MSI_DATA_VECTOR(v)		(((v) << MSI_DATA_VECTOR_SHIFT) & MSI_DATA_VECTOR_MASK)
+
+#define MSI_DATA_DELIVERY_MODE_SHIFT	8
+#define MSI_DATA_DELIVERY_FIXED		(0 << MSI_DATA_DELIVERY_MODE_SHIFT)
+#define MSI_DATA_DELIVERY_LOWPRI	(1 << MSI_DATA_DELIVERY_MODE_SHIFT)
+
+#define MSI_DATA_LEVEL_SHIFT		14
+#define MSI_DATA_LEVEL_DEASSERT		(0 << MSI_DATA_LEVEL_SHIFT)
+#define MSI_DATA_LEVEL_ASSERT		(1 << MSI_DATA_LEVEL_SHIFT)
+
+#define MSI_DATA_TRIGGER_SHIFT		15
+#define MSI_DATA_TRIGGER_EDGE		(0 << MSI_DATA_TRIGGER_SHIFT)
+#define MSI_DATA_TRIGGER_LEVEL		(1 << MSI_DATA_TRIGGER_SHIFT)
+
+/*
+ * Shift/mask fields for msi address
+ */
+
+#define MSI_ADDR_BASE_HI		0
+#define MSI_ADDR_BASE_LO		0xfee00000
+
+#define MSI_ADDR_DEST_MODE_SHIFT	2
+#define MSI_ADDR_DEST_MODE_PHYSICAL	(0 << MSI_ADDR_DEST_MODE_SHIFT)
+#define MSI_ADDR_DEST_MODE_LOGICAL	(1 << MSI_ADDR_DEST_MODE_SHIFT)
+
+#define MSI_ADDR_REDIRECTION_SHIFT	3
+#define MSI_ADDR_REDIRECTION_CPU	(0 << MSI_ADDR_REDIRECTION_SHIFT)	/* dedicated cpu */
+#define MSI_ADDR_REDIRECTION_LOWPRI	(1 << MSI_ADDR_REDIRECTION_SHIFT)	/* lowest priority */
+
+#define MSI_ADDR_DEST_ID_SHIFT		12
+#define MSI_ADDR_DEST_ID_MASK		0x00ffff0
+#define MSI_ADDR_DEST_ID(dest)		(((dest) << MSI_ADDR_DEST_ID_SHIFT) & MSI_ADDR_DEST_ID_MASK)
+
+#endif /* ASM_MSIDEF_H */
diff --git a/include/asm-i386/pgtable-2level.h b/include/asm-i386/pgtable-2level.h
index 201c86a6711e..8d8d3b9ecdb0 100644
--- a/include/asm-i386/pgtable-2level.h
+++ b/include/asm-i386/pgtable-2level.h
@@ -16,6 +16,7 @@
 #define set_pte(pteptr, pteval) (*(pteptr) = pteval)
 #define set_pte_at(mm,addr,ptep,pteval) set_pte(ptep,pteval)
 #define set_pte_atomic(pteptr, pteval) set_pte(pteptr,pteval)
+#define set_pte_present(mm,addr,ptep,pteval) set_pte_at(mm,addr,ptep,pteval)
 #define set_pmd(pmdptr, pmdval) (*(pmdptr) = (pmdval))
 
 #define pte_clear(mm,addr,xp) do { set_pte_at(mm, addr, xp, __pte(0)); } while (0)
diff --git a/include/asm-i386/pgtable-3level.h b/include/asm-i386/pgtable-3level.h
index 0d899173232e..c2d701ea35be 100644
--- a/include/asm-i386/pgtable-3level.h
+++ b/include/asm-i386/pgtable-3level.h
@@ -58,7 +58,21 @@ static inline void set_pte(pte_t *ptep, pte_t pte)
 }
 #define set_pte_at(mm,addr,ptep,pteval) set_pte(ptep,pteval)
 
-#define __HAVE_ARCH_SET_PTE_ATOMIC
+/*
+ * Since this is only called on user PTEs, and the page fault handler
+ * must handle the already racy situation of simultaneous page faults,
+ * we are justified in merely clearing the PTE present bit, followed
+ * by a set.  The ordering here is important.
+ */
+static inline void set_pte_present(struct mm_struct *mm, unsigned long addr, pte_t *ptep, pte_t pte)
+{
+	ptep->pte_low = 0;
+	smp_wmb();
+	ptep->pte_high = pte.pte_high;
+	smp_wmb();
+	ptep->pte_low = pte.pte_low;
+}
+
 #define set_pte_atomic(pteptr,pteval) \
 		set_64bit((unsigned long long *)(pteptr),pte_val(pteval))
 #define set_pmd(pmdptr,pmdval) \
diff --git a/include/asm-i386/pgtable.h b/include/asm-i386/pgtable.h
index 541b3e234335..7d398f493dde 100644
--- a/include/asm-i386/pgtable.h
+++ b/include/asm-i386/pgtable.h
@@ -247,6 +247,23 @@ static inline pte_t pte_mkhuge(pte_t pte) { (pte).pte_low |= _PAGE_PSE; return p
 #endif
 
 /*
+ * Rules for using pte_update - it must be called after any PTE update which
+ * has not been done using the set_pte / clear_pte interfaces.  It is used by
+ * shadow mode hypervisors to resynchronize the shadow page tables.  Kernel PTE
+ * updates should either be sets, clears, or set_pte_atomic for P->P
+ * transitions, which means this hook should only be called for user PTEs.
+ * This hook implies a P->P protection or access change has taken place, which
+ * requires a subsequent TLB flush.  The notification can optionally be delayed
+ * until the TLB flush event by using the pte_update_defer form of the
+ * interface, but care must be taken to assure that the flush happens while
+ * still holding the same page table lock so that the shadow and primary pages
+ * do not become out of sync on SMP.
+ */
+#define pte_update(mm, addr, ptep)		do { } while (0)
+#define pte_update_defer(mm, addr, ptep)	do { } while (0)
+
+
+/*
  * We only update the dirty/accessed state if we set
  * the dirty bit by hand in the kernel, since the hardware
  * will do the accessed bit for us, and we don't want to
@@ -258,25 +275,54 @@ static inline pte_t pte_mkhuge(pte_t pte) { (pte).pte_low |= _PAGE_PSE; return p
 do {									\
 	if (dirty) {							\
 		(ptep)->pte_low = (entry).pte_low;			\
+		pte_update_defer((vma)->vm_mm, (addr), (ptep));		\
 		flush_tlb_page(vma, address);				\
 	}								\
 } while (0)
 
+/*
+ * We don't actually have these, but we want to advertise them so that
+ * we can encompass the flush here.
+ */
 #define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_DIRTY
-static inline int ptep_test_and_clear_dirty(struct vm_area_struct *vma, unsigned long addr, pte_t *ptep)
-{
-	if (!pte_dirty(*ptep))
-		return 0;
-	return test_and_clear_bit(_PAGE_BIT_DIRTY, &ptep->pte_low);
-}
-
 #define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
-static inline int ptep_test_and_clear_young(struct vm_area_struct *vma, unsigned long addr, pte_t *ptep)
-{
-	if (!pte_young(*ptep))
-		return 0;
-	return test_and_clear_bit(_PAGE_BIT_ACCESSED, &ptep->pte_low);
-}
+
+/*
+ * Rules for using ptep_establish: the pte MUST be a user pte, and
+ * must be a present->present transition.
+ */
+#define __HAVE_ARCH_PTEP_ESTABLISH
+#define ptep_establish(vma, address, ptep, pteval)			\
+do {									\
+	set_pte_present((vma)->vm_mm, address, ptep, pteval);		\
+	flush_tlb_page(vma, address);					\
+} while (0)
+
+#define __HAVE_ARCH_PTEP_CLEAR_DIRTY_FLUSH
+#define ptep_clear_flush_dirty(vma, address, ptep)			\
+({									\
+	int __dirty;							\
+	__dirty = pte_dirty(*(ptep));					\
+	if (__dirty) {							\
+		clear_bit(_PAGE_BIT_DIRTY, &(ptep)->pte_low);		\
+		pte_update_defer((vma)->vm_mm, (addr), (ptep));		\
+		flush_tlb_page(vma, address);				\
+	}								\
+	__dirty;							\
+})
+
+#define __HAVE_ARCH_PTEP_CLEAR_YOUNG_FLUSH
+#define ptep_clear_flush_young(vma, address, ptep)			\
+({									\
+	int __young;							\
+	__young = pte_young(*(ptep));					\
+	if (__young) {							\
+		clear_bit(_PAGE_BIT_ACCESSED, &(ptep)->pte_low);	\
+		pte_update_defer((vma)->vm_mm, (addr), (ptep));		\
+		flush_tlb_page(vma, address);				\
+	}								\
+	__young;							\
+})
 
 #define __HAVE_ARCH_PTEP_GET_AND_CLEAR_FULL
 static inline pte_t ptep_get_and_clear_full(struct mm_struct *mm, unsigned long addr, pte_t *ptep, int full)
@@ -295,6 +341,7 @@ static inline pte_t ptep_get_and_clear_full(struct mm_struct *mm, unsigned long
 static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
 {
 	clear_bit(_PAGE_BIT_RW, &ptep->pte_low);
+	pte_update(mm, addr, ptep);
 }
 
 /*
@@ -426,6 +473,13 @@ extern pte_t *lookup_address(unsigned long address);
 #define pte_unmap_nested(pte) do { } while (0)
 #endif
 
+/* Clear a kernel PTE and flush it from the TLB */
+#define kpte_clear_flush(ptep, vaddr)					\
+do {									\
+	pte_clear(&init_mm, vaddr, ptep);				\
+	__flush_tlb_one(vaddr);						\
+} while (0)
+
 /*
  * The i386 doesn't have any external MMU info: the kernel page
  * tables contain all the necessary information.
diff --git a/include/asm-i386/ptrace.h b/include/asm-i386/ptrace.h
index a4a0e5207db5..d505f501077a 100644
--- a/include/asm-i386/ptrace.h
+++ b/include/asm-i386/ptrace.h
@@ -47,7 +47,10 @@ static inline int user_mode_vm(struct pt_regs *regs)
 {
 	return ((regs->xcs & SEGMENT_RPL_MASK) | (regs->eflags & VM_MASK)) >= USER_RPL;
 }
+
 #define instruction_pointer(regs) ((regs)->eip)
+#define regs_return_value(regs) ((regs)->eax)
+
 extern unsigned long profile_pc(struct pt_regs *regs);
 
 #endif /* __KERNEL__ */
diff --git a/include/asm-i386/smp.h b/include/asm-i386/smp.h
index 915c26a31b79..6aa1206f6e2a 100644
--- a/include/asm-i386/smp.h
+++ b/include/asm-i386/smp.h
@@ -84,6 +84,7 @@ static inline int hard_smp_processor_id(void)
 #endif
 #endif
 
+extern int safe_smp_processor_id(void);
 extern int __cpu_disable(void);
 extern void __cpu_die(unsigned int cpu);
 extern unsigned int num_processors;
@@ -92,6 +93,7 @@ extern unsigned int num_processors;
 
 #else /* CONFIG_SMP */
 
+#define safe_smp_processor_id()		0
 #define cpu_physical_id(cpu)		boot_cpu_physical_apicid
 
 #define NO_PROC_ID		0xFF		/* No processor magic marker */
diff --git a/include/asm-i386/spinlock.h b/include/asm-i386/spinlock.h
index b0b3043f05e1..c18b71fae6b3 100644
--- a/include/asm-i386/spinlock.h
+++ b/include/asm-i386/spinlock.h
@@ -205,4 +205,8 @@ static inline void __raw_write_unlock(raw_rwlock_t *rw)
 				 : "+m" (rw->lock) : : "memory");
 }
 
+#define _raw_spin_relax(lock)	cpu_relax()
+#define _raw_read_relax(lock)	cpu_relax()
+#define _raw_write_relax(lock)	cpu_relax()
+
 #endif /* __ASM_SPINLOCK_H */
diff --git a/include/asm-i386/topology.h b/include/asm-i386/topology.h
index 6adbd9b1ae88..978d09596130 100644
--- a/include/asm-i386/topology.h
+++ b/include/asm-i386/topology.h
@@ -74,6 +74,7 @@ static inline int node_to_first_cpu(int node)
 #define SD_NODE_INIT (struct sched_domain) {		\
 	.span			= CPU_MASK_NONE,	\
 	.parent			= NULL,			\
+	.child			= NULL,			\
 	.groups			= NULL,			\
 	.min_interval		= 8,			\
 	.max_interval		= 32,			\
diff --git a/include/asm-i386/unistd.h b/include/asm-i386/unistd.h
index bd9987087adc..3ca7ab963d7d 100644
--- a/include/asm-i386/unistd.h
+++ b/include/asm-i386/unistd.h
@@ -451,45 +451,6 @@ __syscall_return(type,__res); \
 #define __ARCH_WANT_SYS_RT_SIGACTION
 #define __ARCH_WANT_SYS_RT_SIGSUSPEND
 
-#ifdef __KERNEL_SYSCALLS__
-
-#include <linux/compiler.h>
-#include <linux/types.h>
-#include <linux/linkage.h>
-#include <asm/ptrace.h>
-
-/*
- * we need this inline - forking from kernel space will result
- * in NO COPY ON WRITE (!!!), until an execve is executed. This
- * is no problem, but for the stack. This is handled by not letting
- * main() use the stack at all after fork(). Thus, no function
- * calls - which means inline code for fork too, as otherwise we
- * would use the stack upon exit from 'fork()'.
- *
- * Actually only pause and fork are needed inline, so that there
- * won't be any messing with the stack from main(), but we define
- * some others too.
- */
-static inline _syscall3(int,execve,const char *,file,char **,argv,char **,envp)
-
-asmlinkage int sys_modify_ldt(int func, void __user *ptr, unsigned long bytecount);
-asmlinkage long sys_mmap2(unsigned long addr, unsigned long len,
-			unsigned long prot, unsigned long flags,
-			unsigned long fd, unsigned long pgoff);
-asmlinkage int sys_execve(struct pt_regs regs);
-asmlinkage int sys_clone(struct pt_regs regs);
-asmlinkage int sys_fork(struct pt_regs regs);
-asmlinkage int sys_vfork(struct pt_regs regs);
-asmlinkage int sys_pipe(unsigned long __user *fildes);
-asmlinkage long sys_iopl(unsigned long unused);
-struct sigaction;
-asmlinkage long sys_rt_sigaction(int sig,
-				const struct sigaction __user *act,
-				struct sigaction __user *oact,
-				size_t sigsetsize);
-
-#endif /* __KERNEL_SYSCALLS__ */
-
 /*
  * "Conditional" syscalls
  *
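
As a side note (not part of the patch above), the following is a minimal sketch of how the MSI field macros introduced in the new include/asm-i386/msidef.h might be combined to build the low address word and the data word of an MSI message for a fixed-delivery, edge-triggered interrupt.  The helper name compose_msi(), its parameters, and the particular mode choices are illustrative assumptions only; they follow the general pattern used by the i386 MSI setup code but are not taken from this commit.

/* sketch only - assumes the asm-i386 headers added by this patch are in use */
#include <asm/msidef.h>

/*
 * Build the MSI address (low 32 bits) and data words for an interrupt
 * delivered as a fixed, edge-triggered message to the local APIC with
 * physical ID 'apic_id', raising IDT vector 'vector'.
 */
static void compose_msi(unsigned int apic_id, unsigned int vector,
			unsigned int *addr_lo, unsigned int *data)
{
	*addr_lo = MSI_ADDR_BASE_LO |		/* 0xfee00000 MSI window */
		   MSI_ADDR_DEST_MODE_PHYSICAL |
		   MSI_ADDR_REDIRECTION_CPU |	/* no lowest-priority redirection */
		   MSI_ADDR_DEST_ID(apic_id);	/* destination APIC ID field */

	*data = MSI_DATA_TRIGGER_EDGE |
		MSI_DATA_LEVEL_ASSERT |
		MSI_DATA_DELIVERY_FIXED |
		MSI_DATA_VECTOR(vector);	/* vector number in bits 0-7 */
}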