Diffstat (limited to 'include')
97 files changed, 722 insertions, 553 deletions
diff --git a/include/asm-alpha/a.out.h b/include/asm-alpha/a.out.h index d97daf42753d..e43cf61649a9 100644 --- a/include/asm-alpha/a.out.h +++ b/include/asm-alpha/a.out.h @@ -101,6 +101,8 @@ struct exec #define STACK_TOP \ (current->personality & ADDR_LIMIT_32BIT ? 0x80000000 : 0x00120000000UL) +#define STACK_TOP_MAX 0x00120000000UL + #endif #endif /* __A_OUT_GNU_H__ */ diff --git a/include/asm-alpha/system.h b/include/asm-alpha/system.h index cf1021a97b2e..620c4d86cbf4 100644 --- a/include/asm-alpha/system.h +++ b/include/asm-alpha/system.h @@ -139,16 +139,6 @@ extern void halt(void) __attribute__((noreturn)); struct task_struct; extern struct task_struct *alpha_switch_to(unsigned long, struct task_struct*); -/* - * On SMP systems, when the scheduler does migration-cost autodetection, - * it needs a way to flush as much of the CPU's caches as possible. - * - * TODO: fill this in! - */ -static inline void sched_cacheflush(void) -{ -} - #define imb() \ __asm__ __volatile__ ("call_pal %0 #imb" : : "i" (PAL_imb) : "memory") diff --git a/include/asm-arm/a.out.h b/include/asm-arm/a.out.h index 3e5fe64c4394..d7165e86df25 100644 --- a/include/asm-arm/a.out.h +++ b/include/asm-arm/a.out.h @@ -30,6 +30,7 @@ struct exec #ifdef __KERNEL__ #define STACK_TOP ((current->personality == PER_LINUX_32BIT) ? \ TASK_SIZE : TASK_SIZE_26) +#define STACK_TOP_MAX TASK_SIZE #endif #ifndef LIBRARY_START_TEXT diff --git a/include/asm-arm/system.h b/include/asm-arm/system.h index 6f8e6a69dc5f..94ea8c6dc1a4 100644 --- a/include/asm-arm/system.h +++ b/include/asm-arm/system.h @@ -254,16 +254,6 @@ do { \ last = __switch_to(prev,task_thread_info(prev), task_thread_info(next)); \ } while (0) -/* - * On SMP systems, when the scheduler does migration-cost autodetection, - * it needs a way to flush as much of the CPU's caches as possible. - * - * TODO: fill this in! - */ -static inline void sched_cacheflush(void) -{ -} - #if defined(CONFIG_CPU_SA1100) || defined(CONFIG_CPU_SA110) /* * On the StrongARM, "swp" is terminally broken since it bypasses the diff --git a/include/asm-arm26/a.out.h b/include/asm-arm26/a.out.h index 9b2702c42c87..7167f54ae3fc 100644 --- a/include/asm-arm26/a.out.h +++ b/include/asm-arm26/a.out.h @@ -29,6 +29,7 @@ struct exec #ifdef __KERNEL__ #define STACK_TOP TASK_SIZE +#define STACK_TOP_MAX STACK_TOP #endif #ifndef LIBRARY_START_TEXT diff --git a/include/asm-arm26/system.h b/include/asm-arm26/system.h index 4703593b3bb5..e09da5ff1f54 100644 --- a/include/asm-arm26/system.h +++ b/include/asm-arm26/system.h @@ -110,16 +110,6 @@ do { \ } while (0) /* - * On SMP systems, when the scheduler does migration-cost autodetection, - * it needs a way to flush as much of the CPU's caches as possible. - * - * TODO: fill this in! 
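
The STACK_TOP_MAX definitions added across these a.out.h hunks give exec a ceiling for the stack that does not depend on the current personality, while STACK_TOP stays personality-sensitive. A hedged sketch of the intended use, in the style of the bprm_mm_init() declared in the binfmts.h hunk further down (sketch_stack_init() and its body are illustrative, not code from this patch):

/* Place the temporary argument stack at the highest address the
 * architecture ever allows; exec can relocate it down to the
 * personality-dependent STACK_TOP once the binary is known. */
static int sketch_stack_init(struct linux_binprm *bprm,
                             struct vm_area_struct *vma)
{
        vma->vm_end = STACK_TOP_MAX;
        vma->vm_start = vma->vm_end - PAGE_SIZE;
        bprm->p = vma->vm_end - sizeof(void *);  /* current top of mem */
        return 0;
}
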
- */ -static inline void sched_cacheflush(void) -{ -} - -/* * Save the current interrupt enable state & disable IRQs */ #define local_irq_save(x) \ diff --git a/include/asm-avr32/a.out.h b/include/asm-avr32/a.out.h index 50bf6e31a143..9f398ab28ed0 100644 --- a/include/asm-avr32/a.out.h +++ b/include/asm-avr32/a.out.h @@ -20,6 +20,7 @@ struct exec #ifdef __KERNEL__ #define STACK_TOP TASK_SIZE +#define STACK_TOP_MAX STACK_TOP #endif diff --git a/include/asm-cris/a.out.h b/include/asm-cris/a.out.h index 770734ce54a6..919b34a084f8 100644 --- a/include/asm-cris/a.out.h +++ b/include/asm-cris/a.out.h @@ -8,6 +8,7 @@ /* grabbed from the intel stuff */ #define STACK_TOP TASK_SIZE +#define STACK_TOP_MAX STACK_TOP struct exec diff --git a/include/asm-frv/mem-layout.h b/include/asm-frv/mem-layout.h index a025dd4514e7..aaf2a773d9d3 100644 --- a/include/asm-frv/mem-layout.h +++ b/include/asm-frv/mem-layout.h @@ -60,6 +60,7 @@ */ #define BRK_BASE __UL(2 * 1024 * 1024 + PAGE_SIZE) #define STACK_TOP __UL(2 * 1024 * 1024) +#define STACK_TOP_MAX STACK_TOP /* userspace process size */ #ifdef CONFIG_MMU diff --git a/include/asm-generic/percpu.h b/include/asm-generic/percpu.h index d984a9041436..d85172e9ed45 100644 --- a/include/asm-generic/percpu.h +++ b/include/asm-generic/percpu.h @@ -14,6 +14,11 @@ extern unsigned long __per_cpu_offset[NR_CPUS]; #define DEFINE_PER_CPU(type, name) \ __attribute__((__section__(".data.percpu"))) __typeof__(type) per_cpu__##name +#define DEFINE_PER_CPU_SHARED_ALIGNED(type, name) \ + __attribute__((__section__(".data.percpu.shared_aligned"))) \ + __typeof__(type) per_cpu__##name \ + ____cacheline_aligned_in_smp + /* var is in discarded region: offset to particular copy we want */ #define per_cpu(var, cpu) (*({ \ extern int simple_identifier_##var(void); \ @@ -34,6 +39,9 @@ do { \ #define DEFINE_PER_CPU(type, name) \ __typeof__(type) per_cpu__##name +#define DEFINE_PER_CPU_SHARED_ALIGNED(type, name) \ + DEFINE_PER_CPU(type, name) + #define per_cpu(var, cpu) (*((void)(cpu), &per_cpu__##var)) #define __get_cpu_var(var) per_cpu__##var #define __raw_get_cpu_var(var) per_cpu__##var diff --git a/include/asm-generic/vmlinux.lds.h b/include/asm-generic/vmlinux.lds.h index 84155eb67f1d..0240e0506a07 100644 --- a/include/asm-generic/vmlinux.lds.h +++ b/include/asm-generic/vmlinux.lds.h @@ -224,7 +224,11 @@ } #define NOTES \ - .notes : { *(.note.*) } :note + .notes : AT(ADDR(.notes) - LOAD_OFFSET) { \ + VMLINUX_SYMBOL(__start_notes) = .; \ + *(.note.*) \ + VMLINUX_SYMBOL(__stop_notes) = .; \ + } #define INITCALLS \ *(.initcall0.init) \ @@ -245,3 +249,11 @@ *(.initcall7.init) \ *(.initcall7s.init) +#define PERCPU(align) \ + . 
= ALIGN(align); \ + __per_cpu_start = .; \ + .data.percpu : AT(ADDR(.data.percpu) - LOAD_OFFSET) { \ + *(.data.percpu) \ + *(.data.percpu.shared_aligned) \ + } \ + __per_cpu_end = .; diff --git a/include/asm-h8300/a.out.h b/include/asm-h8300/a.out.h index 3c70939f9f00..aa5d22778235 100644 --- a/include/asm-h8300/a.out.h +++ b/include/asm-h8300/a.out.h @@ -20,6 +20,7 @@ struct exec #ifdef __KERNEL__ #define STACK_TOP TASK_SIZE +#define STACK_TOP_MAX STACK_TOP #endif diff --git a/include/asm-i386/a.out.h b/include/asm-i386/a.out.h index ab17bb8e5465..851a60f8258c 100644 --- a/include/asm-i386/a.out.h +++ b/include/asm-i386/a.out.h @@ -20,6 +20,7 @@ struct exec #ifdef __KERNEL__ #define STACK_TOP TASK_SIZE +#define STACK_TOP_MAX STACK_TOP #endif diff --git a/include/asm-i386/cmpxchg.h b/include/asm-i386/cmpxchg.h index 7adcef0cd53b..64dcdf46117b 100644 --- a/include/asm-i386/cmpxchg.h +++ b/include/asm-i386/cmpxchg.h @@ -3,14 +3,16 @@ #include <linux/bitops.h> /* for LOCK_PREFIX */ +/* + * Note: if you use set64_bit(), __cmpxchg64(), or their variants, you + * need to test for the feature in boot_cpu_data. + */ + #define xchg(ptr,v) ((__typeof__(*(ptr)))__xchg((unsigned long)(v),(ptr),sizeof(*(ptr)))) struct __xchg_dummy { unsigned long a[100]; }; #define __xg(x) ((struct __xchg_dummy *)(x)) - -#ifdef CONFIG_X86_CMPXCHG64 - /* * The semantics of XCHGCMP8B are a bit strange, this is why * there is a loop and the loading of %%eax and %%edx has to @@ -65,8 +67,6 @@ static inline void __set_64bit_var (unsigned long long *ptr, __set_64bit(ptr, (unsigned int)(value), (unsigned int)((value)>>32ULL) ) : \ __set_64bit(ptr, ll_low(value), ll_high(value)) ) -#endif - /* * Note: no "lock" prefix even on SMP: xchg always implies lock anyway * Note 2: xchg has side effect, so that attribute volatile is necessary, @@ -252,8 +252,6 @@ static inline unsigned long cmpxchg_386(volatile void *ptr, unsigned long old, }) #endif -#ifdef CONFIG_X86_CMPXCHG64 - static inline unsigned long long __cmpxchg64(volatile void *ptr, unsigned long long old, unsigned long long new) { @@ -289,5 +287,3 @@ static inline unsigned long long __cmpxchg64_local(volatile void *ptr, ((__typeof__(*(ptr)))__cmpxchg64_local((ptr),(unsigned long long)(o),\ (unsigned long long)(n))) #endif - -#endif diff --git a/include/asm-i386/kprobes.h b/include/asm-i386/kprobes.h index 8774d06689da..06f7303c30ca 100644 --- a/include/asm-i386/kprobes.h +++ b/include/asm-i386/kprobes.h @@ -42,7 +42,6 @@ typedef u8 kprobe_opcode_t; ? (MAX_STACK_SIZE) \ : (((unsigned long)current_thread_info()) + THREAD_SIZE - (ADDR))) -#define JPROBE_ENTRY(pentry) (kprobe_opcode_t *)pentry #define ARCH_SUPPORTS_KRETPROBES #define ARCH_INACTIVE_KPROBE_COUNT 0 #define flush_insn_slot(p) do { } while (0) diff --git a/include/asm-i386/percpu.h b/include/asm-i386/percpu.h index f54830b5d5ac..a7ebd436f3cc 100644 --- a/include/asm-i386/percpu.h +++ b/include/asm-i386/percpu.h @@ -54,6 +54,11 @@ extern unsigned long __per_cpu_offset[]; #define DEFINE_PER_CPU(type, name) \ __attribute__((__section__(".data.percpu"))) __typeof__(type) per_cpu__##name +#define DEFINE_PER_CPU_SHARED_ALIGNED(type, name) \ + __attribute__((__section__(".data.percpu.shared_aligned"))) \ + __typeof__(type) per_cpu__##name \ + ____cacheline_aligned_in_smp + /* We can use this directly for local CPU (faster).
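
The DEFINE_PER_CPU_SHARED_ALIGNED() variants added to the percpu headers place hot per-CPU objects in .data.percpu.shared_aligned, and the new PERCPU(align) macro lets an arch linker script pull both per-cpu sections into one aligned region. A hedged usage sketch (struct hot_stats and bump_events() are illustrative names only):

struct hot_stats {
        unsigned long events;
};

/* Each CPU's copy starts on its own cache line under SMP, so CPUs
 * updating their own counters never false-share. */
static DEFINE_PER_CPU_SHARED_ALIGNED(struct hot_stats, hot_stats);

static void bump_events(void)
{
        __get_cpu_var(hot_stats).events++;
}

The note added at the top of asm-i386/cmpxchg.h is load-bearing: with the CONFIG_X86_CMPXCHG64 guards removed, the 64-bit helpers are always compiled in, so callers must check for CMPXCHG8B at runtime. A minimal sketch of that test using the standard boot_cpu_has() feature check (the wrapper itself is illustrative):

static unsigned long long
sketch_cmpxchg64(unsigned long long *p, unsigned long long old,
                 unsigned long long new)
{
        if (boot_cpu_has(X86_FEATURE_CX8))
                return __cmpxchg64(p, old, new);
        return old;     /* pre-CX8 fallback path elided in this sketch */
}
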
*/ DECLARE_PER_CPU(unsigned long, this_cpu_off); diff --git a/include/asm-i386/required-features.h b/include/asm-i386/required-features.h index 65848a007050..618feb98f9f5 100644 --- a/include/asm-i386/required-features.h +++ b/include/asm-i386/required-features.h @@ -29,7 +29,7 @@ # define NEED_CMOV 0 #endif -#ifdef CONFIG_X86_CMPXCHG64 +#ifdef CONFIG_X86_PAE # define NEED_CX8 (1<<(X86_FEATURE_CX8 & 31)) #else # define NEED_CX8 0 diff --git a/include/asm-i386/system.h b/include/asm-i386/system.h index 94ed3686a5f3..609756c61676 100644 --- a/include/asm-i386/system.h +++ b/include/asm-i386/system.h @@ -310,15 +310,6 @@ void enable_hlt(void); extern int es7000_plat; void cpu_idle_wait(void); -/* - * On SMP systems, when the scheduler does migration-cost autodetection, - * it needs a way to flush as much of the CPU's caches as possible: - */ -static inline void sched_cacheflush(void) -{ - wbinvd(); -} - extern unsigned long arch_align_stack(unsigned long sp); extern void free_init_pages(char *what, unsigned long begin, unsigned long end); diff --git a/include/asm-i386/tsc.h b/include/asm-i386/tsc.h index 62c091ffcccc..a4d806610b7f 100644 --- a/include/asm-i386/tsc.h +++ b/include/asm-i386/tsc.h @@ -63,6 +63,7 @@ extern void tsc_init(void); extern void mark_tsc_unstable(char *reason); extern int unsynchronized_tsc(void); extern void init_tsc_clocksource(void); +int check_tsc_unstable(void); /* * Boot-time check whether the TSCs are synchronized across diff --git a/include/asm-ia64/kprobes.h b/include/asm-ia64/kprobes.h index 6382e52ec227..067d9dea68f9 100644 --- a/include/asm-ia64/kprobes.h +++ b/include/asm-ia64/kprobes.h @@ -82,8 +82,6 @@ struct kprobe_ctlblk { struct prev_kprobe prev_kprobe[ARCH_PREV_KPROBE_SZ]; }; -#define JPROBE_ENTRY(pentry) (kprobe_opcode_t *)pentry - #define ARCH_SUPPORTS_KRETPROBES #define ARCH_INACTIVE_KPROBE_COUNT 1 diff --git a/include/asm-ia64/percpu.h b/include/asm-ia64/percpu.h index fbe5cf3ab8dc..43a7aac414e0 100644 --- a/include/asm-ia64/percpu.h +++ b/include/asm-ia64/percpu.h @@ -29,6 +29,16 @@ __attribute__((__section__(".data.percpu"))) \ __SMALL_ADDR_AREA __typeof__(type) per_cpu__##name +#ifdef CONFIG_SMP +#define DEFINE_PER_CPU_SHARED_ALIGNED(type, name) \ + __attribute__((__section__(".data.percpu.shared_aligned"))) \ + __SMALL_ADDR_AREA __typeof__(type) per_cpu__##name \ + ____cacheline_aligned_in_smp +#else +#define DEFINE_PER_CPU_SHARED_ALIGNED(type, name) \ + DEFINE_PER_CPU(type, name) +#endif + /* * Pretty much a literal copy of asm-generic/percpu.h, except that percpu_modcopy() is an * external routine, to avoid include-hell. diff --git a/include/asm-ia64/processor.h b/include/asm-ia64/processor.h index db81ba406cef..6251c76437d2 100644 --- a/include/asm-ia64/processor.h +++ b/include/asm-ia64/processor.h @@ -295,9 +295,9 @@ struct thread_struct { regs->ar_bspstore = current->thread.rbs_bot; \ regs->ar_fpsr = FPSR_DEFAULT; \ regs->loadrs = 0; \ - regs->r8 = current->mm->dumpable; /* set "don't zap registers" flag */ \ + regs->r8 = get_dumpable(current->mm); /* set "don't zap registers" flag */ \ regs->r12 = new_sp - 16; /* allocate 16 byte scratch area */ \ - if (unlikely(!current->mm->dumpable)) { \ + if (unlikely(!get_dumpable(current->mm))) { \ /* \ * Zap scratch regs to avoid leaking bits between processes with different \ * uid/privileges. 
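
The ia64 start_thread change is part of a wider conversion: callers now read the dumpable state through get_dumpable() instead of touching mm->dumpable directly. A hedged sketch of the accessor's calling convention (sketch_may_dump() is an illustrative name):

/* get_dumpable() returns 0 when register contents and core dumps
 * must not be exposed for this mm, non-zero otherwise. */
static int sketch_may_dump(struct mm_struct *mm)
{
        return get_dumpable(mm) != 0;
}
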
\ diff --git a/include/asm-ia64/system.h b/include/asm-ia64/system.h index 384fbf7f2a0f..91bb8e00066c 100644 --- a/include/asm-ia64/system.h +++ b/include/asm-ia64/system.h @@ -259,7 +259,6 @@ extern void ia64_load_extra (struct task_struct *task); #define ia64_platform_is(x) (strcmp(x, platform_name) == 0) void cpu_idle_wait(void); -void sched_cacheflush(void); #define arch_align_stack(x) (x) diff --git a/include/asm-ia64/ustack.h b/include/asm-ia64/ustack.h index a349467913ea..504167c35b8b 100644 --- a/include/asm-ia64/ustack.h +++ b/include/asm-ia64/ustack.h @@ -11,6 +11,7 @@ /* The absolute hard limit for stack size is 1/2 of the mappable space in the region */ #define MAX_USER_STACK_SIZE (RGN_MAP_LIMIT/2) #define STACK_TOP (0x6000000000000000UL + RGN_MAP_LIMIT) +#define STACK_TOP_MAX STACK_TOP #endif /* Make a default stack size of 2GiB */ diff --git a/include/asm-m32r/a.out.h b/include/asm-m32r/a.out.h index 9a4a5d20160a..6a1b5d42f328 100644 --- a/include/asm-m32r/a.out.h +++ b/include/asm-m32r/a.out.h @@ -20,6 +20,7 @@ struct exec #ifdef __KERNEL__ #define STACK_TOP TASK_SIZE +#define STACK_TOP_MAX STACK_TOP #endif diff --git a/include/asm-m32r/system.h b/include/asm-m32r/system.h index 8ee73d3f316d..2365de5c2955 100644 --- a/include/asm-m32r/system.h +++ b/include/asm-m32r/system.h @@ -54,16 +54,6 @@ ); \ } while(0) -/* - * On SMP systems, when the scheduler does migration-cost autodetection, - * it needs a way to flush as much of the CPU's caches as possible. - * - * TODO: fill this in! - */ -static inline void sched_cacheflush(void) -{ -} - /* Interrupt Control */ #if !defined(CONFIG_CHIP_M32102) && !defined(CONFIG_CHIP_M32104) #define local_irq_enable() \ diff --git a/include/asm-m68k/a.out.h b/include/asm-m68k/a.out.h index eda1662773b8..6fc86a221a94 100644 --- a/include/asm-m68k/a.out.h +++ b/include/asm-m68k/a.out.h @@ -20,6 +20,7 @@ struct exec #ifdef __KERNEL__ #define STACK_TOP TASK_SIZE +#define STACK_TOP_MAX STACK_TOP #endif diff --git a/include/asm-m68knommu/irq.h b/include/asm-m68knommu/irq.h index 7b8f874f8429..9373c31ac87d 100644 --- a/include/asm-m68knommu/irq.h +++ b/include/asm-m68knommu/irq.h @@ -1,7 +1,5 @@ -#ifndef _M68K_IRQ_H_ -#define _M68K_IRQ_H_ - -#include <asm/ptrace.h> +#ifndef _M68KNOMMU_IRQ_H_ +#define _M68KNOMMU_IRQ_H_ #ifdef CONFIG_COLDFIRE /* @@ -17,75 +15,12 @@ /* * # of m68k interrupts */ -#define SYS_IRQS 8 -#define NR_IRQS (24+SYS_IRQS) +#define SYS_IRQS 8 +#define NR_IRQS (24 + SYS_IRQS) #endif /* CONFIG_COLDFIRE */ -/* - * Interrupt source definitions - * General interrupt sources are the level 1-7. - * Adding an interrupt service routine for one of these sources - * results in the addition of that routine to a chain of routines. - * Each one is called in succession. Each individual interrupt - * service routine should determine if the device associated with - * that routine requires service. - */ -#define IRQ1 (1) /* level 1 interrupt */ -#define IRQ2 (2) /* level 2 interrupt */ -#define IRQ3 (3) /* level 3 interrupt */ -#define IRQ4 (4) /* level 4 interrupt */ -#define IRQ5 (5) /* level 5 interrupt */ -#define IRQ6 (6) /* level 6 interrupt */ -#define IRQ7 (7) /* level 7 interrupt (non-maskable) */ - -/* - * Machine specific interrupt sources. - * - * Adding an interrupt service routine for a source with this bit - * set indicates a special machine specific interrupt source. - * The machine specific files define these sources. - * - * The IRQ_MACHSPEC bit is now gone - the only thing it did was to - * introduce unnecessary overhead. 
- * - * All interrupt handling is actually machine specific so it is better - * to use function pointers, as used by the Sparc port, and select the - * interrupt handling functions when initializing the kernel. This way - * we save some unnecessary overhead at run-time. - * 01/11/97 - Jes - */ - -extern void (*mach_enable_irq)(unsigned int); -extern void (*mach_disable_irq)(unsigned int); - -/* - * various flags for request_irq() - the Amiga now uses the standard - * mechanism like all other architectures - IRQF_DISABLED and - * IRQF_SHARED are your friends. - */ -#define IRQ_FLG_LOCK (0x0001) /* handler is not replaceable */ -#define IRQ_FLG_REPLACE (0x0002) /* replace existing handler */ -#define IRQ_FLG_FAST (0x0004) -#define IRQ_FLG_SLOW (0x0008) -#define IRQ_FLG_STD (0x8000) /* internally used */ - -#ifdef CONFIG_M68360 - -#define CPM_INTERRUPT IRQ4 - -/* see MC68360 User's Manual, p. 7-377 */ -#define CPM_VECTOR_BASE 0x04 /* 3 MSbits of CPM vector */ - -#endif /* CONFIG_M68360 */ - -/* - * Some drivers want these entry points - */ -#define enable_irq(x) do { } while (0) -#define disable_irq(x) do { } while (0) -#define disable_irq_nosync(x) disable_irq(x) #define irq_canonicalize(irq) (irq) -#endif /* _M68K_IRQ_H_ */ +#endif /* _M68KNOMMU_IRQ_H_ */ diff --git a/include/asm-m68knommu/irqnode.h b/include/asm-m68knommu/irqnode.h deleted file mode 100644 index 6132a9858b52..000000000000 --- a/include/asm-m68knommu/irqnode.h +++ /dev/null @@ -1,36 +0,0 @@ -#ifndef _M68K_IRQNODE_H_ -#define _M68K_IRQNODE_H_ - -#include <linux/interrupt.h> - -/* - * This structure is used to chain together the ISRs for a particular - * interrupt source (if it supports chaining). - */ -typedef struct irq_node { - irq_handler_t handler; - unsigned long flags; - void *dev_id; - const char *devname; - struct irq_node *next; -} irq_node_t; - -/* - * This structure has only 4 elements for speed reasons - */ -struct irq_entry { - irq_handler_t handler; - unsigned long flags; - void *dev_id; - const char *devname; -}; - -/* count of spurious interrupts */ -extern volatile unsigned int num_spurious; - -/* - * This function returns a new irq_node_t - */ -extern irq_node_t *new_irq_node(void); - -#endif /* _M68K_IRQNODE_H_ */ diff --git a/include/asm-m68knommu/m68360.h b/include/asm-m68knommu/m68360.h index dd11b070884b..eb7d39ef2855 100644 --- a/include/asm-m68knommu/m68360.h +++ b/include/asm-m68knommu/m68360.h @@ -3,3 +3,11 @@ #include "m68360_quicc.h" #include "m68360_enet.h" +#ifdef CONFIG_M68360 + +#define CPM_INTERRUPT 4 + +/* see MC68360 User's Manual, p. 7-377 */ +#define CPM_VECTOR_BASE 0x04 /* 3 MSbits of CPM vector */ + +#endif /* CONFIG_M68360 */ diff --git a/include/asm-m68knommu/pgtable.h b/include/asm-m68knommu/pgtable.h index 9dfbbc24aa71..e1e6a1d2333a 100644 --- a/include/asm-m68knommu/pgtable.h +++ b/include/asm-m68knommu/pgtable.h @@ -49,7 +49,6 @@ static inline int pte_file(pte_t pte) { return 0; } * These would be in other places but having them here reduces the diffs. */ extern unsigned int kobjsize(const void *objp); -extern int is_in_rom(unsigned long); /* * No page table caches to initialise. 
diff --git a/include/asm-m68knommu/traps.h b/include/asm-m68knommu/traps.h index f2a81317cc10..d0671e5f8e29 100644 --- a/include/asm-m68knommu/traps.h +++ b/include/asm-m68knommu/traps.h @@ -16,6 +16,10 @@ typedef void (*e_vector)(void); extern e_vector vectors[]; +extern void init_vectors(void); +extern void enable_vector(unsigned int irq); +extern void disable_vector(unsigned int irq); +extern void ack_vector(unsigned int irq); #endif diff --git a/include/asm-m68knommu/uaccess.h b/include/asm-m68knommu/uaccess.h index 62b29b10bc6d..9ed9169a8849 100644 --- a/include/asm-m68knommu/uaccess.h +++ b/include/asm-m68knommu/uaccess.h @@ -15,12 +15,15 @@ #define access_ok(type,addr,size) _access_ok((unsigned long)(addr),(size)) +/* + * It is not enough to just have access_ok check for a real RAM address. + * This would disallow the case of code/ro-data running XIP in flash/rom. + * Ideally we would check the possible flash ranges too, but that is + * currently not so easy. + */ static inline int _access_ok(unsigned long addr, unsigned long size) { - extern unsigned long memory_start, memory_end; - - return (((addr >= memory_start) && (addr+size < memory_end)) || - (is_in_rom(addr) && is_in_rom(addr+size))); + return 1; } /* diff --git a/include/asm-mips/a.out.h b/include/asm-mips/a.out.h index ef33c3f13484..1ad60ba186d0 100644 --- a/include/asm-mips/a.out.h +++ b/include/asm-mips/a.out.h @@ -40,6 +40,7 @@ struct exec #ifdef CONFIG_64BIT #define STACK_TOP (current->thread.mflags & MF_32BIT_ADDR ? TASK_SIZE32 : TASK_SIZE) #endif +#define STACK_TOP_MAX TASK_SIZE #endif diff --git a/include/asm-mips/system.h b/include/asm-mips/system.h index 46bdb3f566f9..76339165bc20 100644 --- a/include/asm-mips/system.h +++ b/include/asm-mips/system.h @@ -71,16 +71,6 @@ do { \ write_c0_userlocal(task_thread_info(current)->tp_value);\ } while(0) -/* - * On SMP systems, when the scheduler does migration-cost autodetection, - * it needs a way to flush as much of the CPU's caches as possible. - * - * TODO: fill this in! - */ -static inline void sched_cacheflush(void) -{ -} - static inline unsigned long __xchg_u32(volatile int * m, unsigned int val) { __u32 retval; diff --git a/include/asm-parisc/a.out.h b/include/asm-parisc/a.out.h index 2a490cc9ec91..23e2c90943e5 100644 --- a/include/asm-parisc/a.out.h +++ b/include/asm-parisc/a.out.h @@ -23,6 +23,7 @@ struct exec * prumpf */ #define STACK_TOP TASK_SIZE +#define STACK_TOP_MAX DEFAULT_TASK_SIZE #endif diff --git a/include/asm-parisc/system.h b/include/asm-parisc/system.h index 21fbfc5afd02..ee80c920b464 100644 --- a/include/asm-parisc/system.h +++ b/include/asm-parisc/system.h @@ -48,17 +48,6 @@ extern struct task_struct *_switch_to(struct task_struct *, struct task_struct * (last) = _switch_to(prev, next); \ } while(0) -/* - * On SMP systems, when the scheduler does migration-cost autodetection, - * it needs a way to flush as much of the CPU's caches as possible. - * - * TODO: fill this in! - */ -static inline void sched_cacheflush(void) -{ -} - - /* interrupt control */ #define local_save_flags(x) __asm__ __volatile__("ssm 0, %0" : "=r" (x) : : "memory") #define local_irq_disable() __asm__ __volatile__("rsm %0,%%r0\n" : : "i" (PSW_I) : "memory" ) diff --git a/include/asm-powerpc/a.out.h b/include/asm-powerpc/a.out.h index c7393a977364..5c5ea83f9349 100644 --- a/include/asm-powerpc/a.out.h +++ b/include/asm-powerpc/a.out.h @@ -26,9 +26,12 @@ struct exec #define STACK_TOP (test_thread_flag(TIF_32BIT) ? 
\ STACK_TOP_USER32 : STACK_TOP_USER64) +#define STACK_TOP_MAX STACK_TOP_USER64 + #else /* __powerpc64__ */ #define STACK_TOP TASK_SIZE +#define STACK_TOP_MAX STACK_TOP #endif /* __powerpc64__ */ #endif /* __KERNEL__ */ diff --git a/include/asm-powerpc/kprobes.h b/include/asm-powerpc/kprobes.h index 9537fda238b8..8b08b447d6f3 100644 --- a/include/asm-powerpc/kprobes.h +++ b/include/asm-powerpc/kprobes.h @@ -73,12 +73,10 @@ typedef unsigned int kprobe_opcode_t; } \ } -#define JPROBE_ENTRY(pentry) (kprobe_opcode_t *)((func_descr_t *)pentry) #define is_trap(instr) (IS_TW(instr) || IS_TD(instr) || \ IS_TWI(instr) || IS_TDI(instr)) #else /* Use stock kprobe_lookup_name since ppc32 doesn't use function descriptors */ -#define JPROBE_ENTRY(pentry) (kprobe_opcode_t *)(pentry) #define is_trap(instr) (IS_TW(instr) || IS_TWI(instr)) #endif diff --git a/include/asm-powerpc/percpu.h b/include/asm-powerpc/percpu.h index 2f2e3024fa61..73dc8ba4010d 100644 --- a/include/asm-powerpc/percpu.h +++ b/include/asm-powerpc/percpu.h @@ -20,6 +20,11 @@ #define DEFINE_PER_CPU(type, name) \ __attribute__((__section__(".data.percpu"))) __typeof__(type) per_cpu__##name +#define DEFINE_PER_CPU_SHARED_ALIGNED(type, name) \ + __attribute__((__section__(".data.percpu.shared_aligned"))) \ + __typeof__(type) per_cpu__##name \ + ____cacheline_aligned_in_smp + /* var is in discarded region: offset to particular copy we want */ #define per_cpu(var, cpu) (*RELOC_HIDE(&per_cpu__##var, __per_cpu_offset(cpu))) #define __get_cpu_var(var) (*RELOC_HIDE(&per_cpu__##var, __my_cpu_offset())) @@ -40,6 +45,8 @@ extern void setup_per_cpu_areas(void); #define DEFINE_PER_CPU(type, name) \ __typeof__(type) per_cpu__##name +#define DEFINE_PER_CPU_SHARED_ALIGNED(type, name) \ + DEFINE_PER_CPU(type, name) #define per_cpu(var, cpu) (*((void)(cpu), &per_cpu__##var)) #define __get_cpu_var(var) per_cpu__##var diff --git a/include/asm-powerpc/system.h b/include/asm-powerpc/system.h index 32aa42b748be..41520b7a7b76 100644 --- a/include/asm-powerpc/system.h +++ b/include/asm-powerpc/system.h @@ -184,16 +184,6 @@ struct thread_struct; extern struct task_struct *_switch(struct thread_struct *prev, struct thread_struct *next); -/* - * On SMP systems, when the scheduler does migration-cost autodetection, - * it needs a way to flush as much of the CPU's caches as possible. - * - * TODO: fill this in! - */ -static inline void sched_cacheflush(void) -{ -} - extern unsigned int rtas_data; extern int mem_init_done; /* set on boot once kmalloc can be called */ extern unsigned long memory_limit; diff --git a/include/asm-ppc/system.h b/include/asm-ppc/system.h index d84a3cf4d033..f1311a8f310f 100644 --- a/include/asm-ppc/system.h +++ b/include/asm-ppc/system.h @@ -129,16 +129,6 @@ extern struct task_struct *__switch_to(struct task_struct *, struct task_struct *); #define switch_to(prev, next, last) ((last) = __switch_to((prev), (next))) -/* - * On SMP systems, when the scheduler does migration-cost autodetection, - * it needs a way to flush as much of the CPU's caches as possible. - * - * TODO: fill this in! 
- */ -static inline void sched_cacheflush(void) -{ -} - struct thread_struct; extern struct task_struct *_switch(struct thread_struct *prev, struct thread_struct *next); diff --git a/include/asm-s390/a.out.h b/include/asm-s390/a.out.h index 72adee6ef338..46158dcaf517 100644 --- a/include/asm-s390/a.out.h +++ b/include/asm-s390/a.out.h @@ -32,6 +32,7 @@ struct exec #ifdef __KERNEL__ #define STACK_TOP TASK_SIZE +#define STACK_TOP_MAX DEFAULT_TASK_SIZE #endif diff --git a/include/asm-s390/kprobes.h b/include/asm-s390/kprobes.h index 830fe4c4eea6..340ba10446ea 100644 --- a/include/asm-s390/kprobes.h +++ b/include/asm-s390/kprobes.h @@ -46,8 +46,6 @@ typedef u16 kprobe_opcode_t; ? (MAX_STACK_SIZE) \ : (((unsigned long)current_thread_info()) + THREAD_SIZE - (ADDR))) -#define JPROBE_ENTRY(pentry) (kprobe_opcode_t *)(pentry) - #define ARCH_SUPPORTS_KRETPROBES #define ARCH_INACTIVE_KPROBE_COUNT 0 diff --git a/include/asm-s390/percpu.h b/include/asm-s390/percpu.h index 9ea7f1023e57..545857e64443 100644 --- a/include/asm-s390/percpu.h +++ b/include/asm-s390/percpu.h @@ -41,6 +41,11 @@ extern unsigned long __per_cpu_offset[NR_CPUS]; __attribute__((__section__(".data.percpu"))) \ __typeof__(type) per_cpu__##name +#define DEFINE_PER_CPU_SHARED_ALIGNED(type, name) \ + __attribute__((__section__(".data.percpu.shared_aligned"))) \ + __typeof__(type) per_cpu__##name \ + ____cacheline_aligned_in_smp + #define __get_cpu_var(var) __reloc_hide(var,S390_lowcore.percpu_offset) #define __raw_get_cpu_var(var) __reloc_hide(var,S390_lowcore.percpu_offset) #define per_cpu(var,cpu) __reloc_hide(var,__per_cpu_offset[cpu]) @@ -59,6 +64,8 @@ do { \ #define DEFINE_PER_CPU(type, name) \ __typeof__(type) per_cpu__##name +#define DEFINE_PER_CPU_SHARED_ALIGNED(type, name) \ + DEFINE_PER_CPU(type, name) #define __get_cpu_var(var) __reloc_hide(var,0) #define __raw_get_cpu_var(var) __reloc_hide(var,0) diff --git a/include/asm-s390/system.h b/include/asm-s390/system.h index bbe137c3ed69..64a3cd05cae1 100644 --- a/include/asm-s390/system.h +++ b/include/asm-s390/system.h @@ -97,16 +97,6 @@ static inline void restore_access_regs(unsigned int *acrs) prev = __switch_to(prev,next); \ } while (0) -/* - * On SMP systems, when the scheduler does migration-cost autodetection, - * it needs a way to flush as much of the CPU's caches as possible. - * - * TODO: fill this in! - */ -static inline void sched_cacheflush(void) -{ -} - #ifdef CONFIG_VIRT_CPU_ACCOUNTING extern void account_vtime(struct task_struct *); extern void account_tick_vtime(struct task_struct *); diff --git a/include/asm-sh/a.out.h b/include/asm-sh/a.out.h index 6e9fca9ee333..685d0f6125fa 100644 --- a/include/asm-sh/a.out.h +++ b/include/asm-sh/a.out.h @@ -20,6 +20,7 @@ struct exec #ifdef __KERNEL__ #define STACK_TOP TASK_SIZE +#define STACK_TOP_MAX STACK_TOP #endif diff --git a/include/asm-sh/system.h b/include/asm-sh/system.h index 7c75045ae22b..245042537205 100644 --- a/include/asm-sh/system.h +++ b/include/asm-sh/system.h @@ -64,16 +64,6 @@ struct task_struct *__switch_to(struct task_struct *prev, last = __last; \ } while (0) -/* - * On SMP systems, when the scheduler does migration-cost autodetection, - * it needs a way to flush as much of the CPU's caches as possible. - * - * TODO: fill this in! 
- */ -static inline void sched_cacheflush(void) -{ -} - #ifdef CONFIG_CPU_SH4A #define __icbi() \ { \ diff --git a/include/asm-sh64/a.out.h b/include/asm-sh64/a.out.h index e1995e86b663..237ee4e5b72a 100644 --- a/include/asm-sh64/a.out.h +++ b/include/asm-sh64/a.out.h @@ -31,6 +31,7 @@ struct exec #ifdef __KERNEL__ #define STACK_TOP TASK_SIZE +#define STACK_TOP_MAX STACK_TOP #endif diff --git a/include/asm-sparc/a.out.h b/include/asm-sparc/a.out.h index 9090060a23e6..917e04250696 100644 --- a/include/asm-sparc/a.out.h +++ b/include/asm-sparc/a.out.h @@ -92,6 +92,7 @@ struct relocation_info /* used when header.a_machtype == M_SPARC */ #include <asm/page.h> #define STACK_TOP (PAGE_OFFSET - PAGE_SIZE) +#define STACK_TOP_MAX STACK_TOP #endif /* __KERNEL__ */ diff --git a/include/asm-sparc/system.h b/include/asm-sparc/system.h index 8b4e23b3bb38..d1a2572e3f55 100644 --- a/include/asm-sparc/system.h +++ b/include/asm-sparc/system.h @@ -165,16 +165,6 @@ extern void fpsave(unsigned long *fpregs, unsigned long *fsr, } while(0) /* - * On SMP systems, when the scheduler does migration-cost autodetection, - * it needs a way to flush as much of the CPU's caches as possible. - * - * TODO: fill this in! - */ -static inline void sched_cacheflush(void) -{ -} - -/* * Changing the IRQ level on the Sparc. */ extern void local_irq_restore(unsigned long); diff --git a/include/asm-sparc64/a.out.h b/include/asm-sparc64/a.out.h index eb3b8e90b279..902e07f89a42 100644 --- a/include/asm-sparc64/a.out.h +++ b/include/asm-sparc64/a.out.h @@ -101,6 +101,8 @@ struct relocation_info /* used when header.a_machtype == M_SPARC */ #define STACK_TOP (test_thread_flag(TIF_32BIT) ? \ STACK_TOP32 : STACK_TOP64) +#define STACK_TOP_MAX STACK_TOP64 + #endif #endif /* !(__ASSEMBLY__) */ diff --git a/include/asm-sparc64/kprobes.h b/include/asm-sparc64/kprobes.h index a331b7b0dff2..7f6774dca5f4 100644 --- a/include/asm-sparc64/kprobes.h +++ b/include/asm-sparc64/kprobes.h @@ -10,7 +10,6 @@ typedef u32 kprobe_opcode_t; #define BREAKPOINT_INSTRUCTION_2 0x91d02071 /* ta 0x71 */ #define MAX_INSN_SIZE 2 -#define JPROBE_ENTRY(pentry) (kprobe_opcode_t *)pentry #define arch_remove_kprobe(p) do {} while (0) #define ARCH_INACTIVE_KPROBE_COUNT 0 diff --git a/include/asm-sparc64/percpu.h b/include/asm-sparc64/percpu.h index 88db872ce2f8..caf8750792ff 100644 --- a/include/asm-sparc64/percpu.h +++ b/include/asm-sparc64/percpu.h @@ -18,6 +18,11 @@ extern unsigned long __per_cpu_shift; #define DEFINE_PER_CPU(type, name) \ __attribute__((__section__(".data.percpu"))) __typeof__(type) per_cpu__##name +#define DEFINE_PER_CPU_SHARED_ALIGNED(type, name) \ + __attribute__((__section__(".data.percpu.shared_aligned"))) \ + __typeof__(type) per_cpu__##name \ + ____cacheline_aligned_in_smp + register unsigned long __local_per_cpu_offset asm("g5"); /* var is in discarded region: offset to particular copy we want */ @@ -38,6 +43,8 @@ do { \ #define real_setup_per_cpu_areas() do { } while (0) #define DEFINE_PER_CPU(type, name) \ __typeof__(type) per_cpu__##name +#define DEFINE_PER_CPU_SHARED_ALIGNED(type, name) \ + DEFINE_PER_CPU(type, name) #define per_cpu(var, cpu) (*((void)cpu, &per_cpu__##var)) #define __get_cpu_var(var) per_cpu__##var diff --git a/include/asm-sparc64/system.h b/include/asm-sparc64/system.h index 8ba380ec6daa..409067408eec 100644 --- a/include/asm-sparc64/system.h +++ b/include/asm-sparc64/system.h @@ -204,16 +204,6 @@ do { if (test_thread_flag(TIF_PERFCTR)) { \ } \ } while(0) -/* - * On SMP systems, when the scheduler does migration-cost 
autodetection, - * it needs a way to flush as much of the CPU's caches as possible. - * - * TODO: fill this in! - */ -static inline void sched_cacheflush(void) -{ -} - static inline unsigned long xchg32(__volatile__ unsigned int *m, unsigned int val) { unsigned long tmp1, tmp2; diff --git a/include/asm-um/a.out.h b/include/asm-um/a.out.h index 7016b893ac9d..78bc9eed26b2 100644 --- a/include/asm-um/a.out.h +++ b/include/asm-um/a.out.h @@ -17,4 +17,6 @@ extern int honeypot; #define STACK_TOP \ CHOOSE_MODE((honeypot ? host_task_size : task_size), task_size) +#define STACK_TOP_MAX STACK_TOP + #endif diff --git a/include/asm-x86_64/a.out.h b/include/asm-x86_64/a.out.h index 7255cde06538..e789300e41a5 100644 --- a/include/asm-x86_64/a.out.h +++ b/include/asm-x86_64/a.out.h @@ -21,7 +21,8 @@ struct exec #ifdef __KERNEL__ #include <linux/thread_info.h> -#define STACK_TOP TASK_SIZE +#define STACK_TOP TASK_SIZE +#define STACK_TOP_MAX TASK_SIZE64 #endif #endif /* __A_OUT_GNU_H__ */ diff --git a/include/asm-x86_64/kprobes.h b/include/asm-x86_64/kprobes.h index cf5317898fb0..7db825403e01 100644 --- a/include/asm-x86_64/kprobes.h +++ b/include/asm-x86_64/kprobes.h @@ -41,7 +41,6 @@ typedef u8 kprobe_opcode_t; ? (MAX_STACK_SIZE) \ : (((unsigned long)current_thread_info()) + THREAD_SIZE - (ADDR))) -#define JPROBE_ENTRY(pentry) (kprobe_opcode_t *)pentry #define ARCH_SUPPORTS_KRETPROBES #define ARCH_INACTIVE_KPROBE_COUNT 1 diff --git a/include/asm-x86_64/percpu.h b/include/asm-x86_64/percpu.h index c6fbb67eac90..5abd48270101 100644 --- a/include/asm-x86_64/percpu.h +++ b/include/asm-x86_64/percpu.h @@ -20,6 +20,11 @@ #define DEFINE_PER_CPU(type, name) \ __attribute__((__section__(".data.percpu"))) __typeof__(type) per_cpu__##name +#define DEFINE_PER_CPU_SHARED_ALIGNED(type, name) \ + __attribute__((__section__(".data.percpu.shared_aligned"))) \ + __typeof__(type) per_cpu__##name \ + ____cacheline_internodealigned_in_smp + /* var is in discarded region: offset to particular copy we want */ #define per_cpu(var, cpu) (*({ \ extern int simple_identifier_##var(void); \ @@ -46,6 +51,8 @@ extern void setup_per_cpu_areas(void); #define DEFINE_PER_CPU(type, name) \ __typeof__(type) per_cpu__##name +#define DEFINE_PER_CPU_SHARED_ALIGNED(type, name) \ + DEFINE_PER_CPU(type, name) #define per_cpu(var, cpu) (*((void)(cpu), &per_cpu__##var)) #define __get_cpu_var(var) per_cpu__##var diff --git a/include/asm-x86_64/system.h b/include/asm-x86_64/system.h index ead9f9a56234..e4f246d62c46 100644 --- a/include/asm-x86_64/system.h +++ b/include/asm-x86_64/system.h @@ -111,15 +111,6 @@ static inline void write_cr4(unsigned long val) #define wbinvd() \ __asm__ __volatile__ ("wbinvd": : :"memory"); -/* - * On SMP systems, when the scheduler does migration-cost autodetection, - * it needs a way to flush as much of the CPU's caches as possible. - */ -static inline void sched_cacheflush(void) -{ - wbinvd(); -} - #endif /* __KERNEL__ */ #define nop() __asm__ __volatile__ ("nop") diff --git a/include/asm-xtensa/a.out.h b/include/asm-xtensa/a.out.h index ffc4dcfd6ac1..05a2f67c6768 100644 --- a/include/asm-xtensa/a.out.h +++ b/include/asm-xtensa/a.out.h @@ -17,6 +17,7 @@ /* Note: the kernel needs the a.out definitions, even if only ELF is used. 
*/ #define STACK_TOP TASK_SIZE +#define STACK_TOP_MAX STACK_TOP struct exec { diff --git a/include/linux/acpi.h b/include/linux/acpi.h index fccd8b548d93..dc234c508a6f 100644 --- a/include/linux/acpi.h +++ b/include/linux/acpi.h @@ -122,7 +122,7 @@ extern struct acpi_mcfg_allocation *pci_mmcfg_config; extern int pci_mmcfg_config_num; extern int sbf_port; -extern unsigned long acpi_video_flags; +extern unsigned long acpi_realmode_flags; #else /* !CONFIG_ACPI */ diff --git a/include/linux/aio.h b/include/linux/aio.h index b903fc02bdb7..d10e608f232d 100644 --- a/include/linux/aio.h +++ b/include/linux/aio.h @@ -86,7 +86,7 @@ struct kioctx; */ struct kiocb { struct list_head ki_run_list; - long ki_flags; + unsigned long ki_flags; int ki_users; unsigned ki_key; /* id of this request */ diff --git a/include/linux/binfmts.h b/include/linux/binfmts.h index e1a708337be3..91c8c07fe8b7 100644 --- a/include/linux/binfmts.h +++ b/include/linux/binfmts.h @@ -6,11 +6,13 @@ struct pt_regs; /* - * MAX_ARG_PAGES defines the number of pages allocated for arguments - * and envelope for the new program. 32 should suffice, this gives - * a maximum env+arg of 128kB w/4KB pages! + * These are the maximum length and maximum number of strings passed to the + * execve() system call. MAX_ARG_STRLEN is essentially random but serves to + * prevent the kernel from being unduly impacted by misaddressed pointers. + * MAX_ARG_STRINGS is chosen to fit in a signed 32-bit integer. */ -#define MAX_ARG_PAGES 32 +#define MAX_ARG_STRLEN (PAGE_SIZE * 32) +#define MAX_ARG_STRINGS 0x7FFFFFFF /* sizeof(linux_binprm->buf) */ #define BINPRM_BUF_SIZE 128 @@ -24,7 +26,12 @@ struct pt_regs; */ struct linux_binprm{ char buf[BINPRM_BUF_SIZE]; +#ifdef CONFIG_MMU + struct vm_area_struct *vma; +#else +# define MAX_ARG_PAGES 32 struct page *page[MAX_ARG_PAGES]; +#endif struct mm_struct *mm; unsigned long p; /* current top of mem */ int sh_bang; @@ -40,6 +47,7 @@ struct linux_binprm{ unsigned interp_flags; unsigned interp_data; unsigned long loader, exec; + unsigned long argv_len; }; #define BINPRM_FLAGS_ENFORCE_NONDUMP_BIT 0 @@ -68,7 +76,7 @@ extern int register_binfmt(struct linux_binfmt *); extern int unregister_binfmt(struct linux_binfmt *); extern int prepare_binprm(struct linux_binprm *); -extern void remove_arg_zero(struct linux_binprm *); +extern int __must_check remove_arg_zero(struct linux_binprm *); extern int search_binary_handler(struct linux_binprm *,struct pt_regs *); extern int flush_old_exec(struct linux_binprm * bprm); @@ -85,6 +93,7 @@ extern int suid_dumpable; extern int setup_arg_pages(struct linux_binprm * bprm, unsigned long stack_top, int executable_stack); +extern int bprm_mm_init(struct linux_binprm *bprm); extern int copy_strings_kernel(int argc,char ** argv,struct linux_binprm *bprm); extern void compute_creds(struct linux_binprm *binprm); extern int do_coredump(long signr, int exit_code, struct pt_regs * regs); diff --git a/include/linux/coda_linux.h b/include/linux/coda_linux.h index e4ac016ad272..c4079b403e9e 100644 --- a/include/linux/coda_linux.h +++ b/include/linux/coda_linux.h @@ -43,9 +43,6 @@ int coda_revalidate_inode(struct dentry *); int coda_getattr(struct vfsmount *, struct dentry *, struct kstat *); int coda_setattr(struct dentry *, struct iattr *); -/* global variables */ -extern int coda_fake_statfs; - /* this file: heloers */ static __inline__ struct CodaFid *coda_i2f(struct inode *); static __inline__ char *coda_i2s(struct inode *); diff --git a/include/linux/coda_proc.h b/include/linux/coda_proc.h 
deleted file mode 100644 index 0dc1b0458e75..000000000000 --- a/include/linux/coda_proc.h +++ /dev/null @@ -1,76 +0,0 @@ -/* - * coda_statis.h - * - * CODA operation statistics - * - * (c) March, 1998 - * by Michihiro Kuramochi, Zhenyu Xia and Zhanyong Wan - * zhanyong.wan@yale.edu - * - */ - -#ifndef _CODA_PROC_H -#define _CODA_PROC_H - -void coda_sysctl_init(void); -void coda_sysctl_clean(void); - -#include <linux/sysctl.h> -#include <linux/coda_fs_i.h> -#include <linux/coda.h> - -/* these four files are presented to show the result of the statistics: - * - * /proc/fs/coda/vfs_stats - * cache_inv_stats - * - * these four files are presented to reset the statistics to 0: - * - * /proc/sys/coda/vfs_stats - * cache_inv_stats - */ - -/* VFS operation statistics */ -struct coda_vfs_stats -{ - /* file operations */ - int open; - int flush; - int release; - int fsync; - - /* dir operations */ - int readdir; - - /* inode operations */ - int create; - int lookup; - int link; - int unlink; - int symlink; - int mkdir; - int rmdir; - int rename; - int permission; - - /* symlink operatoins*/ - int follow_link; - int readlink; -}; - -/* cache invalidation statistics */ -struct coda_cache_inv_stats -{ - int flush; - int purge_user; - int zap_dir; - int zap_file; - int zap_vnode; - int purge_fid; - int replace; -}; - -/* these global variables hold the actual statistics data */ -extern struct coda_vfs_stats coda_vfs_stat; - -#endif /* _CODA_PROC_H */ diff --git a/include/linux/coda_psdev.h b/include/linux/coda_psdev.h index b541bb3d1f4b..aa8f454b3b77 100644 --- a/include/linux/coda_psdev.h +++ b/include/linux/coda_psdev.h @@ -8,11 +8,6 @@ struct kstatfs; -struct coda_sb_info -{ - struct venus_comm *sbi_vcomm; -}; - /* communication pending/processing queues */ struct venus_comm { u_long vc_seq; @@ -24,9 +19,9 @@ struct venus_comm { }; -static inline struct coda_sb_info *coda_sbp(struct super_block *sb) +static inline struct venus_comm *coda_vcp(struct super_block *sb) { - return ((struct coda_sb_info *)((sb)->s_fs_info)); + return (struct venus_comm *)((sb)->s_fs_info); } @@ -74,8 +69,6 @@ int venus_statfs(struct dentry *dentry, struct kstatfs *sfs); /* messages between coda filesystem in kernel and Venus */ -extern int coda_hard; -extern unsigned long coda_timeout; struct upc_req { struct list_head uc_chain; caddr_t uc_data; @@ -85,7 +78,6 @@ struct upc_req { u_short uc_opcode; /* copied from data to save lookup */ int uc_unique; wait_queue_head_t uc_sleep; /* process' wait queue */ - unsigned long uc_posttime; }; #define REQ_ASYNC 0x1 diff --git a/include/linux/edac.h b/include/linux/edac.h new file mode 100644 index 000000000000..eab451e69a91 --- /dev/null +++ b/include/linux/edac.h @@ -0,0 +1,29 @@ +/* + * Generic EDAC defs + * + * Author: Dave Jiang <djiang@mvista.com> + * + * 2006-2007 (c) MontaVista Software, Inc. This file is licensed under + * the terms of the GNU General Public License version 2. This program + * is licensed "as is" without any warranty of any kind, whether express + * or implied. 
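
The coda_psdev.h hunk above drops the coda_sb_info indirection: the venus_comm now lives directly in sb->s_fs_info and is fetched with coda_vcp(). A hedged sketch of the mount-side pairing (sketch_attach_vcp() is illustrative):

static void sketch_attach_vcp(struct super_block *sb,
                              struct venus_comm *vcp)
{
        sb->s_fs_info = vcp;    /* coda_vcp(sb) now returns vcp */
}
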
+ * + */ +#ifndef _LINUX_EDAC_H_ +#define _LINUX_EDAC_H_ + +#include <asm/atomic.h> + +#define EDAC_OPSTATE_INVAL -1 +#define EDAC_OPSTATE_POLL 0 +#define EDAC_OPSTATE_NMI 1 +#define EDAC_OPSTATE_INT 2 + +extern int edac_op_state; +extern int edac_err_assert; +extern atomic_t edac_handlers; + +extern int edac_handler_set(void); +extern void edac_atomic_assert_error(void); + +#endif diff --git a/include/linux/freezer.h b/include/linux/freezer.h index 2d38b1a74662..c8e02de737f6 100644 --- a/include/linux/freezer.h +++ b/include/linux/freezer.h @@ -25,7 +25,7 @@ static inline int freezing(struct task_struct *p) /* * Request that a process be frozen */ -static inline void freeze(struct task_struct *p) +static inline void set_freeze_flag(struct task_struct *p) { set_tsk_thread_flag(p, TIF_FREEZE); } @@ -33,7 +33,7 @@ static inline void freeze(struct task_struct *p) /* * Sometimes we may need to cancel the previous 'freeze' request */ -static inline void do_not_freeze(struct task_struct *p) +static inline void clear_freeze_flag(struct task_struct *p) { clear_tsk_thread_flag(p, TIF_FREEZE); } @@ -56,7 +56,7 @@ static inline int thaw_process(struct task_struct *p) wake_up_process(p); return 1; } - clear_tsk_thread_flag(p, TIF_FREEZE); + clear_freeze_flag(p); task_unlock(p); return 0; } @@ -129,7 +129,8 @@ static inline void set_freezable(void) #else static inline int frozen(struct task_struct *p) { return 0; } static inline int freezing(struct task_struct *p) { return 0; } -static inline void freeze(struct task_struct *p) { BUG(); } +static inline void set_freeze_flag(struct task_struct *p) {} +static inline void clear_freeze_flag(struct task_struct *p) {} static inline int thaw_process(struct task_struct *p) { return 1; } static inline void refrigerator(void) {} diff --git a/include/linux/fs.h b/include/linux/fs.h index 9562a59b3703..d33beadd9a43 100644 --- a/include/linux/fs.h +++ b/include/linux/fs.h @@ -697,20 +697,26 @@ struct fown_struct { * Track a single file's readahead state */ struct file_ra_state { - unsigned long start; /* Current window */ - unsigned long size; - unsigned long flags; /* ra flags RA_FLAG_xxx*/ - unsigned long cache_hit; /* cache hit count*/ - unsigned long prev_index; /* Cache last read() position */ - unsigned long ahead_start; /* Ahead window */ - unsigned long ahead_size; + pgoff_t start; /* where readahead started */ + unsigned long size; /* # of readahead pages */ + unsigned long async_size; /* do asynchronous readahead when + there are only # of pages ahead */ + unsigned long ra_pages; /* Maximum readahead window */ unsigned long mmap_hit; /* Cache hit stat for mmap accesses */ unsigned long mmap_miss; /* Cache miss stat for mmap accesses */ + unsigned long prev_index; /* Cache last read() position */ unsigned int prev_offset; /* Offset where last read() ended in a page */ }; -#define RA_FLAG_MISS 0x01 /* a cache miss occured against this file */ -#define RA_FLAG_INCACHE 0x02 /* file is already in cache */ + +/* + * Check if @index falls in the readahead windows. 
+ */ +static inline int ra_has_index(struct file_ra_state *ra, pgoff_t index) +{ + return (index >= ra->start && + index < ra->start + ra->size); +} struct file { /* @@ -1463,7 +1469,7 @@ extern int alloc_chrdev_region(dev_t *, unsigned, unsigned, const char *); extern int register_chrdev_region(dev_t, unsigned, const char *); extern int register_chrdev(unsigned int, const char *, const struct file_operations *); -extern int unregister_chrdev(unsigned int, const char *); +extern void unregister_chrdev(unsigned int, const char *); extern void unregister_chrdev_region(dev_t, unsigned); extern int chrdev_open(struct inode *, struct file *); extern void chrdev_show(struct seq_file *,off_t); diff --git a/include/linux/genetlink.h b/include/linux/genetlink.h index f7a93770e1be..7da02c93002b 100644 --- a/include/linux/genetlink.h +++ b/include/linux/genetlink.h @@ -39,6 +39,9 @@ enum { CTRL_CMD_NEWOPS, CTRL_CMD_DELOPS, CTRL_CMD_GETOPS, + CTRL_CMD_NEWMCAST_GRP, + CTRL_CMD_DELMCAST_GRP, + CTRL_CMD_GETMCAST_GRP, /* unused */ __CTRL_CMD_MAX, }; @@ -52,6 +55,7 @@ enum { CTRL_ATTR_HDRSIZE, CTRL_ATTR_MAXATTR, CTRL_ATTR_OPS, + CTRL_ATTR_MCAST_GROUPS, __CTRL_ATTR_MAX, }; @@ -66,4 +70,13 @@ enum { #define CTRL_ATTR_OP_MAX (__CTRL_ATTR_OP_MAX - 1) +enum { + CTRL_ATTR_MCAST_GRP_UNSPEC, + CTRL_ATTR_MCAST_GRP_NAME, + CTRL_ATTR_MCAST_GRP_ID, + __CTRL_ATTR_MCAST_GRP_MAX, +}; + +#define CTRL_ATTR_MCAST_GRP_MAX (__CTRL_ATTR_MCAST_GRP_MAX - 1) + #endif /* __LINUX_GENERIC_NETLINK_H */ diff --git a/include/linux/highmem.h b/include/linux/highmem.h index 12c5e4e3135a..1fcb0033179e 100644 --- a/include/linux/highmem.h +++ b/include/linux/highmem.h @@ -103,21 +103,6 @@ __alloc_zeroed_user_highpage(gfp_t movableflags, #endif /** - * alloc_zeroed_user_highpage - Allocate a zeroed HIGHMEM page for a VMA - * @vma: The VMA the page is to be allocated for - * @vaddr: The virtual address the page will be inserted into - * - * This function will allocate a page for a VMA that the caller knows will - * not be able to move in the future using move_pages() or reclaim. If it - * is known that the page can move, use alloc_zeroed_user_highpage_movable - */ -static inline struct page * -alloc_zeroed_user_highpage(struct vm_area_struct *vma, unsigned long vaddr) -{ - return __alloc_zeroed_user_highpage(0, vma, vaddr); -} - -/** * alloc_zeroed_user_highpage_movable - Allocate a zeroed HIGHMEM page for a VMA that the caller knows can move * @vma: The VMA the page is to be allocated for * @vaddr: The virtual address the page will be inserted into diff --git a/include/linux/i2c-isa.h b/include/linux/i2c-isa.h deleted file mode 100644 index 67e3598c4cec..000000000000 --- a/include/linux/i2c-isa.h +++ /dev/null @@ -1,36 +0,0 @@ -/* - * i2c-isa.h - definitions for the i2c-isa pseudo-i2c-adapter interface - * - * Copyright (C) 2005 Jean Delvare <khali@linux-fr.org> - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. 
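
ra_has_index(), added with the reworked struct file_ra_state above, is the query readahead consumers need against the new window layout. A hedged usage sketch (page_in_ra_window() is an illustrative wrapper):

/* True if @index is already covered by the file's current readahead
 * window [ra->start, ra->start + ra->size). */
static int page_in_ra_window(struct file *filp, pgoff_t index)
{
        return ra_has_index(&filp->f_ra, index);
}
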
- * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. - */ - -#ifndef _LINUX_I2C_ISA_H -#define _LINUX_I2C_ISA_H - -#include <linux/i2c.h> - -extern int i2c_isa_add_driver(struct i2c_driver *driver); -extern int i2c_isa_del_driver(struct i2c_driver *driver); - -/* Detect whether we are on the isa bus. This is only useful to hybrid - (i2c+isa) drivers. */ -#define i2c_is_isa_adapter(adapptr) \ - ((adapptr)->id == I2C_HW_ISA) -#define i2c_is_isa_client(clientptr) \ - i2c_is_isa_adapter((clientptr)->adapter) - -#endif /* _LINUX_I2C_ISA_H */ diff --git a/include/linux/i2c.h b/include/linux/i2c.h index 2eaba21b9b1a..0c37a737a2b2 100644 --- a/include/linux/i2c.h +++ b/include/linux/i2c.h @@ -368,7 +368,6 @@ struct i2c_client_address_data { /* The numbers to use to set I2C bus address */ #define ANY_I2C_BUS 0xffff -#define ANY_I2C_ISA_BUS 9191 /* ----- functions exported by i2c.o */ diff --git a/include/linux/kprobes.h b/include/linux/kprobes.h index 23adf6075ae4..51464d12a4e5 100644 --- a/include/linux/kprobes.h +++ b/include/linux/kprobes.h @@ -116,9 +116,12 @@ struct kprobe { */ struct jprobe { struct kprobe kp; - kprobe_opcode_t *entry; /* probe handling code to jump to */ + void *entry; /* probe handling code to jump to */ }; +/* For backward compatibility with old code using JPROBE_ENTRY() */ +#define JPROBE_ENTRY(handler) (handler) + DECLARE_PER_CPU(struct kprobe *, current_kprobe); DECLARE_PER_CPU(struct kprobe_ctlblk, kprobe_ctlblk); @@ -211,6 +214,7 @@ int longjmp_break_handler(struct kprobe *, struct pt_regs *); int register_jprobe(struct jprobe *p); void unregister_jprobe(struct jprobe *p); void jprobe_return(void); +unsigned long arch_deref_entry_point(void *); int register_kretprobe(struct kretprobe *rp); void unregister_kretprobe(struct kretprobe *rp); diff --git a/include/linux/lguest.h b/include/linux/lguest.h new file mode 100644 index 000000000000..500aace21ca7 --- /dev/null +++ b/include/linux/lguest.h @@ -0,0 +1,85 @@ +/* Things the lguest guest needs to know. Note: like all lguest interfaces, + * this is subject to wild and random change between versions. */ +#ifndef _ASM_LGUEST_H +#define _ASM_LGUEST_H + +#ifndef __ASSEMBLY__ +#include <asm/irq.h> + +#define LHCALL_FLUSH_ASYNC 0 +#define LHCALL_LGUEST_INIT 1 +#define LHCALL_CRASH 2 +#define LHCALL_LOAD_GDT 3 +#define LHCALL_NEW_PGTABLE 4 +#define LHCALL_FLUSH_TLB 5 +#define LHCALL_LOAD_IDT_ENTRY 6 +#define LHCALL_SET_STACK 7 +#define LHCALL_TS 8 +#define LHCALL_SET_CLOCKEVENT 9 +#define LHCALL_HALT 10 +#define LHCALL_GET_WALLCLOCK 11 +#define LHCALL_BIND_DMA 12 +#define LHCALL_SEND_DMA 13 +#define LHCALL_SET_PTE 14 +#define LHCALL_SET_PMD 15 +#define LHCALL_LOAD_TLS 16 + +#define LG_CLOCK_MIN_DELTA 100UL +#define LG_CLOCK_MAX_DELTA ULONG_MAX + +#define LGUEST_TRAP_ENTRY 0x1F + +static inline unsigned long +hcall(unsigned long call, + unsigned long arg1, unsigned long arg2, unsigned long arg3) +{ + asm volatile("int $" __stringify(LGUEST_TRAP_ENTRY) + : "=a"(call) + : "a"(call), "d"(arg1), "b"(arg2), "c"(arg3) + : "memory"); + return call; +} + +void async_hcall(unsigned long call, + unsigned long arg1, unsigned long arg2, unsigned long arg3); + +/* Can't use our min() macro here: needs to be a constant */ +#define LGUEST_IRQS (NR_IRQS < 32 ? 
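
With jprobe.entry relaxed to a plain void * in the linux/kprobes.h hunk above, handlers can be assigned directly; JPROBE_ENTRY() survives only as an identity macro for old callers. A hedged registration sketch (the handler name and probed symbol are illustrative; a real handler must mirror the probed function's full prototype, abbreviated here):

static long sketch_do_fork_handler(unsigned long clone_flags,
                                   unsigned long stack_start)
{
        /* inspect arguments here; a jprobe handler must end with */
        jprobe_return();
        return 0;
}

static struct jprobe sketch_jprobe = {
        .entry = sketch_do_fork_handler,        /* no cast needed */
        .kp = { .symbol_name = "do_fork" },
};
/* register_jprobe(&sketch_jprobe) from module init */
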
NR_IRQS: 32) + +#define LHCALL_RING_SIZE 64 +struct hcall_ring +{ + u32 eax, edx, ebx, ecx; +}; + +/* All the good stuff happens here: guest registers it with LGUEST_INIT */ +struct lguest_data +{ +/* Fields which change during running: */ + /* 512 == enabled (same as eflags) */ + unsigned int irq_enabled; + /* Interrupts blocked by guest. */ + DECLARE_BITMAP(blocked_interrupts, LGUEST_IRQS); + + /* Virtual address of page fault. */ + unsigned long cr2; + + /* Async hypercall ring. 0xFF == done, 0 == pending. */ + u8 hcall_status[LHCALL_RING_SIZE]; + struct hcall_ring hcalls[LHCALL_RING_SIZE]; + +/* Fields initialized by the hypervisor at boot: */ + /* Memory not to try to access */ + unsigned long reserve_mem; + /* ID of this guest (used by network driver to set ethernet address) */ + u16 guestid; + /* KHz for the TSC clock. */ + u32 tsc_khz; + +/* Fields initialized by the guest at boot: */ + /* Instruction range to suppress interrupts even if enabled */ + unsigned long noirq_start, noirq_end; +}; +extern struct lguest_data lguest_data; +#endif /* __ASSEMBLY__ */ +#endif /* _ASM_LGUEST_H */ diff --git a/include/linux/lguest_bus.h b/include/linux/lguest_bus.h new file mode 100644 index 000000000000..c9b4e05fee49 --- /dev/null +++ b/include/linux/lguest_bus.h @@ -0,0 +1,48 @@ +#ifndef _ASM_LGUEST_DEVICE_H +#define _ASM_LGUEST_DEVICE_H +/* Everything you need to know about lguest devices. */ +#include <linux/device.h> +#include <linux/lguest.h> +#include <linux/lguest_launcher.h> + +struct lguest_device { + /* Unique busid, and index into lguest_page->devices[] */ + unsigned int index; + + struct device dev; + + /* Driver can hang data off here. */ + void *private; +}; + +/* By convention, each device can use irq index+1 if it wants to. */ +static inline int lgdev_irq(const struct lguest_device *dev) +{ + return dev->index + 1; +} + +/* dma args must not be vmalloced! */ +void lguest_send_dma(unsigned long key, struct lguest_dma *dma); +int lguest_bind_dma(unsigned long key, struct lguest_dma *dmas, + unsigned int num, u8 irq); +void lguest_unbind_dma(unsigned long key, struct lguest_dma *dmas); + +/* Map the virtual device space */ +void *lguest_map(unsigned long phys_addr, unsigned long pages); +void lguest_unmap(void *); + +struct lguest_driver { + const char *name; + struct module *owner; + u16 device_type; + int (*probe)(struct lguest_device *dev); + void (*remove)(struct lguest_device *dev); + + struct device_driver drv; +}; + +extern int register_lguest_driver(struct lguest_driver *drv); +extern void unregister_lguest_driver(struct lguest_driver *drv); + +extern struct lguest_device_desc *lguest_devices; /* Just past max_pfn */ +#endif /* _ASM_LGUEST_DEVICE_H */ diff --git a/include/linux/lguest_launcher.h b/include/linux/lguest_launcher.h new file mode 100644 index 000000000000..0ba414a40c80 --- /dev/null +++ b/include/linux/lguest_launcher.h @@ -0,0 +1,73 @@ +#ifndef _ASM_LGUEST_USER +#define _ASM_LGUEST_USER +/* Everything the "lguest" userspace program needs to know. */ +/* They can register up to 32 arrays of lguest_dma. */ +#define LGUEST_MAX_DMA 32 +/* At most we can dma 16 lguest_dma in one op. */ +#define LGUEST_MAX_DMA_SECTIONS 16 + +/* How many devices? Assume each one wants up to two dma arrays per device. */ +#define LGUEST_MAX_DEVICES (LGUEST_MAX_DMA/2) + +struct lguest_dma +{ + /* 0 if free to be used, filled by hypervisor. 
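
The new lguest_bus.h defines the whole guest-side driver contract. A hedged sketch of registering a driver against it (the demo names are illustrative):

static int demo_probe(struct lguest_device *lgdev)
{
        /* by convention this device may use irq lgdev_irq(lgdev) */
        return 0;
}

static struct lguest_driver demo_driver = {
        .name           = "demo",
        .owner          = THIS_MODULE,
        .device_type    = LGUEST_DEVICE_T_CONSOLE,
        .probe          = demo_probe,
};

static int __init demo_init(void)
{
        return register_lguest_driver(&demo_driver);
}
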
*/ + u32 used_len; + unsigned long addr[LGUEST_MAX_DMA_SECTIONS]; + u16 len[LGUEST_MAX_DMA_SECTIONS]; +}; + +struct lguest_block_page +{ + /* 0 is a read, 1 is a write. */ + int type; + u32 sector; /* Offset in device = sector * 512. */ + u32 bytes; /* Length expected to be read/written in bytes */ + /* 0 = pending, 1 = done, 2 = done, error */ + int result; + u32 num_sectors; /* Disk length = num_sectors * 512 */ +}; + +/* There is a shared page of these. */ +struct lguest_net +{ + /* Simply the mac address (with multicast bit meaning promisc). */ + unsigned char mac[6]; +}; + +/* Where the Host expects the Guest to SEND_DMA console output to. */ +#define LGUEST_CONSOLE_DMA_KEY 0 + +/* We have a page of these descriptors in the lguest_device page. */ +struct lguest_device_desc { + u16 type; +#define LGUEST_DEVICE_T_CONSOLE 1 +#define LGUEST_DEVICE_T_NET 2 +#define LGUEST_DEVICE_T_BLOCK 3 + + u16 features; +#define LGUEST_NET_F_NOCSUM 0x4000 /* Don't bother checksumming */ +#define LGUEST_DEVICE_F_RANDOMNESS 0x8000 /* IRQ is fairly random */ + + u16 status; +/* 256 and above are device specific. */ +#define LGUEST_DEVICE_S_ACKNOWLEDGE 1 /* We have seen device. */ +#define LGUEST_DEVICE_S_DRIVER 2 /* We have found a driver */ +#define LGUEST_DEVICE_S_DRIVER_OK 4 /* Driver says OK! */ +#define LGUEST_DEVICE_S_REMOVED 8 /* Device has gone away. */ +#define LGUEST_DEVICE_S_REMOVED_ACK 16 /* Driver has been told. */ +#define LGUEST_DEVICE_S_FAILED 128 /* Something actually failed */ + + u16 num_pages; + u32 pfn; +}; + +/* Write command first word is a request. */ +enum lguest_req +{ + LHREQ_INITIALIZE, /* + pfnlimit, pgdir, start, pageoffset */ + LHREQ_GETDMA, /* + addr (returns &lguest_dma, irq in ->used_len) */ + LHREQ_IRQ, /* + irq */ + LHREQ_BREAK, /* + on/off flag (on blocks until someone does off) */ +}; +#endif /* _ASM_LGUEST_USER */ diff --git a/include/linux/lockdep.h b/include/linux/lockdep.h index 14c937d345cb..0e843bf65877 100644 --- a/include/linux/lockdep.h +++ b/include/linux/lockdep.h @@ -1,7 +1,8 @@ /* * Runtime locking correctness validator * - * Copyright (C) 2006 Red Hat, Inc., Ingo Molnar <mingo@redhat.com> + * Copyright (C) 2006,2007 Red Hat, Inc., Ingo Molnar <mingo@redhat.com> + * Copyright (C) 2007 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com> * * see Documentation/lockdep-design.txt for more details. */ @@ -9,6 +10,7 @@ #define __LINUX_LOCKDEP_H struct task_struct; +struct lockdep_map; #ifdef CONFIG_LOCKDEP @@ -114,8 +116,44 @@ struct lock_class { const char *name; int name_version; + +#ifdef CONFIG_LOCK_STAT + unsigned long contention_point[4]; +#endif +}; + +#ifdef CONFIG_LOCK_STAT +struct lock_time { + s64 min; + s64 max; + s64 total; + unsigned long nr; +}; + +enum bounce_type { + bounce_acquired_write, + bounce_acquired_read, + bounce_contended_write, + bounce_contended_read, + nr_bounce_types, + + bounce_acquired = bounce_acquired_write, + bounce_contended = bounce_contended_write, }; +struct lock_class_stats { + unsigned long contention_point[4]; + struct lock_time read_waittime; + struct lock_time write_waittime; + struct lock_time read_holdtime; + struct lock_time write_holdtime; + unsigned long bounces[nr_bounce_types]; +}; + +struct lock_class_stats lock_stats(struct lock_class *class); +void clear_lock_stats(struct lock_class *class); +#endif + /* * Map the lock object (the lock instance) to the lock-class object. 
* This is embedded into specific lock instances: @@ -124,6 +162,9 @@ struct lockdep_map { struct lock_class_key *key; struct lock_class *class_cache; const char *name; +#ifdef CONFIG_LOCK_STAT + int cpu; +#endif }; /* @@ -165,6 +206,10 @@ struct held_lock { unsigned long acquire_ip; struct lockdep_map *instance; +#ifdef CONFIG_LOCK_STAT + u64 waittime_stamp; + u64 holdtime_stamp; +#endif /* * The lock-stack is unified in that the lock chains of interrupt * contexts nest on top of process context chains, but we 'separate' @@ -281,6 +326,30 @@ struct lock_class_key { }; #endif /* !LOCKDEP */ +#ifdef CONFIG_LOCK_STAT + +extern void lock_contended(struct lockdep_map *lock, unsigned long ip); +extern void lock_acquired(struct lockdep_map *lock); + +#define LOCK_CONTENDED(_lock, try, lock) \ +do { \ + if (!try(_lock)) { \ + lock_contended(&(_lock)->dep_map, _RET_IP_); \ + lock(_lock); \ + } \ + lock_acquired(&(_lock)->dep_map); \ +} while (0) + +#else /* CONFIG_LOCK_STAT */ + +#define lock_contended(lockdep_map, ip) do {} while (0) +#define lock_acquired(lockdep_map) do {} while (0) + +#define LOCK_CONTENDED(_lock, try, lock) \ + lock(_lock) + +#endif /* CONFIG_LOCK_STAT */ + #if defined(CONFIG_TRACE_IRQFLAGS) && defined(CONFIG_GENERIC_HARDIRQS) extern void early_init_irq_lock_class(void); #else diff --git a/include/linux/mm.h b/include/linux/mm.h index a5c451816fdc..c456c3a1c28e 100644 --- a/include/linux/mm.h +++ b/include/linux/mm.h @@ -168,6 +168,8 @@ extern unsigned int kobjsize(const void *objp); #define VM_INSERTPAGE 0x02000000 /* The vma has had "vm_insert_page()" done on it */ #define VM_ALWAYSDUMP 0x04000000 /* Always include in core dumps */ +#define VM_CAN_NONLINEAR 0x08000000 /* Has ->fault & does nonlinear pages */ + #ifndef VM_STACK_DEFAULT_FLAGS /* arch can override this */ #define VM_STACK_DEFAULT_FLAGS VM_DATA_DEFAULT_FLAGS #endif @@ -190,6 +192,30 @@ extern unsigned int kobjsize(const void *objp); */ extern pgprot_t protection_map[16]; +#define FAULT_FLAG_WRITE 0x01 /* Fault was a write access */ +#define FAULT_FLAG_NONLINEAR 0x02 /* Fault was via a nonlinear mapping */ + + +/* + * vm_fault is filled by the pagefault handler and passed to the vma's + * ->fault function. The vma's ->fault is responsible for returning a bitmask + * of VM_FAULT_xxx flags that give details about how the fault was handled. + * + * pgoff should be used in favour of virtual_address, if possible. If pgoff + * is used, one may set VM_CAN_NONLINEAR in the vma->vm_flags to get nonlinear + * mapping support. + */ +struct vm_fault { + unsigned int flags; /* FAULT_FLAG_xxx flags */ + pgoff_t pgoff; /* Logical page offset based on vma */ + void __user *virtual_address; /* Faulting virtual address */ + + struct page *page; /* ->fault handlers should return a + * page here, unless VM_FAULT_NOPAGE + * is set (which is also implied by + * VM_FAULT_ERROR). 
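
Stepping back to the LOCK_CONTENDED() macro above: it is the hook lock wrappers are expected to use, trying the fast path first and recording a contention event (with the caller's IP) only when the trylock fails. A sketch of how a spinlock wrapper might adopt it, assuming the usual _raw_spin_trylock/_raw_spin_lock primitives and the lockdep spin_acquire() annotation:

void __lockfunc _spin_lock(spinlock_t *lock)
{
	preempt_disable();
	spin_acquire(&lock->dep_map, 0, 0, _RET_IP_);
	/* Uncontended case costs one trylock; the slow path is logged. */
	LOCK_CONTENDED(lock, _raw_spin_trylock, _raw_spin_lock);
}
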
+ */ +}; /* * These are the virtual MM functions - opening of an area, closing and @@ -199,9 +225,11 @@ extern pgprot_t protection_map[16]; struct vm_operations_struct { void (*open)(struct vm_area_struct * area); void (*close)(struct vm_area_struct * area); - struct page * (*nopage)(struct vm_area_struct * area, unsigned long address, int *type); - unsigned long (*nopfn)(struct vm_area_struct * area, unsigned long address); - int (*populate)(struct vm_area_struct * area, unsigned long address, unsigned long len, pgprot_t prot, unsigned long pgoff, int nonblock); + int (*fault)(struct vm_area_struct *vma, struct vm_fault *vmf); + struct page *(*nopage)(struct vm_area_struct *area, + unsigned long address, int *type); + unsigned long (*nopfn)(struct vm_area_struct *area, + unsigned long address); /* notification that a previously read-only page is about to become * writable, if an error is returned it will cause a SIGBUS */ @@ -655,7 +683,6 @@ static inline int page_mapped(struct page *page) */ #define NOPAGE_SIGBUS (NULL) #define NOPAGE_OOM ((struct page *) (-1)) -#define NOPAGE_REFAULT ((struct page *) (-2)) /* Return to userspace, rerun */ /* * Error return values for the *_nopfn functions @@ -669,16 +696,18 @@ static inline int page_mapped(struct page *page) * Used to decide whether a process gets delivered SIGBUS or * just gets major/minor fault counters bumped up. */ -#define VM_FAULT_OOM 0x00 -#define VM_FAULT_SIGBUS 0x01 -#define VM_FAULT_MINOR 0x02 -#define VM_FAULT_MAJOR 0x03 - -/* - * Special case for get_user_pages. - * Must be in a distinct bit from the above VM_FAULT_ flags. - */ -#define VM_FAULT_WRITE 0x10 + +#define VM_FAULT_MINOR 0 /* For backwards compat. Remove me quickly. */ + +#define VM_FAULT_OOM 0x0001 +#define VM_FAULT_SIGBUS 0x0002 +#define VM_FAULT_MAJOR 0x0004 +#define VM_FAULT_WRITE 0x0008 /* Special case for get_user_pages */ + +#define VM_FAULT_NOPAGE 0x0100 /* ->fault installed the pte, not return page */ +#define VM_FAULT_LOCKED 0x0200 /* ->fault locked the returned page */ + +#define VM_FAULT_ERROR (VM_FAULT_OOM | VM_FAULT_SIGBUS) #define offset_in_page(p) ((unsigned long)(p) & ~PAGE_MASK) @@ -762,20 +791,10 @@ static inline void unmap_shared_mapping_range(struct address_space *mapping, extern int vmtruncate(struct inode * inode, loff_t offset); extern int vmtruncate_range(struct inode * inode, loff_t offset, loff_t end); -extern int install_page(struct mm_struct *mm, struct vm_area_struct *vma, unsigned long addr, struct page *page, pgprot_t prot); -extern int install_file_pte(struct mm_struct *mm, struct vm_area_struct *vma, unsigned long addr, unsigned long pgoff, pgprot_t prot); #ifdef CONFIG_MMU -extern int __handle_mm_fault(struct mm_struct *mm,struct vm_area_struct *vma, +extern int handle_mm_fault(struct mm_struct *mm, struct vm_area_struct *vma, unsigned long address, int write_access); - -static inline int handle_mm_fault(struct mm_struct *mm, - struct vm_area_struct *vma, unsigned long address, - int write_access) -{ - return __handle_mm_fault(mm, vma, address, write_access) & - (~VM_FAULT_WRITE); -} #else static inline int handle_mm_fault(struct mm_struct *mm, struct vm_area_struct *vma, unsigned long address, @@ -789,7 +808,6 @@ static inline int handle_mm_fault(struct mm_struct *mm, extern int make_pages_present(unsigned long addr, unsigned long end); extern int access_process_vm(struct task_struct *tsk, unsigned long addr, void *buf, int len, int write); -void install_arg_page(struct vm_area_struct *, struct page *, unsigned long); int 
get_user_pages(struct task_struct *tsk, struct mm_struct *mm, unsigned long start, int len, int write, int force, struct page **pages, struct vm_area_struct **vmas); @@ -806,9 +824,15 @@ int FASTCALL(set_page_dirty(struct page *page)); int set_page_dirty_lock(struct page *page); int clear_page_dirty_for_io(struct page *page); +extern unsigned long move_page_tables(struct vm_area_struct *vma, + unsigned long old_addr, struct vm_area_struct *new_vma, + unsigned long new_addr, unsigned long len); extern unsigned long do_mremap(unsigned long addr, unsigned long old_len, unsigned long new_len, unsigned long flags, unsigned long new_addr); +extern int mprotect_fixup(struct vm_area_struct *vma, + struct vm_area_struct **pprev, unsigned long start, + unsigned long end, unsigned long newflags); /* * A callback you can register to apply pressure to ageable caches. @@ -1104,9 +1128,7 @@ extern void truncate_inode_pages_range(struct address_space *, loff_t lstart, loff_t lend); /* generic vm_area_ops exported for stackable file systems */ -extern struct page *filemap_nopage(struct vm_area_struct *, unsigned long, int *); -extern int filemap_populate(struct vm_area_struct *, unsigned long, - unsigned long, pgprot_t, unsigned long, int); +extern int filemap_fault(struct vm_area_struct *, struct vm_fault *); /* mm/page-writeback.c */ int write_one_page(struct page *page, int wait); @@ -1121,13 +1143,20 @@ int do_page_cache_readahead(struct address_space *mapping, struct file *filp, pgoff_t offset, unsigned long nr_to_read); int force_page_cache_readahead(struct address_space *mapping, struct file *filp, pgoff_t offset, unsigned long nr_to_read); -unsigned long page_cache_readahead(struct address_space *mapping, - struct file_ra_state *ra, - struct file *filp, - pgoff_t offset, - unsigned long size); -void handle_ra_miss(struct address_space *mapping, - struct file_ra_state *ra, pgoff_t offset); + +void page_cache_sync_readahead(struct address_space *mapping, + struct file_ra_state *ra, + struct file *filp, + pgoff_t offset, + unsigned long size); + +void page_cache_async_readahead(struct address_space *mapping, + struct file_ra_state *ra, + struct file *filp, + struct page *pg, + pgoff_t offset, + unsigned long size); + unsigned long max_sane_readahead(unsigned long nr); /* Do stack extension */ @@ -1135,6 +1164,8 @@ extern int expand_stack(struct vm_area_struct *vma, unsigned long address); #ifdef CONFIG_IA64 extern int expand_upwards(struct vm_area_struct *vma, unsigned long address); #endif +extern int expand_stack_downwards(struct vm_area_struct *vma, + unsigned long address); /* Look up the first VMA which satisfies addr < vm_end, NULL if none. 
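
The switch from ->nopage/->populate to ->fault above changes the handler contract: the handler fills vmf->page and reports the outcome through VM_FAULT_* bits instead of magic NOPAGE_* pointer values. A hedged sketch of a handler under the new API (my_get_page() is a stand-in for whatever lookup a driver performs, assumed to return a referenced page or NULL):

static int my_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct page *page;

	/* Prefer vmf->pgoff over vmf->virtual_address, as documented. */
	page = my_get_page(vma->vm_file, vmf->pgoff);
	if (!page)
		return VM_FAULT_SIGBUS;

	vmf->page = page;	/* The core VM consumes this reference. */
	return 0;		/* No VM_FAULT_MAJOR: counts as a minor fault. */
}

static struct vm_operations_struct my_vm_ops = {
	.fault	= my_fault,
};
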
*/ extern struct vm_area_struct * find_vma(struct mm_struct * mm, unsigned long addr); diff --git a/include/linux/namei.h b/include/linux/namei.h index b7dd24917f0d..6c38efbd810f 100644 --- a/include/linux/namei.h +++ b/include/linux/namei.h @@ -69,8 +69,8 @@ extern int FASTCALL(__user_walk_fd(int dfd, const char __user *, unsigned, struc #define user_path_walk_link(name,nd) \ __user_walk_fd(AT_FDCWD, name, 0, nd) extern int FASTCALL(path_lookup(const char *, unsigned, struct nameidata *)); -extern int FASTCALL(path_walk(const char *, struct nameidata *)); -extern int FASTCALL(link_path_walk(const char *, struct nameidata *)); +extern int vfs_path_lookup(struct dentry *, struct vfsmount *, + const char *, unsigned int, struct nameidata *); extern void path_release(struct nameidata *); extern void path_release_on_umount(struct nameidata *); diff --git a/include/linux/netlink.h b/include/linux/netlink.h index 2e23353c28a5..83d8239f0cce 100644 --- a/include/linux/netlink.h +++ b/include/linux/netlink.h @@ -161,6 +161,8 @@ extern struct sock *netlink_kernel_create(int unit, unsigned int groups, void (*input)(struct sock *sk, int len), struct mutex *cb_mutex, struct module *module); +extern int netlink_change_ngroups(struct sock *sk, unsigned int groups); +extern void netlink_clear_multicast_users(struct sock *sk, unsigned int group); extern void netlink_ack(struct sk_buff *in_skb, struct nlmsghdr *nlh, int err); extern int netlink_has_listeners(struct sock *sk, unsigned int group); extern int netlink_unicast(struct sock *ssk, struct sk_buff *skb, __u32 pid, int nonblock); diff --git a/include/linux/nfs_fs.h b/include/linux/nfs_fs.h index c098ae194f79..9ba4aec37c50 100644 --- a/include/linux/nfs_fs.h +++ b/include/linux/nfs_fs.h @@ -407,8 +407,8 @@ extern void nfs_release_automount_timer(void); /* * linux/fs/nfs/unlink.c */ -extern int nfs_async_unlink(struct dentry *); -extern void nfs_complete_unlink(struct dentry *); +extern int nfs_async_unlink(struct inode *dir, struct dentry *dentry); +extern void nfs_complete_unlink(struct dentry *dentry, struct inode *); /* * linux/fs/nfs/write.c diff --git a/include/linux/nfs_xdr.h b/include/linux/nfs_xdr.h index 38d77681cf27..cf74a4db84a5 100644 --- a/include/linux/nfs_xdr.h +++ b/include/linux/nfs_xdr.h @@ -278,6 +278,21 @@ struct nfs_writeres { }; /* + * Common arguments to the unlink call + */ +struct nfs_removeargs { + const struct nfs_fh *fh; + struct qstr name; + const u32 * bitmask; +}; + +struct nfs_removeres { + const struct nfs_server *server; + struct nfs4_change_info cinfo; + struct nfs_fattr dir_attr; +}; + +/* * Argument struct for decode_entry function */ struct nfs_entry { @@ -631,18 +646,6 @@ struct nfs4_readlink { struct page ** pages; /* zero-copy data */ }; -struct nfs4_remove_arg { - const struct nfs_fh * fh; - const struct qstr * name; - const u32 * bitmask; -}; - -struct nfs4_remove_res { - const struct nfs_server * server; - struct nfs4_change_info cinfo; - struct nfs_fattr * dir_attr; -}; - struct nfs4_rename_arg { const struct nfs_fh * old_dir; const struct nfs_fh * new_dir; @@ -788,9 +791,8 @@ struct nfs_rpc_ops { int (*create) (struct inode *, struct dentry *, struct iattr *, int, struct nameidata *); int (*remove) (struct inode *, struct qstr *); - int (*unlink_setup) (struct rpc_message *, - struct dentry *, struct qstr *); - int (*unlink_done) (struct dentry *, struct rpc_task *); + void (*unlink_setup) (struct rpc_message *, struct inode *dir); + int (*unlink_done) (struct rpc_task *, struct inode *); int (*rename) 
(struct inode *, struct qstr *, struct inode *, struct qstr *); int (*link) (struct inode *, struct inode *, struct qstr *); diff --git a/include/linux/nfsd/export.h b/include/linux/nfsd/export.h index 78feb7beff75..5cd192469096 100644 --- a/include/linux/nfsd/export.h +++ b/include/linux/nfsd/export.h @@ -116,18 +116,7 @@ struct svc_expkey { #define EX_NOHIDE(exp) ((exp)->ex_flags & NFSEXP_NOHIDE) #define EX_WGATHER(exp) ((exp)->ex_flags & NFSEXP_GATHERED_WRITES) -static inline int EX_RDONLY(struct svc_export *exp, struct svc_rqst *rqstp) -{ - struct exp_flavor_info *f; - struct exp_flavor_info *end = exp->ex_flavors + exp->ex_nflavors; - - for (f = exp->ex_flavors; f < end; f++) { - if (f->pseudoflavor == rqstp->rq_flavor) - return f->flags & NFSEXP_READONLY; - } - return exp->ex_flags & NFSEXP_READONLY; -} - +int nfsexp_flags(struct svc_rqst *rqstp, struct svc_export *exp); __be32 check_nfsd_access(struct svc_export *exp, struct svc_rqst *rqstp); /* diff --git a/include/linux/notifier.h b/include/linux/notifier.h index 576f2bb34cc8..be3f2bb6fcf3 100644 --- a/include/linux/notifier.h +++ b/include/linux/notifier.h @@ -212,5 +212,11 @@ extern int __srcu_notifier_call_chain(struct srcu_notifier_head *nh, #define CPU_DEAD_FROZEN (CPU_DEAD | CPU_TASKS_FROZEN) #define CPU_DYING_FROZEN (CPU_DYING | CPU_TASKS_FROZEN) +/* Hibernation and suspend events */ +#define PM_HIBERNATION_PREPARE 0x0001 /* Going to hibernate */ +#define PM_POST_HIBERNATION 0x0002 /* Hibernation finished */ +#define PM_SUSPEND_PREPARE 0x0003 /* Going to suspend the system */ +#define PM_POST_SUSPEND 0x0004 /* Suspend finished */ + #endif /* __KERNEL__ */ #endif /* _LINUX_NOTIFIER_H */ diff --git a/include/linux/page-flags.h b/include/linux/page-flags.h index 731cd2ac3227..209d3a47f50f 100644 --- a/include/linux/page-flags.h +++ b/include/linux/page-flags.h @@ -90,6 +90,9 @@ #define PG_reclaim 17 /* To be reclaimed asap */ #define PG_buddy 19 /* Page is free, on buddy lists */ +/* PG_readahead is only used for file reads; PG_reclaim is only for writes */ +#define PG_readahead PG_reclaim /* Reminder to do async read-ahead */ + /* PG_owner_priv_1 users should have descriptive aliases */ #define PG_checked PG_owner_priv_1 /* Used by some filesystems */ #define PG_pinned PG_owner_priv_1 /* Xen pinned pagetable */ @@ -186,37 +189,15 @@ static inline void SetPageUptodate(struct page *page) #define __SetPagePrivate(page) __set_bit(PG_private, &(page)->flags) #define __ClearPagePrivate(page) __clear_bit(PG_private, &(page)->flags) +/* + * Only test-and-set exist for PG_writeback. The unconditional operators are + * risky: they bypass page accounting. 
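
PG_readahead, defined above, is the interval marker the new readahead code relies on: one page per batch is flagged, and hitting that page in the read path is the cue to pipeline the next batch. Roughly what a read path is expected to do with it (a sketch built on the page_cache_async_readahead() declaration earlier in this diff; my_check_readahead() is an invented wrapper):

static void my_check_readahead(struct address_space *mapping,
			       struct file_ra_state *ra, struct file *filp,
			       struct page *page, pgoff_t index,
			       unsigned long req_size)
{
	/* The readahead code clears the flag once it has fired. */
	if (PageReadahead(page))
		page_cache_async_readahead(mapping, ra, filp, page,
					   index, req_size);
}
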
+ */ #define PageWriteback(page) test_bit(PG_writeback, &(page)->flags) -#define SetPageWriteback(page) \ - do { \ - if (!test_and_set_bit(PG_writeback, \ - &(page)->flags)) \ - inc_zone_page_state(page, NR_WRITEBACK); \ - } while (0) -#define TestSetPageWriteback(page) \ - ({ \ - int ret; \ - ret = test_and_set_bit(PG_writeback, \ - &(page)->flags); \ - if (!ret) \ - inc_zone_page_state(page, NR_WRITEBACK); \ - ret; \ - }) -#define ClearPageWriteback(page) \ - do { \ - if (test_and_clear_bit(PG_writeback, \ - &(page)->flags)) \ - dec_zone_page_state(page, NR_WRITEBACK); \ - } while (0) -#define TestClearPageWriteback(page) \ - ({ \ - int ret; \ - ret = test_and_clear_bit(PG_writeback, \ - &(page)->flags); \ - if (ret) \ - dec_zone_page_state(page, NR_WRITEBACK); \ - ret; \ - }) +#define TestSetPageWriteback(page) test_and_set_bit(PG_writeback, \ + &(page)->flags) +#define TestClearPageWriteback(page) test_and_clear_bit(PG_writeback, \ + &(page)->flags) #define PageBuddy(page) test_bit(PG_buddy, &(page)->flags) #define __SetPageBuddy(page) __set_bit(PG_buddy, &(page)->flags) @@ -226,6 +207,10 @@ static inline void SetPageUptodate(struct page *page) #define SetPageMappedToDisk(page) set_bit(PG_mappedtodisk, &(page)->flags) #define ClearPageMappedToDisk(page) clear_bit(PG_mappedtodisk, &(page)->flags) +#define PageReadahead(page) test_bit(PG_readahead, &(page)->flags) +#define SetPageReadahead(page) set_bit(PG_readahead, &(page)->flags) +#define ClearPageReadahead(page) clear_bit(PG_readahead, &(page)->flags) + #define PageReclaim(page) test_bit(PG_reclaim, &(page)->flags) #define SetPageReclaim(page) set_bit(PG_reclaim, &(page)->flags) #define ClearPageReclaim(page) clear_bit(PG_reclaim, &(page)->flags) diff --git a/include/linux/pci_ids.h b/include/linux/pci_ids.h index 2c7add169539..b15c6498fe67 100644 --- a/include/linux/pci_ids.h +++ b/include/linux/pci_ids.h @@ -495,6 +495,8 @@ #define PCI_VENDOR_ID_AMD 0x1022 #define PCI_DEVICE_ID_AMD_K8_NB 0x1100 +#define PCI_DEVICE_ID_AMD_K8_NB_ADDRMAP 0x1101 +#define PCI_DEVICE_ID_AMD_K8_NB_MEMCTL 0x1102 #define PCI_DEVICE_ID_AMD_K8_NB_MISC 0x1103 #define PCI_DEVICE_ID_AMD_LANCE 0x2000 #define PCI_DEVICE_ID_AMD_LANCE_HOME 0x2001 @@ -2209,6 +2211,7 @@ #define PCI_DEVICE_ID_INTEL_82915GM_IG 0x2592 #define PCI_DEVICE_ID_INTEL_82945G_HB 0x2770 #define PCI_DEVICE_ID_INTEL_82945G_IG 0x2772 +#define PCI_DEVICE_ID_INTEL_3000_HB 0x2778 #define PCI_DEVICE_ID_INTEL_82945GM_HB 0x27A0 #define PCI_DEVICE_ID_INTEL_82945GM_IG 0x27A2 #define PCI_DEVICE_ID_INTEL_ICH6_0 0x2640 diff --git a/include/linux/pm.h b/include/linux/pm.h index 2735b7cadd20..ad3cc2eb0d34 100644 --- a/include/linux/pm.h +++ b/include/linux/pm.h @@ -101,6 +101,7 @@ struct pm_dev */ extern void (*pm_idle)(void); extern void (*pm_power_off)(void); +extern void (*pm_power_off_prepare)(void); typedef int __bitwise suspend_state_t; diff --git a/include/linux/sched.h b/include/linux/sched.h index 731edaca8ffd..33b9b4841ee7 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h @@ -345,6 +345,27 @@ typedef unsigned long mm_counter_t; (mm)->hiwater_vm = (mm)->total_vm; \ } while (0) +extern void set_dumpable(struct mm_struct *mm, int value); +extern int get_dumpable(struct mm_struct *mm); + +/* mm flags */ +/* dumpable bits */ +#define MMF_DUMPABLE 0 /* core dump is permitted */ +#define MMF_DUMP_SECURELY 1 /* core file is readable only by root */ +#define MMF_DUMPABLE_BITS 2 + +/* coredump filter bits */ +#define MMF_DUMP_ANON_PRIVATE 2 +#define MMF_DUMP_ANON_SHARED 3 +#define 
MMF_DUMP_MAPPED_PRIVATE 4 +#define MMF_DUMP_MAPPED_SHARED 5 +#define MMF_DUMP_FILTER_SHIFT MMF_DUMPABLE_BITS +#define MMF_DUMP_FILTER_BITS 4 +#define MMF_DUMP_FILTER_MASK \ + (((1 << MMF_DUMP_FILTER_BITS) - 1) << MMF_DUMP_FILTER_SHIFT) +#define MMF_DUMP_FILTER_DEFAULT \ + ((1 << MMF_DUMP_ANON_PRIVATE) | (1 << MMF_DUMP_ANON_SHARED)) + struct mm_struct { struct vm_area_struct * mmap; /* list of VMAs */ struct rb_root mm_rb; @@ -402,7 +423,7 @@ struct mm_struct { unsigned int token_priority; unsigned int last_interval; - unsigned char dumpable:2; + unsigned long flags; /* Must use atomic bitops to access the bits */ /* coredumping support */ int core_waiters; @@ -1327,6 +1348,13 @@ static inline int set_cpus_allowed(struct task_struct *p, cpumask_t new_mask) #endif extern unsigned long long sched_clock(void); + +/* + * For kernel-internal use: high-speed (but slightly incorrect) per-cpu + * clock constructed from sched_clock(): + */ +extern unsigned long long cpu_clock(int cpu); + extern unsigned long long task_sched_runtime(struct task_struct *task); diff --git a/include/linux/spinlock_types.h b/include/linux/spinlock_types.h index 210549ba4ef4..f6a3a951b79e 100644 --- a/include/linux/spinlock_types.h +++ b/include/linux/spinlock_types.h @@ -9,14 +9,14 @@ * Released under the General Public License (GPL). */ -#include <linux/lockdep.h> - #if defined(CONFIG_SMP) # include <asm/spinlock_types.h> #else # include <linux/spinlock_types_up.h> #endif +#include <linux/lockdep.h> + typedef struct { raw_spinlock_t raw_lock; #if defined(CONFIG_PREEMPT) && defined(CONFIG_SMP) diff --git a/include/linux/spinlock_types_up.h b/include/linux/spinlock_types_up.h index 27644af20b7c..04135b0e198e 100644 --- a/include/linux/spinlock_types_up.h +++ b/include/linux/spinlock_types_up.h @@ -12,14 +12,10 @@ * Released under the General Public License (GPL). */ -#if defined(CONFIG_DEBUG_SPINLOCK) || \ - defined(CONFIG_DEBUG_LOCK_ALLOC) +#ifdef CONFIG_DEBUG_SPINLOCK typedef struct { volatile unsigned int slock; -#ifdef CONFIG_DEBUG_LOCK_ALLOC - struct lockdep_map dep_map; -#endif } raw_spinlock_t; #define __RAW_SPIN_LOCK_UNLOCKED { 1 } @@ -34,9 +30,6 @@ typedef struct { } raw_spinlock_t; typedef struct { /* no debug version on UP */ -#ifdef CONFIG_DEBUG_LOCK_ALLOC - struct lockdep_map dep_map; -#endif } raw_rwlock_t; #define __RAW_RW_LOCK_UNLOCKED { } diff --git a/include/linux/stacktrace.h b/include/linux/stacktrace.h index 1d2b084c0185..e7fa657d0c49 100644 --- a/include/linux/stacktrace.h +++ b/include/linux/stacktrace.h @@ -13,7 +13,7 @@ extern void save_stack_trace(struct stack_trace *trace); extern void print_stack_trace(struct stack_trace *trace, int spaces); #else # define save_stack_trace(trace) do { } while (0) -# define print_stack_trace(trace) do { } while (0) +# define print_stack_trace(trace, spaces) do { } while (0) #endif #endif diff --git a/include/linux/sunrpc/xdr.h b/include/linux/sunrpc/xdr.h index 9e340fa23c06..c6b53d181bfa 100644 --- a/include/linux/sunrpc/xdr.h +++ b/include/linux/sunrpc/xdr.h @@ -12,6 +12,7 @@ #include <linux/uio.h> #include <asm/byteorder.h> #include <linux/scatterlist.h> +#include <linux/smp_lock.h> /* * Buffer adjustment @@ -36,6 +37,21 @@ struct xdr_netobj { typedef int (*kxdrproc_t)(void *rqstp, __be32 *data, void *obj); /* + * We're still requiring the BKL in the xdr code until it's been + * more carefully audited, at which point this wrapper will become + * unnecessary. 
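
Returning to the coredump filter bits defined above: since mm->flags is now a plain unsigned long shared between the dumpable bits and the filter, all access goes through atomic bitops. An illustrative predicate for deciding whether a vma class should be dumped (the function name is made up; the real consumer is the ELF core dumper):

static int my_vma_dump_p(struct mm_struct *mm, int anon, int shared)
{
	int bit;

	if (anon)
		bit = shared ? MMF_DUMP_ANON_SHARED : MMF_DUMP_ANON_PRIVATE;
	else
		bit = shared ? MMF_DUMP_MAPPED_SHARED : MMF_DUMP_MAPPED_PRIVATE;

	/* Atomic read of one filter bit, per the mm->flags rules. */
	return test_bit(bit, &mm->flags);
}
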
+ */ +static inline int rpc_call_xdrproc(kxdrproc_t xdrproc, void *rqstp, __be32 *data, void *obj) +{ + int ret; + + lock_kernel(); + ret = xdrproc(rqstp, data, obj); + unlock_kernel(); + return ret; +} + +/* * Basic structure for transmission/reception of a client XDR message. * Features a header (for a linear buffer containing RPC headers * and the data payload for short messages), and then an array of diff --git a/include/linux/suspend.h b/include/linux/suspend.h index 9c7cb6430666..e8e6da394c92 100644 --- a/include/linux/suspend.h +++ b/include/linux/suspend.h @@ -43,14 +43,19 @@ static inline void pm_restore_console(void) {} * @prepare: prepare system for hibernation * @enter: shut down system after state has been saved to disk * @finish: finish/clean up after state has been reloaded + * @pre_restore: prepare system for the restoration from a hibernation image + * @restore_cleanup: clean up after a failing image restoration */ struct hibernation_ops { int (*prepare)(void); int (*enter)(void); void (*finish)(void); + int (*pre_restore)(void); + void (*restore_cleanup)(void); }; -#if defined(CONFIG_PM) && defined(CONFIG_SOFTWARE_SUSPEND) +#ifdef CONFIG_PM +#ifdef CONFIG_SOFTWARE_SUSPEND /* kernel/power/snapshot.c */ extern void __register_nosave_region(unsigned long b, unsigned long e, int km); static inline void register_nosave_region(unsigned long b, unsigned long e) @@ -68,16 +73,14 @@ extern unsigned long get_safe_page(gfp_t gfp_mask); extern void hibernation_set_ops(struct hibernation_ops *ops); extern int hibernate(void); -#else -static inline void register_nosave_region(unsigned long b, unsigned long e) {} -static inline void register_nosave_region_late(unsigned long b, unsigned long e) {} +#else /* CONFIG_SOFTWARE_SUSPEND */ static inline int swsusp_page_is_forbidden(struct page *p) { return 0; } static inline void swsusp_set_page_free(struct page *p) {} static inline void swsusp_unset_page_free(struct page *p) {} static inline void hibernation_set_ops(struct hibernation_ops *ops) {} static inline int hibernate(void) { return -ENOSYS; } -#endif /* defined(CONFIG_PM) && defined(CONFIG_SOFTWARE_SUSPEND) */ +#endif /* CONFIG_SOFTWARE_SUSPEND */ void save_processor_state(void); void restore_processor_state(void); @@ -85,4 +88,43 @@ struct saved_context; void __save_processor_state(struct saved_context *ctxt); void __restore_processor_state(struct saved_context *ctxt); +/* kernel/power/main.c */ +extern struct blocking_notifier_head pm_chain_head; + +static inline int register_pm_notifier(struct notifier_block *nb) +{ + return blocking_notifier_chain_register(&pm_chain_head, nb); +} + +static inline int unregister_pm_notifier(struct notifier_block *nb) +{ + return blocking_notifier_chain_unregister(&pm_chain_head, nb); +} + +#define pm_notifier(fn, pri) { \ + static struct notifier_block fn##_nb = \ + { .notifier_call = fn, .priority = pri }; \ + register_pm_notifier(&fn##_nb); \ +} +#else /* CONFIG_PM */ + +static inline int register_pm_notifier(struct notifier_block *nb) +{ + return 0; +} + +static inline int unregister_pm_notifier(struct notifier_block *nb) +{ + return 0; +} + +#define pm_notifier(fn, pri) do { (void)(fn); } while (0) +#endif /* CONFIG_PM */ + +#if !defined CONFIG_SOFTWARE_SUSPEND || !defined(CONFIG_PM) +static inline void register_nosave_region(unsigned long b, unsigned long e) +{ +} +#endif + #endif /* _LINUX_SWSUSP_H */ diff --git a/include/linux/user_namespace.h b/include/linux/user_namespace.h index bb320573bb9e..1101b0ce878f 100644 --- 
a/include/linux/user_namespace.h +++ b/include/linux/user_namespace.h @@ -49,7 +49,7 @@ static inline struct user_namespace *copy_user_ns(int flags, if (flags & CLONE_NEWUSER) return ERR_PTR(-EINVAL); - return NULL; + return old_ns; } static inline void put_user_ns(struct user_namespace *ns) diff --git a/include/net/genetlink.h b/include/net/genetlink.h index b6eaca122db8..decdda546829 100644 --- a/include/net/genetlink.h +++ b/include/net/genetlink.h @@ -5,6 +5,22 @@ #include <net/netlink.h> /** + * struct genl_multicast_group - generic netlink multicast group + * @name: name of the multicast group, names are per-family + * @id: multicast group ID, assigned by the core, to use with + * genlmsg_multicast(). + * @list: list entry for linking + * @family: pointer to family, need not be set before registering + */ +struct genl_multicast_group +{ + struct genl_family *family; /* private */ + struct list_head list; /* private */ + char name[GENL_NAMSIZ]; + u32 id; +}; + +/** * struct genl_family - generic netlink family * @id: protocol family identifier * @hdrsize: length of user specific header in bytes * @version: protocol version * @maxattr: maximum number of attributes supported * @attrbuf: buffer to store parsed attributes * @ops_list: list of all assigned operations * @family_list: family list + * @mcast_groups: multicast groups list */ struct genl_family { @@ -25,6 +42,7 @@ struct genl_family struct nlattr ** attrbuf; /* private */ struct list_head ops_list; /* private */ struct list_head family_list; /* private */ + struct list_head mcast_groups; /* private */ }; /** @@ -73,6 +91,10 @@ extern int genl_register_family(struct genl_family *family); extern int genl_unregister_family(struct genl_family *family); extern int genl_register_ops(struct genl_family *, struct genl_ops *ops); extern int genl_unregister_ops(struct genl_family *, struct genl_ops *ops); +extern int genl_register_mc_group(struct genl_family *family, + struct genl_multicast_group *grp); +extern void genl_unregister_mc_group(struct genl_family *family, + struct genl_multicast_group *grp); extern struct sock *genl_sock;
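
The genetlink additions above are used in two steps: register the group against a family (the core assigns grp->id), then address notifications to that id. A hedged sketch, where "my_family" and "my_mc_group" are invented for illustration:

static struct genl_family my_family = {
	.id	 = GENL_ID_GENERATE,
	.name	 = "my_family",
	.version = 1,
};

static struct genl_multicast_group my_mc_group = {
	.name	= "events",	/* must be unique within the family */
};

static int __init my_genl_init(void)
{
	int err;

	err = genl_register_family(&my_family);
	if (err)
		return err;

	/* On success the core fills in my_mc_group.id. */
	err = genl_register_mc_group(&my_family, &my_mc_group);
	if (err)
		genl_unregister_family(&my_family);
	return err;
}

/* A notification skb built with genlmsg_put() would then be sent with
 * genlmsg_multicast(skb, 0, my_mc_group.id, GFP_KERNEL). */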