Diffstat (limited to 'include/asm-powerpc')
-rw-r--r--	include/asm-powerpc/a.out.h	|  3 +++
-rw-r--r--	include/asm-powerpc/kprobes.h	|  2 --
-rw-r--r--	include/asm-powerpc/percpu.h	|  7 +++++++
-rw-r--r--	include/asm-powerpc/system.h	| 10 ----------
4 files changed, 10 insertions(+), 12 deletions(-)
diff --git a/include/asm-powerpc/a.out.h b/include/asm-powerpc/a.out.h
index c7393a977364..5c5ea83f9349 100644
--- a/include/asm-powerpc/a.out.h
+++ b/include/asm-powerpc/a.out.h
@@ -26,9 +26,12 @@ struct exec
 #define STACK_TOP (test_thread_flag(TIF_32BIT) ? \
 		   STACK_TOP_USER32 : STACK_TOP_USER64)
 
+#define STACK_TOP_MAX STACK_TOP_USER64
+
 #else /* __powerpc64__ */
 
 #define STACK_TOP TASK_SIZE
+#define STACK_TOP_MAX STACK_TOP
 
 #endif /* __powerpc64__ */
 #endif /* __KERNEL__ */
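
The new STACK_TOP_MAX gives exec a personality-independent ceiling: on ppc64, STACK_TOP depends on the current thread's TIF_32BIT flag, while STACK_TOP_MAX is always the 64-bit limit, so a new image's stack can be sized before its personality is known. A minimal illustrative sketch (pick_stack_ceiling is hypothetical, not part of this patch):

#include <asm/a.out.h>

/* Illustrative only: choose the highest address at which exec may
 * build a stack. Before the new binary's personality is fixed, only
 * STACK_TOP_MAX is safe; afterwards STACK_TOP reflects TIF_32BIT. */
static unsigned long pick_stack_ceiling(int personality_known)
{
	return personality_known ? STACK_TOP : STACK_TOP_MAX;
}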
diff --git a/include/asm-powerpc/kprobes.h b/include/asm-powerpc/kprobes.h
index 9537fda238b8..8b08b447d6f3 100644
--- a/include/asm-powerpc/kprobes.h
+++ b/include/asm-powerpc/kprobes.h
@@ -73,12 +73,10 @@ typedef unsigned int kprobe_opcode_t;
 	}							\
 }
 
-#define JPROBE_ENTRY(pentry)	(kprobe_opcode_t *)((func_descr_t *)pentry)
 #define is_trap(instr)	(IS_TW(instr) || IS_TD(instr) || \
 			IS_TWI(instr) || IS_TDI(instr))
 #else
 /* Use stock kprobe_lookup_name since ppc32 doesn't use function descriptors */
-#define JPROBE_ENTRY(pentry)	(kprobe_opcode_t *)(pentry)
 #define is_trap(instr)	(IS_TW(instr) || IS_TWI(instr))
 #endif
 
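
With JPROBE_ENTRY() removed, a jprobe's entry point is assigned directly and the kprobes core resolves ppc64 function descriptors itself. A sketch of a module using the updated interface, assuming the era's do_fork() signature (the handler name is made up):

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/kprobes.h>

/* Handler mirrors the signature of the probed function. */
static long jp_do_fork(unsigned long clone_flags, unsigned long stack_start,
		       struct pt_regs *regs, unsigned long stack_size,
		       int __user *parent_tidptr, int __user *child_tidptr)
{
	printk(KERN_INFO "do_fork: clone_flags=0x%lx\n", clone_flags);
	jprobe_return();	/* required: hands control back to the probed function */
	return 0;		/* never reached */
}

static struct jprobe my_jprobe = {
	.entry = jp_do_fork,	/* was: .entry = JPROBE_ENTRY(jp_do_fork) */
	.kp.symbol_name = "do_fork",
};

static int __init jp_init(void)
{
	return register_jprobe(&my_jprobe);
}

static void __exit jp_exit(void)
{
	unregister_jprobe(&my_jprobe);
}

module_init(jp_init);
module_exit(jp_exit);
MODULE_LICENSE("GPL");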
diff --git a/include/asm-powerpc/percpu.h b/include/asm-powerpc/percpu.h
index 2f2e3024fa61..73dc8ba4010d 100644
--- a/include/asm-powerpc/percpu.h
+++ b/include/asm-powerpc/percpu.h
@@ -20,6 +20,11 @@
 #define DEFINE_PER_CPU(type, name) \
     __attribute__((__section__(".data.percpu"))) __typeof__(type) per_cpu__##name
 
+#define DEFINE_PER_CPU_SHARED_ALIGNED(type, name) \
+	__attribute__((__section__(".data.percpu.shared_aligned"))) \
+	__typeof__(type) per_cpu__##name \
+	____cacheline_aligned_in_smp
+
 /* var is in discarded region: offset to particular copy we want */
 #define per_cpu(var, cpu) (*RELOC_HIDE(&per_cpu__##var, __per_cpu_offset(cpu)))
 #define __get_cpu_var(var) (*RELOC_HIDE(&per_cpu__##var, __my_cpu_offset()))
@@ -40,6 +45,8 @@ extern void setup_per_cpu_areas(void);
 
 #define DEFINE_PER_CPU(type, name) \
     __typeof__(type) per_cpu__##name
+#define DEFINE_PER_CPU_SHARED_ALIGNED(type, name) \
+	DEFINE_PER_CPU(type, name)
 
 #define per_cpu(var, cpu)			(*((void)(cpu), &per_cpu__##var))
 #define __get_cpu_var(var)			per_cpu__##var
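
DEFINE_PER_CPU_SHARED_ALIGNED places each CPU's copy of a hot, frequently written variable in its own cacheline-aligned slot under SMP, avoiding false sharing between neighbouring CPUs' copies; on !SMP it decays to a plain DEFINE_PER_CPU. A hedged usage sketch (struct hot_stats and account_packet are invented for illustration):

#include <linux/percpu.h>

struct hot_stats {
	unsigned long packets;
	unsigned long bytes;
};

/* Lands in .data.percpu.shared_aligned, cacheline-aligned on SMP. */
static DEFINE_PER_CPU_SHARED_ALIGNED(struct hot_stats, hot_stats);

/* Caller must have preemption disabled, as __get_cpu_var() assumes. */
static void account_packet(unsigned long len)
{
	__get_cpu_var(hot_stats).packets++;
	__get_cpu_var(hot_stats).bytes += len;
}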
diff --git a/include/asm-powerpc/system.h b/include/asm-powerpc/system.h
index 32aa42b748be..41520b7a7b76 100644
--- a/include/asm-powerpc/system.h
+++ b/include/asm-powerpc/system.h
@@ -184,16 +184,6 @@ struct thread_struct;
 extern struct task_struct *_switch(struct thread_struct *prev,
 				   struct thread_struct *next);
 
-/*
- * On SMP systems, when the scheduler does migration-cost autodetection,
- * it needs a way to flush as much of the CPU's caches as possible.
- *
- * TODO: fill this in!
- */
-static inline void sched_cacheflush(void)
-{
-}
-
 extern unsigned int rtas_data;
 extern int mem_init_done;	/* set on boot once kmalloc can be called */
 extern unsigned long memory_limit;