Diffstat (limited to 'include/asm-ppc64')
-rw-r--r--   include/asm-ppc64/hvcall.h          |    6
-rw-r--r--   include/asm-ppc64/machdep.h         |    5
-rw-r--r--   include/asm-ppc64/pci-bridge.h      |    5
-rw-r--r--   include/asm-ppc64/plpar_wrappers.h  |    9
-rw-r--r--   include/asm-ppc64/processor.h       |    1
-rw-r--r--   include/asm-ppc64/ptrace-common.h   |   92
-rw-r--r--   include/asm-ppc64/ptrace.h          |  128
-rw-r--r--   include/asm-ppc64/spinlock.h        |  191
-rw-r--r--   include/asm-ppc64/spinlock_types.h  |   20
-rw-r--r--   include/asm-ppc64/system.h          |    3
10 files changed, 299 insertions, 161 deletions
diff --git a/include/asm-ppc64/hvcall.h b/include/asm-ppc64/hvcall.h
index 4f668a4baff0..ab7c3cf24888 100644
--- a/include/asm-ppc64/hvcall.h
+++ b/include/asm-ppc64/hvcall.h
@@ -56,6 +56,11 @@
 #define H_PP1			(1UL<<(63-62))
 #define H_PP2			(1UL<<(63-63))
 
+/* DABRX flags */
+#define H_DABRX_HYPERVISOR	(1UL<<(63-61))
+#define H_DABRX_KERNEL		(1UL<<(63-62))
+#define H_DABRX_USER		(1UL<<(63-63))
+
 /* pSeries hypervisor opcodes */
 #define H_REMOVE		0x04
 #define H_ENTER			0x08
@@ -101,6 +106,7 @@
 #define H_VIO_SIGNAL		0x104
 #define H_SEND_CRQ		0x108
 #define H_COPY_RDMA		0x110
+#define H_SET_XDABR		0x134
 #define H_STUFF_TCE		0x138
 #define H_PUT_TCE_INDIRECT	0x13C
 #define H_VTERM_PARTNER_INFO	0x150
diff --git a/include/asm-ppc64/machdep.h b/include/asm-ppc64/machdep.h
index 9a1ef4427ed2..8027160ec96d 100644
--- a/include/asm-ppc64/machdep.h
+++ b/include/asm-ppc64/machdep.h
@@ -88,6 +88,7 @@ struct machdep_calls {
 
 	/* PCI stuff */
 	void		(*pcibios_fixup)(void);
+	int		(*pci_probe_mode)(struct pci_bus *);
 
 	void		(*restart)(char *cmd);
 	void		(*power_off)(void);
@@ -173,10 +174,6 @@ extern sys_ctrler_t sys_ctrler;
 void ppc64_boot_msg(unsigned int src, const char *msg);
 /* Print a termination message (print only -- does not stop the kernel) */
 void ppc64_terminate_msg(unsigned int src, const char *msg);
-/* Print something that needs attention (device error, etc) */
-void ppc64_attention_msg(unsigned int src, const char *msg);
-/* Print a dump progress message. */
-void ppc64_dump_msg(unsigned int src, const char *msg);
 
 static inline void log_error(char *buf, unsigned int err_type, int fatal)
 {
diff --git a/include/asm-ppc64/pci-bridge.h b/include/asm-ppc64/pci-bridge.h
index 6b4a5b1f695e..d8991389ab39 100644
--- a/include/asm-ppc64/pci-bridge.h
+++ b/include/asm-ppc64/pci-bridge.h
@@ -119,5 +119,10 @@ static inline struct pci_controller *pci_bus_to_host(struct pci_bus *bus)
 	return PCI_DN(busdn)->phb;
 }
 
+/* Return values for ppc_md.pci_probe_mode function */
+#define PCI_PROBE_NONE		-1	/* Don't look at this bus at all */
+#define PCI_PROBE_NORMAL	0	/* Do normal PCI probing */
+#define PCI_PROBE_DEVTREE	1	/* Instantiate from device tree */
+
 #endif
 #endif /* __KERNEL__ */
diff --git a/include/asm-ppc64/plpar_wrappers.h b/include/asm-ppc64/plpar_wrappers.h
index f4a5fb7d67c7..72dd2449ee76 100644
--- a/include/asm-ppc64/plpar_wrappers.h
+++ b/include/asm-ppc64/plpar_wrappers.h
@@ -107,5 +107,14 @@ static inline long plpar_put_term_char(unsigned long termno,
 			lbuf[1]);
 }
 
+static inline long plpar_set_xdabr(unsigned long address, unsigned long flags)
+{
+	return plpar_hcall_norets(H_SET_XDABR, address, flags);
+}
+
+static inline long plpar_set_dabr(unsigned long val)
+{
+	return plpar_hcall_norets(H_SET_DABR, val);
+}
 
 #endif /* _PPC64_PLPAR_WRAPPERS_H */
diff --git a/include/asm-ppc64/processor.h b/include/asm-ppc64/processor.h
index 8bd7aa959385..4146189006e3 100644
--- a/include/asm-ppc64/processor.h
+++ b/include/asm-ppc64/processor.h
@@ -433,6 +433,7 @@ struct thread_struct {
 	unsigned long	start_tb;	/* Start purr when proc switched in */
 	unsigned long	accum_tb;	/* Total accumilated purr for process */
 	unsigned long	vdso_base;	/* base of the vDSO library */
+	unsigned long	dabr;		/* Data address breakpoint register */
 #ifdef CONFIG_ALTIVEC
 	/* Complete AltiVec register set */
 	vector128	vr[32] __attribute((aligned(16)));
diff --git a/include/asm-ppc64/ptrace-common.h b/include/asm-ppc64/ptrace-common.h
index af03547f9c7e..b1babb729673 100644
--- a/include/asm-ppc64/ptrace-common.h
+++ b/include/asm-ppc64/ptrace-common.h
@@ -11,6 +11,10 @@
 #ifndef _PPC64_PTRACE_COMMON_H
 #define _PPC64_PTRACE_COMMON_H
+
+#include <linux/config.h>
+#include <asm/system.h>
+
 /*
  * Set of msr bits that gdb can change on behalf of a process.
  */
 
@@ -69,4 +73,92 @@ static inline void clear_single_step(struct task_struct *task)
 	clear_ti_thread_flag(task->thread_info, TIF_SINGLESTEP);
 }
 
+#ifdef CONFIG_ALTIVEC
+/*
+ * Get/set all the altivec registers vr0..vr31, vscr, vrsave, in one go.
+ * The transfer totals 34 quadword.  Quadwords 0-31 contain the
+ * corresponding vector registers.  Quadword 32 contains the vscr as the
+ * last word (offset 12) within that quadword.  Quadword 33 contains the
+ * vrsave as the first word (offset 0) within the quadword.
+ *
+ * This definition of the VMX state is compatible with the current PPC32
+ * ptrace interface.  This allows signal handling and ptrace to use the
+ * same structures.  This also simplifies the implementation of a bi-arch
+ * (combined (32- and 64-bit) gdb.
+ */
+
+/*
+ * Get contents of AltiVec register state in task TASK
+ */
+static inline int get_vrregs(unsigned long __user *data,
+			     struct task_struct *task)
+{
+	unsigned long regsize;
+
+	/* copy AltiVec registers VR[0] .. VR[31] */
+	regsize = 32 * sizeof(vector128);
+	if (copy_to_user(data, task->thread.vr, regsize))
+		return -EFAULT;
+	data += (regsize / sizeof(unsigned long));
+
+	/* copy VSCR */
+	regsize = 1 * sizeof(vector128);
+	if (copy_to_user(data, &task->thread.vscr, regsize))
+		return -EFAULT;
+	data += (regsize / sizeof(unsigned long));
+
+	/* copy VRSAVE */
+	if (put_user(task->thread.vrsave, (u32 __user *)data))
+		return -EFAULT;
+
+	return 0;
+}
+
+/*
+ * Write contents of AltiVec register state into task TASK.
+ */
+static inline int set_vrregs(struct task_struct *task,
+			     unsigned long __user *data)
+{
+	unsigned long regsize;
+
+	/* copy AltiVec registers VR[0] .. VR[31] */
+	regsize = 32 * sizeof(vector128);
+	if (copy_from_user(task->thread.vr, data, regsize))
+		return -EFAULT;
+	data += (regsize / sizeof(unsigned long));
+
+	/* copy VSCR */
+	regsize = 1 * sizeof(vector128);
+	if (copy_from_user(&task->thread.vscr, data, regsize))
+		return -EFAULT;
+	data += (regsize / sizeof(unsigned long));
+
+	/* copy VRSAVE */
+	if (get_user(task->thread.vrsave, (u32 __user *)data))
+		return -EFAULT;
+
+	return 0;
+}
+#endif
+
+static inline int ptrace_set_debugreg(struct task_struct *task,
+				      unsigned long addr, unsigned long data)
+{
+	/* We only support one DABR and no IABRS at the moment */
+	if (addr > 0)
+		return -EINVAL;
+
+	/* The bottom 3 bits are flags */
+	if ((data & ~0x7UL) >= TASK_SIZE)
+		return -EIO;
+
+	/* Ensure translation is on */
+	if (data && !(data & DABR_TRANSLATION))
+		return -EIO;
+
+	task->thread.dabr = data;
+	return 0;
+}
+
 #endif /* _PPC64_PTRACE_COMMON_H */
diff --git a/include/asm-ppc64/ptrace.h b/include/asm-ppc64/ptrace.h
index c96aad28fc08..3a55377f1fd3 100644
--- a/include/asm-ppc64/ptrace.h
+++ b/include/asm-ppc64/ptrace.h
@@ -25,56 +25,49 @@
  */
 
 #ifndef __ASSEMBLY__
-#define PPC_REG unsigned long
+
 struct pt_regs {
-	PPC_REG gpr[32];
-	PPC_REG nip;
-	PPC_REG msr;
-	PPC_REG orig_gpr3;	/* Used for restarting system calls */
-	PPC_REG ctr;
-	PPC_REG link;
-	PPC_REG xer;
-	PPC_REG ccr;
-	PPC_REG softe;		/* Soft enabled/disabled */
-	PPC_REG trap;		/* Reason for being here */
-	PPC_REG dar;		/* Fault registers */
-	PPC_REG dsisr;
-	PPC_REG result;		/* Result of a system call */
+	unsigned long gpr[32];
+	unsigned long nip;
+	unsigned long msr;
+	unsigned long orig_gpr3;	/* Used for restarting system calls */
+	unsigned long ctr;
+	unsigned long link;
+	unsigned long xer;
+	unsigned long ccr;
+	unsigned long softe;		/* Soft enabled/disabled */
+	unsigned long trap;		/* Reason for being here */
+	unsigned long dar;		/* Fault registers */
+	unsigned long dsisr;
+	unsigned long result;		/* Result of a system call */
 };
 
-#define PPC_REG_32 unsigned int
 struct pt_regs32 {
-	PPC_REG_32 gpr[32];
-	PPC_REG_32 nip;
-	PPC_REG_32 msr;
-	PPC_REG_32 orig_gpr3;	/* Used for restarting system calls */
-	PPC_REG_32 ctr;
-	PPC_REG_32 link;
-	PPC_REG_32 xer;
-	PPC_REG_32 ccr;
-	PPC_REG_32 mq;		/* 601 only (not used at present) */
-				/* Used on APUS to hold IPL value. */
-	PPC_REG_32 trap;	/* Reason for being here */
-	PPC_REG_32 dar;		/* Fault registers */
-	PPC_REG_32 dsisr;
-	PPC_REG_32 result;	/* Result of a system call */
+	unsigned int gpr[32];
+	unsigned int nip;
+	unsigned int msr;
+	unsigned int orig_gpr3;		/* Used for restarting system calls */
+	unsigned int ctr;
+	unsigned int link;
+	unsigned int xer;
+	unsigned int ccr;
+	unsigned int mq;		/* 601 only (not used at present) */
+	unsigned int trap;		/* Reason for being here */
+	unsigned int dar;		/* Fault registers */
+	unsigned int dsisr;
+	unsigned int result;		/* Result of a system call */
 };
 
+#ifdef __KERNEL__
+
 #define instruction_pointer(regs) ((regs)->nip)
+
 #ifdef CONFIG_SMP
 extern unsigned long profile_pc(struct pt_regs *regs);
 #else
 #define profile_pc(regs) instruction_pointer(regs)
 #endif
 
-#endif /* __ASSEMBLY__ */
-
-#define STACK_FRAME_OVERHEAD	112	/* size of minimum stack frame */
-
-/* Size of dummy stack frame allocated when calling signal handler. */
-#define __SIGNAL_FRAMESIZE	128
-#define __SIGNAL_FRAMESIZE32	64
-
 #define user_mode(regs) ((((regs)->msr) >> MSR_PR_LG) & 0x1)
 
 #define force_successful_syscall_return()	\
@@ -89,6 +82,16 @@ extern unsigned long profile_pc(struct pt_regs *regs);
 #define TRAP(regs)		((regs)->trap & ~0xF)
 #define CHECK_FULL_REGS(regs)	BUG_ON(regs->trap & 1)
 
+#endif /* __KERNEL__ */
+
+#endif /* __ASSEMBLY__ */
+
+#define STACK_FRAME_OVERHEAD	112	/* size of minimum stack frame */
+
+/* Size of dummy stack frame allocated when calling signal handler. */
+#define __SIGNAL_FRAMESIZE	128
+#define __SIGNAL_FRAMESIZE32	64
+
 /*
  * Offsets used by 'ptrace' system call interface.
  */
@@ -135,17 +138,21 @@ extern unsigned long profile_pc(struct pt_regs *regs);
 #define PT_XER	37
 #define PT_CCR	38
 #define PT_SOFTE 39
+#define PT_TRAP	40
+#define PT_DAR	41
+#define PT_DSISR 42
 #define PT_RESULT 43
 
 #define PT_FPR0	48
 
-/* Kernel and userspace will both use this PT_FPSCR value. 32-bit apps will have
- * visibility to the asm-ppc/ptrace.h header instead of this one.
+/*
+ * Kernel and userspace will both use this PT_FPSCR value. 32-bit apps will
+ * have visibility to the asm-ppc/ptrace.h header instead of this one.
  */
-#define PT_FPSCR (PT_FPR0 + 32)  /* each FP reg occupies 1 slot in 64-bit space */
+#define PT_FPSCR (PT_FPR0 + 32)	/* each FP reg occupies 1 slot in 64-bit space */
 
 #ifdef __KERNEL__
-#define PT_FPSCR32 (PT_FPR0 + 2*32 + 1)  /* each FP reg occupies 2 32-bit userspace slots */
+#define PT_FPSCR32 (PT_FPR0 + 2*32 + 1)	/* each FP reg occupies 2 32-bit userspace slots */
 #endif
 
 #define PT_VR0 82	/* each Vector reg occupies 2 slots in 64-bit */
@@ -173,17 +180,34 @@ extern unsigned long profile_pc(struct pt_regs *regs);
 
 #define PTRACE_GETVRREGS	18
 #define PTRACE_SETVRREGS	19
 
-/* Additional PTRACE requests implemented on PowerPC. */
-#define PPC_PTRACE_GETREGS	0x99	/* Get GPRs 0 - 31 */
-#define PPC_PTRACE_SETREGS	0x98	/* Set GPRs 0 - 31 */
-#define PPC_PTRACE_GETFPREGS	0x97	/* Get FPRs 0 - 31 */
-#define PPC_PTRACE_SETFPREGS	0x96	/* Set FPRs 0 - 31 */
-#define PPC_PTRACE_PEEKTEXT_3264 0x95	/* Read word at location ADDR on a 64-bit process from a 32-bit process. */
-#define PPC_PTRACE_PEEKDATA_3264 0x94	/* Read word at location ADDR on a 64-bit process from a 32-bit process. */
-#define PPC_PTRACE_POKETEXT_3264 0x93	/* Write word at location ADDR on a 64-bit process from a 32-bit process. */
-#define PPC_PTRACE_POKEDATA_3264 0x92	/* Write word at location ADDR on a 64-bit process from a 32-bit process. */
-#define PPC_PTRACE_PEEKUSR_3264	0x91	/* Read a register (specified by ADDR) out of the "user area" on a 64-bit process from a 32-bit process. */
-#define PPC_PTRACE_POKEUSR_3264	0x90	/* Write DATA into location ADDR within the "user area" on a 64-bit process from a 32-bit process. */
+/*
+ * While we dont have 64bit book E processors, we need to reserve the
+ * relevant ptrace calls for 32bit compatibility.
+ */
+#if 0
+#define PTRACE_GETEVRREGS	20
+#define PTRACE_SETEVRREGS	21
+#endif
+/*
+ * Get or set a debug register. The first 16 are DABR registers and the
+ * second 16 are IABR registers.
+ */
+#define PTRACE_GET_DEBUGREG	25
+#define PTRACE_SET_DEBUGREG	26
+
+/* Additional PTRACE requests implemented on PowerPC. */
+#define PPC_PTRACE_GETREGS	0x99	/* Get GPRs 0 - 31 */
+#define PPC_PTRACE_SETREGS	0x98	/* Set GPRs 0 - 31 */
+#define PPC_PTRACE_GETFPREGS	0x97	/* Get FPRs 0 - 31 */
+#define PPC_PTRACE_SETFPREGS	0x96	/* Set FPRs 0 - 31 */
+
+/* Calls to trace a 64bit program from a 32bit program */
+#define PPC_PTRACE_PEEKTEXT_3264 0x95
+#define PPC_PTRACE_PEEKDATA_3264 0x94
+#define PPC_PTRACE_POKETEXT_3264 0x93
+#define PPC_PTRACE_POKEDATA_3264 0x92
+#define PPC_PTRACE_PEEKUSR_3264	0x91
+#define PPC_PTRACE_POKEUSR_3264	0x90
 
 #endif /* _PPC64_PTRACE_H */
diff --git a/include/asm-ppc64/spinlock.h b/include/asm-ppc64/spinlock.h
index acd11564dd75..14cb895bb607 100644
--- a/include/asm-ppc64/spinlock.h
+++ b/include/asm-ppc64/spinlock.h
@@ -15,36 +15,42 @@
  * modify it under the terms of the GNU General Public License
  * as published by the Free Software Foundation; either version
  * 2 of the License, or (at your option) any later version.
+ *
+ * (the type definitions are in asm/spinlock_types.h)
  */
 #include <linux/config.h>
 #include <asm/paca.h>
 #include <asm/hvcall.h>
 #include <asm/iSeries/HvCall.h>
 
-typedef struct {
-	volatile unsigned int lock;
-#ifdef CONFIG_PREEMPT
-	unsigned int break_lock;
-#endif
-} spinlock_t;
+#define __raw_spin_is_locked(x)	((x)->slock != 0)
 
-typedef struct {
-	volatile signed int lock;
-#ifdef CONFIG_PREEMPT
-	unsigned int break_lock;
-#endif
-} rwlock_t;
+/*
+ * This returns the old value in the lock, so we succeeded
+ * in getting the lock if the return value is 0.
+ */
+static __inline__ unsigned long __spin_trylock(raw_spinlock_t *lock)
+{
+	unsigned long tmp, tmp2;
 
-#ifdef __KERNEL__
-#define SPIN_LOCK_UNLOCKED	(spinlock_t) { 0 }
+	__asm__ __volatile__(
+"	lwz		%1,%3(13)		# __spin_trylock\n\
+1:	lwarx		%0,0,%2\n\
+	cmpwi		0,%0,0\n\
+	bne-		2f\n\
+	stwcx.		%1,0,%2\n\
+	bne-		1b\n\
+	isync\n\
+2:"	: "=&r" (tmp), "=&r" (tmp2)
+	: "r" (&lock->slock), "i" (offsetof(struct paca_struct, lock_token))
+	: "cr0", "memory");
 
-#define spin_is_locked(x)	((x)->lock != 0)
-#define spin_lock_init(x)	do { *(x) = SPIN_LOCK_UNLOCKED; } while(0)
+	return tmp;
+}
 
-static __inline__ void _raw_spin_unlock(spinlock_t *lock)
+static int __inline__ __raw_spin_trylock(raw_spinlock_t *lock)
 {
-	__asm__ __volatile__("lwsync	# spin_unlock": : :"memory");
-	lock->lock = 0;
+	return __spin_trylock(lock) == 0;
 }
 
 /*
@@ -64,44 +70,15 @@ static __inline__ void _raw_spin_unlock(spinlock_t *lock)
 #if defined(CONFIG_PPC_SPLPAR) || defined(CONFIG_PPC_ISERIES)
 /* We only yield to the hypervisor if we are in shared processor mode */
 #define SHARED_PROCESSOR (get_paca()->lppaca.shared_proc)
-extern void __spin_yield(spinlock_t *lock);
-extern void __rw_yield(rwlock_t *lock);
+extern void __spin_yield(raw_spinlock_t *lock);
+extern void __rw_yield(raw_rwlock_t *lock);
 #else /* SPLPAR || ISERIES */
 #define __spin_yield(x)	barrier()
 #define __rw_yield(x)	barrier()
 #define SHARED_PROCESSOR	0
 #endif
 
-extern void spin_unlock_wait(spinlock_t *lock);
-
-/*
- * This returns the old value in the lock, so we succeeded
- * in getting the lock if the return value is 0.
- */
-static __inline__ unsigned long __spin_trylock(spinlock_t *lock)
-{
-	unsigned long tmp, tmp2;
-
-	__asm__ __volatile__(
-"	lwz		%1,%3(13)		# __spin_trylock\n\
-1:	lwarx		%0,0,%2\n\
-	cmpwi		0,%0,0\n\
-	bne-		2f\n\
-	stwcx.		%1,0,%2\n\
-	bne-		1b\n\
-	isync\n\
-2:"	: "=&r" (tmp), "=&r" (tmp2)
-	: "r" (&lock->lock), "i" (offsetof(struct paca_struct, lock_token))
-	: "cr0", "memory");
-
-	return tmp;
-}
-
-static int __inline__ _raw_spin_trylock(spinlock_t *lock)
-{
-	return __spin_trylock(lock) == 0;
-}
-static void __inline__ _raw_spin_lock(spinlock_t *lock)
+static void __inline__ __raw_spin_lock(raw_spinlock_t *lock)
 {
 	while (1) {
 		if (likely(__spin_trylock(lock) == 0))
@@ -110,12 +87,12 @@ static void __inline__ _raw_spin_lock(spinlock_t *lock)
 			HMT_low();
 			if (SHARED_PROCESSOR)
 				__spin_yield(lock);
-		} while (unlikely(lock->lock != 0));
+		} while (unlikely(lock->slock != 0));
 		HMT_medium();
 	}
 }
 
-static void __inline__ _raw_spin_lock_flags(spinlock_t *lock, unsigned long flags)
+static void __inline__ __raw_spin_lock_flags(raw_spinlock_t *lock, unsigned long flags)
 {
 	unsigned long flags_dis;
 
@@ -128,12 +105,20 @@ static void __inline__ _raw_spin_lock_flags(spinlock_t *lock, unsigned long flag
 			HMT_low();
 			if (SHARED_PROCESSOR)
 				__spin_yield(lock);
-		} while (unlikely(lock->lock != 0));
+		} while (unlikely(lock->slock != 0));
 		HMT_medium();
 		local_irq_restore(flags_dis);
 	}
 }
 
+static __inline__ void __raw_spin_unlock(raw_spinlock_t *lock)
+{
+	__asm__ __volatile__("lwsync	# __raw_spin_unlock": : :"memory");
+	lock->slock = 0;
+}
+
+extern void __raw_spin_unlock_wait(raw_spinlock_t *lock);
+
 /*
  * Read-write spinlocks, allowing multiple readers
  * but only one writer.
@@ -144,24 +129,15 @@ static void __inline__ _raw_spin_lock_flags(spinlock_t *lock, unsigned long flag
  * irq-safe write-lock, but readers can get non-irqsafe
  * read-locks.
  */
 
-#define RW_LOCK_UNLOCKED	(rwlock_t) { 0 }
-#define rwlock_init(x)		do { *(x) = RW_LOCK_UNLOCKED; } while(0)
-
-#define read_can_lock(rw)	((rw)->lock >= 0)
-#define write_can_lock(rw)	(!(rw)->lock)
-
-static __inline__ void _raw_write_unlock(rwlock_t *rw)
-{
-	__asm__ __volatile__("lwsync	# write_unlock": : :"memory");
-	rw->lock = 0;
-}
+#define __raw_read_can_lock(rw)		((rw)->lock >= 0)
+#define __raw_write_can_lock(rw)	(!(rw)->lock)
 
 /*
  * This returns the old value in the lock + 1,
  * so we got a read lock if the return value is > 0.
  */
-static long __inline__ __read_trylock(rwlock_t *rw)
+static long __inline__ __read_trylock(raw_rwlock_t *rw)
 {
 	long tmp;
 
@@ -180,45 +156,11 @@ static long __inline__ __read_trylock(rwlock_t *rw)
 	return tmp;
 }
 
-static int __inline__ _raw_read_trylock(rwlock_t *rw)
-{
-	return __read_trylock(rw) > 0;
-}
-
-static void __inline__ _raw_read_lock(rwlock_t *rw)
-{
-	while (1) {
-		if (likely(__read_trylock(rw) > 0))
-			break;
-		do {
-			HMT_low();
-			if (SHARED_PROCESSOR)
-				__rw_yield(rw);
-		} while (unlikely(rw->lock < 0));
-		HMT_medium();
-	}
-}
-
-static void __inline__ _raw_read_unlock(rwlock_t *rw)
-{
-	long tmp;
-
-	__asm__ __volatile__(
-	"eieio			# read_unlock\n\
-1:	lwarx		%0,0,%1\n\
-	addic		%0,%0,-1\n\
-	stwcx.		%0,0,%1\n\
-	bne-		1b"
-	: "=&r"(tmp)
-	: "r"(&rw->lock)
-	: "cr0", "memory");
-}
-
 /*
  * This returns the old value in the lock,
  * so we got the write lock if the return value is 0.
  */
-static __inline__ long __write_trylock(rwlock_t *rw)
+static __inline__ long __write_trylock(raw_rwlock_t *rw)
 {
 	long tmp, tmp2;
 
@@ -237,12 +179,21 @@ static __inline__ long __write_trylock(rwlock_t *rw)
 	return tmp;
 }
 
-static int __inline__ _raw_write_trylock(rwlock_t *rw)
+static void __inline__ __raw_read_lock(raw_rwlock_t *rw)
 {
-	return __write_trylock(rw) == 0;
+	while (1) {
+		if (likely(__read_trylock(rw) > 0))
+			break;
+		do {
+			HMT_low();
+			if (SHARED_PROCESSOR)
+				__rw_yield(rw);
+		} while (unlikely(rw->lock < 0));
+		HMT_medium();
+	}
 }
 
-static void __inline__ _raw_write_lock(rwlock_t *rw)
+static void __inline__ __raw_write_lock(raw_rwlock_t *rw)
 {
 	while (1) {
 		if (likely(__write_trylock(rw) == 0))
@@ -256,5 +207,35 @@ static void __inline__ _raw_write_lock(rwlock_t *rw)
 	}
 }
 
-#endif /* __KERNEL__ */
+static int __inline__ __raw_read_trylock(raw_rwlock_t *rw)
+{
+	return __read_trylock(rw) > 0;
+}
+
+static int __inline__ __raw_write_trylock(raw_rwlock_t *rw)
+{
+	return __write_trylock(rw) == 0;
+}
+
+static void __inline__ __raw_read_unlock(raw_rwlock_t *rw)
+{
+	long tmp;
+
+	__asm__ __volatile__(
+	"eieio			# read_unlock\n\
+1:	lwarx		%0,0,%1\n\
+	addic		%0,%0,-1\n\
+	stwcx.		%0,0,%1\n\
+	bne-		1b"
+	: "=&r"(tmp)
+	: "r"(&rw->lock)
+	: "cr0", "memory");
+}
+
+static __inline__ void __raw_write_unlock(raw_rwlock_t *rw)
+{
+	__asm__ __volatile__("lwsync	# write_unlock": : :"memory");
+	rw->lock = 0;
+}
+
 #endif /* __ASM_SPINLOCK_H */
diff --git a/include/asm-ppc64/spinlock_types.h b/include/asm-ppc64/spinlock_types.h
new file mode 100644
index 000000000000..a37c8eabb9f2
--- /dev/null
+++ b/include/asm-ppc64/spinlock_types.h
@@ -0,0 +1,20 @@
+#ifndef __ASM_SPINLOCK_TYPES_H
+#define __ASM_SPINLOCK_TYPES_H
+
+#ifndef __LINUX_SPINLOCK_TYPES_H
+# error "please don't include this file directly"
+#endif
+
+typedef struct {
+	volatile unsigned int slock;
+} raw_spinlock_t;
+
+#define __RAW_SPIN_LOCK_UNLOCKED	{ 0 }
+
+typedef struct {
+	volatile signed int lock;
+} raw_rwlock_t;
+
+#define __RAW_RW_LOCK_UNLOCKED		{ 0 }
+
+#endif
diff --git a/include/asm-ppc64/system.h b/include/asm-ppc64/system.h
index c0396428cc3c..375015c62f20 100644
--- a/include/asm-ppc64/system.h
+++ b/include/asm-ppc64/system.h
@@ -101,6 +101,9 @@ static inline int debugger_dabr_match(struct pt_regs *regs) { return 0; }
 static inline int debugger_fault_handler(struct pt_regs *regs) { return 0; }
 #endif
 
+extern int set_dabr(unsigned long dabr);
+extern void _exception(int signr, struct pt_regs *regs, int code,
+		       unsigned long addr);
 extern int fix_alignment(struct pt_regs *regs);
 extern void bad_page_fault(struct pt_regs *regs, unsigned long address,
 			   int sig);
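
The H_SET_XDABR opcode and the H_DABRX_* bits added to hvcall.h pair with the plpar_set_dabr()/plpar_set_xdabr() wrappers above: H_SET_DABR takes the match flags in the low bits of the DABR value itself, while H_SET_XDABR takes a separate DABRX argument built from the H_DABRX_* bits. A rough sketch of how they combine -- the function names below are illustrative only and are not code from this change:

/*
 * Illustrative sketch, not part of this change: platform code could
 * forward a DABR value to the hypervisor through either wrapper,
 * depending on which hcall the firmware supports.
 */
#include <asm/hvcall.h>
#include <asm/plpar_wrappers.h>

static int example_set_dabr(unsigned long dabr)
{
	/* Older interface: the match flags ride in the low bits of dabr */
	return plpar_set_dabr(dabr);
}

static int example_set_xdabr(unsigned long dabr)
{
	/* DABRX selects which privilege levels the breakpoint matches */
	return plpar_set_xdabr(dabr, H_DABRX_KERNEL | H_DABRX_USER);
}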
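
The comment added to get_vrregs()/set_vrregs() in ptrace-common.h fixes the layout of the buffer moved by PTRACE_GETVRREGS and PTRACE_SETVRREGS: 34 quadwords, with vr0..vr31 first, vscr in the last word of quadword 32, and vrsave in the first word of quadword 33. Spelled out as a user-space view of that layout -- the struct and field names are invented for illustration and this is not a kernel or libc type:

#include <stdint.h>

struct vrregs_layout {			/* illustration only */
	uint8_t  vr[32][16];		/* quadwords 0-31: vr0..vr31 */
	uint32_t vscr_pad[3];		/* quadword 32: vscr is the last word, */
	uint32_t vscr;			/*   i.e. at byte offset 12 */
	uint32_t vrsave;		/* quadword 33: vrsave is word 0, */
	uint32_t vrsave_pad[3];		/*   the rest of the quadword is unused */
};

/* 34 quadwords of 16 bytes each: 544 bytes in total */
_Static_assert(sizeof(struct vrregs_layout) == 34 * 16,
	       "unexpected VMX regset size");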
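
On the tracer side, PTRACE_SET_DEBUGREG feeds ptrace_set_debugreg() above: the ptrace addr argument must be 0 (only one DABR and no IABRs are supported yet), and the data argument is the watched address with the low three bits used as flags, of which the translation bit must be set whenever the value is non-zero. A minimal user-space sketch, assuming the DABR_* flag values mirror the kernel's asm/reg.h (they are defined locally here purely for illustration):

#include <stdio.h>
#include <sys/ptrace.h>
#include <sys/types.h>

#define PTRACE_GET_DEBUGREG	25	/* request numbers from this change */
#define PTRACE_SET_DEBUGREG	26

/* Assumed to mirror asm/reg.h: the low three DABR bits are flags */
#define DABR_TRANSLATION	(1UL << 2)
#define DABR_DATA_WRITE		(1UL << 1)
#define DABR_DATA_READ		(1UL << 0)

/* Arm a data-write watchpoint on an 8-byte-aligned address in the tracee */
static int watch_writes(pid_t child, unsigned long addr)
{
	unsigned long dabr = (addr & ~7UL) | DABR_TRANSLATION | DABR_DATA_WRITE;

	/* ptrace addr argument is 0: only a single DABR is supported */
	if (ptrace(PTRACE_SET_DEBUGREG, child, 0UL, dabr) == -1) {
		perror("PTRACE_SET_DEBUGREG");
		return -1;
	}
	return 0;
}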