From 64ac24e738823161693bf791f87adc802cf529ff Mon Sep 17 00:00:00 2001
From: Matthew Wilcox
Date: Fri, 7 Mar 2008 21:55:58 -0500
Subject: Generic semaphore implementation

Semaphores are no longer performance-critical, so a generic C
implementation is better for maintainability, debuggability and
extensibility.  Thanks to Peter Zijlstra for fixing the lockdep
warning.  Thanks to Harvey Harrison for pointing out that the
unlikely() was unnecessary.

Signed-off-by: Matthew Wilcox
Acked-by: Ingo Molnar
---
 arch/arm/kernel/Makefile    |   2 +-
 arch/arm/kernel/semaphore.c | 221 --------------------------------------------
 2 files changed, 1 insertion(+), 222 deletions(-)
 delete mode 100644 arch/arm/kernel/semaphore.c
(limited to 'arch/arm/kernel')

diff --git a/arch/arm/kernel/Makefile b/arch/arm/kernel/Makefile
index 00d44c6fbfe9..6235f72a14f0 100644
--- a/arch/arm/kernel/Makefile
+++ b/arch/arm/kernel/Makefile
@@ -7,7 +7,7 @@ AFLAGS_head.o := -DTEXT_OFFSET=$(TEXT_OFFSET)
 # Object file lists.
 
 obj-y		:= compat.o entry-armv.o entry-common.o irq.o \
-		   process.o ptrace.o semaphore.o setup.o signal.o \
+		   process.o ptrace.o setup.o signal.o \
		   sys_arm.o stacktrace.o time.o traps.o
 
 obj-$(CONFIG_ISA_DMA_API)	+= dma.o
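The generic implementation that replaces per-architecture files like the one deleted below lives in kernel/semaphore.c and include/linux/semaphore.h, which this arch/arm-only view does not show. A condensed sketch of its shape, paraphrased from memory of that commit with the likely() annotations and lockdep details omitted, so treat it as illustrative rather than authoritative:

	struct semaphore {
		spinlock_t		lock;
		unsigned int		count;
		struct list_head	wait_list;
	};

	/*
	 * Uncontended path: take the internal spinlock, adjust the count,
	 * done.  Only contended callers pay for the wait-list handling in
	 * the out-of-line __down()/__up() helpers.
	 */
	void down(struct semaphore *sem)
	{
		unsigned long flags;

		spin_lock_irqsave(&sem->lock, flags);
		if (sem->count > 0)
			sem->count--;		/* got it */
		else
			__down(sem);		/* queue on wait_list and sleep */
		spin_unlock_irqrestore(&sem->lock, flags);
	}

	void up(struct semaphore *sem)
	{
		unsigned long flags;

		spin_lock_irqsave(&sem->lock, flags);
		if (list_empty(&sem->wait_list))
			sem->count++;
		else
			__up(sem);		/* hand off to the first waiter */
		spin_unlock_irqrestore(&sem->lock, flags);
	}

The point of the conversion is visible here: the whole algorithm is ordinary spinlock-protected C, so it behaves identically on every architecture and is easy to instrument, at the cost of a spinlock round-trip that no longer matters now that mutexes cover the hot paths.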
diff --git a/arch/arm/kernel/semaphore.c b/arch/arm/kernel/semaphore.c
deleted file mode 100644
index 981fe5c6ccbe..000000000000
--- a/arch/arm/kernel/semaphore.c
+++ /dev/null
@@ -1,221 +0,0 @@
-/*
- * ARM semaphore implementation, taken from
- *
- * i386 semaphore implementation.
- *
- * (C) Copyright 1999 Linus Torvalds
- *
- * Modified for ARM by Russell King
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-#include <linux/module.h>
-#include <linux/sched.h>
-#include <linux/errno.h>
-#include <linux/init.h>
-
-#include <asm/semaphore.h>
-
-/*
- * Semaphores are implemented using a two-way counter:
- * The "count" variable is decremented for each process
- * that tries to acquire the semaphore, while the "sleeping"
- * variable is a count of such acquires.
- *
- * Notably, the inline "up()" and "down()" functions can
- * efficiently test if they need to do any extra work (up
- * needs to do something only if count was negative before
- * the increment operation).
- *
- * "sleeping" and the contention routine ordering is
- * protected by the semaphore spinlock.
- *
- * Note that these functions are only called when there is
- * contention on the lock, and as such all this is the
- * "non-critical" part of the whole semaphore business. The
- * critical part is the inline stuff in <asm/semaphore.h>
- * where we want to avoid any extra jumps and calls.
- */
-
-/*
- * Logic:
- *  - only on a boundary condition do we need to care. When we go
- *    from a negative count to a non-negative, we wake people up.
- *  - when we go from a non-negative count to a negative do we
- *    (a) synchronize with the "sleeper" count and (b) make sure
- *    that we're on the wakeup list before we synchronize so that
- *    we cannot lose wakeup events.
- */
-
-void __up(struct semaphore *sem)
-{
-	wake_up(&sem->wait);
-}
-
-static DEFINE_SPINLOCK(semaphore_lock);
-
-void __sched __down(struct semaphore * sem)
-{
-	struct task_struct *tsk = current;
-	DECLARE_WAITQUEUE(wait, tsk);
-	tsk->state = TASK_UNINTERRUPTIBLE;
-	add_wait_queue_exclusive(&sem->wait, &wait);
-
-	spin_lock_irq(&semaphore_lock);
-	sem->sleepers++;
-	for (;;) {
-		int sleepers = sem->sleepers;
-
-		/*
-		 * Add "everybody else" into it. They aren't
-		 * playing, because we own the spinlock.
-		 */
-		if (!atomic_add_negative(sleepers - 1, &sem->count)) {
-			sem->sleepers = 0;
-			break;
-		}
-		sem->sleepers = 1;	/* us - see -1 above */
-		spin_unlock_irq(&semaphore_lock);
-
-		schedule();
-		tsk->state = TASK_UNINTERRUPTIBLE;
-		spin_lock_irq(&semaphore_lock);
-	}
-	spin_unlock_irq(&semaphore_lock);
-	remove_wait_queue(&sem->wait, &wait);
-	tsk->state = TASK_RUNNING;
-	wake_up(&sem->wait);
-}
-
-int __sched __down_interruptible(struct semaphore * sem)
-{
-	int retval = 0;
-	struct task_struct *tsk = current;
-	DECLARE_WAITQUEUE(wait, tsk);
-	tsk->state = TASK_INTERRUPTIBLE;
-	add_wait_queue_exclusive(&sem->wait, &wait);
-
-	spin_lock_irq(&semaphore_lock);
-	sem->sleepers ++;
-	for (;;) {
-		int sleepers = sem->sleepers;
-
-		/*
-		 * With signals pending, this turns into
-		 * the trylock failure case - we won't be
-		 * sleeping, and we can't get the lock as
-		 * it has contention. Just correct the count
-		 * and exit.
-		 */
-		if (signal_pending(current)) {
-			retval = -EINTR;
-			sem->sleepers = 0;
-			atomic_add(sleepers, &sem->count);
-			break;
-		}
-
-		/*
-		 * Add "everybody else" into it. They aren't
-		 * playing, because we own the spinlock. The
-		 * "-1" is because we're still hoping to get
-		 * the lock.
-		 */
-		if (!atomic_add_negative(sleepers - 1, &sem->count)) {
-			sem->sleepers = 0;
-			break;
-		}
-		sem->sleepers = 1;	/* us - see -1 above */
-		spin_unlock_irq(&semaphore_lock);
-
-		schedule();
-		tsk->state = TASK_INTERRUPTIBLE;
-		spin_lock_irq(&semaphore_lock);
-	}
-	spin_unlock_irq(&semaphore_lock);
-	tsk->state = TASK_RUNNING;
-	remove_wait_queue(&sem->wait, &wait);
-	wake_up(&sem->wait);
-	return retval;
-}
-
-/*
- * Trylock failed - make sure we correct for
- * having decremented the count.
- *
- * We could have done the trylock with a
- * single "cmpxchg" without failure cases,
- * but then it wouldn't work on a 386.
- */
-int __down_trylock(struct semaphore * sem)
-{
-	int sleepers;
-	unsigned long flags;
-
-	spin_lock_irqsave(&semaphore_lock, flags);
-	sleepers = sem->sleepers + 1;
-	sem->sleepers = 0;
-
-	/*
-	 * Add "everybody else" and us into it. They aren't
-	 * playing, because we own the spinlock.
-	 */
-	if (!atomic_add_negative(sleepers, &sem->count))
-		wake_up(&sem->wait);
-
-	spin_unlock_irqrestore(&semaphore_lock, flags);
-	return 1;
-}
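The atomic_add_negative() bookkeeping in the routines above is subtle enough to deserve a stand-alone illustration. Every contending task has already subtracted 1 from count in the inline fast path, so a waiter adds back the decrements of the sleepers - 1 *other* tasks and learns from the sign of the result whether the semaphore is now its. The following user-space mock (my sketch with C11 atomics, not kernel code) reproduces the arithmetic:

	#include <stdatomic.h>
	#include <stdbool.h>

	/* Mock of the kernel primitive: add i to *v, report whether the
	 * result is negative.  The real one is an arch-specific atomic op. */
	static bool add_negative(int i, atomic_int *v)
	{
		return atomic_fetch_add(v, i) + i < 0;
	}

	int main(void)
	{
		atomic_int count = -2;	/* one holder plus two sleeping waiters */
		int sleepers = 2;

		/*
		 * Fold the other sleeper's decrement back into the count while
		 * keeping our own -1 pending: -2 + (2 - 1) == -1.  Still
		 * negative, so this task sets sem->sleepers back to 1 and
		 * sleeps; a non-negative result would mean we now own the
		 * semaphore and should zero sleepers before leaving the loop.
		 */
		if (add_negative(sleepers - 1, &count))
			return 1;	/* would sleep here */
		return 0;		/* would own the semaphore here */
	}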
-
-/*
- * The semaphore operations have a special calling sequence that
- * allows us to do a simpler in-line version of them. These routines
- * need to convert that sequence back into the C sequence when
- * there is contention on the semaphore.
- *
- * ip contains the semaphore pointer on entry. Save the C-clobbered
- * registers (r0 to r3 and lr), but not ip, as we use it as a return
- * value in some cases.
- * To remain AAPCS compliant (64-bit stack align) we save r4 as well.
- */
-asm("	.section .sched.text,\"ax\",%progbits	\n\
-	.align	5			\n\
-	.globl	__down_failed		\n\
-__down_failed:				\n\
-	stmfd	sp!, {r0 - r4, lr}	\n\
-	mov	r0, ip			\n\
-	bl	__down			\n\
-	ldmfd	sp!, {r0 - r4, pc}	\n\
-					\n\
-	.align	5			\n\
-	.globl	__down_interruptible_failed	\n\
-__down_interruptible_failed:		\n\
-	stmfd	sp!, {r0 - r4, lr}	\n\
-	mov	r0, ip			\n\
-	bl	__down_interruptible	\n\
-	mov	ip, r0			\n\
-	ldmfd	sp!, {r0 - r4, pc}	\n\
-					\n\
-	.align	5			\n\
-	.globl	__down_trylock_failed	\n\
-__down_trylock_failed:			\n\
-	stmfd	sp!, {r0 - r4, lr}	\n\
-	mov	r0, ip			\n\
-	bl	__down_trylock		\n\
-	mov	ip, r0			\n\
-	ldmfd	sp!, {r0 - r4, pc}	\n\
-					\n\
-	.align	5			\n\
-	.globl	__up_wakeup		\n\
-__up_wakeup:				\n\
-	stmfd	sp!, {r0 - r4, lr}	\n\
-	mov	r0, ip			\n\
-	bl	__up			\n\
-	ldmfd	sp!, {r0 - r4, pc}	\n\
-	");
-
-EXPORT_SYMBOL(__down_failed);
-EXPORT_SYMBOL(__down_interruptible_failed);
-EXPORT_SYMBOL(__down_trylock_failed);
-EXPORT_SYMBOL(__up_wakeup);
--
cgit v1.2.3
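Nothing changes for callers of the semaphore API: the names and semantics are the same before and after the conversion. Typical usage, sketched with hypothetical identifiers (demo_sem, demo_init and demo_op are illustrative only, not symbols from this series):

	#include <linux/semaphore.h>	/* new home of the generic API */

	static struct semaphore demo_sem;

	static int __init demo_init(void)
	{
		sema_init(&demo_sem, 1);	/* count of 1: mutual exclusion */
		return 0;
	}

	static int demo_op(void)
	{
		if (down_interruptible(&demo_sem))
			return -ERESTARTSYS;	/* interrupted by a signal */
		/* ... exclusive section ... */
		up(&demo_sem);
		return 0;
	}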
From d7f864be8323e5394040e2877594645b0e7da85d Mon Sep 17 00:00:00 2001
From: Catalin Marinas
Date: Fri, 18 Apr 2008 22:43:06 +0100
Subject: ARMv7: Add support for the ThumbEE state saving/restoring

This patch adds the detection and handling of the ThumbEE extension on
ARMv7 CPUs.

Signed-off-by: Catalin Marinas
---
 arch/arm/kernel/Makefile      |  1 +
 arch/arm/kernel/asm-offsets.c |  3 ++
 arch/arm/kernel/thumbee.c     | 81 +++++++++++++++++++++++++++++++++++++++++++
 3 files changed, 85 insertions(+)
 create mode 100644 arch/arm/kernel/thumbee.c
(limited to 'arch/arm/kernel')

diff --git a/arch/arm/kernel/Makefile b/arch/arm/kernel/Makefile
index 00d44c6fbfe9..d5be3f7ac0e3 100644
--- a/arch/arm/kernel/Makefile
+++ b/arch/arm/kernel/Makefile
@@ -22,6 +22,7 @@ obj-$(CONFIG_KEXEC)		+= machine_kexec.o relocate_kernel.o
 obj-$(CONFIG_KPROBES)		+= kprobes.o kprobes-decode.o
 obj-$(CONFIG_ATAGS_PROC)	+= atags.o
 obj-$(CONFIG_OABI_COMPAT)	+= sys_oabi-compat.o
+obj-$(CONFIG_ARM_THUMBEE)	+= thumbee.o
 obj-$(CONFIG_CRUNCH)		+= crunch.o crunch-bits.o
 AFLAGS_crunch-bits.o		:= -Wa,-mcpu=ep9312
 
diff --git a/arch/arm/kernel/asm-offsets.c b/arch/arm/kernel/asm-offsets.c
index 3278e713c32a..6604b474cfd7 100644
--- a/arch/arm/kernel/asm-offsets.c
+++ b/arch/arm/kernel/asm-offsets.c
@@ -58,6 +58,9 @@ int main(void)
   DEFINE(TI_TP_VALUE,		offsetof(struct thread_info, tp_value));
   DEFINE(TI_FPSTATE,		offsetof(struct thread_info, fpstate));
   DEFINE(TI_VFPSTATE,		offsetof(struct thread_info, vfpstate));
+#ifdef CONFIG_ARM_THUMBEE
+  DEFINE(TI_THUMBEE_STATE,	offsetof(struct thread_info, thumbee_state));
+#endif
 #ifdef CONFIG_IWMMXT
   DEFINE(TI_IWMMXT_STATE,	offsetof(struct thread_info, fpstate.iwmmxt));
 #endif
diff --git a/arch/arm/kernel/thumbee.c b/arch/arm/kernel/thumbee.c
new file mode 100644
index 000000000000..df3f6b7ebcea
--- /dev/null
+++ b/arch/arm/kernel/thumbee.c
@@ -0,0 +1,81 @@
+/*
+ * arch/arm/kernel/thumbee.c
+ *
+ * Copyright (C) 2008 ARM Limited
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+
+#include <asm/thread_notify.h>
+
+/*
+ * Access to the ThumbEE Handler Base register
+ */
+static inline unsigned long teehbr_read()
+{
+	unsigned long v;
+	asm("mrc	p14, 6, %0, c1, c0, 0\n" : "=r" (v));
+	return v;
+}
+
+static inline void teehbr_write(unsigned long v)
+{
+	asm("mcr	p14, 6, %0, c1, c0, 0\n" : : "r" (v));
+}
+
+static int thumbee_notifier(struct notifier_block *self, unsigned long cmd, void *t)
+{
+	struct thread_info *thread = t;
+
+	switch (cmd) {
+	case THREAD_NOTIFY_FLUSH:
+		thread->thumbee_state = 0;
+		break;
+	case THREAD_NOTIFY_SWITCH:
+		current_thread_info()->thumbee_state = teehbr_read();
+		teehbr_write(thread->thumbee_state);
+		break;
+	}
+
+	return NOTIFY_DONE;
+}
+
+static struct notifier_block thumbee_notifier_block = {
+	.notifier_call = thumbee_notifier,
+};
+
+static int __init thumbee_init(void)
+{
+	unsigned long pfr0;
+	unsigned int cpu_arch = cpu_architecture();
+
+	if (cpu_arch < CPU_ARCH_ARMv7)
+		return 0;
+
+	/* processor feature register 0 */
+	asm("mrc	p15, 0, %0, c0, c1, 0\n" : "=r" (pfr0));
+	if ((pfr0 & 0x0000f000) != 0x00001000)
+		return 0;
+
+	printk(KERN_INFO "ThumbEE CPU extension supported.\n");
+	elf_hwcap |= HWCAP_THUMBEE;
+	thread_register_notifier(&thumbee_notifier_block);
+
+	return 0;
+}
+
+late_initcall(thumbee_init);
--
cgit v1.2.3
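thumbee.c relies on the thread notifier chain rather than on hooks hard-coded into __switch_to. The chain fires before the stack switch, so current_thread_info() still names the outgoing task while the thread argument is the incoming one; that is why the THREAD_NOTIFY_SWITCH case above saves TEEHBR into the former and loads it from the latter. The plumbing is roughly as follows (reconstructed from memory of the <asm/thread_notify.h> of this era, not a verbatim quote):

	#include <linux/notifier.h>

	extern struct atomic_notifier_head thread_notify_head;

	static inline void thread_register_notifier(struct notifier_block *n)
	{
		atomic_notifier_chain_register(&thread_notify_head, n);
	}

	/* __switch_to fires this with the *incoming* task's thread_info: */
	static inline void thread_notify(unsigned long rc, struct thread_info *thread)
	{
		atomic_notifier_call_chain(&thread_notify_head, rc, thread);
	}

The same chain's THREAD_NOTIFY_FLUSH event lets the handler zero the saved state on exec, so a fresh program never inherits a stale handler base.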
From 48d7927bdf071d05cf5d15b816cf06b0937cb84f Mon Sep 17 00:00:00 2001
From: Paul Brook
Date: Fri, 18 Apr 2008 22:43:07 +0100
Subject: Add a prefetch abort handler

This patch adds a prefetch abort handler similar to the data abort one
and renames the latter for consistency. Initial implementation by Paul
Brook with some renaming by Catalin Marinas.

Signed-off-by: Paul Brook
Signed-off-by: Catalin Marinas
---
 arch/arm/kernel/asm-offsets.c  |  7 +++++++
 arch/arm/kernel/entry-armv.S   | 34 ++++++++++++++++++++++++----------
 arch/arm/kernel/entry-common.S |  5 +++++
 3 files changed, 36 insertions(+), 10 deletions(-)
(limited to 'arch/arm/kernel')

diff --git a/arch/arm/kernel/asm-offsets.c b/arch/arm/kernel/asm-offsets.c
index 6604b474cfd7..0a0d2479274b 100644
--- a/arch/arm/kernel/asm-offsets.c
+++ b/arch/arm/kernel/asm-offsets.c
@@ -111,5 +111,12 @@ int main(void)
   DEFINE(PROCINFO_INITFUNC,	offsetof(struct proc_info_list, __cpu_flush));
   DEFINE(PROCINFO_MM_MMUFLAGS,	offsetof(struct proc_info_list, __cpu_mm_mmu_flags));
   DEFINE(PROCINFO_IO_MMUFLAGS,	offsetof(struct proc_info_list, __cpu_io_mmu_flags));
+  BLANK();
+#ifdef MULTI_DABORT
+  DEFINE(PROCESSOR_DABT_FUNC,	offsetof(struct processor, _data_abort));
+#endif
+#ifdef MULTI_PABORT
+  DEFINE(PROCESSOR_PABT_FUNC,	offsetof(struct processor, _prefetch_abort));
+#endif
   return 0;
 }
diff --git a/arch/arm/kernel/entry-armv.S b/arch/arm/kernel/entry-armv.S
index a46d5b456765..5e647eb6b3b9 100644
--- a/arch/arm/kernel/entry-armv.S
+++ b/arch/arm/kernel/entry-armv.S
@@ -166,12 +166,12 @@ __dabt_svc:
 	@ The abort handler must return the aborted address in r0, and
 	@ the fault status register in r1.  r9 must be preserved.
 	@
-#ifdef MULTI_ABORT
+#ifdef MULTI_DABORT
 	ldr	r4, .LCprocfns
 	mov	lr, pc
-	ldr	pc, [r4]
+	ldr	pc, [r4, #PROCESSOR_DABT_FUNC]
 #else
-	bl	CPU_ABORT_HANDLER
+	bl	CPU_DABORT_HANDLER
 #endif
 
 	@
@@ -293,7 +293,6 @@ __pabt_svc:
 	mrs	r9, cpsr
 	tst	r3, #PSR_I_BIT
 	biceq	r9, r9, #PSR_I_BIT
-	msr	cpsr_c, r9
 
 	@
 	@ set args, then call main handler
@@ -301,7 +300,15 @@ __pabt_svc:
 	@  r0 - address of faulting instruction
 	@  r1 - pointer to registers on stack
 	@
-	mov	r0, r2			@ address (pc)
+#ifdef MULTI_PABORT
+	mov	r0, r2			@ pass address of aborted instruction.
+	ldr	r4, .LCprocfns
+	mov	lr, pc
+	ldr	pc, [r4, #PROCESSOR_PABT_FUNC]
+#else
+	CPU_PABORT_HANDLER(r0, r2)
+#endif
+	msr	cpsr_c, r9		@ Maybe enable interrupts
 	mov	r1, sp			@ regs
 	bl	do_PrefetchAbort	@ call abort handler
 
@@ -320,7 +327,7 @@ __pabt_svc:
 	.align	5
 .LCcralign:
 	.word	cr_alignment
-#ifdef MULTI_ABORT
+#ifdef MULTI_DABORT
 .LCprocfns:
 	.word	processor
 #endif
@@ -404,12 +411,12 @@ __dabt_usr:
 	@ The abort handler must return the aborted address in r0, and
 	@ the fault status register in r1.
 	@
-#ifdef MULTI_ABORT
+#ifdef MULTI_DABORT
 	ldr	r4, .LCprocfns
 	mov	lr, pc
-	ldr	pc, [r4]
+	ldr	pc, [r4, #PROCESSOR_DABT_FUNC]
 #else
-	bl	CPU_ABORT_HANDLER
+	bl	CPU_DABORT_HANDLER
 #endif
 
 	@
@@ -619,8 +626,15 @@ __und_usr_unknown:
 __pabt_usr:
 	usr_entry
 
+#ifdef MULTI_PABORT
+	mov	r0, r2			@ pass address of aborted instruction.
+	ldr	r4, .LCprocfns
+	mov	lr, pc
+	ldr	pc, [r4, #PROCESSOR_PABT_FUNC]
+#else
+	CPU_PABORT_HANDLER(r0, r2)
+#endif
 	enable_irq			@ Enable interrupts
-	mov	r0, r2			@ address (pc)
 	mov	r1, sp			@ regs
 	bl	do_PrefetchAbort	@ call abort handler
 	/* fall through */
diff --git a/arch/arm/kernel/entry-common.S b/arch/arm/kernel/entry-common.S
index 6c90c50a9ee3..597ed00a08d8 100644
--- a/arch/arm/kernel/entry-common.S
+++ b/arch/arm/kernel/entry-common.S
@@ -352,6 +352,11 @@ sys_mmap2:
 		b	do_mmap2
 #endif
 
+ENTRY(pabort_ifar)
+		mrc	p15, 0, r0, cr6, cr0, 2
+ENTRY(pabort_noifar)
+		mov	pc, lr
+
 #ifdef CONFIG_OABI_COMPAT

 /*
--
cgit v1.2.3
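The pabort_ifar/pabort_noifar pair added to entry-common.S encodes the two strategies for recovering the faulting address. CPUs that implement an Instruction Fault Address Register (IFAR, CP15 c6/c0/2) can report the precise address, while older parts fall back to the PC of the aborted instruction that usr_entry left in r2; note the deliberate fall-through, where pabort_ifar executes its mrc and then runs into pabort_noifar's shared return. In C terms (a sketch; HAVE_IFAR is an illustrative stand-in for the real per-CPU selection, not a config symbol):

	unsigned long pabort_address(unsigned long aborted_pc)
	{
		unsigned long addr;

	#ifdef HAVE_IFAR	/* hypothetical guard for IFAR-capable CPUs */
		/* pabort_ifar: ask the MMU where the instruction fetch faulted */
		asm("mrc p15, 0, %0, c6, c0, 2" : "=r" (addr));
	#else
		/* pabort_noifar: best available answer is the aborted PC */
		addr = aborted_pc;
	#endif
		return addr;
	}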
From cb170a45d69b573a08247acfbbff3b9d6e6e2f8f Mon Sep 17 00:00:00 2001
From: Paul Brook
Date: Fri, 18 Apr 2008 22:43:08 +0100
Subject: Linux Thumb-2 support for user-space applications

This patch implements Thumb-2 application support in Linux. Original
implementation by Paul Brook with fixes for VFP and Neon by Catalin
Marinas.

Signed-off-by: Paul Brook
Signed-off-by: Catalin Marinas
---
 arch/arm/kernel/entry-armv.S | 53 ++++++++++++++++++++++++++++++++++++--------
 1 file changed, 44 insertions(+), 9 deletions(-)
(limited to 'arch/arm/kernel')

diff --git a/arch/arm/kernel/entry-armv.S b/arch/arm/kernel/entry-armv.S
index 5e647eb6b3b9..6fd1460111ec 100644
--- a/arch/arm/kernel/entry-armv.S
+++ b/arch/arm/kernel/entry-armv.S
@@ -462,10 +462,6 @@ __irq_usr:
 __und_usr:
 	usr_entry
 
-	tst	r3, #PSR_T_BIT			@ Thumb mode?
-	bne	__und_usr_unknown		@ ignore FP
-	sub	r4, r2, #4
-
 	@
 	@ fall through to the emulation code, which returns using r9 if
 	@ it has emulated the instruction, or the more conventional lr
@@ -475,7 +471,24 @@ __und_usr:
 	@
 	adr	r9, ret_from_exception
 	adr	lr, __und_usr_unknown
-1:	ldrt	r0, [r4]
+	tst	r3, #PSR_T_BIT			@ Thumb mode?
+	subeq	r4, r2, #4			@ ARM instr at LR - 4
+	subne	r4, r2, #2			@ Thumb instr at LR - 2
+1:	ldreqt	r0, [r4]
+	beq	call_fpe
+	@ Thumb instruction
+#if __LINUX_ARM_ARCH__ >= 7
+2:	ldrht	r5, [r4], #2
+	and	r0, r5, #0xf800			@ mask bits 111x x... .... ....
+	cmp	r0, #0xe800			@ 32bit instruction if xx != 0
+	blo	__und_usr_unknown
+3:	ldrht	r0, [r4]
+	add	r2, r2, #2			@ r2 is PC + 2, make it PC + 4
+	orr	r0, r0, r5, lsl #16
+#else
+	b	__und_usr_unknown
+#endif
+
 	@
 	@ fallthrough to call_fpe
 	@
@@ -484,10 +497,14 @@ __und_usr:
  * The out of line fixup for the ldrt above.
  */
 	.section .fixup, "ax"
-2:	mov	pc, r9
+4:	mov	pc, r9
 	.previous
 	.section __ex_table,"a"
-	.long	1b, 2b
+	.long	1b, 4b
+#if __LINUX_ARM_ARCH__ >= 7
+	.long	2b, 4b
+	.long	3b, 4b
+#endif
 	.previous
 
 /*
@@ -514,9 +531,16 @@ __und_usr:
 * r10  = this threads thread_info structure.
 * lr   = unrecognised instruction return address
 */
+	@
+	@ Fall-through from Thumb-2 __und_usr
+	@
+#ifdef CONFIG_NEON
+	adr	r6, .LCneon_thumb_opcodes
+	b	2f
+#endif
 call_fpe:
 #ifdef CONFIG_NEON
-	adr	r6, .LCneon_opcodes
+	adr	r6, .LCneon_arm_opcodes
 2:
 	ldr	r7, [r6], #4			@ mask value
 	cmp	r7, #0				@ end mask?
@@ -533,6 +557,7 @@ call_fpe:
 1:
 #endif
 	tst	r0, #0x08000000			@ only CDP/CPRT/LDC/STC have bit 27
+	tstne	r0, #0x04000000			@ bit 26 set on both ARM and Thumb-2
 #if defined(CONFIG_CPU_ARM610) || defined(CONFIG_CPU_ARM710)
 	and	r8, r0, #0x0f000000		@ mask out op-code bits
 	teqne	r8, #0x0f000000			@ SWI (ARM6/7 bug)?
@@ -584,13 +609,23 @@ call_fpe:
 
 #ifdef CONFIG_NEON
 	.align	6
 
-.LCneon_opcodes:
+.LCneon_arm_opcodes:
 	.word	0xfe000000			@ mask
 	.word	0xf2000000			@ opcode
 
 	.word	0xff100000			@ mask
 	.word	0xf4000000			@ opcode
 
+	.word	0x00000000			@ mask
+	.word	0x00000000			@ opcode
+
+.LCneon_thumb_opcodes:
+	.word	0xef000000			@ mask
+	.word	0xef000000			@ opcode
+
+	.word	0xff100000			@ mask
+	.word	0xf9000000			@ opcode
+
 	.word	0x00000000			@ mask
 	.word	0x00000000			@ opcode
 #endif
--
cgit v1.2.3
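The halfword arithmetic in __und_usr decides whether a trapped Thumb instruction is 16 or 32 bits wide: a first halfword whose top five bits are 0b11101, 0b11110 or 0b11111 opens a 32-bit Thumb-2 instruction, which is exactly what the (r5 & 0xf800) >= 0xe800 test checks, and the two halfwords are then glued together first-high/second-low for the coprocessor decode in call_fpe. The same logic in C, as a sketch for illustration:

	#include <stdbool.h>
	#include <stdint.h>

	/* Mirrors "and r0, r5, #0xf800; cmp r0, #0xe800; blo ..." above. */
	static bool thumb_insn_is_32bit(uint16_t first)
	{
		return (first & 0xf800) >= 0xe800;
	}

	/* Mirrors "orr r0, r0, r5, lsl #16": first halfword in the top half,
	 * so the combined word can be matched against the 32-bit opcode
	 * masks in the .LCneon_thumb_opcodes table. */
	static uint32_t thumb2_combine(uint16_t first, uint16_t second)
	{
		return ((uint32_t)first << 16) | second;
	}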
Signed-off-by: Lennert Buytenhek
Acked-by: Nicolas Pitre
Signed-off-by: Russell King
---
 arch/arm/kernel/head-common.S | 7 ++++++-
 1 file changed, 6 insertions(+), 1 deletion(-)
(limited to 'arch/arm/kernel')

diff --git a/arch/arm/kernel/head-common.S b/arch/arm/kernel/head-common.S
index 50f667febe29..7e9c00a8a412 100644
--- a/arch/arm/kernel/head-common.S
+++ b/arch/arm/kernel/head-common.S
@@ -75,8 +75,13 @@ __error_p:
 #ifdef CONFIG_DEBUG_LL
 	adr	r0, str_p1
 	bl	printascii
+	mov	r0, r9
+	bl	printhex8
+	adr	r0, str_p2
+	bl	printascii
 	b	__error
-str_p1:	.asciz	"\nError: unrecognized/unsupported processor variant.\n"
+str_p1:	.asciz	"\nError: unrecognized/unsupported processor variant (0x"
+str_p2:	.asciz	").\n"
 	.align
 #endif
--
cgit v1.2.3

From 28fab1a2fd5b1aa92f9ad1adc9e3b8914f89bc74 Mon Sep 17 00:00:00 2001
From: Russell King
Date: Sun, 13 Apr 2008 17:47:35 +0100
Subject: [ARM] Fix kernel mode preemption

Luc Van Oostenryck reported:

  The code removed by this patch tested the irq_cpustat_t members
  __local_irq_count and __local_bh_count, but these fields were removed
  some time ago:

  http://git.kernel.org/?p=linux/kernel/git/tglx/history.git;a=commitdiff;h=3ab146c93e039dec99fec8d441a8dd046fe510cc

Fix this oversight.

Acked-by: Bill Gatliff
Signed-off-by: Russell King
---
 arch/arm/kernel/entry-armv.S | 24 +++++-------------------
 1 file changed, 5 insertions(+), 19 deletions(-)
(limited to 'arch/arm/kernel')

diff --git a/arch/arm/kernel/entry-armv.S b/arch/arm/kernel/entry-armv.S
index a46d5b456765..65cef872c5f2 100644
--- a/arch/arm/kernel/entry-armv.S
+++ b/arch/arm/kernel/entry-armv.S
@@ -209,14 +209,12 @@ __irq_svc:
 
 	irq_handler
 #ifdef CONFIG_PREEMPT
+	str	r8, [tsk, #TI_PREEMPT]		@ restore preempt count
 	ldr	r0, [tsk, #TI_FLAGS]		@ get flags
+	teq	r8, #0				@ if preempt count != 0
+	movne	r0, #0				@ force flags to 0
 	tst	r0, #_TIF_NEED_RESCHED
 	blne	svc_preempt
-preempt_return:
-	ldr	r0, [tsk, #TI_PREEMPT]		@ read preempt value
-	str	r8, [tsk, #TI_PREEMPT]		@ restore preempt count
-	teq	r0, r7
-	strne	r0, [r0, -r0]			@ bug()
 #endif
 	ldr	r0, [sp, #S_PSR]		@ irqs are already disabled
 	msr	spsr_cxsf, r0
@@ -230,19 +228,11 @@ __irq_svc:
 
 #ifdef CONFIG_PREEMPT
 svc_preempt:
-	teq	r8, #0				@ was preempt count = 0
-	ldreq	r6, .LCirq_stat
-	movne	pc, lr				@ no
-	ldr	r0, [r6, #4]			@ local_irq_count
-	ldr	r1, [r6, #8]			@ local_bh_count
-	adds	r0, r0, r1
-	movne	pc, lr
-	mov	r7, #0				@ preempt_schedule_irq
-	str	r7, [tsk, #TI_PREEMPT]		@ expects preempt_count == 0
+	mov	r8, lr
 1:	bl	preempt_schedule_irq		@ irq en/disable is done inside
 	ldr	r0, [tsk, #TI_FLAGS]		@ get new tasks TI_FLAGS
 	tst	r0, #_TIF_NEED_RESCHED
-	beq	preempt_return			@ go again
+	moveq	pc, r8				@ go again
 	b	1b
 #endif
 
@@ -326,10 +316,6 @@ __pabt_svc:
 #endif
.LCfp:
 	.word	fp_enter
-#ifdef CONFIG_PREEMPT
-.LCirq_stat:
-	.word	irq_stat
-#endif
 
 /*
  * User mode handlers
--
cgit v1.2.3
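The rewritten __irq_svc/svc_preempt path is easier to follow in C. Restoring the preempt count first and then forcing the flags word to zero whenever that count is non-zero collapses the old preempt_return bookkeeping into two extra instructions, and svc_preempt simply loops on preempt_schedule_irq() until _TIF_NEED_RESCHED is clear. A sketch (the ti_* helper names are illustrative, not kernel symbols):

	static void irq_svc_preempt_check(unsigned long saved_preempt_count)
	{
		unsigned long flags;

		ti_write_preempt_count(saved_preempt_count); /* str r8, [tsk, #TI_PREEMPT] */
		flags = ti_read_flags();		/* ldr r0, [tsk, #TI_FLAGS] */
		if (saved_preempt_count != 0)		/* teq r8, #0 */
			flags = 0;			/* movne r0, #0: never preempt here */
		if (flags & _TIF_NEED_RESCHED) {	/* tst / blne svc_preempt */
			do {
				preempt_schedule_irq();	/* re-enables IRQs internally */
			} while (ti_read_flags() & _TIF_NEED_RESCHED);
		}
	}

This also explains why the .LCirq_stat literal could go: with __local_irq_count and __local_bh_count long gone from irq_cpustat_t, the preempt count alone decides whether rescheduling from IRQ exit is safe.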