From 048eb582f3f89737d4a29668de9935e6feea7c36 Mon Sep 17 00:00:00 2001
From: Sam Ravnborg
Date: Fri, 9 Sep 2005 22:32:31 +0200
Subject: kbuild: mips use generic asm-offsets.h support

Removed obsolete stuff from the arch Makefile. mips had a special rule
for generating asm-offsets.h, so it is preserved via an
architecture-specific hook in the top-level Kbuild file. Renamed the
.h file to asm-offsets.h.

Signed-off-by: Sam Ravnborg
---
 include/asm-mips/asmmacro-32.h | 2 +-
 include/asm-mips/asmmacro-64.h | 2 +-
 include/asm-mips/sim.h         | 2 +-
 include/asm-mips/stackframe.h  | 2 +-
 4 files changed, 4 insertions(+), 4 deletions(-)

(limited to 'include/asm-mips')

diff --git a/include/asm-mips/asmmacro-32.h b/include/asm-mips/asmmacro-32.h
index ac8823df2554..11daf5ceb7b4 100644
--- a/include/asm-mips/asmmacro-32.h
+++ b/include/asm-mips/asmmacro-32.h
@@ -7,7 +7,7 @@
 #ifndef _ASM_ASMMACRO_32_H
 #define _ASM_ASMMACRO_32_H
 
-#include <asm/offset.h>
+#include <asm/asm-offsets.h>
 #include
 #include
 #include

diff --git a/include/asm-mips/asmmacro-64.h b/include/asm-mips/asmmacro-64.h
index bbed35511f5a..559c355b9b86 100644
--- a/include/asm-mips/asmmacro-64.h
+++ b/include/asm-mips/asmmacro-64.h
@@ -8,7 +8,7 @@
 #ifndef _ASM_ASMMACRO_64_H
 #define _ASM_ASMMACRO_64_H
 
-#include <asm/offset.h>
+#include <asm/asm-offsets.h>
 #include
 #include
 #include

diff --git a/include/asm-mips/sim.h b/include/asm-mips/sim.h
index 3ccfe09fa744..9c2af1b00e19 100644
--- a/include/asm-mips/sim.h
+++ b/include/asm-mips/sim.h
@@ -11,7 +11,7 @@
 
 #include
 
-#include <asm/offset.h>
+#include <asm/asm-offsets.h>
 
 #define __str2(x) #x
 #define __str(x) __str2(x)

diff --git a/include/asm-mips/stackframe.h b/include/asm-mips/stackframe.h
index fb42f99f8527..7b5e64600bc8 100644
--- a/include/asm-mips/stackframe.h
+++ b/include/asm-mips/stackframe.h
@@ -15,7 +15,7 @@
 #include
 #include
 
-#include <asm/offset.h>
+#include <asm/asm-offsets.h>
 
 .macro SAVE_AT
 .set push
-- cgit v1.2.3

From 4d666d7ada2e14d71d463c85b8b5ef2e2e6723f2 Mon Sep 17 00:00:00 2001
From: Yoichi Yuasa
Date: Fri, 9 Sep 2005 13:01:49 -0700
Subject: [PATCH] mips: add TANBAC TB0287 support

Add TANBAC TB0287 support.

Signed-off-by: Yoichi Yuasa
Cc: Ralf Baechle
Signed-off-by: Andrew Morton
Signed-off-by: Linus Torvalds
---
 include/asm-mips/vr41xx/tb0287.h | 43 ++++++++++++++++++++++++++++++++++++++++
 1 file changed, 43 insertions(+)
 create mode 100644 include/asm-mips/vr41xx/tb0287.h

(limited to 'include/asm-mips')

diff --git a/include/asm-mips/vr41xx/tb0287.h b/include/asm-mips/vr41xx/tb0287.h
new file mode 100644
index 000000000000..dd9832313afe
--- /dev/null
+++ b/include/asm-mips/vr41xx/tb0287.h
@@ -0,0 +1,43 @@
+/*
+ * tb0287.h, Include file for TANBAC TB0287 mini-ITX board.
+ *
+ * Copyright (C) 2005 Media Lab Inc.
+ *
+ * This code is largely based on tb0219.h.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+#ifndef __TANBAC_TB0287_H
+#define __TANBAC_TB0287_H
+
+#include
+
+/*
+ * General-Purpose I/O Pin Number
+ */
+#define TB0287_PCI_SLOT_PIN	2
+#define TB0287_SM501_PIN	3
+#define TB0287_SIL680A_PIN	8
+#define TB0287_RTL8110_PIN	13
+
+/*
+ * Interrupt Number
+ */
+#define TB0287_PCI_SLOT_IRQ	GIU_IRQ(TB0287_PCI_SLOT_PIN)
+#define TB0287_SM501_IRQ	GIU_IRQ(TB0287_SM501_PIN)
+#define TB0287_SIL680A_IRQ	GIU_IRQ(TB0287_SIL680A_PIN)
+#define TB0287_RTL8110_IRQ	GIU_IRQ(TB0287_RTL8110_PIN)
+
+#endif /* __TANBAC_TB0287_H */
-- cgit v1.2.3

From 24b20ac6e1c80082889b3d6ae08aadda777519e5 Mon Sep 17 00:00:00 2001
From: Kenji Kaneshige
Date: Fri, 9 Sep 2005 13:02:11 -0700
Subject: [PATCH] remove unnecessary handle_IRQ_event() prototypes

The function prototype for handle_IRQ_event() in a few architectures is
not needed because they use GENERIC_HARDIRQ.

Signed-off-by: Kenji Kaneshige
Signed-off-by: Andrew Morton
Signed-off-by: Linus Torvalds
---
 include/asm-mips/irq.h | 3 ---
 1 file changed, 3 deletions(-)

(limited to 'include/asm-mips')

diff --git a/include/asm-mips/irq.h b/include/asm-mips/irq.h
index b90b11d0b886..3f2470e9e678 100644
--- a/include/asm-mips/irq.h
+++ b/include/asm-mips/irq.h
@@ -49,7 +49,4 @@ do { \
 
 extern void arch_init_irq(void);
 
-struct irqaction;
-int handle_IRQ_event(unsigned int, struct pt_regs *, struct irqaction *);
-
 #endif /* _ASM_IRQ_H */
-- cgit v1.2.3

From fb1c8f93d869b34cacb8b8932e2b83d96a19d720 Mon Sep 17 00:00:00 2001
From: Ingo Molnar
Date: Sat, 10 Sep 2005 00:25:56 -0700
Subject: [PATCH] spinlock consolidation

This patch (written by me and also containing many suggestions of Arjan
van de Ven) does a major cleanup of the spinlock code. It does the
following things:

 - consolidates and enhances the spinlock/rwlock debugging code

 - simplifies the asm/spinlock.h files

 - encapsulates the raw spinlock type and moves generic spinlock
   features (such as ->break_lock) into the generic code.

 - cleans up the spinlock code hierarchy to get rid of the spaghetti.

Most notably there's now only a single variant of the debugging code,
located in lib/spinlock_debug.c. (previously we had one SMP debugging
variant per architecture, plus a separate generic one for UP builds)

Also, I've enhanced the rwlock debugging facility; it will now track
write-owners. There is new spinlock-owner/CPU-tracking on SMP builds
too. All locks have lockup detection now, which will work for both soft
and hard spin/rwlock lockups.

The arch-level include files now only contain the minimally necessary
subset of the spinlock code - all the rest that can be generalized now
lives in the generic headers:

 include/asm-i386/spinlock_types.h   |   16
 include/asm-x86_64/spinlock_types.h |   16

I have also split up the various spinlock variants into separate files,
making it easier to see which does what.
The new layout is:

   SMP                         |  UP
   ----------------------------|-----------------------------------
   asm/spinlock_types_smp.h    |  linux/spinlock_types_up.h
   linux/spinlock_types.h      |  linux/spinlock_types.h
   asm/spinlock_smp.h          |  linux/spinlock_up.h
   linux/spinlock_api_smp.h    |  linux/spinlock_api_up.h
   linux/spinlock.h            |  linux/spinlock.h

/*
 * here's the role of the various spinlock/rwlock related include files:
 *
 * on SMP builds:
 *
 *  asm/spinlock_types.h: contains the raw_spinlock_t/raw_rwlock_t and the
 *                        initializers
 *
 *  linux/spinlock_types.h:
 *                        defines the generic type and initializers
 *
 *  asm/spinlock.h:       contains the __raw_spin_*()/etc. lowlevel
 *                        implementations, mostly inline assembly code
 *
 *   (also included on UP-debug builds:)
 *
 *  linux/spinlock_api_smp.h:
 *                        contains the prototypes for the _spin_*() APIs.
 *
 *  linux/spinlock.h:     builds the final spin_*() APIs.
 *
 * on UP builds:
 *
 *  linux/spinlock_types_up.h:
 *                        contains the generic, simplified UP spinlock type.
 *                        (which is an empty structure on non-debug builds)
 *
 *  linux/spinlock_types.h:
 *                        defines the generic type and initializers
 *
 *  linux/spinlock_up.h:
 *                        contains the __raw_spin_*()/etc. version of UP
 *                        builds. (which are NOPs on non-debug, non-preempt
 *                        builds)
 *
 *   (included on UP-non-debug builds:)
 *
 *  linux/spinlock_api_up.h:
 *                        builds the _spin_*() APIs.
 *
 *  linux/spinlock.h:     builds the final spin_*() APIs.
 */

All SMP and UP architectures are converted by this patch.

arm, i386, ia64, ppc, ppc64, s390/s390x, and x64 were build-tested via
crosscompilers. m32r, mips, sh, and sparc have not been tested yet, but
should be mostly fine.

From: Grant Grundler

Booted and lightly tested on a500-44 (64-bit, SMP kernel, dual CPU).
Builds 32-bit SMP kernel (not booted or tested). I did not try to build
non-SMP kernels. That should be trivial to fix up later if necessary.

I converted the bit-ops atomic_hash lock to raw_spinlock_t. Doing so
avoids some ugly nesting of linux/*.h and asm/*.h files. Those
particular locks are well tested and contained entirely inside arch
specific code. I do NOT expect any new issues to arise with them.

If someone does ever need to use debug/metrics with them, then they
will need to unravel this hairball between spinlocks, atomic ops, and
bit ops that exist only because parisc has exactly one atomic
instruction: LDCW (load and clear word).
From: "Luck, Tony" ia64 fix Signed-off-by: Ingo Molnar Signed-off-by: Arjan van de Ven Signed-off-by: Grant Grundler Cc: Matthew Wilcox Signed-off-by: Hirokazu Takata Signed-off-by: Mikael Pettersson Signed-off-by: Benoit Boissinot Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/asm-mips/spinlock.h | 75 ++++++++++++++------------------------- include/asm-mips/spinlock_types.h | 20 +++++++++++ 2 files changed, 47 insertions(+), 48 deletions(-) create mode 100644 include/asm-mips/spinlock_types.h (limited to 'include/asm-mips') diff --git a/include/asm-mips/spinlock.h b/include/asm-mips/spinlock.h index 114d3eb98a6a..4d0135b11156 100644 --- a/include/asm-mips/spinlock.h +++ b/include/asm-mips/spinlock.h @@ -16,20 +16,10 @@ * Your basic SMP spinlocks, allowing only a single CPU anywhere */ -typedef struct { - volatile unsigned int lock; -#ifdef CONFIG_PREEMPT - unsigned int break_lock; -#endif -} spinlock_t; - -#define SPIN_LOCK_UNLOCKED (spinlock_t) { 0 } - -#define spin_lock_init(x) do { (x)->lock = 0; } while(0) - -#define spin_is_locked(x) ((x)->lock != 0) -#define spin_unlock_wait(x) do { barrier(); } while ((x)->lock) -#define _raw_spin_lock_flags(lock, flags) _raw_spin_lock(lock) +#define __raw_spin_is_locked(x) ((x)->lock != 0) +#define __raw_spin_lock_flags(lock, flags) __raw_spin_lock(lock) +#define __raw_spin_unlock_wait(x) \ + do { cpu_relax(); } while ((x)->lock) /* * Simple spin lock operations. There are two variants, one clears IRQ's @@ -38,13 +28,13 @@ typedef struct { * We make no fairness assumptions. They have a cost. */ -static inline void _raw_spin_lock(spinlock_t *lock) +static inline void __raw_spin_lock(raw_spinlock_t *lock) { unsigned int tmp; if (R10000_LLSC_WAR) { __asm__ __volatile__( - " .set noreorder # _raw_spin_lock \n" + " .set noreorder # __raw_spin_lock \n" "1: ll %1, %2 \n" " bnez %1, 1b \n" " li %1, 1 \n" @@ -58,7 +48,7 @@ static inline void _raw_spin_lock(spinlock_t *lock) : "memory"); } else { __asm__ __volatile__( - " .set noreorder # _raw_spin_lock \n" + " .set noreorder # __raw_spin_lock \n" "1: ll %1, %2 \n" " bnez %1, 1b \n" " li %1, 1 \n" @@ -72,10 +62,10 @@ static inline void _raw_spin_lock(spinlock_t *lock) } } -static inline void _raw_spin_unlock(spinlock_t *lock) +static inline void __raw_spin_unlock(raw_spinlock_t *lock) { __asm__ __volatile__( - " .set noreorder # _raw_spin_unlock \n" + " .set noreorder # __raw_spin_unlock \n" " sync \n" " sw $0, %0 \n" " .set\treorder \n" @@ -84,13 +74,13 @@ static inline void _raw_spin_unlock(spinlock_t *lock) : "memory"); } -static inline unsigned int _raw_spin_trylock(spinlock_t *lock) +static inline unsigned int __raw_spin_trylock(raw_spinlock_t *lock) { unsigned int temp, res; if (R10000_LLSC_WAR) { __asm__ __volatile__( - " .set noreorder # _raw_spin_trylock \n" + " .set noreorder # __raw_spin_trylock \n" "1: ll %0, %3 \n" " ori %2, %0, 1 \n" " sc %2, %1 \n" @@ -104,7 +94,7 @@ static inline unsigned int _raw_spin_trylock(spinlock_t *lock) : "memory"); } else { __asm__ __volatile__( - " .set noreorder # _raw_spin_trylock \n" + " .set noreorder # __raw_spin_trylock \n" "1: ll %0, %3 \n" " ori %2, %0, 1 \n" " sc %2, %1 \n" @@ -129,24 +119,13 @@ static inline unsigned int _raw_spin_trylock(spinlock_t *lock) * read-locks. 
*/ -typedef struct { - volatile unsigned int lock; -#ifdef CONFIG_PREEMPT - unsigned int break_lock; -#endif -} rwlock_t; - -#define RW_LOCK_UNLOCKED (rwlock_t) { 0 } - -#define rwlock_init(x) do { *(x) = RW_LOCK_UNLOCKED; } while(0) - -static inline void _raw_read_lock(rwlock_t *rw) +static inline void __raw_read_lock(raw_rwlock_t *rw) { unsigned int tmp; if (R10000_LLSC_WAR) { __asm__ __volatile__( - " .set noreorder # _raw_read_lock \n" + " .set noreorder # __raw_read_lock \n" "1: ll %1, %2 \n" " bltz %1, 1b \n" " addu %1, 1 \n" @@ -160,7 +139,7 @@ static inline void _raw_read_lock(rwlock_t *rw) : "memory"); } else { __asm__ __volatile__( - " .set noreorder # _raw_read_lock \n" + " .set noreorder # __raw_read_lock \n" "1: ll %1, %2 \n" " bltz %1, 1b \n" " addu %1, 1 \n" @@ -177,13 +156,13 @@ static inline void _raw_read_lock(rwlock_t *rw) /* Note the use of sub, not subu which will make the kernel die with an overflow exception if we ever try to unlock an rwlock that is already unlocked or is being held by a writer. */ -static inline void _raw_read_unlock(rwlock_t *rw) +static inline void __raw_read_unlock(raw_rwlock_t *rw) { unsigned int tmp; if (R10000_LLSC_WAR) { __asm__ __volatile__( - "1: ll %1, %2 # _raw_read_unlock \n" + "1: ll %1, %2 # __raw_read_unlock \n" " sub %1, 1 \n" " sc %1, %0 \n" " beqzl %1, 1b \n" @@ -193,7 +172,7 @@ static inline void _raw_read_unlock(rwlock_t *rw) : "memory"); } else { __asm__ __volatile__( - " .set noreorder # _raw_read_unlock \n" + " .set noreorder # __raw_read_unlock \n" "1: ll %1, %2 \n" " sub %1, 1 \n" " sc %1, %0 \n" @@ -206,13 +185,13 @@ static inline void _raw_read_unlock(rwlock_t *rw) } } -static inline void _raw_write_lock(rwlock_t *rw) +static inline void __raw_write_lock(raw_rwlock_t *rw) { unsigned int tmp; if (R10000_LLSC_WAR) { __asm__ __volatile__( - " .set noreorder # _raw_write_lock \n" + " .set noreorder # __raw_write_lock \n" "1: ll %1, %2 \n" " bnez %1, 1b \n" " lui %1, 0x8000 \n" @@ -226,7 +205,7 @@ static inline void _raw_write_lock(rwlock_t *rw) : "memory"); } else { __asm__ __volatile__( - " .set noreorder # _raw_write_lock \n" + " .set noreorder # __raw_write_lock \n" "1: ll %1, %2 \n" " bnez %1, 1b \n" " lui %1, 0x8000 \n" @@ -241,26 +220,26 @@ static inline void _raw_write_lock(rwlock_t *rw) } } -static inline void _raw_write_unlock(rwlock_t *rw) +static inline void __raw_write_unlock(raw_rwlock_t *rw) { __asm__ __volatile__( - " sync # _raw_write_unlock \n" + " sync # __raw_write_unlock \n" " sw $0, %0 \n" : "=m" (rw->lock) : "m" (rw->lock) : "memory"); } -#define _raw_read_trylock(lock) generic_raw_read_trylock(lock) +#define __raw_read_trylock(lock) generic__raw_read_trylock(lock) -static inline int _raw_write_trylock(rwlock_t *rw) +static inline int __raw_write_trylock(raw_rwlock_t *rw) { unsigned int tmp; int ret; if (R10000_LLSC_WAR) { __asm__ __volatile__( - " .set noreorder # _raw_write_trylock \n" + " .set noreorder # __raw_write_trylock \n" " li %2, 0 \n" "1: ll %1, %3 \n" " bnez %1, 2f \n" @@ -277,7 +256,7 @@ static inline int _raw_write_trylock(rwlock_t *rw) : "memory"); } else { __asm__ __volatile__( - " .set noreorder # _raw_write_trylock \n" + " .set noreorder # __raw_write_trylock \n" " li %2, 0 \n" "1: ll %1, %3 \n" " bnez %1, 2f \n" diff --git a/include/asm-mips/spinlock_types.h b/include/asm-mips/spinlock_types.h new file mode 100644 index 000000000000..ce26c5048b15 --- /dev/null +++ b/include/asm-mips/spinlock_types.h @@ -0,0 +1,20 @@ +#ifndef _ASM_SPINLOCK_TYPES_H +#define _ASM_SPINLOCK_TYPES_H + 
+#ifndef __LINUX_SPINLOCK_TYPES_H +# error "please don't include this file directly" +#endif + +typedef struct { + volatile unsigned int lock; +} raw_spinlock_t; + +#define __RAW_SPIN_LOCK_UNLOCKED { 0 } + +typedef struct { + volatile unsigned int lock; +} raw_rwlock_t; + +#define __RAW_RW_LOCK_UNLOCKED { 0 } + +#endif -- cgit v1.2.3
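For readers unfamiliar with the generic asm-offsets.h machinery the first
patch builds on, the usual technique is to compile a small C file to
assembly and scrape marker lines out of the .s output, which kbuild then
rewrites into #define lines with sed. The sketch below is not part of any
patch above, and the struct and symbol names (pt_regs_model, PT_SIZE_MODEL,
PT_STATUS_MODEL) are invented for illustration; mips at this point still
used its own sed rule, preserved by the arch hook the commit mentions.

/*
 * offsets_model.c: compile with "gcc -S offsets_model.c" and each
 * DEFINE() emits a "->SYMBOL value origin" marker line into
 * offsets_model.s, where a sed pass can turn it into a #define.
 * The function is never executed; it exists only to be compiled.
 */
#include <stddef.h>

struct pt_regs_model {			/* invented stand-in for pt_regs */
	unsigned long regs[32];
	unsigned long cp0_status;
};

#define DEFINE(sym, val) \
	asm volatile("\n->" #sym " %0 " #val : : "i" (val))

void output_offsets(void)
{
	DEFINE(PT_SIZE_MODEL, sizeof(struct pt_regs_model));
	DEFINE(PT_STATUS_MODEL, offsetof(struct pt_regs_model, cp0_status));
}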
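The layering the spinlock consolidation describes is easiest to see
end-to-end in miniature. The following user-space C11 model is a sketch,
not kernel code: spinlock_model_t, spin_lock_model(), and
spin_unlock_model() are invented names, and a C11 atomic_flag
test-and-set loop stands in for the MIPS ll/sc assembly in the diff; only
raw_spinlock_t, __RAW_SPIN_LOCK_UNLOCKED, and the __raw_spin_*() naming
mirror the patch.

#include <stdatomic.h>
#include <stdio.h>

/* asm/spinlock_types.h level: the bare arch type, nothing generic in it. */
typedef struct {
	atomic_flag lock;
} raw_spinlock_t;

#define __RAW_SPIN_LOCK_UNLOCKED { ATOMIC_FLAG_INIT }

/* asm/spinlock.h level: the lowlevel __raw_*() operations. */
static inline void __raw_spin_lock(raw_spinlock_t *l)
{
	while (atomic_flag_test_and_set_explicit(&l->lock,
						 memory_order_acquire))
		;				/* spin until we own it */
}

static inline void __raw_spin_unlock(raw_spinlock_t *l)
{
	atomic_flag_clear_explicit(&l->lock, memory_order_release);
}

/* linux/spinlock_types.h level: the generic type wraps the raw one and
 * is where generic features such as ->break_lock now live. */
typedef struct {
	raw_spinlock_t raw_lock;	/* arch part */
	unsigned int break_lock;	/* generic part, no longer per-arch */
} spinlock_model_t;

/* linux/spinlock.h level: the final spin_*() API built on the raw ops. */
static inline void spin_lock_model(spinlock_model_t *l)
{
	__raw_spin_lock(&l->raw_lock);
}

static inline void spin_unlock_model(spinlock_model_t *l)
{
	__raw_spin_unlock(&l->raw_lock);
}

int main(void)
{
	spinlock_model_t l = { __RAW_SPIN_LOCK_UNLOCKED, 0 };

	spin_lock_model(&l);
	puts("in the critical section");
	spin_unlock_model(&l);
	return 0;
}

The point of the layering is that ->break_lock and any debugging state
live once, in the generic wrapper, while each architecture supplies only
the raw type and the lowlevel lock/unlock operations.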