Diffstat (limited to 'include/asm-i386')
-rw-r--r--  include/asm-i386/acpi.h                       |  20
-rw-r--r--  include/asm-i386/apic.h                       |   2
-rw-r--r--  include/asm-i386/div64.h                      |   2
-rw-r--r--  include/asm-i386/fixmap.h                     |   2
-rw-r--r--  include/asm-i386/io_apic.h                    |   4
-rw-r--r--  include/asm-i386/mach-default/mach_reboot.h   |  10
-rw-r--r--  include/asm-i386/mmzone.h                     |   2
-rw-r--r--  include/asm-i386/mpspec.h                     |   4
-rw-r--r--  include/asm-i386/numa.h                       |   3
-rw-r--r--  include/asm-i386/processor.h                  |   4
-rw-r--r--  include/asm-i386/spinlock.h                   | 200
-rw-r--r--  include/asm-i386/spinlock_types.h             |  20
-rw-r--r--  include/asm-i386/thread_info.h                |   2
-rw-r--r--  include/asm-i386/topology.h                   |   2
-rw-r--r--  include/asm-i386/unistd.h                     |  12
15 files changed, 130 insertions, 159 deletions
diff --git a/include/asm-i386/acpi.h b/include/asm-i386/acpi.h
index cf828ace13f9..df4ed323aa4d 100644
--- a/include/asm-i386/acpi.h
+++ b/include/asm-i386/acpi.h
@@ -103,7 +103,7 @@ __acpi_release_global_lock (unsigned int *lock)
:"=r"(n_hi), "=r"(n_lo) \
:"0"(n_hi), "1"(n_lo))
-#ifdef CONFIG_ACPI_BOOT
+#ifdef CONFIG_ACPI
extern int acpi_lapic;
extern int acpi_ioapic;
extern int acpi_noirq;
@@ -146,13 +146,6 @@ static inline void check_acpi_pci(void) { }
#endif
-#else /* CONFIG_ACPI_BOOT */
-# define acpi_lapic 0
-# define acpi_ioapic 0
-
-#endif
-
-#ifdef CONFIG_ACPI_PCI
static inline void acpi_noirq_set(void) { acpi_noirq = 1; }
static inline void acpi_disable_pci(void)
{
@@ -160,11 +153,16 @@ static inline void acpi_disable_pci(void)
acpi_noirq_set();
}
extern int acpi_irq_balance_set(char *str);
-#else
+
+#else /* !CONFIG_ACPI */
+
+#define acpi_lapic 0
+#define acpi_ioapic 0
static inline void acpi_noirq_set(void) { }
static inline void acpi_disable_pci(void) { }
-static inline int acpi_irq_balance_set(char *str) { return 0; }
-#endif
+
+#endif /* !CONFIG_ACPI */
+
#ifdef CONFIG_ACPI_SLEEP
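[Note: the hunk above folds the old CONFIG_ACPI_BOOT/CONFIG_ACPI_PCI split into a single CONFIG_ACPI block and moves the acpi_lapic/acpi_ioapic fallbacks and the no-op stubs into the !CONFIG_ACPI branch. A minimal sketch of how a caller benefits; the function below is hypothetical, not part of the patch:

	/* Hypothetical caller: with the stubs above this compiles and folds away
	 * on !CONFIG_ACPI kernels with no #ifdef at the call site. */
	static int __init maybe_disable_pci_irq_routing(void)
	{
		if (!acpi_lapic && !acpi_ioapic)	/* constant 0 when ACPI is off */
			acpi_disable_pci();		/* no-op stub in the !CONFIG_ACPI branch */
		return 0;
	}
]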
diff --git a/include/asm-i386/apic.h b/include/asm-i386/apic.h
index 6a1b1882285c..8c454aa58ac6 100644
--- a/include/asm-i386/apic.h
+++ b/include/asm-i386/apic.h
@@ -130,6 +130,8 @@ extern unsigned int nmi_watchdog;
#define NMI_LOCAL_APIC 2
#define NMI_INVALID 3
+extern int disable_timer_pin_1;
+
#else /* !CONFIG_X86_LOCAL_APIC */
static inline void lapic_shutdown(void) { }
diff --git a/include/asm-i386/div64.h b/include/asm-i386/div64.h
index 28ed8b296afc..75c67c785bb8 100644
--- a/include/asm-i386/div64.h
+++ b/include/asm-i386/div64.h
@@ -35,7 +35,7 @@
*/
#define div_long_long_rem(a,b,c) div_ll_X_l_rem(a,b,c)
-extern inline long
+static inline long
div_ll_X_l_rem(long long divs, long div, long *rem)
{
long dum2;
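[Note: "extern inline" becomes "static inline" so every translation unit gets its own local copy and no out-of-line symbol is ever required. A hedged usage sketch of the div_long_long_rem() wrapper this function backs; the values are made up:

	/* Hypothetical use: split a 64-bit nanosecond count into seconds plus
	 * remainder; the 64/32 -> 32 divide must not overflow a long. */
	long long ns = 2500000000LL;
	long rem;
	long sec = div_long_long_rem(ns, 1000000000L, &rem);	/* sec = 2, rem = 500000000 */
]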
diff --git a/include/asm-i386/fixmap.h b/include/asm-i386/fixmap.h
index c94cac958389..cfb1c61d3b9c 100644
--- a/include/asm-i386/fixmap.h
+++ b/include/asm-i386/fixmap.h
@@ -76,7 +76,7 @@ enum fixed_addresses {
FIX_KMAP_BEGIN, /* reserved pte's for temporary kernel mappings */
FIX_KMAP_END = FIX_KMAP_BEGIN+(KM_TYPE_NR*NR_CPUS)-1,
#endif
-#ifdef CONFIG_ACPI_BOOT
+#ifdef CONFIG_ACPI
FIX_ACPI_BEGIN,
FIX_ACPI_END = FIX_ACPI_BEGIN + FIX_ACPI_PAGES - 1,
#endif
diff --git a/include/asm-i386/io_apic.h b/include/asm-i386/io_apic.h
index 002c203ccd6a..51c4e5fe6062 100644
--- a/include/asm-i386/io_apic.h
+++ b/include/asm-i386/io_apic.h
@@ -195,12 +195,12 @@ extern int skip_ioapic_setup;
*/
#define io_apic_assign_pci_irqs (mp_irq_entries && !skip_ioapic_setup && io_apic_irqs)
-#ifdef CONFIG_ACPI_BOOT
+#ifdef CONFIG_ACPI
extern int io_apic_get_unique_id (int ioapic, int apic_id);
extern int io_apic_get_version (int ioapic);
extern int io_apic_get_redir_entries (int ioapic);
extern int io_apic_set_pci_routing (int ioapic, int pin, int irq, int edge_level, int active_high_low);
-#endif /*CONFIG_ACPI_BOOT*/
+#endif /* CONFIG_ACPI */
extern int (*ioapic_renumber_irq)(int ioapic, int irq);
diff --git a/include/asm-i386/mach-default/mach_reboot.h b/include/asm-i386/mach-default/mach_reboot.h
index 521e227db679..06ae4d81ba6a 100644
--- a/include/asm-i386/mach-default/mach_reboot.h
+++ b/include/asm-i386/mach-default/mach_reboot.h
@@ -22,7 +22,15 @@ static inline void mach_reboot(void)
for (i = 0; i < 100; i++) {
kb_wait();
udelay(50);
- outb(0xfe, 0x64); /* pulse reset low */
+ outb(0x60, 0x64); /* write Controller Command Byte */
+ udelay(50);
+ kb_wait();
+ udelay(50);
+ outb(0x14, 0x60); /* set "System flag" */
+ udelay(50);
+ kb_wait();
+ udelay(50);
+ outb(0xfe, 0x64); /* pulse reset low */
udelay(50);
}
}
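[Note: the new sequence first writes the keyboard controller command byte (command 0x60 on port 0x64, data 0x14 on port 0x60 to set the "System flag") before pulsing reset with 0xFE, gating each write on kb_wait(). For reference, kb_wait() in this header polls the status port until the input buffer is empty, roughly:

	/* Sketch of the kb_wait() the loop relies on (simplified): spin until
	 * status port 0x64 reports the input buffer empty (bit 1 clear). */
	static inline void kb_wait(void)
	{
		int i;

		for (i = 0; i < 0x10000; i++)
			if ((inb_p(0x64) & 0x02) == 0)
				break;
	}
]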
diff --git a/include/asm-i386/mmzone.h b/include/asm-i386/mmzone.h
index 516421300ea2..348fe3a4879d 100644
--- a/include/asm-i386/mmzone.h
+++ b/include/asm-i386/mmzone.h
@@ -29,7 +29,7 @@ static inline void get_memcfg_numa(void)
#ifdef CONFIG_X86_NUMAQ
if (get_memcfg_numaq())
return;
-#elif CONFIG_ACPI_SRAT
+#elif defined(CONFIG_ACPI_SRAT)
if (get_memcfg_from_srat())
return;
#endif
diff --git a/include/asm-i386/mpspec.h b/include/asm-i386/mpspec.h
index d84a9c326c22..64a0b8e6afeb 100644
--- a/include/asm-i386/mpspec.h
+++ b/include/asm-i386/mpspec.h
@@ -27,14 +27,14 @@ extern unsigned long mp_lapic_addr;
extern int pic_mode;
extern int using_apic_timer;
-#ifdef CONFIG_ACPI_BOOT
+#ifdef CONFIG_ACPI
extern void mp_register_lapic (u8 id, u8 enabled);
extern void mp_register_lapic_address (u64 address);
extern void mp_register_ioapic (u8 id, u32 address, u32 gsi_base);
extern void mp_override_legacy_irq (u8 bus_irq, u8 polarity, u8 trigger, u32 gsi);
extern void mp_config_acpi_legacy_irqs (void);
extern int mp_register_gsi (u32 gsi, int edge_level, int active_high_low);
-#endif /*CONFIG_ACPI_BOOT*/
+#endif /* CONFIG_ACPI */
#define PHYSID_ARRAY_SIZE BITS_TO_LONGS(MAX_APICS)
diff --git a/include/asm-i386/numa.h b/include/asm-i386/numa.h
new file mode 100644
index 000000000000..96fcb157db1d
--- /dev/null
+++ b/include/asm-i386/numa.h
@@ -0,0 +1,3 @@
+
+int pxm_to_nid(int pxm);
+
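[Note: the new header only declares pxm_to_nid(), which maps an ACPI SRAT proximity domain onto a logical NUMA node id. A hedged usage sketch, assuming a negative return means the proximity domain never appeared in the SRAT:

	/* Hypothetical caller: translate an SRAT proximity domain to a node id,
	 * falling back to node 0 if the PXM is unknown (assumed convention). */
	int nid = pxm_to_nid(pxm);
	if (nid < 0)
		nid = 0;
]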
diff --git a/include/asm-i386/processor.h b/include/asm-i386/processor.h
index 37bef8ed7bed..0a4ec764377c 100644
--- a/include/asm-i386/processor.h
+++ b/include/asm-i386/processor.h
@@ -679,7 +679,7 @@ static inline void rep_nop(void)
However we don't do prefetches for pre XP Athlons currently
That should be fixed. */
#define ARCH_HAS_PREFETCH
-extern inline void prefetch(const void *x)
+static inline void prefetch(const void *x)
{
alternative_input(ASM_NOP4,
"prefetchnta (%1)",
@@ -693,7 +693,7 @@ extern inline void prefetch(const void *x)
/* 3dnow! prefetch to get an exclusive cache line. Useful for
spinlocks to avoid one state transition in the cache coherency protocol. */
-extern inline void prefetchw(const void *x)
+static inline void prefetchw(const void *x)
{
alternative_input(ASM_NOP4,
"prefetchw (%1)",
diff --git a/include/asm-i386/spinlock.h b/include/asm-i386/spinlock.h
index f9ff31f40036..23604350cdf4 100644
--- a/include/asm-i386/spinlock.h
+++ b/include/asm-i386/spinlock.h
@@ -7,46 +7,21 @@
#include <linux/config.h>
#include <linux/compiler.h>
-asmlinkage int printk(const char * fmt, ...)
- __attribute__ ((format (printf, 1, 2)));
-
/*
* Your basic SMP spinlocks, allowing only a single CPU anywhere
- */
-
-typedef struct {
- volatile unsigned int slock;
-#ifdef CONFIG_DEBUG_SPINLOCK
- unsigned magic;
-#endif
-#ifdef CONFIG_PREEMPT
- unsigned int break_lock;
-#endif
-} spinlock_t;
-
-#define SPINLOCK_MAGIC 0xdead4ead
-
-#ifdef CONFIG_DEBUG_SPINLOCK
-#define SPINLOCK_MAGIC_INIT , SPINLOCK_MAGIC
-#else
-#define SPINLOCK_MAGIC_INIT /* */
-#endif
-
-#define SPIN_LOCK_UNLOCKED (spinlock_t) { 1 SPINLOCK_MAGIC_INIT }
-
-#define spin_lock_init(x) do { *(x) = SPIN_LOCK_UNLOCKED; } while(0)
-
-/*
+ *
* Simple spin lock operations. There are two variants, one clears IRQ's
* on the local processor, one does not.
*
* We make no fairness assumptions. They have a cost.
+ *
+ * (the type definitions are in asm/spinlock_types.h)
*/
-#define spin_is_locked(x) (*(volatile signed char *)(&(x)->slock) <= 0)
-#define spin_unlock_wait(x) do { barrier(); } while(spin_is_locked(x))
+#define __raw_spin_is_locked(x) \
+ (*(volatile signed char *)(&(x)->slock) <= 0)
-#define spin_lock_string \
+#define __raw_spin_lock_string \
"\n1:\t" \
"lock ; decb %0\n\t" \
"jns 3f\n" \
@@ -57,7 +32,7 @@ typedef struct {
"jmp 1b\n" \
"3:\n\t"
-#define spin_lock_string_flags \
+#define __raw_spin_lock_string_flags \
"\n1:\t" \
"lock ; decb %0\n\t" \
"jns 4f\n\t" \
@@ -73,86 +48,71 @@ typedef struct {
"jmp 1b\n" \
"4:\n\t"
+static inline void __raw_spin_lock(raw_spinlock_t *lock)
+{
+ __asm__ __volatile__(
+ __raw_spin_lock_string
+ :"=m" (lock->slock) : : "memory");
+}
+
+static inline void __raw_spin_lock_flags(raw_spinlock_t *lock, unsigned long flags)
+{
+ __asm__ __volatile__(
+ __raw_spin_lock_string_flags
+ :"=m" (lock->slock) : "r" (flags) : "memory");
+}
+
+static inline int __raw_spin_trylock(raw_spinlock_t *lock)
+{
+ char oldval;
+ __asm__ __volatile__(
+ "xchgb %b0,%1"
+ :"=q" (oldval), "=m" (lock->slock)
+ :"0" (0) : "memory");
+ return oldval > 0;
+}
+
/*
- * This works. Despite all the confusion.
- * (except on PPro SMP or if we are using OOSTORE)
+ * __raw_spin_unlock based on writing $1 to the low byte.
+ * This method works. Despite all the confusion.
+ * (except on PPro SMP or if we are using OOSTORE, so we use xchgb there)
* (PPro errata 66, 92)
*/
#if !defined(CONFIG_X86_OOSTORE) && !defined(CONFIG_X86_PPRO_FENCE)
-#define spin_unlock_string \
+#define __raw_spin_unlock_string \
"movb $1,%0" \
:"=m" (lock->slock) : : "memory"
-static inline void _raw_spin_unlock(spinlock_t *lock)
+static inline void __raw_spin_unlock(raw_spinlock_t *lock)
{
-#ifdef CONFIG_DEBUG_SPINLOCK
- BUG_ON(lock->magic != SPINLOCK_MAGIC);
- BUG_ON(!spin_is_locked(lock));
-#endif
__asm__ __volatile__(
- spin_unlock_string
+ __raw_spin_unlock_string
);
}
#else
-#define spin_unlock_string \
+#define __raw_spin_unlock_string \
"xchgb %b0, %1" \
:"=q" (oldval), "=m" (lock->slock) \
:"0" (oldval) : "memory"
-static inline void _raw_spin_unlock(spinlock_t *lock)
+static inline void __raw_spin_unlock(raw_spinlock_t *lock)
{
char oldval = 1;
-#ifdef CONFIG_DEBUG_SPINLOCK
- BUG_ON(lock->magic != SPINLOCK_MAGIC);
- BUG_ON(!spin_is_locked(lock));
-#endif
- __asm__ __volatile__(
- spin_unlock_string
- );
-}
-#endif
-
-static inline int _raw_spin_trylock(spinlock_t *lock)
-{
- char oldval;
__asm__ __volatile__(
- "xchgb %b0,%1"
- :"=q" (oldval), "=m" (lock->slock)
- :"0" (0) : "memory");
- return oldval > 0;
+ __raw_spin_unlock_string
+ );
}
-static inline void _raw_spin_lock(spinlock_t *lock)
-{
-#ifdef CONFIG_DEBUG_SPINLOCK
- if (unlikely(lock->magic != SPINLOCK_MAGIC)) {
- printk("eip: %p\n", __builtin_return_address(0));
- BUG();
- }
#endif
- __asm__ __volatile__(
- spin_lock_string
- :"=m" (lock->slock) : : "memory");
-}
-static inline void _raw_spin_lock_flags (spinlock_t *lock, unsigned long flags)
-{
-#ifdef CONFIG_DEBUG_SPINLOCK
- if (unlikely(lock->magic != SPINLOCK_MAGIC)) {
- printk("eip: %p\n", __builtin_return_address(0));
- BUG();
- }
-#endif
- __asm__ __volatile__(
- spin_lock_string_flags
- :"=m" (lock->slock) : "r" (flags) : "memory");
-}
+#define __raw_spin_unlock_wait(lock) \
+ do { while (__raw_spin_is_locked(lock)) cpu_relax(); } while (0)
/*
* Read-write spinlocks, allowing multiple readers
@@ -163,72 +123,41 @@ static inline void _raw_spin_lock_flags (spinlock_t *lock, unsigned long flags)
* can "mix" irq-safe locks - any writer needs to get a
* irq-safe write-lock, but readers can get non-irqsafe
* read-locks.
+ *
+ * On x86, we implement read-write locks as a 32-bit counter
+ * with the high bit (sign) being the "contended" bit.
+ *
+ * The inline assembly is non-obvious. Think about it.
+ *
+ * Changed to use the same technique as rw semaphores. See
+ * semaphore.h for details. -ben
+ *
+ * the helpers are in arch/i386/kernel/semaphore.c
*/
-typedef struct {
- volatile unsigned int lock;
-#ifdef CONFIG_DEBUG_SPINLOCK
- unsigned magic;
-#endif
-#ifdef CONFIG_PREEMPT
- unsigned int break_lock;
-#endif
-} rwlock_t;
-
-#define RWLOCK_MAGIC 0xdeaf1eed
-
-#ifdef CONFIG_DEBUG_SPINLOCK
-#define RWLOCK_MAGIC_INIT , RWLOCK_MAGIC
-#else
-#define RWLOCK_MAGIC_INIT /* */
-#endif
-
-#define RW_LOCK_UNLOCKED (rwlock_t) { RW_LOCK_BIAS RWLOCK_MAGIC_INIT }
-
-#define rwlock_init(x) do { *(x) = RW_LOCK_UNLOCKED; } while(0)
/**
* read_can_lock - would read_trylock() succeed?
* @lock: the rwlock in question.
*/
-#define read_can_lock(x) ((int)(x)->lock > 0)
+#define __raw_read_can_lock(x) ((int)(x)->lock > 0)
/**
* write_can_lock - would write_trylock() succeed?
* @lock: the rwlock in question.
*/
-#define write_can_lock(x) ((x)->lock == RW_LOCK_BIAS)
+#define __raw_write_can_lock(x) ((x)->lock == RW_LOCK_BIAS)
-/*
- * On x86, we implement read-write locks as a 32-bit counter
- * with the high bit (sign) being the "contended" bit.
- *
- * The inline assembly is non-obvious. Think about it.
- *
- * Changed to use the same technique as rw semaphores. See
- * semaphore.h for details. -ben
- */
-/* the spinlock helpers are in arch/i386/kernel/semaphore.c */
-
-static inline void _raw_read_lock(rwlock_t *rw)
+static inline void __raw_read_lock(raw_rwlock_t *rw)
{
-#ifdef CONFIG_DEBUG_SPINLOCK
- BUG_ON(rw->magic != RWLOCK_MAGIC);
-#endif
__build_read_lock(rw, "__read_lock_failed");
}
-static inline void _raw_write_lock(rwlock_t *rw)
+static inline void __raw_write_lock(raw_rwlock_t *rw)
{
-#ifdef CONFIG_DEBUG_SPINLOCK
- BUG_ON(rw->magic != RWLOCK_MAGIC);
-#endif
__build_write_lock(rw, "__write_lock_failed");
}
-#define _raw_read_unlock(rw) asm volatile("lock ; incl %0" :"=m" ((rw)->lock) : : "memory")
-#define _raw_write_unlock(rw) asm volatile("lock ; addl $" RW_LOCK_BIAS_STR ",%0":"=m" ((rw)->lock) : : "memory")
-
-static inline int _raw_read_trylock(rwlock_t *lock)
+static inline int __raw_read_trylock(raw_rwlock_t *lock)
{
atomic_t *count = (atomic_t *)lock;
atomic_dec(count);
@@ -238,7 +167,7 @@ static inline int _raw_read_trylock(rwlock_t *lock)
return 0;
}
-static inline int _raw_write_trylock(rwlock_t *lock)
+static inline int __raw_write_trylock(raw_rwlock_t *lock)
{
atomic_t *count = (atomic_t *)lock;
if (atomic_sub_and_test(RW_LOCK_BIAS, count))
@@ -247,4 +176,15 @@ static inline int _raw_write_trylock(rwlock_t *lock)
return 0;
}
+static inline void __raw_read_unlock(raw_rwlock_t *rw)
+{
+ asm volatile("lock ; incl %0" :"=m" (rw->lock) : : "memory");
+}
+
+static inline void __raw_write_unlock(raw_rwlock_t *rw)
+{
+ asm volatile("lock ; addl $" RW_LOCK_BIAS_STR ", %0"
+ : "=m" (rw->lock) : : "memory");
+}
+
#endif /* __ASM_SPINLOCK_H */
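[Note: the lock algorithm itself is unchanged, only moved behind the new __raw_* entry points: the lock byte holds 1 when free, "lock ; decb" takes it to 0 on success, a negative result means contended, and the slow path spins on plain reads until the byte goes positive, then retries. An illustrative C model of what the asm above does; the names and the GCC __sync builtin are mine, not part of the patch:

	/* Illustrative model of __raw_spin_lock()/__raw_spin_unlock().
	 * slock: 1 = unlocked, 0 = locked, negative = locked with waiters. */
	#define model_cpu_relax()	asm volatile("rep; nop" ::: "memory")

	static void model_spin_lock(volatile signed char *slock)
	{
		for (;;) {
			/* "lock ; decb": acquired if the old value was positive */
			if (__sync_fetch_and_sub(slock, 1) > 0)
				return;
			/* "rep;nop; cmpb $0,%0; jle 2b": spin on plain reads */
			while (*slock <= 0)
				model_cpu_relax();
			/* "jmp 1b": retry the locked decrement */
		}
	}

	static void model_spin_unlock(volatile signed char *slock)
	{
		*slock = 1;	/* "movb $1,%0" on non-PPro/OOSTORE configs */
	}

The rwlock side keeps the established bias scheme: the counter starts at RW_LOCK_BIAS, readers subtract 1, a writer subtracts the whole bias, and the sign bit marks contention; only the entry points are renamed to the __raw_* convention used by the generic spinlock layer.]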
diff --git a/include/asm-i386/spinlock_types.h b/include/asm-i386/spinlock_types.h
new file mode 100644
index 000000000000..59efe849f351
--- /dev/null
+++ b/include/asm-i386/spinlock_types.h
@@ -0,0 +1,20 @@
+#ifndef __ASM_SPINLOCK_TYPES_H
+#define __ASM_SPINLOCK_TYPES_H
+
+#ifndef __LINUX_SPINLOCK_TYPES_H
+# error "please don't include this file directly"
+#endif
+
+typedef struct {
+ volatile unsigned int slock;
+} raw_spinlock_t;
+
+#define __RAW_SPIN_LOCK_UNLOCKED { 1 }
+
+typedef struct {
+ volatile unsigned int lock;
+} raw_rwlock_t;
+
+#define __RAW_RW_LOCK_UNLOCKED { RW_LOCK_BIAS }
+
+#endif
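[Note: the raw type definitions move into their own header so linux/spinlock_types.h can wrap them; the debug magic and break_lock fields now live in the generic layer. A minimal, hypothetical static initialization with the new names — real code would normally go through the generic spinlock_t/DEFINE_SPINLOCK() instead:

	static raw_spinlock_t my_raw_lock   = __RAW_SPIN_LOCK_UNLOCKED;
	static raw_rwlock_t   my_raw_rwlock = __RAW_RW_LOCK_UNLOCKED;
]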
diff --git a/include/asm-i386/thread_info.h b/include/asm-i386/thread_info.h
index e2cb9fa6f563..8fbf791651bf 100644
--- a/include/asm-i386/thread_info.h
+++ b/include/asm-i386/thread_info.h
@@ -48,7 +48,7 @@ struct thread_info {
#else /* !__ASSEMBLY__ */
-#include <asm/asm_offsets.h>
+#include <asm/asm-offsets.h>
#endif
diff --git a/include/asm-i386/topology.h b/include/asm-i386/topology.h
index 2461b731781e..0ec27c9e8e45 100644
--- a/include/asm-i386/topology.h
+++ b/include/asm-i386/topology.h
@@ -60,7 +60,7 @@ static inline int node_to_first_cpu(int node)
return first_cpu(mask);
}
-#define pcibus_to_node(bus) mp_bus_id_to_node[(bus)->number]
+#define pcibus_to_node(bus) ((long) (bus)->sysdata)
#define pcibus_to_cpumask(bus) node_to_cpumask(pcibus_to_node(bus))
/* sched_domains SD_NODE_INIT for NUMAQ machines */
diff --git a/include/asm-i386/unistd.h b/include/asm-i386/unistd.h
index a7cb377745bf..fbaf90a3968c 100644
--- a/include/asm-i386/unistd.h
+++ b/include/asm-i386/unistd.h
@@ -332,7 +332,7 @@ type name(type1 arg1) \
long __res; \
__asm__ volatile ("int $0x80" \
: "=a" (__res) \
- : "0" (__NR_##name),"b" ((long)(arg1))); \
+ : "0" (__NR_##name),"b" ((long)(arg1)) : "memory"); \
__syscall_return(type,__res); \
}
@@ -342,7 +342,7 @@ type name(type1 arg1,type2 arg2) \
long __res; \
__asm__ volatile ("int $0x80" \
: "=a" (__res) \
- : "0" (__NR_##name),"b" ((long)(arg1)),"c" ((long)(arg2))); \
+ : "0" (__NR_##name),"b" ((long)(arg1)),"c" ((long)(arg2)) : "memory"); \
__syscall_return(type,__res); \
}
@@ -353,7 +353,7 @@ long __res; \
__asm__ volatile ("int $0x80" \
: "=a" (__res) \
: "0" (__NR_##name),"b" ((long)(arg1)),"c" ((long)(arg2)), \
- "d" ((long)(arg3))); \
+ "d" ((long)(arg3)) : "memory"); \
__syscall_return(type,__res); \
}
@@ -364,7 +364,7 @@ long __res; \
__asm__ volatile ("int $0x80" \
: "=a" (__res) \
: "0" (__NR_##name),"b" ((long)(arg1)),"c" ((long)(arg2)), \
- "d" ((long)(arg3)),"S" ((long)(arg4))); \
+ "d" ((long)(arg3)),"S" ((long)(arg4)) : "memory"); \
__syscall_return(type,__res); \
}
@@ -376,7 +376,7 @@ long __res; \
__asm__ volatile ("int $0x80" \
: "=a" (__res) \
: "0" (__NR_##name),"b" ((long)(arg1)),"c" ((long)(arg2)), \
- "d" ((long)(arg3)),"S" ((long)(arg4)),"D" ((long)(arg5))); \
+ "d" ((long)(arg3)),"S" ((long)(arg4)),"D" ((long)(arg5)) : "memory"); \
__syscall_return(type,__res); \
}
@@ -389,7 +389,7 @@ __asm__ volatile ("push %%ebp ; movl %%eax,%%ebp ; movl %1,%%eax ; int $0x80 ; p
: "=a" (__res) \
: "i" (__NR_##name),"b" ((long)(arg1)),"c" ((long)(arg2)), \
"d" ((long)(arg3)),"S" ((long)(arg4)),"D" ((long)(arg5)), \
- "0" ((long)(arg6))); \
+ "0" ((long)(arg6)) : "memory"); \
__syscall_return(type,__res); \
}
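[Note: every _syscallN body gains a "memory" clobber: the kernel may read or write user memory behind the int $0x80, so GCC must flush cached values before the trap and discard them afterwards. A hedged sketch of why this matters for a user of the macros; the stub and function below are hypothetical:

	/* Hypothetical user of the macros: expands to a read() stub trapping via
	 * int $0x80.  Without the "memory" clobber the compiler could assume buf
	 * is unchanged across the call and miss the kernel's write into it. */
	#include <asm/unistd.h>

	_syscall3(int, read, int, fd, char *, buf, int, count)

	int first_byte(int fd)
	{
		char buf[1];

		if (read(fd, buf, 1) != 1)
			return -1;
		return buf[0];	/* must be reloaded after the syscall */
	}
]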