author    Jeff Garzik <jgarzik@pretzel.yyz.us>  2005-06-26 23:42:30 -0400
committer Jeff Garzik <jgarzik@pobox.com>       2005-06-26 23:42:30 -0400
commit    f45727d52d1581e9ff4df9d1a12a60789ad2d1eb
tree      773ae25f98542e6d382c688f7e85e8137d065614 /arch/i386
parent    4c925f452cfd16c690209e96821ee094e09a2404
parent    5696c1944a33b4434a9a1ebb6383b906afd43a10
Merge /spare/repo/netdev-2.6/ branch 'ieee80211'
Diffstat (limited to 'arch/i386')
-rw-r--r--  arch/i386/Kconfig | 107
-rw-r--r--  arch/i386/Makefile | 7
-rw-r--r--  arch/i386/boot/Makefile | 6
-rw-r--r--  arch/i386/boot/compressed/head.S | 7
-rw-r--r--  arch/i386/boot/compressed/misc.c | 8
-rw-r--r--  arch/i386/boot/edd.S | 2
-rw-r--r--  arch/i386/boot/install.sh | 4
-rw-r--r--  arch/i386/boot/setup.S | 6
-rw-r--r--  arch/i386/boot/tools/build.c | 5
-rw-r--r--  arch/i386/crypto/aes.c | 2
-rw-r--r--  arch/i386/defconfig | 1
-rw-r--r--  arch/i386/kernel/Makefile | 3
-rw-r--r--  arch/i386/kernel/acpi/boot.c | 220
-rw-r--r--  arch/i386/kernel/acpi/sleep.c | 27
-rw-r--r--  arch/i386/kernel/apic.c | 82
-rw-r--r--  arch/i386/kernel/apm.c | 10
-rw-r--r--  arch/i386/kernel/cpu/common.c | 47
-rw-r--r--  arch/i386/kernel/cpu/cpufreq/Kconfig | 14
-rw-r--r--  arch/i386/kernel/cpu/cpufreq/Makefile | 1
-rw-r--r--  arch/i386/kernel/cpu/cpufreq/longhaul.c | 58
-rw-r--r--  arch/i386/kernel/cpu/cpufreq/powernow-k7.c | 15
-rw-r--r--  arch/i386/kernel/cpu/cpufreq/powernow-k8.c | 113
-rw-r--r--  arch/i386/kernel/cpu/cpufreq/powernow-k8.h | 15
-rw-r--r--  arch/i386/kernel/cpu/cpufreq/sc520_freq.c | 186
-rw-r--r--  arch/i386/kernel/cpu/cpufreq/speedstep-centrino.c | 6
-rw-r--r--  arch/i386/kernel/cpu/cpufreq/speedstep-lib.c | 6
-rw-r--r--  arch/i386/kernel/cpu/cpufreq/speedstep-smi.c | 3
-rw-r--r--  arch/i386/kernel/cpu/intel.c | 12
-rw-r--r--  arch/i386/kernel/cpu/intel_cacheinfo.c | 6
-rw-r--r--  arch/i386/kernel/cpu/mcheck/k7.c | 2
-rw-r--r--  arch/i386/kernel/cpu/mcheck/mce.c | 4
-rw-r--r--  arch/i386/kernel/cpu/mcheck/p4.c | 4
-rw-r--r--  arch/i386/kernel/cpu/mcheck/p5.c | 2
-rw-r--r--  arch/i386/kernel/cpu/mcheck/p6.c | 2
-rw-r--r--  arch/i386/kernel/cpu/mcheck/winchip.c | 2
-rw-r--r--  arch/i386/kernel/cpu/mtrr/generic.c | 3
-rw-r--r--  arch/i386/kernel/cpu/mtrr/main.c | 23
-rw-r--r--  arch/i386/kernel/cpu/proc.c | 2
-rw-r--r--  arch/i386/kernel/cpuid.c | 22
-rw-r--r--  arch/i386/kernel/crash.c | 223
-rw-r--r--  arch/i386/kernel/dmi_scan.c | 391
-rw-r--r--  arch/i386/kernel/efi.c | 4
-rw-r--r--  arch/i386/kernel/head.S | 6
-rw-r--r--  arch/i386/kernel/i386_ksyms.c | 160
-rw-r--r--  arch/i386/kernel/i387.c | 3
-rw-r--r--  arch/i386/kernel/i8259.c | 12
-rw-r--r--  arch/i386/kernel/io_apic.c | 56
-rw-r--r--  arch/i386/kernel/irq.c | 72
-rw-r--r--  arch/i386/kernel/kprobes.c | 176
-rw-r--r--  arch/i386/kernel/machine_kexec.c | 226
-rw-r--r--  arch/i386/kernel/mpparse.c | 33
-rw-r--r--  arch/i386/kernel/msr.c | 22
-rw-r--r--  arch/i386/kernel/nmi.c | 24
-rw-r--r--  arch/i386/kernel/pci-dma.c | 3
-rw-r--r--  arch/i386/kernel/process.c | 75
-rw-r--r--  arch/i386/kernel/ptrace.c | 2
-rw-r--r--  arch/i386/kernel/reboot.c | 87
-rw-r--r--  arch/i386/kernel/relocate_kernel.S | 120
-rw-r--r--  arch/i386/kernel/setup.c | 99
-rw-r--r--  arch/i386/kernel/signal.c | 35
-rw-r--r--  arch/i386/kernel/smp.c | 37
-rw-r--r--  arch/i386/kernel/smpboot.c | 355
-rw-r--r--  arch/i386/kernel/syscall_table.S | 4
-rw-r--r--  arch/i386/kernel/sysenter.c | 12
-rw-r--r--  arch/i386/kernel/time.c | 6
-rw-r--r--  arch/i386/kernel/time_hpet.c | 2
-rw-r--r--  arch/i386/kernel/timers/common.c | 20
-rw-r--r--  arch/i386/kernel/timers/timer.c | 9
-rw-r--r--  arch/i386/kernel/timers/timer_hpet.c | 3
-rw-r--r--  arch/i386/kernel/timers/timer_pm.c | 1
-rw-r--r--  arch/i386/kernel/timers/timer_tsc.c | 28
-rw-r--r--  arch/i386/kernel/traps.c | 74
-rw-r--r--  arch/i386/kernel/vmlinux.lds.S | 59
-rw-r--r--  arch/i386/lib/dec_and_lock.c | 2
-rw-r--r--  arch/i386/lib/delay.c | 8
-rw-r--r--  arch/i386/lib/mmx.c | 5
-rw-r--r--  arch/i386/lib/usercopy.c | 8
-rw-r--r--  arch/i386/mach-default/setup.c | 27
-rw-r--r--  arch/i386/mach-default/topology.c | 15
-rw-r--r--  arch/i386/mach-visws/mpparse.c | 5
-rw-r--r--  arch/i386/mach-voyager/voyager_smp.c | 2
-rw-r--r--  arch/i386/mm/Makefile | 2
-rw-r--r--  arch/i386/mm/discontig.c | 129
-rw-r--r--  arch/i386/mm/fault.c | 7
-rw-r--r--  arch/i386/mm/highmem.c | 24
-rw-r--r--  arch/i386/mm/hugetlbpage.c | 204
-rw-r--r--  arch/i386/mm/init.c | 24
-rw-r--r--  arch/i386/mm/ioremap.c | 7
-rw-r--r--  arch/i386/mm/pgtable.c | 30
-rw-r--r--  arch/i386/oprofile/backtrace.c | 2
-rw-r--r--  arch/i386/pci/irq.c | 22
-rw-r--r--  arch/i386/pci/pcbios.c | 4
-rw-r--r--  arch/i386/power/cpu.c | 37
93 files changed, 2813 insertions(+), 1235 deletions(-)
diff --git a/arch/i386/Kconfig b/arch/i386/Kconfig
index dfd904f6883b..6c02336fe2e4 100644
--- a/arch/i386/Kconfig
+++ b/arch/i386/Kconfig
@@ -68,7 +68,6 @@ config X86_VOYAGER
config X86_NUMAQ
bool "NUMAQ (IBM/Sequent)"
- select DISCONTIGMEM
select NUMA
help
This option is used for getting Linux to run on a (IBM/Sequent) NUMA
@@ -511,28 +510,7 @@ config SCHED_SMT
cost of slightly increased overhead in some places. If unsure say
N here.
-config PREEMPT
- bool "Preemptible Kernel"
- help
- This option reduces the latency of the kernel when reacting to
- real-time or interactive events by allowing a low priority process to
- be preempted even if it is in kernel mode executing a system call.
- This allows applications to run more reliably even when the system is
- under load.
-
- Say Y here if you are building a kernel for a desktop, embedded
- or real-time system. Say N if you are unsure.
-
-config PREEMPT_BKL
- bool "Preempt The Big Kernel Lock"
- depends on PREEMPT
- default y
- help
- This option reduces the latency of the kernel by making the
- big kernel lock preemptible.
-
- Say Y here if you are building a kernel for a desktop system.
- Say N if you are unsure.
+source "kernel/Kconfig.preempt"
config X86_UP_APIC
bool "Local APIC support on uniprocessors"
@@ -783,25 +761,48 @@ comment "NUMA (NUMA-Q) requires SMP, 64GB highmem support"
comment "NUMA (Summit) requires SMP, 64GB highmem support, ACPI"
depends on X86_SUMMIT && (!HIGHMEM64G || !ACPI)
-config DISCONTIGMEM
- bool
- depends on NUMA
- default y
-
config HAVE_ARCH_BOOTMEM_NODE
bool
depends on NUMA
default y
-config HAVE_MEMORY_PRESENT
+config ARCH_HAVE_MEMORY_PRESENT
bool
depends on DISCONTIGMEM
default y
config NEED_NODE_MEMMAP_SIZE
bool
- depends on DISCONTIGMEM
+ depends on DISCONTIGMEM || SPARSEMEM
+ default y
+
+config HAVE_ARCH_ALLOC_REMAP
+ bool
+ depends on NUMA
+ default y
+
+config ARCH_DISCONTIGMEM_ENABLE
+ def_bool y
+ depends on NUMA
+
+config ARCH_DISCONTIGMEM_DEFAULT
+ def_bool y
+ depends on NUMA
+
+config ARCH_SPARSEMEM_ENABLE
+ def_bool y
+ depends on NUMA
+
+config ARCH_SELECT_MEMORY_MODEL
+ def_bool y
+ depends on ARCH_SPARSEMEM_ENABLE
+
+source "mm/Kconfig"
+
+config HAVE_ARCH_EARLY_PFN_TO_NID
+ bool
default y
+ depends on NUMA
config HIGHPTE
bool "Allocate 3rd-level pagetables from highmem"
@@ -939,6 +940,43 @@ config SECCOMP
If unsure, say Y. Only embedded should say N here.
+source kernel/Kconfig.hz
+
+config PHYSICAL_START
+ hex "Physical address where the kernel is loaded" if EMBEDDED
+ default "0x100000"
+ help
+ This gives the physical address where the kernel is loaded.
+ Primarily used in the case of kexec on panic where the
+ fail-safe kernel needs to run at a different address than
+ the panicked kernel.
+
+ Don't change this unless you know what you are doing.
+
+config KEXEC
+ bool "kexec system call (EXPERIMENTAL)"
+ depends on EXPERIMENTAL
+ help
+ kexec is a system call that implements the ability to shut down your
+ current kernel, and to start another kernel. It is like a reboot
+ but it is independent of the system firmware. And like a reboot
+ you can start any kernel with it, not just Linux.
+
+ The name comes from the similarity to the exec system call.
+
+ It is an ongoing process to be certain the hardware in a machine
+ is properly shut down, so do not be surprised if this code does not
+ initially work for you. It may help to enable device hotplugging
+ support. As of this writing the exact hardware interface is
+ strongly in flux, so no good recommendation can be made.
+
+config CRASH_DUMP
+ bool "kernel crash dumps (EXPERIMENTAL)"
+ depends on EMBEDDED
+ depends on EXPERIMENTAL
+ depends on HIGHMEM
+ help
+ Generate crash dump after being started by kexec.
endmenu
@@ -1226,6 +1264,15 @@ config SCx200
This support is also available as a module. If compiled as a
module, it will be called scx200.
+config HOTPLUG_CPU
+ bool "Support for hot-pluggable CPUs (EXPERIMENTAL)"
+ depends on SMP && HOTPLUG && EXPERIMENTAL
+ ---help---
+ Say Y here to experiment with turning CPUs off and on. CPUs
+ can be controlled through /sys/devices/system/cpu.
+
+ Say N.
+
source "drivers/pcmcia/Kconfig"
source "drivers/pci/hotplug/Kconfig"
diff --git a/arch/i386/Makefile b/arch/i386/Makefile
index 1c36ca332a96..bf7c9ba709f3 100644
--- a/arch/i386/Makefile
+++ b/arch/i386/Makefile
@@ -17,6 +17,13 @@
# 20050320 Kianusch Sayah Karadji <kianusch@sk-tech.net>
# Added support for GEODE CPU
+HAS_BIARCH := $(call cc-option-yn, -m32)
+ifeq ($(HAS_BIARCH),y)
+AS := $(AS) --32
+LD := $(LD) -m elf_i386
+CC := $(CC) -m32
+endif
+
LDFLAGS := -m elf_i386
OBJCOPYFLAGS := -O binary -R .note -R .comment -S
LDFLAGS_vmlinux :=
diff --git a/arch/i386/boot/Makefile b/arch/i386/boot/Makefile
index aa7064a75ee6..1e71382d413a 100644
--- a/arch/i386/boot/Makefile
+++ b/arch/i386/boot/Makefile
@@ -25,8 +25,8 @@ SVGA_MODE := -DSVGA_MODE=NORMAL_VGA
#RAMDISK := -DRAMDISK=512
-targets := vmlinux.bin bootsect bootsect.o setup setup.o \
- zImage bzImage
+targets := vmlinux.bin bootsect bootsect.o \
+ setup setup.o zImage bzImage
subdir- := compressed
hostprogs-y := tools/build
@@ -48,7 +48,7 @@ cmd_image = $(obj)/tools/build $(BUILDFLAGS) $(obj)/bootsect $(obj)/setup \
$(obj)/zImage $(obj)/bzImage: $(obj)/bootsect $(obj)/setup \
$(obj)/vmlinux.bin $(obj)/tools/build FORCE
$(call if_changed,image)
- @echo 'Kernel: $@ is ready'
+ @echo 'Kernel: $@ is ready' ' (#'`cat .version`')'
$(obj)/vmlinux.bin: $(obj)/compressed/vmlinux FORCE
$(call if_changed,objcopy)
diff --git a/arch/i386/boot/compressed/head.S b/arch/i386/boot/compressed/head.S
index c5e80b69e7d4..b5893e4ecd37 100644
--- a/arch/i386/boot/compressed/head.S
+++ b/arch/i386/boot/compressed/head.S
@@ -25,6 +25,7 @@
#include <linux/linkage.h>
#include <asm/segment.h>
+#include <asm/page.h>
.globl startup_32
@@ -74,7 +75,7 @@ startup_32:
popl %esi # discard address
popl %esi # real mode pointer
xorl %ebx,%ebx
- ljmp $(__BOOT_CS), $0x100000
+ ljmp $(__BOOT_CS), $__PHYSICAL_START
/*
* We come here, if we were loaded high.
@@ -99,7 +100,7 @@ startup_32:
popl %ecx # lcount
popl %edx # high_buffer_start
popl %eax # hcount
- movl $0x100000,%edi
+ movl $__PHYSICAL_START,%edi
cli # make sure we don't get interrupted
ljmp $(__BOOT_CS), $0x1000 # and jump to the move routine
@@ -124,5 +125,5 @@ move_routine_start:
movsl
movl %ebx,%esi # Restore setup pointer
xorl %ebx,%ebx
- ljmp $(__BOOT_CS), $0x100000
+ ljmp $(__BOOT_CS), $__PHYSICAL_START
move_routine_end:
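The ljmp changes above depend on __PHYSICAL_START being derived from the new CONFIG_PHYSICAL_START option, so the decompressor keeps its old behaviour unless a capture kernel is configured to live elsewhere. A sketch of the assumed include/asm-i386/page.h definitions this series relies on (__PAGE_OFFSET is defined earlier in that header):

/*
 * Assumed layout of the asm/page.h additions backing __PHYSICAL_START;
 * CONFIG_PHYSICAL_START defaults to 0x100000 in the Kconfig hunk above.
 */
#include <linux/config.h>

#define __PHYSICAL_START	CONFIG_PHYSICAL_START
#define __KERNEL_START		(__PAGE_OFFSET + __PHYSICAL_START)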
diff --git a/arch/i386/boot/compressed/misc.c b/arch/i386/boot/compressed/misc.c
index cedc55cc47de..82a807f9f5e6 100644
--- a/arch/i386/boot/compressed/misc.c
+++ b/arch/i386/boot/compressed/misc.c
@@ -13,6 +13,7 @@
#include <linux/vmalloc.h>
#include <linux/tty.h>
#include <asm/io.h>
+#include <asm/page.h>
/*
* gzip declarations
@@ -308,7 +309,7 @@ static void setup_normal_output_buffer(void)
#else
if ((RM_ALT_MEM_K > RM_EXT_MEM_K ? RM_ALT_MEM_K : RM_EXT_MEM_K) < 1024) error("Less than 2MB of memory");
#endif
- output_data = (char *)0x100000; /* Points to 1M */
+ output_data = (char *)__PHYSICAL_START; /* Normally points to 1M */
free_mem_end_ptr = (long)real_mode;
}
@@ -333,8 +334,8 @@ static void setup_output_buffer_if_we_run_high(struct moveparams *mv)
low_buffer_size = low_buffer_end - LOW_BUFFER_START;
high_loaded = 1;
free_mem_end_ptr = (long)high_buffer_start;
- if ( (0x100000 + low_buffer_size) > ((ulg)high_buffer_start)) {
- high_buffer_start = (uch *)(0x100000 + low_buffer_size);
+ if ( (__PHYSICAL_START + low_buffer_size) > ((ulg)high_buffer_start)) {
+ high_buffer_start = (uch *)(__PHYSICAL_START + low_buffer_size);
mv->hcount = 0; /* say: we need not to move high_buffer */
}
else mv->hcount = -1;
@@ -353,7 +354,6 @@ static void close_output_buffer_if_we_run_high(struct moveparams *mv)
}
}
-
asmlinkage int decompress_kernel(struct moveparams *mv, void *rmode)
{
real_mode = rmode;
diff --git a/arch/i386/boot/edd.S b/arch/i386/boot/edd.S
index 027d6b354ffb..d8d69f2b911d 100644
--- a/arch/i386/boot/edd.S
+++ b/arch/i386/boot/edd.S
@@ -6,7 +6,7 @@
* projects 1572D, 1484D, 1386D, 1226DT
* disk signature read by Matt Domsch <Matt_Domsch@dell.com>
* and Andrew Wilks <Andrew_Wilks@dell.com> September 2003, June 2004
- * legacy CHS retreival by Patrick J. LoPresti <patl@users.sourceforge.net>
+ * legacy CHS retrieval by Patrick J. LoPresti <patl@users.sourceforge.net>
* March 2004
* Command line option parsing, Matt Domsch, November 2004
*/
diff --git a/arch/i386/boot/install.sh b/arch/i386/boot/install.sh
index 90f2452b3b9e..f17b40dfc0f4 100644
--- a/arch/i386/boot/install.sh
+++ b/arch/i386/boot/install.sh
@@ -21,8 +21,8 @@
# User may have a custom install script
-if [ -x ~/bin/installkernel ]; then exec ~/bin/installkernel "$@"; fi
-if [ -x /sbin/installkernel ]; then exec /sbin/installkernel "$@"; fi
+if [ -x ~/bin/${CROSS_COMPILE}installkernel ]; then exec ~/bin/${CROSS_COMPILE}installkernel "$@"; fi
+if [ -x /sbin/${CROSS_COMPILE}installkernel ]; then exec /sbin/${CROSS_COMPILE}installkernel "$@"; fi
# Default install - same as make zlilo
diff --git a/arch/i386/boot/setup.S b/arch/i386/boot/setup.S
index caa1fde6904e..8cb420f40c58 100644
--- a/arch/i386/boot/setup.S
+++ b/arch/i386/boot/setup.S
@@ -33,7 +33,7 @@
* Transcribed from Intel (as86) -> AT&T (gas) by Chris Noe, May 1999.
* <stiker@northlink.com>
*
- * Fix to work around buggy BIOSes which dont use carry bit correctly
+ * Fix to work around buggy BIOSes which don't use carry bit correctly
* and/or report extended memory in CX/DX for e801h memory size detection
* call. As a result the kernel got wrong figures. The int15/e801h docs
* from Ralf Brown interrupt list seem to indicate AX/BX should be used
@@ -357,7 +357,7 @@ bail820:
meme801:
stc # fix to work around buggy
- xorw %cx,%cx # BIOSes which dont clear/set
+ xorw %cx,%cx # BIOSes which don't clear/set
xorw %dx,%dx # carry on pass/error of
# e801h memory size call
# or merely pass cx,dx though
@@ -847,7 +847,7 @@ flush_instr:
#
# but we yet haven't reloaded the CS register, so the default size
# of the target offset still is 16 bit.
-# However, using an operand prefix (0x66), the CPU will properly
+# However, using an operand prefix (0x66), the CPU will properly
# take our 48 bit far pointer. (INTeL 80386 Programmer's Reference
# Manual, Mixing 16-bit and 32-bit code, page 16-6)
diff --git a/arch/i386/boot/tools/build.c b/arch/i386/boot/tools/build.c
index 26509b826aed..4a17956512e1 100644
--- a/arch/i386/boot/tools/build.c
+++ b/arch/i386/boot/tools/build.c
@@ -1,6 +1,4 @@
/*
- * $Id: build.c,v 1.5 1997/05/19 12:29:58 mj Exp $
- *
* Copyright (C) 1991, 1992 Linus Torvalds
* Copyright (C) 1997 Martin Mares
*/
@@ -8,7 +6,8 @@
/*
* This file builds a disk-image from three different files:
*
- * - bootsect: exactly 512 bytes of 8086 machine code, loads the rest
+ * - bootsect: compatibility mbr which prints an error message if
+ * someone tries to boot the kernel directly.
* - setup: 8086 machine code, sets up system parm
* - system: 80386 code for actual system
*
diff --git a/arch/i386/crypto/aes.c b/arch/i386/crypto/aes.c
index 1019430fc1f1..88ee85c3b43b 100644
--- a/arch/i386/crypto/aes.c
+++ b/arch/i386/crypto/aes.c
@@ -59,7 +59,7 @@ struct aes_ctx {
};
#define WPOLY 0x011b
-#define u32_in(x) le32_to_cpu(*(const u32 *)(x))
+#define u32_in(x) le32_to_cpup((const __le32 *)(x))
#define bytes2word(b0, b1, b2, b3) \
(((u32)(b3) << 24) | ((u32)(b2) << 16) | ((u32)(b1) << 8) | (b0))
diff --git a/arch/i386/defconfig b/arch/i386/defconfig
index 28e620383799..ca07b95c06b8 100644
--- a/arch/i386/defconfig
+++ b/arch/i386/defconfig
@@ -126,7 +126,6 @@ CONFIG_HAVE_DEC_LOCK=y
#
CONFIG_PM=y
CONFIG_SOFTWARE_SUSPEND=y
-# CONFIG_PM_DISK is not set
#
# ACPI (Advanced Configuration and Power Interface) Support
diff --git a/arch/i386/kernel/Makefile b/arch/i386/kernel/Makefile
index 0fbcfe00dd8d..4cc83b322b36 100644
--- a/arch/i386/kernel/Makefile
+++ b/arch/i386/kernel/Makefile
@@ -24,6 +24,7 @@ obj-$(CONFIG_X86_MPPARSE) += mpparse.o
obj-$(CONFIG_X86_LOCAL_APIC) += apic.o nmi.o
obj-$(CONFIG_X86_IO_APIC) += io_apic.o
obj-$(CONFIG_X86_REBOOTFIXUPS) += reboot_fixups.o
+obj-$(CONFIG_KEXEC) += machine_kexec.o relocate_kernel.o crash.o
obj-$(CONFIG_X86_NUMAQ) += numaq.o
obj-$(CONFIG_X86_SUMMIT_NUMA) += summit.o
obj-$(CONFIG_KPROBES) += kprobes.o
@@ -43,7 +44,7 @@ obj-$(CONFIG_SCx200) += scx200.o
# Note: kbuild does not track this dependency due to usage of .incbin
$(obj)/vsyscall.o: $(obj)/vsyscall-int80.so $(obj)/vsyscall-sysenter.so
targets += $(foreach F,int80 sysenter,vsyscall-$F.o vsyscall-$F.so)
-targets += vsyscall.lds
+targets += vsyscall-note.o vsyscall.lds
# The DSO images are built using a special linker script.
quiet_cmd_syscall = SYSCALL $@
diff --git a/arch/i386/kernel/acpi/boot.c b/arch/i386/kernel/acpi/boot.c
index 848bb97af7ca..9f63ae0f404b 100644
--- a/arch/i386/kernel/acpi/boot.c
+++ b/arch/i386/kernel/acpi/boot.c
@@ -29,6 +29,7 @@
#include <linux/efi.h>
#include <linux/irq.h>
#include <linux/module.h>
+#include <linux/dmi.h>
#include <asm/pgtable.h>
#include <asm/io_apic.h>
@@ -815,6 +816,219 @@ acpi_process_madt(void)
return;
}
+extern int acpi_force;
+
+#ifdef __i386__
+
+#ifdef CONFIG_ACPI_PCI
+static int __init disable_acpi_irq(struct dmi_system_id *d)
+{
+ if (!acpi_force) {
+ printk(KERN_NOTICE "%s detected: force use of acpi=noirq\n",
+ d->ident);
+ acpi_noirq_set();
+ }
+ return 0;
+}
+
+static int __init disable_acpi_pci(struct dmi_system_id *d)
+{
+ if (!acpi_force) {
+ printk(KERN_NOTICE "%s detected: force use of pci=noacpi\n",
+ d->ident);
+ acpi_disable_pci();
+ }
+ return 0;
+}
+#endif
+
+static int __init dmi_disable_acpi(struct dmi_system_id *d)
+{
+ if (!acpi_force) {
+ printk(KERN_NOTICE "%s detected: acpi off\n",d->ident);
+ disable_acpi();
+ } else {
+ printk(KERN_NOTICE
+ "Warning: DMI blacklist says broken, but acpi forced\n");
+ }
+ return 0;
+}
+
+/*
+ * Limit ACPI to CPU enumeration for HT
+ */
+static int __init force_acpi_ht(struct dmi_system_id *d)
+{
+ if (!acpi_force) {
+ printk(KERN_NOTICE "%s detected: force use of acpi=ht\n", d->ident);
+ disable_acpi();
+ acpi_ht = 1;
+ } else {
+ printk(KERN_NOTICE
+ "Warning: acpi=force overrules DMI blacklist: acpi=ht\n");
+ }
+ return 0;
+}
+
+/*
+ * If your system is blacklisted here, but you find that acpi=force
+ * works for you, please contact acpi-devel@sourceforge.net
+ */
+static struct dmi_system_id __initdata acpi_dmi_table[] = {
+ /*
+ * Boxes that need ACPI disabled
+ */
+ {
+ .callback = dmi_disable_acpi,
+ .ident = "IBM Thinkpad",
+ .matches = {
+ DMI_MATCH(DMI_BOARD_VENDOR, "IBM"),
+ DMI_MATCH(DMI_BOARD_NAME, "2629H1G"),
+ },
+ },
+
+ /*
+ * Boxes that need acpi=ht
+ */
+ {
+ .callback = force_acpi_ht,
+ .ident = "FSC Primergy T850",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU SIEMENS"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "PRIMERGY T850"),
+ },
+ },
+ {
+ .callback = force_acpi_ht,
+ .ident = "DELL GX240",
+ .matches = {
+ DMI_MATCH(DMI_BOARD_VENDOR, "Dell Computer Corporation"),
+ DMI_MATCH(DMI_BOARD_NAME, "OptiPlex GX240"),
+ },
+ },
+ {
+ .callback = force_acpi_ht,
+ .ident = "HP VISUALIZE NT Workstation",
+ .matches = {
+ DMI_MATCH(DMI_BOARD_VENDOR, "Hewlett-Packard"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "HP VISUALIZE NT Workstation"),
+ },
+ },
+ {
+ .callback = force_acpi_ht,
+ .ident = "Compaq Workstation W8000",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Compaq"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Workstation W8000"),
+ },
+ },
+ {
+ .callback = force_acpi_ht,
+ .ident = "ASUS P4B266",
+ .matches = {
+ DMI_MATCH(DMI_BOARD_VENDOR, "ASUSTeK Computer INC."),
+ DMI_MATCH(DMI_BOARD_NAME, "P4B266"),
+ },
+ },
+ {
+ .callback = force_acpi_ht,
+ .ident = "ASUS P2B-DS",
+ .matches = {
+ DMI_MATCH(DMI_BOARD_VENDOR, "ASUSTeK Computer INC."),
+ DMI_MATCH(DMI_BOARD_NAME, "P2B-DS"),
+ },
+ },
+ {
+ .callback = force_acpi_ht,
+ .ident = "ASUS CUR-DLS",
+ .matches = {
+ DMI_MATCH(DMI_BOARD_VENDOR, "ASUSTeK Computer INC."),
+ DMI_MATCH(DMI_BOARD_NAME, "CUR-DLS"),
+ },
+ },
+ {
+ .callback = force_acpi_ht,
+ .ident = "ABIT i440BX-W83977",
+ .matches = {
+ DMI_MATCH(DMI_BOARD_VENDOR, "ABIT <http://www.abit.com>"),
+ DMI_MATCH(DMI_BOARD_NAME, "i440BX-W83977 (BP6)"),
+ },
+ },
+ {
+ .callback = force_acpi_ht,
+ .ident = "IBM Bladecenter",
+ .matches = {
+ DMI_MATCH(DMI_BOARD_VENDOR, "IBM"),
+ DMI_MATCH(DMI_BOARD_NAME, "IBM eServer BladeCenter HS20"),
+ },
+ },
+ {
+ .callback = force_acpi_ht,
+ .ident = "IBM eServer xSeries 360",
+ .matches = {
+ DMI_MATCH(DMI_BOARD_VENDOR, "IBM"),
+ DMI_MATCH(DMI_BOARD_NAME, "eServer xSeries 360"),
+ },
+ },
+ {
+ .callback = force_acpi_ht,
+ .ident = "IBM eserver xSeries 330",
+ .matches = {
+ DMI_MATCH(DMI_BOARD_VENDOR, "IBM"),
+ DMI_MATCH(DMI_BOARD_NAME, "eserver xSeries 330"),
+ },
+ },
+ {
+ .callback = force_acpi_ht,
+ .ident = "IBM eserver xSeries 440",
+ .matches = {
+ DMI_MATCH(DMI_BOARD_VENDOR, "IBM"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "eserver xSeries 440"),
+ },
+ },
+
+#ifdef CONFIG_ACPI_PCI
+ /*
+ * Boxes that need ACPI PCI IRQ routing disabled
+ */
+ {
+ .callback = disable_acpi_irq,
+ .ident = "ASUS A7V",
+ .matches = {
+ DMI_MATCH(DMI_BOARD_VENDOR, "ASUSTeK Computer INC"),
+ DMI_MATCH(DMI_BOARD_NAME, "<A7V>"),
+ /* newer BIOS, Revision 1011, does work */
+ DMI_MATCH(DMI_BIOS_VERSION, "ASUS A7V ACPI BIOS Revision 1007"),
+ },
+ },
+
+ /*
+ * Boxes that need ACPI PCI IRQ routing and PCI scan disabled
+ */
+ { /* _BBN 0 bug */
+ .callback = disable_acpi_pci,
+ .ident = "ASUS PR-DLS",
+ .matches = {
+ DMI_MATCH(DMI_BOARD_VENDOR, "ASUSTeK Computer INC."),
+ DMI_MATCH(DMI_BOARD_NAME, "PR-DLS"),
+ DMI_MATCH(DMI_BIOS_VERSION, "ASUS PR-DLS ACPI BIOS Revision 1010"),
+ DMI_MATCH(DMI_BIOS_DATE, "03/21/2003")
+ },
+ },
+ {
+ .callback = disable_acpi_pci,
+ .ident = "Acer TravelMate 36x Laptop",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "TravelMate 360"),
+ },
+ },
+#endif
+ { }
+};
+
+#endif /* __i386__ */
+
/*
* acpi_boot_table_init() and acpi_boot_init()
* called from setup_arch(), always.
@@ -843,6 +1057,10 @@ acpi_boot_table_init(void)
{
int error;
+#ifdef __i386__
+ dmi_check_system(acpi_dmi_table);
+#endif
+
/*
* If acpi_disabled, bail out
* One exception: acpi=ht continues far enough to enumerate LAPICs
@@ -870,8 +1088,6 @@ acpi_boot_table_init(void)
*/
error = acpi_blacklisted();
if (error) {
- extern int acpi_force;
-
if (acpi_force) {
printk(KERN_WARNING PREFIX "acpi=force override\n");
} else {
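The acpi_dmi_table added above is consumed by dmi_check_system() from acpi_boot_table_init(). A sketch of its assumed semantics: an entry fires only when every DMI_MATCH() substring is found in its field, each hit runs the entry's callback, and a nonzero callback return stops the scan. dmi_matches() here is a hypothetical helper standing in for the real per-field comparison:

/* Sketch of the assumed dmi_check_system() walk; not the real implementation. */
#include <linux/dmi.h>

static int dmi_check_system_sketch(struct dmi_system_id *list)
{
	struct dmi_system_id *d;
	int count = 0;

	for (d = list; d->ident; d++) {		/* table ends at the { } entry */
		if (dmi_matches(d)) {		/* hypothetical: all .matches[] hit */
			count++;
			if (d->callback && d->callback(d))
				break;		/* callback may stop the scan */
		}
	}
	return count;
}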
diff --git a/arch/i386/kernel/acpi/sleep.c b/arch/i386/kernel/acpi/sleep.c
index 28bb0514bb6e..c1af93032ff3 100644
--- a/arch/i386/kernel/acpi/sleep.c
+++ b/arch/i386/kernel/acpi/sleep.c
@@ -7,6 +7,7 @@
#include <linux/acpi.h>
#include <linux/bootmem.h>
+#include <linux/dmi.h>
#include <asm/smp.h>
#include <asm/tlbflush.h>
@@ -91,3 +92,29 @@ static int __init acpi_sleep_setup(char *str)
__setup("acpi_sleep=", acpi_sleep_setup);
+
+
+static __init int reset_videomode_after_s3(struct dmi_system_id *d)
+{
+ acpi_video_flags |= 2;
+ return 0;
+}
+
+static __initdata struct dmi_system_id acpisleep_dmi_table[] = {
+ { /* Reset video mode after returning from ACPI S3 sleep */
+ .callback = reset_videomode_after_s3,
+ .ident = "Toshiba Satellite 4030cdt",
+ .matches = {
+ DMI_MATCH(DMI_PRODUCT_NAME, "S4030CDT/4.3"),
+ },
+ },
+ { }
+};
+
+static int __init acpisleep_dmi_init(void)
+{
+ dmi_check_system(acpisleep_dmi_table);
+ return 0;
+}
+
+core_initcall(acpisleep_dmi_init);
diff --git a/arch/i386/kernel/apic.c b/arch/i386/kernel/apic.c
index d509836b70c3..93df90bbb87e 100644
--- a/arch/i386/kernel/apic.c
+++ b/arch/i386/kernel/apic.c
@@ -26,6 +26,7 @@
#include <linux/mc146818rtc.h>
#include <linux/kernel_stat.h>
#include <linux/sysdev.h>
+#include <linux/cpu.h>
#include <asm/atomic.h>
#include <asm/smp.h>
@@ -40,6 +41,11 @@
#include "io_ports.h"
/*
+ * Knob to control our willingness to enable the local APIC.
+ */
+int enable_local_apic __initdata = 0; /* -1=force-disable, +1=force-enable */
+
+/*
* Debug level
*/
int apic_verbosity;
@@ -205,7 +211,7 @@ void __init connect_bsp_APIC(void)
enable_apic_mode();
}
-void disconnect_bsp_APIC(void)
+void disconnect_bsp_APIC(int virt_wire_setup)
{
if (pic_mode) {
/*
@@ -219,6 +225,42 @@ void disconnect_bsp_APIC(void)
outb(0x70, 0x22);
outb(0x00, 0x23);
}
+ else {
+ /* Go back to Virtual Wire compatibility mode */
+ unsigned long value;
+
+ /* For the spurious interrupt use vector F, and enable it */
+ value = apic_read(APIC_SPIV);
+ value &= ~APIC_VECTOR_MASK;
+ value |= APIC_SPIV_APIC_ENABLED;
+ value |= 0xf;
+ apic_write_around(APIC_SPIV, value);
+
+ if (!virt_wire_setup) {
+ /* For LVT0 make it edge triggered, active high, external and enabled */
+ value = apic_read(APIC_LVT0);
+ value &= ~(APIC_MODE_MASK | APIC_SEND_PENDING |
+ APIC_INPUT_POLARITY | APIC_LVT_REMOTE_IRR |
+ APIC_LVT_LEVEL_TRIGGER | APIC_LVT_MASKED );
+ value |= APIC_LVT_REMOTE_IRR | APIC_SEND_PENDING;
+ value = SET_APIC_DELIVERY_MODE(value, APIC_MODE_EXTINT);
+ apic_write_around(APIC_LVT0, value);
+ }
+ else {
+ /* Disable LVT0 */
+ apic_write_around(APIC_LVT0, APIC_LVT_MASKED);
+ }
+
+ /* For LVT1 make it edge triggered, active high, nmi and enabled */
+ value = apic_read(APIC_LVT1);
+ value &= ~(
+ APIC_MODE_MASK | APIC_SEND_PENDING |
+ APIC_INPUT_POLARITY | APIC_LVT_REMOTE_IRR |
+ APIC_LVT_LEVEL_TRIGGER | APIC_LVT_MASKED);
+ value |= APIC_LVT_REMOTE_IRR | APIC_SEND_PENDING;
+ value = SET_APIC_DELIVERY_MODE(value, APIC_MODE_NMI);
+ apic_write_around(APIC_LVT1, value);
+ }
}
void disable_local_APIC(void)
@@ -363,7 +405,7 @@ void __init init_bsp_APIC(void)
apic_write_around(APIC_LVT1, value);
}
-void __init setup_local_APIC (void)
+void __devinit setup_local_APIC(void)
{
unsigned long oldvalue, value, ver, maxlvt;
@@ -634,7 +676,7 @@ static struct sys_device device_lapic = {
.cls = &lapic_sysclass,
};
-static void __init apic_pm_activate(void)
+static void __devinit apic_pm_activate(void)
{
apic_pm_state.active = 1;
}
@@ -665,26 +707,6 @@ static void apic_pm_activate(void) { }
* Original code written by Keir Fraser.
*/
-/*
- * Knob to control our willingness to enable the local APIC.
- */
-int enable_local_apic __initdata = 0; /* -1=force-disable, +1=force-enable */
-
-static int __init lapic_disable(char *str)
-{
- enable_local_apic = -1;
- clear_bit(X86_FEATURE_APIC, boot_cpu_data.x86_capability);
- return 0;
-}
-__setup("nolapic", lapic_disable);
-
-static int __init lapic_enable(char *str)
-{
- enable_local_apic = 1;
- return 0;
-}
-__setup("lapic", lapic_enable);
-
static int __init apic_set_verbosity(char *str)
{
if (strcmp("debug", str) == 0)
@@ -855,7 +877,7 @@ fake_ioapic_page:
* but we do not accept timer interrupts yet. We only allow the BP
* to calibrate.
*/
-static unsigned int __init get_8254_timer_count(void)
+static unsigned int __devinit get_8254_timer_count(void)
{
extern spinlock_t i8253_lock;
unsigned long flags;
@@ -874,7 +896,7 @@ static unsigned int __init get_8254_timer_count(void)
}
/* next tick in 8254 can be caught by catching timer wraparound */
-static void __init wait_8254_wraparound(void)
+static void __devinit wait_8254_wraparound(void)
{
unsigned int curr_count, prev_count;
@@ -894,7 +916,7 @@ static void __init wait_8254_wraparound(void)
* Default initialization for 8254 timers. If we use other timers like HPET,
* we override this later
*/
-void (*wait_timer_tick)(void) __initdata = wait_8254_wraparound;
+void (*wait_timer_tick)(void) __devinitdata = wait_8254_wraparound;
/*
* This function sets up the local APIC timer, with a timeout of
@@ -930,7 +952,7 @@ static void __setup_APIC_LVTT(unsigned int clocks)
apic_write_around(APIC_TMICT, clocks/APIC_DIVISOR);
}
-static void __init setup_APIC_timer(unsigned int clocks)
+static void __devinit setup_APIC_timer(unsigned int clocks)
{
unsigned long flags;
@@ -1043,12 +1065,12 @@ void __init setup_boot_APIC_clock(void)
local_irq_enable();
}
-void __init setup_secondary_APIC_clock(void)
+void __devinit setup_secondary_APIC_clock(void)
{
setup_APIC_timer(calibration_result);
}
-void __init disable_APIC_timer(void)
+void __devinit disable_APIC_timer(void)
{
if (using_apic_timer) {
unsigned long v;
@@ -1133,7 +1155,7 @@ inline void smp_local_timer_interrupt(struct pt_regs * regs)
}
#ifdef CONFIG_SMP
- update_process_times(user_mode(regs));
+ update_process_times(user_mode_vm(regs));
#endif
}
diff --git a/arch/i386/kernel/apm.c b/arch/i386/kernel/apm.c
index 45641a872550..d48ce9290963 100644
--- a/arch/i386/kernel/apm.c
+++ b/arch/i386/kernel/apm.c
@@ -346,10 +346,10 @@ extern int (*console_blank_hook)(int);
struct apm_user {
int magic;
struct apm_user * next;
- int suser: 1;
- int writer: 1;
- int reader: 1;
- int suspend_wait: 1;
+ unsigned int suser: 1;
+ unsigned int writer: 1;
+ unsigned int reader: 1;
+ unsigned int suspend_wait: 1;
int suspend_result;
int suspends_pending;
int standbys_pending;
@@ -1222,6 +1222,7 @@ static int suspend(int vetoable)
save_processor_state();
err = set_system_power_state(APM_STATE_SUSPEND);
+ ignore_normal_resume = 1;
restore_processor_state();
local_irq_disable();
@@ -1229,7 +1230,6 @@ static int suspend(int vetoable)
spin_lock(&i8253_lock);
reinit_timer();
set_time();
- ignore_normal_resume = 1;
spin_unlock(&i8253_lock);
write_sequnlock(&xtime_lock);
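The apm_user hunk above converts the one-bit flags from plain int to unsigned int. On compilers where plain int bitfields are signed (gcc's default on i386), a 1-bit field can only hold 0 and -1, so a comparison against 1 silently fails. A small host-side demonstration:

#include <stdio.h>

struct flags {
	int          s : 1;	/* signed: holds only 0 or -1 */
	unsigned int u : 1;	/* unsigned: holds 0 or 1 */
};

int main(void)
{
	struct flags f = { .s = 1, .u = 1 };

	printf("signed=%d unsigned=%u\n", f.s, f.u);	/* prints signed=-1 unsigned=1 */
	printf("s == 1? %s\n", f.s == 1 ? "yes" : "no");	/* prints no */
	return 0;
}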
diff --git a/arch/i386/kernel/cpu/common.c b/arch/i386/kernel/cpu/common.c
index d199e525680a..2203a9d20212 100644
--- a/arch/i386/kernel/cpu/common.c
+++ b/arch/i386/kernel/cpu/common.c
@@ -24,9 +24,9 @@ EXPORT_PER_CPU_SYMBOL(cpu_gdt_table);
DEFINE_PER_CPU(unsigned char, cpu_16bit_stack[CPU_16BIT_STACK_SIZE]);
EXPORT_PER_CPU_SYMBOL(cpu_16bit_stack);
-static int cachesize_override __initdata = -1;
-static int disable_x86_fxsr __initdata = 0;
-static int disable_x86_serial_nr __initdata = 1;
+static int cachesize_override __devinitdata = -1;
+static int disable_x86_fxsr __devinitdata = 0;
+static int disable_x86_serial_nr __devinitdata = 1;
struct cpu_dev * cpu_devs[X86_VENDOR_NUM] = {};
@@ -59,7 +59,7 @@ static int __init cachesize_setup(char *str)
}
__setup("cachesize=", cachesize_setup);
-int __init get_model_name(struct cpuinfo_x86 *c)
+int __devinit get_model_name(struct cpuinfo_x86 *c)
{
unsigned int *v;
char *p, *q;
@@ -89,7 +89,7 @@ int __init get_model_name(struct cpuinfo_x86 *c)
}
-void __init display_cacheinfo(struct cpuinfo_x86 *c)
+void __devinit display_cacheinfo(struct cpuinfo_x86 *c)
{
unsigned int n, dummy, ecx, edx, l2size;
@@ -130,7 +130,7 @@ void __init display_cacheinfo(struct cpuinfo_x86 *c)
/* in particular, if CPUID levels 0x80000002..4 are supported, this isn't used */
/* Look up CPU names by table lookup. */
-static char __init *table_lookup_model(struct cpuinfo_x86 *c)
+static char __devinit *table_lookup_model(struct cpuinfo_x86 *c)
{
struct cpu_model_info *info;
@@ -151,7 +151,7 @@ static char __init *table_lookup_model(struct cpuinfo_x86 *c)
}
-void __init get_cpu_vendor(struct cpuinfo_x86 *c, int early)
+void __devinit get_cpu_vendor(struct cpuinfo_x86 *c, int early)
{
char *v = c->x86_vendor_id;
int i;
@@ -202,7 +202,7 @@ static inline int flag_is_changeable_p(u32 flag)
/* Probe for the CPUID instruction */
-static int __init have_cpuid_p(void)
+static int __devinit have_cpuid_p(void)
{
return flag_is_changeable_p(X86_EFLAGS_ID);
}
@@ -249,7 +249,7 @@ static void __init early_cpu_detect(void)
#endif
}
-void __init generic_identify(struct cpuinfo_x86 * c)
+void __devinit generic_identify(struct cpuinfo_x86 * c)
{
u32 tfms, xlvl;
int junk;
@@ -296,7 +296,7 @@ void __init generic_identify(struct cpuinfo_x86 * c)
}
}
-static void __init squash_the_stupid_serial_number(struct cpuinfo_x86 *c)
+static void __devinit squash_the_stupid_serial_number(struct cpuinfo_x86 *c)
{
if (cpu_has(c, X86_FEATURE_PN) && disable_x86_serial_nr ) {
/* Disable processor serial number */
@@ -324,7 +324,7 @@ __setup("serialnumber", x86_serial_nr_setup);
/*
* This does the hard work of actually picking apart the CPU stuff...
*/
-void __init identify_cpu(struct cpuinfo_x86 *c)
+void __devinit identify_cpu(struct cpuinfo_x86 *c)
{
int i;
@@ -432,10 +432,13 @@ void __init identify_cpu(struct cpuinfo_x86 *c)
#ifdef CONFIG_X86_MCE
mcheck_init(c);
#endif
+ if (c == &boot_cpu_data)
+ sysenter_setup();
+ enable_sep_cpu();
}
#ifdef CONFIG_X86_HT
-void __init detect_ht(struct cpuinfo_x86 *c)
+void __devinit detect_ht(struct cpuinfo_x86 *c)
{
u32 eax, ebx, ecx, edx;
int index_msb, tmp;
@@ -490,7 +493,7 @@ void __init detect_ht(struct cpuinfo_x86 *c)
}
#endif
-void __init print_cpu_info(struct cpuinfo_x86 *c)
+void __devinit print_cpu_info(struct cpuinfo_x86 *c)
{
char *vendor = NULL;
@@ -513,7 +516,7 @@ void __init print_cpu_info(struct cpuinfo_x86 *c)
printk("\n");
}
-cpumask_t cpu_initialized __initdata = CPU_MASK_NONE;
+cpumask_t cpu_initialized __devinitdata = CPU_MASK_NONE;
/* This is hacky. :)
* We're emulating future behavior.
@@ -560,7 +563,7 @@ void __init early_cpu_init(void)
* and IDT. We reload them nevertheless, this function acts as a
* 'CPU state barrier', nothing should get across.
*/
-void __init cpu_init (void)
+void __devinit cpu_init(void)
{
int cpu = smp_processor_id();
struct tss_struct * t = &per_cpu(init_tss, cpu);
@@ -635,7 +638,7 @@ void __init cpu_init (void)
/* Clear all 6 debug registers: */
-#define CD(register) __asm__("movl %0,%%db" #register ::"r"(0) );
+#define CD(register) set_debugreg(0, register)
CD(0); CD(1); CD(2); CD(3); /* no db4 and db5 */; CD(6); CD(7);
@@ -648,3 +651,15 @@ void __init cpu_init (void)
clear_used_math();
mxcsr_feature_mask_init();
}
+
+#ifdef CONFIG_HOTPLUG_CPU
+void __devinit cpu_uninit(void)
+{
+ int cpu = raw_smp_processor_id();
+ cpu_clear(cpu, cpu_initialized);
+
+ /* lazy TLB state */
+ per_cpu(cpu_tlbstate, cpu).state = 0;
+ per_cpu(cpu_tlbstate, cpu).active_mm = &init_mm;
+}
+#endif
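The many __init to __devinit conversions in this file exist so the CPU setup paths survive past boot when CPU hotplug is configured. A sketch of the assumed linux/init.h convention behind the annotation:

/*
 * Assumed definitions (following the 2.6 linux/init.h convention):
 * with hotplug the text must stay resident, so __devinit expands to
 * nothing; without hotplug it degrades to __init and the memory is
 * reclaimed after boot as before.
 */
#ifdef CONFIG_HOTPLUG
#define __devinit
#define __devinitdata
#else
#define __devinit	__init
#define __devinitdata	__initdata
#endif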
diff --git a/arch/i386/kernel/cpu/cpufreq/Kconfig b/arch/i386/kernel/cpu/cpufreq/Kconfig
index f25ffd74235c..0f1eb507233b 100644
--- a/arch/i386/kernel/cpu/cpufreq/Kconfig
+++ b/arch/i386/kernel/cpu/cpufreq/Kconfig
@@ -23,7 +23,7 @@ config X86_ACPI_CPUFREQ
If in doubt, say N.
config ELAN_CPUFREQ
- tristate "AMD Elan"
+ tristate "AMD Elan SC400 and SC410"
select CPU_FREQ_TABLE
depends on X86_ELAN
---help---
@@ -38,6 +38,18 @@ config ELAN_CPUFREQ
If in doubt, say N.
+config SC520_CPUFREQ
+ tristate "AMD Elan SC520"
+ select CPU_FREQ_TABLE
+ depends on X86_ELAN
+ ---help---
+ This adds the CPUFreq driver for the AMD Elan SC520 processor.
+
+ For details, take a look at <file:Documentation/cpu-freq/>.
+
+ If in doubt, say N.
+
+
config X86_POWERNOW_K6
tristate "AMD Mobile K6-2/K6-3 PowerNow!"
select CPU_FREQ_TABLE
diff --git a/arch/i386/kernel/cpu/cpufreq/Makefile b/arch/i386/kernel/cpu/cpufreq/Makefile
index a922e97aeedd..2e894f1c8910 100644
--- a/arch/i386/kernel/cpu/cpufreq/Makefile
+++ b/arch/i386/kernel/cpu/cpufreq/Makefile
@@ -3,6 +3,7 @@ obj-$(CONFIG_X86_POWERNOW_K7) += powernow-k7.o
obj-$(CONFIG_X86_POWERNOW_K8) += powernow-k8.o
obj-$(CONFIG_X86_LONGHAUL) += longhaul.o
obj-$(CONFIG_ELAN_CPUFREQ) += elanfreq.o
+obj-$(CONFIG_SC520_CPUFREQ) += sc520_freq.o
obj-$(CONFIG_X86_LONGRUN) += longrun.o
obj-$(CONFIG_X86_GX_SUSPMOD) += gx-suspmod.o
obj-$(CONFIG_X86_SPEEDSTEP_ICH) += speedstep-ich.o
diff --git a/arch/i386/kernel/cpu/cpufreq/longhaul.c b/arch/i386/kernel/cpu/cpufreq/longhaul.c
index ab0f9f5aac11..04e3563da4fe 100644
--- a/arch/i386/kernel/cpu/cpufreq/longhaul.c
+++ b/arch/i386/kernel/cpu/cpufreq/longhaul.c
@@ -29,6 +29,7 @@
#include <linux/cpufreq.h>
#include <linux/slab.h>
#include <linux/string.h>
+#include <linux/pci.h>
#include <asm/msr.h>
#include <asm/timex.h>
@@ -119,7 +120,13 @@ static int longhaul_get_cpu_mult(void)
static void do_powersaver(union msr_longhaul *longhaul,
unsigned int clock_ratio_index)
{
+ struct pci_dev *dev;
+ unsigned long flags;
+ unsigned int tmp_mask;
int version;
+ int i;
+ u16 pci_cmd;
+ u16 cmd_state[64];
switch (cpu_model) {
case CPU_EZRA_T:
@@ -137,17 +144,58 @@ static void do_powersaver(union msr_longhaul *longhaul,
longhaul->bits.SoftBusRatio4 = (clock_ratio_index & 0x10) >> 4;
longhaul->bits.EnableSoftBusRatio = 1;
longhaul->bits.RevisionKey = 0;
- local_irq_disable();
- wrmsrl(MSR_VIA_LONGHAUL, longhaul->val);
+
+ preempt_disable();
+ local_irq_save(flags);
+
+ /*
+ * get current pci bus master state for all devices
+ * and clear bus master bit
+ */
+ dev = NULL;
+ i = 0;
+ do {
+ dev = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, dev);
+ if (dev != NULL) {
+ pci_read_config_word(dev, PCI_COMMAND, &pci_cmd);
+ cmd_state[i++] = pci_cmd;
+ pci_cmd &= ~PCI_COMMAND_MASTER;
+ pci_write_config_word(dev, PCI_COMMAND, pci_cmd);
+ }
+ } while (dev != NULL);
+
+ tmp_mask=inb(0x21); /* works on C3. save mask. */
+ outb(0xFE,0x21); /* TMR0 only */
+ outb(0xFF,0x80); /* delay */
+
local_irq_enable();
+
+ __hlt();
+ wrmsrl(MSR_VIA_LONGHAUL, longhaul->val);
__hlt();
+ local_irq_disable();
+
+ outb(tmp_mask,0x21); /* restore mask */
+
+ /* restore pci bus master state for all devices */
+ dev = NULL;
+ i = 0;
+ do {
+ dev = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, dev);
+ if (dev != NULL) {
+ pci_cmd = cmd_state[i++];
+ pci_write_config_word(dev, PCI_COMMAND, pci_cmd);
+ }
+ } while (dev != NULL);
+ local_irq_restore(flags);
+ preempt_enable();
+
+ /* disable bus ratio bit */
rdmsrl(MSR_VIA_LONGHAUL, longhaul->val);
longhaul->bits.EnableSoftBusRatio = 0;
longhaul->bits.RevisionKey = version;
- local_irq_disable();
wrmsrl(MSR_VIA_LONGHAUL, longhaul->val);
- local_irq_enable();
}
/**
@@ -578,7 +626,7 @@ static int __init longhaul_cpu_init(struct cpufreq_policy *policy)
longhaul_setup_voltagescaling();
policy->governor = CPUFREQ_DEFAULT_GOVERNOR;
- policy->cpuinfo.transition_latency = CPUFREQ_ETERNAL;
+ policy->cpuinfo.transition_latency = 200000; /* nsec */
policy->cur = calc_speed(longhaul_get_cpu_mult());
ret = cpufreq_frequency_table_cpuinfo(policy, longhaul_table);
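The bus-master save/restore added to do_powersaver() walks every PCI device twice with an open-coded do/while. An equivalent sketch in the conventional pci_get_device() iteration style, with symmetric word-sized config writes; saved[] must be sized for the device count (the patch uses a fixed 64-entry array):

#include <linux/pci.h>

/* Sketch: disable (restore == 0) or restore (restore != 0) bus mastering. */
static void toggle_bus_masters(u16 *saved, int restore)
{
	struct pci_dev *dev = NULL;
	int i = 0;
	u16 cmd;

	while ((dev = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, dev)) != NULL) {
		if (restore) {
			cmd = saved[i++];
		} else {
			pci_read_config_word(dev, PCI_COMMAND, &cmd);
			saved[i++] = cmd;
			cmd &= ~PCI_COMMAND_MASTER;
		}
		pci_write_config_word(dev, PCI_COMMAND, cmd);
	}
}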
diff --git a/arch/i386/kernel/cpu/cpufreq/powernow-k7.c b/arch/i386/kernel/cpu/cpufreq/powernow-k7.c
index 913f652623d9..73a5dc5b26b8 100644
--- a/arch/i386/kernel/cpu/cpufreq/powernow-k7.c
+++ b/arch/i386/kernel/cpu/cpufreq/powernow-k7.c
@@ -23,6 +23,7 @@
#include <linux/dmi.h>
#include <asm/msr.h>
+#include <asm/timer.h>
#include <asm/timex.h>
#include <asm/io.h>
#include <asm/system.h>
@@ -586,13 +587,17 @@ static int __init powernow_cpu_init (struct cpufreq_policy *policy)
rdmsrl (MSR_K7_FID_VID_STATUS, fidvidstatus.val);
- /* A K7 with powernow technology is set to max frequency by BIOS */
- fsb = (10 * cpu_khz) / fid_codes[fidvidstatus.bits.MFID];
+ /* recalibrate cpu_khz */
+ result = recalibrate_cpu_khz();
+ if (result)
+ return result;
+
+ fsb = (10 * cpu_khz) / fid_codes[fidvidstatus.bits.CFID];
if (!fsb) {
printk(KERN_WARNING PFX "can not determine bus frequency\n");
return -EINVAL;
}
- dprintk("FSB: %3d.%03d MHz\n", fsb/1000, fsb%1000);
+ dprintk("FSB: %3dMHz\n", fsb/1000);
if (dmi_check_system(powernow_dmi_table) || acpi_force) {
printk (KERN_INFO PFX "PSB/PST known to be broken. Trying ACPI instead\n");
@@ -643,9 +648,7 @@ static int powernow_cpu_exit (struct cpufreq_policy *policy) {
}
#endif
- if (powernow_table)
- kfree(powernow_table);
-
+ kfree(powernow_table);
return 0;
}
diff --git a/arch/i386/kernel/cpu/cpufreq/powernow-k8.c b/arch/i386/kernel/cpu/cpufreq/powernow-k8.c
index a65ff7e32e5d..10cc096c0ade 100644
--- a/arch/i386/kernel/cpu/cpufreq/powernow-k8.c
+++ b/arch/i386/kernel/cpu/cpufreq/powernow-k8.c
@@ -4,7 +4,7 @@
* GNU general public license version 2. See "COPYING" or
* http://www.gnu.org/licenses/gpl.html
*
- * Support : paul.devriendt@amd.com
+ * Support : mark.langsdorf@amd.com
*
* Based on the powernow-k7.c module written by Dave Jones.
* (C) 2003 Dave Jones <davej@codemonkey.org.uk> on behalf of SuSE Labs
@@ -15,12 +15,13 @@
*
* Valuable input gratefully received from Dave Jones, Pavel Machek,
* Dominik Brodowski, and others.
+ * Originally developed by Paul Devriendt.
* Processor information obtained from Chapter 9 (Power and Thermal Management)
* of the "BIOS and Kernel Developer's Guide for the AMD Athlon 64 and AMD
* Opteron Processors" available for download from www.amd.com
*
* Tables for specific CPUs can be infrerred from
- * http://www.amd.com/us-en/assets/content_type/white_papers_and_tech_docs/30430.pdf
+ * http://www.amd.com/us-en/assets/content_type/white_papers_and_tech_docs/30430.pdf
*/
#include <linux/kernel.h>
@@ -30,6 +31,7 @@
#include <linux/cpufreq.h>
#include <linux/slab.h>
#include <linux/string.h>
+#include <linux/cpumask.h>
#include <asm/msr.h>
#include <asm/io.h>
@@ -42,7 +44,7 @@
#define PFX "powernow-k8: "
#define BFX PFX "BIOS error: "
-#define VERSION "version 1.00.09e"
+#define VERSION "version 1.40.2"
#include "powernow-k8.h"
/* serialize freq changes */
@@ -50,6 +52,10 @@ static DECLARE_MUTEX(fidvid_sem);
static struct powernow_k8_data *powernow_data[NR_CPUS];
+#ifndef CONFIG_SMP
+static cpumask_t cpu_core_map[1];
+#endif
+
/* Return a frequency in MHz, given an input fid */
static u32 find_freq_from_fid(u32 fid)
{
@@ -274,11 +280,18 @@ static int core_voltage_pre_transition(struct powernow_k8_data *data, u32 reqvid
{
u32 rvosteps = data->rvo;
u32 savefid = data->currfid;
+ u32 maxvid, lo;
dprintk("ph1 (cpu%d): start, currfid 0x%x, currvid 0x%x, reqvid 0x%x, rvo 0x%x\n",
smp_processor_id(),
data->currfid, data->currvid, reqvid, data->rvo);
+ rdmsr(MSR_FIDVID_STATUS, lo, maxvid);
+ maxvid = 0x1f & (maxvid >> 16);
+ dprintk("ph1 maxvid=0x%x\n", maxvid);
+ if (reqvid < maxvid) /* lower numbers are higher voltages */
+ reqvid = maxvid;
+
while (data->currvid > reqvid) {
dprintk("ph1: curr 0x%x, req vid 0x%x\n",
data->currvid, reqvid);
@@ -286,8 +299,8 @@ static int core_voltage_pre_transition(struct powernow_k8_data *data, u32 reqvid
return 1;
}
- while ((rvosteps > 0) && ((data->rvo + data->currvid) > reqvid)) {
- if (data->currvid == 0) {
+ while ((rvosteps > 0) && ((data->rvo + data->currvid) > reqvid)) {
+ if (data->currvid == maxvid) {
rvosteps = 0;
} else {
dprintk("ph1: changing vid for rvo, req 0x%x\n",
@@ -671,7 +684,7 @@ static int find_psb_table(struct powernow_k8_data *data)
* BIOS and Kernel Developer's Guide, which is available on
* www.amd.com
*/
- printk(KERN_ERR PFX "BIOS error - no PSB\n");
+ printk(KERN_INFO PFX "BIOS error - no PSB or ACPI _PSS objects\n");
return -ENODEV;
}
@@ -695,7 +708,7 @@ static int powernow_k8_cpu_init_acpi(struct powernow_k8_data *data)
struct cpufreq_frequency_table *powernow_table;
if (acpi_processor_register_performance(&data->acpi_data, data->cpu)) {
- dprintk("register performance failed\n");
+ dprintk("register performance failed: bad ACPI data\n");
return -EIO;
}
@@ -746,22 +759,23 @@ static int powernow_k8_cpu_init_acpi(struct powernow_k8_data *data)
continue;
}
- if (fid < HI_FID_TABLE_BOTTOM) {
- if (cntlofreq) {
- /* if both entries are the same, ignore this
- * one...
- */
- if ((powernow_table[i].frequency != powernow_table[cntlofreq].frequency) ||
- (powernow_table[i].index != powernow_table[cntlofreq].index)) {
- printk(KERN_ERR PFX "Too many lo freq table entries\n");
- goto err_out_mem;
- }
-
- dprintk("double low frequency table entry, ignoring it.\n");
- powernow_table[i].frequency = CPUFREQ_ENTRY_INVALID;
- continue;
- } else
- cntlofreq = i;
+ /* verify only 1 entry from the lo frequency table */
+ if (fid < HI_FID_TABLE_BOTTOM) {
+ if (cntlofreq) {
+ /* if both entries are the same, ignore this
+ * one...
+ */
+ if ((powernow_table[i].frequency != powernow_table[cntlofreq].frequency) ||
+ (powernow_table[i].index != powernow_table[cntlofreq].index)) {
+ printk(KERN_ERR PFX "Too many lo freq table entries\n");
+ goto err_out_mem;
+ }
+
+ dprintk("double low frequency table entry, ignoring it.\n");
+ powernow_table[i].frequency = CPUFREQ_ENTRY_INVALID;
+ continue;
+ } else
+ cntlofreq = i;
}
if (powernow_table[i].frequency != (data->acpi_data.states[i].core_frequency * 1000)) {
@@ -816,7 +830,7 @@ static int transition_frequency(struct powernow_k8_data *data, unsigned int inde
{
u32 fid;
u32 vid;
- int res;
+ int res, i;
struct cpufreq_freqs freqs;
dprintk("cpu %d transition to index %u\n", smp_processor_id(), index);
@@ -841,7 +855,8 @@ static int transition_frequency(struct powernow_k8_data *data, unsigned int inde
}
if ((fid < HI_FID_TABLE_BOTTOM) && (data->currfid < HI_FID_TABLE_BOTTOM)) {
- printk("ignoring illegal change in lo freq table-%x to 0x%x\n",
+ printk(KERN_ERR PFX
+ "ignoring illegal change in lo freq table-%x to 0x%x\n",
data->currfid, fid);
return 1;
}
@@ -850,18 +865,20 @@ static int transition_frequency(struct powernow_k8_data *data, unsigned int inde
smp_processor_id(), fid, vid);
freqs.cpu = data->cpu;
-
freqs.old = find_khz_freq_from_fid(data->currfid);
freqs.new = find_khz_freq_from_fid(fid);
- cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE);
+ for_each_cpu_mask(i, cpu_core_map[data->cpu]) {
+ freqs.cpu = i;
+ cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE);
+ }
- down(&fidvid_sem);
res = transition_fid_vid(data, fid, vid);
- up(&fidvid_sem);
freqs.new = find_khz_freq_from_fid(data->currfid);
- cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE);
-
+ for_each_cpu_mask(i, cpu_core_map[data->cpu]) {
+ freqs.cpu = i;
+ cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE);
+ }
return res;
}
@@ -874,6 +891,7 @@ static int powernowk8_target(struct cpufreq_policy *pol, unsigned targfreq, unsi
u32 checkvid = data->currvid;
unsigned int newstate;
int ret = -EIO;
+ int i;
/* only run on specific CPU from here on */
oldmask = current->cpus_allowed;
@@ -902,22 +920,41 @@ static int powernowk8_target(struct cpufreq_policy *pol, unsigned targfreq, unsi
data->currfid, data->currvid);
if ((checkvid != data->currvid) || (checkfid != data->currfid)) {
- printk(KERN_ERR PFX
- "error - out of sync, fid 0x%x 0x%x, vid 0x%x 0x%x\n",
- checkfid, data->currfid, checkvid, data->currvid);
+ printk(KERN_INFO PFX
+ "error - out of sync, fix 0x%x 0x%x, vid 0x%x 0x%x\n",
+ checkfid, data->currfid, checkvid, data->currvid);
}
if (cpufreq_frequency_table_target(pol, data->powernow_table, targfreq, relation, &newstate))
goto err_out;
+ down(&fidvid_sem);
+
+ for_each_cpu_mask(i, cpu_core_map[pol->cpu]) {
+ /* make sure the sibling is initialized */
+ if (!powernow_data[i]) {
+ ret = 0;
+ up(&fidvid_sem);
+ goto err_out;
+ }
+ }
+
powernow_k8_acpi_pst_values(data, newstate);
if (transition_frequency(data, newstate)) {
printk(KERN_ERR PFX "transition frequency failed\n");
ret = 1;
+ up(&fidvid_sem);
goto err_out;
}
+ /* Update all the fid/vids of our siblings */
+ for_each_cpu_mask(i, cpu_core_map[pol->cpu]) {
+ powernow_data[i]->currvid = data->currvid;
+ powernow_data[i]->currfid = data->currfid;
+ }
+ up(&fidvid_sem);
+
pol->cur = find_khz_freq_from_fid(data->currfid);
ret = 0;
@@ -962,7 +999,7 @@ static int __init powernowk8_cpu_init(struct cpufreq_policy *pol)
*/
if ((num_online_cpus() != 1) || (num_possible_cpus() != 1)) {
- printk(KERN_INFO PFX "MP systems not supported by PSB BIOS structure\n");
+ printk(KERN_ERR PFX "MP systems not supported by PSB BIOS structure\n");
kfree(data);
return -ENODEV;
}
@@ -1003,6 +1040,7 @@ static int __init powernowk8_cpu_init(struct cpufreq_policy *pol)
schedule();
pol->governor = CPUFREQ_DEFAULT_GOVERNOR;
+ pol->cpus = cpu_core_map[pol->cpu];
/* Take a crude guess here.
* That guess was in microseconds, so multiply with 1000 */
@@ -1069,7 +1107,7 @@ static unsigned int powernowk8_get (unsigned int cpu)
return 0;
}
preempt_disable();
-
+
if (query_current_values_with_pending_wait(data))
goto out;
@@ -1127,9 +1165,10 @@ static void __exit powernowk8_exit(void)
cpufreq_unregister_driver(&cpufreq_amd64_driver);
}
-MODULE_AUTHOR("Paul Devriendt <paul.devriendt@amd.com>");
+MODULE_AUTHOR("Paul Devriendt <paul.devriendt@amd.com> and Mark Langsdorf <mark.langsdorf@amd.com.");
MODULE_DESCRIPTION("AMD Athlon 64 and Opteron processor frequency driver.");
MODULE_LICENSE("GPL");
late_initcall(powernowk8_init);
module_exit(powernowk8_exit);
+
diff --git a/arch/i386/kernel/cpu/cpufreq/powernow-k8.h b/arch/i386/kernel/cpu/cpufreq/powernow-k8.h
index 63ebc8470f52..9ed5bf221cb7 100644
--- a/arch/i386/kernel/cpu/cpufreq/powernow-k8.h
+++ b/arch/i386/kernel/cpu/cpufreq/powernow-k8.h
@@ -174,3 +174,18 @@ static int core_voltage_post_transition(struct powernow_k8_data *data, u32 reqvi
static int core_frequency_transition(struct powernow_k8_data *data, u32 reqfid);
static void powernow_k8_acpi_pst_values(struct powernow_k8_data *data, unsigned int index);
+
+#ifndef for_each_cpu_mask
+#define for_each_cpu_mask(i,mask) for (i=0;i<1;i++)
+#endif
+
+#ifdef CONFIG_SMP
+static inline void define_siblings(int cpu, cpumask_t cpu_sharedcore_mask[])
+{
+}
+#else
+static inline void define_siblings(int cpu, cpumask_t cpu_sharedcore_mask[])
+{
+ cpu_set(0, cpu_sharedcore_mask[0]);
+}
+#endif
diff --git a/arch/i386/kernel/cpu/cpufreq/sc520_freq.c b/arch/i386/kernel/cpu/cpufreq/sc520_freq.c
new file mode 100644
index 000000000000..ef457d50f4ac
--- /dev/null
+++ b/arch/i386/kernel/cpu/cpufreq/sc520_freq.c
@@ -0,0 +1,186 @@
+/*
+ * sc520_freq.c: cpufreq driver for the AMD Elan sc520
+ *
+ * Copyright (C) 2005 Sean Young <sean@mess.org>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ *
+ * Based on elanfreq.c
+ *
+ * 2005-03-30: - initial revision
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+
+#include <linux/delay.h>
+#include <linux/cpufreq.h>
+
+#include <asm/msr.h>
+#include <asm/timex.h>
+#include <asm/io.h>
+
+#define MMCR_BASE 0xfffef000 /* The default base address */
+#define OFFS_CPUCTL 0x2 /* CPU Control Register */
+
+static __u8 __iomem *cpuctl;
+
+#define dprintk(msg...) cpufreq_debug_printk(CPUFREQ_DEBUG_DRIVER, "sc520_freq", msg)
+
+static struct cpufreq_frequency_table sc520_freq_table[] = {
+ {0x01, 100000},
+ {0x02, 133000},
+ {0, CPUFREQ_TABLE_END},
+};
+
+static unsigned int sc520_freq_get_cpu_frequency(unsigned int cpu)
+{
+ u8 clockspeed_reg = *cpuctl;
+
+ switch (clockspeed_reg & 0x03) {
+ default:
+ printk(KERN_ERR "sc520_freq: error: cpuctl register has unexpected value %02x\n", clockspeed_reg);
+ case 0x01:
+ return 100000;
+ case 0x02:
+ return 133000;
+ }
+}
+
+static void sc520_freq_set_cpu_state (unsigned int state)
+{
+
+ struct cpufreq_freqs freqs;
+ u8 clockspeed_reg;
+
+ freqs.old = sc520_freq_get_cpu_frequency(0);
+ freqs.new = sc520_freq_table[state].frequency;
+ freqs.cpu = 0; /* AMD Elan is UP */
+
+ cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE);
+
+ dprintk("attempting to set frequency to %i kHz\n",
+ sc520_freq_table[state].frequency);
+
+ local_irq_disable();
+
+ clockspeed_reg = *cpuctl & ~0x03;
+ *cpuctl = clockspeed_reg | sc520_freq_table[state].index;
+
+ local_irq_enable();
+
+ cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE);
+};
+
+static int sc520_freq_verify (struct cpufreq_policy *policy)
+{
+ return cpufreq_frequency_table_verify(policy, &sc520_freq_table[0]);
+}
+
+static int sc520_freq_target (struct cpufreq_policy *policy,
+ unsigned int target_freq,
+ unsigned int relation)
+{
+ unsigned int newstate = 0;
+
+ if (cpufreq_frequency_table_target(policy, sc520_freq_table, target_freq, relation, &newstate))
+ return -EINVAL;
+
+ sc520_freq_set_cpu_state(newstate);
+
+ return 0;
+}
+
+
+/*
+ * Module init and exit code
+ */
+
+static int sc520_freq_cpu_init(struct cpufreq_policy *policy)
+{
+ struct cpuinfo_x86 *c = cpu_data;
+ int result;
+
+ /* capability check */
+ if (c->x86_vendor != X86_VENDOR_AMD ||
+ c->x86 != 4 || c->x86_model != 9)
+ return -ENODEV;
+
+ /* cpuinfo and default policy values */
+ policy->governor = CPUFREQ_DEFAULT_GOVERNOR;
+ policy->cpuinfo.transition_latency = 1000000; /* 1ms */
+ policy->cur = sc520_freq_get_cpu_frequency(0);
+
+ result = cpufreq_frequency_table_cpuinfo(policy, sc520_freq_table);
+ if (result)
+ return (result);
+
+ cpufreq_frequency_table_get_attr(sc520_freq_table, policy->cpu);
+
+ return 0;
+}
+
+
+static int sc520_freq_cpu_exit(struct cpufreq_policy *policy)
+{
+ cpufreq_frequency_table_put_attr(policy->cpu);
+ return 0;
+}
+
+
+static struct freq_attr* sc520_freq_attr[] = {
+ &cpufreq_freq_attr_scaling_available_freqs,
+ NULL,
+};
+
+
+static struct cpufreq_driver sc520_freq_driver = {
+ .get = sc520_freq_get_cpu_frequency,
+ .verify = sc520_freq_verify,
+ .target = sc520_freq_target,
+ .init = sc520_freq_cpu_init,
+ .exit = sc520_freq_cpu_exit,
+ .name = "sc520_freq",
+ .owner = THIS_MODULE,
+ .attr = sc520_freq_attr,
+};
+
+
+static int __init sc520_freq_init(void)
+{
+ struct cpuinfo_x86 *c = cpu_data;
+
+ /* Test if we have the right hardware */
+ if(c->x86_vendor != X86_VENDOR_AMD ||
+ c->x86 != 4 || c->x86_model != 9) {
+ dprintk("no Elan SC520 processor found!\n");
+ return -ENODEV;
+ }
+ cpuctl = ioremap((unsigned long)(MMCR_BASE + OFFS_CPUCTL), 1);
+ if(!cpuctl) {
+ printk(KERN_ERR "sc520_freq: error: failed to remap memory\n");
+ return -ENOMEM;
+ }
+
+ return cpufreq_register_driver(&sc520_freq_driver);
+}
+
+
+static void __exit sc520_freq_exit(void)
+{
+ cpufreq_unregister_driver(&sc520_freq_driver);
+ iounmap(cpuctl);
+}
+
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Sean Young <sean@mess.org>");
+MODULE_DESCRIPTION("cpufreq driver for AMD's Elan sc520 CPU");
+
+module_init(sc520_freq_init);
+module_exit(sc520_freq_exit);
+
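For context, a sketch of how the cpufreq core is assumed to exercise the driver registered above: a governor requests a target frequency, the core clamps it to the policy bounds, and the driver's .target hook (sc520_freq_target() here) picks the nearest table entry and pokes CPUCTL. set_speed_sketch() is purely illustrative:

#include <linux/cpufreq.h>

/* Illustrative only: simplified view of the assumed cpufreq call flow. */
static int set_speed_sketch(struct cpufreq_policy *policy, unsigned int khz)
{
	/* the core clamps the request into the policy bounds first */
	if (khz < policy->min)
		khz = policy->min;
	if (khz > policy->max)
		khz = policy->max;

	/* then dispatches to the driver's .target hook */
	return sc520_freq_driver.target(policy, khz, CPUFREQ_RELATION_L);
}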
diff --git a/arch/i386/kernel/cpu/cpufreq/speedstep-centrino.c b/arch/i386/kernel/cpu/cpufreq/speedstep-centrino.c
index 07d5612dc00f..7dcbf70fc16f 100644
--- a/arch/i386/kernel/cpu/cpufreq/speedstep-centrino.c
+++ b/arch/i386/kernel/cpu/cpufreq/speedstep-centrino.c
@@ -54,6 +54,8 @@ enum {
CPU_DOTHAN_A1,
CPU_DOTHAN_A2,
CPU_DOTHAN_B0,
+ CPU_MP4HT_D0,
+ CPU_MP4HT_E0,
};
static const struct cpu_id cpu_ids[] = {
@@ -61,6 +63,8 @@ static const struct cpu_id cpu_ids[] = {
[CPU_DOTHAN_A1] = { 6, 13, 1 },
[CPU_DOTHAN_A2] = { 6, 13, 2 },
[CPU_DOTHAN_B0] = { 6, 13, 6 },
+ [CPU_MP4HT_D0] = {15, 3, 4 },
+ [CPU_MP4HT_E0] = {15, 4, 1 },
};
#define N_IDS (sizeof(cpu_ids)/sizeof(cpu_ids[0]))
@@ -226,6 +230,8 @@ static struct cpu_model models[] =
{ &cpu_ids[CPU_DOTHAN_A1], NULL, 0, NULL },
{ &cpu_ids[CPU_DOTHAN_A2], NULL, 0, NULL },
{ &cpu_ids[CPU_DOTHAN_B0], NULL, 0, NULL },
+ { &cpu_ids[CPU_MP4HT_D0], NULL, 0, NULL },
+ { &cpu_ids[CPU_MP4HT_E0], NULL, 0, NULL },
{ NULL, }
};
diff --git a/arch/i386/kernel/cpu/cpufreq/speedstep-lib.c b/arch/i386/kernel/cpu/cpufreq/speedstep-lib.c
index 8ba430a9c3a2..d368b3f5fce8 100644
--- a/arch/i386/kernel/cpu/cpufreq/speedstep-lib.c
+++ b/arch/i386/kernel/cpu/cpufreq/speedstep-lib.c
@@ -336,7 +336,7 @@ unsigned int speedstep_get_freqs(unsigned int processor,
if (!prev_speed)
return -EIO;
- dprintk("previous seped is %u\n", prev_speed);
+ dprintk("previous speed is %u\n", prev_speed);
local_irq_save(flags);
@@ -348,7 +348,7 @@ unsigned int speedstep_get_freqs(unsigned int processor,
goto out;
}
- dprintk("low seped is %u\n", *low_speed);
+ dprintk("low speed is %u\n", *low_speed);
/* switch to high state */
set_state(SPEEDSTEP_HIGH);
@@ -358,7 +358,7 @@ unsigned int speedstep_get_freqs(unsigned int processor,
goto out;
}
- dprintk("high seped is %u\n", *high_speed);
+ dprintk("high speed is %u\n", *high_speed);
if (*low_speed == *high_speed) {
ret = -ENODEV;
diff --git a/arch/i386/kernel/cpu/cpufreq/speedstep-smi.c b/arch/i386/kernel/cpu/cpufreq/speedstep-smi.c
index 79440b3f087e..b25fb6b635ae 100644
--- a/arch/i386/kernel/cpu/cpufreq/speedstep-smi.c
+++ b/arch/i386/kernel/cpu/cpufreq/speedstep-smi.c
@@ -357,6 +357,9 @@ static int __init speedstep_init(void)
case SPEEDSTEP_PROCESSOR_PIII_C:
case SPEEDSTEP_PROCESSOR_PIII_C_EARLY:
break;
+ case SPEEDSTEP_PROCESSOR_P4M:
+ printk(KERN_INFO "speedstep-smi: you're trying to use this cpufreq driver on a Pentium 4-based CPU. Most likely it will not work.\n");
+ break;
default:
speedstep_processor = 0;
}
diff --git a/arch/i386/kernel/cpu/intel.c b/arch/i386/kernel/cpu/intel.c
index 121aa2176e69..96a75d045835 100644
--- a/arch/i386/kernel/cpu/intel.c
+++ b/arch/i386/kernel/cpu/intel.c
@@ -28,7 +28,7 @@ extern int trap_init_f00f_bug(void);
struct movsl_mask movsl_mask;
#endif
-void __init early_intel_workaround(struct cpuinfo_x86 *c)
+void __devinit early_intel_workaround(struct cpuinfo_x86 *c)
{
if (c->x86_vendor != X86_VENDOR_INTEL)
return;
@@ -43,7 +43,7 @@ void __init early_intel_workaround(struct cpuinfo_x86 *c)
* This is called before we do cpu ident work
*/
-int __init ppro_with_ram_bug(void)
+int __devinit ppro_with_ram_bug(void)
{
/* Uses data from early_cpu_detect now */
if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL &&
@@ -61,7 +61,7 @@ int __init ppro_with_ram_bug(void)
* P4 Xeon errata 037 workaround.
* Hardware prefetcher may cause stale data to be loaded into the cache.
*/
-static void __init Intel_errata_workarounds(struct cpuinfo_x86 *c)
+static void __devinit Intel_errata_workarounds(struct cpuinfo_x86 *c)
{
unsigned long lo, hi;
@@ -80,7 +80,7 @@ static void __init Intel_errata_workarounds(struct cpuinfo_x86 *c)
/*
* find out the number of processor cores on the die
*/
-static int __init num_cpu_cores(struct cpuinfo_x86 *c)
+static int __devinit num_cpu_cores(struct cpuinfo_x86 *c)
{
unsigned int eax;
@@ -98,7 +98,7 @@ static int __init num_cpu_cores(struct cpuinfo_x86 *c)
return 1;
}
-static void __init init_intel(struct cpuinfo_x86 *c)
+static void __devinit init_intel(struct cpuinfo_x86 *c)
{
unsigned int l2 = 0;
char *p = NULL;
@@ -204,7 +204,7 @@ static unsigned int intel_size_cache(struct cpuinfo_x86 * c, unsigned int size)
return size;
}
-static struct cpu_dev intel_cpu_dev __initdata = {
+static struct cpu_dev intel_cpu_dev __devinitdata = {
.c_vendor = "Intel",
.c_ident = { "GenuineIntel" },
.c_models = {
diff --git a/arch/i386/kernel/cpu/intel_cacheinfo.c b/arch/i386/kernel/cpu/intel_cacheinfo.c
index aeb5b4ef8c8b..1d768b263269 100644
--- a/arch/i386/kernel/cpu/intel_cacheinfo.c
+++ b/arch/i386/kernel/cpu/intel_cacheinfo.c
@@ -28,7 +28,7 @@ struct _cache_table
};
/* all the cache descriptor types we care about (no TLB or trace cache entries) */
-static struct _cache_table cache_table[] __initdata =
+static struct _cache_table cache_table[] __devinitdata =
{
{ 0x06, LVL_1_INST, 8 }, /* 4-way set assoc, 32 byte line size */
{ 0x08, LVL_1_INST, 16 }, /* 4-way set assoc, 32 byte line size */
@@ -118,7 +118,7 @@ struct _cpuid4_info {
};
#define MAX_CACHE_LEAVES 4
-static unsigned short __devinitdata num_cache_leaves;
+static unsigned short num_cache_leaves;
static int __devinit cpuid4_cache_lookup(int index, struct _cpuid4_info *this_leaf)
{
@@ -160,7 +160,7 @@ static int __init find_num_cache_leaves(void)
return retval;
}
-unsigned int __init init_intel_cacheinfo(struct cpuinfo_x86 *c)
+unsigned int __devinit init_intel_cacheinfo(struct cpuinfo_x86 *c)
{
unsigned int trace = 0, l1i = 0, l1d = 0, l2 = 0, l3 = 0; /* Cache sizes */
unsigned int new_l1d = 0, new_l1i = 0; /* Cache sizes from cpuid(4) */
diff --git a/arch/i386/kernel/cpu/mcheck/k7.c b/arch/i386/kernel/cpu/mcheck/k7.c
index 8df52e86c4d2..c4abe7657397 100644
--- a/arch/i386/kernel/cpu/mcheck/k7.c
+++ b/arch/i386/kernel/cpu/mcheck/k7.c
@@ -69,7 +69,7 @@ static fastcall void k7_machine_check(struct pt_regs * regs, long error_code)
/* AMD K7 machine check is Intel like */
-void __init amd_mcheck_init(struct cpuinfo_x86 *c)
+void __devinit amd_mcheck_init(struct cpuinfo_x86 *c)
{
u32 l, h;
int i;
diff --git a/arch/i386/kernel/cpu/mcheck/mce.c b/arch/i386/kernel/cpu/mcheck/mce.c
index bf6d1aefafc0..2cf25d2ba0f1 100644
--- a/arch/i386/kernel/cpu/mcheck/mce.c
+++ b/arch/i386/kernel/cpu/mcheck/mce.c
@@ -16,7 +16,7 @@
#include "mce.h"
-int mce_disabled __initdata = 0;
+int mce_disabled __devinitdata = 0;
int nr_mce_banks;
EXPORT_SYMBOL_GPL(nr_mce_banks); /* non-fatal.o */
@@ -31,7 +31,7 @@ static fastcall void unexpected_machine_check(struct pt_regs * regs, long error_
void fastcall (*machine_check_vector)(struct pt_regs *, long error_code) = unexpected_machine_check;
/* This has to be run for each processor */
-void __init mcheck_init(struct cpuinfo_x86 *c)
+void __devinit mcheck_init(struct cpuinfo_x86 *c)
{
if (mce_disabled==1)
return;
diff --git a/arch/i386/kernel/cpu/mcheck/p4.c b/arch/i386/kernel/cpu/mcheck/p4.c
index 8b16ceb929b4..0abccb6fdf9e 100644
--- a/arch/i386/kernel/cpu/mcheck/p4.c
+++ b/arch/i386/kernel/cpu/mcheck/p4.c
@@ -78,7 +78,7 @@ fastcall void smp_thermal_interrupt(struct pt_regs *regs)
}
/* P4/Xeon Thermal regulation detect and init */
-static void __init intel_init_thermal(struct cpuinfo_x86 *c)
+static void __devinit intel_init_thermal(struct cpuinfo_x86 *c)
{
u32 l, h;
unsigned int cpu = smp_processor_id();
@@ -232,7 +232,7 @@ static fastcall void intel_machine_check(struct pt_regs * regs, long error_code)
}
-void __init intel_p4_mcheck_init(struct cpuinfo_x86 *c)
+void __devinit intel_p4_mcheck_init(struct cpuinfo_x86 *c)
{
u32 l, h;
int i;
diff --git a/arch/i386/kernel/cpu/mcheck/p5.c b/arch/i386/kernel/cpu/mcheck/p5.c
index c45a1b485c80..ec0614cd2925 100644
--- a/arch/i386/kernel/cpu/mcheck/p5.c
+++ b/arch/i386/kernel/cpu/mcheck/p5.c
@@ -29,7 +29,7 @@ static fastcall void pentium_machine_check(struct pt_regs * regs, long error_cod
}
/* Set up machine check reporting for processors with Intel style MCE */
-void __init intel_p5_mcheck_init(struct cpuinfo_x86 *c)
+void __devinit intel_p5_mcheck_init(struct cpuinfo_x86 *c)
{
u32 l, h;
diff --git a/arch/i386/kernel/cpu/mcheck/p6.c b/arch/i386/kernel/cpu/mcheck/p6.c
index 46640f8c2494..f01b73f947e1 100644
--- a/arch/i386/kernel/cpu/mcheck/p6.c
+++ b/arch/i386/kernel/cpu/mcheck/p6.c
@@ -80,7 +80,7 @@ static fastcall void intel_machine_check(struct pt_regs * regs, long error_code)
}
/* Set up machine check reporting for processors with Intel style MCE */
-void __init intel_p6_mcheck_init(struct cpuinfo_x86 *c)
+void __devinit intel_p6_mcheck_init(struct cpuinfo_x86 *c)
{
u32 l, h;
int i;
diff --git a/arch/i386/kernel/cpu/mcheck/winchip.c b/arch/i386/kernel/cpu/mcheck/winchip.c
index 753fa7acb984..7bae68fa168f 100644
--- a/arch/i386/kernel/cpu/mcheck/winchip.c
+++ b/arch/i386/kernel/cpu/mcheck/winchip.c
@@ -23,7 +23,7 @@ static fastcall void winchip_machine_check(struct pt_regs * regs, long error_cod
}
/* Set up machine check reporting on the Winchip C6 series */
-void __init winchip_mcheck_init(struct cpuinfo_x86 *c)
+void __devinit winchip_mcheck_init(struct cpuinfo_x86 *c)
{
u32 lo, hi;
machine_check_vector = winchip_machine_check;
diff --git a/arch/i386/kernel/cpu/mtrr/generic.c b/arch/i386/kernel/cpu/mtrr/generic.c
index f468a979e9aa..64d91f73a0a4 100644
--- a/arch/i386/kernel/cpu/mtrr/generic.c
+++ b/arch/i386/kernel/cpu/mtrr/generic.c
@@ -70,8 +70,7 @@ void __init get_mtrr_state(void)
/* Free resources associated with a struct mtrr_state */
void __init finalize_mtrr_state(void)
{
- if (mtrr_state.var_ranges)
- kfree(mtrr_state.var_ranges);
+ kfree(mtrr_state.var_ranges);
mtrr_state.var_ranges = NULL;
}
diff --git a/arch/i386/kernel/cpu/mtrr/main.c b/arch/i386/kernel/cpu/mtrr/main.c
index e1c2042b9b7e..d66b09e0c820 100644
--- a/arch/i386/kernel/cpu/mtrr/main.c
+++ b/arch/i386/kernel/cpu/mtrr/main.c
@@ -375,6 +375,19 @@ int mtrr_add_page(unsigned long base, unsigned long size,
return error;
}
+static int mtrr_check(unsigned long base, unsigned long size)
+{
+ if ((base & (PAGE_SIZE - 1)) || (size & (PAGE_SIZE - 1))) {
+ printk(KERN_WARNING
+ "mtrr: size and base must be multiples of 4 kiB\n");
+ printk(KERN_DEBUG
+ "mtrr: size: 0x%lx base: 0x%lx\n", size, base);
+ dump_stack();
+ return -1;
+ }
+ return 0;
+}
+
/**
* mtrr_add - Add a memory type region
* @base: Physical base address of region
@@ -415,11 +428,8 @@ int
mtrr_add(unsigned long base, unsigned long size, unsigned int type,
char increment)
{
- if ((base & (PAGE_SIZE - 1)) || (size & (PAGE_SIZE - 1))) {
- printk(KERN_WARNING "mtrr: size and base must be multiples of 4 kiB\n");
- printk(KERN_DEBUG "mtrr: size: 0x%lx base: 0x%lx\n", size, base);
+ if (mtrr_check(base, size))
return -EINVAL;
- }
return mtrr_add_page(base >> PAGE_SHIFT, size >> PAGE_SHIFT, type,
increment);
}
@@ -511,11 +521,8 @@ int mtrr_del_page(int reg, unsigned long base, unsigned long size)
int
mtrr_del(int reg, unsigned long base, unsigned long size)
{
- if ((base & (PAGE_SIZE - 1)) || (size & (PAGE_SIZE - 1))) {
- printk(KERN_INFO "mtrr: size and base must be multiples of 4 kiB\n");
- printk(KERN_DEBUG "mtrr: size: 0x%lx base: 0x%lx\n", size, base);
+ if (mtrr_check(base, size))
return -EINVAL;
- }
return mtrr_del_page(reg, base >> PAGE_SHIFT, size >> PAGE_SHIFT);
}
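
Both mtrr_add() and mtrr_del() now funnel through the shared mtrr_check() helper above, which rejects any base or size that is not a multiple of 4 KiB. A standalone sketch of the same alignment test:

    #include <stdio.h>

    #define PAGE_SIZE 4096UL

    /* The test mtrr_check() applies: base and size must both be
     * multiples of 4 KiB, i.e. have no bits below PAGE_SHIFT set. */
    static int page_aligned(unsigned long base, unsigned long size)
    {
        return !((base | size) & (PAGE_SIZE - 1));
    }

    int main(void)
    {
        printf("%d\n", page_aligned(0x100000UL, 0x20000UL));  /* 1: accepted */
        printf("%d\n", page_aligned(0x100800UL, 0x20000UL));  /* 0: -EINVAL */
        return 0;
    }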
diff --git a/arch/i386/kernel/cpu/proc.c b/arch/i386/kernel/cpu/proc.c
index 7323c19f354e..8bd77d948a84 100644
--- a/arch/i386/kernel/cpu/proc.c
+++ b/arch/i386/kernel/cpu/proc.c
@@ -86,7 +86,7 @@ static int show_cpuinfo(struct seq_file *m, void *v)
seq_printf(m, "stepping\t: unknown\n");
if ( cpu_has(c, X86_FEATURE_TSC) ) {
- seq_printf(m, "cpu MHz\t\t: %lu.%03lu\n",
+ seq_printf(m, "cpu MHz\t\t: %u.%03u\n",
cpu_khz / 1000, (cpu_khz % 1000));
}
diff --git a/arch/i386/kernel/cpuid.c b/arch/i386/kernel/cpuid.c
index 2e2756345bb2..4647db4ad6de 100644
--- a/arch/i386/kernel/cpuid.c
+++ b/arch/i386/kernel/cpuid.c
@@ -45,7 +45,7 @@
#include <asm/uaccess.h>
#include <asm/system.h>
-static struct class_simple *cpuid_class;
+static struct class *cpuid_class;
#ifdef CONFIG_SMP
@@ -158,12 +158,12 @@ static struct file_operations cpuid_fops = {
.open = cpuid_open,
};
-static int cpuid_class_simple_device_add(int i)
+static int cpuid_class_device_create(int i)
{
int err = 0;
struct class_device *class_err;
- class_err = class_simple_device_add(cpuid_class, MKDEV(CPUID_MAJOR, i), NULL, "cpu%d",i);
+ class_err = class_device_create(cpuid_class, MKDEV(CPUID_MAJOR, i), NULL, "cpu%d",i);
if (IS_ERR(class_err))
err = PTR_ERR(class_err);
return err;
@@ -175,10 +175,10 @@ static int __devinit cpuid_class_cpu_callback(struct notifier_block *nfb, unsign
switch (action) {
case CPU_ONLINE:
- cpuid_class_simple_device_add(cpu);
+ cpuid_class_device_create(cpu);
break;
case CPU_DEAD:
- class_simple_device_remove(MKDEV(CPUID_MAJOR, cpu));
+ class_device_destroy(cpuid_class, MKDEV(CPUID_MAJOR, cpu));
break;
}
return NOTIFY_OK;
@@ -200,13 +200,13 @@ static int __init cpuid_init(void)
err = -EBUSY;
goto out;
}
- cpuid_class = class_simple_create(THIS_MODULE, "cpuid");
+ cpuid_class = class_create(THIS_MODULE, "cpuid");
if (IS_ERR(cpuid_class)) {
err = PTR_ERR(cpuid_class);
goto out_chrdev;
}
for_each_online_cpu(i) {
- err = cpuid_class_simple_device_add(i);
+ err = cpuid_class_device_create(i);
if (err != 0)
goto out_class;
}
@@ -218,9 +218,9 @@ static int __init cpuid_init(void)
out_class:
i = 0;
for_each_online_cpu(i) {
- class_simple_device_remove(MKDEV(CPUID_MAJOR, i));
+ class_device_destroy(cpuid_class, MKDEV(CPUID_MAJOR, i));
}
- class_simple_destroy(cpuid_class);
+ class_destroy(cpuid_class);
out_chrdev:
unregister_chrdev(CPUID_MAJOR, "cpu/cpuid");
out:
@@ -232,8 +232,8 @@ static void __exit cpuid_exit(void)
int cpu = 0;
for_each_online_cpu(cpu)
- class_simple_device_remove(MKDEV(CPUID_MAJOR, cpu));
- class_simple_destroy(cpuid_class);
+ class_device_destroy(cpuid_class, MKDEV(CPUID_MAJOR, cpu));
+ class_destroy(cpuid_class);
unregister_chrdev(CPUID_MAJOR, "cpu/cpuid");
unregister_cpu_notifier(&cpuid_class_cpu_notifier);
}
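
The error path in cpuid_init() above uses the usual goto-unwind idiom: acquire resources in order, and on failure release only what was already acquired, in reverse. A generic sketch of the shape (the allocations stand in for register_chrdev()/class_create()):

    #include <stdlib.h>

    /* Generic goto-unwind: acquire in order, release in reverse on
     * failure. On success both resources stay held for the exit path. */
    static int init_example(void)
    {
        void *chrdev, *class;

        chrdev = malloc(16);            /* like register_chrdev() */
        if (!chrdev)
            goto out;
        class = malloc(16);             /* like class_create() */
        if (!class)
            goto out_chrdev;
        return 0;

    out_chrdev:
        free(chrdev);
    out:
        return -1;
    }

    int main(void) { return init_example() ? 1 : 0; }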
diff --git a/arch/i386/kernel/crash.c b/arch/i386/kernel/crash.c
new file mode 100644
index 000000000000..e5fab12f7926
--- /dev/null
+++ b/arch/i386/kernel/crash.c
@@ -0,0 +1,223 @@
+/*
+ * Architecture specific (i386) functions for kexec based crash dumps.
+ *
+ * Created by: Hariprasad Nellitheertha (hari@in.ibm.com)
+ *
+ * Copyright (C) IBM Corporation, 2004. All rights reserved.
+ *
+ */
+
+#include <linux/init.h>
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <linux/smp.h>
+#include <linux/irq.h>
+#include <linux/reboot.h>
+#include <linux/kexec.h>
+#include <linux/irq.h>
+#include <linux/delay.h>
+#include <linux/elf.h>
+#include <linux/elfcore.h>
+
+#include <asm/processor.h>
+#include <asm/hardirq.h>
+#include <asm/nmi.h>
+#include <asm/hw_irq.h>
+#include <asm/apic.h>
+#include <mach_ipi.h>
+
+
+note_buf_t crash_notes[NR_CPUS];
+/* This keeps track of which cpu is crashing. */
+static int crashing_cpu;
+
+static u32 *append_elf_note(u32 *buf, char *name, unsigned type, void *data,
+ size_t data_len)
+{
+ struct elf_note note;
+
+ note.n_namesz = strlen(name) + 1;
+ note.n_descsz = data_len;
+ note.n_type = type;
+ memcpy(buf, &note, sizeof(note));
+ buf += (sizeof(note) +3)/4;
+ memcpy(buf, name, note.n_namesz);
+ buf += (note.n_namesz + 3)/4;
+ memcpy(buf, data, note.n_descsz);
+ buf += (note.n_descsz + 3)/4;
+
+ return buf;
+}
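
The (len + 3)/4 arithmetic in append_elf_note() above advances a u32 cursor past the note header, name, and descriptor, each padded to a 4-byte boundary. A userspace model of the resulting record size (header fields mirror Elf32_Nhdr; the 144-byte descriptor is just an example):

    #include <stdio.h>
    #include <string.h>

    /* Size of one ELF note record: 12-byte header, then name and
     * descriptor each rounded up to a multiple of 4 bytes. */
    struct note_hdr { unsigned int namesz, descsz, type; };

    static unsigned int note_size(const char *name, unsigned int descsz)
    {
        unsigned int namesz = strlen(name) + 1;   /* includes NUL */
        return sizeof(struct note_hdr)
            + ((namesz + 3) & ~3u)
            + ((descsz + 3) & ~3u);
    }

    int main(void)
    {
        /* "CORE" + NUL is 5 bytes, padded to 8. */
        printf("%u\n", note_size("CORE", 144));   /* 12 + 8 + 144 = 164 */
        return 0;
    }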
+
+static void final_note(u32 *buf)
+{
+ struct elf_note note;
+
+ note.n_namesz = 0;
+ note.n_descsz = 0;
+ note.n_type = 0;
+ memcpy(buf, &note, sizeof(note));
+}
+
+static void crash_save_this_cpu(struct pt_regs *regs, int cpu)
+{
+ struct elf_prstatus prstatus;
+ u32 *buf;
+
+ if ((cpu < 0) || (cpu >= NR_CPUS))
+ return;
+
+ /* Using ELF notes here is opportunistic.
+ * I need a well defined structure format
+ * for the data I pass, and I need tags
+ * on the data to indicate what information I have
+ * squirrelled away. ELF notes happen to provide
+ * all of that, so there is no need to invent something new.
+ */
+ buf = &crash_notes[cpu][0];
+ memset(&prstatus, 0, sizeof(prstatus));
+ prstatus.pr_pid = current->pid;
+ elf_core_copy_regs(&prstatus.pr_reg, regs);
+ buf = append_elf_note(buf, "CORE", NT_PRSTATUS, &prstatus,
+ sizeof(prstatus));
+ final_note(buf);
+}
+
+static void crash_get_current_regs(struct pt_regs *regs)
+{
+ __asm__ __volatile__("movl %%ebx,%0" : "=m"(regs->ebx));
+ __asm__ __volatile__("movl %%ecx,%0" : "=m"(regs->ecx));
+ __asm__ __volatile__("movl %%edx,%0" : "=m"(regs->edx));
+ __asm__ __volatile__("movl %%esi,%0" : "=m"(regs->esi));
+ __asm__ __volatile__("movl %%edi,%0" : "=m"(regs->edi));
+ __asm__ __volatile__("movl %%ebp,%0" : "=m"(regs->ebp));
+ __asm__ __volatile__("movl %%eax,%0" : "=m"(regs->eax));
+ __asm__ __volatile__("movl %%esp,%0" : "=m"(regs->esp));
+ __asm__ __volatile__("movw %%ss, %%ax;" :"=a"(regs->xss));
+ __asm__ __volatile__("movw %%cs, %%ax;" :"=a"(regs->xcs));
+ __asm__ __volatile__("movw %%ds, %%ax;" :"=a"(regs->xds));
+ __asm__ __volatile__("movw %%es, %%ax;" :"=a"(regs->xes));
+ __asm__ __volatile__("pushfl; popl %0" :"=m"(regs->eflags));
+
+ regs->eip = (unsigned long)current_text_addr();
+}
+
+/* The CPU does not save ss and esp on the stack if execution is already
+ * running in kernel mode at the time of the NMI. This code
+ * fixes it.
+ */
+static void crash_setup_regs(struct pt_regs *newregs, struct pt_regs *oldregs)
+{
+ memcpy(newregs, oldregs, sizeof(*newregs));
+ newregs->esp = (unsigned long)&(oldregs->esp);
+ __asm__ __volatile__("xorl %eax, %eax;");
+ __asm__ __volatile__ ("movw %%ss, %%ax;" :"=a"(newregs->xss));
+}
+
+/* We may have saved_regs from where the error came from,
+ * or it is NULL if we got here via a direct panic().
+ */
+static void crash_save_self(struct pt_regs *saved_regs)
+{
+ struct pt_regs regs;
+ int cpu;
+
+ cpu = smp_processor_id();
+ if (saved_regs)
+ crash_setup_regs(&regs, saved_regs);
+ else
+ crash_get_current_regs(&regs);
+ crash_save_this_cpu(&regs, cpu);
+}
+
+#ifdef CONFIG_SMP
+static atomic_t waiting_for_crash_ipi;
+
+static int crash_nmi_callback(struct pt_regs *regs, int cpu)
+{
+ struct pt_regs fixed_regs;
+
+ /* Don't do anything if this handler is invoked on the crashing cpu.
+ * Otherwise, the system will completely hang. The crashing cpu can get
+ * an NMI if the system was initially booted with the nmi_watchdog parameter.
+ */
+ if (cpu == crashing_cpu)
+ return 1;
+ local_irq_disable();
+
+ if (!user_mode(regs)) {
+ crash_setup_regs(&fixed_regs, regs);
+ regs = &fixed_regs;
+ }
+ crash_save_this_cpu(regs, cpu);
+ disable_local_APIC();
+ atomic_dec(&waiting_for_crash_ipi);
+ /* Assume hlt works */
+ __asm__("hlt");
+ for(;;);
+
+ return 1;
+}
+
+/*
+ * By using the NMI code instead of a vector we just sneak through the
+ * word generator, coming out with just what we want. AND it does
+ * not matter if clustered_apic_mode is set or not.
+ */
+static void smp_send_nmi_allbutself(void)
+{
+ send_IPI_allbutself(APIC_DM_NMI);
+}
+
+static void nmi_shootdown_cpus(void)
+{
+ unsigned long msecs;
+
+ atomic_set(&waiting_for_crash_ipi, num_online_cpus() - 1);
+ /* Would it be better to replace the trap vector here? */
+ set_nmi_callback(crash_nmi_callback);
+ /* Ensure the new callback function is set before sending
+ * out the NMI
+ */
+ wmb();
+
+ smp_send_nmi_allbutself();
+
+ msecs = 1000; /* Wait at most a second for the other cpus to stop */
+ while ((atomic_read(&waiting_for_crash_ipi) > 0) && msecs) {
+ mdelay(1);
+ msecs--;
+ }
+
+ /* Leave the nmi callback set */
+ disable_local_APIC();
+}
+#else
+static void nmi_shootdown_cpus(void)
+{
+ /* There are no cpus to shootdown */
+}
+#endif
+
+void machine_crash_shutdown(struct pt_regs *regs)
+{
+ /* This function is only called after the system
+ * has panicked or is otherwise in a critical state.
+ * The minimum amount of code to allow a kexec'd kernel
+ * to run successfully needs to happen here.
+ *
+ * In practice this means shooting down the other cpus in
+ * an SMP system.
+ */
+ /* The kernel is broken so disable interrupts */
+ local_irq_disable();
+
+ /* Make a note of crashing cpu. Will be used in NMI callback.*/
+ crashing_cpu = smp_processor_id();
+ nmi_shootdown_cpus();
+ lapic_shutdown();
+#if defined(CONFIG_X86_IO_APIC)
+ disable_IO_APIC();
+#endif
+ crash_save_self(regs);
+}
diff --git a/arch/i386/kernel/dmi_scan.c b/arch/i386/kernel/dmi_scan.c
index 6ed7e28f306c..a3cdf894302b 100644
--- a/arch/i386/kernel/dmi_scan.c
+++ b/arch/i386/kernel/dmi_scan.c
@@ -1,22 +1,15 @@
#include <linux/types.h>
-#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/init.h>
#include <linux/module.h>
-#include <linux/slab.h>
-#include <linux/acpi.h>
-#include <asm/io.h>
-#include <linux/pm.h>
-#include <asm/system.h>
#include <linux/dmi.h>
#include <linux/bootmem.h>
-struct dmi_header
-{
- u8 type;
- u8 length;
- u16 handle;
+struct dmi_header {
+ u8 type;
+ u8 length;
+ u16 handle;
};
#undef DMI_DEBUG
@@ -29,15 +22,13 @@ struct dmi_header
static char * __init dmi_string(struct dmi_header *dm, u8 s)
{
- u8 *bp=(u8 *)dm;
- bp+=dm->length;
- if(!s)
+ u8 *bp = ((u8 *) dm) + dm->length;
+
+ if (!s)
return "";
s--;
- while(s>0 && *bp)
- {
- bp+=strlen(bp);
- bp++;
+ while (s > 0 && *bp) {
+ bp += strlen(bp) + 1;
s--;
}
return bp;
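
dmi_string() walks the NUL-separated string set that follows a structure's formatted area; string number s is found by skipping s-1 terminated strings. A userspace sketch of the same walk:

    #include <stdio.h>
    #include <string.h>

    /* SMBIOS strings sit back to back after the formatted area, each
     * NUL-terminated; index 0 means "no string". */
    static const char *string_set_get(const char *bp, unsigned int s)
    {
        if (!s)
            return "";
        while (--s && *bp)
            bp += strlen(bp) + 1;
        return bp;
    }

    int main(void)
    {
        const char set[] = "Vendor\0Version\0Serial\0";
        printf("%s\n", string_set_get(set, 2));   /* prints "Version" */
        return 0;
    }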
@@ -47,16 +38,14 @@ static char * __init dmi_string(struct dmi_header *dm, u8 s)
* We have to be cautious here. We have seen BIOSes with DMI pointers
* pointing to completely the wrong place for example
*/
-
-static int __init dmi_table(u32 base, int len, int num, void (*decode)(struct dmi_header *))
+static int __init dmi_table(u32 base, int len, int num,
+ void (*decode)(struct dmi_header *))
{
- u8 *buf;
- struct dmi_header *dm;
- u8 *data;
- int i=0;
+ u8 *buf, *data;
+ int i = 0;
buf = bt_ioremap(base, len);
- if(buf==NULL)
+ if (buf == NULL)
return -1;
data = buf;
@@ -65,36 +54,34 @@ static int __init dmi_table(u32 base, int len, int num, void (*decode)(struct dm
* Stop when we see all the items the table claimed to have
* OR we run off the end of the table (also happens)
*/
-
- while(i<num && data-buf+sizeof(struct dmi_header)<=len)
- {
- dm=(struct dmi_header *)data;
+ while ((i < num) && (data - buf + sizeof(struct dmi_header)) <= len) {
+ struct dmi_header *dm = (struct dmi_header *)data;
/*
 * We want to know the total length (formatted area and strings)
* before decoding to make sure we won't run off the table in
* dmi_decode or dmi_string
*/
- data+=dm->length;
- while(data-buf<len-1 && (data[0] || data[1]))
+ data += dm->length;
+ while ((data - buf < len - 1) && (data[0] || data[1]))
data++;
- if(data-buf<len-1)
+ if (data - buf < len - 1)
decode(dm);
- data+=2;
+ data += 2;
i++;
}
bt_iounmap(buf, len);
return 0;
}
-
-inline static int __init dmi_checksum(u8 *buf)
+static int __init dmi_checksum(u8 *buf)
{
- u8 sum=0;
+ u8 sum = 0;
int a;
- for(a=0; a<15; a++)
- sum+=buf[a];
- return (sum==0);
+ for (a = 0; a < 15; a++)
+ sum += buf[a];
+
+ return sum == 0;
}
static int __init dmi_iterate(void (*decode)(struct dmi_header *))
@@ -110,28 +97,30 @@ static int __init dmi_iterate(void (*decode)(struct dmi_header *))
p = ioremap(0xF0000, 0x10000);
if (p == NULL)
return -1;
+
for (q = p; q < p + 0x10000; q += 16) {
memcpy_fromio(buf, q, 15);
- if(memcmp(buf, "_DMI_", 5)==0 && dmi_checksum(buf))
- {
- u16 num=buf[13]<<8|buf[12];
- u16 len=buf[7]<<8|buf[6];
- u32 base=buf[11]<<24|buf[10]<<16|buf[9]<<8|buf[8];
+ if ((memcmp(buf, "_DMI_", 5) == 0) && dmi_checksum(buf)) {
+ u16 num = (buf[13] << 8) | buf[12];
+ u16 len = (buf[7] << 8) | buf[6];
+ u32 base = (buf[11] << 24) | (buf[10] << 16) |
+ (buf[9] << 8) | buf[8];
/*
* DMI version 0.0 means that the real version is taken from
* the SMBIOS version, which we don't know at this point.
*/
- if(buf[14]!=0)
+ if (buf[14] != 0)
printk(KERN_INFO "DMI %d.%d present.\n",
- buf[14]>>4, buf[14]&0x0F);
+ buf[14] >> 4, buf[14] & 0xF);
else
printk(KERN_INFO "DMI present.\n");
+
dmi_printk((KERN_INFO "%d structures occupying %d bytes.\n",
num, len));
- dmi_printk((KERN_INFO "DMI table at 0x%08X.\n",
- base));
- if(dmi_table(base,len, num, decode)==0)
+ dmi_printk((KERN_INFO "DMI table at 0x%08X.\n", base));
+
+ if (dmi_table(base,len, num, decode) == 0)
return 0;
}
}
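
dmi_iterate() scans 0xF0000-0xFFFFF on 16-byte boundaries for a "_DMI_" signature whose first 15 bytes sum to zero modulo 256. The same anchor test, over an ordinary buffer:

    #include <stdio.h>
    #include <string.h>

    /* Valid anchor: "_DMI_" signature plus a 15-byte checksum of zero. */
    static int dmi_anchor_ok(const unsigned char *p)
    {
        unsigned char sum = 0;
        int i;

        if (memcmp(p, "_DMI_", 5) != 0)
            return 0;
        for (i = 0; i < 15; i++)
            sum += p[i];
        return sum == 0;
    }

    int main(void)
    {
        unsigned char buf[16] = "_DMI_";
        unsigned char sum = 0;
        int i;

        for (i = 0; i < 14; i++)   /* pick byte 14 so the sum wraps to 0 */
            sum += buf[i];
        buf[14] = (unsigned char)(0x100 - sum);
        printf("%d\n", dmi_anchor_ok(buf));   /* prints 1 */
        return 0;
    }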
@@ -143,16 +132,17 @@ static char *dmi_ident[DMI_STRING_MAX];
/*
* Save a DMI string
*/
-
static void __init dmi_save_ident(struct dmi_header *dm, int slot, int string)
{
char *d = (char*)dm;
char *p = dmi_string(dm, d[string]);
- if(p==NULL || *p == 0)
+
+ if (p == NULL || *p == 0)
return;
if (dmi_ident[slot])
return;
- dmi_ident[slot] = alloc_bootmem(strlen(p)+1);
+
+ dmi_ident[slot] = alloc_bootmem(strlen(p) + 1);
if(dmi_ident[slot])
strcpy(dmi_ident[slot], p);
else
@@ -160,281 +150,47 @@ static void __init dmi_save_ident(struct dmi_header *dm, int slot, int string)
}
/*
- * Ugly compatibility crap.
- */
-#define dmi_blacklist dmi_system_id
-#define NO_MATCH { DMI_NONE, NULL}
-#define MATCH DMI_MATCH
-
-/*
- * Toshiba keyboard likes to repeat keys when they are not repeated.
- */
-
-static __init int broken_toshiba_keyboard(struct dmi_blacklist *d)
-{
- printk(KERN_WARNING "Toshiba with broken keyboard detected. If your keyboard sometimes generates 3 keypresses instead of one, see http://davyd.ucc.asn.au/projects/toshiba/README\n");
- return 0;
-}
-
-
-#ifdef CONFIG_ACPI_SLEEP
-static __init int reset_videomode_after_s3(struct dmi_blacklist *d)
-{
- /* See acpi_wakeup.S */
- extern long acpi_video_flags;
- acpi_video_flags |= 2;
- return 0;
-}
-#endif
-
-
-#ifdef CONFIG_ACPI_BOOT
-extern int acpi_force;
-
-static __init __attribute__((unused)) int dmi_disable_acpi(struct dmi_blacklist *d)
-{
- if (!acpi_force) {
- printk(KERN_NOTICE "%s detected: acpi off\n",d->ident);
- disable_acpi();
- } else {
- printk(KERN_NOTICE
- "Warning: DMI blacklist says broken, but acpi forced\n");
- }
- return 0;
-}
-
-/*
- * Limit ACPI to CPU enumeration for HT
- */
-static __init __attribute__((unused)) int force_acpi_ht(struct dmi_blacklist *d)
-{
- if (!acpi_force) {
- printk(KERN_NOTICE "%s detected: force use of acpi=ht\n", d->ident);
- disable_acpi();
- acpi_ht = 1;
- } else {
- printk(KERN_NOTICE
- "Warning: acpi=force overrules DMI blacklist: acpi=ht\n");
- }
- return 0;
-}
-#endif
-
-#ifdef CONFIG_ACPI_PCI
-static __init int disable_acpi_irq(struct dmi_blacklist *d)
-{
- if (!acpi_force) {
- printk(KERN_NOTICE "%s detected: force use of acpi=noirq\n",
- d->ident);
- acpi_noirq_set();
- }
- return 0;
-}
-static __init int disable_acpi_pci(struct dmi_blacklist *d)
-{
- if (!acpi_force) {
- printk(KERN_NOTICE "%s detected: force use of pci=noacpi\n",
- d->ident);
- acpi_disable_pci();
- }
- return 0;
-}
-#endif
-
-/*
- * Process the DMI blacklists
- */
-
-
-/*
- * This will be expanded over time to force things like the APM
- * interrupt mask settings according to the laptop
- */
-
-static __initdata struct dmi_blacklist dmi_blacklist[]={
-
- { broken_toshiba_keyboard, "Toshiba Satellite 4030cdt", { /* Keyboard generates spurious repeats */
- MATCH(DMI_PRODUCT_NAME, "S4030CDT/4.3"),
- NO_MATCH, NO_MATCH, NO_MATCH
- } },
-#ifdef CONFIG_ACPI_SLEEP
- { reset_videomode_after_s3, "Toshiba Satellite 4030cdt", { /* Reset video mode after returning from ACPI S3 sleep */
- MATCH(DMI_PRODUCT_NAME, "S4030CDT/4.3"),
- NO_MATCH, NO_MATCH, NO_MATCH
- } },
-#endif
-
-#ifdef CONFIG_ACPI_BOOT
- /*
- * If your system is blacklisted here, but you find that acpi=force
- * works for you, please contact acpi-devel@sourceforge.net
- */
-
- /*
- * Boxes that need ACPI disabled
- */
-
- { dmi_disable_acpi, "IBM Thinkpad", {
- MATCH(DMI_BOARD_VENDOR, "IBM"),
- MATCH(DMI_BOARD_NAME, "2629H1G"),
- NO_MATCH, NO_MATCH }},
-
- /*
- * Boxes that need acpi=ht
- */
-
- { force_acpi_ht, "FSC Primergy T850", {
- MATCH(DMI_SYS_VENDOR, "FUJITSU SIEMENS"),
- MATCH(DMI_PRODUCT_NAME, "PRIMERGY T850"),
- NO_MATCH, NO_MATCH }},
-
- { force_acpi_ht, "DELL GX240", {
- MATCH(DMI_BOARD_VENDOR, "Dell Computer Corporation"),
- MATCH(DMI_BOARD_NAME, "OptiPlex GX240"),
- NO_MATCH, NO_MATCH }},
-
- { force_acpi_ht, "HP VISUALIZE NT Workstation", {
- MATCH(DMI_BOARD_VENDOR, "Hewlett-Packard"),
- MATCH(DMI_PRODUCT_NAME, "HP VISUALIZE NT Workstation"),
- NO_MATCH, NO_MATCH }},
-
- { force_acpi_ht, "Compaq Workstation W8000", {
- MATCH(DMI_SYS_VENDOR, "Compaq"),
- MATCH(DMI_PRODUCT_NAME, "Workstation W8000"),
- NO_MATCH, NO_MATCH }},
-
- { force_acpi_ht, "ASUS P4B266", {
- MATCH(DMI_BOARD_VENDOR, "ASUSTeK Computer INC."),
- MATCH(DMI_BOARD_NAME, "P4B266"),
- NO_MATCH, NO_MATCH }},
-
- { force_acpi_ht, "ASUS P2B-DS", {
- MATCH(DMI_BOARD_VENDOR, "ASUSTeK Computer INC."),
- MATCH(DMI_BOARD_NAME, "P2B-DS"),
- NO_MATCH, NO_MATCH }},
-
- { force_acpi_ht, "ASUS CUR-DLS", {
- MATCH(DMI_BOARD_VENDOR, "ASUSTeK Computer INC."),
- MATCH(DMI_BOARD_NAME, "CUR-DLS"),
- NO_MATCH, NO_MATCH }},
-
- { force_acpi_ht, "ABIT i440BX-W83977", {
- MATCH(DMI_BOARD_VENDOR, "ABIT <http://www.abit.com>"),
- MATCH(DMI_BOARD_NAME, "i440BX-W83977 (BP6)"),
- NO_MATCH, NO_MATCH }},
-
- { force_acpi_ht, "IBM Bladecenter", {
- MATCH(DMI_BOARD_VENDOR, "IBM"),
- MATCH(DMI_BOARD_NAME, "IBM eServer BladeCenter HS20"),
- NO_MATCH, NO_MATCH }},
-
- { force_acpi_ht, "IBM eServer xSeries 360", {
- MATCH(DMI_BOARD_VENDOR, "IBM"),
- MATCH(DMI_BOARD_NAME, "eServer xSeries 360"),
- NO_MATCH, NO_MATCH }},
-
- { force_acpi_ht, "IBM eserver xSeries 330", {
- MATCH(DMI_BOARD_VENDOR, "IBM"),
- MATCH(DMI_BOARD_NAME, "eserver xSeries 330"),
- NO_MATCH, NO_MATCH }},
-
- { force_acpi_ht, "IBM eserver xSeries 440", {
- MATCH(DMI_BOARD_VENDOR, "IBM"),
- MATCH(DMI_PRODUCT_NAME, "eserver xSeries 440"),
- NO_MATCH, NO_MATCH }},
-
-#endif // CONFIG_ACPI_BOOT
-
-#ifdef CONFIG_ACPI_PCI
- /*
- * Boxes that need ACPI PCI IRQ routing disabled
- */
-
- { disable_acpi_irq, "ASUS A7V", {
- MATCH(DMI_BOARD_VENDOR, "ASUSTeK Computer INC"),
- MATCH(DMI_BOARD_NAME, "<A7V>"),
- /* newer BIOS, Revision 1011, does work */
- MATCH(DMI_BIOS_VERSION, "ASUS A7V ACPI BIOS Revision 1007"),
- NO_MATCH }},
-
- /*
- * Boxes that need ACPI PCI IRQ routing and PCI scan disabled
- */
- { disable_acpi_pci, "ASUS PR-DLS", { /* _BBN 0 bug */
- MATCH(DMI_BOARD_VENDOR, "ASUSTeK Computer INC."),
- MATCH(DMI_BOARD_NAME, "PR-DLS"),
- MATCH(DMI_BIOS_VERSION, "ASUS PR-DLS ACPI BIOS Revision 1010"),
- MATCH(DMI_BIOS_DATE, "03/21/2003") }},
-
- { disable_acpi_pci, "Acer TravelMate 36x Laptop", {
- MATCH(DMI_SYS_VENDOR, "Acer"),
- MATCH(DMI_PRODUCT_NAME, "TravelMate 360"),
- NO_MATCH, NO_MATCH
- } },
-
-#endif
-
- { NULL, }
-};
-
-/*
* Process a DMI table entry. Right now all we care about are the BIOS
* and machine entries. For 2.5 we should pull the smbus controller info
* out of here.
*/
-
static void __init dmi_decode(struct dmi_header *dm)
{
-#ifdef DMI_DEBUG
- u8 *data = (u8 *)dm;
-#endif
+ u8 *data __attribute__((__unused__)) = (u8 *)dm;
- switch(dm->type)
- {
- case 0:
- dmi_printk(("BIOS Vendor: %s\n",
- dmi_string(dm, data[4])));
- dmi_save_ident(dm, DMI_BIOS_VENDOR, 4);
- dmi_printk(("BIOS Version: %s\n",
- dmi_string(dm, data[5])));
- dmi_save_ident(dm, DMI_BIOS_VERSION, 5);
- dmi_printk(("BIOS Release: %s\n",
- dmi_string(dm, data[8])));
- dmi_save_ident(dm, DMI_BIOS_DATE, 8);
- break;
- case 1:
- dmi_printk(("System Vendor: %s\n",
- dmi_string(dm, data[4])));
- dmi_save_ident(dm, DMI_SYS_VENDOR, 4);
- dmi_printk(("Product Name: %s\n",
- dmi_string(dm, data[5])));
- dmi_save_ident(dm, DMI_PRODUCT_NAME, 5);
- dmi_printk(("Version: %s\n",
- dmi_string(dm, data[6])));
- dmi_save_ident(dm, DMI_PRODUCT_VERSION, 6);
- dmi_printk(("Serial Number: %s\n",
- dmi_string(dm, data[7])));
- break;
- case 2:
- dmi_printk(("Board Vendor: %s\n",
- dmi_string(dm, data[4])));
- dmi_save_ident(dm, DMI_BOARD_VENDOR, 4);
- dmi_printk(("Board Name: %s\n",
- dmi_string(dm, data[5])));
- dmi_save_ident(dm, DMI_BOARD_NAME, 5);
- dmi_printk(("Board Version: %s\n",
- dmi_string(dm, data[6])));
- dmi_save_ident(dm, DMI_BOARD_VERSION, 6);
- break;
+ switch(dm->type) {
+ case 0:
+ dmi_printk(("BIOS Vendor: %s\n", dmi_string(dm, data[4])));
+ dmi_save_ident(dm, DMI_BIOS_VENDOR, 4);
+ dmi_printk(("BIOS Version: %s\n", dmi_string(dm, data[5])));
+ dmi_save_ident(dm, DMI_BIOS_VERSION, 5);
+ dmi_printk(("BIOS Release: %s\n", dmi_string(dm, data[8])));
+ dmi_save_ident(dm, DMI_BIOS_DATE, 8);
+ break;
+ case 1:
+ dmi_printk(("System Vendor: %s\n", dmi_string(dm, data[4])));
+ dmi_save_ident(dm, DMI_SYS_VENDOR, 4);
+ dmi_printk(("Product Name: %s\n", dmi_string(dm, data[5])));
+ dmi_save_ident(dm, DMI_PRODUCT_NAME, 5);
+ dmi_printk(("Version: %s\n", dmi_string(dm, data[6])));
+ dmi_save_ident(dm, DMI_PRODUCT_VERSION, 6);
+ dmi_printk(("Serial Number: %s\n", dmi_string(dm, data[7])));
+ dmi_save_ident(dm, DMI_PRODUCT_SERIAL, 7);
+ break;
+ case 2:
+ dmi_printk(("Board Vendor: %s\n", dmi_string(dm, data[4])));
+ dmi_save_ident(dm, DMI_BOARD_VENDOR, 4);
+ dmi_printk(("Board Name: %s\n", dmi_string(dm, data[5])));
+ dmi_save_ident(dm, DMI_BOARD_NAME, 5);
+ dmi_printk(("Board Version: %s\n", dmi_string(dm, data[6])));
+ dmi_save_ident(dm, DMI_BOARD_VERSION, 6);
+ break;
}
}
void __init dmi_scan_machine(void)
{
- int err = dmi_iterate(dmi_decode);
- if(err == 0)
- dmi_check_system(dmi_blacklist);
- else
+ if (dmi_iterate(dmi_decode))
printk(KERN_INFO "DMI not present.\n");
}
@@ -470,7 +226,6 @@ fail: d++;
return count;
}
-
EXPORT_SYMBOL(dmi_check_system);
/**
@@ -480,8 +235,8 @@ EXPORT_SYMBOL(dmi_check_system);
* Returns one DMI data value, can be used to perform
* complex DMI data checks.
*/
-char * dmi_get_system_info(int field)
+char *dmi_get_system_info(int field)
{
return dmi_ident[field];
}
-
+EXPORT_SYMBOL(dmi_get_system_info);
diff --git a/arch/i386/kernel/efi.c b/arch/i386/kernel/efi.c
index f732f427b418..385883ea8c19 100644
--- a/arch/i386/kernel/efi.c
+++ b/arch/i386/kernel/efi.c
@@ -30,6 +30,7 @@
#include <linux/ioport.h>
#include <linux/module.h>
#include <linux/efi.h>
+#include <linux/kexec.h>
#include <asm/setup.h>
#include <asm/io.h>
@@ -598,6 +599,9 @@ efi_initialize_iomem_resources(struct resource *code_resource,
if (md->type == EFI_CONVENTIONAL_MEMORY) {
request_resource(res, code_resource);
request_resource(res, data_resource);
+#ifdef CONFIG_KEXEC
+ request_resource(res, &crashk_res);
+#endif
}
}
}
diff --git a/arch/i386/kernel/head.S b/arch/i386/kernel/head.S
index e966fc8c44c4..4477bb107098 100644
--- a/arch/i386/kernel/head.S
+++ b/arch/i386/kernel/head.S
@@ -299,7 +299,6 @@ is386: movl $2,%ecx # set MP
movl %eax,%cr0
call check_x87
- incb ready
lgdt cpu_gdt_descr
lidt idt_descr
ljmp $(__KERNEL_CS),$1f
@@ -316,8 +315,9 @@ is386: movl $2,%ecx # set MP
lldt %ax
cld # gcc2 wants the direction flag cleared at all times
#ifdef CONFIG_SMP
- movb ready, %cl
- cmpb $1,%cl
+ movb ready, %cl
+ movb $1, ready
+ cmpb $0,%cl
je 1f # the first CPU calls start_kernel
# all other CPUs call initialize_secondary
call initialize_secondary
diff --git a/arch/i386/kernel/i386_ksyms.c b/arch/i386/kernel/i386_ksyms.c
index 903190a4b3ff..180f070d03cb 100644
--- a/arch/i386/kernel/i386_ksyms.c
+++ b/arch/i386/kernel/i386_ksyms.c
@@ -1,97 +1,17 @@
#include <linux/config.h>
#include <linux/module.h>
-#include <linux/smp.h>
-#include <linux/user.h>
-#include <linux/elfcore.h>
-#include <linux/mca.h>
-#include <linux/sched.h>
-#include <linux/in6.h>
-#include <linux/interrupt.h>
-#include <linux/smp_lock.h>
-#include <linux/pm.h>
-#include <linux/pci.h>
-#include <linux/apm_bios.h>
-#include <linux/kernel.h>
-#include <linux/string.h>
-#include <linux/tty.h>
-#include <linux/highmem.h>
-#include <linux/time.h>
-
-#include <asm/semaphore.h>
-#include <asm/processor.h>
-#include <asm/i387.h>
-#include <asm/uaccess.h>
#include <asm/checksum.h>
-#include <asm/io.h>
-#include <asm/delay.h>
-#include <asm/irq.h>
-#include <asm/mmx.h>
#include <asm/desc.h>
-#include <asm/pgtable.h>
-#include <asm/tlbflush.h>
-#include <asm/nmi.h>
-#include <asm/ist.h>
-#include <asm/kdebug.h>
-
-extern void dump_thread(struct pt_regs *, struct user *);
-extern spinlock_t rtc_lock;
/* This is definitely a GPL-only symbol */
EXPORT_SYMBOL_GPL(cpu_gdt_table);
-#if defined(CONFIG_APM_MODULE)
-extern void machine_real_restart(unsigned char *, int);
-EXPORT_SYMBOL(machine_real_restart);
-extern void default_idle(void);
-EXPORT_SYMBOL(default_idle);
-#endif
-
-#ifdef CONFIG_SMP
-extern void FASTCALL( __write_lock_failed(rwlock_t *rw));
-extern void FASTCALL( __read_lock_failed(rwlock_t *rw));
-#endif
-
-#if defined(CONFIG_BLK_DEV_IDE) || defined(CONFIG_BLK_DEV_HD) || defined(CONFIG_BLK_DEV_IDE_MODULE) || defined(CONFIG_BLK_DEV_HD_MODULE)
-extern struct drive_info_struct drive_info;
-EXPORT_SYMBOL(drive_info);
-#endif
-
-extern unsigned long cpu_khz;
-extern unsigned long get_cmos_time(void);
-
-/* platform dependent support */
-EXPORT_SYMBOL(boot_cpu_data);
-#ifdef CONFIG_DISCONTIGMEM
-EXPORT_SYMBOL(node_data);
-EXPORT_SYMBOL(physnode_map);
-#endif
-#ifdef CONFIG_X86_NUMAQ
-EXPORT_SYMBOL(xquad_portio);
-#endif
-EXPORT_SYMBOL(dump_thread);
-EXPORT_SYMBOL(dump_fpu);
-EXPORT_SYMBOL_GPL(kernel_fpu_begin);
-EXPORT_SYMBOL(__ioremap);
-EXPORT_SYMBOL(ioremap_nocache);
-EXPORT_SYMBOL(iounmap);
-EXPORT_SYMBOL(kernel_thread);
-EXPORT_SYMBOL(pm_idle);
-EXPORT_SYMBOL(pm_power_off);
-EXPORT_SYMBOL(get_cmos_time);
-EXPORT_SYMBOL(cpu_khz);
-EXPORT_SYMBOL(apm_info);
-
EXPORT_SYMBOL(__down_failed);
EXPORT_SYMBOL(__down_failed_interruptible);
EXPORT_SYMBOL(__down_failed_trylock);
EXPORT_SYMBOL(__up_wakeup);
/* Networking helper routines. */
EXPORT_SYMBOL(csum_partial_copy_generic);
-/* Delay loops */
-EXPORT_SYMBOL(__ndelay);
-EXPORT_SYMBOL(__udelay);
-EXPORT_SYMBOL(__delay);
-EXPORT_SYMBOL(__const_udelay);
EXPORT_SYMBOL(__get_user_1);
EXPORT_SYMBOL(__get_user_2);
@@ -105,87 +25,11 @@ EXPORT_SYMBOL(__put_user_8);
EXPORT_SYMBOL(strpbrk);
EXPORT_SYMBOL(strstr);
-EXPORT_SYMBOL(strncpy_from_user);
-EXPORT_SYMBOL(__strncpy_from_user);
-EXPORT_SYMBOL(clear_user);
-EXPORT_SYMBOL(__clear_user);
-EXPORT_SYMBOL(__copy_from_user_ll);
-EXPORT_SYMBOL(__copy_to_user_ll);
-EXPORT_SYMBOL(strnlen_user);
-
-EXPORT_SYMBOL(dma_alloc_coherent);
-EXPORT_SYMBOL(dma_free_coherent);
-
-#ifdef CONFIG_PCI
-EXPORT_SYMBOL(pci_mem_start);
-#endif
-
-#ifdef CONFIG_PCI_BIOS
-EXPORT_SYMBOL(pcibios_set_irq_routing);
-EXPORT_SYMBOL(pcibios_get_irq_routing_table);
-#endif
-
-#ifdef CONFIG_X86_USE_3DNOW
-EXPORT_SYMBOL(_mmx_memcpy);
-EXPORT_SYMBOL(mmx_clear_page);
-EXPORT_SYMBOL(mmx_copy_page);
-#endif
-
-#ifdef CONFIG_X86_HT
-EXPORT_SYMBOL(smp_num_siblings);
-EXPORT_SYMBOL(cpu_sibling_map);
-#endif
-
#ifdef CONFIG_SMP
-EXPORT_SYMBOL(cpu_data);
-EXPORT_SYMBOL(cpu_online_map);
-EXPORT_SYMBOL(cpu_callout_map);
+extern void FASTCALL( __write_lock_failed(rwlock_t *rw));
+extern void FASTCALL( __read_lock_failed(rwlock_t *rw));
EXPORT_SYMBOL(__write_lock_failed);
EXPORT_SYMBOL(__read_lock_failed);
-
-/* Global SMP stuff */
-EXPORT_SYMBOL(smp_call_function);
-
-/* TLB flushing */
-EXPORT_SYMBOL(flush_tlb_page);
-#endif
-
-#ifdef CONFIG_X86_IO_APIC
-EXPORT_SYMBOL(IO_APIC_get_PCI_irq_vector);
-#endif
-
-#ifdef CONFIG_MCA
-EXPORT_SYMBOL(machine_id);
-#endif
-
-#ifdef CONFIG_VT
-EXPORT_SYMBOL(screen_info);
-#endif
-
-EXPORT_SYMBOL(get_wchan);
-
-EXPORT_SYMBOL(rtc_lock);
-
-EXPORT_SYMBOL_GPL(set_nmi_callback);
-EXPORT_SYMBOL_GPL(unset_nmi_callback);
-
-EXPORT_SYMBOL(register_die_notifier);
-#ifdef CONFIG_HAVE_DEC_LOCK
-EXPORT_SYMBOL(_atomic_dec_and_lock);
-#endif
-
-EXPORT_SYMBOL(__PAGE_KERNEL);
-
-#ifdef CONFIG_HIGHMEM
-EXPORT_SYMBOL(kmap);
-EXPORT_SYMBOL(kunmap);
-EXPORT_SYMBOL(kmap_atomic);
-EXPORT_SYMBOL(kunmap_atomic);
-EXPORT_SYMBOL(kmap_atomic_to_page);
-#endif
-
-#if defined(CONFIG_X86_SPEEDSTEP_SMI) || defined(CONFIG_X86_SPEEDSTEP_SMI_MODULE)
-EXPORT_SYMBOL(ist_info);
#endif
EXPORT_SYMBOL(csum_partial);
diff --git a/arch/i386/kernel/i387.c b/arch/i386/kernel/i387.c
index c55e037f08f7..b817168d9c62 100644
--- a/arch/i386/kernel/i387.c
+++ b/arch/i386/kernel/i387.c
@@ -10,6 +10,7 @@
#include <linux/config.h>
#include <linux/sched.h>
+#include <linux/module.h>
#include <asm/processor.h>
#include <asm/i387.h>
#include <asm/math_emu.h>
@@ -79,6 +80,7 @@ void kernel_fpu_begin(void)
}
clts();
}
+EXPORT_SYMBOL_GPL(kernel_fpu_begin);
void restore_fpu( struct task_struct *tsk )
{
@@ -526,6 +528,7 @@ int dump_fpu( struct pt_regs *regs, struct user_i387_struct *fpu )
return fpvalid;
}
+EXPORT_SYMBOL(dump_fpu);
int dump_task_fpu(struct task_struct *tsk, struct user_i387_struct *fpu)
{
diff --git a/arch/i386/kernel/i8259.c b/arch/i386/kernel/i8259.c
index 2c4813b47e57..178f4e9bac9d 100644
--- a/arch/i386/kernel/i8259.c
+++ b/arch/i386/kernel/i8259.c
@@ -268,10 +268,22 @@ static int i8259A_suspend(struct sys_device *dev, pm_message_t state)
return 0;
}
+static int i8259A_shutdown(struct sys_device *dev)
+{
+ /* Put the i8259A into a quiescent state that
+ * the kernel initialization code can get it
+ * out of.
+ */
+ outb(0xff, 0x21); /* mask all of 8259A-1 */
+	outb(0xff, 0xA1);	/* mask all of 8259A-2 */
+ return 0;
+}
+
static struct sysdev_class i8259_sysdev_class = {
set_kset_name("i8259"),
.suspend = i8259A_suspend,
.resume = i8259A_resume,
+ .shutdown = i8259A_shutdown,
};
static struct sys_device device_i8259A = {
diff --git a/arch/i386/kernel/io_apic.c b/arch/i386/kernel/io_apic.c
index 7a324e8b86f9..35eb8e29c485 100644
--- a/arch/i386/kernel/io_apic.c
+++ b/arch/i386/kernel/io_apic.c
@@ -31,7 +31,7 @@
#include <linux/mc146818rtc.h>
#include <linux/compiler.h>
#include <linux/acpi.h>
-
+#include <linux/module.h>
#include <linux/sysdev.h>
#include <asm/io.h>
#include <asm/smp.h>
@@ -573,12 +573,14 @@ static int balanced_irq(void *unused)
for ( ; ; ) {
set_current_state(TASK_INTERRUPTIBLE);
time_remaining = schedule_timeout(time_remaining);
- try_to_freeze(PF_FREEZE);
+ try_to_freeze();
if (time_after(jiffies,
prev_balance_time+balanced_irq_interval)) {
+ preempt_disable();
do_irq_balance();
prev_balance_time = jiffies;
time_remaining = balanced_irq_interval;
+ preempt_enable();
}
}
return 0;
@@ -630,10 +632,8 @@ static int __init balanced_irq_init(void)
printk(KERN_ERR "balanced_irq_init: failed to spawn balanced_irq");
failed:
for (i = 0; i < NR_CPUS; i++) {
- if(irq_cpu_data[i].irq_delta)
- kfree(irq_cpu_data[i].irq_delta);
- if(irq_cpu_data[i].last_irq)
- kfree(irq_cpu_data[i].last_irq);
+ kfree(irq_cpu_data[i].irq_delta);
+ kfree(irq_cpu_data[i].last_irq);
}
return 0;
}
@@ -812,6 +812,7 @@ int IO_APIC_get_PCI_irq_vector(int bus, int slot, int pin)
}
return best_guess;
}
+EXPORT_SYMBOL(IO_APIC_get_PCI_irq_vector);
/*
* This function currently is only a helper for the i386 smp boot process where
@@ -1633,12 +1634,43 @@ static void __init enable_IO_APIC(void)
*/
void disable_IO_APIC(void)
{
+ int pin;
/*
* Clear the IO-APIC before rebooting:
*/
clear_IO_APIC();
- disconnect_bsp_APIC();
+ /*
+	 * If the i8259 is routed through an IOAPIC,
+	 * put that IOAPIC in virtual wire mode
+	 * so legacy interrupts can be delivered.
+ */
+ pin = find_isa_irq_pin(0, mp_ExtINT);
+ if (pin != -1) {
+ struct IO_APIC_route_entry entry;
+ unsigned long flags;
+
+ memset(&entry, 0, sizeof(entry));
+ entry.mask = 0; /* Enabled */
+ entry.trigger = 0; /* Edge */
+ entry.irr = 0;
+ entry.polarity = 0; /* High */
+ entry.delivery_status = 0;
+ entry.dest_mode = 0; /* Physical */
+ entry.delivery_mode = 7; /* ExtInt */
+ entry.vector = 0;
+ entry.dest.physical.physical_dest = 0;
+
+
+ /*
+ * Add it to the IO-APIC irq-routing table:
+ */
+ spin_lock_irqsave(&ioapic_lock, flags);
+ io_apic_write(0, 0x11+2*pin, *(((int *)&entry)+1));
+ io_apic_write(0, 0x10+2*pin, *(((int *)&entry)+0));
+ spin_unlock_irqrestore(&ioapic_lock, flags);
+ }
+ disconnect_bsp_APIC(pin != -1);
}
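
The routing entry above is written as two 32-bit halves, high word (register 0x11 + 2*pin) before low (0x10 + 2*pin), which keeps the entry inert until the low half carrying the vector, delivery mode, and mask bit lands. A sketch of the split (the register array is a stand-in for io_apic_write()):

    #include <stdio.h>
    #include <stdint.h>

    /* Illustrative split of a 64-bit redirection entry into the two
     * 32-bit register writes used above; high half first. */
    static void write_route_entry(uint32_t regs[], int pin, uint64_t entry)
    {
        regs[0x11 + 2 * pin] = (uint32_t)(entry >> 32);
        regs[0x10 + 2 * pin] = (uint32_t)entry;
    }

    int main(void)
    {
        uint32_t regs[64] = { 0 };

        /* Delivery mode lives in bits 8-10 of the low half; 7 = ExtInt. */
        write_route_entry(regs, 0, (uint64_t)7 << 8);
        printf("%08x %08x\n", regs[0x10], regs[0x11]);  /* 00000700 00000000 */
        return 0;
    }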
/*
@@ -1659,6 +1691,12 @@ static void __init setup_ioapic_ids_from_mpc(void)
unsigned long flags;
/*
+ * Don't check I/O APIC IDs for xAPIC systems. They have
+ * no meaning without the serial APIC bus.
+ */
+ if (!(boot_cpu_data.x86_vendor == X86_VENDOR_INTEL && boot_cpu_data.x86 < 15))
+ return;
+ /*
* This is broken; anything with a real cpu count has to
* circumvent this idiocy regardless.
*/
@@ -1684,10 +1722,6 @@ static void __init setup_ioapic_ids_from_mpc(void)
mp_ioapics[apic].mpc_apicid = reg_00.bits.ID;
}
- /* Don't check I/O APIC IDs for some xAPIC systems. They have
- * no meaning without the serial APIC bus. */
- if (NO_IOAPIC_CHECK)
- continue;
/*
* Sanity check, is the ID really free? Every APIC in a
* system must have a unique ID or we get lots of nice
diff --git a/arch/i386/kernel/irq.c b/arch/i386/kernel/irq.c
index 73945a3c53c4..ce66dcc26d90 100644
--- a/arch/i386/kernel/irq.c
+++ b/arch/i386/kernel/irq.c
@@ -15,6 +15,9 @@
#include <linux/seq_file.h>
#include <linux/interrupt.h>
#include <linux/kernel_stat.h>
+#include <linux/notifier.h>
+#include <linux/cpu.h>
+#include <linux/delay.h>
DEFINE_PER_CPU(irq_cpustat_t, irq_stat) ____cacheline_maxaligned_in_smp;
EXPORT_PER_CPU_SYMBOL(irq_stat);
@@ -153,6 +156,11 @@ void irq_ctx_init(int cpu)
cpu,hardirq_ctx[cpu],softirq_ctx[cpu]);
}
+void irq_ctx_exit(int cpu)
+{
+ hardirq_ctx[cpu] = NULL;
+}
+
extern asmlinkage void __do_softirq(void);
asmlinkage void do_softirq(void)
@@ -210,9 +218,8 @@ int show_interrupts(struct seq_file *p, void *v)
if (i == 0) {
seq_printf(p, " ");
- for (j=0; j<NR_CPUS; j++)
- if (cpu_online(j))
- seq_printf(p, "CPU%d ",j);
+ for_each_cpu(j)
+ seq_printf(p, "CPU%d ",j);
seq_putc(p, '\n');
}
@@ -225,9 +232,8 @@ int show_interrupts(struct seq_file *p, void *v)
#ifndef CONFIG_SMP
seq_printf(p, "%10u ", kstat_irqs(i));
#else
- for (j = 0; j < NR_CPUS; j++)
- if (cpu_online(j))
- seq_printf(p, "%10u ", kstat_cpu(j).irqs[i]);
+ for_each_cpu(j)
+ seq_printf(p, "%10u ", kstat_cpu(j).irqs[i]);
#endif
seq_printf(p, " %14s", irq_desc[i].handler->typename);
seq_printf(p, " %s", action->name);
@@ -240,16 +246,14 @@ skip:
spin_unlock_irqrestore(&irq_desc[i].lock, flags);
} else if (i == NR_IRQS) {
seq_printf(p, "NMI: ");
- for (j = 0; j < NR_CPUS; j++)
- if (cpu_online(j))
- seq_printf(p, "%10u ", nmi_count(j));
+ for_each_cpu(j)
+ seq_printf(p, "%10u ", nmi_count(j));
seq_putc(p, '\n');
#ifdef CONFIG_X86_LOCAL_APIC
seq_printf(p, "LOC: ");
- for (j = 0; j < NR_CPUS; j++)
- if (cpu_online(j))
- seq_printf(p, "%10u ",
- per_cpu(irq_stat,j).apic_timer_irqs);
+ for_each_cpu(j)
+ seq_printf(p, "%10u ",
+ per_cpu(irq_stat,j).apic_timer_irqs);
seq_putc(p, '\n');
#endif
seq_printf(p, "ERR: %10u\n", atomic_read(&irq_err_count));
@@ -259,3 +263,45 @@ skip:
}
return 0;
}
+
+#ifdef CONFIG_HOTPLUG_CPU
+#include <mach_apic.h>
+
+void fixup_irqs(cpumask_t map)
+{
+ unsigned int irq;
+ static int warned;
+
+ for (irq = 0; irq < NR_IRQS; irq++) {
+ cpumask_t mask;
+ if (irq == 2)
+ continue;
+
+ cpus_and(mask, irq_affinity[irq], map);
+ if (any_online_cpu(mask) == NR_CPUS) {
+ printk("Breaking affinity for irq %i\n", irq);
+ mask = map;
+ }
+ if (irq_desc[irq].handler->set_affinity)
+ irq_desc[irq].handler->set_affinity(irq, mask);
+ else if (irq_desc[irq].action && !(warned++))
+ printk("Cannot set affinity for irq %i\n", irq);
+ }
+
+#if 0
+ barrier();
+ /* Ingo Molnar says: "after the IO-APIC masks have been redirected
+ [note the nop - the interrupt-enable boundary on x86 is two
+ instructions from sti] - to flush out pending hardirqs and
+ IPIs. After this point nothing is supposed to reach this CPU." */
+ __asm__ __volatile__("sti; nop; cli");
+ barrier();
+#else
+ /* That doesn't seem sufficient. Give it 1ms. */
+ local_irq_enable();
+ mdelay(1);
+ local_irq_disable();
+#endif
+}
+#endif
+
diff --git a/arch/i386/kernel/kprobes.c b/arch/i386/kernel/kprobes.c
index 59ff9b455069..3762f6b35ab2 100644
--- a/arch/i386/kernel/kprobes.c
+++ b/arch/i386/kernel/kprobes.c
@@ -23,6 +23,9 @@
* Rusty Russell).
* 2004-July Suparna Bhattacharya <suparna@in.ibm.com> added jumper probes
* interface to access function arguments.
+ * 2005-May Hien Nguyen <hien@us.ibm.com>, Jim Keniston
+ * <jkenisto@us.ibm.com> and Prasanna S Panchamukhi
+ * <prasanna@in.ibm.com> added function-return probes.
*/
#include <linux/config.h>
@@ -30,15 +33,14 @@
#include <linux/ptrace.h>
#include <linux/spinlock.h>
#include <linux/preempt.h>
+#include <asm/cacheflush.h>
#include <asm/kdebug.h>
#include <asm/desc.h>
-/* kprobe_status settings */
-#define KPROBE_HIT_ACTIVE 0x00000001
-#define KPROBE_HIT_SS 0x00000002
-
static struct kprobe *current_kprobe;
static unsigned long kprobe_status, kprobe_old_eflags, kprobe_saved_eflags;
+static struct kprobe *kprobe_prev;
+static unsigned long kprobe_status_prev, kprobe_old_eflags_prev, kprobe_saved_eflags_prev;
static struct pt_regs jprobe_saved_regs;
static long *jprobe_saved_esp;
/* copy of the kernel stack at the probe fire time */
@@ -68,16 +70,50 @@ int arch_prepare_kprobe(struct kprobe *p)
void arch_copy_kprobe(struct kprobe *p)
{
memcpy(p->ainsn.insn, p->addr, MAX_INSN_SIZE * sizeof(kprobe_opcode_t));
+ p->opcode = *p->addr;
}
-void arch_remove_kprobe(struct kprobe *p)
+void arch_arm_kprobe(struct kprobe *p)
{
+ *p->addr = BREAKPOINT_INSTRUCTION;
+ flush_icache_range((unsigned long) p->addr,
+ (unsigned long) p->addr + sizeof(kprobe_opcode_t));
}
-static inline void disarm_kprobe(struct kprobe *p, struct pt_regs *regs)
+void arch_disarm_kprobe(struct kprobe *p)
{
*p->addr = p->opcode;
- regs->eip = (unsigned long)p->addr;
+ flush_icache_range((unsigned long) p->addr,
+ (unsigned long) p->addr + sizeof(kprobe_opcode_t));
+}
+
+void arch_remove_kprobe(struct kprobe *p)
+{
+}
+
+static inline void save_previous_kprobe(void)
+{
+ kprobe_prev = current_kprobe;
+ kprobe_status_prev = kprobe_status;
+ kprobe_old_eflags_prev = kprobe_old_eflags;
+ kprobe_saved_eflags_prev = kprobe_saved_eflags;
+}
+
+static inline void restore_previous_kprobe(void)
+{
+ current_kprobe = kprobe_prev;
+ kprobe_status = kprobe_status_prev;
+ kprobe_old_eflags = kprobe_old_eflags_prev;
+ kprobe_saved_eflags = kprobe_saved_eflags_prev;
+}
+
+static inline void set_current_kprobe(struct kprobe *p, struct pt_regs *regs)
+{
+ current_kprobe = p;
+ kprobe_saved_eflags = kprobe_old_eflags
+ = (regs->eflags & (TF_MASK | IF_MASK));
+ if (is_IF_modifier(p->opcode))
+ kprobe_saved_eflags &= ~IF_MASK;
}
static inline void prepare_singlestep(struct kprobe *p, struct pt_regs *regs)
@@ -91,6 +127,50 @@ static inline void prepare_singlestep(struct kprobe *p, struct pt_regs *regs)
regs->eip = (unsigned long)&p->ainsn.insn;
}
+struct task_struct *arch_get_kprobe_task(void *ptr)
+{
+ return ((struct thread_info *) (((unsigned long) ptr) &
+ (~(THREAD_SIZE -1))))->task;
+}
+
+void arch_prepare_kretprobe(struct kretprobe *rp, struct pt_regs *regs)
+{
+ unsigned long *sara = (unsigned long *)&regs->esp;
+ struct kretprobe_instance *ri;
+ static void *orig_ret_addr;
+
+ /*
+ * Save the return address when the return probe hits
+ * the first time, and use it to populate the (krprobe
+ * instance)->ret_addr for subsequent return probes at
+ * the same addrress since stack address would have
+ * the kretprobe_trampoline by then.
+ */
+ if (((void*) *sara) != kretprobe_trampoline)
+ orig_ret_addr = (void*) *sara;
+
+ if ((ri = get_free_rp_inst(rp)) != NULL) {
+ ri->rp = rp;
+ ri->stack_addr = sara;
+ ri->ret_addr = orig_ret_addr;
+ add_rp_inst(ri);
+ /* Replace the return addr with trampoline addr */
+ *sara = (unsigned long) &kretprobe_trampoline;
+ } else {
+ rp->nmissed++;
+ }
+}
+
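
arch_prepare_kretprobe() above works by swapping the function's saved return address on the stack for the trampoline's address, remembering the original so the handler can resume the real caller. A purely illustrative userspace model of that swap:

    #include <stdio.h>

    static void original_return(void) { puts("back in the caller"); }
    static void trampoline(void)      { puts("trampoline: handler runs first"); }

    int main(void)
    {
        void (*ret_slot)(void) = original_return;   /* stands in for *sara */
        void (*saved)(void) = ret_slot;             /* orig_ret_addr */

        ret_slot = trampoline;   /* *sara = &kretprobe_trampoline */
        ret_slot();              /* the "return" enters the trampoline */
        saved();                 /* ...which then resumes the real caller */
        return 0;
    }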
+void arch_kprobe_flush_task(struct task_struct *tk)
+{
+ struct kretprobe_instance *ri;
+ while ((ri = get_rp_inst_tsk(tk)) != NULL) {
+ *((unsigned long *)(ri->stack_addr)) =
+ (unsigned long) ri->ret_addr;
+ recycle_rp_inst(ri);
+ }
+}
+
/*
* Interrupts are disabled on entry as trap3 is an interrupt gate and they
* remain disabled thorough out this function.
@@ -127,8 +207,18 @@ static int kprobe_handler(struct pt_regs *regs)
unlock_kprobes();
goto no_kprobe;
}
- disarm_kprobe(p, regs);
- ret = 1;
+ /* We have reentered the kprobe_handler(), since
+ * another probe was hit while within the handler.
+		 * Here we save the original kprobes variables and
+		 * just single-step on the instruction of the new probe
+ * without calling any user handlers.
+ */
+ save_previous_kprobe();
+ set_current_kprobe(p, regs);
+ p->nmissed++;
+ prepare_singlestep(p, regs);
+ kprobe_status = KPROBE_REENTER;
+ return 1;
} else {
p = current_kprobe;
if (p->break_handler && p->break_handler(p, regs)) {
@@ -163,11 +253,7 @@ static int kprobe_handler(struct pt_regs *regs)
}
kprobe_status = KPROBE_HIT_ACTIVE;
- current_kprobe = p;
- kprobe_saved_eflags = kprobe_old_eflags
- = (regs->eflags & (TF_MASK | IF_MASK));
- if (is_IF_modifier(p->opcode))
- kprobe_saved_eflags &= ~IF_MASK;
+ set_current_kprobe(p, regs);
if (p->pre_handler && p->pre_handler(p, regs))
/* handler has already set things up, so skip ss setup */
@@ -184,6 +270,55 @@ no_kprobe:
}
/*
+ * For function-return probes, init_kprobes() establishes a probepoint
+ * here. When a retprobed function returns, this probe is hit and
+ * trampoline_probe_handler() runs, calling the kretprobe's handler.
+ */
+ void kretprobe_trampoline_holder(void)
+ {
+ asm volatile ( ".global kretprobe_trampoline\n"
+ "kretprobe_trampoline: \n"
+ "nop\n");
+ }
+
+/*
+ * Called when we hit the probe point at kretprobe_trampoline
+ */
+int trampoline_probe_handler(struct kprobe *p, struct pt_regs *regs)
+{
+ struct task_struct *tsk;
+ struct kretprobe_instance *ri;
+ struct hlist_head *head;
+ struct hlist_node *node;
+ unsigned long *sara = ((unsigned long *) &regs->esp) - 1;
+
+ tsk = arch_get_kprobe_task(sara);
+ head = kretprobe_inst_table_head(tsk);
+
+ hlist_for_each_entry(ri, node, head, hlist) {
+ if (ri->stack_addr == sara && ri->rp) {
+ if (ri->rp->handler)
+ ri->rp->handler(ri, regs);
+ }
+ }
+ return 0;
+}
+
+void trampoline_post_handler(struct kprobe *p, struct pt_regs *regs,
+ unsigned long flags)
+{
+ struct kretprobe_instance *ri;
+ /* RA already popped */
+ unsigned long *sara = ((unsigned long *)&regs->esp) - 1;
+
+ while ((ri = get_rp_inst(sara))) {
+ regs->eip = (unsigned long)ri->ret_addr;
+ recycle_rp_inst(ri);
+ }
+ regs->eflags &= ~TF_MASK;
+}
+
+/*
* Called after single-stepping. p->addr is the address of the
* instruction whose first byte has been replaced by the "int 3"
* instruction. To avoid the SMP problems that can occur when we
@@ -263,13 +398,22 @@ static inline int post_kprobe_handler(struct pt_regs *regs)
if (!kprobe_running())
return 0;
- if (current_kprobe->post_handler)
+ if ((kprobe_status != KPROBE_REENTER) && current_kprobe->post_handler) {
+ kprobe_status = KPROBE_HIT_SSDONE;
current_kprobe->post_handler(current_kprobe, regs, 0);
+ }
- resume_execution(current_kprobe, regs);
+ if (current_kprobe->post_handler != trampoline_post_handler)
+ resume_execution(current_kprobe, regs);
regs->eflags |= kprobe_saved_eflags;
+	/* Restore the original saved kprobes variables and continue. */
+ if (kprobe_status == KPROBE_REENTER) {
+ restore_previous_kprobe();
+ goto out;
+ }
unlock_kprobes();
+out:
preempt_enable_no_resched();
/*
diff --git a/arch/i386/kernel/machine_kexec.c b/arch/i386/kernel/machine_kexec.c
new file mode 100644
index 000000000000..52ed18d8b511
--- /dev/null
+++ b/arch/i386/kernel/machine_kexec.c
@@ -0,0 +1,226 @@
+/*
+ * machine_kexec.c - handle transition of Linux booting another kernel
+ * Copyright (C) 2002-2005 Eric Biederman <ebiederm@xmission.com>
+ *
+ * This source code is licensed under the GNU General Public License,
+ * Version 2. See the file COPYING for more details.
+ */
+
+#include <linux/mm.h>
+#include <linux/kexec.h>
+#include <linux/delay.h>
+#include <asm/pgtable.h>
+#include <asm/pgalloc.h>
+#include <asm/tlbflush.h>
+#include <asm/mmu_context.h>
+#include <asm/io.h>
+#include <asm/apic.h>
+#include <asm/cpufeature.h>
+
+static inline unsigned long read_cr3(void)
+{
+ unsigned long cr3;
+ asm volatile("movl %%cr3,%0": "=r"(cr3));
+ return cr3;
+}
+
+#define PAGE_ALIGNED __attribute__ ((__aligned__(PAGE_SIZE)))
+
+#define L0_ATTR (_PAGE_PRESENT | _PAGE_RW | _PAGE_ACCESSED | _PAGE_DIRTY)
+#define L1_ATTR (_PAGE_PRESENT | _PAGE_RW | _PAGE_ACCESSED | _PAGE_DIRTY)
+#define L2_ATTR (_PAGE_PRESENT)
+
+#define LEVEL0_SIZE (1UL << 12UL)
+
+#ifndef CONFIG_X86_PAE
+#define LEVEL1_SIZE (1UL << 22UL)
+static u32 pgtable_level1[1024] PAGE_ALIGNED;
+
+static void identity_map_page(unsigned long address)
+{
+ unsigned long level1_index, level2_index;
+ u32 *pgtable_level2;
+
+ /* Find the current page table */
+ pgtable_level2 = __va(read_cr3());
+
+ /* Find the indexes of the physical address to identity map */
+ level1_index = (address % LEVEL1_SIZE)/LEVEL0_SIZE;
+ level2_index = address / LEVEL1_SIZE;
+
+ /* Identity map the page table entry */
+ pgtable_level1[level1_index] = address | L0_ATTR;
+ pgtable_level2[level2_index] = __pa(pgtable_level1) | L1_ATTR;
+
+ /* Flush the tlb so the new mapping takes effect.
+ * Global tlb entries are not flushed but that is not an issue.
+ */
+ load_cr3(pgtable_level2);
+}
+
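
The index arithmetic in identity_map_page() selects the page-directory and page-table slots for the address: without PAE, each page table covers 4 MiB (LEVEL1_SIZE) of 4 KiB pages (LEVEL0_SIZE). Worked standalone:

    #include <stdio.h>

    #define LEVEL0_SIZE (1UL << 12)   /* 4 KiB page */
    #define LEVEL1_SIZE (1UL << 22)   /* 4 MiB covered per page table */

    int main(void)
    {
        unsigned long address = 0x00ffd000UL;   /* sample address */
        unsigned long level1_index = (address % LEVEL1_SIZE) / LEVEL0_SIZE;
        unsigned long level2_index = address / LEVEL1_SIZE;

        /* 0xffd000 = 3 * 4 MiB + 1021 * 4 KiB -> directory 3, table 1021 */
        printf("level2 %lu, level1 %lu\n", level2_index, level1_index);
        return 0;
    }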
+#else
+#define LEVEL1_SIZE (1UL << 21UL)
+#define LEVEL2_SIZE (1UL << 30UL)
+static u64 pgtable_level1[512] PAGE_ALIGNED;
+static u64 pgtable_level2[512] PAGE_ALIGNED;
+
+static void identity_map_page(unsigned long address)
+{
+ unsigned long level1_index, level2_index, level3_index;
+ u64 *pgtable_level3;
+
+ /* Find the current page table */
+ pgtable_level3 = __va(read_cr3());
+
+ /* Find the indexes of the physical address to identity map */
+ level1_index = (address % LEVEL1_SIZE)/LEVEL0_SIZE;
+ level2_index = (address % LEVEL2_SIZE)/LEVEL1_SIZE;
+ level3_index = address / LEVEL2_SIZE;
+
+ /* Identity map the page table entry */
+ pgtable_level1[level1_index] = address | L0_ATTR;
+ pgtable_level2[level2_index] = __pa(pgtable_level1) | L1_ATTR;
+ set_64bit(&pgtable_level3[level3_index],
+ __pa(pgtable_level2) | L2_ATTR);
+
+ /* Flush the tlb so the new mapping takes effect.
+ * Global tlb entries are not flushed but that is not an issue.
+ */
+ load_cr3(pgtable_level3);
+}
+#endif
+
+
+static void set_idt(void *newidt, __u16 limit)
+{
+ unsigned char curidt[6];
+
+	/* ia32 supports unaligned loads & stores */
+ (*(__u16 *)(curidt)) = limit;
+ (*(__u32 *)(curidt +2)) = (unsigned long)(newidt);
+
+ __asm__ __volatile__ (
+ "lidt %0\n"
+ : "=m" (curidt)
+ );
+};
+
+
+static void set_gdt(void *newgdt, __u16 limit)
+{
+ unsigned char curgdt[6];
+
+ /* ia32 supports unaligned loads & stores */
+ (*(__u16 *)(curgdt)) = limit;
+ (*(__u32 *)(curgdt +2)) = (unsigned long)(newgdt);
+
+ __asm__ __volatile__ (
+ "lgdt %0\n"
+ : "=m" (curgdt)
+ );
+};
+
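
set_gdt() and set_idt() above assemble the 6-byte pseudo-descriptor that lgdt/lidt expect: a 16-bit limit at offset 0 followed by a 32-bit linear base at offset 2 (hence the unaligned stores). A sketch of the layout (the table address is hypothetical):

    #include <stdio.h>
    #include <string.h>
    #include <stdint.h>

    int main(void)
    {
        unsigned char desc[6];
        uint16_t limit = 0x7f;        /* 16 descriptors * 8 bytes - 1 */
        uint32_t base = 0x00100000;   /* hypothetical table address */

        memcpy(desc, &limit, 2);      /* limit at offset 0 */
        memcpy(desc + 2, &base, 4);   /* base at offset 2, unaligned */

        for (int i = 0; i < 6; i++)
            printf("%02x ", desc[i]); /* 7f 00 00 00 10 00 on x86 */
        printf("\n");
        return 0;
    }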
+static void load_segments(void)
+{
+#define __STR(X) #X
+#define STR(X) __STR(X)
+
+ __asm__ __volatile__ (
+ "\tljmp $"STR(__KERNEL_CS)",$1f\n"
+ "\t1:\n"
+ "\tmovl $"STR(__KERNEL_DS)",%eax\n"
+ "\tmovl %eax,%ds\n"
+ "\tmovl %eax,%es\n"
+ "\tmovl %eax,%fs\n"
+ "\tmovl %eax,%gs\n"
+ "\tmovl %eax,%ss\n"
+ );
+#undef STR
+#undef __STR
+}
+
+typedef asmlinkage NORET_TYPE void (*relocate_new_kernel_t)(
+ unsigned long indirection_page,
+ unsigned long reboot_code_buffer,
+ unsigned long start_address,
+ unsigned int has_pae) ATTRIB_NORET;
+
+const extern unsigned char relocate_new_kernel[];
+extern void relocate_new_kernel_end(void);
+const extern unsigned int relocate_new_kernel_size;
+
+/*
+ * An architecture hook called to validate the
+ * proposed image and prepare the control pages
+ * as needed. The pages for KEXEC_CONTROL_CODE_SIZE
+ * have been allocated, but the segments have not yet
+ * been copied into the kernel.
+ *
+ * Do whatever setup is needed on the image and the
+ * reboot code buffer to allow us to avoid allocations
+ * later.
+ *
+ * Currently nothing.
+ */
+int machine_kexec_prepare(struct kimage *image)
+{
+ return 0;
+}
+
+/*
+ * Undo anything leftover by machine_kexec_prepare
+ * when an image is freed.
+ */
+void machine_kexec_cleanup(struct kimage *image)
+{
+}
+
+/*
+ * Do not allocate memory (or fail in any way) in machine_kexec().
+ * We are past the point of no return, committed to rebooting now.
+ */
+NORET_TYPE void machine_kexec(struct kimage *image)
+{
+ unsigned long page_list;
+ unsigned long reboot_code_buffer;
+
+ relocate_new_kernel_t rnk;
+
+ /* Interrupts aren't acceptable while we reboot */
+ local_irq_disable();
+
+ /* Compute some offsets */
+ reboot_code_buffer = page_to_pfn(image->control_code_page)
+ << PAGE_SHIFT;
+ page_list = image->head;
+
+ /* Set up an identity mapping for the reboot_code_buffer */
+ identity_map_page(reboot_code_buffer);
+
+ /* copy it out */
+ memcpy((void *)reboot_code_buffer, relocate_new_kernel,
+ relocate_new_kernel_size);
+
+ /* The segment registers are funny things: they are
+ * automatically loaded from a table in memory whenever you
+ * set them to a specific selector, but that table is never
+ * accessed again until you set the segment to a different selector.
+ *
+ * The more common model is a cache, where the behind-the-scenes
+ * work is done but is also dropped at arbitrary
+ * times.
+ *
+ * I take advantage of this here by force-loading the
+ * segments before I zap the gdt with an invalid value.
+ */
+ load_segments();
+ /* The gdt & idt are now invalid.
+ * If you want to load them you must set up your own idt & gdt.
+ */
+ set_gdt(phys_to_virt(0),0);
+ set_idt(phys_to_virt(0),0);
+
+ /* now call it */
+ rnk = (relocate_new_kernel_t) reboot_code_buffer;
+ (*rnk)(page_list, reboot_code_buffer, image->start, cpu_has_pae);
+}
diff --git a/arch/i386/kernel/mpparse.c b/arch/i386/kernel/mpparse.c
index 1347ab4939e7..af917f609c7d 100644
--- a/arch/i386/kernel/mpparse.c
+++ b/arch/i386/kernel/mpparse.c
@@ -67,7 +67,6 @@ unsigned long mp_lapic_addr;
/* Processor that is doing the boot up */
unsigned int boot_cpu_physical_apicid = -1U;
-unsigned int boot_cpu_logical_apicid = -1U;
/* Internal processor count */
static unsigned int __initdata num_processors;
@@ -180,7 +179,6 @@ static void __init MP_processor_info (struct mpc_config_processor *m)
if (m->mpc_cpuflag & CPU_BOOTPROCESSOR) {
Dprintk(" Bootup CPU\n");
boot_cpu_physical_apicid = m->mpc_apicid;
- boot_cpu_logical_apicid = apicid;
}
if (num_processors >= NR_CPUS) {
@@ -914,7 +912,10 @@ void __init mp_register_ioapic (
mp_ioapics[idx].mpc_apicaddr = address;
set_fixmap_nocache(FIX_IO_APIC_BASE_0 + idx, address);
- mp_ioapics[idx].mpc_apicid = io_apic_get_unique_id(idx, id);
+ if ((boot_cpu_data.x86_vendor == X86_VENDOR_INTEL) && (boot_cpu_data.x86 < 15))
+ mp_ioapics[idx].mpc_apicid = io_apic_get_unique_id(idx, id);
+ else
+ mp_ioapics[idx].mpc_apicid = id;
mp_ioapics[idx].mpc_apicver = io_apic_get_version(idx);
/*
@@ -1055,11 +1056,20 @@ void __init mp_config_acpi_legacy_irqs (void)
}
}
+#define MAX_GSI_NUM 4096
+
int mp_register_gsi (u32 gsi, int edge_level, int active_high_low)
{
int ioapic = -1;
int ioapic_pin = 0;
int idx, bit = 0;
+ static int pci_irq = 16;
+ /*
+ * Mapping between Global System Interrupts, which
+ * represent all possible interrupts, and IRQs
+ * assigned to actual devices.
+ */
+ static int gsi_to_irq[MAX_GSI_NUM];
#ifdef CONFIG_ACPI_BUS
/* Don't set up the ACPI SCI because it's already set up */
@@ -1094,11 +1104,26 @@ int mp_register_gsi (u32 gsi, int edge_level, int active_high_low)
if ((1<<bit) & mp_ioapic_routing[ioapic].pin_programmed[idx]) {
Dprintk(KERN_DEBUG "Pin %d-%d already programmed\n",
mp_ioapic_routing[ioapic].apic_id, ioapic_pin);
- return gsi;
+ return gsi_to_irq[gsi];
}
mp_ioapic_routing[ioapic].pin_programmed[idx] |= (1<<bit);
+ if (edge_level) {
+ /*
+ * For PCI devices assign IRQs in order, avoiding gaps
+ * due to unused I/O APIC pins.
+ */
+ int irq = gsi;
+ if (gsi < MAX_GSI_NUM) {
+ gsi = pci_irq++;
+ gsi_to_irq[irq] = gsi;
+ } else {
+ printk(KERN_ERR "GSI %u is too high\n", gsi);
+ return gsi;
+ }
+ }
+
io_apic_set_pci_routing(ioapic, ioapic_pin, gsi,
edge_level == ACPI_EDGE_SENSITIVE ? 0 : 1,
active_high_low == ACPI_ACTIVE_HIGH ? 0 : 1);
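
To make the remapping policy of this hunk concrete: level-triggered GSIs (PCI interrupts, in this scheme) are compacted onto consecutive IRQs starting at 16, while everything else keeps its GSI number. A self-contained sketch under those assumptions; the names are illustrative, not kernel API:

    #include <stdio.h>

    #define MAX_GSI_NUM 4096

    static int pci_irq = 16;             /* first IRQ handed to PCI */
    static int gsi_to_irq[MAX_GSI_NUM];  /* original GSI -> assigned IRQ */

    static int register_gsi_sketch(unsigned int gsi, int level)
    {
            if (!level)
                    return gsi;          /* edge (ISA-style): identity map */
            if (gsi >= MAX_GSI_NUM)
                    return gsi;          /* "GSI too high": keep as-is */
            gsi_to_irq[gsi] = pci_irq;
            return pci_irq++;
    }

    int main(void)
    {
            /* GSIs 20 and 33 arrive with a gap; both land on consecutive IRQs */
            printf("%d %d\n", register_gsi_sketch(20, 1),
                              register_gsi_sketch(33, 1));   /* 16 17 */
            return 0;
    }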
diff --git a/arch/i386/kernel/msr.c b/arch/i386/kernel/msr.c
index 05d9f8f363a6..b2f03c39a6fe 100644
--- a/arch/i386/kernel/msr.c
+++ b/arch/i386/kernel/msr.c
@@ -44,7 +44,7 @@
#include <asm/uaccess.h>
#include <asm/system.h>
-static struct class_simple *msr_class;
+static struct class *msr_class;
/* Note: "err" is handled in a funny way below. Otherwise one version
of gcc or another breaks. */
@@ -260,12 +260,12 @@ static struct file_operations msr_fops = {
.open = msr_open,
};
-static int msr_class_simple_device_add(int i)
+static int msr_class_device_create(int i)
{
int err = 0;
struct class_device *class_err;
- class_err = class_simple_device_add(msr_class, MKDEV(MSR_MAJOR, i), NULL, "msr%d",i);
+ class_err = class_device_create(msr_class, MKDEV(MSR_MAJOR, i), NULL, "msr%d",i);
if (IS_ERR(class_err))
err = PTR_ERR(class_err);
return err;
@@ -277,10 +277,10 @@ static int __devinit msr_class_cpu_callback(struct notifier_block *nfb, unsigned
switch (action) {
case CPU_ONLINE:
- msr_class_simple_device_add(cpu);
+ msr_class_device_create(cpu);
break;
case CPU_DEAD:
- class_simple_device_remove(MKDEV(MSR_MAJOR, cpu));
+ class_device_destroy(msr_class, MKDEV(MSR_MAJOR, cpu));
break;
}
return NOTIFY_OK;
@@ -302,13 +302,13 @@ static int __init msr_init(void)
err = -EBUSY;
goto out;
}
- msr_class = class_simple_create(THIS_MODULE, "msr");
+ msr_class = class_create(THIS_MODULE, "msr");
if (IS_ERR(msr_class)) {
err = PTR_ERR(msr_class);
goto out_chrdev;
}
for_each_online_cpu(i) {
- err = msr_class_simple_device_add(i);
+ err = msr_class_device_create(i);
if (err != 0)
goto out_class;
}
@@ -320,8 +320,8 @@ static int __init msr_init(void)
out_class:
i = 0;
for_each_online_cpu(i)
- class_simple_device_remove(MKDEV(MSR_MAJOR, i));
- class_simple_destroy(msr_class);
+ class_device_destroy(msr_class, MKDEV(MSR_MAJOR, i));
+ class_destroy(msr_class);
out_chrdev:
unregister_chrdev(MSR_MAJOR, "cpu/msr");
out:
@@ -332,8 +332,8 @@ static void __exit msr_exit(void)
{
int cpu = 0;
for_each_online_cpu(cpu)
- class_simple_device_remove(MKDEV(MSR_MAJOR, cpu));
- class_simple_destroy(msr_class);
+ class_device_destroy(msr_class, MKDEV(MSR_MAJOR, cpu));
+ class_destroy(msr_class);
unregister_chrdev(MSR_MAJOR, "cpu/msr");
unregister_cpu_notifier(&msr_class_cpu_notifier);
}
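
The msr.c hunks are a mechanical port from the deprecated class_simple interface to the driver-core class API of this era. A kernel-side sketch of the resulting create/destroy pairing, using the signatures exactly as they appear in this diff (it will not build outside a 2.6 tree of this vintage):

    #include <linux/device.h>
    #include <linux/err.h>
    #include <linux/kdev_t.h>
    #include <linux/major.h>
    #include <linux/module.h>

    static struct class *sketch_class;

    static int __init sketch_init(void)
    {
            sketch_class = class_create(THIS_MODULE, "msr");
            if (IS_ERR(sketch_class))
                    return PTR_ERR(sketch_class);
            /* one node per cpu, as in msr_init() above */
            class_device_create(sketch_class, MKDEV(MSR_MAJOR, 0),
                                NULL, "msr%d", 0);
            return 0;
    }

    static void __exit sketch_exit(void)
    {
            /* note the extra class argument versus class_simple_device_remove() */
            class_device_destroy(sketch_class, MKDEV(MSR_MAJOR, 0));
            class_destroy(sketch_class);
    }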
diff --git a/arch/i386/kernel/nmi.c b/arch/i386/kernel/nmi.c
index 2c0ee9c2d020..da6c46d667cb 100644
--- a/arch/i386/kernel/nmi.c
+++ b/arch/i386/kernel/nmi.c
@@ -28,8 +28,7 @@
#include <linux/sysctl.h>
#include <asm/smp.h>
-#include <asm/mtrr.h>
-#include <asm/mpspec.h>
+#include <asm/div64.h>
#include <asm/nmi.h>
#include "mach_traps.h"
@@ -324,6 +323,16 @@ static void clear_msr_range(unsigned int base, unsigned int n)
wrmsr(base+i, 0, 0);
}
+static inline void write_watchdog_counter(const char *descr)
+{
+ u64 count = (u64)cpu_khz * 1000;
+
+ do_div(count, nmi_hz);
+ if(descr)
+ Dprintk("setting %s to -0x%08Lx\n", descr, count);
+ wrmsrl(nmi_perfctr_msr, 0 - count);
+}
+
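The helper above exists because a plain 64-bit division on i386 would make gcc emit a call to a libgcc routine the kernel does not link; do_div() divides a u64 in place instead. A user-space sketch of the value it programs, with illustrative numbers:

    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
            uint64_t count = (uint64_t)2400000 * 1000; /* assume cpu_khz for 2.4 GHz */
            unsigned int nmi_hz = 1;                   /* one NMI per second */

            count /= nmi_hz;   /* kernel code must spell this do_div(count, nmi_hz) */

            /* the perfctr counts up and interrupts on overflow, so program -count */
            printf("wrmsrl(perfctr, -0x%llx)\n", (unsigned long long)count);
            return 0;
    }
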
static void setup_k7_watchdog(void)
{
unsigned int evntsel;
@@ -339,8 +348,7 @@ static void setup_k7_watchdog(void)
| K7_NMI_EVENT;
wrmsr(MSR_K7_EVNTSEL0, evntsel, 0);
- Dprintk("setting K7_PERFCTR0 to %08lx\n", -(cpu_khz/nmi_hz*1000));
- wrmsr(MSR_K7_PERFCTR0, -(cpu_khz/nmi_hz*1000), -1);
+ write_watchdog_counter("K7_PERFCTR0");
apic_write(APIC_LVTPC, APIC_DM_NMI);
evntsel |= K7_EVNTSEL_ENABLE;
wrmsr(MSR_K7_EVNTSEL0, evntsel, 0);
@@ -361,8 +369,7 @@ static void setup_p6_watchdog(void)
| P6_NMI_EVENT;
wrmsr(MSR_P6_EVNTSEL0, evntsel, 0);
- Dprintk("setting P6_PERFCTR0 to %08lx\n", -(cpu_khz/nmi_hz*1000));
- wrmsr(MSR_P6_PERFCTR0, -(cpu_khz/nmi_hz*1000), 0);
+ write_watchdog_counter("P6_PERFCTR0");
apic_write(APIC_LVTPC, APIC_DM_NMI);
evntsel |= P6_EVNTSEL0_ENABLE;
wrmsr(MSR_P6_EVNTSEL0, evntsel, 0);
@@ -402,8 +409,7 @@ static int setup_p4_watchdog(void)
wrmsr(MSR_P4_CRU_ESCR0, P4_NMI_CRU_ESCR0, 0);
wrmsr(MSR_P4_IQ_CCCR0, P4_NMI_IQ_CCCR0 & ~P4_CCCR_ENABLE, 0);
- Dprintk("setting P4_IQ_COUNTER0 to 0x%08lx\n", -(cpu_khz/nmi_hz*1000));
- wrmsr(MSR_P4_IQ_COUNTER0, -(cpu_khz/nmi_hz*1000), -1);
+ write_watchdog_counter("P4_IQ_COUNTER0");
apic_write(APIC_LVTPC, APIC_DM_NMI);
wrmsr(MSR_P4_IQ_CCCR0, nmi_p4_cccr_val, 0);
return 1;
@@ -518,7 +524,7 @@ void nmi_watchdog_tick (struct pt_regs * regs)
* other P6 variant */
apic_write(APIC_LVTPC, APIC_DM_NMI);
}
- wrmsr(nmi_perfctr_msr, -(cpu_khz/nmi_hz*1000), -1);
+ write_watchdog_counter(NULL);
}
}
diff --git a/arch/i386/kernel/pci-dma.c b/arch/i386/kernel/pci-dma.c
index 4de2e03c7b45..1e51427cc9eb 100644
--- a/arch/i386/kernel/pci-dma.c
+++ b/arch/i386/kernel/pci-dma.c
@@ -11,6 +11,7 @@
#include <linux/mm.h>
#include <linux/string.h>
#include <linux/pci.h>
+#include <linux/module.h>
#include <asm/io.h>
struct dma_coherent_mem {
@@ -54,6 +55,7 @@ void *dma_alloc_coherent(struct device *dev, size_t size,
}
return ret;
}
+EXPORT_SYMBOL(dma_alloc_coherent);
void dma_free_coherent(struct device *dev, size_t size,
void *vaddr, dma_addr_t dma_handle)
@@ -68,6 +70,7 @@ void dma_free_coherent(struct device *dev, size_t size,
} else
free_pages((unsigned long)vaddr, order);
}
+EXPORT_SYMBOL(dma_free_coherent);
int dma_declare_coherent_memory(struct device *dev, dma_addr_t bus_addr,
dma_addr_t device_addr, size_t size, int flags)
diff --git a/arch/i386/kernel/process.c b/arch/i386/kernel/process.c
index 96e3ea6b17c7..5f8cfa6b7940 100644
--- a/arch/i386/kernel/process.c
+++ b/arch/i386/kernel/process.c
@@ -13,6 +13,7 @@
#include <stdarg.h>
+#include <linux/cpu.h>
#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/fs.h>
@@ -37,6 +38,7 @@
#include <linux/kallsyms.h>
#include <linux/ptrace.h>
#include <linux/random.h>
+#include <linux/kprobes.h>
#include <asm/uaccess.h>
#include <asm/pgtable.h>
@@ -54,6 +56,9 @@
#include <linux/irq.h>
#include <linux/err.h>
+#include <asm/tlbflush.h>
+#include <asm/cpu.h>
+
asmlinkage void ret_from_fork(void) __asm__("ret_from_fork");
static int hlt_counter;
@@ -73,6 +78,7 @@ unsigned long thread_saved_pc(struct task_struct *tsk)
* Powermanagement idle function, if any..
*/
void (*pm_idle)(void);
+EXPORT_SYMBOL(pm_idle);
static DEFINE_PER_CPU(unsigned int, cpu_idle_state);
void disable_hlt(void)
@@ -105,6 +111,9 @@ void default_idle(void)
cpu_relax();
}
}
+#ifdef CONFIG_APM_MODULE
+EXPORT_SYMBOL(default_idle);
+#endif
/*
* On SMP it's slightly faster (but much more power-consuming!)
@@ -138,14 +147,42 @@ static void poll_idle (void)
}
}
+#ifdef CONFIG_HOTPLUG_CPU
+#include <asm/nmi.h>
+/* We don't actually take CPU down, just spin without interrupts. */
+static inline void play_dead(void)
+{
+ /* This must be done before dead CPU ack */
+ cpu_exit_clear();
+ wbinvd();
+ mb();
+ /* Ack it */
+ __get_cpu_var(cpu_state) = CPU_DEAD;
+
+ /*
+ * With physical CPU hotplug, we should halt the cpu
+ */
+ local_irq_disable();
+ while (1)
+ __asm__ __volatile__("hlt":::"memory");
+}
+#else
+static inline void play_dead(void)
+{
+ BUG();
+}
+#endif /* CONFIG_HOTPLUG_CPU */
+
/*
* The idle thread. There's no useful work to be
* done, so just try to conserve power and have a
* low exit latency (ie sit in a loop waiting for
* somebody to say that they'd like to reschedule)
*/
-void cpu_idle (void)
+void cpu_idle(void)
{
+ int cpu = raw_smp_processor_id();
+
/* endless idle loop with no priority at all */
while (1) {
while (!need_resched()) {
@@ -160,6 +197,9 @@ void cpu_idle (void)
if (!idle)
idle = default_idle;
+ if (cpu_is_offline(cpu))
+ play_dead();
+
__get_cpu_var(irq_stat).idle_timestamp = jiffies;
idle();
}
@@ -218,7 +258,7 @@ static void mwait_idle(void)
}
}
-void __init select_idle_routine(const struct cpuinfo_x86 *c)
+void __devinit select_idle_routine(const struct cpuinfo_x86 *c)
{
if (cpu_has(c, X86_FEATURE_MWAIT)) {
printk("monitor/mwait feature present.\n");
@@ -262,7 +302,7 @@ void show_regs(struct pt_regs * regs)
printk("EIP: %04x:[<%08lx>] CPU: %d\n",0xffff & regs->xcs,regs->eip, smp_processor_id());
print_symbol("EIP is at %s\n", regs->eip);
- if (regs->xcs & 3)
+ if (user_mode(regs))
printk(" ESP: %04x:%08lx",0xffff & regs->xss,regs->esp);
printk(" EFLAGS: %08lx %s (%s)\n",
regs->eflags, print_tainted(), system_utsname.release);
@@ -325,6 +365,7 @@ int kernel_thread(int (*fn)(void *), void * arg, unsigned long flags)
/* Ok, create the new process.. */
return do_fork(flags | CLONE_VM | CLONE_UNTRACED, 0, &regs, 0, NULL, NULL);
}
+EXPORT_SYMBOL(kernel_thread);
/*
* Free current thread data structures etc..
@@ -334,6 +375,13 @@ void exit_thread(void)
struct task_struct *tsk = current;
struct thread_struct *t = &tsk->thread;
+ /*
+ * Remove function-return probe instances associated with this task
+ * and put them back on the free list. Do not insert an exit probe for
+ * this function, it will be disabled by kprobe_flush_task if you do.
+ */
+ kprobe_flush_task(tsk);
+
/* The process may have allocated an io port bitmap... nuke it. */
if (unlikely(NULL != t->io_bitmap_ptr)) {
int cpu = get_cpu();
@@ -357,6 +405,13 @@ void flush_thread(void)
{
struct task_struct *tsk = current;
+ /*
+ * Remove function-return probe instances associated with this task
+ * and put them back on the free list. Do not insert an exit probe for
+ * this function, it will be disabled by kprobe_flush_task if you do.
+ */
+ kprobe_flush_task(tsk);
+
memset(tsk->thread.debugreg, 0, sizeof(unsigned long)*8);
memset(tsk->thread.tls_array, 0, sizeof(tsk->thread.tls_array));
/*
@@ -508,6 +563,7 @@ void dump_thread(struct pt_regs * regs, struct user * dump)
dump->u_fpvalid = dump_fpu (regs, &dump->i387);
}
+EXPORT_SYMBOL(dump_thread);
/*
* Capture the user space registers if the task is not running (in user space)
@@ -627,13 +683,13 @@ struct task_struct fastcall * __switch_to(struct task_struct *prev_p, struct tas
* Now maybe reload the debug registers
*/
if (unlikely(next->debugreg[7])) {
- loaddebug(next, 0);
- loaddebug(next, 1);
- loaddebug(next, 2);
- loaddebug(next, 3);
+ set_debugreg(current->thread.debugreg[0], 0);
+ set_debugreg(current->thread.debugreg[1], 1);
+ set_debugreg(current->thread.debugreg[2], 2);
+ set_debugreg(current->thread.debugreg[3], 3);
/* no 4 and 5 */
- loaddebug(next, 6);
- loaddebug(next, 7);
+ set_debugreg(current->thread.debugreg[6], 6);
+ set_debugreg(current->thread.debugreg[7], 7);
}
if (unlikely(prev->io_bitmap_ptr || next->io_bitmap_ptr))
@@ -731,6 +787,7 @@ unsigned long get_wchan(struct task_struct *p)
} while (count++ < 16);
return 0;
}
+EXPORT_SYMBOL(get_wchan);
/*
* sys_alloc_thread_area: get a yet unused TLS descriptor index.
diff --git a/arch/i386/kernel/ptrace.c b/arch/i386/kernel/ptrace.c
index e34f651fa13c..0da59b42843c 100644
--- a/arch/i386/kernel/ptrace.c
+++ b/arch/i386/kernel/ptrace.c
@@ -668,7 +668,7 @@ void send_sigtrap(struct task_struct *tsk, struct pt_regs *regs, int error_code)
info.si_code = TRAP_BRKPT;
/* User-mode eip? */
- info.si_addr = user_mode(regs) ? (void __user *) regs->eip : NULL;
+ info.si_addr = user_mode_vm(regs) ? (void __user *) regs->eip : NULL;
/* Send us the fakey SIGTRAP */
force_sig_info(SIGTRAP, &info, tsk);
diff --git a/arch/i386/kernel/reboot.c b/arch/i386/kernel/reboot.c
index 6dc27eb70ee7..b3e584849961 100644
--- a/arch/i386/kernel/reboot.c
+++ b/arch/i386/kernel/reboot.c
@@ -2,6 +2,7 @@
* linux/arch/i386/kernel/reboot.c
*/
+#include <linux/config.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/delay.h>
@@ -19,12 +20,12 @@
* Power off function, if any
*/
void (*pm_power_off)(void);
+EXPORT_SYMBOL(pm_power_off);
static int reboot_mode;
static int reboot_thru_bios;
#ifdef CONFIG_SMP
-int reboot_smp = 0;
static int reboot_cpu = -1;
/* shamelessly grabbed from lib/vsprintf.c for readability */
#define is_digit(c) ((c) >= '0' && (c) <= '9')
@@ -47,7 +48,6 @@ static int __init reboot_setup(char *str)
break;
#ifdef CONFIG_SMP
case 's': /* "smp" reboot by executing reset on BSP or other CPU*/
- reboot_smp = 1;
if (is_digit(*(str+1))) {
reboot_cpu = (int) (*(str+1) - '0');
if (is_digit(*(str+2)))
@@ -86,33 +86,9 @@ static int __init set_bios_reboot(struct dmi_system_id *d)
return 0;
}
-/*
- * Some machines require the "reboot=s" commandline option, this quirk makes that automatic.
- */
-static int __init set_smp_reboot(struct dmi_system_id *d)
-{
-#ifdef CONFIG_SMP
- if (!reboot_smp) {
- reboot_smp = 1;
- printk(KERN_INFO "%s series board detected. Selecting SMP-method for reboots.\n", d->ident);
- }
-#endif
- return 0;
-}
-
-/*
- * Some machines require the "reboot=b,s" commandline option, this quirk makes that automatic.
- */
-static int __init set_smp_bios_reboot(struct dmi_system_id *d)
-{
- set_smp_reboot(d);
- set_bios_reboot(d);
- return 0;
-}
-
static struct dmi_system_id __initdata reboot_dmi_table[] = {
{ /* Handle problems with rebooting on Dell 1300's */
- .callback = set_smp_bios_reboot,
+ .callback = set_bios_reboot,
.ident = "Dell PowerEdge 1300",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "Dell Computer Corporation"),
@@ -295,42 +271,36 @@ void machine_real_restart(unsigned char *code, int length)
:
: "i" ((void *) (0x1000 - sizeof (real_mode_switch) - 100)));
}
+#ifdef CONFIG_APM_MODULE
+EXPORT_SYMBOL(machine_real_restart);
+#endif
-void machine_restart(char * __unused)
+void machine_shutdown(void)
{
#ifdef CONFIG_SMP
- int cpuid;
-
- cpuid = GET_APIC_ID(apic_read(APIC_ID));
-
- if (reboot_smp) {
-
- /* check to see if reboot_cpu is valid
- if its not, default to the BSP */
- if ((reboot_cpu == -1) ||
- (reboot_cpu > (NR_CPUS -1)) ||
- !physid_isset(cpuid, phys_cpu_present_map))
- reboot_cpu = boot_cpu_physical_apicid;
-
- reboot_smp = 0; /* use this as a flag to only go through this once*/
- /* re-run this function on the other CPUs
- it will fall though this section since we have
- cleared reboot_smp, and do the reboot if it is the
- correct CPU, otherwise it halts. */
- if (reboot_cpu != cpuid)
- smp_call_function((void *)machine_restart , NULL, 1, 0);
+ int reboot_cpu_id;
+
+ /* The boot cpu is always logical cpu 0 */
+ reboot_cpu_id = 0;
+
+ /* See if a command line override has been given */
+ if ((reboot_cpu_id != -1) && (reboot_cpu < NR_CPUS) &&
+ cpu_isset(reboot_cpu, cpu_online_map)) {
+ reboot_cpu_id = reboot_cpu;
}
- /* if reboot_cpu is still -1, then we want a tradional reboot,
- and if we are not running on the reboot_cpu,, halt */
- if ((reboot_cpu != -1) && (cpuid != reboot_cpu)) {
- for (;;)
- __asm__ __volatile__ ("hlt");
+ /* Make certain the cpu I'm rebooting on is online */
+ if (!cpu_isset(reboot_cpu_id, cpu_online_map)) {
+ reboot_cpu_id = smp_processor_id();
}
- /*
- * Stop all CPUs and turn off local APICs and the IO-APIC, so
- * other OSs see a clean IRQ state.
+
+ /* Make certain I only run on the appropriate processor */
+ set_cpus_allowed(current, cpumask_of_cpu(reboot_cpu_id));
+
+ /* O.K. Now that I'm on the appropriate processor, stop
+ * all of the others, and disable their local APICs.
*/
+
smp_send_stop();
#endif /* CONFIG_SMP */
@@ -339,6 +309,11 @@ void machine_restart(char * __unused)
#ifdef CONFIG_X86_IO_APIC
disable_IO_APIC();
#endif
+}
+
+void machine_restart(char * __unused)
+{
+ machine_shutdown();
if (!reboot_thru_bios) {
if (efi_enabled) {
diff --git a/arch/i386/kernel/relocate_kernel.S b/arch/i386/kernel/relocate_kernel.S
new file mode 100644
index 000000000000..d312616effa1
--- /dev/null
+++ b/arch/i386/kernel/relocate_kernel.S
@@ -0,0 +1,120 @@
+/*
+ * relocate_kernel.S - put the kernel image in place to boot
+ * Copyright (C) 2002-2004 Eric Biederman <ebiederm@xmission.com>
+ *
+ * This source code is licensed under the GNU General Public License,
+ * Version 2. See the file COPYING for more details.
+ */
+
+#include <linux/linkage.h>
+
+ /*
+ * Must be relocatable PIC code callable as a C function, that once
+ * it starts cannot use the previous process's stack.
+ */
+ .globl relocate_new_kernel
+relocate_new_kernel:
+ /* read the arguments and say goodbye to the stack */
+ movl 4(%esp), %ebx /* page_list */
+ movl 8(%esp), %ebp /* reboot_code_buffer */
+ movl 12(%esp), %edx /* start address */
+ movl 16(%esp), %ecx /* cpu_has_pae */
+
+ /* zero out flags, and disable interrupts */
+ pushl $0
+ popfl
+
+ /* set a new stack at the bottom of our page... */
+ lea 4096(%ebp), %esp
+
+ /* store the parameters back on the stack */
+ pushl %edx /* store the start address */
+
+ /* Set cr0 to a known state:
+ * 31 0 == Paging disabled
+ * 18 0 == Alignment check disabled
+ * 16 0 == Write protect disabled
+ * 3 0 == No task switch
+ * 2 0 == Don't do FP software emulation.
+ * 0 1 == Protected mode enabled
+ */
+ movl %cr0, %eax
+ andl $~((1<<31)|(1<<18)|(1<<16)|(1<<3)|(1<<2)), %eax
+ orl $(1<<0), %eax
+ movl %eax, %cr0
+
+ /* clear cr4 if applicable */
+ testl %ecx, %ecx
+ jz 1f
+ /* Set cr4 to a known state:
+ * Setting everything to zero seems safe.
+ */
+ movl %cr4, %eax
+ andl $0, %eax
+ movl %eax, %cr4
+
+ jmp 1f
+1:
+
+ /* Flush the TLB (needed?) */
+ xorl %eax, %eax
+ movl %eax, %cr3
+
+ /* Do the copies */
+ movl %ebx, %ecx
+ jmp 1f
+
+0: /* top, read another word from the indirection page */
+ movl (%ebx), %ecx
+ addl $4, %ebx
+1:
+ testl $0x1, %ecx /* is it a destination page */
+ jz 2f
+ movl %ecx, %edi
+ andl $0xfffff000, %edi
+ jmp 0b
+2:
+ testl $0x2, %ecx /* is it an indirection page */
+ jz 2f
+ movl %ecx, %ebx
+ andl $0xfffff000, %ebx
+ jmp 0b
+2:
+ testl $0x4, %ecx /* is it the done indicator */
+ jz 2f
+ jmp 3f
+2:
+ testl $0x8, %ecx /* is it the source indicator */
+ jz 0b /* Ignore it otherwise */
+ movl %ecx, %esi /* For every source page do a copy */
+ andl $0xfffff000, %esi
+
+ movl $1024, %ecx
+ rep ; movsl
+ jmp 0b
+
+3:
+
+ /* To be certain of avoiding problems with self-modifying code
+ * I need to execute a serializing instruction here.
+ * So I flush the TLB, it's handy, and not processor dependent.
+ */
+ xorl %eax, %eax
+ movl %eax, %cr3
+
+ /* set all of the registers to known values */
+ /* leave %esp alone */
+
+ xorl %eax, %eax
+ xorl %ebx, %ebx
+ xorl %ecx, %ecx
+ xorl %edx, %edx
+ xorl %esi, %esi
+ xorl %edi, %edi
+ xorl %ebp, %ebp
+ ret
+relocate_new_kernel_end:
+
+ .globl relocate_new_kernel_size
+relocate_new_kernel_size:
+ .long relocate_new_kernel_end - relocate_new_kernel
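
The copy loop above walks kexec's indirection list: the low bits of each word tag its meaning (0x1 destination page, 0x2 indirection page, 0x4 done, 0x8 source page) and the page-aligned upper bits carry an address. A C rendering of the same state machine as a readable companion to the assembly; it assumes the head entry is tagged as an indirection page and widens the 32-bit address mask for portability, so it is a sketch rather than kernel code:

    #include <stdio.h>
    #include <string.h>

    #define IND_DESTINATION 0x1
    #define IND_INDIRECTION 0x2
    #define IND_DONE        0x4
    #define IND_SOURCE      0x8
    #define ADDR_MASK       (~0xfffUL)  /* 0xfffff000 in the 32-bit original */

    static void sketch_copy(unsigned long entry)
    {
            unsigned long *ind = NULL;
            char *dest = NULL;

            for (;;) {
                    if (entry & IND_DESTINATION)
                            dest = (char *)(entry & ADDR_MASK);
                    else if (entry & IND_INDIRECTION)
                            ind = (unsigned long *)(entry & ADDR_MASK);
                    else if (entry & IND_DONE)
                            return;
                    else if (entry & IND_SOURCE) {
                            memcpy(dest, (void *)(entry & ADDR_MASK), 4096);
                            dest += 4096;  /* rep movsl leaves edi advanced */
                    }
                    entry = *ind++;
            }
    }

    int main(void)
    {
            static char src[4096] __attribute__((aligned(4096))) = "payload";
            static char dst[4096] __attribute__((aligned(4096)));
            unsigned long list[3] __attribute__((aligned(4096))) = {
                    (unsigned long)dst | IND_DESTINATION,
                    (unsigned long)src | IND_SOURCE,
                    IND_DONE,
            };
            sketch_copy((unsigned long)list | IND_INDIRECTION);
            return puts(dst) < 0;   /* prints "payload" */
    }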
diff --git a/arch/i386/kernel/setup.c b/arch/i386/kernel/setup.c
index 945ec73163c8..7306353c520e 100644
--- a/arch/i386/kernel/setup.c
+++ b/arch/i386/kernel/setup.c
@@ -23,8 +23,10 @@
* This file handles the architecture-dependent parts of initialization
*/
+#include <linux/config.h>
#include <linux/sched.h>
#include <linux/mm.h>
+#include <linux/mmzone.h>
#include <linux/tty.h>
#include <linux/ioport.h>
#include <linux/acpi.h>
@@ -41,7 +43,12 @@
#include <linux/init.h>
#include <linux/edd.h>
#include <linux/nodemask.h>
+#include <linux/kexec.h>
+#include <linux/crash_dump.h>
+
#include <video/edid.h>
+
+#include <asm/apic.h>
#include <asm/e820.h>
#include <asm/mpspec.h>
#include <asm/setup.h>
@@ -53,12 +60,15 @@
#include "setup_arch_pre.h"
#include <bios_ebda.h>
+/* Forward Declaration. */
+void __init find_max_pfn(void);
+
/* This value is set up by the early boot code to point to the value
immediately after the boot time page tables. It contains a *physical*
address, and must not be in the .bss segment! */
unsigned long init_pg_tables_end __initdata = ~0UL;
-int disable_pse __initdata = 0;
+int disable_pse __devinitdata = 0;
/*
* Machine setup..
@@ -73,6 +83,7 @@ EXPORT_SYMBOL(efi_enabled);
struct cpuinfo_x86 new_cpu_data __initdata = { 0, 0, 0, 0, -1, 1, 0, 0, -1 };
/* common cpu data for all cpus */
struct cpuinfo_x86 boot_cpu_data = { 0, 0, 0, 0, -1, 1, 0, 0, -1 };
+EXPORT_SYMBOL(boot_cpu_data);
unsigned long mmu_cr4_features;
@@ -90,12 +101,18 @@ extern acpi_interrupt_flags acpi_sci_flags;
/* for MCA, but anyone else can use it if they want */
unsigned int machine_id;
+#ifdef CONFIG_MCA
+EXPORT_SYMBOL(machine_id);
+#endif
unsigned int machine_submodel_id;
unsigned int BIOS_revision;
unsigned int mca_pentium_flag;
/* For PCI or other memory-mapped resources */
unsigned long pci_mem_start = 0x10000000;
+#ifdef CONFIG_PCI
+EXPORT_SYMBOL(pci_mem_start);
+#endif
/* Boot loader ID as an integer, for the benefit of proc_dointvec */
int bootloader_type;
@@ -107,14 +124,26 @@ static unsigned int highmem_pages = -1;
* Setup options
*/
struct drive_info_struct { char dummy[32]; } drive_info;
+#if defined(CONFIG_BLK_DEV_IDE) || defined(CONFIG_BLK_DEV_HD) || \
+ defined(CONFIG_BLK_DEV_IDE_MODULE) || defined(CONFIG_BLK_DEV_HD_MODULE)
+EXPORT_SYMBOL(drive_info);
+#endif
struct screen_info screen_info;
+#ifdef CONFIG_VT
+EXPORT_SYMBOL(screen_info);
+#endif
struct apm_info apm_info;
+EXPORT_SYMBOL(apm_info);
struct sys_desc_table_struct {
unsigned short length;
unsigned char table[0];
};
struct edid_info edid_info;
struct ist_info ist_info;
+#if defined(CONFIG_X86_SPEEDSTEP_SMI) || \
+ defined(CONFIG_X86_SPEEDSTEP_SMI_MODULE)
+EXPORT_SYMBOL(ist_info);
+#endif
struct e820map e820;
extern void early_cpu_init(void);
@@ -711,6 +740,15 @@ static void __init parse_cmdline_early (char ** cmdline_p)
if (to != command_line)
to--;
if (!memcmp(from+7, "exactmap", 8)) {
+#ifdef CONFIG_CRASH_DUMP
+ /* If we are doing a crash dump, we
+ * still need to know the real mem
+ * size before original memory map is
+ * reset.
+ */
+ find_max_pfn();
+ saved_max_pfn = max_pfn;
+#endif
from += 8+7;
e820.nr_map = 0;
userdef = 1;
@@ -814,6 +852,44 @@ static void __init parse_cmdline_early (char ** cmdline_p)
#endif /* CONFIG_X86_LOCAL_APIC */
#endif /* CONFIG_ACPI_BOOT */
+#ifdef CONFIG_X86_LOCAL_APIC
+ /* enable local APIC */
+ else if (!memcmp(from, "lapic", 5))
+ lapic_enable();
+
+ /* disable local APIC */
+ else if (!memcmp(from, "nolapic", 6))
+ lapic_disable();
+#endif /* CONFIG_X86_LOCAL_APIC */
+
+#ifdef CONFIG_KEXEC
+ /* crashkernel=size@addr specifies the location to reserve for
+ * a crash kernel. By reserving this memory we guarantee
+ * that linux never sets it up as a DMA target.
+ * Useful for holding code to do something appropriate
+ * after a kernel panic.
+ */
+ else if (!memcmp(from, "crashkernel=", 12)) {
+ unsigned long size, base;
+ size = memparse(from+12, &from);
+ if (*from == '@') {
+ base = memparse(from+1, &from);
+ /* FIXME: Do I want a sanity check
+ * to validate the memory range?
+ */
+ crashk_res.start = base;
+ crashk_res.end = base + size - 1;
+ }
+ }
+#endif
+#ifdef CONFIG_CRASH_DUMP
+ /* elfcorehdr= specifies the location of elf core header
+ * stored by the crashed kernel.
+ */
+ else if (!memcmp(from, "elfcorehdr=", 11))
+ elfcorehdr_addr = memparse(from+11, &from);
+#endif
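
memparse() consumes a number with an optional K/M/G suffix and advances the caller's cursor, which is what lets the calls above split crashkernel=size@addr. A user-space sketch of parsing the conventional form, with a hand-rolled stand-in for memparse() (uppercase suffixes only; the kernel helper also accepts lowercase):

    #include <stdio.h>
    #include <stdlib.h>

    static unsigned long memparse_sketch(const char *s, char **end)
    {
            unsigned long v = strtoul(s, end, 0);
            switch (**end) {
            case 'G': v <<= 10;            /* fall through */
            case 'M': v <<= 10;            /* fall through */
            case 'K': v <<= 10; (*end)++; break;
            }
            return v;
    }

    int main(void)
    {
            char *next, *arg = "64M@16M";
            unsigned long size = memparse_sketch(arg, &next);

            if (*next == '@') {
                    unsigned long base = memparse_sketch(next + 1, &next);
                    /* crashk_res would span [base, base + size - 1] */
                    printf("reserve %#lx bytes at %#lx\n", size, base);
            }
            return 0;
    }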
+
/*
* highmem=size forces highmem to be exactly 'size' bytes.
* This works even on boxes that have no highmem otherwise.
@@ -1022,7 +1098,7 @@ static void __init reserve_ebda_region(void)
reserve_bootmem(addr, PAGE_SIZE);
}
-#ifndef CONFIG_DISCONTIGMEM
+#ifndef CONFIG_NEED_MULTIPLE_NODES
void __init setup_bootmem_allocator(void);
static unsigned long __init setup_memory(void)
{
@@ -1072,9 +1148,9 @@ void __init zone_sizes_init(void)
free_area_init(zones_size);
}
#else
-extern unsigned long setup_memory(void);
+extern unsigned long __init setup_memory(void);
extern void zone_sizes_init(void);
-#endif /* !CONFIG_DISCONTIGMEM */
+#endif /* !CONFIG_NEED_MULTIPLE_NODES */
void __init setup_bootmem_allocator(void)
{
@@ -1092,8 +1168,8 @@ void __init setup_bootmem_allocator(void)
* the (very unlikely) case of us accidentally initializing the
* bootmem allocator with an invalid RAM area.
*/
- reserve_bootmem(HIGH_MEMORY, (PFN_PHYS(min_low_pfn) +
- bootmap_size + PAGE_SIZE-1) - (HIGH_MEMORY));
+ reserve_bootmem(__PHYSICAL_START, (PFN_PHYS(min_low_pfn) +
+ bootmap_size + PAGE_SIZE-1) - (__PHYSICAL_START));
/*
* reserve physical page 0 - it's a special BIOS page on many boxes,
@@ -1149,6 +1225,11 @@ void __init setup_bootmem_allocator(void)
}
}
#endif
+#ifdef CONFIG_KEXEC
+ if (crashk_res.start != crashk_res.end)
+ reserve_bootmem(crashk_res.start,
+ crashk_res.end - crashk_res.start + 1);
+#endif
}
/*
@@ -1202,6 +1283,9 @@ legacy_init_iomem_resources(struct resource *code_resource, struct resource *dat
*/
request_resource(res, code_resource);
request_resource(res, data_resource);
+#ifdef CONFIG_KEXEC
+ request_resource(res, &crashk_res);
+#endif
}
}
}
@@ -1475,6 +1559,7 @@ void __init setup_arch(char **cmdline_p)
#endif
paging_init();
remapped_pgdat_init();
+ sparse_init();
zone_sizes_init();
/*
@@ -1502,11 +1587,13 @@ void __init setup_arch(char **cmdline_p)
if (efi_enabled)
efi_map_memmap();
+#ifdef CONFIG_ACPI_BOOT
/*
* Parse the ACPI tables for possible boot-time SMP configuration.
*/
acpi_boot_table_init();
acpi_boot_init();
+#endif
#ifdef CONFIG_X86_LOCAL_APIC
if (smp_found_config)
diff --git a/arch/i386/kernel/signal.c b/arch/i386/kernel/signal.c
index ea46d028af08..89ef7adc63a4 100644
--- a/arch/i386/kernel/signal.c
+++ b/arch/i386/kernel/signal.c
@@ -346,8 +346,8 @@ get_sigframe(struct k_sigaction *ka, struct pt_regs * regs, size_t frame_size)
extern void __user __kernel_sigreturn;
extern void __user __kernel_rt_sigreturn;
-static void setup_frame(int sig, struct k_sigaction *ka,
- sigset_t *set, struct pt_regs * regs)
+static int setup_frame(int sig, struct k_sigaction *ka,
+ sigset_t *set, struct pt_regs * regs)
{
void __user *restorer;
struct sigframe __user *frame;
@@ -429,13 +429,14 @@ static void setup_frame(int sig, struct k_sigaction *ka,
current->comm, current->pid, frame, regs->eip, frame->pretcode);
#endif
- return;
+ return 1;
give_sigsegv:
force_sigsegv(sig, current);
+ return 0;
}
-static void setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
+static int setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
sigset_t *set, struct pt_regs * regs)
{
void __user *restorer;
@@ -522,20 +523,23 @@ static void setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
current->comm, current->pid, frame, regs->eip, frame->pretcode);
#endif
- return;
+ return 1;
give_sigsegv:
force_sigsegv(sig, current);
+ return 0;
}
/*
* OK, we're invoking a handler
*/
-static void
+static int
handle_signal(unsigned long sig, siginfo_t *info, struct k_sigaction *ka,
sigset_t *oldset, struct pt_regs * regs)
{
+ int ret;
+
/* Are we from a system call? */
if (regs->orig_eax >= 0) {
/* If so, check system call restarting.. */
@@ -569,17 +573,19 @@ handle_signal(unsigned long sig, siginfo_t *info, struct k_sigaction *ka,
/* Set up the stack frame */
if (ka->sa.sa_flags & SA_SIGINFO)
- setup_rt_frame(sig, ka, info, oldset, regs);
+ ret = setup_rt_frame(sig, ka, info, oldset, regs);
else
- setup_frame(sig, ka, oldset, regs);
+ ret = setup_frame(sig, ka, oldset, regs);
- if (!(ka->sa.sa_flags & SA_NODEFER)) {
+ if (ret && !(ka->sa.sa_flags & SA_NODEFER)) {
spin_lock_irq(&current->sighand->siglock);
sigorsets(&current->blocked,&current->blocked,&ka->sa.sa_mask);
sigaddset(&current->blocked,sig);
recalc_sigpending();
spin_unlock_irq(&current->sighand->siglock);
}
+
+ return ret;
}
/*
@@ -599,13 +605,11 @@ int fastcall do_signal(struct pt_regs *regs, sigset_t *oldset)
* kernel mode. Just return without doing anything
* if so.
*/
- if ((regs->xcs & 3) != 3)
+ if (!user_mode(regs))
return 1;
- if (current->flags & PF_FREEZE) {
- refrigerator(0);
+ if (try_to_freeze())
goto no_signal;
- }
if (!oldset)
oldset = &current->blocked;
@@ -618,12 +622,11 @@ int fastcall do_signal(struct pt_regs *regs, sigset_t *oldset)
* inside the kernel.
*/
if (unlikely(current->thread.debugreg[7])) {
- loaddebug(&current->thread, 7);
+ set_debugreg(current->thread.debugreg[7], 7);
}
/* Whee! Actually deliver the signal. */
- handle_signal(signr, &info, &ka, oldset, regs);
- return 1;
+ return handle_signal(signr, &info, &ka, oldset, regs);
}
no_signal:
diff --git a/arch/i386/kernel/smp.c b/arch/i386/kernel/smp.c
index 6223c33ac91c..cec4bde67161 100644
--- a/arch/i386/kernel/smp.c
+++ b/arch/i386/kernel/smp.c
@@ -19,6 +19,8 @@
#include <linux/mc146818rtc.h>
#include <linux/cache.h>
#include <linux/interrupt.h>
+#include <linux/cpu.h>
+#include <linux/module.h>
#include <asm/mtrr.h>
#include <asm/tlbflush.h>
@@ -163,7 +165,7 @@ void send_IPI_mask_bitmask(cpumask_t cpumask, int vector)
unsigned long flags;
local_irq_save(flags);
-
+ WARN_ON(mask & ~cpus_addr(cpu_online_map)[0]);
/*
* Wait for idle.
*/
@@ -345,21 +347,21 @@ out:
static void flush_tlb_others(cpumask_t cpumask, struct mm_struct *mm,
unsigned long va)
{
- cpumask_t tmp;
/*
* A couple of (to be removed) sanity checks:
*
- * - we do not send IPIs to not-yet booted CPUs.
* - current CPU must not be in mask
* - mask must exist :)
*/
BUG_ON(cpus_empty(cpumask));
-
- cpus_and(tmp, cpumask, cpu_online_map);
- BUG_ON(!cpus_equal(cpumask, tmp));
BUG_ON(cpu_isset(smp_processor_id(), cpumask));
BUG_ON(!mm);
+ /* If a CPU which we ran on has gone down, OK. */
+ cpus_and(cpumask, cpumask, cpu_online_map);
+ if (cpus_empty(cpumask))
+ return;
+
/*
* i'm not happy about this global shared spinlock in the
* MM hot path, but we'll see how contended it is.
@@ -452,6 +454,7 @@ void flush_tlb_page(struct vm_area_struct * vma, unsigned long va)
preempt_enable();
}
+EXPORT_SYMBOL(flush_tlb_page);
static void do_flush_tlb_all(void* info)
{
@@ -474,6 +477,7 @@ void flush_tlb_all(void)
*/
void smp_send_reschedule(int cpu)
{
+ WARN_ON(cpu_is_offline(cpu));
send_IPI_mask(cpumask_of_cpu(cpu), RESCHEDULE_VECTOR);
}
@@ -491,6 +495,16 @@ struct call_data_struct {
int wait;
};
+void lock_ipi_call_lock(void)
+{
+ spin_lock_irq(&call_lock);
+}
+
+void unlock_ipi_call_lock(void)
+{
+ spin_unlock_irq(&call_lock);
+}
+
static struct call_data_struct * call_data;
/*
@@ -514,10 +528,15 @@ int smp_call_function (void (*func) (void *info), void *info, int nonatomic,
*/
{
struct call_data_struct data;
- int cpus = num_online_cpus()-1;
+ int cpus;
- if (!cpus)
+ /* Holding any lock stops cpus from going down. */
+ spin_lock(&call_lock);
+ cpus = num_online_cpus() - 1;
+ if (!cpus) {
+ spin_unlock(&call_lock);
return 0;
+ }
/* Can deadlock when called with interrupts disabled */
WARN_ON(irqs_disabled());
@@ -529,7 +548,6 @@ int smp_call_function (void (*func) (void *info), void *info, int nonatomic,
if (wait)
atomic_set(&data.finished, 0);
- spin_lock(&call_lock);
call_data = &data;
mb();
@@ -547,6 +565,7 @@ int smp_call_function (void (*func) (void *info), void *info, int nonatomic,
return 0;
}
+EXPORT_SYMBOL(smp_call_function);
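
The reordering above exists because the online-CPU count and the set of IPI recipients must be computed under the same lock, or a CPU going offline in between leaves the caller waiting for an ack that never comes. A pthreads sketch of the fixed ordering (illustration only; call_lock in the kernel is a spinlock, not a mutex):

    #include <pthread.h>
    #include <stdio.h>

    static pthread_mutex_t call_lock = PTHREAD_MUTEX_INITIALIZER;
    static int online_cpus = 4;            /* stand-in for num_online_cpus() */

    static int smp_call_function_sketch(void (*func)(void *), void *info)
    {
            pthread_mutex_lock(&call_lock);
            int cpus = online_cpus - 1;    /* sampled under the lock now */
            if (!cpus) {
                    pthread_mutex_unlock(&call_lock);
                    return 0;
            }
            /* ...send IPIs to exactly `cpus` targets and wait for acks... */
            func(info);
            pthread_mutex_unlock(&call_lock);
            return 0;
    }

    static void hello(void *unused) { (void)unused; puts("called"); }

    int main(void)
    {
            return smp_call_function_sketch(hello, NULL);
    }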
static void stop_this_cpu (void * dummy)
{
diff --git a/arch/i386/kernel/smpboot.c b/arch/i386/kernel/smpboot.c
index 35bfe138cb1a..d66bf489a2e9 100644
--- a/arch/i386/kernel/smpboot.c
+++ b/arch/i386/kernel/smpboot.c
@@ -44,6 +44,9 @@
#include <linux/smp_lock.h>
#include <linux/irq.h>
#include <linux/bootmem.h>
+#include <linux/notifier.h>
+#include <linux/cpu.h>
+#include <linux/percpu.h>
#include <linux/delay.h>
#include <linux/mc146818rtc.h>
@@ -56,24 +59,46 @@
#include <smpboot_hooks.h>
/* Set if we find a B stepping CPU */
-static int __initdata smp_b_stepping;
+static int __devinitdata smp_b_stepping;
/* Number of siblings per CPU package */
int smp_num_siblings = 1;
-int phys_proc_id[NR_CPUS]; /* Package ID of each logical CPU */
+#ifdef CONFIG_X86_HT
+EXPORT_SYMBOL(smp_num_siblings);
+#endif
+
+/* Package ID of each logical CPU */
+int phys_proc_id[NR_CPUS] = {[0 ... NR_CPUS-1] = BAD_APICID};
EXPORT_SYMBOL(phys_proc_id);
-int cpu_core_id[NR_CPUS]; /* Core ID of each logical CPU */
+
+/* Core ID of each logical CPU */
+int cpu_core_id[NR_CPUS] = {[0 ... NR_CPUS-1] = BAD_APICID};
EXPORT_SYMBOL(cpu_core_id);
+cpumask_t cpu_sibling_map[NR_CPUS];
+EXPORT_SYMBOL(cpu_sibling_map);
+
+cpumask_t cpu_core_map[NR_CPUS];
+EXPORT_SYMBOL(cpu_core_map);
+
/* bitmap of online cpus */
cpumask_t cpu_online_map;
+EXPORT_SYMBOL(cpu_online_map);
cpumask_t cpu_callin_map;
cpumask_t cpu_callout_map;
+EXPORT_SYMBOL(cpu_callout_map);
static cpumask_t smp_commenced_mask;
+/* The TSC's upper 32 bits can't be written on earlier CPUs (before Prescott),
+ * so there is no way to resync an AP against the BP. TBD: for Prescott and
+ * above, we should use IA64's algorithm.
+ */
+static int __devinitdata tsc_sync_disabled;
+
/* Per CPU bogomips and other parameters */
struct cpuinfo_x86 cpu_data[NR_CPUS] __cacheline_aligned;
+EXPORT_SYMBOL(cpu_data);
u8 x86_cpu_to_apicid[NR_CPUS] =
{ [0 ... NR_CPUS-1] = 0xff };
@@ -90,13 +115,16 @@ static int trampoline_exec;
static void map_cpu_to_logical_apicid(void);
+/* State of each CPU. */
+DEFINE_PER_CPU(int, cpu_state) = { 0 };
+
/*
* Currently trivial. Write the real->protected mode
* bootstrap into the page concerned. The caller
* has made sure it's suitably aligned.
*/
-static unsigned long __init setup_trampoline(void)
+static unsigned long __devinit setup_trampoline(void)
{
memcpy(trampoline_base, trampoline_data, trampoline_end - trampoline_data);
return virt_to_phys(trampoline_base);
@@ -126,7 +154,7 @@ void __init smp_alloc_memory(void)
* a given CPU
*/
-static void __init smp_store_cpu_info(int id)
+static void __devinit smp_store_cpu_info(int id)
{
struct cpuinfo_x86 *c = cpu_data + id;
@@ -199,7 +227,7 @@ static void __init synchronize_tsc_bp (void)
unsigned long long t0;
unsigned long long sum, avg;
long long delta;
- unsigned long one_usec;
+ unsigned int one_usec;
int buggy = 0;
printk(KERN_INFO "checking TSC synchronization across %u CPUs: ", num_booting_cpus());
@@ -320,7 +348,7 @@ extern void calibrate_delay(void);
static atomic_t init_deasserted;
-static void __init smp_callin(void)
+static void __devinit smp_callin(void)
{
int cpuid, phys_id;
unsigned long timeout;
@@ -405,16 +433,48 @@ static void __init smp_callin(void)
/*
* Synchronize the TSC with the BP
*/
- if (cpu_has_tsc && cpu_khz)
+ if (cpu_has_tsc && cpu_khz && !tsc_sync_disabled)
synchronize_tsc_ap();
}
static int cpucount;
+static inline void
+set_cpu_sibling_map(int cpu)
+{
+ int i;
+
+ if (smp_num_siblings > 1) {
+ for (i = 0; i < NR_CPUS; i++) {
+ if (!cpu_isset(i, cpu_callout_map))
+ continue;
+ if (cpu_core_id[cpu] == cpu_core_id[i]) {
+ cpu_set(i, cpu_sibling_map[cpu]);
+ cpu_set(cpu, cpu_sibling_map[i]);
+ }
+ }
+ } else {
+ cpu_set(cpu, cpu_sibling_map[cpu]);
+ }
+
+ if (current_cpu_data.x86_num_cores > 1) {
+ for (i = 0; i < NR_CPUS; i++) {
+ if (!cpu_isset(i, cpu_callout_map))
+ continue;
+ if (phys_proc_id[cpu] == phys_proc_id[i]) {
+ cpu_set(i, cpu_core_map[cpu]);
+ cpu_set(cpu, cpu_core_map[i]);
+ }
+ }
+ } else {
+ cpu_core_map[cpu] = cpu_sibling_map[cpu];
+ }
+}
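
By construction the maps built here are symmetric: if cpu A appears in B's sibling map, B appears in A's. A toy check of that invariant with a plain bitmask standing in for cpumask_t:

    #include <assert.h>

    #define NCPUS 4
    static unsigned int sibling[NCPUS];                 /* stand-in for cpumask_t[] */
    static const int core_id[NCPUS] = { 0, 0, 1, 1 };   /* two HT pairs */

    static void set_sibling_map_sketch(int cpu)
    {
            for (int i = 0; i < NCPUS; i++)
                    if (core_id[cpu] == core_id[i]) {
                            sibling[cpu] |= 1u << i;
                            sibling[i]   |= 1u << cpu;
                    }
    }

    int main(void)
    {
            for (int c = 0; c < NCPUS; c++)
                    set_sibling_map_sketch(c);
            for (int a = 0; a < NCPUS; a++)
                    for (int b = 0; b < NCPUS; b++)
                            assert(!!(sibling[a] & 1u << b) ==
                                   !!(sibling[b] & 1u << a));
            return 0;
    }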
+
/*
* Activate a secondary processor.
*/
-static void __init start_secondary(void *unused)
+static void __devinit start_secondary(void *unused)
{
/*
* Dont put anything before smp_callin(), SMP
@@ -437,7 +497,23 @@ static void __init start_secondary(void *unused)
* the local TLBs too.
*/
local_flush_tlb();
+
+ /* This must be done before setting cpu_online_map */
+ set_cpu_sibling_map(raw_smp_processor_id());
+ wmb();
+
+ /*
+ * We need to hold call_lock, so there is no inconsistency
+ * between the time smp_call_function() determines the number of
+ * IPI recipients and the time when the determination is made
+ * as to which cpus receive the IPI. Holding this
+ * lock also keeps this cpu out of any smp_call_function()
+ * that is currently in progress.
+ */
+ lock_ipi_call_lock();
cpu_set(smp_processor_id(), cpu_online_map);
+ unlock_ipi_call_lock();
+ per_cpu(cpu_state, smp_processor_id()) = CPU_ONLINE;
/* We can take interrupts now: we're officially "up". */
local_irq_enable();
@@ -452,7 +528,7 @@ static void __init start_secondary(void *unused)
* from the task structure
* This function must not return.
*/
-void __init initialize_secondary(void)
+void __devinit initialize_secondary(void)
{
/*
* We don't actually need to load the full TSS,
@@ -566,7 +642,7 @@ static inline void __inquire_remote_apic(int apicid)
* INIT, INIT, STARTUP sequence will reset the chip hard for us, and this
* won't ... remember to clear down the APIC, etc later.
*/
-static int __init
+static int __devinit
wakeup_secondary_cpu(int logical_apicid, unsigned long start_eip)
{
unsigned long send_status = 0, accept_status = 0;
@@ -612,7 +688,7 @@ wakeup_secondary_cpu(int logical_apicid, unsigned long start_eip)
#endif /* WAKE_SECONDARY_VIA_NMI */
#ifdef WAKE_SECONDARY_VIA_INIT
-static int __init
+static int __devinit
wakeup_secondary_cpu(int phys_apicid, unsigned long start_eip)
{
unsigned long send_status = 0, accept_status = 0;
@@ -747,8 +823,43 @@ wakeup_secondary_cpu(int phys_apicid, unsigned long start_eip)
#endif /* WAKE_SECONDARY_VIA_INIT */
extern cpumask_t cpu_initialized;
+static inline int alloc_cpu_id(void)
+{
+ cpumask_t tmp_map;
+ int cpu;
+ cpus_complement(tmp_map, cpu_present_map);
+ cpu = first_cpu(tmp_map);
+ if (cpu >= NR_CPUS)
+ return -ENODEV;
+ return cpu;
+}
+
+#ifdef CONFIG_HOTPLUG_CPU
+static struct task_struct * __devinitdata cpu_idle_tasks[NR_CPUS];
+static inline struct task_struct * alloc_idle_task(int cpu)
+{
+ struct task_struct *idle;
+
+ if ((idle = cpu_idle_tasks[cpu]) != NULL) {
+ /* initialize thread_struct; we really want to avoid
+ * destroying the idle thread
+ */
+ idle->thread.esp = (unsigned long)(((struct pt_regs *)
+ (THREAD_SIZE + (unsigned long) idle->thread_info)) - 1);
+ init_idle(idle, cpu);
+ return idle;
+ }
+ idle = fork_idle(cpu);
-static int __init do_boot_cpu(int apicid)
+ if (!IS_ERR(idle))
+ cpu_idle_tasks[cpu] = idle;
+ return idle;
+}
+#else
+#define alloc_idle_task(cpu) fork_idle(cpu)
+#endif
+
+static int __devinit do_boot_cpu(int apicid, int cpu)
/*
* NOTE - on most systems this is a PHYSICAL apic ID, but on multiquad
* (ie clustered apic addressing mode), this is a LOGICAL apic ID.
@@ -757,16 +868,17 @@ static int __init do_boot_cpu(int apicid)
{
struct task_struct *idle;
unsigned long boot_error;
- int timeout, cpu;
+ int timeout;
unsigned long start_eip;
unsigned short nmi_high = 0, nmi_low = 0;
- cpu = ++cpucount;
+ ++cpucount;
+
/*
* We can't use kernel_thread since we must avoid to
* reschedule the child.
*/
- idle = fork_idle(cpu);
+ idle = alloc_idle_task(cpu);
if (IS_ERR(idle))
panic("failed fork for CPU %d", cpu);
idle->thread.eip = (unsigned long) start_secondary;
@@ -833,13 +945,16 @@ static int __init do_boot_cpu(int apicid)
inquire_remote_apic(apicid);
}
}
- x86_cpu_to_apicid[cpu] = apicid;
+
if (boot_error) {
/* Try to put things back the way they were before ... */
unmap_cpu_to_logical_apicid(cpu);
cpu_clear(cpu, cpu_callout_map); /* was set here (do_boot_cpu()) */
cpu_clear(cpu, cpu_initialized); /* was set by cpu_init() */
cpucount--;
+ } else {
+ x86_cpu_to_apicid[cpu] = apicid;
+ cpu_set(cpu, cpu_present_map);
}
/* mark "stuck" area as not stuck */
@@ -848,6 +963,75 @@ static int __init do_boot_cpu(int apicid)
return boot_error;
}
+#ifdef CONFIG_HOTPLUG_CPU
+void cpu_exit_clear(void)
+{
+ int cpu = raw_smp_processor_id();
+
+ idle_task_exit();
+
+ cpucount --;
+ cpu_uninit();
+ irq_ctx_exit(cpu);
+
+ cpu_clear(cpu, cpu_callout_map);
+ cpu_clear(cpu, cpu_callin_map);
+ cpu_clear(cpu, cpu_present_map);
+
+ cpu_clear(cpu, smp_commenced_mask);
+ unmap_cpu_to_logical_apicid(cpu);
+}
+
+struct warm_boot_cpu_info {
+ struct completion *complete;
+ int apicid;
+ int cpu;
+};
+
+static void __devinit do_warm_boot_cpu(void *p)
+{
+ struct warm_boot_cpu_info *info = p;
+ do_boot_cpu(info->apicid, info->cpu);
+ complete(info->complete);
+}
+
+int __devinit smp_prepare_cpu(int cpu)
+{
+ DECLARE_COMPLETION(done);
+ struct warm_boot_cpu_info info;
+ struct work_struct task;
+ int apicid, ret;
+
+ lock_cpu_hotplug();
+ apicid = x86_cpu_to_apicid[cpu];
+ if (apicid == BAD_APICID) {
+ ret = -ENODEV;
+ goto exit;
+ }
+
+ info.complete = &done;
+ info.apicid = apicid;
+ info.cpu = cpu;
+ INIT_WORK(&task, do_warm_boot_cpu, &info);
+
+ tsc_sync_disabled = 1;
+
+ /* init low mem mapping */
+ memcpy(swapper_pg_dir, swapper_pg_dir + USER_PGD_PTRS,
+ sizeof(swapper_pg_dir[0]) * KERNEL_PGD_PTRS);
+ flush_tlb_all();
+ schedule_work(&task);
+ wait_for_completion(&done);
+
+ tsc_sync_disabled = 0;
+ zap_low_mappings();
+ ret = 0;
+exit:
+ unlock_cpu_hotplug();
+ return ret;
+}
+#endif
+
static void smp_tune_scheduling (void)
{
unsigned long cachesize; /* kB */
@@ -885,10 +1069,9 @@ static void smp_tune_scheduling (void)
static int boot_cpu_logical_apicid;
/* Where the IO area was mapped on multiquad, always 0 otherwise */
void *xquad_portio;
-
-cpumask_t cpu_sibling_map[NR_CPUS] __cacheline_aligned;
-cpumask_t cpu_core_map[NR_CPUS] __cacheline_aligned;
-EXPORT_SYMBOL(cpu_core_map);
+#ifdef CONFIG_X86_NUMAQ
+EXPORT_SYMBOL(xquad_portio);
+#endif
static void __init smp_boot_cpus(unsigned int max_cpus)
{
@@ -1001,7 +1184,7 @@ static void __init smp_boot_cpus(unsigned int max_cpus)
if (max_cpus <= cpucount+1)
continue;
- if (do_boot_cpu(apicid))
+ if (((cpu = alloc_cpu_id()) <= 0) || do_boot_cpu(apicid, cpu))
printk("CPU #%d not responding - cannot use it.\n",
apicid);
else
@@ -1053,42 +1236,8 @@ static void __init smp_boot_cpus(unsigned int max_cpus)
cpus_clear(cpu_core_map[cpu]);
}
- for (cpu = 0; cpu < NR_CPUS; cpu++) {
- struct cpuinfo_x86 *c = cpu_data + cpu;
- int siblings = 0;
- int i;
- if (!cpu_isset(cpu, cpu_callout_map))
- continue;
-
- if (smp_num_siblings > 1) {
- for (i = 0; i < NR_CPUS; i++) {
- if (!cpu_isset(i, cpu_callout_map))
- continue;
- if (cpu_core_id[cpu] == cpu_core_id[i]) {
- siblings++;
- cpu_set(i, cpu_sibling_map[cpu]);
- }
- }
- } else {
- siblings++;
- cpu_set(cpu, cpu_sibling_map[cpu]);
- }
-
- if (siblings != smp_num_siblings)
- printk(KERN_WARNING "WARNING: %d siblings found for CPU%d, should be %d\n", siblings, cpu, smp_num_siblings);
-
- if (c->x86_num_cores > 1) {
- for (i = 0; i < NR_CPUS; i++) {
- if (!cpu_isset(i, cpu_callout_map))
- continue;
- if (phys_proc_id[cpu] == phys_proc_id[i]) {
- cpu_set(i, cpu_core_map[cpu]);
- }
- }
- } else {
- cpu_core_map[cpu] = cpu_sibling_map[cpu];
- }
- }
+ cpu_set(0, cpu_sibling_map[0]);
+ cpu_set(0, cpu_core_map[0]);
smpboot_setup_io_apic();
@@ -1105,6 +1254,9 @@ static void __init smp_boot_cpus(unsigned int max_cpus)
who understands all this stuff should rewrite it properly. --RR 15/Jul/02 */
void __init smp_prepare_cpus(unsigned int max_cpus)
{
+ smp_commenced_mask = cpumask_of_cpu(0);
+ cpu_callin_map = cpumask_of_cpu(0);
+ mb();
smp_boot_cpus(max_cpus);
}
@@ -1112,23 +1264,98 @@ void __devinit smp_prepare_boot_cpu(void)
{
cpu_set(smp_processor_id(), cpu_online_map);
cpu_set(smp_processor_id(), cpu_callout_map);
+ cpu_set(smp_processor_id(), cpu_present_map);
+ per_cpu(cpu_state, smp_processor_id()) = CPU_ONLINE;
}
-int __devinit __cpu_up(unsigned int cpu)
+#ifdef CONFIG_HOTPLUG_CPU
+static void
+remove_siblinginfo(int cpu)
{
- /* This only works at boot for x86. See "rewrite" above. */
- if (cpu_isset(cpu, smp_commenced_mask)) {
- local_irq_enable();
- return -ENOSYS;
+ int sibling;
+
+ for_each_cpu_mask(sibling, cpu_sibling_map[cpu])
+ cpu_clear(cpu, cpu_sibling_map[sibling]);
+ for_each_cpu_mask(sibling, cpu_core_map[cpu])
+ cpu_clear(cpu, cpu_core_map[sibling]);
+ cpus_clear(cpu_sibling_map[cpu]);
+ cpus_clear(cpu_core_map[cpu]);
+ phys_proc_id[cpu] = BAD_APICID;
+ cpu_core_id[cpu] = BAD_APICID;
+}
+
+int __cpu_disable(void)
+{
+ cpumask_t map = cpu_online_map;
+ int cpu = smp_processor_id();
+
+ /*
+ * Perhaps use cpufreq to drop frequency, but that could go
+ * into generic code.
+ *
+ * We won't take down the boot processor on i386 due to some
+ * interrupts only being able to be serviced by the BSP.
+ * Especially so if we're not using an IOAPIC -zwane
+ */
+ if (cpu == 0)
+ return -EBUSY;
+
+ /* We enable the timer again on the exit path of the death loop */
+ disable_APIC_timer();
+ /* Allow any queued timer interrupts to get serviced */
+ local_irq_enable();
+ mdelay(1);
+ local_irq_disable();
+
+ remove_siblinginfo(cpu);
+
+ cpu_clear(cpu, map);
+ fixup_irqs(map);
+ /* It's now safe to remove this processor from the online map */
+ cpu_clear(cpu, cpu_online_map);
+ return 0;
+}
+
+void __cpu_die(unsigned int cpu)
+{
+ /* We don't do anything here: the idle task fakes death itself. */
+ unsigned int i;
+
+ for (i = 0; i < 10; i++) {
+ /* They ack this in play_dead by setting CPU_DEAD */
+ if (per_cpu(cpu_state, cpu) == CPU_DEAD) {
+ printk ("CPU %d is now offline\n", cpu);
+ return;
+ }
+ current->state = TASK_UNINTERRUPTIBLE;
+ schedule_timeout(HZ/10);
}
+ printk(KERN_ERR "CPU %u didn't die...\n", cpu);
+}
+#else /* ... !CONFIG_HOTPLUG_CPU */
+int __cpu_disable(void)
+{
+ return -ENOSYS;
+}
+void __cpu_die(unsigned int cpu)
+{
+ /* We said "no" in __cpu_disable */
+ BUG();
+}
+#endif /* CONFIG_HOTPLUG_CPU */
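
__cpu_die() is the waiting half of a two-sided handshake: the dying CPU's play_dead() (added to process.c earlier in this patch) stores CPU_DEAD into its per-cpu cpu_state, and the survivor polls for it, sleeping HZ/10 per try. A compressed sketch of the protocol with a C11 atomic standing in for the per-cpu variable:

    #include <stdatomic.h>
    #include <stdio.h>

    enum { CPU_UP_PREPARE, CPU_ONLINE, CPU_DEAD };
    static _Atomic int cpu_state;     /* stand-in for per_cpu(cpu_state, cpu) */

    static void play_dead_sketch(void)        /* runs on the dying CPU */
    {
            /* flush caches, then acknowledge before halting forever */
            atomic_store(&cpu_state, CPU_DEAD);
    }

    static int cpu_die_sketch(void)           /* runs on a surviving CPU */
    {
            for (int i = 0; i < 10; i++)      /* the patch sleeps HZ/10 per try */
                    if (atomic_load(&cpu_state) == CPU_DEAD)
                            return 0;
            return -1;                        /* "CPU didn't die" */
    }

    int main(void)
    {
            play_dead_sketch();
            printf("%s\n", cpu_die_sketch() ? "stuck" : "offline");
            return 0;
    }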
+
+int __devinit __cpu_up(unsigned int cpu)
+{
/* In case one didn't come up */
if (!cpu_isset(cpu, cpu_callin_map)) {
+ printk(KERN_DEBUG "skipping cpu%d, didn't come online\n", cpu);
local_irq_enable();
return -EIO;
}
local_irq_enable();
+ per_cpu(cpu_state, cpu) = CPU_UP_PREPARE;
/* Unleash the CPU! */
cpu_set(cpu, smp_commenced_mask);
while (!cpu_isset(cpu, cpu_online_map))
@@ -1142,10 +1369,12 @@ void __init smp_cpus_done(unsigned int max_cpus)
setup_ioapic_dest();
#endif
zap_low_mappings();
+#ifndef CONFIG_HOTPLUG_CPU
/*
* Disable executability of the SMP trampoline:
*/
set_kernel_exec((unsigned long)trampoline_base, trampoline_exec);
+#endif
}
void __init smp_intr_init(void)
diff --git a/arch/i386/kernel/syscall_table.S b/arch/i386/kernel/syscall_table.S
index 6cd1ed311f02..442a6e937b19 100644
--- a/arch/i386/kernel/syscall_table.S
+++ b/arch/i386/kernel/syscall_table.S
@@ -251,7 +251,7 @@ ENTRY(sys_call_table)
.long sys_io_submit
.long sys_io_cancel
.long sys_fadvise64 /* 250 */
- .long sys_ni_syscall
+ .long sys_set_zone_reclaim
.long sys_exit_group
.long sys_lookup_dcookie
.long sys_epoll_create
@@ -283,7 +283,7 @@ ENTRY(sys_call_table)
.long sys_mq_timedreceive /* 280 */
.long sys_mq_notify
.long sys_mq_getsetattr
- .long sys_ni_syscall /* reserved for kexec */
+ .long sys_kexec_load
.long sys_waitid
.long sys_ni_syscall /* 285 */ /* available */
.long sys_add_key
diff --git a/arch/i386/kernel/sysenter.c b/arch/i386/kernel/sysenter.c
index 960d8bd137d0..0bada1870bdf 100644
--- a/arch/i386/kernel/sysenter.c
+++ b/arch/i386/kernel/sysenter.c
@@ -21,11 +21,16 @@
extern asmlinkage void sysenter_entry(void);
-void enable_sep_cpu(void *info)
+void enable_sep_cpu(void)
{
int cpu = get_cpu();
struct tss_struct *tss = &per_cpu(init_tss, cpu);
+ if (!boot_cpu_has(X86_FEATURE_SEP)) {
+ put_cpu();
+ return;
+ }
+
tss->ss1 = __KERNEL_CS;
tss->esp1 = sizeof(struct tss_struct) + (unsigned long) tss;
wrmsr(MSR_IA32_SYSENTER_CS, __KERNEL_CS, 0);
@@ -41,7 +46,7 @@ void enable_sep_cpu(void *info)
extern const char vsyscall_int80_start, vsyscall_int80_end;
extern const char vsyscall_sysenter_start, vsyscall_sysenter_end;
-static int __init sysenter_setup(void)
+int __init sysenter_setup(void)
{
void *page = (void *)get_zeroed_page(GFP_ATOMIC);
@@ -58,8 +63,5 @@ static int __init sysenter_setup(void)
&vsyscall_sysenter_start,
&vsyscall_sysenter_end - &vsyscall_sysenter_start);
- on_each_cpu(enable_sep_cpu, NULL, 1, 1);
return 0;
}
-
-__initcall(sysenter_setup);
diff --git a/arch/i386/kernel/time.c b/arch/i386/kernel/time.c
index a0dcb7c87c30..e68d9fdb0759 100644
--- a/arch/i386/kernel/time.c
+++ b/arch/i386/kernel/time.c
@@ -77,11 +77,13 @@ u64 jiffies_64 = INITIAL_JIFFIES;
EXPORT_SYMBOL(jiffies_64);
-unsigned long cpu_khz; /* Detected as we calibrate the TSC */
+unsigned int cpu_khz; /* Detected as we calibrate the TSC */
+EXPORT_SYMBOL(cpu_khz);
extern unsigned long wall_jiffies;
DEFINE_SPINLOCK(rtc_lock);
+EXPORT_SYMBOL(rtc_lock);
DEFINE_SPINLOCK(i8253_lock);
EXPORT_SYMBOL(i8253_lock);
@@ -324,6 +326,8 @@ unsigned long get_cmos_time(void)
return retval;
}
+EXPORT_SYMBOL(get_cmos_time);
+
static void sync_cmos_clock(unsigned long dummy);
static struct timer_list sync_cmos_timer =
diff --git a/arch/i386/kernel/time_hpet.c b/arch/i386/kernel/time_hpet.c
index 10a0cbb88e75..658c0629ba6a 100644
--- a/arch/i386/kernel/time_hpet.c
+++ b/arch/i386/kernel/time_hpet.c
@@ -50,7 +50,7 @@ static void hpet_writel(unsigned long d, unsigned long a)
* comparator value and continue. Next tick can be caught by checking
* for a change in the comparator value. Used in apic.c.
*/
-static void __init wait_hpet_tick(void)
+static void __devinit wait_hpet_tick(void)
{
unsigned int start_cmp_val, end_cmp_val;
diff --git a/arch/i386/kernel/timers/common.c b/arch/i386/kernel/timers/common.c
index f7f90005e22e..8163fe0cf1f0 100644
--- a/arch/i386/kernel/timers/common.c
+++ b/arch/i386/kernel/timers/common.c
@@ -6,6 +6,7 @@
#include <linux/timex.h>
#include <linux/errno.h>
#include <linux/jiffies.h>
+#include <linux/module.h>
#include <asm/io.h>
#include <asm/timer.h>
@@ -24,7 +25,7 @@
#define CALIBRATE_TIME (5 * 1000020/HZ)
-unsigned long __init calibrate_tsc(void)
+unsigned long calibrate_tsc(void)
{
mach_prepare_counter();
@@ -85,7 +86,7 @@ bad_ctc:
#define CALIBRATE_CNT_HPET (5 * hpet_tick)
#define CALIBRATE_TIME_HPET (5 * KERNEL_TICK_USEC)
-unsigned long __init calibrate_tsc_hpet(unsigned long *tsc_hpet_quotient_ptr)
+unsigned long __devinit calibrate_tsc_hpet(unsigned long *tsc_hpet_quotient_ptr)
{
unsigned long tsc_startlow, tsc_starthigh;
unsigned long tsc_endlow, tsc_endhigh;
@@ -138,8 +139,17 @@ bad_calibration:
}
#endif
+
+unsigned long read_timer_tsc(void)
+{
+ unsigned long retval;
+ rdtscl(retval);
+ return retval;
+}
+
+
/* calculate cpu_khz */
-void __init init_cpu_khz(void)
+void init_cpu_khz(void)
{
if (cpu_has_tsc) {
unsigned long tsc_quotient = calibrate_tsc();
@@ -153,8 +163,10 @@ void __init init_cpu_khz(void)
:"=a" (cpu_khz), "=d" (edx)
:"r" (tsc_quotient),
"0" (eax), "1" (edx));
- printk("Detected %lu.%03lu MHz processor.\n", cpu_khz / 1000, cpu_khz % 1000);
+ printk("Detected %u.%03u MHz processor.\n",
+ cpu_khz / 1000, cpu_khz % 1000);
}
}
}
}
+
diff --git a/arch/i386/kernel/timers/timer.c b/arch/i386/kernel/timers/timer.c
index a3d6a288088b..7e39ed8e33f8 100644
--- a/arch/i386/kernel/timers/timer.c
+++ b/arch/i386/kernel/timers/timer.c
@@ -64,3 +64,12 @@ struct timer_opts* __init select_timer(void)
panic("select_timer: Cannot find a suitable timer\n");
return NULL;
}
+
+int read_current_timer(unsigned long *timer_val)
+{
+ if (cur_timer->read_timer) {
+ *timer_val = cur_timer->read_timer();
+ return 0;
+ }
+ return -1;
+}
diff --git a/arch/i386/kernel/timers/timer_hpet.c b/arch/i386/kernel/timers/timer_hpet.c
index f778f471a09a..d766e0963ac1 100644
--- a/arch/i386/kernel/timers/timer_hpet.c
+++ b/arch/i386/kernel/timers/timer_hpet.c
@@ -158,7 +158,7 @@ static int __init init_hpet(char* override)
{ unsigned long eax=0, edx=1000;
ASM_DIV64_REG(cpu_khz, edx, tsc_quotient,
eax, edx);
- printk("Detected %lu.%03lu MHz processor.\n",
+ printk("Detected %u.%03u MHz processor.\n",
cpu_khz / 1000, cpu_khz % 1000);
}
set_cyc2ns_scale(cpu_khz/1000);
@@ -186,6 +186,7 @@ static struct timer_opts timer_hpet = {
.get_offset = get_offset_hpet,
.monotonic_clock = monotonic_clock_hpet,
.delay = delay_hpet,
+ .read_timer = read_timer_tsc,
};
struct init_timer_opts __initdata timer_hpet_init = {
diff --git a/arch/i386/kernel/timers/timer_pm.c b/arch/i386/kernel/timers/timer_pm.c
index d77f22030fe6..4ef20e663498 100644
--- a/arch/i386/kernel/timers/timer_pm.c
+++ b/arch/i386/kernel/timers/timer_pm.c
@@ -246,6 +246,7 @@ static struct timer_opts timer_pmtmr = {
.get_offset = get_offset_pmtmr,
.monotonic_clock = monotonic_clock_pmtmr,
.delay = delay_pmtmr,
+ .read_timer = read_timer_tsc,
};
struct init_timer_opts __initdata timer_pmtmr_init = {
diff --git a/arch/i386/kernel/timers/timer_tsc.c b/arch/i386/kernel/timers/timer_tsc.c
index 7926d967be00..f46e625bab67 100644
--- a/arch/i386/kernel/timers/timer_tsc.c
+++ b/arch/i386/kernel/timers/timer_tsc.c
@@ -33,7 +33,7 @@ static struct timer_opts timer_tsc;
static inline void cpufreq_delayed_get(void);
-int tsc_disable __initdata = 0;
+int tsc_disable __devinitdata = 0;
extern spinlock_t i8253_lock;
@@ -256,7 +256,7 @@ static unsigned long loops_per_jiffy_ref = 0;
#ifndef CONFIG_SMP
static unsigned long fast_gettimeoffset_ref = 0;
-static unsigned long cpu_khz_ref = 0;
+static unsigned int cpu_khz_ref = 0;
#endif
static int
@@ -320,6 +320,26 @@ core_initcall(cpufreq_tsc);
static inline void cpufreq_delayed_get(void) { return; }
#endif
+int recalibrate_cpu_khz(void)
+{
+#ifndef CONFIG_SMP
+ unsigned int cpu_khz_old = cpu_khz;
+
+ if (cpu_has_tsc) {
+ init_cpu_khz();
+ cpu_data[0].loops_per_jiffy =
+ cpufreq_scale(cpu_data[0].loops_per_jiffy,
+ cpu_khz_old,
+ cpu_khz);
+ return 0;
+ } else
+ return -ENODEV;
+#else
+ return -ENODEV;
+#endif
+}
+EXPORT_SYMBOL(recalibrate_cpu_khz);
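
recalibrate_cpu_khz() leans on cpufreq_scale(), which is in spirit a 64-bit-safe old * new / ref; loops_per_jiffy scales linearly with the measured clock. A sketch of that proportionality with made-up numbers:

    #include <stdio.h>
    #include <stdint.h>

    static unsigned long scale_sketch(unsigned long old, unsigned int ref_khz,
                                      unsigned int new_khz)
    {
            return (uint64_t)old * new_khz / ref_khz;  /* cpufreq_scale(), roughly */
    }

    int main(void)
    {
            /* assume recalibration moved 1000000 kHz to 1002000 kHz */
            unsigned long lpj = scale_sketch(4980736, 1000000, 1002000);
            printf("loops_per_jiffy: %lu\n", lpj);     /* about 0.2% higher */
            return 0;
    }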
+
static void mark_offset_tsc(void)
{
unsigned long lost,delay;
@@ -514,7 +534,8 @@ static int __init init_tsc(char* override)
:"=a" (cpu_khz), "=d" (edx)
:"r" (tsc_quotient),
"0" (eax), "1" (edx));
- printk("Detected %lu.%03lu MHz processor.\n", cpu_khz / 1000, cpu_khz % 1000);
+ printk("Detected %u.%03u MHz processor.\n",
+ cpu_khz / 1000, cpu_khz % 1000);
}
set_cyc2ns_scale(cpu_khz/1000);
return 0;
@@ -552,6 +573,7 @@ static struct timer_opts timer_tsc = {
.get_offset = get_offset_tsc,
.monotonic_clock = monotonic_clock_tsc,
.delay = delay_tsc,
+ .read_timer = read_timer_tsc,
};
struct init_timer_opts __initdata timer_tsc_init = {
diff --git a/arch/i386/kernel/traps.c b/arch/i386/kernel/traps.c
index 00c63419c06f..a61f33d06ea3 100644
--- a/arch/i386/kernel/traps.c
+++ b/arch/i386/kernel/traps.c
@@ -27,6 +27,7 @@
#include <linux/ptrace.h>
#include <linux/utsname.h>
#include <linux/kprobes.h>
+#include <linux/kexec.h>
#ifdef CONFIG_EISA
#include <linux/ioport.h>
@@ -104,6 +105,7 @@ int register_die_notifier(struct notifier_block *nb)
spin_unlock_irqrestore(&die_notifier_lock, flags);
return err;
}
+EXPORT_SYMBOL(register_die_notifier);
static inline int valid_stack_ptr(struct thread_info *tinfo, void *p)
{
@@ -209,7 +211,7 @@ void show_registers(struct pt_regs *regs)
esp = (unsigned long) (&regs->esp);
ss = __KERNEL_DS;
- if (regs->xcs & 3) {
+ if (user_mode(regs)) {
in_kernel = 0;
esp = regs->esp;
ss = regs->xss & 0xffff;
@@ -233,22 +235,22 @@ void show_registers(struct pt_regs *regs)
* time of the fault..
*/
if (in_kernel) {
- u8 *eip;
+ u8 __user *eip;
printk("\nStack: ");
show_stack(NULL, (unsigned long*)esp);
printk("Code: ");
- eip = (u8 *)regs->eip - 43;
+ eip = (u8 __user *)regs->eip - 43;
for (i = 0; i < 64; i++, eip++) {
unsigned char c;
- if (eip < (u8 *)PAGE_OFFSET || __get_user(c, eip)) {
+ if (eip < (u8 __user *)PAGE_OFFSET || __get_user(c, eip)) {
printk(" Bad EIP value.");
break;
}
- if (eip == (u8 *)regs->eip)
+ if (eip == (u8 __user *)regs->eip)
printk("<%02x> ", c);
else
printk("%02x ", c);
@@ -265,20 +267,20 @@ static void handle_BUG(struct pt_regs *regs)
char c;
unsigned long eip;
- if (regs->xcs & 3)
+ if (user_mode(regs))
goto no_bug; /* Not in kernel */
eip = regs->eip;
if (eip < PAGE_OFFSET)
goto no_bug;
- if (__get_user(ud2, (unsigned short *)eip))
+ if (__get_user(ud2, (unsigned short __user *)eip))
goto no_bug;
if (ud2 != 0x0b0f)
goto no_bug;
- if (__get_user(line, (unsigned short *)(eip + 2)))
+ if (__get_user(line, (unsigned short __user *)(eip + 2)))
goto bug;
- if (__get_user(file, (char **)(eip + 4)) ||
+ if (__get_user(file, (char * __user *)(eip + 4)) ||
(unsigned long)file < PAGE_OFFSET || __get_user(c, file))
file = "<bad filename>";
@@ -293,6 +295,9 @@ bug:
printk("Kernel BUG\n");
}
+/* This is the path taken when something in the kernel
+ * has gone badly wrong and the task is about to be terminated.
+ */
void die(const char * str, struct pt_regs * regs, long err)
{
static struct {
@@ -306,7 +311,7 @@ void die(const char * str, struct pt_regs * regs, long err)
};
static int die_counter;
- if (die.lock_owner != _smp_processor_id()) {
+ if (die.lock_owner != raw_smp_processor_id()) {
console_verbose();
spin_lock_irq(&die.lock);
die.lock_owner = smp_processor_id();
@@ -340,6 +345,10 @@ void die(const char * str, struct pt_regs * regs, long err)
bust_spinlocks(0);
die.lock_owner = -1;
spin_unlock_irq(&die.lock);
+
+ if (kexec_should_crash(current))
+ crash_kexec(regs);
+
if (in_interrupt())
panic("Fatal exception in interrupt");
@@ -353,26 +362,27 @@ void die(const char * str, struct pt_regs * regs, long err)
static inline void die_if_kernel(const char * str, struct pt_regs * regs, long err)
{
- if (!(regs->eflags & VM_MASK) && !(3 & regs->xcs))
+ if (!user_mode_vm(regs))
die(str, regs, err);
}
static void do_trap(int trapnr, int signr, char *str, int vm86,
struct pt_regs * regs, long error_code, siginfo_t *info)
{
+ struct task_struct *tsk = current;
+ tsk->thread.error_code = error_code;
+ tsk->thread.trap_no = trapnr;
+
if (regs->eflags & VM_MASK) {
if (vm86)
goto vm86_trap;
goto trap_signal;
}
- if (!(regs->xcs & 3))
+ if (!user_mode(regs))
goto kernel_trap;
trap_signal: {
- struct task_struct *tsk = current;
- tsk->thread.error_code = error_code;
- tsk->thread.trap_no = trapnr;
if (info)
force_sig_info(signr, info, tsk);
else
@@ -485,10 +495,13 @@ fastcall void do_general_protection(struct pt_regs * regs, long error_code)
}
put_cpu();
+ current->thread.error_code = error_code;
+ current->thread.trap_no = 13;
+
if (regs->eflags & VM_MASK)
goto gp_in_vm86;
- if (!(regs->xcs & 3))
+ if (!user_mode(regs))
goto gp_in_kernel;
current->thread.error_code = error_code;
@@ -569,6 +582,15 @@ void die_nmi (struct pt_regs *regs, const char *msg)
console_silent();
spin_unlock(&nmi_print_lock);
bust_spinlocks(0);
+
+ /* If we are in the kernel we are probably nested up pretty badly
+ * and might as well get out now while we still can.
+ */
+ if (!user_mode(regs)) {
+ current->thread.trap_no = 2;
+ crash_kexec(regs);
+ }
+
do_exit(SIGSEGV);
}
@@ -624,6 +646,14 @@ fastcall void do_nmi(struct pt_regs * regs, long error_code)
nmi_enter();
cpu = smp_processor_id();
+
+#ifdef CONFIG_HOTPLUG_CPU
+ if (!cpu_online(cpu)) {
+ nmi_exit();
+ return;
+ }
+#endif
+
++nmi_count(cpu);
if (!nmi_callback(regs, cpu))
@@ -636,11 +666,13 @@ void set_nmi_callback(nmi_callback_t callback)
{
nmi_callback = callback;
}
+EXPORT_SYMBOL_GPL(set_nmi_callback);
void unset_nmi_callback(void)
{
nmi_callback = dummy_nmi_callback;
}
+EXPORT_SYMBOL_GPL(unset_nmi_callback);
#ifdef CONFIG_KPROBES
fastcall void do_int3(struct pt_regs *regs, long error_code)
@@ -682,7 +714,7 @@ fastcall void do_debug(struct pt_regs * regs, long error_code)
unsigned int condition;
struct task_struct *tsk = current;
- __asm__ __volatile__("movl %%db6,%0" : "=r" (condition));
+ get_debugreg(condition, 6);
if (notify_die(DIE_DEBUG, "debug", regs, condition, error_code,
SIGTRAP) == NOTIFY_STOP)
@@ -713,7 +745,7 @@ fastcall void do_debug(struct pt_regs * regs, long error_code)
* check for kernel mode by just checking the CPL
* of CS.
*/
- if ((regs->xcs & 3) == 0)
+ if (!user_mode(regs))
goto clear_TF_reenable;
}
@@ -724,9 +756,7 @@ fastcall void do_debug(struct pt_regs * regs, long error_code)
* the signal is delivered.
*/
clear_dr7:
- __asm__("movl %0,%%db7"
- : /* no output */
- : "r" (0));
+ set_debugreg(0, 7);
return;
debug_vm86:
@@ -871,9 +901,9 @@ fastcall void do_simd_coprocessor_error(struct pt_regs * regs,
error_code);
return;
}
- die_if_kernel("cache flush denied", regs, error_code);
current->thread.trap_no = 19;
current->thread.error_code = error_code;
+ die_if_kernel("cache flush denied", regs, error_code);
force_sig(SIGSEGV, current);
}
}
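
With register_die_notifier() now exported, a module can hook oops and
trap events. A sketch under the assumed 2.6-era shapes from
<asm/kdebug.h> and <linux/notifier.h> (handler names are hypothetical):

        static int my_die_handler(struct notifier_block *nb,
                                  unsigned long val, void *data)
        {
                struct die_args *args = data;

                if (val == DIE_OOPS)
                        printk("oops at eip %08lx\n", args->regs->eip);
                return NOTIFY_DONE;
        }

        static struct notifier_block my_die_nb = {
                .notifier_call = my_die_handler,
        };

        /* in module init: */
        register_die_notifier(&my_die_nb);
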
diff --git a/arch/i386/kernel/vmlinux.lds.S b/arch/i386/kernel/vmlinux.lds.S
index e0512cc8bea7..7e01a528a83a 100644
--- a/arch/i386/kernel/vmlinux.lds.S
+++ b/arch/i386/kernel/vmlinux.lds.S
@@ -2,20 +2,23 @@
* Written by Martin Mares <mj@atrey.karlin.mff.cuni.cz>;
*/
+#define LOAD_OFFSET __PAGE_OFFSET
+
#include <asm-generic/vmlinux.lds.h>
#include <asm/thread_info.h>
#include <asm/page.h>
OUTPUT_FORMAT("elf32-i386", "elf32-i386", "elf32-i386")
OUTPUT_ARCH(i386)
-ENTRY(startup_32)
+ENTRY(phys_startup_32)
jiffies = jiffies_64;
SECTIONS
{
- . = __PAGE_OFFSET + 0x100000;
+ . = __KERNEL_START;
+ phys_startup_32 = startup_32 - LOAD_OFFSET;
/* read-only */
_text = .; /* Text and read-only data */
- .text : {
+ .text : AT(ADDR(.text) - LOAD_OFFSET) {
*(.text)
SCHED_TEXT
LOCK_TEXT
@@ -27,49 +30,55 @@ SECTIONS
. = ALIGN(16); /* Exception table */
__start___ex_table = .;
- __ex_table : { *(__ex_table) }
+ __ex_table : AT(ADDR(__ex_table) - LOAD_OFFSET) { *(__ex_table) }
__stop___ex_table = .;
RODATA
/* writeable */
- .data : { /* Data */
+ .data : AT(ADDR(.data) - LOAD_OFFSET) { /* Data */
*(.data)
CONSTRUCTORS
}
. = ALIGN(4096);
__nosave_begin = .;
- .data_nosave : { *(.data.nosave) }
+ .data_nosave : AT(ADDR(.data_nosave) - LOAD_OFFSET) { *(.data.nosave) }
. = ALIGN(4096);
__nosave_end = .;
. = ALIGN(4096);
- .data.page_aligned : { *(.data.idt) }
+ .data.page_aligned : AT(ADDR(.data.page_aligned) - LOAD_OFFSET) {
+ *(.data.idt)
+ }
. = ALIGN(32);
- .data.cacheline_aligned : { *(.data.cacheline_aligned) }
+ .data.cacheline_aligned : AT(ADDR(.data.cacheline_aligned) - LOAD_OFFSET) {
+ *(.data.cacheline_aligned)
+ }
_edata = .; /* End of data section */
. = ALIGN(THREAD_SIZE); /* init_task */
- .data.init_task : { *(.data.init_task) }
+ .data.init_task : AT(ADDR(.data.init_task) - LOAD_OFFSET) {
+ *(.data.init_task)
+ }
/* will be freed after init */
. = ALIGN(4096); /* Init code and data */
__init_begin = .;
- .init.text : {
+ .init.text : AT(ADDR(.init.text) - LOAD_OFFSET) {
_sinittext = .;
*(.init.text)
_einittext = .;
}
- .init.data : { *(.init.data) }
+ .init.data : AT(ADDR(.init.data) - LOAD_OFFSET) { *(.init.data) }
. = ALIGN(16);
__setup_start = .;
- .init.setup : { *(.init.setup) }
+ .init.setup : AT(ADDR(.init.setup) - LOAD_OFFSET) { *(.init.setup) }
__setup_end = .;
__initcall_start = .;
- .initcall.init : {
+ .initcall.init : AT(ADDR(.initcall.init) - LOAD_OFFSET) {
*(.initcall1.init)
*(.initcall2.init)
*(.initcall3.init)
@@ -80,33 +89,41 @@ SECTIONS
}
__initcall_end = .;
__con_initcall_start = .;
- .con_initcall.init : { *(.con_initcall.init) }
+ .con_initcall.init : AT(ADDR(.con_initcall.init) - LOAD_OFFSET) {
+ *(.con_initcall.init)
+ }
__con_initcall_end = .;
SECURITY_INIT
. = ALIGN(4);
__alt_instructions = .;
- .altinstructions : { *(.altinstructions) }
+ .altinstructions : AT(ADDR(.altinstructions) - LOAD_OFFSET) {
+ *(.altinstructions)
+ }
__alt_instructions_end = .;
- .altinstr_replacement : { *(.altinstr_replacement) }
+ .altinstr_replacement : AT(ADDR(.altinstr_replacement) - LOAD_OFFSET) {
+ *(.altinstr_replacement)
+ }
/* .exit.text is discarded at runtime, not link time, to deal with references
from .altinstructions and .eh_frame */
- .exit.text : { *(.exit.text) }
- .exit.data : { *(.exit.data) }
+ .exit.text : AT(ADDR(.exit.text) - LOAD_OFFSET) { *(.exit.text) }
+ .exit.data : AT(ADDR(.exit.data) - LOAD_OFFSET) { *(.exit.data) }
. = ALIGN(4096);
__initramfs_start = .;
- .init.ramfs : { *(.init.ramfs) }
+ .init.ramfs : AT(ADDR(.init.ramfs) - LOAD_OFFSET) { *(.init.ramfs) }
__initramfs_end = .;
. = ALIGN(32);
__per_cpu_start = .;
- .data.percpu : { *(.data.percpu) }
+ .data.percpu : AT(ADDR(.data.percpu) - LOAD_OFFSET) { *(.data.percpu) }
__per_cpu_end = .;
. = ALIGN(4096);
__init_end = .;
/* freed after init ends here */
__bss_start = .; /* BSS */
- .bss : {
+ .bss.page_aligned : AT(ADDR(.bss.page_aligned) - LOAD_OFFSET) {
*(.bss.page_aligned)
+ }
+ .bss : AT(ADDR(.bss) - LOAD_OFFSET) {
*(.bss)
}
. = ALIGN(4);
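
The AT() clauses give every output section an explicit load (physical)
address equal to its link-time virtual address minus LOAD_OFFSET, so the
ELF image records where the kernel actually sits in RAM, which is what
kexec needs when it enters at phys_startup_32. As a C illustration of
the mapping being encoded (0xc0000000 is __PAGE_OFFSET on a default
i386 configuration):

        static unsigned long kernel_virt_to_phys(unsigned long virt)
        {
                return virt - 0xc0000000UL;     /* 0xc0100000 -> 0x00100000 */
        }
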
diff --git a/arch/i386/lib/dec_and_lock.c b/arch/i386/lib/dec_and_lock.c
index ab43394dc775..8b81b2524fa6 100644
--- a/arch/i386/lib/dec_and_lock.c
+++ b/arch/i386/lib/dec_and_lock.c
@@ -8,6 +8,7 @@
*/
#include <linux/spinlock.h>
+#include <linux/module.h>
#include <asm/atomic.h>
int _atomic_dec_and_lock(atomic_t *atomic, spinlock_t *lock)
@@ -38,3 +39,4 @@ slow_path:
spin_unlock(lock);
return 0;
}
+EXPORT_SYMBOL(_atomic_dec_and_lock);
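
_atomic_dec_and_lock() is now usable from modules. The canonical
pattern, sketched with hypothetical names, takes the lock only on the
final reference drop:

        if (atomic_dec_and_lock(&obj->refcount, &obj_list_lock)) {
                list_del(&obj->list);
                spin_unlock(&obj_list_lock);
                kfree(obj);
        }
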
diff --git a/arch/i386/lib/delay.c b/arch/i386/lib/delay.c
index 080639f262b1..c49a6acbee56 100644
--- a/arch/i386/lib/delay.c
+++ b/arch/i386/lib/delay.c
@@ -13,6 +13,7 @@
#include <linux/config.h>
#include <linux/sched.h>
#include <linux/delay.h>
+#include <linux/module.h>
#include <asm/processor.h>
#include <asm/delay.h>
#include <asm/timer.h>
@@ -34,7 +35,7 @@ inline void __const_udelay(unsigned long xloops)
xloops *= 4;
__asm__("mull %0"
:"=d" (xloops), "=&a" (d0)
- :"1" (xloops),"0" (cpu_data[_smp_processor_id()].loops_per_jiffy * (HZ/4)));
+ :"1" (xloops),"0" (cpu_data[raw_smp_processor_id()].loops_per_jiffy * (HZ/4)));
__delay(++xloops);
}
@@ -47,3 +48,8 @@ void __ndelay(unsigned long nsecs)
{
__const_udelay(nsecs * 0x00005); /* 2**32 / 1000000000 (rounded up) */
}
+
+EXPORT_SYMBOL(__delay);
+EXPORT_SYMBOL(__const_udelay);
+EXPORT_SYMBOL(__udelay);
+EXPORT_SYMBOL(__ndelay);
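
Exporting the delay helpers lets modular drivers use the usual
<linux/delay.h> busy-waits again; a sketch with hypothetical port and
command values:

        static int my_reset(unsigned int port)
        {
                outb(0x1, port);        /* hypothetical reset command */
                udelay(10);             /* let the device settle */
                return inb(port);       /* read back status */
        }
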
diff --git a/arch/i386/lib/mmx.c b/arch/i386/lib/mmx.c
index 01f8b1a2cc84..2afda94dffd3 100644
--- a/arch/i386/lib/mmx.c
+++ b/arch/i386/lib/mmx.c
@@ -3,6 +3,7 @@
#include <linux/string.h>
#include <linux/sched.h>
#include <linux/hardirq.h>
+#include <linux/module.h>
#include <asm/i387.h>
@@ -397,3 +398,7 @@ void mmx_copy_page(void *to, void *from)
else
fast_copy_page(to, from);
}
+
+EXPORT_SYMBOL(_mmx_memcpy);
+EXPORT_SYMBOL(mmx_clear_page);
+EXPORT_SYMBOL(mmx_copy_page);
diff --git a/arch/i386/lib/usercopy.c b/arch/i386/lib/usercopy.c
index 51aa2bbb0269..4cf981d70f45 100644
--- a/arch/i386/lib/usercopy.c
+++ b/arch/i386/lib/usercopy.c
@@ -84,6 +84,7 @@ __strncpy_from_user(char *dst, const char __user *src, long count)
__do_strncpy_from_user(dst, src, count, res);
return res;
}
+EXPORT_SYMBOL(__strncpy_from_user);
/**
* strncpy_from_user: - Copy a NUL terminated string from userspace.
@@ -111,7 +112,7 @@ strncpy_from_user(char *dst, const char __user *src, long count)
__do_strncpy_from_user(dst, src, count, res);
return res;
}
-
+EXPORT_SYMBOL(strncpy_from_user);
/*
* Zero Userspace
@@ -157,6 +158,7 @@ clear_user(void __user *to, unsigned long n)
__do_clear_user(to, n);
return n;
}
+EXPORT_SYMBOL(clear_user);
/**
* __clear_user: - Zero a block of memory in user space, with less checking.
@@ -175,6 +177,7 @@ __clear_user(void __user *to, unsigned long n)
__do_clear_user(to, n);
return n;
}
+EXPORT_SYMBOL(__clear_user);
/**
* strlen_user: - Get the size of a string in user space.
@@ -218,6 +221,7 @@ long strnlen_user(const char __user *s, long n)
:"cc");
return res & mask;
}
+EXPORT_SYMBOL(strnlen_user);
#ifdef CONFIG_X86_INTEL_USERCOPY
static unsigned long
@@ -570,6 +574,7 @@ survive:
n = __copy_user_intel(to, from, n);
return n;
}
+EXPORT_SYMBOL(__copy_to_user_ll);
unsigned long
__copy_from_user_ll(void *to, const void __user *from, unsigned long n)
@@ -581,6 +586,7 @@ __copy_from_user_ll(void *to, const void __user *from, unsigned long n)
n = __copy_user_zeroing_intel(to, from, n);
return n;
}
+EXPORT_SYMBOL(__copy_from_user_ll);
/**
* copy_to_user: - Copy a block of data into user space.
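
The newly exported usercopy helpers follow the usual conventions:
strncpy_from_user() returns the copied length (excluding the NUL) or
-EFAULT. A sketch with hypothetical names:

        static int my_set_name(const char __user *uname)
        {
                char name[32];
                long len = strncpy_from_user(name, uname, sizeof(name) - 1);

                if (len < 0)
                        return -EFAULT;
                name[len] = '\0';
                return 0;
        }
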
diff --git a/arch/i386/mach-default/setup.c b/arch/i386/mach-default/setup.c
index 0aa08eaa8932..e5a1a83d09ef 100644
--- a/arch/i386/mach-default/setup.c
+++ b/arch/i386/mach-default/setup.c
@@ -10,6 +10,14 @@
#include <asm/acpi.h>
#include <asm/arch_hooks.h>
+#ifdef CONFIG_HOTPLUG_CPU
+#define DEFAULT_SEND_IPI (1)
+#else
+#define DEFAULT_SEND_IPI (0)
+#endif
+
+int no_broadcast=DEFAULT_SEND_IPI;
+
/**
* pre_intr_init_hook - initialisation prior to setting up interrupt vectors
*
@@ -104,3 +112,22 @@ void __init mca_nmi_hook(void)
printk("NMI generated from unknown source!\n");
}
#endif
+
+static __init int no_ipi_broadcast(char *str)
+{
+ get_option(&str, &no_broadcast);
+ printk ("Using %s mode\n", no_broadcast ? "No IPI Broadcast" :
+ "IPI Broadcast");
+ return 1;
+}
+
+__setup("no_ipi_broadcast", no_ipi_broadcast);
+
+static int __init print_ipi_mode(void)
+{
+ printk ("Using IPI %s mode\n", no_broadcast ? "No-Shortcut" :
+ "Shortcut");
+ return 0;
+}
+
+late_initcall(print_ipi_mode);
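
The IPI mode can also be forced from the boot loader; a note on the
parameter this adds:

        /* kernel command line:
         *   no_ipi_broadcast=1  mask-based ("no-shortcut") IPIs; the safe
         *                       setting once CPUs can be hot-unplugged
         *   no_ipi_broadcast=0  keep the APIC shortcut broadcasts
         */
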
diff --git a/arch/i386/mach-default/topology.c b/arch/i386/mach-default/topology.c
index 5b3e8817dae8..23395fff35d1 100644
--- a/arch/i386/mach-default/topology.c
+++ b/arch/i386/mach-default/topology.c
@@ -73,12 +73,11 @@ static int __init topology_init(void)
{
int i;
- for (i = 0; i < MAX_NUMNODES; i++) {
- if (node_online(i))
- arch_register_node(i);
- }
- for (i = 0; i < NR_CPUS; i++)
- if (cpu_possible(i)) arch_register_cpu(i);
+ for_each_online_node(i)
+ arch_register_node(i);
+
+ for_each_cpu(i)
+ arch_register_cpu(i);
return 0;
}
@@ -88,8 +87,8 @@ static int __init topology_init(void)
{
int i;
- for (i = 0; i < NR_CPUS; i++)
- if (cpu_possible(i)) arch_register_cpu(i);
+ for_each_cpu(i)
+ arch_register_cpu(i);
return 0;
}
diff --git a/arch/i386/mach-visws/mpparse.c b/arch/i386/mach-visws/mpparse.c
index 5a22082147f4..5f3d7e6de37b 100644
--- a/arch/i386/mach-visws/mpparse.c
+++ b/arch/i386/mach-visws/mpparse.c
@@ -23,7 +23,6 @@ unsigned long mp_lapic_addr;
/* Processor that is doing the boot up */
unsigned int boot_cpu_physical_apicid = -1U;
-unsigned int boot_cpu_logical_apicid = -1U;
/* Bitmask of physically existing CPUs */
physid_mask_t phys_cpu_present_map;
@@ -52,10 +51,8 @@ static void __init MP_processor_info (struct mpc_config_processor *m)
(m->mpc_cpufeature & CPU_MODEL_MASK) >> 4,
m->mpc_apicver);
- if (m->mpc_cpuflag & CPU_BOOTPROCESSOR) {
+ if (m->mpc_cpuflag & CPU_BOOTPROCESSOR)
boot_cpu_physical_apicid = m->mpc_apicid;
- boot_cpu_logical_apicid = logical_apicid;
- }
ver = m->mpc_apicver;
if ((ver >= 0x14 && m->mpc_apicid >= 0xff) || m->mpc_apicid >= 0xf) {
diff --git a/arch/i386/mach-voyager/voyager_smp.c b/arch/i386/mach-voyager/voyager_smp.c
index a6e0ddd65bd0..8c8527593da0 100644
--- a/arch/i386/mach-voyager/voyager_smp.c
+++ b/arch/i386/mach-voyager/voyager_smp.c
@@ -1288,7 +1288,7 @@ smp_local_timer_interrupt(struct pt_regs * regs)
per_cpu(prof_counter, cpu);
}
- update_process_times(user_mode(regs));
+ update_process_times(user_mode_vm(regs));
}
if( ((1<<cpu) & voyager_extended_vic_processors) == 0)
diff --git a/arch/i386/mm/Makefile b/arch/i386/mm/Makefile
index fc3272506846..80908b5aa60f 100644
--- a/arch/i386/mm/Makefile
+++ b/arch/i386/mm/Makefile
@@ -4,7 +4,7 @@
obj-y := init.o pgtable.o fault.o ioremap.o extable.o pageattr.o mmap.o
-obj-$(CONFIG_DISCONTIGMEM) += discontig.o
+obj-$(CONFIG_NUMA) += discontig.o
obj-$(CONFIG_HUGETLB_PAGE) += hugetlbpage.o
obj-$(CONFIG_HIGHMEM) += highmem.o
obj-$(CONFIG_BOOT_IOREMAP) += boot_ioremap.o
diff --git a/arch/i386/mm/discontig.c b/arch/i386/mm/discontig.c
index 1726b4096b10..b358f0702a44 100644
--- a/arch/i386/mm/discontig.c
+++ b/arch/i386/mm/discontig.c
@@ -29,12 +29,16 @@
#include <linux/highmem.h>
#include <linux/initrd.h>
#include <linux/nodemask.h>
+#include <linux/module.h>
+#include <linux/kexec.h>
+
#include <asm/e820.h>
#include <asm/setup.h>
#include <asm/mmzone.h>
#include <bios_ebda.h>
struct pglist_data *node_data[MAX_NUMNODES];
+EXPORT_SYMBOL(node_data);
bootmem_data_t node0_bdata;
/*
@@ -42,12 +46,16 @@ bootmem_data_t node0_bdata;
* populated the following during initialisation.
*
* 1) node_online_map - the map of all nodes configured (online) in the system
- * 2) physnode_map - the mapping between a pfn and owning node
- * 3) node_start_pfn - the starting page frame number for a node
+ * 2) node_start_pfn - the starting page frame number for a node
* 3) node_end_pfn - the ending page frame number for a node
*/
+unsigned long node_start_pfn[MAX_NUMNODES];
+unsigned long node_end_pfn[MAX_NUMNODES];
+
+#ifdef CONFIG_DISCONTIGMEM
/*
+ * 4) physnode_map - the mapping between a pfn and owning node
* physnode_map keeps track of the physical memory layout of a generic
* numa node on a 256Mb break (each element of the array will
* represent 256Mb of memory and will be marked by the node id. so,
@@ -59,6 +67,7 @@ bootmem_data_t node0_bdata;
* physnode_map[8- ] = -1;
*/
s8 physnode_map[MAX_ELEMENTS] = { [0 ... (MAX_ELEMENTS - 1)] = -1};
+EXPORT_SYMBOL(physnode_map);
void memory_present(int nid, unsigned long start, unsigned long end)
{
@@ -85,9 +94,7 @@ unsigned long node_memmap_size_bytes(int nid, unsigned long start_pfn,
return (nr_pages + 1) * sizeof(struct page);
}
-
-unsigned long node_start_pfn[MAX_NUMNODES];
-unsigned long node_end_pfn[MAX_NUMNODES];
+#endif
extern unsigned long find_max_low_pfn(void);
extern void find_max_pfn(void);
@@ -108,6 +115,9 @@ unsigned long node_remap_offset[MAX_NUMNODES];
void *node_remap_start_vaddr[MAX_NUMNODES];
void set_pmd_pfn(unsigned long vaddr, unsigned long pfn, pgprot_t flags);
+void *node_remap_end_vaddr[MAX_NUMNODES];
+void *node_remap_alloc_vaddr[MAX_NUMNODES];
+
/*
* FLAT - support for basic PC memory model with discontig enabled, essentially
* a single node with all available processors in it with a flat
@@ -146,6 +156,21 @@ static void __init find_max_pfn_node(int nid)
BUG();
}
+/* Find the owning node for a pfn. */
+int early_pfn_to_nid(unsigned long pfn)
+{
+ int nid;
+
+ for_each_node(nid) {
+ if (node_end_pfn[nid] == 0)
+ break;
+ if (node_start_pfn[nid] <= pfn && node_end_pfn[nid] >= pfn)
+ return nid;
+ }
+
+ return 0;
+}
+
/*
* Allocate memory for the pg_data_t for this node via a crude pre-bootmem
* method. For node zero take this from the bottom of memory, for
@@ -163,6 +188,21 @@ static void __init allocate_pgdat(int nid)
}
}
+void *alloc_remap(int nid, unsigned long size)
+{
+ void *allocation = node_remap_alloc_vaddr[nid];
+
+ size = ALIGN(size, L1_CACHE_BYTES);
+
+ if (!allocation || (allocation + size) >= node_remap_end_vaddr[nid])
+ return 0;
+
+ node_remap_alloc_vaddr[nid] += size;
+ memset(allocation, 0, size);
+
+ return allocation;
+}
+
void __init remap_numa_kva(void)
{
void *vaddr;
@@ -170,8 +210,6 @@ void __init remap_numa_kva(void)
int node;
for_each_online_node(node) {
- if (node == 0)
- continue;
for (pfn=0; pfn < node_remap_size[node]; pfn += PTRS_PER_PTE) {
vaddr = node_remap_start_vaddr[node]+(pfn<<PAGE_SHIFT);
set_pmd_pfn((ulong) vaddr,
@@ -185,13 +223,9 @@ static unsigned long calculate_numa_remap_pages(void)
{
int nid;
unsigned long size, reserve_pages = 0;
+ unsigned long pfn;
for_each_online_node(nid) {
- if (nid == 0)
- continue;
- if (!node_remap_size[nid])
- continue;
-
/*
* The acpi/srat node info can show hot-add memory zones
* where memory could be added but not currently present.
@@ -208,11 +242,24 @@ static unsigned long calculate_numa_remap_pages(void)
size = (size + LARGE_PAGE_BYTES - 1) / LARGE_PAGE_BYTES;
/* now the roundup is correct, convert to PAGE_SIZE pages */
size = size * PTRS_PER_PTE;
+
+ /*
+ * Validate the region we are allocating only contains valid
+ * pages.
+ */
+ for (pfn = node_end_pfn[nid] - size;
+ pfn < node_end_pfn[nid]; pfn++)
+ if (!page_is_ram(pfn))
+ break;
+
+ if (pfn != node_end_pfn[nid])
+ size = 0;
+
printk("Reserving %ld pages of KVA for lmem_map of node %d\n",
size, nid);
node_remap_size[nid] = size;
- reserve_pages += size;
node_remap_offset[nid] = reserve_pages;
+ reserve_pages += size;
printk("Shrinking node %d from %ld pages to %ld pages\n",
nid, node_end_pfn[nid], node_end_pfn[nid] - size);
node_end_pfn[nid] -= size;
@@ -265,12 +312,18 @@ unsigned long __init setup_memory(void)
(ulong) pfn_to_kaddr(max_low_pfn));
for_each_online_node(nid) {
node_remap_start_vaddr[nid] = pfn_to_kaddr(
- (highstart_pfn + reserve_pages) - node_remap_offset[nid]);
+ highstart_pfn + node_remap_offset[nid]);
+ /* Init the node remap allocator */
+ node_remap_end_vaddr[nid] = node_remap_start_vaddr[nid] +
+ (node_remap_size[nid] * PAGE_SIZE);
+ node_remap_alloc_vaddr[nid] = node_remap_start_vaddr[nid] +
+ ALIGN(sizeof(pg_data_t), PAGE_SIZE);
+
allocate_pgdat(nid);
printk ("node %d will remap to vaddr %08lx - %08lx\n", nid,
(ulong) node_remap_start_vaddr[nid],
- (ulong) pfn_to_kaddr(highstart_pfn + reserve_pages
- - node_remap_offset[nid] + node_remap_size[nid]));
+ (ulong) pfn_to_kaddr(highstart_pfn
+ + node_remap_offset[nid] + node_remap_size[nid]));
}
printk("High memory starts at vaddr %08lx\n",
(ulong) pfn_to_kaddr(highstart_pfn));
@@ -333,23 +386,9 @@ void __init zone_sizes_init(void)
}
zholes_size = get_zholes_size(nid);
- /*
- * We let the lmem_map for node 0 be allocated from the
- * normal bootmem allocator, but other nodes come from the
- * remapped KVA area - mbligh
- */
- if (!nid)
- free_area_init_node(nid, NODE_DATA(nid),
- zones_size, start, zholes_size);
- else {
- unsigned long lmem_map;
- lmem_map = (unsigned long)node_remap_start_vaddr[nid];
- lmem_map += sizeof(pg_data_t) + PAGE_SIZE - 1;
- lmem_map &= PAGE_MASK;
- NODE_DATA(nid)->node_mem_map = (struct page *)lmem_map;
- free_area_init_node(nid, NODE_DATA(nid), zones_size,
- start, zholes_size);
- }
+
+ free_area_init_node(nid, NODE_DATA(nid), zones_size, start,
+ zholes_size);
}
return;
}
@@ -358,24 +397,26 @@ void __init set_highmem_pages_init(int bad_ppro)
{
#ifdef CONFIG_HIGHMEM
struct zone *zone;
+ struct page *page;
for_each_zone(zone) {
- unsigned long node_pfn, node_high_size, zone_start_pfn;
- struct page * zone_mem_map;
-
+ unsigned long node_pfn, zone_start_pfn, zone_end_pfn;
+
if (!is_highmem(zone))
continue;
- printk("Initializing %s for node %d\n", zone->name,
- zone->zone_pgdat->node_id);
-
- node_high_size = zone->spanned_pages;
- zone_mem_map = zone->zone_mem_map;
zone_start_pfn = zone->zone_start_pfn;
+ zone_end_pfn = zone_start_pfn + zone->spanned_pages;
+
+ printk("Initializing %s for node %d (%08lx:%08lx)\n",
+ zone->name, zone->zone_pgdat->node_id,
+ zone_start_pfn, zone_end_pfn);
- for (node_pfn = 0; node_pfn < node_high_size; node_pfn++) {
- one_highpage_init((struct page *)(zone_mem_map + node_pfn),
- zone_start_pfn + node_pfn, bad_ppro);
+ for (node_pfn = zone_start_pfn; node_pfn < zone_end_pfn; node_pfn++) {
+ if (!pfn_valid(node_pfn))
+ continue;
+ page = pfn_to_page(node_pfn);
+ one_highpage_init(page, node_pfn, bad_ppro);
}
}
totalram_pages += totalhigh_pages;
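
alloc_remap() hands out node-local memory from the remapped KVA area;
the intended consumer is the generic memmap allocation path, which can
fall back to bootmem when the remap pool is exhausted. A sketch with a
hypothetical size:

        struct page *map = alloc_remap(nid, size);

        if (!map)
                map = alloc_bootmem_node(NODE_DATA(nid), size);
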
diff --git a/arch/i386/mm/fault.c b/arch/i386/mm/fault.c
index a509237c4815..8e90339d6eaa 100644
--- a/arch/i386/mm/fault.c
+++ b/arch/i386/mm/fault.c
@@ -146,7 +146,7 @@ static int __is_prefetch(struct pt_regs *regs, unsigned long addr)
if (instr > limit)
break;
- if (__get_user(opcode, (unsigned char *) instr))
+ if (__get_user(opcode, (unsigned char __user *) instr))
break;
instr_hi = opcode & 0xf0;
@@ -173,7 +173,7 @@ static int __is_prefetch(struct pt_regs *regs, unsigned long addr)
scan_more = 0;
if (instr > limit)
break;
- if (__get_user(opcode, (unsigned char *) instr))
+ if (__get_user(opcode, (unsigned char __user *) instr))
break;
prefetch = (instr_lo == 0xF) &&
(opcode == 0x0D || opcode == 0x18);
@@ -463,6 +463,9 @@ no_context:
printk(KERN_ALERT "*pte = %08lx\n", page);
}
#endif
+ tsk->thread.cr2 = address;
+ tsk->thread.trap_no = 14;
+ tsk->thread.error_code = error_code;
die("Oops", regs, error_code);
bust_spinlocks(0);
do_exit(SIGKILL);
diff --git a/arch/i386/mm/highmem.c b/arch/i386/mm/highmem.c
index fc4c4cad4e98..b6eb4dcb8777 100644
--- a/arch/i386/mm/highmem.c
+++ b/arch/i386/mm/highmem.c
@@ -1,4 +1,5 @@
#include <linux/highmem.h>
+#include <linux/module.h>
void *kmap(struct page *page)
{
@@ -74,6 +75,24 @@ void kunmap_atomic(void *kvaddr, enum km_type type)
preempt_check_resched();
}
+/* This is the same as kmap_atomic() but can map memory that doesn't
+ * have a struct page associated with it.
+ */
+void *kmap_atomic_pfn(unsigned long pfn, enum km_type type)
+{
+ enum fixed_addresses idx;
+ unsigned long vaddr;
+
+ inc_preempt_count();
+
+ idx = type + KM_TYPE_NR*smp_processor_id();
+ vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
+ set_pte(kmap_pte-idx, pfn_pte(pfn, kmap_prot));
+ __flush_tlb_one(vaddr);
+
+ return (void*) vaddr;
+}
+
struct page *kmap_atomic_to_page(void *ptr)
{
unsigned long idx, vaddr = (unsigned long)ptr;
@@ -87,3 +106,8 @@ struct page *kmap_atomic_to_page(void *ptr)
return pte_page(*pte);
}
+EXPORT_SYMBOL(kmap);
+EXPORT_SYMBOL(kunmap);
+EXPORT_SYMBOL(kmap_atomic);
+EXPORT_SYMBOL(kunmap_atomic);
+EXPORT_SYMBOL(kmap_atomic_to_page);
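
kmap_atomic_pfn() maps memory that has no struct page behind it, which
is what a crash-dump reader needs when copying pages out of the old
kernel. A sketch (KM_USER0 chosen arbitrarily):

        static void copy_pfn(void *buf, unsigned long pfn)
        {
                void *vaddr = kmap_atomic_pfn(pfn, KM_USER0);

                memcpy(buf, vaddr, PAGE_SIZE);
                kunmap_atomic(vaddr, KM_USER0);
        }
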
diff --git a/arch/i386/mm/hugetlbpage.c b/arch/i386/mm/hugetlbpage.c
index 171fc925e1e4..3b099f32b948 100644
--- a/arch/i386/mm/hugetlbpage.c
+++ b/arch/i386/mm/hugetlbpage.c
@@ -18,7 +18,7 @@
#include <asm/tlb.h>
#include <asm/tlbflush.h>
-static pte_t *huge_pte_alloc(struct mm_struct *mm, unsigned long addr)
+pte_t *huge_pte_alloc(struct mm_struct *mm, unsigned long addr)
{
pgd_t *pgd;
pud_t *pud;
@@ -30,7 +30,7 @@ static pte_t *huge_pte_alloc(struct mm_struct *mm, unsigned long addr)
return (pte_t *) pmd;
}
-static pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr)
+pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr)
{
pgd_t *pgd;
pud_t *pud;
@@ -42,21 +42,6 @@ static pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr)
return (pte_t *) pmd;
}
-static void set_huge_pte(struct mm_struct *mm, struct vm_area_struct *vma, struct page *page, pte_t * page_table, int write_access)
-{
- pte_t entry;
-
- add_mm_counter(mm, rss, HPAGE_SIZE / PAGE_SIZE);
- if (write_access) {
- entry =
- pte_mkwrite(pte_mkdirty(mk_pte(page, vma->vm_page_prot)));
- } else
- entry = pte_wrprotect(mk_pte(page, vma->vm_page_prot));
- entry = pte_mkyoung(entry);
- mk_pte_huge(entry);
- set_pte(page_table, entry);
-}
-
/*
* This function checks for proper alignment of input addr and len parameters.
*/
@@ -69,77 +54,6 @@ int is_aligned_hugepage_range(unsigned long addr, unsigned long len)
return 0;
}
-int copy_hugetlb_page_range(struct mm_struct *dst, struct mm_struct *src,
- struct vm_area_struct *vma)
-{
- pte_t *src_pte, *dst_pte, entry;
- struct page *ptepage;
- unsigned long addr = vma->vm_start;
- unsigned long end = vma->vm_end;
-
- while (addr < end) {
- dst_pte = huge_pte_alloc(dst, addr);
- if (!dst_pte)
- goto nomem;
- src_pte = huge_pte_offset(src, addr);
- entry = *src_pte;
- ptepage = pte_page(entry);
- get_page(ptepage);
- set_pte(dst_pte, entry);
- add_mm_counter(dst, rss, HPAGE_SIZE / PAGE_SIZE);
- addr += HPAGE_SIZE;
- }
- return 0;
-
-nomem:
- return -ENOMEM;
-}
-
-int
-follow_hugetlb_page(struct mm_struct *mm, struct vm_area_struct *vma,
- struct page **pages, struct vm_area_struct **vmas,
- unsigned long *position, int *length, int i)
-{
- unsigned long vpfn, vaddr = *position;
- int remainder = *length;
-
- WARN_ON(!is_vm_hugetlb_page(vma));
-
- vpfn = vaddr/PAGE_SIZE;
- while (vaddr < vma->vm_end && remainder) {
-
- if (pages) {
- pte_t *pte;
- struct page *page;
-
- pte = huge_pte_offset(mm, vaddr);
-
- /* hugetlb should be locked, and hence, prefaulted */
- WARN_ON(!pte || pte_none(*pte));
-
- page = &pte_page(*pte)[vpfn % (HPAGE_SIZE/PAGE_SIZE)];
-
- WARN_ON(!PageCompound(page));
-
- get_page(page);
- pages[i] = page;
- }
-
- if (vmas)
- vmas[i] = vma;
-
- vaddr += PAGE_SIZE;
- ++vpfn;
- --remainder;
- ++i;
- }
-
- *length = remainder;
- *position = vaddr;
-
- return i;
-}
-
#if 0 /* This is just for testing */
struct page *
follow_huge_addr(struct mm_struct *mm, unsigned long address, int write)
@@ -204,83 +118,15 @@ follow_huge_pmd(struct mm_struct *mm, unsigned long address,
}
#endif
-void unmap_hugepage_range(struct vm_area_struct *vma,
- unsigned long start, unsigned long end)
+void hugetlb_clean_stale_pgtable(pte_t *pte)
{
- struct mm_struct *mm = vma->vm_mm;
- unsigned long address;
- pte_t pte, *ptep;
+ pmd_t *pmd = (pmd_t *) pte;
struct page *page;
- BUG_ON(start & (HPAGE_SIZE - 1));
- BUG_ON(end & (HPAGE_SIZE - 1));
-
- for (address = start; address < end; address += HPAGE_SIZE) {
- ptep = huge_pte_offset(mm, address);
- if (!ptep)
- continue;
- pte = ptep_get_and_clear(mm, address, ptep);
- if (pte_none(pte))
- continue;
- page = pte_page(pte);
- put_page(page);
- }
- add_mm_counter(mm ,rss, -((end - start) >> PAGE_SHIFT));
- flush_tlb_range(vma, start, end);
-}
-
-int hugetlb_prefault(struct address_space *mapping, struct vm_area_struct *vma)
-{
- struct mm_struct *mm = current->mm;
- unsigned long addr;
- int ret = 0;
-
- BUG_ON(vma->vm_start & ~HPAGE_MASK);
- BUG_ON(vma->vm_end & ~HPAGE_MASK);
-
- spin_lock(&mm->page_table_lock);
- for (addr = vma->vm_start; addr < vma->vm_end; addr += HPAGE_SIZE) {
- unsigned long idx;
- pte_t *pte = huge_pte_alloc(mm, addr);
- struct page *page;
-
- if (!pte) {
- ret = -ENOMEM;
- goto out;
- }
-
- if (!pte_none(*pte))
- continue;
-
- idx = ((addr - vma->vm_start) >> HPAGE_SHIFT)
- + (vma->vm_pgoff >> (HPAGE_SHIFT - PAGE_SHIFT));
- page = find_get_page(mapping, idx);
- if (!page) {
- /* charge the fs quota first */
- if (hugetlb_get_quota(mapping)) {
- ret = -ENOMEM;
- goto out;
- }
- page = alloc_huge_page();
- if (!page) {
- hugetlb_put_quota(mapping);
- ret = -ENOMEM;
- goto out;
- }
- ret = add_to_page_cache(page, mapping, idx, GFP_ATOMIC);
- if (! ret) {
- unlock_page(page);
- } else {
- hugetlb_put_quota(mapping);
- free_huge_page(page);
- goto out;
- }
- }
- set_huge_pte(mm, vma, page, pte, vma->vm_flags & VM_WRITE);
- }
-out:
- spin_unlock(&mm->page_table_lock);
- return ret;
+ page = pmd_page(*pmd);
+ pmd_clear(pmd);
+ dec_page_state(nr_page_table_pages);
+ page_cache_release(page);
}
/* x86_64 also uses this file */
@@ -294,7 +140,12 @@ static unsigned long hugetlb_get_unmapped_area_bottomup(struct file *file,
struct vm_area_struct *vma;
unsigned long start_addr;
- start_addr = mm->free_area_cache;
+ if (len > mm->cached_hole_size) {
+ start_addr = mm->free_area_cache;
+ } else {
+ start_addr = TASK_UNMAPPED_BASE;
+ mm->cached_hole_size = 0;
+ }
full_search:
addr = ALIGN(start_addr, HPAGE_SIZE);
@@ -308,6 +159,7 @@ full_search:
*/
if (start_addr != TASK_UNMAPPED_BASE) {
start_addr = TASK_UNMAPPED_BASE;
+ mm->cached_hole_size = 0;
goto full_search;
}
return -ENOMEM;
@@ -316,6 +168,8 @@ full_search:
mm->free_area_cache = addr + len;
return addr;
}
+ if (addr + mm->cached_hole_size < vma->vm_start)
+ mm->cached_hole_size = vma->vm_start - addr;
addr = ALIGN(vma->vm_end, HPAGE_SIZE);
}
}
@@ -327,12 +181,17 @@ static unsigned long hugetlb_get_unmapped_area_topdown(struct file *file,
struct mm_struct *mm = current->mm;
struct vm_area_struct *vma, *prev_vma;
unsigned long base = mm->mmap_base, addr = addr0;
+ unsigned long largest_hole = mm->cached_hole_size;
int first_time = 1;
/* don't allow allocations above current base */
if (mm->free_area_cache > base)
mm->free_area_cache = base;
+ if (len <= largest_hole) {
+ largest_hole = 0;
+ mm->free_area_cache = base;
+ }
try_again:
/* make sure it can fit in the remaining address space */
if (mm->free_area_cache < len)
@@ -353,13 +212,21 @@ try_again:
* vma->vm_start, use it:
*/
if (addr + len <= vma->vm_start &&
- (!prev_vma || (addr >= prev_vma->vm_end)))
+ (!prev_vma || (addr >= prev_vma->vm_end))) {
/* remember the address as a hint for next time */
- return (mm->free_area_cache = addr);
- else
+ mm->cached_hole_size = largest_hole;
+ return (mm->free_area_cache = addr);
+ } else {
/* pull free_area_cache down to the first hole */
- if (mm->free_area_cache == vma->vm_end)
+ if (mm->free_area_cache == vma->vm_end) {
mm->free_area_cache = vma->vm_start;
+ mm->cached_hole_size = largest_hole;
+ }
+ }
+
+ /* remember the largest hole we saw so far */
+ if (addr + largest_hole < vma->vm_start)
+ largest_hole = vma->vm_start - addr;
/* try just below the current vma->vm_start */
addr = (vma->vm_start - len) & HPAGE_MASK;
@@ -372,6 +239,7 @@ fail:
*/
if (first_time) {
mm->free_area_cache = base;
+ largest_hole = 0;
first_time = 0;
goto try_again;
}
@@ -382,6 +250,7 @@ fail:
* allocations.
*/
mm->free_area_cache = TASK_UNMAPPED_BASE;
+ mm->cached_hole_size = ~0UL;
addr = hugetlb_get_unmapped_area_bottomup(file, addr0,
len, pgoff, flags);
@@ -389,6 +258,7 @@ fail:
* Restore the topdown base:
*/
mm->free_area_cache = base;
+ mm->cached_hole_size = ~0UL;
return addr;
}
diff --git a/arch/i386/mm/init.c b/arch/i386/mm/init.c
index 7a7ea3737265..12216b52e28b 100644
--- a/arch/i386/mm/init.c
+++ b/arch/i386/mm/init.c
@@ -191,7 +191,7 @@ static inline int page_kills_ppro(unsigned long pagenr)
extern int is_available_memory(efi_memory_desc_t *);
-static inline int page_is_ram(unsigned long pagenr)
+int page_is_ram(unsigned long pagenr)
{
int i;
unsigned long addr, end;
@@ -269,7 +269,6 @@ void __init one_highpage_init(struct page *page, int pfn, int bad_ppro)
{
if (page_is_ram(pfn) && !(bad_ppro && page_kills_ppro(pfn))) {
ClearPageReserved(page);
- set_bit(PG_highmem, &page->flags);
set_page_count(page, 1);
__free_page(page);
totalhigh_pages++;
@@ -277,7 +276,9 @@ void __init one_highpage_init(struct page *page, int pfn, int bad_ppro)
SetPageReserved(page);
}
-#ifndef CONFIG_DISCONTIGMEM
+#ifdef CONFIG_NUMA
+extern void set_highmem_pages_init(int);
+#else
static void __init set_highmem_pages_init(int bad_ppro)
{
int pfn;
@@ -285,9 +286,7 @@ static void __init set_highmem_pages_init(int bad_ppro)
one_highpage_init(pfn_to_page(pfn), pfn, bad_ppro);
totalram_pages += totalhigh_pages;
}
-#else
-extern void set_highmem_pages_init(int);
-#endif /* !CONFIG_DISCONTIGMEM */
+#endif /* CONFIG_NUMA */
#else
#define kmap_init() do { } while (0)
@@ -296,12 +295,13 @@ extern void set_highmem_pages_init(int);
#endif /* CONFIG_HIGHMEM */
unsigned long long __PAGE_KERNEL = _PAGE_KERNEL;
+EXPORT_SYMBOL(__PAGE_KERNEL);
unsigned long long __PAGE_KERNEL_EXEC = _PAGE_KERNEL_EXEC;
-#ifndef CONFIG_DISCONTIGMEM
-#define remap_numa_kva() do {} while (0)
-#else
+#ifdef CONFIG_NUMA
extern void __init remap_numa_kva(void);
+#else
+#define remap_numa_kva() do {} while (0)
#endif
static void __init pagetable_init (void)
@@ -352,7 +352,7 @@ static void __init pagetable_init (void)
#endif
}
-#if defined(CONFIG_PM_DISK) || defined(CONFIG_SOFTWARE_SUSPEND)
+#ifdef CONFIG_SOFTWARE_SUSPEND
/*
* Swap suspend & friends need this for resume because things like the intel-agp
* driver might have split up a kernel 4MB mapping.
@@ -526,7 +526,7 @@ static void __init set_max_mapnr_init(void)
#else
num_physpages = max_low_pfn;
#endif
-#ifndef CONFIG_DISCONTIGMEM
+#ifdef CONFIG_FLATMEM
max_mapnr = num_physpages;
#endif
}
@@ -540,7 +540,7 @@ void __init mem_init(void)
int tmp;
int bad_ppro;
-#ifndef CONFIG_DISCONTIGMEM
+#ifdef CONFIG_FLATMEM
if (!mem_map)
BUG();
#endif
diff --git a/arch/i386/mm/ioremap.c b/arch/i386/mm/ioremap.c
index ab542792b27b..6b25afc933b6 100644
--- a/arch/i386/mm/ioremap.c
+++ b/arch/i386/mm/ioremap.c
@@ -11,6 +11,7 @@
#include <linux/vmalloc.h>
#include <linux/init.h>
#include <linux/slab.h>
+#include <linux/module.h>
#include <asm/io.h>
#include <asm/fixmap.h>
#include <asm/cacheflush.h>
@@ -165,7 +166,7 @@ void __iomem * __ioremap(unsigned long phys_addr, unsigned long size, unsigned l
}
return (void __iomem *) (offset + (char __iomem *)addr);
}
-
+EXPORT_SYMBOL(__ioremap);
/**
* ioremap_nocache - map bus memory into CPU space
@@ -222,6 +223,7 @@ void __iomem *ioremap_nocache (unsigned long phys_addr, unsigned long size)
return p;
}
+EXPORT_SYMBOL(ioremap_nocache);
void iounmap(volatile void __iomem *addr)
{
@@ -241,7 +243,7 @@ void iounmap(volatile void __iomem *addr)
write_lock(&vmlist_lock);
p = __remove_vm_area((void *) (PAGE_MASK & (unsigned long __force) addr));
if (!p) {
- printk("iounmap: bad address %p\n", addr);
+ printk(KERN_WARNING "iounmap: bad address %p\n", addr);
goto out_unlock;
}
@@ -255,6 +257,7 @@ out_unlock:
write_unlock(&vmlist_lock);
kfree(p);
}
+EXPORT_SYMBOL(iounmap);
void __init *bt_ioremap(unsigned long phys_addr, unsigned long size)
{
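
With __ioremap, ioremap_nocache and iounmap exported, modular drivers
can map device memory directly; a sketch with a hypothetical BAR
address:

        void __iomem *regs = ioremap_nocache(0xfebf0000, 0x1000);

        if (regs) {
                writel(0x1, regs + 0x10);       /* hypothetical control reg */
                iounmap(regs);
        }
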
diff --git a/arch/i386/mm/pgtable.c b/arch/i386/mm/pgtable.c
index dd81479ff88a..bd2f7afc7a2a 100644
--- a/arch/i386/mm/pgtable.c
+++ b/arch/i386/mm/pgtable.c
@@ -30,13 +30,14 @@ void show_mem(void)
struct page *page;
pg_data_t *pgdat;
unsigned long i;
+ struct page_state ps;
- printk("Mem-info:\n");
+ printk(KERN_INFO "Mem-info:\n");
show_free_areas();
- printk("Free swap: %6ldkB\n", nr_swap_pages<<(PAGE_SHIFT-10));
+ printk(KERN_INFO "Free swap: %6ldkB\n", nr_swap_pages<<(PAGE_SHIFT-10));
for_each_pgdat(pgdat) {
for (i = 0; i < pgdat->node_spanned_pages; ++i) {
- page = pgdat->node_mem_map + i;
+ page = pgdat_page_nr(pgdat, i);
total++;
if (PageHighMem(page))
highmem++;
@@ -48,11 +49,18 @@ void show_mem(void)
shared += page_count(page) - 1;
}
}
- printk("%d pages of RAM\n", total);
- printk("%d pages of HIGHMEM\n",highmem);
- printk("%d reserved pages\n",reserved);
- printk("%d pages shared\n",shared);
- printk("%d pages swap cached\n",cached);
+ printk(KERN_INFO "%d pages of RAM\n", total);
+ printk(KERN_INFO "%d pages of HIGHMEM\n", highmem);
+ printk(KERN_INFO "%d reserved pages\n", reserved);
+ printk(KERN_INFO "%d pages shared\n", shared);
+ printk(KERN_INFO "%d pages swap cached\n", cached);
+
+ get_page_state(&ps);
+ printk(KERN_INFO "%lu pages dirty\n", ps.nr_dirty);
+ printk(KERN_INFO "%lu pages writeback\n", ps.nr_writeback);
+ printk(KERN_INFO "%lu pages mapped\n", ps.nr_mapped);
+ printk(KERN_INFO "%lu pages slab\n", ps.nr_slab);
+ printk(KERN_INFO "%lu pages pagetables\n", ps.nr_page_table_pages);
}
/*
@@ -105,16 +113,16 @@ void set_pmd_pfn(unsigned long vaddr, unsigned long pfn, pgprot_t flags)
pmd_t *pmd;
if (vaddr & (PMD_SIZE-1)) { /* vaddr is misaligned */
- printk ("set_pmd_pfn: vaddr misaligned\n");
+ printk(KERN_WARNING "set_pmd_pfn: vaddr misaligned\n");
return; /* BUG(); */
}
if (pfn & (PTRS_PER_PTE-1)) { /* pfn is misaligned */
- printk ("set_pmd_pfn: pfn misaligned\n");
+ printk(KERN_WARNING "set_pmd_pfn: pfn misaligned\n");
return; /* BUG(); */
}
pgd = swapper_pg_dir + pgd_index(vaddr);
if (pgd_none(*pgd)) {
- printk ("set_pmd_pfn: pgd_none\n");
+ printk(KERN_WARNING "set_pmd_pfn: pgd_none\n");
return; /* BUG(); */
}
pud = pud_offset(pgd, vaddr);
diff --git a/arch/i386/oprofile/backtrace.c b/arch/i386/oprofile/backtrace.c
index 52d72e074f7f..65dfd2edb671 100644
--- a/arch/i386/oprofile/backtrace.c
+++ b/arch/i386/oprofile/backtrace.c
@@ -91,7 +91,7 @@ x86_backtrace(struct pt_regs * const regs, unsigned int depth)
head = (struct frame_head *)regs->ebp;
#endif
- if (!user_mode(regs)) {
+ if (!user_mode_vm(regs)) {
while (depth-- && valid_kernel_stack(head, regs))
head = dump_backtrace(head);
return;
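
For reference, the distinction the user_mode_vm() conversions in this
series rely on, paraphrased from the 2.6-era <asm/ptrace.h>:

        #define user_mode(regs)    (3 & (regs)->xcs)    /* CPL of CS */
        #define user_mode_vm(regs) \
                ((VM_MASK & (regs)->eflags) || user_mode(regs)) /* + vm86 */
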
diff --git a/arch/i386/pci/irq.c b/arch/i386/pci/irq.c
index da21b1d07c15..83458f81e661 100644
--- a/arch/i386/pci/irq.c
+++ b/arch/i386/pci/irq.c
@@ -227,6 +227,24 @@ static int pirq_via_set(struct pci_dev *router, struct pci_dev *dev, int pirq, i
}
/*
+ * The VIA pirq rules are nibble-based, like ALI,
+ * but without the ugly irq number munging.
+ * However, for the 82C586 the nibble map is different.
+ */
+static int pirq_via586_get(struct pci_dev *router, struct pci_dev *dev, int pirq)
+{
+ static unsigned int pirqmap[4] = { 3, 2, 5, 1 };
+ return read_config_nybble(router, 0x55, pirqmap[pirq-1]);
+}
+
+static int pirq_via586_set(struct pci_dev *router, struct pci_dev *dev, int pirq, int irq)
+{
+ static unsigned int pirqmap[4] = { 3, 2, 5, 1 };
+ write_config_nybble(router, 0x55, pirqmap[pirq-1], irq);
+ return 1;
+}
+
+/*
* ITE 8330G pirq rules are nibble-based
* FIXME: pirqmap may be { 1, 0, 3, 2 },
* 2+3 are both mapped to irq 9 on my system
@@ -512,6 +530,10 @@ static __init int via_router_probe(struct irq_router *r, struct pci_dev *router,
switch(device)
{
case PCI_DEVICE_ID_VIA_82C586_0:
+ r->name = "VIA";
+ r->get = pirq_via586_get;
+ r->set = pirq_via586_set;
+ return 1;
case PCI_DEVICE_ID_VIA_82C596:
case PCI_DEVICE_ID_VIA_82C686:
case PCI_DEVICE_ID_VIA_8231:
diff --git a/arch/i386/pci/pcbios.c b/arch/i386/pci/pcbios.c
index 141421b673b0..b9d65f0bc2d1 100644
--- a/arch/i386/pci/pcbios.c
+++ b/arch/i386/pci/pcbios.c
@@ -4,6 +4,7 @@
#include <linux/pci.h>
#include <linux/init.h>
+#include <linux/module.h>
#include "pci.h"
#include "pci-functions.h"
@@ -456,7 +457,7 @@ struct irq_routing_table * __devinit pcibios_get_irq_routing_table(void)
free_page(page);
return rt;
}
-
+EXPORT_SYMBOL(pcibios_get_irq_routing_table);
int pcibios_set_irq_routing(struct pci_dev *dev, int pin, int irq)
{
@@ -473,6 +474,7 @@ int pcibios_set_irq_routing(struct pci_dev *dev, int pin, int irq)
"S" (&pci_indirect));
return !(ret & 0xff00);
}
+EXPORT_SYMBOL(pcibios_set_irq_routing);
static int __init pci_pcbios_init(void)
{
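
pcibios_get_irq_routing_table() returns a kmalloc()ed table that the
caller must free; a sketch of a module using the newly exported pair:

        struct irq_routing_table *rt = pcibios_get_irq_routing_table();

        if (rt) {
                /* walk rt->size bytes of rt->slots[] here */
                kfree(rt);
        }
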
diff --git a/arch/i386/power/cpu.c b/arch/i386/power/cpu.c
index cf337c673d92..0e6b45b61251 100644
--- a/arch/i386/power/cpu.c
+++ b/arch/i386/power/cpu.c
@@ -22,9 +22,11 @@
#include <linux/device.h>
#include <linux/suspend.h>
#include <linux/acpi.h>
+
#include <asm/uaccess.h>
#include <asm/acpi.h>
#include <asm/tlbflush.h>
+#include <asm/processor.h>
static struct saved_context saved_context;
@@ -33,8 +35,6 @@ unsigned long saved_context_esp, saved_context_ebp;
unsigned long saved_context_esi, saved_context_edi;
unsigned long saved_context_eflags;
-extern void enable_sep_cpu(void *);
-
void __save_processor_state(struct saved_context *ctxt)
{
kernel_fpu_begin();
@@ -44,7 +44,6 @@ void __save_processor_state(struct saved_context *ctxt)
*/
asm volatile ("sgdt %0" : "=m" (ctxt->gdt_limit));
asm volatile ("sidt %0" : "=m" (ctxt->idt_limit));
- asm volatile ("sldt %0" : "=m" (ctxt->ldt));
asm volatile ("str %0" : "=m" (ctxt->tr));
/*
@@ -94,20 +93,19 @@ static void fix_processor_context(void)
* Now maybe reload the debug registers
*/
if (current->thread.debugreg[7]){
- loaddebug(&current->thread, 0);
- loaddebug(&current->thread, 1);
- loaddebug(&current->thread, 2);
- loaddebug(&current->thread, 3);
- /* no 4 and 5 */
- loaddebug(&current->thread, 6);
- loaddebug(&current->thread, 7);
+ set_debugreg(current->thread.debugreg[0], 0);
+ set_debugreg(current->thread.debugreg[1], 1);
+ set_debugreg(current->thread.debugreg[2], 2);
+ set_debugreg(current->thread.debugreg[3], 3);
+ /* no 4 and 5 */
+ set_debugreg(current->thread.debugreg[6], 6);
+ set_debugreg(current->thread.debugreg[7], 7);
}
}
void __restore_processor_state(struct saved_context *ctxt)
{
-
/*
* control registers
*/
@@ -117,6 +115,13 @@ void __restore_processor_state(struct saved_context *ctxt)
asm volatile ("movl %0, %%cr0" :: "r" (ctxt->cr0));
/*
+ * now restore the descriptor tables to their proper values
+ * ltr is done in fix_processor_context().
+ */
+ asm volatile ("lgdt %0" :: "m" (ctxt->gdt_limit));
+ asm volatile ("lidt %0" :: "m" (ctxt->idt_limit));
+
+ /*
* segment registers
*/
asm volatile ("movw %0, %%es" :: "r" (ctxt->es));
@@ -125,18 +130,10 @@ void __restore_processor_state(struct saved_context *ctxt)
asm volatile ("movw %0, %%ss" :: "r" (ctxt->ss));
/*
- * now restore the descriptor tables to their proper values
- * ltr is done i fix_processor_context().
- */
- asm volatile ("lgdt %0" :: "m" (ctxt->gdt_limit));
- asm volatile ("lidt %0" :: "m" (ctxt->idt_limit));
- asm volatile ("lldt %0" :: "m" (ctxt->ldt));
-
- /*
* sysenter MSRs
*/
if (boot_cpu_has(X86_FEATURE_SEP))
- enable_sep_cpu(NULL);
+ enable_sep_cpu();
fix_processor_context();
do_fpu_end();