Diffstat (limited to 'arch/x86')
-rw-r--r--  arch/x86/Kconfig                          | 13
-rw-r--r--  arch/x86/boot/printf.c                    |  2
-rw-r--r--  arch/x86/kernel/apic_64.c                 |  2
-rw-r--r--  arch/x86/kernel/cpu/cpufreq/longrun.c     |  2
-rw-r--r--  arch/x86/kernel/cpu/cpufreq/powernow-k8.c | 15
-rw-r--r--  arch/x86/kernel/init_task.c               |  1
-rw-r--r--  arch/x86/kernel/kvmclock.c                |  4
-rw-r--r--  arch/x86/kernel/process.c                 | 36
-rw-r--r--  arch/x86/kernel/tsc_32.c                  | 25
-rw-r--r--  arch/x86/kernel/tsc_64.c                  |  5
-rw-r--r--  arch/x86/kvm/i8254.c                      |  2
-rw-r--r--  arch/x86/kvm/lapic.c                      |  2
-rw-r--r--  arch/x86/kvm/mmu.c                        |  2
-rw-r--r--  arch/x86/kvm/x86_emulate.c                |  7
-rw-r--r--  arch/x86/mm/pat.c                         |  2
-rw-r--r--  arch/x86/pci/common.c                     |  8
-rw-r--r--  arch/x86/vdso/vclock_gettime.c            |  6
-rw-r--r--  arch/x86/xen/mmu.c                        |  2
18 files changed, 83 insertions, 53 deletions
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
index fe361ae7ef2f..dcbec34154cf 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -26,17 +26,10 @@ config X86
 	select HAVE_KVM if ((X86_32 && !X86_VOYAGER && !X86_VISWS && !X86_NUMAQ) || X86_64)
 	select HAVE_ARCH_KGDB if !X86_VOYAGER

-config DEFCONFIG_LIST
+config ARCH_DEFCONFIG
 	string
-	depends on X86_32
-	option defconfig_list
-	default "arch/x86/configs/i386_defconfig"
-
-config DEFCONFIG_LIST
-	string
-	depends on X86_64
-	option defconfig_list
-	default "arch/x86/configs/x86_64_defconfig"
+	default "arch/x86/configs/i386_defconfig" if X86_32
+	default "arch/x86/configs/x86_64_defconfig" if X86_64

 config GENERIC_LOCKBREAK
diff --git a/arch/x86/boot/printf.c b/arch/x86/boot/printf.c
index c1d00c0274c4..50e47cdbdddd 100644
--- a/arch/x86/boot/printf.c
+++ b/arch/x86/boot/printf.c
@@ -56,7 +56,7 @@ static char *number(char *str, long num, int base, int size, int precision,
 	if (type & LEFT)
 		type &= ~ZEROPAD;
 	if (base < 2 || base > 36)
-		return 0;
+		return NULL;
 	c = (type & ZEROPAD) ? '0' : ' ';
 	sign = 0;
 	if (type & SIGN) {
diff --git a/arch/x86/kernel/apic_64.c b/arch/x86/kernel/apic_64.c
index 5910020c3f24..0633cfd0dc29 100644
--- a/arch/x86/kernel/apic_64.c
+++ b/arch/x86/kernel/apic_64.c
@@ -534,7 +534,7 @@ int setup_profiling_timer(unsigned int multiplier)
  */
 void clear_local_APIC(void)
 {
-	int maxlvt = lapic_get_maxlvt();
+	int maxlvt;
 	u32 v;

 	/* APIC hasn't been mapped yet */
diff --git a/arch/x86/kernel/cpu/cpufreq/longrun.c b/arch/x86/kernel/cpu/cpufreq/longrun.c
index af4a867a097c..777a7ff075de 100644
--- a/arch/x86/kernel/cpu/cpufreq/longrun.c
+++ b/arch/x86/kernel/cpu/cpufreq/longrun.c
@@ -245,7 +245,7 @@ static unsigned int __init longrun_determine_freqs(unsigned int *low_freq,
 	if ((ecx > 95) || (ecx == 0) || (eax < ebx))
 		return -EIO;

-	edx = (eax - ebx) / (100 - ecx);
+	edx = ((eax - ebx) * 100) / (100 - ecx);
 	*low_freq = edx * 1000; /* back to kHz */

 	dprintk("low frequency is %u kHz\n", *low_freq);
diff --git a/arch/x86/kernel/cpu/cpufreq/powernow-k8.c b/arch/x86/kernel/cpu/cpufreq/powernow-k8.c
index 46d4034d9f37..206791eb46e3 100644
--- a/arch/x86/kernel/cpu/cpufreq/powernow-k8.c
+++ b/arch/x86/kernel/cpu/cpufreq/powernow-k8.c
@@ -1127,12 +1127,23 @@ static int __cpuinit powernowk8_cpu_init(struct cpufreq_policy *pol)
 		 * an UP version, and is deprecated by AMD.
 		 */
 		if (num_online_cpus() != 1) {
-			printk(KERN_ERR PFX "MP systems not supported by PSB BIOS structure\n");
+#ifndef CONFIG_ACPI_PROCESSOR
+			printk(KERN_ERR PFX "ACPI Processor support is required "
+			       "for SMP systems but is absent. Please load the "
+			       "ACPI Processor module before starting this "
+			       "driver.\n");
+#else
+			printk(KERN_ERR PFX "Your BIOS does not provide ACPI "
+			       "_PSS objects in a way that Linux understands. "
+			       "Please report this to the Linux ACPI maintainers"
+			       " and complain to your BIOS vendor.\n");
+#endif
 			kfree(data);
 			return -ENODEV;
 		}
 		if (pol->cpu != 0) {
-			printk(KERN_ERR PFX "No _PSS objects for CPU other than CPU0\n");
+			printk(KERN_ERR PFX "No ACPI _PSS objects for CPU other than "
+			       "CPU0. Complain to your BIOS vendor.\n");
 			kfree(data);
 			return -ENODEV;
 		}
diff --git a/arch/x86/kernel/init_task.c b/arch/x86/kernel/init_task.c
index 3d01e47777db..a4f93b4120c1 100644
--- a/arch/x86/kernel/init_task.c
+++ b/arch/x86/kernel/init_task.c
@@ -11,7 +11,6 @@
 #include <asm/desc.h>

 static struct fs_struct init_fs = INIT_FS;
-static struct files_struct init_files = INIT_FILES;
 static struct signal_struct init_signals = INIT_SIGNALS(init_signals);
 static struct sighand_struct init_sighand = INIT_SIGHAND(init_sighand);
 struct mm_struct init_mm = INIT_MM(init_mm);
diff --git a/arch/x86/kernel/kvmclock.c b/arch/x86/kernel/kvmclock.c
index 4bc1be5d5472..08a30986d472 100644
--- a/arch/x86/kernel/kvmclock.c
+++ b/arch/x86/kernel/kvmclock.c
@@ -53,7 +53,7 @@ static cycle_t kvm_clock_read(void);
  * have elapsed since the hypervisor wrote the data. So we try to account for
  * that with system time
  */
-unsigned long kvm_get_wallclock(void)
+static unsigned long kvm_get_wallclock(void)
 {
 	u32 wc_sec, wc_nsec;
 	u64 delta;
@@ -86,7 +86,7 @@ unsigned long kvm_get_wallclock(void)
 	return ts.tv_sec + 1;
 }

-int kvm_set_wallclock(unsigned long now)
+static int kvm_set_wallclock(unsigned long now)
 {
 	return 0;
 }
diff --git a/arch/x86/kernel/process.c b/arch/x86/kernel/process.c
index 67e9b4a1e89d..ba370dc8685b 100644
--- a/arch/x86/kernel/process.c
+++ b/arch/x86/kernel/process.c
@@ -99,15 +99,6 @@ static void mwait_idle(void)
 		local_irq_enable();
 }

-
-static int __cpuinit mwait_usable(const struct cpuinfo_x86 *c)
-{
-	if (force_mwait)
-		return 1;
-	/* Any C1 states supported? */
-	return c->cpuid_level >= 5 && ((cpuid_edx(5) >> 4) & 0xf) > 0;
-}
-
 /*
  * On SMP it's slightly faster (but much more power-consuming!)
  * to poll the ->work.need_resched flag instead of waiting for the
@@ -119,6 +110,33 @@ static void poll_idle(void)
 		cpu_relax();
 }

+/*
+ * mwait selection logic:
+ *
+ * It depends on the CPU. For AMD CPUs that support MWAIT this is
+ * wrong. Family 0x10 and 0x11 CPUs will enter C1 on HLT. Powersavings
+ * then depend on a clock divisor and current Pstate of the core. If
+ * all cores of a processor are in halt state (C1) the processor can
+ * enter the C1E (C1 enhanced) state. If mwait is used this will never
+ * happen.
+ *
+ * idle=mwait overrides this decision and forces the usage of mwait.
+ */
+static int __cpuinit mwait_usable(const struct cpuinfo_x86 *c)
+{
+	if (force_mwait)
+		return 1;
+
+	if (c->x86_vendor == X86_VENDOR_AMD) {
+		switch(c->x86) {
+		case 0x10:
+		case 0x11:
+			return 0;
+		}
+	}
+	return 1;
+}
+
 void __cpuinit select_idle_routine(const struct cpuinfo_x86 *c)
 {
 	static int selected;
diff --git a/arch/x86/kernel/tsc_32.c b/arch/x86/kernel/tsc_32.c
index e4790728b224..068759db63dd 100644
--- a/arch/x86/kernel/tsc_32.c
+++ b/arch/x86/kernel/tsc_32.c
@@ -14,7 +14,7 @@

 #include "mach_timer.h"

-static int tsc_enabled;
+static int tsc_disabled;

 /*
  * On some systems the TSC frequency does not
@@ -28,8 +28,8 @@ EXPORT_SYMBOL_GPL(tsc_khz);
 static int __init tsc_setup(char *str)
 {
 	printk(KERN_WARNING "notsc: Kernel compiled with CONFIG_X86_TSC, "
-				"cannot disable TSC completely.\n");
-	mark_tsc_unstable("user disabled TSC");
+	       "cannot disable TSC completely.\n");
+	tsc_disabled = 1;
 	return 1;
 }
 #else
@@ -120,7 +120,7 @@ unsigned long long native_sched_clock(void)
 	 *   very important for it to be as fast as the platform
 	 *   can achive it. )
 	 */
-	if (unlikely(!tsc_enabled && !tsc_unstable))
+	if (unlikely(tsc_disabled))
 		/* No locking but a rare wrong value is not a big deal: */
 		return (jiffies_64 - INITIAL_JIFFIES) * (1000000000 / HZ);
@@ -322,7 +322,6 @@ void mark_tsc_unstable(char *reason)
 {
 	if (!tsc_unstable) {
 		tsc_unstable = 1;
-		tsc_enabled = 0;
 		printk("Marking TSC unstable due to: %s.\n", reason);
 		/* Can be called before registration */
 		if (clocksource_tsc.mult)
@@ -336,7 +335,7 @@ EXPORT_SYMBOL_GPL(mark_tsc_unstable);
 static int __init dmi_mark_tsc_unstable(const struct dmi_system_id *d)
 {
 	printk(KERN_NOTICE "%s detected: marking TSC unstable.\n",
-			d->ident);
+	       d->ident);
 	tsc_unstable = 1;
 	return 0;
 }
@@ -403,14 +402,22 @@ void __init tsc_init(void)
 {
 	int cpu;

-	if (!cpu_has_tsc)
+	if (!cpu_has_tsc || tsc_disabled) {
+		/* Disable the TSC in case of !cpu_has_tsc */
+		tsc_disabled = 1;
 		return;
+	}

 	cpu_khz = calculate_cpu_khz();
 	tsc_khz = cpu_khz;

 	if (!cpu_khz) {
 		mark_tsc_unstable("could not calculate TSC khz");
+		/*
+		 * We need to disable the TSC completely in this case
+		 * to prevent sched_clock() from using it.
+		 */
+		tsc_disabled = 1;
 		return;
 	}

@@ -441,8 +448,6 @@ void __init tsc_init(void)
 	if (check_tsc_unstable()) {
 		clocksource_tsc.rating = 0;
 		clocksource_tsc.flags &= ~CLOCK_SOURCE_IS_CONTINUOUS;
-	} else
-		tsc_enabled = 1;
-
+	}
 	clocksource_register(&clocksource_tsc);
 }
diff --git a/arch/x86/kernel/tsc_64.c b/arch/x86/kernel/tsc_64.c
index fcc16e58609e..1784b8077a12 100644
--- a/arch/x86/kernel/tsc_64.c
+++ b/arch/x86/kernel/tsc_64.c
@@ -227,14 +227,14 @@ void __init tsc_calibrate(void)
 	/* hpet or pmtimer available ? */
 	if (!hpet && !pm1 && !pm2) {
 		printk(KERN_INFO "TSC calibrated against PIT\n");
-		return;
+		goto out;
 	}

 	/* Check, whether the sampling was disturbed by an SMI */
 	if (tsc1 == ULONG_MAX || tsc2 == ULONG_MAX) {
 		printk(KERN_WARNING "TSC calibration disturbed by SMI, "
 		       "using PIT calibration result\n");
-		return;
+		goto out;
 	}

 	tsc2 = (tsc2 - tsc1) * 1000000L;
@@ -255,6 +255,7 @@ void __init tsc_calibrate(void)

 	tsc_khz = tsc2 / tsc1;

+out:
 	for_each_possible_cpu(cpu)
 		set_cyc2ns_scale(tsc_khz, cpu);
 }
diff --git a/arch/x86/kvm/i8254.c b/arch/x86/kvm/i8254.c
index 3324d90038e4..7c077a9d9777 100644
--- a/arch/x86/kvm/i8254.c
+++ b/arch/x86/kvm/i8254.c
@@ -216,7 +216,7 @@ int pit_has_pending_timer(struct kvm_vcpu *vcpu)
 {
 	struct kvm_pit *pit = vcpu->kvm->arch.vpit;

-	if (pit && vcpu->vcpu_id == 0)
+	if (pit && vcpu->vcpu_id == 0 && pit->pit_state.inject_pending)
 		return atomic_read(&pit->pit_state.pit_timer.pending);

 	return 0;
diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c
index 36809d79788b..c297c50eba63 100644
--- a/arch/x86/kvm/lapic.c
+++ b/arch/x86/kvm/lapic.c
@@ -957,7 +957,7 @@ int apic_has_pending_timer(struct kvm_vcpu *vcpu)
 {
 	struct kvm_lapic *lapic = vcpu->arch.apic;

-	if (lapic)
+	if (lapic && apic_enabled(lapic) && apic_lvt_enabled(lapic, APIC_LVTT))
 		return atomic_read(&lapic->timer.pending);

 	return 0;
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index 36c5406b1813..7246b60afb96 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -1996,7 +1996,7 @@ static struct shrinker mmu_shrinker = {
 	.seeks = DEFAULT_SEEKS * 10,
 };

-void mmu_destroy_caches(void)
+static void mmu_destroy_caches(void)
 {
 	if (pte_chain_cache)
 		kmem_cache_destroy(pte_chain_cache);
diff --git a/arch/x86/kvm/x86_emulate.c b/arch/x86/kvm/x86_emulate.c
index f2a696d6a243..8a96320ab071 100644
--- a/arch/x86/kvm/x86_emulate.c
+++ b/arch/x86/kvm/x86_emulate.c
@@ -677,8 +677,9 @@ static int decode_modrm(struct x86_emulate_ctxt *ctxt,
 	c->use_modrm_ea = 1;

 	if (c->modrm_mod == 3) {
-		c->modrm_val = *(unsigned long *)
-			decode_register(c->modrm_rm, c->regs, c->d & ByteOp);
+		c->modrm_ptr = decode_register(c->modrm_rm,
+					       c->regs, c->d & ByteOp);
+		c->modrm_val = *(unsigned long *)c->modrm_ptr;
 		return rc;
 	}

@@ -1005,6 +1006,7 @@ done_prefixes:
 		if ((c->d & ModRM) && c->modrm_mod == 3) {
 			c->src.type = OP_REG;
 			c->src.val = c->modrm_val;
+			c->src.ptr = c->modrm_ptr;
 			break;
 		}
 		c->src.type = OP_MEM;
@@ -1049,6 +1051,7 @@ done_prefixes:
 		if ((c->d & ModRM) && c->modrm_mod == 3) {
 			c->dst.type = OP_REG;
 			c->dst.val = c->dst.orig_val = c->modrm_val;
+			c->dst.ptr = c->modrm_ptr;
 			break;
 		}
 		c->dst.type = OP_MEM;
diff --git a/arch/x86/mm/pat.c b/arch/x86/mm/pat.c
index bcb1a8e4b2db..de3a99812450 100644
--- a/arch/x86/mm/pat.c
+++ b/arch/x86/mm/pat.c
@@ -28,7 +28,7 @@
 #ifdef CONFIG_X86_PAT
 int __read_mostly pat_wc_enabled = 1;

-void __init pat_disable(char *reason)
+void __cpuinit pat_disable(char *reason)
 {
 	pat_wc_enabled = 0;
 	printk(KERN_INFO "%s\n", reason);
diff --git a/arch/x86/pci/common.c b/arch/x86/pci/common.c
index 8545c8a9d107..6e64aaf00d1d 100644
--- a/arch/x86/pci/common.c
+++ b/arch/x86/pci/common.c
@@ -302,18 +302,18 @@ static struct dmi_system_id __devinitdata pciprobe_dmi_table[] = {
 	},
 	{
 		.callback = set_bf_sort,
-		.ident = "HP ProLiant DL385 G2",
+		.ident = "HP ProLiant DL360",
 		.matches = {
 			DMI_MATCH(DMI_SYS_VENDOR, "HP"),
-			DMI_MATCH(DMI_PRODUCT_NAME, "ProLiant DL385 G2"),
+			DMI_MATCH(DMI_PRODUCT_NAME, "ProLiant DL360"),
 		},
 	},
 	{
 		.callback = set_bf_sort,
-		.ident = "HP ProLiant DL585 G2",
+		.ident = "HP ProLiant DL380",
 		.matches = {
 			DMI_MATCH(DMI_SYS_VENDOR, "HP"),
-			DMI_MATCH(DMI_PRODUCT_NAME, "ProLiant DL585 G2"),
+			DMI_MATCH(DMI_PRODUCT_NAME, "ProLiant DL380"),
 		},
 	},
 #ifdef __i386__
diff --git a/arch/x86/vdso/vclock_gettime.c b/arch/x86/vdso/vclock_gettime.c
index 23476c2ebfc4..efa2ba7c6005 100644
--- a/arch/x86/vdso/vclock_gettime.c
+++ b/arch/x86/vdso/vclock_gettime.c
@@ -106,9 +106,9 @@ int __vdso_gettimeofday(struct timeval *tv, struct timezone *tz)
 		do_realtime((struct timespec *)tv);
 		tv->tv_usec /= 1000;
 		if (unlikely(tz != NULL)) {
-			/* This relies on gcc inlining the memcpy. We'll notice
-			   if it ever fails to do so. */
-			memcpy(tz, &gtod->sys_tz, sizeof(struct timezone));
+			/* Avoid memcpy. Some old compilers fail to inline it */
+			tz->tz_minuteswest = gtod->sys_tz.tz_minuteswest;
+			tz->tz_dsttime = gtod->sys_tz.tz_dsttime;
 		}
 		return 0;
 	}
diff --git a/arch/x86/xen/mmu.c b/arch/x86/xen/mmu.c
index 126766d43aea..3525ef523a74 100644
--- a/arch/x86/xen/mmu.c
+++ b/arch/x86/xen/mmu.c
@@ -60,7 +60,7 @@ xmaddr_t arbitrary_virt_to_machine(unsigned long address)
 {
 	unsigned int level;
 	pte_t *pte = lookup_address(address, &level);
-	unsigned offset = address & PAGE_MASK;
+	unsigned offset = address & ~PAGE_MASK;

 	BUG_ON(pte == NULL);