Diffstat (limited to 'arch/x86/mm')
-rw-r--r--  arch/x86/mm/discontig_32.c   | 26
-rw-r--r--  arch/x86/mm/highmem_32.c     |  1
-rw-r--r--  arch/x86/mm/init_32.c        | 12
-rw-r--r--  arch/x86/mm/ioremap.c        | 23
-rw-r--r--  arch/x86/mm/k8topology_64.c  | 38
-rw-r--r--  arch/x86/mm/pageattr.c       | 10
-rw-r--r--  arch/x86/mm/pat.c            | 54
-rw-r--r--  arch/x86/mm/pgtable_32.c     |  7
8 files changed, 88 insertions, 83 deletions
diff --git a/arch/x86/mm/discontig_32.c b/arch/x86/mm/discontig_32.c
index 18378850e25a..914ccf983687 100644
--- a/arch/x86/mm/discontig_32.c
+++ b/arch/x86/mm/discontig_32.c
@@ -476,29 +476,3 @@ int memory_add_physaddr_to_nid(u64 addr)
EXPORT_SYMBOL_GPL(memory_add_physaddr_to_nid);
#endif
-
-#ifndef CONFIG_HAVE_ARCH_PARSE_SRAT
-/*
- * XXX FIXME: Make SLIT table parsing available to 32-bit NUMA
- *
- * These stub functions are needed to compile 32-bit NUMA when SRAT is
- * not set. There are functions in srat_64.c for parsing this table
- * and it may be possible to make them common functions.
- */
-void acpi_numa_slit_init (struct acpi_table_slit *slit)
-{
- printk(KERN_INFO "ACPI: No support for parsing SLIT table\n");
-}
-
-void acpi_numa_processor_affinity_init (struct acpi_srat_cpu_affinity *pa)
-{
-}
-
-void acpi_numa_memory_affinity_init (struct acpi_srat_mem_affinity *ma)
-{
-}
-
-void acpi_numa_arch_fixup(void)
-{
-}
-#endif /* CONFIG_HAVE_ARCH_PARSE_SRAT */
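
The block removed above is the classic compile-out stub pattern: when a
feature (here, SRAT/SLIT parsing) is not built in, empty fallback
definitions keep every caller compiling and linking. A minimal
userspace sketch of the idiom, with illustrative names (not kernel
code):

#include <stdio.h>

#ifndef HAVE_SLIT_PARSER
/* Fallback: no parser built in; log once and do nothing. */
static void slit_init(void)
{
    printf("No support for parsing SLIT table\n");
}
#else
static void slit_init(void)
{
    /* real table parsing would live here */
}
#endif

int main(void)
{
    slit_init();    /* callers compile and link either way */
    return 0;
}
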
diff --git a/arch/x86/mm/highmem_32.c b/arch/x86/mm/highmem_32.c
index 9cf33d3ee5bc..165c871ba9af 100644
--- a/arch/x86/mm/highmem_32.c
+++ b/arch/x86/mm/highmem_32.c
@@ -155,4 +155,3 @@ EXPORT_SYMBOL(kmap);
EXPORT_SYMBOL(kunmap);
EXPORT_SYMBOL(kmap_atomic);
EXPORT_SYMBOL(kunmap_atomic);
-EXPORT_SYMBOL(kmap_atomic_to_page);
diff --git a/arch/x86/mm/init_32.c b/arch/x86/mm/init_32.c
index de236e419cb5..ec30d10154b6 100644
--- a/arch/x86/mm/init_32.c
+++ b/arch/x86/mm/init_32.c
@@ -438,8 +438,6 @@ void zap_low_mappings(void)
{
int i;
- save_pg_dir();
-
/*
* Zap initial low-memory mappings.
*
@@ -663,16 +661,8 @@ void __init mem_init(void)
test_wp_bit();
cpa_init();
-
- /*
- * Subtle. SMP is doing its boot stuff late (because it has to
- * fork idle threads) - but it also needs low mappings for the
- * protected-mode entry to work. We zap these entries only after
- * the WP-bit has been tested.
- */
-#ifndef CONFIG_SMP
+ save_pg_dir();
zap_low_mappings();
-#endif
}
#ifdef CONFIG_MEMORY_HOTPLUG
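
The hunks above move save_pg_dir() out of zap_low_mappings() and call
both unconditionally at the end of mem_init(), after the WP-bit test,
instead of zapping only on !CONFIG_SMP builds. Conceptually, zapping
clears the boot-time identity (virtual == physical) entries at the
bottom of the page directory once the kernel runs purely at its high
virtual address. A userspace model of that step (constants assumed
for a 3G/1G 32-bit split; not kernel code):

#include <string.h>

#define PTRS_PER_PGD        1024    /* two-level 32-bit paging */
#define USER_PTRS_PER_PGD   768     /* entries below PAGE_OFFSET */

typedef unsigned long pgd_t;

static void zap_low_mappings_model(pgd_t *pgd)
{
    /* drop the low identity mappings; the kernel half stays */
    memset(pgd, 0, USER_PTRS_PER_PGD * sizeof(pgd_t));
    /* the real code follows this with a global TLB flush */
}

int main(void)
{
    pgd_t pgd[PTRS_PER_PGD] = { [0] = 0x067 };  /* one live entry */

    zap_low_mappings_model(pgd);
    return pgd[0] != 0;     /* 0: low mapping gone, as expected */
}
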
diff --git a/arch/x86/mm/ioremap.c b/arch/x86/mm/ioremap.c
index 804de18abcc2..71bb3159031a 100644
--- a/arch/x86/mm/ioremap.c
+++ b/arch/x86/mm/ioremap.c
@@ -149,7 +149,8 @@ static void __iomem *__ioremap_caller(resource_size_t phys_addr,
* Don't allow anybody to remap normal RAM that we're using..
*/
for (pfn = phys_addr >> PAGE_SHIFT;
- (pfn << PAGE_SHIFT) < last_addr; pfn++) {
+ (pfn << PAGE_SHIFT) < (last_addr & PAGE_MASK);
+ pfn++) {
int is_ram = page_is_ram(pfn);
@@ -176,11 +177,11 @@ static void __iomem *__ioremap_caller(resource_size_t phys_addr,
/*
* Do not fallback to certain memory types with certain
* requested type:
- * - request is uncached, return cannot be write-back
- * - request is uncached, return cannot be write-combine
+ * - request is uc-, return cannot be write-back
+ * - request is uc-, return cannot be write-combine
* - request is write-combine, return cannot be write-back
*/
- if ((prot_val == _PAGE_CACHE_UC &&
+ if ((prot_val == _PAGE_CACHE_UC_MINUS &&
(new_prot_val == _PAGE_CACHE_WB ||
new_prot_val == _PAGE_CACHE_WC)) ||
(prot_val == _PAGE_CACHE_WC &&
@@ -201,6 +202,9 @@ static void __iomem *__ioremap_caller(resource_size_t phys_addr,
default:
prot = PAGE_KERNEL_NOCACHE;
break;
+ case _PAGE_CACHE_UC_MINUS:
+ prot = PAGE_KERNEL_UC_MINUS;
+ break;
case _PAGE_CACHE_WC:
prot = PAGE_KERNEL_WC;
break;
@@ -255,7 +259,16 @@ static void __iomem *__ioremap_caller(resource_size_t phys_addr,
*/
void __iomem *ioremap_nocache(resource_size_t phys_addr, unsigned long size)
{
- return __ioremap_caller(phys_addr, size, _PAGE_CACHE_UC,
+ /*
+ * Ideally, this should be:
+ * pat_wc_enabled ? _PAGE_CACHE_UC : _PAGE_CACHE_UC_MINUS;
+ *
+ * Till we fix all X drivers to use ioremap_wc(), we will use
+ * UC MINUS.
+ */
+ unsigned long val = _PAGE_CACHE_UC_MINUS;
+
+ return __ioremap_caller(phys_addr, size, val,
__builtin_return_address(0));
}
EXPORT_SYMBOL(ioremap_nocache);
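
Two things change above: the RAM-check loop bound is tightened to stop
at the page containing last_addr, and ioremap_nocache() now requests
UC- (uncached-minus) rather than strong UC, so an MTRR covering the
range as write-combining can still take effect for unconverted X
drivers. The fallback rules in the middle hunk can be modelled
directly; this userspace sketch (illustrative names, not the kernel
API) reports whether a memtype the kernel already tracks is acceptable
for a new request:

#include <stdio.h>

enum cache_type { WB, WC, UC_MINUS, UC };

static int fallback_ok(enum cache_type req, enum cache_type got)
{
    /* request is uc-: return cannot be write-back/write-combine */
    if (req == UC_MINUS && (got == WB || got == WC))
        return 0;
    /* request is write-combine: return cannot be write-back */
    if (req == WC && got == WB)
        return 0;
    return 1;
}

int main(void)
{
    printf("UC- over WB: %d\n", fallback_ok(UC_MINUS, WB));  /* 0 */
    printf("UC- over UC: %d\n", fallback_ok(UC_MINUS, UC));  /* 1 */
    printf("WC  over WB: %d\n", fallback_ok(WC, WB));        /* 0 */
    return 0;
}
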
diff --git a/arch/x86/mm/k8topology_64.c b/arch/x86/mm/k8topology_64.c
index 86808e666f9c..1f476e477844 100644
--- a/arch/x86/mm/k8topology_64.c
+++ b/arch/x86/mm/k8topology_64.c
@@ -13,12 +13,15 @@
#include <linux/nodemask.h>
#include <asm/io.h>
#include <linux/pci_ids.h>
+#include <linux/acpi.h>
#include <asm/types.h>
#include <asm/mmzone.h>
#include <asm/proto.h>
#include <asm/e820.h>
#include <asm/pci-direct.h>
#include <asm/numa.h>
+#include <asm/mpspec.h>
+#include <asm/apic.h>
static __init int find_northbridge(void)
{
@@ -44,6 +47,30 @@ static __init int find_northbridge(void)
return -1;
}
+static __init void early_get_boot_cpu_id(void)
+{
+ /*
+ * need to get boot_cpu_id early so it can be used to populate
+ * apicid_to_node in k8_scan_nodes()
+ */
+ /*
+ * Find possible boot-time SMP configuration:
+ */
+ early_find_smp_config();
+#ifdef CONFIG_ACPI
+ /*
+ * Read APIC information from ACPI tables.
+ */
+ early_acpi_boot_init();
+#endif
+ /*
+ * get boot-time SMP configuration:
+ */
+ if (smp_found_config)
+ early_get_smp_config();
+ early_init_lapic_mapping();
+}
+
int __init k8_scan_nodes(unsigned long start, unsigned long end)
{
unsigned long prevbase;
@@ -56,6 +83,7 @@ int __init k8_scan_nodes(unsigned long start, unsigned long end)
unsigned cores;
unsigned bits;
int j;
+ unsigned apicid_base;
if (!early_pci_allowed())
return -1;
@@ -174,11 +202,19 @@ int __init k8_scan_nodes(unsigned long start, unsigned long end)
/* use the coreid bits from early_identify_cpu */
bits = boot_cpu_data.x86_coreid_bits;
cores = (1<<bits);
+ apicid_base = 0;
+ /* need to get boot_cpu_id early for system with apicid lifting */
+ early_get_boot_cpu_id();
+ if (boot_cpu_physical_apicid > 0) {
+ printk(KERN_INFO "BSP APIC ID: %02x\n",
+ boot_cpu_physical_apicid);
+ apicid_base = boot_cpu_physical_apicid;
+ }
for (i = 0; i < 8; i++) {
if (nodes[i].start != nodes[i].end) {
nodeid = nodeids[i];
- for (j = 0; j < cores; j++)
+ for (j = apicid_base; j < cores + apicid_base; j++)
apicid_to_node[(nodeid << bits) + j] = i;
setup_node_bootmem(i, nodes[i].start, nodes[i].end);
}
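
The new code handles "APIC ID lifting", where the BIOS assigns the
boot CPU an APIC ID above zero; the apicid_to_node[] fill must then be
offset by the BSP's id, or the table would describe APIC IDs no CPU
actually owns. A worked userspace example of the loop above (the
numbers are illustrative):

#include <stdio.h>

int main(void)
{
    unsigned bits = 1;          /* x86_coreid_bits: 2 cores/node */
    unsigned cores = 1 << bits;
    unsigned apicid_base = 4;   /* BSP APIC ID lifted to 4 */
    unsigned nodeid = 1;        /* hardware node id from the NB */
    int node = 1;               /* logical node being set up */

    for (unsigned j = apicid_base; j < cores + apicid_base; j++)
        printf("apicid_to_node[%u] = %d\n",
               (nodeid << bits) + j, node);
    /* prints entries 6 and 7; without the base it would be 2 and 3 */
    return 0;
}
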
diff --git a/arch/x86/mm/pageattr.c b/arch/x86/mm/pageattr.c
index bd5e05c654dc..60bcb5b6a37e 100644
--- a/arch/x86/mm/pageattr.c
+++ b/arch/x86/mm/pageattr.c
@@ -777,14 +777,20 @@ static inline int change_page_attr_clear(unsigned long addr, int numpages,
int _set_memory_uc(unsigned long addr, int numpages)
{
+ /*
+ * for now UC MINUS. see comments in ioremap_nocache()
+ */
return change_page_attr_set(addr, numpages,
- __pgprot(_PAGE_CACHE_UC));
+ __pgprot(_PAGE_CACHE_UC_MINUS));
}
int set_memory_uc(unsigned long addr, int numpages)
{
+ /*
+ * for now UC MINUS. see comments in ioremap_nocache()
+ */
if (reserve_memtype(addr, addr + numpages * PAGE_SIZE,
- _PAGE_CACHE_UC, NULL))
+ _PAGE_CACHE_UC_MINUS, NULL))
return -EINVAL;
return _set_memory_uc(addr, numpages);
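
set_memory_uc() now reserves the region's memory type (as UC-) before
flipping page attributes, so conflicting cached/uncached aliases of
the same physical range are refused up front. A userspace model of the
reserve-then-modify pattern (hypothetical names, not the kernel API):

#include <stdio.h>

#define PAGE_SIZE_MODEL 4096UL

static int reserve_memtype_model(unsigned long start, unsigned long end)
{
    /* the kernel records [start, end) as UC- in a global list and
     * fails if it conflicts with an earlier reservation */
    printf("reserve [%#lx, %#lx) as uncached-minus\n", start, end);
    return 0;
}

static int set_memory_uc_model(unsigned long addr, int numpages)
{
    if (reserve_memtype_model(addr, addr + numpages * PAGE_SIZE_MODEL))
        return -1;  /* -EINVAL in the kernel */
    /* ...only then are the PTEs actually switched to UC- */
    return 0;
}

int main(void)
{
    return set_memory_uc_model(0x10000, 2);
}
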
diff --git a/arch/x86/mm/pat.c b/arch/x86/mm/pat.c
index 277446cd30b6..de3a99812450 100644
--- a/arch/x86/mm/pat.c
+++ b/arch/x86/mm/pat.c
@@ -25,31 +25,24 @@
#include <asm/mtrr.h>
#include <asm/io.h>
-int pat_wc_enabled = 1;
+#ifdef CONFIG_X86_PAT
+int __read_mostly pat_wc_enabled = 1;
-static u64 __read_mostly boot_pat_state;
-
-static int nopat(char *str)
+void __cpuinit pat_disable(char *reason)
{
pat_wc_enabled = 0;
- printk(KERN_INFO "x86: PAT support disabled.\n");
-
- return 0;
+ printk(KERN_INFO "%s\n", reason);
}
-early_param("nopat", nopat);
-static int pat_known_cpu(void)
+static int nopat(char *str)
{
- if (!pat_wc_enabled)
- return 0;
-
- if (cpu_has_pat)
- return 1;
-
- pat_wc_enabled = 0;
- printk(KERN_INFO "CPU and/or kernel does not support PAT.\n");
+ pat_disable("PAT support disabled.");
return 0;
}
+early_param("nopat", nopat);
+#endif
+
+static u64 __read_mostly boot_pat_state;
enum {
PAT_UC = 0, /* uncached */
@@ -66,17 +59,19 @@ void pat_init(void)
{
u64 pat;
-#ifndef CONFIG_X86_PAT
- nopat(NULL);
-#endif
-
- /* Boot CPU enables PAT based on CPU feature */
- if (!smp_processor_id() && !pat_known_cpu())
+ if (!pat_wc_enabled)
return;
- /* APs enable PAT iff boot CPU has enabled it before */
- if (smp_processor_id() && !pat_wc_enabled)
- return;
+ /* Paranoia check. */
+ if (!cpu_has_pat) {
+ printk(KERN_ERR "PAT enabled, but CPU feature cleared\n");
+ /*
+ * Panic if this happens on the secondary CPU, and we
+ * switched to PAT on the boot CPU. We have no way to
+ * undo PAT.
+ */
+ BUG_ON(boot_pat_state);
+ }
/* Set PWT to Write-Combining. All other bits stay the same */
/*
@@ -95,9 +90,8 @@ void pat_init(void)
PAT(4,WB) | PAT(5,WC) | PAT(6,UC_MINUS) | PAT(7,UC);
/* Boot CPU check */
- if (!smp_processor_id()) {
+ if (!boot_pat_state)
rdmsrl(MSR_IA32_CR_PAT, boot_pat_state);
- }
wrmsrl(MSR_IA32_CR_PAT, pat);
printk(KERN_INFO "x86 PAT enabled: cpu %d, old 0x%Lx, new 0x%Lx\n",
@@ -561,7 +555,7 @@ int phys_mem_access_prot_allowed(struct file *file, unsigned long pfn,
"%s:%d /dev/mem ioremap_change_attr failed %s for %Lx-%Lx\n",
current->comm, current->pid,
cattr_name(flags),
- offset, offset + size);
+ offset, (unsigned long long)(offset + size));
return 0;
}
@@ -582,7 +576,7 @@ void map_devmem(unsigned long pfn, unsigned long size, pgprot_t vma_prot)
"%s:%d /dev/mem expected mapping type %s for %Lx-%Lx, got %s\n",
current->comm, current->pid,
cattr_name(want_flags),
- addr, addr + size,
+ addr, (unsigned long long)(addr + size),
cattr_name(flags));
}
}
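
The reworked pat_init() keys "has the boot CPU run yet" off
boot_pat_state instead of smp_processor_id(), and BUGs only when a
secondary CPU lacks the PAT feature after the boot CPU has already
switched over. The MSR value it writes is built with the PAT(x, y)
macro visible in the context line above; a worked userspace example of
that encoding:

#include <stdio.h>

enum { PAT_UC = 0, PAT_WC = 1, PAT_WT = 4, PAT_WP = 5,
       PAT_WB = 6, PAT_UC_MINUS = 7 };

/* byte x of IA32_PAT selects the memory type for PAT entry x */
#define PAT(x, y)   ((unsigned long long)PAT_ ## y << ((x) * 8))

int main(void)
{
    /* entry 1 (the PWT bit) selects WC instead of the power-on WT;
     * entries 4-7 mirror 0-3 since the PAT bit stays unused */
    unsigned long long pat =
        PAT(0, WB) | PAT(1, WC) | PAT(2, UC_MINUS) | PAT(3, UC) |
        PAT(4, WB) | PAT(5, WC) | PAT(6, UC_MINUS) | PAT(7, UC);

    printf("IA32_PAT = 0x%016llx\n", pat);  /* 0x0007010600070106 */
    return 0;
}
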
diff --git a/arch/x86/mm/pgtable_32.c b/arch/x86/mm/pgtable_32.c
index 9ee007be9142..369cf065b6a4 100644
--- a/arch/x86/mm/pgtable_32.c
+++ b/arch/x86/mm/pgtable_32.c
@@ -172,10 +172,3 @@ void reserve_top_address(unsigned long reserve)
__FIXADDR_TOP = -reserve - PAGE_SIZE;
__VMALLOC_RESERVE += reserve;
}
-
-int pmd_bad(pmd_t pmd)
-{
- WARN_ON_ONCE(pmd_bad_v1(pmd) != pmd_bad_v2(pmd));
-
- return pmd_bad_v1(pmd);
-}
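
The deleted pmd_bad() was a transition aid: run two implementations of
the same predicate side by side, warn once if they ever disagree, and
keep returning the old result. A generic userspace sketch of that
cross-checking idiom (the predicates here are purely illustrative):

#include <stdio.h>

static int pred_v1(unsigned long v) { return (v & 0xfffUL) != 0x067; }
static int pred_v2(unsigned long v) { return (v & ~0xfffff000UL) != 0x067; }

static int pred_checked(unsigned long v)
{
    static int warned;  /* WARN_ON_ONCE analogue */

    if (!warned && pred_v1(v) != pred_v2(v)) {
        warned = 1;
        fprintf(stderr, "v1/v2 disagree for %#lx\n", v);
    }
    return pred_v1(v);  /* keep trusting the old version */
}

int main(void)
{
    return pred_checked(0x1000067UL);   /* 0: a "good" value */
}
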