Diffstat (limited to 'arch/x86/mm'):
 -rw-r--r--  arch/x86/mm/discontig_32.c  | 26
 -rw-r--r--  arch/x86/mm/highmem_32.c    |  1
 -rw-r--r--  arch/x86/mm/ioremap.c       | 23
 -rw-r--r--  arch/x86/mm/k8topology_64.c | 38
 -rw-r--r--  arch/x86/mm/pageattr.c      | 10
 5 files changed, 63 insertions(+), 35 deletions(-)
diff --git a/arch/x86/mm/discontig_32.c b/arch/x86/mm/discontig_32.c
index 18378850e25a..914ccf983687 100644
--- a/arch/x86/mm/discontig_32.c
+++ b/arch/x86/mm/discontig_32.c
@@ -476,29 +476,3 @@ int memory_add_physaddr_to_nid(u64 addr)
EXPORT_SYMBOL_GPL(memory_add_physaddr_to_nid);
#endif
-
-#ifndef CONFIG_HAVE_ARCH_PARSE_SRAT
-/*
- * XXX FIXME: Make SLIT table parsing available to 32-bit NUMA
- *
- * These stub functions are needed to compile 32-bit NUMA when SRAT is
- * not set. There are functions in srat_64.c for parsing this table
- * and it may be possible to make them common functions.
- */
-void acpi_numa_slit_init (struct acpi_table_slit *slit)
-{
- printk(KERN_INFO "ACPI: No support for parsing SLIT table\n");
-}
-
-void acpi_numa_processor_affinity_init (struct acpi_srat_cpu_affinity *pa)
-{
-}
-
-void acpi_numa_memory_affinity_init (struct acpi_srat_mem_affinity *ma)
-{
-}
-
-void acpi_numa_arch_fixup(void)
-{
-}
-#endif /* CONFIG_HAVE_ARCH_PARSE_SRAT */
diff --git a/arch/x86/mm/highmem_32.c b/arch/x86/mm/highmem_32.c
index 9cf33d3ee5bc..165c871ba9af 100644
--- a/arch/x86/mm/highmem_32.c
+++ b/arch/x86/mm/highmem_32.c
@@ -155,4 +155,3 @@ EXPORT_SYMBOL(kmap);
EXPORT_SYMBOL(kunmap);
EXPORT_SYMBOL(kmap_atomic);
EXPORT_SYMBOL(kunmap_atomic);
-EXPORT_SYMBOL(kmap_atomic_to_page);
diff --git a/arch/x86/mm/ioremap.c b/arch/x86/mm/ioremap.c
index 804de18abcc2..71bb3159031a 100644
--- a/arch/x86/mm/ioremap.c
+++ b/arch/x86/mm/ioremap.c
@@ -149,7 +149,8 @@ static void __iomem *__ioremap_caller(resource_size_t phys_addr,
* Don't allow anybody to remap normal RAM that we're using..
*/
for (pfn = phys_addr >> PAGE_SHIFT;
- (pfn << PAGE_SHIFT) < last_addr; pfn++) {
+ (pfn << PAGE_SHIFT) < (last_addr & PAGE_MASK);
+ pfn++) {
int is_ram = page_is_ram(pfn);
@@ -176,11 +177,11 @@ static void __iomem *__ioremap_caller(resource_size_t phys_addr,
/*
* Do not fallback to certain memory types with certain
* requested type:
- * - request is uncached, return cannot be write-back
- * - request is uncached, return cannot be write-combine
+ * - request is uc-, return cannot be write-back
+ * - request is uc-, return cannot be write-combine
* - request is write-combine, return cannot be write-back
*/
- if ((prot_val == _PAGE_CACHE_UC &&
+ if ((prot_val == _PAGE_CACHE_UC_MINUS &&
(new_prot_val == _PAGE_CACHE_WB ||
new_prot_val == _PAGE_CACHE_WC)) ||
(prot_val == _PAGE_CACHE_WC &&
@@ -201,6 +202,9 @@ static void __iomem *__ioremap_caller(resource_size_t phys_addr,
default:
prot = PAGE_KERNEL_NOCACHE;
break;
+ case _PAGE_CACHE_UC_MINUS:
+ prot = PAGE_KERNEL_UC_MINUS;
+ break;
case _PAGE_CACHE_WC:
prot = PAGE_KERNEL_WC;
break;
@@ -255,7 +259,16 @@ static void __iomem *__ioremap_caller(resource_size_t phys_addr,
*/
void __iomem *ioremap_nocache(resource_size_t phys_addr, unsigned long size)
{
- return __ioremap_caller(phys_addr, size, _PAGE_CACHE_UC,
+ /*
+ * Ideally, this should be:
+ * pat_wc_enabled ? _PAGE_CACHE_UC : _PAGE_CACHE_UC_MINUS;
+ *
+ * Till we fix all X drivers to use ioremap_wc(), we will use
+ * UC MINUS.
+ */
+ unsigned long val = _PAGE_CACHE_UC_MINUS;
+
+ return __ioremap_caller(phys_addr, size, val,
__builtin_return_address(0));
}
EXPORT_SYMBOL(ioremap_nocache);
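
Not part of the patch: with this change ioremap_nocache() requests _PAGE_CACHE_UC_MINUS rather than strong UC, so an aliasing write-combining MTRR can still take effect for the range. Below is a minimal, hypothetical driver-side sketch of how such a mapping is consumed; MY_MMIO_BASE, MY_MMIO_SIZE and MY_STATUS_REG are made-up placeholders, not anything from the patch.

/*
 * Hedged sketch, assuming a device with a small MMIO window.
 * The addresses and register offset below are invented.
 */
#include <linux/io.h>
#include <linux/errno.h>

#define MY_MMIO_BASE	0xfeb00000UL	/* hypothetical MMIO base */
#define MY_MMIO_SIZE	0x1000		/* hypothetical window size */
#define MY_STATUS_REG	0x04		/* hypothetical register offset */

static void __iomem *my_regs;

static int my_map_device(void)
{
	/* Mapped as UC- after this patch (demotable to WC by an MTRR). */
	my_regs = ioremap_nocache(MY_MMIO_BASE, MY_MMIO_SIZE);
	if (!my_regs)
		return -ENOMEM;

	/* MMIO accesses go through the readl()/writel() accessors. */
	writel(0x1, my_regs + MY_STATUS_REG);
	return 0;
}

static void my_unmap_device(void)
{
	iounmap(my_regs);
}
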
diff --git a/arch/x86/mm/k8topology_64.c b/arch/x86/mm/k8topology_64.c
index 86808e666f9c..1f476e477844 100644
--- a/arch/x86/mm/k8topology_64.c
+++ b/arch/x86/mm/k8topology_64.c
@@ -13,12 +13,15 @@
#include <linux/nodemask.h>
#include <asm/io.h>
#include <linux/pci_ids.h>
+#include <linux/acpi.h>
#include <asm/types.h>
#include <asm/mmzone.h>
#include <asm/proto.h>
#include <asm/e820.h>
#include <asm/pci-direct.h>
#include <asm/numa.h>
+#include <asm/mpspec.h>
+#include <asm/apic.h>
static __init int find_northbridge(void)
{
@@ -44,6 +47,30 @@ static __init int find_northbridge(void)
return -1;
}
+static __init void early_get_boot_cpu_id(void)
+{
+ /*
+ * need to get boot_cpu_id so can use that to create apicid_to_node
+ * in k8_scan_nodes()
+ */
+ /*
+ * Find possible boot-time SMP configuration:
+ */
+ early_find_smp_config();
+#ifdef CONFIG_ACPI
+ /*
+ * Read APIC information from ACPI tables.
+ */
+ early_acpi_boot_init();
+#endif
+ /*
+ * get boot-time SMP configuration:
+ */
+ if (smp_found_config)
+ early_get_smp_config();
+ early_init_lapic_mapping();
+}
+
int __init k8_scan_nodes(unsigned long start, unsigned long end)
{
unsigned long prevbase;
@@ -56,6 +83,7 @@ int __init k8_scan_nodes(unsigned long start, unsigned long end)
unsigned cores;
unsigned bits;
int j;
+ unsigned apicid_base;
if (!early_pci_allowed())
return -1;
@@ -174,11 +202,19 @@ int __init k8_scan_nodes(unsigned long start, unsigned long end)
/* use the coreid bits from early_identify_cpu */
bits = boot_cpu_data.x86_coreid_bits;
cores = (1<<bits);
+ apicid_base = 0;
+ /* need to get boot_cpu_id early for system with apicid lifting */
+ early_get_boot_cpu_id();
+ if (boot_cpu_physical_apicid > 0) {
+ printk(KERN_INFO "BSP APIC ID: %02x\n",
+ boot_cpu_physical_apicid);
+ apicid_base = boot_cpu_physical_apicid;
+ }
for (i = 0; i < 8; i++) {
if (nodes[i].start != nodes[i].end) {
nodeid = nodeids[i];
- for (j = 0; j < cores; j++)
+ for (j = apicid_base; j < cores + apicid_base; j++)
apicid_to_node[(nodeid << bits) + j] = i;
setup_node_bootmem(i, nodes[i].start, nodes[i].end);
}
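
Not part of the patch: a small, self-contained illustration of the apicid_to_node[] index arithmetic with APIC ID lifting, i.e. why the per-node core loop now starts at apicid_base instead of 0. All values below are invented for the example; on real hardware they come from early_identify_cpu and the boot APIC ID.

/* Standalone demo of the index math in the k8_scan_nodes() loop above. */
#include <stdio.h>

int main(void)
{
	unsigned bits = 2;		/* boot_cpu_data.x86_coreid_bits (assumed) */
	unsigned cores = 1u << bits;	/* cores per node */
	unsigned apicid_base = 4;	/* lifted boot_cpu_physical_apicid (assumed) */
	unsigned nodeid = 1;		/* hardware node id read from the northbridge */
	int node = 1;			/* logical NUMA node being set up */

	for (unsigned j = apicid_base; j < cores + apicid_base; j++)
		printf("apicid_to_node[%u] = %d\n", (nodeid << bits) + j, node);
	return 0;
}
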
diff --git a/arch/x86/mm/pageattr.c b/arch/x86/mm/pageattr.c
index bd5e05c654dc..60bcb5b6a37e 100644
--- a/arch/x86/mm/pageattr.c
+++ b/arch/x86/mm/pageattr.c
@@ -777,14 +777,20 @@ static inline int change_page_attr_clear(unsigned long addr, int numpages,
int _set_memory_uc(unsigned long addr, int numpages)
{
+ /*
+ * for now UC MINUS. see comments in ioremap_nocache()
+ */
return change_page_attr_set(addr, numpages,
- __pgprot(_PAGE_CACHE_UC));
+ __pgprot(_PAGE_CACHE_UC_MINUS));
}
int set_memory_uc(unsigned long addr, int numpages)
{
+ /*
+ * for now UC MINUS. see comments in ioremap_nocache()
+ */
if (reserve_memtype(addr, addr + numpages * PAGE_SIZE,
- _PAGE_CACHE_UC, NULL))
+ _PAGE_CACHE_UC_MINUS, NULL))
return -EINVAL;
return _set_memory_uc(addr, numpages);
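
Not part of the patch: a minimal sketch of a set_memory_uc()/set_memory_wb() caller, whose region is, after this change, reserved and mapped as UC- rather than strong UC. The buffer size and error handling are arbitrary placeholders.

/*
 * Hedged sketch: mark a 4-page kernel buffer uncached, use it, then
 * restore write-back before freeing. my_make_buffer_uncached() is a
 * made-up helper name.
 */
#include <linux/gfp.h>
#include <linux/errno.h>
#include <asm/cacheflush.h>

static int my_make_buffer_uncached(void)
{
	unsigned long addr = __get_free_pages(GFP_KERNEL, 2);	/* 4 pages */

	if (!addr)
		return -ENOMEM;

	if (set_memory_uc(addr, 4)) {		/* tracked as UC- with this patch */
		free_pages(addr, 2);
		return -EINVAL;
	}

	/* ... use the uncached buffer ... */

	set_memory_wb(addr, 4);			/* restore write-back */
	free_pages(addr, 2);
	return 0;
}
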