Diffstat (limited to 'arch/x86/kernel/setup_percpu.c')
 -rw-r--r--  arch/x86/kernel/setup_percpu.c | 689
 1 file changed, 369 insertions(+), 320 deletions(-)
diff --git a/arch/x86/kernel/setup_percpu.c b/arch/x86/kernel/setup_percpu.c
index 01161077a49c..c29f301d3885 100644
--- a/arch/x86/kernel/setup_percpu.c
+++ b/arch/x86/kernel/setup_percpu.c
@@ -7,402 +7,451 @@
#include <linux/crash_dump.h>
#include <linux/smp.h>
#include <linux/topology.h>
+#include <linux/pfn.h>
#include <asm/sections.h>
#include <asm/processor.h>
#include <asm/setup.h>
#include <asm/mpspec.h>
#include <asm/apicdef.h>
#include <asm/highmem.h>
+#include <asm/proto.h>
+#include <asm/cpumask.h>
+#include <asm/cpu.h>
+#include <asm/stackprotector.h>
-#ifdef CONFIG_X86_LOCAL_APIC
-unsigned int num_processors;
-unsigned disabled_cpus __cpuinitdata;
-/* Processor that is doing the boot up */
-unsigned int boot_cpu_physical_apicid = -1U;
-EXPORT_SYMBOL(boot_cpu_physical_apicid);
-unsigned int max_physical_apicid;
-
-/* Bitmask of physically existing CPUs */
-physid_mask_t phys_cpu_present_map;
+#ifdef CONFIG_DEBUG_PER_CPU_MAPS
+# define DBG(x...) printk(KERN_DEBUG x)
+#else
+# define DBG(x...)
#endif
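/*
 * Editor's note - a minimal usage sketch, not part of the patch: with
 * CONFIG_DEBUG_PER_CPU_MAPS enabled, a call such as
 *
 *     DBG("PERCPU: cpu %4d %p\n", cpu, ptr);
 *
 * becomes a KERN_DEBUG printk; without it the macro expands to
 * nothing and the call compiles away.
 */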
-/* map cpu index to physical APIC ID */
-DEFINE_EARLY_PER_CPU(u16, x86_cpu_to_apicid, BAD_APICID);
-DEFINE_EARLY_PER_CPU(u16, x86_bios_cpu_apicid, BAD_APICID);
-EXPORT_EARLY_PER_CPU_SYMBOL(x86_cpu_to_apicid);
-EXPORT_EARLY_PER_CPU_SYMBOL(x86_bios_cpu_apicid);
-
-#if defined(CONFIG_NUMA) && defined(CONFIG_X86_64)
-#define X86_64_NUMA 1
-
-/* map cpu index to node index */
-DEFINE_EARLY_PER_CPU(int, x86_cpu_to_node_map, NUMA_NO_NODE);
-EXPORT_EARLY_PER_CPU_SYMBOL(x86_cpu_to_node_map);
-
-/* which logical CPUs are on which nodes */
-cpumask_t *node_to_cpumask_map;
-EXPORT_SYMBOL(node_to_cpumask_map);
-
-/* setup node_to_cpumask_map */
-static void __init setup_node_to_cpumask_map(void);
+DEFINE_PER_CPU(int, cpu_number);
+EXPORT_PER_CPU_SYMBOL(cpu_number);
+#ifdef CONFIG_X86_64
+#define BOOT_PERCPU_OFFSET ((unsigned long)__per_cpu_load)
#else
-static inline void setup_node_to_cpumask_map(void) { }
+#define BOOT_PERCPU_OFFSET 0
#endif
-#if defined(CONFIG_HAVE_SETUP_PER_CPU_AREA) && defined(CONFIG_X86_SMP)
-/*
- * Copy data used in early init routines from the initial arrays to the
- * per cpu data areas. These arrays then become expendable and the
- * *_early_ptr's are zeroed indicating that the static arrays are gone.
- */
-static void __init setup_per_cpu_maps(void)
-{
- int cpu;
-
- for_each_possible_cpu(cpu) {
- per_cpu(x86_cpu_to_apicid, cpu) =
- early_per_cpu_map(x86_cpu_to_apicid, cpu);
- per_cpu(x86_bios_cpu_apicid, cpu) =
- early_per_cpu_map(x86_bios_cpu_apicid, cpu);
-#ifdef X86_64_NUMA
- per_cpu(x86_cpu_to_node_map, cpu) =
- early_per_cpu_map(x86_cpu_to_node_map, cpu);
-#endif
- }
+DEFINE_PER_CPU(unsigned long, this_cpu_off) = BOOT_PERCPU_OFFSET;
+EXPORT_PER_CPU_SYMBOL(this_cpu_off);
- /* indicate the early static arrays will soon be gone */
- early_per_cpu_ptr(x86_cpu_to_apicid) = NULL;
- early_per_cpu_ptr(x86_bios_cpu_apicid) = NULL;
-#ifdef X86_64_NUMA
- early_per_cpu_ptr(x86_cpu_to_node_map) = NULL;
-#endif
-}
-
-#ifdef CONFIG_X86_32
-/*
- * Great future not-so-futuristic plan: make i386 and x86_64 do it
- * the same way
- */
-unsigned long __per_cpu_offset[NR_CPUS] __read_mostly;
+unsigned long __per_cpu_offset[NR_CPUS] __read_mostly = {
+ [0 ... NR_CPUS-1] = BOOT_PERCPU_OFFSET,
+};
EXPORT_SYMBOL(__per_cpu_offset);
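/*
 * Editor's sketch, not part of the patch: because every slot is
 * preset to BOOT_PERCPU_OFFSET, an accessor such as
 *
 *     per_cpu(cpu_number, 3)
 *
 * resolves to roughly
 *
 *     *((int *)((char *)&per_cpu_var(cpu_number) + __per_cpu_offset[3]))
 *
 * so the boot CPU can use percpu accessors even before
 * setup_per_cpu_areas() below rewrites the offsets to point at the
 * real per-cpu copies.
 */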
-static inline void setup_cpu_pda_map(void) { }
-
-#elif !defined(CONFIG_SMP)
-static inline void setup_cpu_pda_map(void) { }
-#else /* CONFIG_SMP && CONFIG_X86_64 */
-
-/*
- * Allocate cpu_pda pointer table and array via alloc_bootmem.
+/**
+ * pcpu_need_numa - determine whether percpu allocation needs to consider NUMA
+ *
+ * If NUMA is not configured or there is only one NUMA node available,
+ * there is no reason to consider NUMA. This function determines
+ * whether percpu allocation should consider NUMA or not.
+ *
+ * RETURNS:
+ * true if NUMA should be considered; otherwise, false.
*/
-static void __init setup_cpu_pda_map(void)
+static bool __init pcpu_need_numa(void)
{
- char *pda;
- struct x8664_pda **new_cpu_pda;
- unsigned long size;
- int cpu;
+#ifdef CONFIG_NEED_MULTIPLE_NODES
+ pg_data_t *last = NULL;
+ unsigned int cpu;
- size = roundup(sizeof(struct x8664_pda), cache_line_size());
+ for_each_possible_cpu(cpu) {
+ int node = early_cpu_to_node(cpu);
- /* allocate cpu_pda array and pointer table */
- {
- unsigned long tsize = nr_cpu_ids * sizeof(void *);
- unsigned long asize = size * (nr_cpu_ids - 1);
+ if (node_online(node) && NODE_DATA(node) &&
+ last && last != NODE_DATA(node))
+ return true;
- tsize = roundup(tsize, cache_line_size());
- new_cpu_pda = alloc_bootmem(tsize + asize);
- pda = (char *)new_cpu_pda + tsize;
+ last = NODE_DATA(node);
}
+#endif
+ return false;
+}
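/*
 * Editor's sketch, not part of the patch: setup_pcpu_embed() below
 * uses this as an early bail-out,
 *
 *     if (!cpu_has_pse || pcpu_need_numa())
 *             return -EINVAL;
 *
 * since one contiguous embedded area cannot honor node-local
 * placement on a multi-node machine.
 */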
- /* initialize pointer table to static pda's */
- for_each_possible_cpu(cpu) {
- if (cpu == 0) {
- /* leave boot cpu pda in place */
- new_cpu_pda[0] = cpu_pda(0);
- continue;
- }
- new_cpu_pda[cpu] = (struct x8664_pda *)pda;
- new_cpu_pda[cpu]->in_bootmem = 1;
- pda += size;
+/**
+ * pcpu_alloc_bootmem - NUMA friendly alloc_bootmem wrapper for percpu
+ * @cpu: cpu to allocate for
+ * @size: size of the allocation in bytes
+ * @align: alignment
+ *
+ * Allocate @size bytes aligned at @align for cpu @cpu. This wrapper
+ * does the right thing for NUMA regardless of the current
+ * configuration.
+ *
+ * RETURNS:
+ * Pointer to the allocated area on success, NULL on failure.
+ */
+static void * __init pcpu_alloc_bootmem(unsigned int cpu, unsigned long size,
+ unsigned long align)
+{
+ const unsigned long goal = __pa(MAX_DMA_ADDRESS);
+#ifdef CONFIG_NEED_MULTIPLE_NODES
+ int node = early_cpu_to_node(cpu);
+ void *ptr;
+
+ if (!node_online(node) || !NODE_DATA(node)) {
+ ptr = __alloc_bootmem_nopanic(size, align, goal);
+ pr_info("cpu %d has no node %d or node-local memory\n",
+ cpu, node);
+ pr_debug("per cpu data for cpu%d %lu bytes at %016lx\n",
+ cpu, size, __pa(ptr));
+ } else {
+ ptr = __alloc_bootmem_node_nopanic(NODE_DATA(node),
+ size, align, goal);
+ pr_debug("per cpu data for cpu%d %lu bytes on node%d at "
+ "%016lx\n", cpu, size, node, __pa(ptr));
}
-
- /* point to new pointer table */
- _cpu_pda = new_cpu_pda;
+ return ptr;
+#else
+ return __alloc_bootmem_nopanic(size, align, goal);
+#endif
}
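/*
 * Editor's sketch, not part of the patch: each first-chunk allocator
 * below funnels its backing memory through this helper, e.g.
 *
 *     void *ptr = pcpu_alloc_bootmem(cpu, PAGE_SIZE, PAGE_SIZE);
 *     if (!ptr)
 *             goto enomem;
 *
 * so node-local placement, and the fallback when a node has no local
 * memory, is decided in exactly one place.
 */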
-#endif /* CONFIG_SMP && CONFIG_X86_64 */
-
-#ifdef CONFIG_X86_64
+/*
+ * Remap allocator
+ *
+ * This allocator uses a PMD page as its unit. A PMD page is
+ * allocated for each cpu and remapped into the vmalloc area using a
+ * PMD mapping. As a PMD page is quite large, only part of it is
+ * used for the first chunk; the unused part is returned to the
+ * bootmem allocator.
+ *
+ * The PMD pages are thus mapped twice - once in the linear physical
+ * mapping and once in the vmalloc area for the first percpu chunk.
+ * The double mapping adds the pressure of one extra PMD TLB entry,
+ * but is still much better than using only 4k mappings while
+ * remaining NUMA friendly.
+ */
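/*
 * Editor's note - a worked example, not part of the patch: on x86_64
 * PMD_SIZE is 2MB, so if static plus reserved percpu data came to,
 * say, 700KB (a made-up figure), each cpu would keep 700KB of its PMD
 * page and the remaining ~1.3MB would go back via free_bootmem().
 */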
+#ifdef CONFIG_NEED_MULTIPLE_NODES
+static size_t pcpur_size __initdata;
+static void **pcpur_ptrs __initdata;
-/* correctly size the local cpu masks */
-static void __init setup_cpu_local_masks(void)
+static struct page * __init pcpur_get_page(unsigned int cpu, int pageno)
{
- alloc_bootmem_cpumask_var(&cpu_initialized_mask);
- alloc_bootmem_cpumask_var(&cpu_callin_mask);
- alloc_bootmem_cpumask_var(&cpu_callout_mask);
- alloc_bootmem_cpumask_var(&cpu_sibling_setup_mask);
-}
+ size_t off = (size_t)pageno << PAGE_SHIFT;
-#else /* CONFIG_X86_32 */
+ if (off >= pcpur_size)
+ return NULL;
-static inline void setup_cpu_local_masks(void)
-{
+ return virt_to_page(pcpur_ptrs[cpu] + off);
}
-#endif /* CONFIG_X86_32 */
-
-/*
- * Great future plan:
- * Declare PDA itself and support (irqstack,tss,pgd) as per cpu data.
- * Always point %gs to its beginning
- */
-void __init setup_per_cpu_areas(void)
+static ssize_t __init setup_pcpu_remap(size_t static_size)
{
- ssize_t size, old_size;
- char *ptr;
- int cpu;
- unsigned long align = 1;
-
- /* Setup cpu_pda map */
- setup_cpu_pda_map();
+ static struct vm_struct vm;
+ pg_data_t *last;
+ size_t ptrs_size;
+ unsigned int cpu;
+ ssize_t ret;
+
+ /*
+ * If large pages aren't supported, there's no benefit in doing
+ * this. Also, on non-NUMA, embedding is better; the loop below
+ * bails out unless there are multiple NUMA nodes.
+ */
+ if (!cpu_has_pse)
+ return -EINVAL;
+
+ last = NULL;
+ for_each_possible_cpu(cpu) {
+ int node = early_cpu_to_node(cpu);
- /* Copy section for each CPU (we discard the original) */
- old_size = PERCPU_ENOUGH_ROOM;
- align = max_t(unsigned long, PAGE_SIZE, align);
- size = roundup(old_size, align);
+ if (node_online(node) && NODE_DATA(node) &&
+ last && last != NODE_DATA(node))
+ goto proceed;
- pr_info("NR_CPUS:%d nr_cpumask_bits:%d nr_cpu_ids:%d nr_node_ids:%d\n",
- NR_CPUS, nr_cpumask_bits, nr_cpu_ids, nr_node_ids);
+ last = NODE_DATA(node);
+ }
+ return -EINVAL;
+
+proceed:
+ /*
+ * Currently supports only a single PMD page per cpu. Supporting
+ * multiple pages won't be too difficult if it ever becomes necessary.
+ */
+ pcpur_size = PFN_ALIGN(static_size + PERCPU_DYNAMIC_RESERVE);
+ if (pcpur_size > PMD_SIZE) {
+ pr_warning("PERCPU: static data is larger than large page, "
+ "can't use large page\n");
+ return -EINVAL;
+ }
- pr_info("PERCPU: Allocating %zd bytes of per cpu data\n", size);
+ /* allocate pointer array and alloc large pages */
+ ptrs_size = PFN_ALIGN(num_possible_cpus() * sizeof(pcpur_ptrs[0]));
+ pcpur_ptrs = alloc_bootmem(ptrs_size);
for_each_possible_cpu(cpu) {
-#ifndef CONFIG_NEED_MULTIPLE_NODES
- ptr = __alloc_bootmem(size, align,
- __pa(MAX_DMA_ADDRESS));
-#else
- int node = early_cpu_to_node(cpu);
- if (!node_online(node) || !NODE_DATA(node)) {
- ptr = __alloc_bootmem(size, align,
- __pa(MAX_DMA_ADDRESS));
- pr_info("cpu %d has no node %d or node-local memory\n",
- cpu, node);
- pr_debug("per cpu data for cpu%d at %016lx\n",
- cpu, __pa(ptr));
- } else {
- ptr = __alloc_bootmem_node(NODE_DATA(node), size, align,
- __pa(MAX_DMA_ADDRESS));
- pr_debug("per cpu data for cpu%d on node%d at %016lx\n",
- cpu, node, __pa(ptr));
- }
-#endif
- per_cpu_offset(cpu) = ptr - __per_cpu_start;
- memcpy(ptr, __per_cpu_start, __per_cpu_end - __per_cpu_start);
+ pcpur_ptrs[cpu] = pcpu_alloc_bootmem(cpu, PMD_SIZE, PMD_SIZE);
+ if (!pcpur_ptrs[cpu])
+ goto enomem;
+
+ /*
+ * Only use pcpur_size bytes and give back the rest.
+ *
+ * Ingo: The 2MB up-rounding bootmem is needed to make
+ * sure the partial 2MB page is still fully RAM - it's
+ * not well-specified to have a PAT-incompatible area
+ * (unmapped RAM, device memory, etc.) in that hole.
+ */
+ free_bootmem(__pa(pcpur_ptrs[cpu] + pcpur_size),
+ PMD_SIZE - pcpur_size);
+
+ memcpy(pcpur_ptrs[cpu], __per_cpu_load, static_size);
}
- /* Setup percpu data maps */
- setup_per_cpu_maps();
+ /* allocate address and map */
+ vm.flags = VM_ALLOC;
+ vm.size = num_possible_cpus() * PMD_SIZE;
+ vm_area_register_early(&vm, PMD_SIZE);
- /* Setup node to cpumask map */
- setup_node_to_cpumask_map();
+ for_each_possible_cpu(cpu) {
+ pmd_t *pmd;
- /* Setup cpu initialized, callin, callout masks */
- setup_cpu_local_masks();
-}
+ pmd = populate_extra_pmd((unsigned long)vm.addr
+ + cpu * PMD_SIZE);
+ set_pmd(pmd, pfn_pmd(page_to_pfn(virt_to_page(pcpur_ptrs[cpu])),
+ PAGE_KERNEL_LARGE));
+ }
+ /* we're ready, commit */
+ pr_info("PERCPU: Remapped at %p with large pages, static data "
+ "%zu bytes\n", vm.addr, static_size);
+
+ ret = pcpu_setup_first_chunk(pcpur_get_page, static_size, PMD_SIZE,
+ pcpur_size - static_size, vm.addr, NULL);
+ goto out_free_ar;
+
+enomem:
+ for_each_possible_cpu(cpu)
+ if (pcpur_ptrs[cpu])
+ free_bootmem(__pa(pcpur_ptrs[cpu]), PMD_SIZE);
+ ret = -ENOMEM;
+out_free_ar:
+ free_bootmem(__pa(pcpur_ptrs), ptrs_size);
+ return ret;
+}
+#else
+static ssize_t __init setup_pcpu_remap(size_t static_size)
+{
+ return -EINVAL;
+}
#endif
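/*
 * Editor's sketch, not part of the patch: the resulting virtual
 * layout after setup_pcpu_remap() is N back-to-back PMD-mapped units,
 *
 *     vm.addr + 0 * PMD_SIZE -> pcpur_ptrs[0]   (cpu 0)
 *     vm.addr + 1 * PMD_SIZE -> pcpur_ptrs[1]   (cpu 1)
 *     ...
 *
 * while each pcpur_ptrs[cpu] page stays visible through the linear
 * mapping as well, hence the "mapped twice" note above.
 */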
-#ifdef X86_64_NUMA
-
/*
- * Allocate node_to_cpumask_map based on number of available nodes
- * Requires node_possible_map to be valid.
+ * Embedding allocator
*
- * Note: node_to_cpumask() is not valid until after this is done.
+ * The first chunk is sized to just contain the static area plus
+ * PERCPU_DYNAMIC_RESERVE, allocated as one contiguous area with the
+ * bootmem allocator and used as-is without being mapped into the
+ * vmalloc area. This lets the first chunk piggy-back on the linear
+ * physical mapping's PMD pages and adds no extra TLB pressure.
*/
-static void __init setup_node_to_cpumask_map(void)
-{
- unsigned int node, num = 0;
- cpumask_t *map;
-
- /* setup nr_node_ids if not done yet */
- if (nr_node_ids == MAX_NUMNODES) {
- for_each_node_mask(node, node_possible_map)
- num = node;
- nr_node_ids = num + 1;
- }
-
- /* allocate the map */
- map = alloc_bootmem_low(nr_node_ids * sizeof(cpumask_t));
-
- pr_debug("Node to cpumask map at %p for %d nodes\n",
- map, nr_node_ids);
-
- /* node_to_cpumask() will now work */
- node_to_cpumask_map = map;
-}
+static void *pcpue_ptr __initdata;
+static size_t pcpue_unit_size __initdata;
-void __cpuinit numa_set_node(int cpu, int node)
+static struct page * __init pcpue_get_page(unsigned int cpu, int pageno)
{
- int *cpu_to_node_map = early_per_cpu_ptr(x86_cpu_to_node_map);
-
- if (cpu_pda(cpu) && node != NUMA_NO_NODE)
- cpu_pda(cpu)->nodenumber = node;
-
- if (cpu_to_node_map)
- cpu_to_node_map[cpu] = node;
-
- else if (per_cpu_offset(cpu))
- per_cpu(x86_cpu_to_node_map, cpu) = node;
-
- else
- pr_debug("Setting node for non-present cpu %d\n", cpu);
+ return virt_to_page(pcpue_ptr + cpu * pcpue_unit_size
+ + ((size_t)pageno << PAGE_SHIFT));
}
-void __cpuinit numa_clear_node(int cpu)
+static ssize_t __init setup_pcpu_embed(size_t static_size)
{
- numa_set_node(cpu, NUMA_NO_NODE);
+ unsigned int cpu;
+
+ /*
+ * If large pages aren't supported, there's no benefit in doing
+ * this. Also, the embedding allocator doesn't play well with
+ * NUMA.
+ */
+ if (!cpu_has_pse || pcpu_need_numa())
+ return -EINVAL;
+
+ /* allocate and copy */
+ pcpue_unit_size = PFN_ALIGN(static_size + PERCPU_DYNAMIC_RESERVE);
+ pcpue_unit_size = max_t(size_t, pcpue_unit_size, PCPU_MIN_UNIT_SIZE);
+ pcpue_ptr = pcpu_alloc_bootmem(0, num_possible_cpus() * pcpue_unit_size,
+ PAGE_SIZE);
+ if (!pcpue_ptr)
+ return -ENOMEM;
+
+ for_each_possible_cpu(cpu)
+ memcpy(pcpue_ptr + cpu * pcpue_unit_size, __per_cpu_load,
+ static_size);
+
+ /* we're ready, commit */
+ pr_info("PERCPU: Embedded %zu pages at %p, static data %zu bytes\n",
+ pcpue_unit_size >> PAGE_SHIFT, pcpue_ptr, static_size);
+
+ return pcpu_setup_first_chunk(pcpue_get_page, static_size,
+ pcpue_unit_size,
+ pcpue_unit_size - static_size, pcpue_ptr,
+ NULL);
}
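/*
 * Editor's sketch, not part of the patch: with the embedded layout,
 * pcpue_get_page() above is plain pointer arithmetic; e.g. cpu 2,
 * pageno 3 yields
 *
 *     virt_to_page(pcpue_ptr + 2 * pcpue_unit_size + 3 * PAGE_SIZE)
 *
 * because all units sit back-to-back in a single bootmem allocation.
 */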
-#ifndef CONFIG_DEBUG_PER_CPU_MAPS
+/*
+ * 4k page allocator
+ *
+ * This is the basic allocator. The static percpu area is allocated
+ * page-by-page, and most of the initialization is done by the
+ * generic setup function.
+ */
+static struct page **pcpu4k_pages __initdata;
+static int pcpu4k_nr_static_pages __initdata;
-void __cpuinit numa_add_cpu(int cpu)
+static struct page * __init pcpu4k_get_page(unsigned int cpu, int pageno)
{
- cpu_set(cpu, node_to_cpumask_map[early_cpu_to_node(cpu)]);
+ if (pageno < pcpu4k_nr_static_pages)
+ return pcpu4k_pages[cpu * pcpu4k_nr_static_pages + pageno];
+ return NULL;
}
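/*
 * Editor's sketch, not part of the patch: pcpu4k_pages[] is a flat
 * cpu-major matrix, so with nr = pcpu4k_nr_static_pages the page for
 * (cpu, pageno) sits at index
 *
 *     cpu * nr + pageno
 *
 * and anything past the static pages is reported as NULL, leaving
 * dynamic pages to the generic percpu code.
 */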
-void __cpuinit numa_remove_cpu(int cpu)
+static void __init pcpu4k_populate_pte(unsigned long addr)
{
- cpu_clear(cpu, node_to_cpumask_map[cpu_to_node(cpu)]);
+ populate_extra_pte(addr);
}
-#else /* CONFIG_DEBUG_PER_CPU_MAPS */
-
-/*
- * --------- debug versions of the numa functions ---------
- */
-static void __cpuinit numa_set_cpumask(int cpu, int enable)
+static ssize_t __init setup_pcpu_4k(size_t static_size)
{
- int node = cpu_to_node(cpu);
- cpumask_t *mask;
- char buf[64];
-
- if (node_to_cpumask_map == NULL) {
- printk(KERN_ERR "node_to_cpumask_map NULL\n");
- dump_stack();
- return;
- }
-
- mask = &node_to_cpumask_map[node];
- if (enable)
- cpu_set(cpu, *mask);
- else
- cpu_clear(cpu, *mask);
-
- cpulist_scnprintf(buf, sizeof(buf), mask);
- printk(KERN_DEBUG "%s cpu %d node %d: mask now %s\n",
- enable ? "numa_add_cpu" : "numa_remove_cpu", cpu, node, buf);
-}
+ size_t pages_size;
+ unsigned int cpu;
+ int i, j;
+ ssize_t ret;
+
+ pcpu4k_nr_static_pages = PFN_UP(static_size);
+
+ /* unaligned allocations can't be freed, round up to page size */
+ pages_size = PFN_ALIGN(pcpu4k_nr_static_pages * num_possible_cpus()
+ * sizeof(pcpu4k_pages[0]));
+ pcpu4k_pages = alloc_bootmem(pages_size);
+
+ /* allocate and copy */
+ j = 0;
+ for_each_possible_cpu(cpu)
+ for (i = 0; i < pcpu4k_nr_static_pages; i++) {
+ void *ptr;
+
+ ptr = pcpu_alloc_bootmem(cpu, PAGE_SIZE, PAGE_SIZE);
+ if (!ptr)
+ goto enomem;
+
+ memcpy(ptr, __per_cpu_load + i * PAGE_SIZE, PAGE_SIZE);
+ pcpu4k_pages[j++] = virt_to_page(ptr);
+ }
-void __cpuinit numa_add_cpu(int cpu)
-{
- numa_set_cpumask(cpu, 1);
+ /* we're ready, commit */
+ pr_info("PERCPU: Allocated %d 4k pages, static data %zu bytes\n",
+ pcpu4k_nr_static_pages, static_size);
+
+ ret = pcpu_setup_first_chunk(pcpu4k_get_page, static_size, 0, 0, NULL,
+ pcpu4k_populate_pte);
+ goto out_free_ar;
+
+enomem:
+ while (--j >= 0)
+ free_bootmem(__pa(page_address(pcpu4k_pages[j])), PAGE_SIZE);
+ ret = -ENOMEM;
+out_free_ar:
+ free_bootmem(__pa(pcpu4k_pages), pages_size);
+ return ret;
}
-void __cpuinit numa_remove_cpu(int cpu)
+static inline void setup_percpu_segment(int cpu)
{
- numa_set_cpumask(cpu, 0);
-}
+#ifdef CONFIG_X86_32
+ struct desc_struct gdt;
-int cpu_to_node(int cpu)
-{
- if (early_per_cpu_ptr(x86_cpu_to_node_map)) {
- printk(KERN_WARNING
- "cpu_to_node(%d): usage too early!\n", cpu);
- dump_stack();
- return early_per_cpu_ptr(x86_cpu_to_node_map)[cpu];
- }
- return per_cpu(x86_cpu_to_node_map, cpu);
+ pack_descriptor(&gdt, per_cpu_offset(cpu), 0xFFFFF,
+ 0x2 | DESCTYPE_S, 0x8);
+ gdt.s = 1;
+ write_gdt_entry(get_cpu_gdt_table(cpu),
+ GDT_ENTRY_PERCPU, &gdt, DESCTYPE_S);
+#endif
}
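/*
 * Editor's note, not part of the patch: the descriptor built above is
 * a writable data segment with base per_cpu_offset(cpu) and a 4GB
 * limit (0xFFFFF pages with the granularity flag, 0x8, set), so a
 * 32-bit %fs-relative access to a percpu symbol lands in this cpu's
 * copy. 64-bit needs no GDT entry because it biases %gs through an
 * MSR instead.
 */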
-EXPORT_SYMBOL(cpu_to_node);
/*
- * Same function as cpu_to_node() but used if called before the
- * per_cpu areas are setup.
+ * Great future plan:
+ * Declare PDA itself and support (irqstack,tss,pgd) as per cpu data.
+ * Always point %gs to its beginning
*/
-int early_cpu_to_node(int cpu)
+void __init setup_per_cpu_areas(void)
{
- if (early_per_cpu_ptr(x86_cpu_to_node_map))
- return early_per_cpu_ptr(x86_cpu_to_node_map)[cpu];
-
- if (!per_cpu_offset(cpu)) {
- printk(KERN_WARNING
- "early_cpu_to_node(%d): no per_cpu area!\n", cpu);
- dump_stack();
- return NUMA_NO_NODE;
- }
- return per_cpu(x86_cpu_to_node_map, cpu);
-}
-
+ size_t static_size = __per_cpu_end - __per_cpu_start;
+ unsigned int cpu;
+ unsigned long delta;
+ size_t pcpu_unit_size;
+ ssize_t ret;
-/* empty cpumask */
-static const cpumask_t cpu_mask_none;
-
-/*
- * Returns a pointer to the bitmask of CPUs on Node 'node'.
- */
-const cpumask_t *cpumask_of_node(int node)
-{
- if (node_to_cpumask_map == NULL) {
- printk(KERN_WARNING
- "cpumask_of_node(%d): no node_to_cpumask_map!\n",
- node);
- dump_stack();
- return (const cpumask_t *)&cpu_online_map;
- }
- if (node >= nr_node_ids) {
- printk(KERN_WARNING
- "cpumask_of_node(%d): node > nr_node_ids(%d)\n",
- node, nr_node_ids);
- dump_stack();
- return &cpu_mask_none;
- }
- return &node_to_cpumask_map[node];
-}
-EXPORT_SYMBOL(cpumask_of_node);
+ pr_info("NR_CPUS:%d nr_cpumask_bits:%d nr_cpu_ids:%d nr_node_ids:%d\n",
+ NR_CPUS, nr_cpumask_bits, nr_cpu_ids, nr_node_ids);
-/*
- * Returns a bitmask of CPUs on Node 'node'.
- *
- * Side note: this function creates the returned cpumask on the stack
- * so with a high NR_CPUS count, excessive stack space is used. The
- * node_to_cpumask_ptr function should be used whenever possible.
- */
-cpumask_t node_to_cpumask(int node)
-{
- if (node_to_cpumask_map == NULL) {
- printk(KERN_WARNING
- "node_to_cpumask(%d): no node_to_cpumask_map!\n", node);
- dump_stack();
- return cpu_online_map;
- }
- if (node >= nr_node_ids) {
- printk(KERN_WARNING
- "node_to_cpumask(%d): node > nr_node_ids(%d)\n",
- node, nr_node_ids);
- dump_stack();
- return cpu_mask_none;
+ /*
+ * Allocate percpu area. If PSE is supported, try to make use
+ * of large page mappings. Please read comments on top of
+ * each allocator for details.
+ */
+ ret = setup_pcpu_remap(static_size);
+ if (ret < 0)
+ ret = setup_pcpu_embed(static_size);
+ if (ret < 0)
+ ret = setup_pcpu_4k(static_size);
+ if (ret < 0)
+ panic("cannot allocate static percpu area (%zu bytes, err=%zd)",
+ static_size, ret);
+
+ pcpu_unit_size = ret;
+
+ /* alrighty, percpu areas up and running */
+ delta = (unsigned long)pcpu_base_addr - (unsigned long)__per_cpu_start;
+ for_each_possible_cpu(cpu) {
+ per_cpu_offset(cpu) = delta + cpu * pcpu_unit_size;
+ per_cpu(this_cpu_off, cpu) = per_cpu_offset(cpu);
+ per_cpu(cpu_number, cpu) = cpu;
+ setup_percpu_segment(cpu);
+ setup_stack_canary_segment(cpu);
+ /*
+ * Copy data used in early init routines from the
+ * initial arrays to the per cpu data areas. These
+ * arrays then become expendable and the *_early_ptr's
+ * are zeroed indicating that the static arrays are
+ * gone.
+ */
+#ifdef CONFIG_X86_LOCAL_APIC
+ per_cpu(x86_cpu_to_apicid, cpu) =
+ early_per_cpu_map(x86_cpu_to_apicid, cpu);
+ per_cpu(x86_bios_cpu_apicid, cpu) =
+ early_per_cpu_map(x86_bios_cpu_apicid, cpu);
+#endif
+#ifdef CONFIG_X86_64
+ per_cpu(irq_stack_ptr, cpu) =
+ per_cpu(irq_stack_union.irq_stack, cpu) +
+ IRQ_STACK_SIZE - 64;
+#ifdef CONFIG_NUMA
+ per_cpu(x86_cpu_to_node_map, cpu) =
+ early_per_cpu_map(x86_cpu_to_node_map, cpu);
+#endif
+#endif
+ /*
+ * Up to this point, the boot CPU has been using the .data.init
+ * area. Reload any changed state for the boot CPU.
+ */
+ if (cpu == boot_cpu_id)
+ switch_to_new_gdt(cpu);
}
- return node_to_cpumask_map[node];
-}
-EXPORT_SYMBOL(node_to_cpumask);
-
-/*
- * --------- end of debug versions of the numa functions ---------
- */
-#endif /* CONFIG_DEBUG_PER_CPU_MAPS */
+ /* indicate the early static arrays will soon be gone */
+#ifdef CONFIG_X86_LOCAL_APIC
+ early_per_cpu_ptr(x86_cpu_to_apicid) = NULL;
+ early_per_cpu_ptr(x86_bios_cpu_apicid) = NULL;
+#endif
+#if defined(CONFIG_X86_64) && defined(CONFIG_NUMA)
+ early_per_cpu_ptr(x86_cpu_to_node_map) = NULL;
+#endif
-#endif /* X86_64_NUMA */
+ /* Setup node to cpumask map */
+ setup_node_to_cpumask_map();
+ /* Setup cpu initialized, callin, callout masks */
+ setup_cpu_local_masks();
+}
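/*
 * Editor's sketch, not part of the patch: after the loop above, a
 * static percpu variable is reached as
 *
 *     &var + per_cpu_offset(cpu)
 *         = &var + (pcpu_base_addr - __per_cpu_start)
 *                + cpu * pcpu_unit_size
 *
 * i.e. each cpu's copy lives cpu * pcpu_unit_size bytes into the
 * first chunk set up by whichever allocator succeeded.
 */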