From 56720367cd89ef5265f39da2d674c5b92cd4cd87 Mon Sep 17 00:00:00 2001 From: Andi Kleen Date: Sat, 5 Nov 2005 17:25:53 +0100 Subject: [PATCH] x86_64: Update defconfig Rerun and enable autofs 4, relayfs and softdog Signed-off-by: Andi Kleen Signed-off-by: Linus Torvalds --- arch/x86_64/defconfig | 98 +++++++++++++++++++++++++++++++++++++++++++-------- 1 file changed, 83 insertions(+), 15 deletions(-) (limited to 'arch') diff --git a/arch/x86_64/defconfig b/arch/x86_64/defconfig index f8db7e500fbf..5d56542fb68f 100644 --- a/arch/x86_64/defconfig +++ b/arch/x86_64/defconfig @@ -1,7 +1,7 @@ # # Automatically generated make config: don't edit -# Linux kernel version: 2.6.13-git11 -# Mon Sep 12 16:16:16 2005 +# Linux kernel version: 2.6.14-git7 +# Sat Nov 5 15:55:50 2005 # CONFIG_X86_64=y CONFIG_64BIT=y @@ -35,7 +35,7 @@ CONFIG_POSIX_MQUEUE=y # CONFIG_BSD_PROCESS_ACCT is not set CONFIG_SYSCTL=y # CONFIG_AUDIT is not set -# CONFIG_HOTPLUG is not set +CONFIG_HOTPLUG=y CONFIG_KOBJECT_UEVENT=y CONFIG_IKCONFIG=y CONFIG_IKCONFIG_PROC=y @@ -93,10 +93,11 @@ CONFIG_PREEMPT_NONE=y # CONFIG_PREEMPT_VOLUNTARY is not set # CONFIG_PREEMPT is not set CONFIG_PREEMPT_BKL=y +CONFIG_NUMA=y CONFIG_K8_NUMA=y +CONFIG_X86_64_ACPI_NUMA=y # CONFIG_NUMA_EMU is not set CONFIG_ARCH_DISCONTIGMEM_ENABLE=y -CONFIG_NUMA=y CONFIG_ARCH_DISCONTIGMEM_DEFAULT=y CONFIG_ARCH_SPARSEMEM_ENABLE=y CONFIG_SELECT_MEMORY_MODEL=y @@ -107,9 +108,10 @@ CONFIG_DISCONTIGMEM=y CONFIG_FLAT_NODE_MEM_MAP=y CONFIG_NEED_MULTIPLE_NODES=y # CONFIG_SPARSEMEM_STATIC is not set +CONFIG_SPLIT_PTLOCK_CPUS=4 CONFIG_HAVE_ARCH_EARLY_PFN_TO_NID=y -CONFIG_HAVE_DEC_LOCK=y CONFIG_NR_CPUS=32 +CONFIG_HOTPLUG_CPU=y CONFIG_HPET_TIMER=y CONFIG_X86_PM_TIMER=y CONFIG_HPET_EMULATE_RTC=y @@ -117,6 +119,7 @@ CONFIG_GART_IOMMU=y CONFIG_SWIOTLB=y CONFIG_X86_MCE=y CONFIG_X86_MCE_INTEL=y +CONFIG_X86_MCE_AMD=y CONFIG_PHYSICAL_START=0x100000 # CONFIG_KEXEC is not set CONFIG_SECCOMP=y @@ -136,11 +139,15 @@ CONFIG_PM=y # CONFIG_PM_DEBUG is not set CONFIG_SOFTWARE_SUSPEND=y CONFIG_PM_STD_PARTITION="" +CONFIG_SUSPEND_SMP=y # # ACPI (Advanced Configuration and Power Interface) Support # CONFIG_ACPI=y +CONFIG_ACPI_SLEEP=y +CONFIG_ACPI_SLEEP_PROC_FS=y +CONFIG_ACPI_SLEEP_PROC_SLEEP=y CONFIG_ACPI_AC=y CONFIG_ACPI_BATTERY=y CONFIG_ACPI_BUTTON=y @@ -148,6 +155,7 @@ CONFIG_ACPI_BUTTON=y CONFIG_ACPI_HOTKEY=m CONFIG_ACPI_FAN=y CONFIG_ACPI_PROCESSOR=y +CONFIG_ACPI_HOTPLUG_CPU=y CONFIG_ACPI_THERMAL=y CONFIG_ACPI_NUMA=y # CONFIG_ACPI_ASUS is not set @@ -158,7 +166,7 @@ CONFIG_ACPI_BLACKLIST_YEAR=2001 CONFIG_ACPI_EC=y CONFIG_ACPI_POWER=y CONFIG_ACPI_SYSTEM=y -# CONFIG_ACPI_CONTAINER is not set +CONFIG_ACPI_CONTAINER=y # # CPU Frequency scaling @@ -293,7 +301,6 @@ CONFIG_IPV6=y # Network testing # # CONFIG_NET_PKTGEN is not set -# CONFIG_NETFILTER_NETLINK is not set # CONFIG_HAMRADIO is not set # CONFIG_IRDA is not set # CONFIG_BT is not set @@ -311,6 +318,11 @@ CONFIG_PREVENT_FIRMWARE_BUILD=y # CONFIG_FW_LOADER is not set # CONFIG_DEBUG_DRIVER is not set +# +# Connector - unified userspace <-> kernelspace linker +# +# CONFIG_CONNECTOR is not set + # # Memory Technology Devices (MTD) # @@ -354,6 +366,11 @@ CONFIG_IOSCHED_NOOP=y # CONFIG_IOSCHED_AS is not set CONFIG_IOSCHED_DEADLINE=y CONFIG_IOSCHED_CFQ=y +# CONFIG_DEFAULT_AS is not set +CONFIG_DEFAULT_DEADLINE=y +# CONFIG_DEFAULT_CFQ is not set +# CONFIG_DEFAULT_NOOP is not set +CONFIG_DEFAULT_IOSCHED="cfq" # CONFIG_ATA_OVER_ETH is not set # @@ -450,6 +467,7 @@ CONFIG_BLK_DEV_SD=y CONFIG_SCSI_SPI_ATTRS=y # CONFIG_SCSI_FC_ATTRS is not set # 
CONFIG_SCSI_ISCSI_ATTRS is not set +# CONFIG_SCSI_SAS_ATTRS is not set # # SCSI low-level drivers @@ -469,20 +487,24 @@ CONFIG_AIC79XX_DEBUG_MASK=0 # CONFIG_AIC79XX_REG_PRETTY_PRINT is not set # CONFIG_MEGARAID_NEWGEN is not set # CONFIG_MEGARAID_LEGACY is not set +# CONFIG_MEGARAID_SAS is not set CONFIG_SCSI_SATA=y # CONFIG_SCSI_SATA_AHCI is not set # CONFIG_SCSI_SATA_SVW is not set CONFIG_SCSI_ATA_PIIX=y # CONFIG_SCSI_SATA_MV is not set -# CONFIG_SCSI_SATA_NV is not set -# CONFIG_SCSI_SATA_PROMISE is not set +CONFIG_SCSI_SATA_NV=y +# CONFIG_SCSI_PDC_ADMA is not set # CONFIG_SCSI_SATA_QSTOR is not set +# CONFIG_SCSI_SATA_PROMISE is not set # CONFIG_SCSI_SATA_SX4 is not set # CONFIG_SCSI_SATA_SIL is not set +# CONFIG_SCSI_SATA_SIL24 is not set # CONFIG_SCSI_SATA_SIS is not set # CONFIG_SCSI_SATA_ULI is not set CONFIG_SCSI_SATA_VIA=y # CONFIG_SCSI_SATA_VITESSE is not set +CONFIG_SCSI_SATA_INTEL_COMBINED=y # CONFIG_SCSI_BUSLOGIC is not set # CONFIG_SCSI_DMX3191D is not set # CONFIG_SCSI_EATA is not set @@ -525,6 +547,7 @@ CONFIG_BLK_DEV_DM=y CONFIG_FUSION=y CONFIG_FUSION_SPI=y # CONFIG_FUSION_FC is not set +# CONFIG_FUSION_SAS is not set CONFIG_FUSION_MAX_SGE=128 # CONFIG_FUSION_CTL is not set @@ -564,6 +587,7 @@ CONFIG_NET_ETHERNET=y CONFIG_MII=y # CONFIG_HAPPYMEAL is not set # CONFIG_SUNGEM is not set +# CONFIG_CASSINI is not set CONFIG_NET_VENDOR_3COM=y CONFIG_VORTEX=y # CONFIG_TYPHOON is not set @@ -740,7 +764,43 @@ CONFIG_LEGACY_PTY_COUNT=256 # # Watchdog Cards # -# CONFIG_WATCHDOG is not set +CONFIG_WATCHDOG=y +# CONFIG_WATCHDOG_NOWAYOUT is not set + +# +# Watchdog Device Drivers +# +CONFIG_SOFT_WATCHDOG=y +# CONFIG_ACQUIRE_WDT is not set +# CONFIG_ADVANTECH_WDT is not set +# CONFIG_ALIM1535_WDT is not set +# CONFIG_ALIM7101_WDT is not set +# CONFIG_SC520_WDT is not set +# CONFIG_EUROTECH_WDT is not set +# CONFIG_IB700_WDT is not set +# CONFIG_IBMASR is not set +# CONFIG_WAFER_WDT is not set +# CONFIG_I6300ESB_WDT is not set +# CONFIG_I8XX_TCO is not set +# CONFIG_SC1200_WDT is not set +# CONFIG_60XX_WDT is not set +# CONFIG_SBC8360_WDT is not set +# CONFIG_CPU5_WDT is not set +# CONFIG_W83627HF_WDT is not set +# CONFIG_W83877F_WDT is not set +# CONFIG_W83977F_WDT is not set +# CONFIG_MACHZ_WDT is not set + +# +# PCI-based Watchdog Cards +# +# CONFIG_PCIPCWATCHDOG is not set +# CONFIG_WDTPCI is not set + +# +# USB-based Watchdog Cards +# +# CONFIG_USBPCWATCHDOG is not set CONFIG_HW_RANDOM=y # CONFIG_NVRAM is not set CONFIG_RTC=y @@ -767,6 +827,7 @@ CONFIG_MAX_RAW_DEVS=256 # TPM devices # # CONFIG_TCG_TPM is not set +# CONFIG_TELCLOCK is not set # # I2C support @@ -783,6 +844,7 @@ CONFIG_MAX_RAW_DEVS=256 # CONFIG_HWMON=y # CONFIG_HWMON_VID is not set +# CONFIG_SENSORS_HDAPS is not set # CONFIG_HWMON_DEBUG_CHIP is not set # @@ -886,12 +948,15 @@ CONFIG_USB_UHCI_HCD=y # USB Device Class drivers # # CONFIG_OBSOLETE_OSS_USB_DRIVER is not set -# CONFIG_USB_BLUETOOTH_TTY is not set # CONFIG_USB_ACM is not set CONFIG_USB_PRINTER=y # -# NOTE: USB_STORAGE enables SCSI, and 'SCSI disk support' may also be needed; see USB_STORAGE Help for more information +# NOTE: USB_STORAGE enables SCSI, and 'SCSI disk support' +# + +# +# may also be needed; see USB_STORAGE Help for more information # CONFIG_USB_STORAGE=y # CONFIG_USB_STORAGE_DEBUG is not set @@ -924,6 +989,7 @@ CONFIG_USB_HIDINPUT=y # CONFIG_USB_XPAD is not set # CONFIG_USB_ATI_REMOTE is not set # CONFIG_USB_KEYSPAN_REMOTE is not set +# CONFIG_USB_APPLETOUCH is not set # # USB Imaging devices @@ -1005,7 +1071,7 @@ CONFIG_USB_MON=y # # 
CONFIG_EDD is not set # CONFIG_DELL_RBU is not set -CONFIG_DCDBAS=m +# CONFIG_DCDBAS is not set # # File systems @@ -1037,7 +1103,7 @@ CONFIG_INOTIFY=y # CONFIG_QUOTA is not set CONFIG_DNOTIFY=y CONFIG_AUTOFS_FS=y -# CONFIG_AUTOFS4_FS is not set +CONFIG_AUTOFS4_FS=y # CONFIG_FUSE_FS is not set # @@ -1068,7 +1134,7 @@ CONFIG_TMPFS=y CONFIG_HUGETLBFS=y CONFIG_HUGETLB_PAGE=y CONFIG_RAMFS=y -# CONFIG_RELAYFS_FS is not set +CONFIG_RELAYFS_FS=y # # Miscellaneous filesystems @@ -1186,7 +1252,9 @@ CONFIG_DETECT_SOFTLOCKUP=y # CONFIG_DEBUG_KOBJECT is not set # CONFIG_DEBUG_INFO is not set CONFIG_DEBUG_FS=y +# CONFIG_DEBUG_VM is not set # CONFIG_FRAME_POINTER is not set +# CONFIG_RCU_TORTURE_TEST is not set CONFIG_INIT_DEBUG=y # CONFIG_IOMMU_DEBUG is not set CONFIG_KPROBES=y -- cgit v1.2.3 From a2f1b424900715ed9d1699c3bb88a434a2b42bc0 Mon Sep 17 00:00:00 2001 From: Andi Kleen Date: Sat, 5 Nov 2005 17:25:53 +0100 Subject: [PATCH] x86_64: Add 4GB DMA32 zone Add a new 4GB GFP_DMA32 zone between the GFP_DMA and GFP_NORMAL zones. As a bit of historical background: when the x86-64 port was originally designed we had some discussion if we should use a 16MB DMA zone like i386 or a 4GB DMA zone like IA64 or both. Both was ruled out at this point because it was in early 2.4 when VM is still quite shakey and had bad troubles even dealing with one DMA zone. We settled on the 16MB DMA zone mainly because we worried about older soundcards and the floppy. But this has always caused problems since then because device drivers had trouble getting enough DMA able memory. These days the VM works much better and the wide use of NUMA has proven it can deal with many zones successfully. So this patch adds both zones. This helps drivers who need a lot of memory below 4GB because their hardware is not accessing more (graphic drivers - proprietary and free ones, video frame buffer drivers, sound drivers etc.). Previously they could only use IOMMU+16MB GFP_DMA, which was not enough memory. Another common problem is that hardware who has full memory addressing for >4GB misses it for some control structures in memory (like transmit rings or other metadata). They tended to allocate memory in the 16MB GFP_DMA or the IOMMU/swiotlb then using pci_alloc_consistent, but that can tie up a lot of precious 16MB GFPDMA/IOMMU/swiotlb memory (even on AMD systems the IOMMU tends to be quite small) especially if you have many devices. With the new zone pci_alloc_consistent can just put this stuff into memory below 4GB which works better. One argument was still if the zone should be 4GB or 2GB. The main motivation for 2GB would be an unnamed not so unpopular hardware raid controller (mostly found in older machines from a particular four letter company) who has a strange 2GB restriction in firmware. But that one works ok with swiotlb/IOMMU anyways, so it doesn't really need GFP_DMA32. I chose 4GB to be compatible with IA64 and because it seems to be the most common restriction. The new zone is so far added only for x86-64. For other architectures who don't set up this new zone nothing changes. Architectures can set a compatibility define in Kconfig CONFIG_DMA_IS_DMA32 that will define GFP_DMA32 as GFP_DMA. Otherwise it's a nop because on 32bit architectures it's normally not needed because GFP_NORMAL (=0) is DMA able enough. One problem is still that GFP_DMA means different things on different architectures. e.g. 
some drivers used to have #ifdef ia64 use GFP_DMA (trusting it to be 4GB) #elif __x86_64__ (use other hacks like the swiotlb because 16MB is not enough) ... . This was quite ugly and is now obsolete. These should be now converted to use GFP_DMA32 unconditionally. I haven't done this yet. Or best only use pci_alloc_consistent/dma_alloc_coherent which will use GFP_DMA32 transparently. Signed-off-by: Andi Kleen Signed-off-by: Linus Torvalds --- arch/x86_64/mm/init.c | 65 +++++++++++++++++++++++++++++++++------------------ arch/x86_64/mm/numa.c | 25 ++++---------------- 2 files changed, 47 insertions(+), 43 deletions(-) (limited to 'arch') diff --git a/arch/x86_64/mm/init.c b/arch/x86_64/mm/init.c index e60a1a848de8..a1ad4cc423a7 100644 --- a/arch/x86_64/mm/init.c +++ b/arch/x86_64/mm/init.c @@ -318,32 +318,51 @@ void zap_low_mappings(void) flush_tlb_all(); } +/* Compute zone sizes for the DMA and DMA32 zones in a node. */ +__init void +size_zones(unsigned long *z, unsigned long *h, + unsigned long start_pfn, unsigned long end_pfn) +{ + int i; + unsigned long w; + + for (i = 0; i < MAX_NR_ZONES; i++) + z[i] = 0; + + if (start_pfn < MAX_DMA_PFN) + z[ZONE_DMA] = MAX_DMA_PFN - start_pfn; + if (start_pfn < MAX_DMA32_PFN) { + unsigned long dma32_pfn = MAX_DMA32_PFN; + if (dma32_pfn > end_pfn) + dma32_pfn = end_pfn; + z[ZONE_DMA32] = dma32_pfn - start_pfn; + } + z[ZONE_NORMAL] = end_pfn - start_pfn; + + /* Remove lower zones from higher ones. */ + w = 0; + for (i = 0; i < MAX_NR_ZONES; i++) { + if (z[i]) + z[i] -= w; + w += z[i]; + } + + /* Compute holes */ + w = 0; + for (i = 0; i < MAX_NR_ZONES; i++) { + unsigned long s = w; + w += z[i]; + h[i] = e820_hole_size(s, w); + } +} + #ifndef CONFIG_NUMA void __init paging_init(void) { - { - unsigned long zones_size[MAX_NR_ZONES]; - unsigned long holes[MAX_NR_ZONES]; - unsigned int max_dma; - - memset(zones_size, 0, sizeof(zones_size)); - memset(holes, 0, sizeof(holes)); - - max_dma = virt_to_phys((char *)MAX_DMA_ADDRESS) >> PAGE_SHIFT; - - if (end_pfn < max_dma) { - zones_size[ZONE_DMA] = end_pfn; - holes[ZONE_DMA] = e820_hole_size(0, end_pfn); - } else { - zones_size[ZONE_DMA] = max_dma; - holes[ZONE_DMA] = e820_hole_size(0, max_dma); - zones_size[ZONE_NORMAL] = end_pfn - max_dma; - holes[ZONE_NORMAL] = e820_hole_size(max_dma, end_pfn); - } - free_area_init_node(0, NODE_DATA(0), zones_size, - __pa(PAGE_OFFSET) >> PAGE_SHIFT, holes); - } - return; + unsigned long zones[MAX_NR_ZONES], holes[MAX_NR_ZONES]; + size_zones(zones, holes, 0, end_pfn); + free_area_init_node(0, NODE_DATA(0), zones, + __pa(PAGE_OFFSET) >> PAGE_SHIFT, holes); } #endif diff --git a/arch/x86_64/mm/numa.c b/arch/x86_64/mm/numa.c index 214803821001..18e86e2eac2d 100644 --- a/arch/x86_64/mm/numa.c +++ b/arch/x86_64/mm/numa.c @@ -132,29 +132,14 @@ void __init setup_node_zones(int nodeid) unsigned long start_pfn, end_pfn; unsigned long zones[MAX_NR_ZONES]; unsigned long holes[MAX_NR_ZONES]; - unsigned long dma_end_pfn; - memset(zones, 0, sizeof(unsigned long) * MAX_NR_ZONES); - memset(holes, 0, sizeof(unsigned long) * MAX_NR_ZONES); + start_pfn = node_start_pfn(nodeid); + end_pfn = node_end_pfn(nodeid); - start_pfn = node_start_pfn(nodeid); - end_pfn = node_end_pfn(nodeid); + Dprintk(KERN_INFO "setting up node %d %lx-%lx\n", + nodeid, start_pfn, end_pfn); - Dprintk(KERN_INFO "setting up node %d %lx-%lx\n", nodeid, start_pfn, end_pfn); - - /* All nodes > 0 have a zero length zone DMA */ - dma_end_pfn = __pa(MAX_DMA_ADDRESS) >> PAGE_SHIFT; - if (start_pfn < dma_end_pfn) { - zones[ZONE_DMA] = 
dma_end_pfn - start_pfn; - holes[ZONE_DMA] = e820_hole_size(start_pfn, dma_end_pfn); - zones[ZONE_NORMAL] = end_pfn - dma_end_pfn; - holes[ZONE_NORMAL] = e820_hole_size(dma_end_pfn, end_pfn); - - } else { - zones[ZONE_NORMAL] = end_pfn - start_pfn; - holes[ZONE_NORMAL] = e820_hole_size(start_pfn, end_pfn); - } - + size_zones(zones, holes, start_pfn, end_pfn); free_area_init_node(nodeid, NODE_DATA(nodeid), zones, start_pfn, holes); } -- cgit v1.2.3 From d1e3dfdc2c934a279bcfa19359e4daec02bda18f Mon Sep 17 00:00:00 2001 From: Andi Kleen Date: Sat, 5 Nov 2005 17:25:53 +0100 Subject: [PATCH] x86_64: Set compatibility flag for 4GB zone on IA64 IA64 traditionally had a 4GB DMA32 zone. Set the compatibility flag to keep old drivers working. For new drivers it would be better to use ZONE_DMA32 now. Cc: tony.luck@intel.com Signed-off-by: Andi Kleen Signed-off-by: Linus Torvalds --- arch/ia64/Kconfig | 4 ++++ 1 file changed, 4 insertions(+) (limited to 'arch') diff --git a/arch/ia64/Kconfig b/arch/ia64/Kconfig index 3b4248cff9a7..3dba24c318b7 100644 --- a/arch/ia64/Kconfig +++ b/arch/ia64/Kconfig @@ -58,6 +58,10 @@ config IA64_UNCACHED_ALLOCATOR bool select GENERIC_ALLOCATOR +config ZONE_DMA_IS_DMA32 + bool + default y + choice prompt "System type" default IA64_GENERIC -- cgit v1.2.3 From fed644132f8ec4bf05b63f79c507c0acaa692c37 Mon Sep 17 00:00:00 2001 From: Andi Kleen Date: Sat, 5 Nov 2005 17:25:53 +0100 Subject: [PATCH] x86_64: Make i386 compile again with fourth DMA32 zone The code should deal with an additional empty zone, so fix up the #error. Signed-off-by: Andi Kleen Signed-off-by: Linus Torvalds --- arch/i386/kernel/srat.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'arch') diff --git a/arch/i386/kernel/srat.c b/arch/i386/kernel/srat.c index 8de658db8146..52b3ed5d2cb5 100644 --- a/arch/i386/kernel/srat.c +++ b/arch/i386/kernel/srat.c @@ -137,8 +137,8 @@ static void __init parse_memory_affinity_structure (char *sratp) "enabled and removable" : "enabled" ) ); } -#if MAX_NR_ZONES != 3 -#error "MAX_NR_ZONES != 3, chunk_to_zone requires review" +#if MAX_NR_ZONES != 4 +#error "MAX_NR_ZONES != 4, chunk_to_zone requires review" #endif /* Take a chunk of pages from page frame cstart to cend and count the number * of pages in each zone, returned via zones[]. -- cgit v1.2.3 From e18c6874a505958d153a11f9d6947971c349008a Mon Sep 17 00:00:00 2001 From: Andi Kleen Date: Sat, 5 Nov 2005 17:25:53 +0100 Subject: [PATCH] x86_64: Account mem_map in VM holes accounting The VM needs to know about lost memory in zones to accurately balance dirty pages. This patch accounts mem_map in there too, which fixes a constant errror of a few percent. Also some other misc mappings and the kernel text itself are accounted too. Signed-off-by: Andi Kleen Signed-off-by: Linus Torvalds --- arch/x86_64/mm/init.c | 19 +++++++++++++++++++ 1 file changed, 19 insertions(+) (limited to 'arch') diff --git a/arch/x86_64/mm/init.c b/arch/x86_64/mm/init.c index a1ad4cc423a7..2b1d6c382396 100644 --- a/arch/x86_64/mm/init.c +++ b/arch/x86_64/mm/init.c @@ -47,6 +47,8 @@ extern int swiotlb; extern char _stext[]; +static unsigned long dma_reserve __initdata; + DEFINE_PER_CPU(struct mmu_gather, mmu_gathers); /* @@ -354,6 +356,21 @@ size_zones(unsigned long *z, unsigned long *h, w += z[i]; h[i] = e820_hole_size(s, w); } + + /* Add the space pace needed for mem_map to the holes too. 
*/ + for (i = 0; i < MAX_NR_ZONES; i++) + h[i] += (z[i] * sizeof(struct page)) / PAGE_SIZE; + + /* The 16MB DMA zone has the kernel and other misc mappings. + Account them too */ + if (h[ZONE_DMA]) { + h[ZONE_DMA] += dma_reserve; + if (h[ZONE_DMA] >= z[ZONE_DMA]) { + printk(KERN_WARNING + "Kernel too large and filling up ZONE_DMA?\n"); + h[ZONE_DMA] = z[ZONE_DMA]; + } + } } #ifndef CONFIG_NUMA @@ -510,6 +527,8 @@ void __init reserve_bootmem_generic(unsigned long phys, unsigned len) #else reserve_bootmem(phys, len); #endif + if (phys+len <= MAX_DMA_PFN*PAGE_SIZE) + dma_reserve += len / PAGE_SIZE; } int kern_addr_valid(unsigned long addr) -- cgit v1.2.3 From 89b831ef8bf5cfbb357dbc0a2e07700d7f20eec5 Mon Sep 17 00:00:00 2001 From: Jacob Shin Date: Sat, 5 Nov 2005 17:25:53 +0100 Subject: [PATCH] x86_64: Support for AMD specific MCE Threshold. MC4_MISC - DRAM Errors Threshold Register realized under AMD K8 Rev F. This register is used to count correctable and uncorrectable ECC errors that occur during DRAM read operations. The user may interface through sysfs files in order to change the threshold configuration. bank%d/error_count - reads current error count, write to clear. bank%d/interrupt_enable - set/clear interrupt enable. bank%d/threshold_limit - read/write the threshold limit. APIC vector 0xF9 in hw_irq.h. 5 software defined bank ids in mce.h. new apic.c function to setup threshold apic lvt. defaults to interrupt off, count enabled, and threshold limit max. sysfs interface created on /sys/devices/system/threshold. AK: added some ifdefs to make it compile on UP Signed-off-by: Jacob Shin Signed-off-by: Andi Kleen Signed-off-by: Linus Torvalds --- arch/x86_64/Kconfig | 8 + arch/x86_64/kernel/Makefile | 1 + arch/x86_64/kernel/apic.c | 10 + arch/x86_64/kernel/entry.S | 3 + arch/x86_64/kernel/i8259.c | 2 + arch/x86_64/kernel/mce.c | 3 + arch/x86_64/kernel/mce_amd.c | 538 +++++++++++++++++++++++++++++++++++++++++++ arch/x86_64/kernel/traps.c | 4 + 8 files changed, 569 insertions(+) create mode 100644 arch/x86_64/kernel/mce_amd.c (limited to 'arch') diff --git a/arch/x86_64/Kconfig b/arch/x86_64/Kconfig index 21afa69a086d..504dc52e8bfa 100644 --- a/arch/x86_64/Kconfig +++ b/arch/x86_64/Kconfig @@ -374,6 +374,14 @@ config X86_MCE_INTEL Additional support for intel specific MCE features such as the thermal monitor. +config X86_MCE_AMD + bool "AMD MCE features" + depends on X86_MCE && X86_LOCAL_APIC + default y + help + Additional support for AMD specific MCE features such as + the DRAM Error Threshold. 
+ config PHYSICAL_START hex "Physical address where the kernel is loaded" if EMBEDDED default "0x100000" diff --git a/arch/x86_64/kernel/Makefile b/arch/x86_64/kernel/Makefile index 14328cab5d3a..fe4cbd1c4b2f 100644 --- a/arch/x86_64/kernel/Makefile +++ b/arch/x86_64/kernel/Makefile @@ -11,6 +11,7 @@ obj-y := process.o signal.o entry.o traps.o irq.o \ obj-$(CONFIG_X86_MCE) += mce.o obj-$(CONFIG_X86_MCE_INTEL) += mce_intel.o +obj-$(CONFIG_X86_MCE_AMD) += mce_amd.o obj-$(CONFIG_MTRR) += ../../i386/kernel/cpu/mtrr/ obj-$(CONFIG_ACPI) += acpi/ obj-$(CONFIG_X86_MSR) += msr.o diff --git a/arch/x86_64/kernel/apic.c b/arch/x86_64/kernel/apic.c index b6e7715d877f..18691ce4c759 100644 --- a/arch/x86_64/kernel/apic.c +++ b/arch/x86_64/kernel/apic.c @@ -833,6 +833,16 @@ int setup_profiling_timer(unsigned int multiplier) return 0; } +#ifdef CONFIG_X86_MCE_AMD +void setup_threshold_lvt(unsigned long lvt_off) +{ + unsigned int v = 0; + unsigned long reg = (lvt_off << 4) + 0x500; + v |= THRESHOLD_APIC_VECTOR; + apic_write(reg, v); +} +#endif /* CONFIG_X86_MCE_AMD */ + #undef APIC_DIVISOR /* diff --git a/arch/x86_64/kernel/entry.S b/arch/x86_64/kernel/entry.S index 7937971d1853..9ff42041bb6b 100644 --- a/arch/x86_64/kernel/entry.S +++ b/arch/x86_64/kernel/entry.S @@ -612,6 +612,9 @@ retint_kernel: ENTRY(thermal_interrupt) apicinterrupt THERMAL_APIC_VECTOR,smp_thermal_interrupt +ENTRY(threshold_interrupt) + apicinterrupt THRESHOLD_APIC_VECTOR,mce_threshold_interrupt + #ifdef CONFIG_SMP ENTRY(reschedule_interrupt) apicinterrupt RESCHEDULE_VECTOR,smp_reschedule_interrupt diff --git a/arch/x86_64/kernel/i8259.c b/arch/x86_64/kernel/i8259.c index c6c9791d77c1..5de30035e54b 100644 --- a/arch/x86_64/kernel/i8259.c +++ b/arch/x86_64/kernel/i8259.c @@ -492,6 +492,7 @@ void invalidate_interrupt5(void); void invalidate_interrupt6(void); void invalidate_interrupt7(void); void thermal_interrupt(void); +void threshold_interrupt(void); void i8254_timer_resume(void); static void setup_timer_hardware(void) @@ -580,6 +581,7 @@ void __init init_IRQ(void) set_intr_gate(CALL_FUNCTION_VECTOR, call_function_interrupt); #endif set_intr_gate(THERMAL_APIC_VECTOR, thermal_interrupt); + set_intr_gate(THRESHOLD_APIC_VECTOR, threshold_interrupt); #ifdef CONFIG_X86_LOCAL_APIC /* self generated IPI for local APIC timer */ diff --git a/arch/x86_64/kernel/mce.c b/arch/x86_64/kernel/mce.c index 69541db5ff2c..cf8a76f0f47e 100644 --- a/arch/x86_64/kernel/mce.c +++ b/arch/x86_64/kernel/mce.c @@ -356,6 +356,9 @@ static void __cpuinit mce_cpu_features(struct cpuinfo_x86 *c) case X86_VENDOR_INTEL: mce_intel_feature_init(c); break; + case X86_VENDOR_AMD: + mce_amd_feature_init(c); + break; default: break; } diff --git a/arch/x86_64/kernel/mce_amd.c b/arch/x86_64/kernel/mce_amd.c new file mode 100644 index 000000000000..1f76175ace02 --- /dev/null +++ b/arch/x86_64/kernel/mce_amd.c @@ -0,0 +1,538 @@ +/* + * (c) 2005 Advanced Micro Devices, Inc. + * Your use of this code is subject to the terms and conditions of the + * GNU general public license version 2. See "COPYING" or + * http://www.gnu.org/licenses/gpl.html + * + * Written by Jacob Shin - AMD, Inc. + * + * Support : jacob.shin@amd.com + * + * MC4_MISC0 DRAM ECC Error Threshold available under AMD K8 Rev F. + * MC4_MISC0 exists per physical processor. 
+ * + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#define PFX "mce_threshold: " +#define VERSION "version 1.00.9" +#define NR_BANKS 5 +#define THRESHOLD_MAX 0xFFF +#define INT_TYPE_APIC 0x00020000 +#define MASK_VALID_HI 0x80000000 +#define MASK_LVTOFF_HI 0x00F00000 +#define MASK_COUNT_EN_HI 0x00080000 +#define MASK_INT_TYPE_HI 0x00060000 +#define MASK_OVERFLOW_HI 0x00010000 +#define MASK_ERR_COUNT_HI 0x00000FFF +#define MASK_OVERFLOW 0x0001000000000000L + +struct threshold_bank { + unsigned int cpu; + u8 bank; + u8 interrupt_enable; + u16 threshold_limit; + struct kobject kobj; +}; + +static struct threshold_bank threshold_defaults = { + .interrupt_enable = 0, + .threshold_limit = THRESHOLD_MAX, +}; + +#ifdef CONFIG_SMP +static unsigned char shared_bank[NR_BANKS] = { + 0, 0, 0, 0, 1 +}; +#endif + +static DEFINE_PER_CPU(unsigned char, bank_map); /* see which banks are on */ + +/* + * CPU Initialization + */ + +/* must be called with correct cpu affinity */ +static void threshold_restart_bank(struct threshold_bank *b, + int reset, u16 old_limit) +{ + u32 mci_misc_hi, mci_misc_lo; + + rdmsr(MSR_IA32_MC0_MISC + b->bank * 4, mci_misc_lo, mci_misc_hi); + + if (b->threshold_limit < (mci_misc_hi & THRESHOLD_MAX)) + reset = 1; /* limit cannot be lower than err count */ + + if (reset) { /* reset err count and overflow bit */ + mci_misc_hi = + (mci_misc_hi & ~(MASK_ERR_COUNT_HI | MASK_OVERFLOW_HI)) | + (THRESHOLD_MAX - b->threshold_limit); + } else if (old_limit) { /* change limit w/o reset */ + int new_count = (mci_misc_hi & THRESHOLD_MAX) + + (old_limit - b->threshold_limit); + mci_misc_hi = (mci_misc_hi & ~MASK_ERR_COUNT_HI) | + (new_count & THRESHOLD_MAX); + } + + b->interrupt_enable ? + (mci_misc_hi = (mci_misc_hi & ~MASK_INT_TYPE_HI) | INT_TYPE_APIC) : + (mci_misc_hi &= ~MASK_INT_TYPE_HI); + + mci_misc_hi |= MASK_COUNT_EN_HI; + wrmsr(MSR_IA32_MC0_MISC + b->bank * 4, mci_misc_lo, mci_misc_hi); +} + +void __cpuinit mce_amd_feature_init(struct cpuinfo_x86 *c) +{ + int bank; + u32 mci_misc_lo, mci_misc_hi; + unsigned int cpu = smp_processor_id(); + + for (bank = 0; bank < NR_BANKS; ++bank) { + rdmsr(MSR_IA32_MC0_MISC + bank * 4, mci_misc_lo, mci_misc_hi); + + /* !valid, !counter present, bios locked */ + if (!(mci_misc_hi & MASK_VALID_HI) || + !(mci_misc_hi & MASK_VALID_HI >> 1) || + (mci_misc_hi & MASK_VALID_HI >> 2)) + continue; + + per_cpu(bank_map, cpu) |= (1 << bank); + +#ifdef CONFIG_SMP + if (shared_bank[bank] && cpu_core_id[cpu]) + continue; +#endif + + setup_threshold_lvt((mci_misc_hi & MASK_LVTOFF_HI) >> 20); + threshold_defaults.cpu = cpu; + threshold_defaults.bank = bank; + threshold_restart_bank(&threshold_defaults, 0, 0); + } +} + +/* + * APIC Interrupt Handler + */ + +/* + * threshold interrupt handler will service THRESHOLD_APIC_VECTOR. + * the interrupt goes off when error_count reaches threshold_limit. + * the handler will simply log mcelog w/ software defined bank number. 
+ */ +asmlinkage void mce_threshold_interrupt(void) +{ + int bank; + struct mce m; + + ack_APIC_irq(); + irq_enter(); + + memset(&m, 0, sizeof(m)); + rdtscll(m.tsc); + m.cpu = smp_processor_id(); + + /* assume first bank caused it */ + for (bank = 0; bank < NR_BANKS; ++bank) { + m.bank = MCE_THRESHOLD_BASE + bank; + rdmsrl(MSR_IA32_MC0_MISC + bank * 4, m.misc); + + if (m.misc & MASK_OVERFLOW) { + mce_log(&m); + goto out; + } + } + out: + irq_exit(); +} + +/* + * Sysfs Interface + */ + +static struct sysdev_class threshold_sysclass = { + set_kset_name("threshold"), +}; + +static DEFINE_PER_CPU(struct sys_device, device_threshold); + +struct threshold_attr { + struct attribute attr; + ssize_t(*show) (struct threshold_bank *, char *); + ssize_t(*store) (struct threshold_bank *, const char *, size_t count); +}; + +static DEFINE_PER_CPU(struct threshold_bank *, threshold_banks[NR_BANKS]); + +static cpumask_t affinity_set(unsigned int cpu) +{ + cpumask_t oldmask = current->cpus_allowed; + cpumask_t newmask = CPU_MASK_NONE; + cpu_set(cpu, newmask); + set_cpus_allowed(current, newmask); + return oldmask; +} + +static void affinity_restore(cpumask_t oldmask) +{ + set_cpus_allowed(current, oldmask); +} + +#define SHOW_FIELDS(name) \ + static ssize_t show_ ## name(struct threshold_bank * b, char *buf) \ + { \ + return sprintf(buf, "%lx\n", (unsigned long) b->name); \ + } +SHOW_FIELDS(interrupt_enable) +SHOW_FIELDS(threshold_limit) + +static ssize_t store_interrupt_enable(struct threshold_bank *b, + const char *buf, size_t count) +{ + char *end; + cpumask_t oldmask; + unsigned long new = simple_strtoul(buf, &end, 0); + if (end == buf) + return -EINVAL; + b->interrupt_enable = !!new; + + oldmask = affinity_set(b->cpu); + threshold_restart_bank(b, 0, 0); + affinity_restore(oldmask); + + return end - buf; +} + +static ssize_t store_threshold_limit(struct threshold_bank *b, + const char *buf, size_t count) +{ + char *end; + cpumask_t oldmask; + u16 old; + unsigned long new = simple_strtoul(buf, &end, 0); + if (end == buf) + return -EINVAL; + if (new > THRESHOLD_MAX) + new = THRESHOLD_MAX; + if (new < 1) + new = 1; + old = b->threshold_limit; + b->threshold_limit = new; + + oldmask = affinity_set(b->cpu); + threshold_restart_bank(b, 0, old); + affinity_restore(oldmask); + + return end - buf; +} + +static ssize_t show_error_count(struct threshold_bank *b, char *buf) +{ + u32 high, low; + cpumask_t oldmask; + oldmask = affinity_set(b->cpu); + rdmsr(MSR_IA32_MC0_MISC + b->bank * 4, low, high); /* ignore low 32 */ + affinity_restore(oldmask); + return sprintf(buf, "%x\n", + (high & 0xFFF) - (THRESHOLD_MAX - b->threshold_limit)); +} + +static ssize_t store_error_count(struct threshold_bank *b, + const char *buf, size_t count) +{ + cpumask_t oldmask; + oldmask = affinity_set(b->cpu); + threshold_restart_bank(b, 1, 0); + affinity_restore(oldmask); + return 1; +} + +#define THRESHOLD_ATTR(_name,_mode,_show,_store) { \ + .attr = {.name = __stringify(_name), .mode = _mode }, \ + .show = _show, \ + .store = _store, \ +}; + +#define ATTR_FIELDS(name) \ + static struct threshold_attr name = \ + THRESHOLD_ATTR(name, 0644, show_## name, store_## name) + +ATTR_FIELDS(interrupt_enable); +ATTR_FIELDS(threshold_limit); +ATTR_FIELDS(error_count); + +static struct attribute *default_attrs[] = { + &interrupt_enable.attr, + &threshold_limit.attr, + &error_count.attr, + NULL +}; + +#define to_bank(k) container_of(k,struct threshold_bank,kobj) +#define to_attr(a) container_of(a,struct threshold_attr,attr) + +static ssize_t 
show(struct kobject *kobj, struct attribute *attr, char *buf) +{ + struct threshold_bank *b = to_bank(kobj); + struct threshold_attr *a = to_attr(attr); + ssize_t ret; + ret = a->show ? a->show(b, buf) : -EIO; + return ret; +} + +static ssize_t store(struct kobject *kobj, struct attribute *attr, + const char *buf, size_t count) +{ + struct threshold_bank *b = to_bank(kobj); + struct threshold_attr *a = to_attr(attr); + ssize_t ret; + ret = a->store ? a->store(b, buf, count) : -EIO; + return ret; +} + +static struct sysfs_ops threshold_ops = { + .show = show, + .store = store, +}; + +static struct kobj_type threshold_ktype = { + .sysfs_ops = &threshold_ops, + .default_attrs = default_attrs, +}; + +/* symlinks sibling shared banks to first core. first core owns dir/files. */ +static __cpuinit int threshold_create_bank(unsigned int cpu, int bank) +{ + int err = 0; + struct threshold_bank *b = 0; + +#ifdef CONFIG_SMP + if (cpu_core_id[cpu] && shared_bank[bank]) { /* symlink */ + char name[16]; + unsigned lcpu = first_cpu(cpu_core_map[cpu]); + if (cpu_core_id[lcpu]) + goto out; /* first core not up yet */ + + b = per_cpu(threshold_banks, lcpu)[bank]; + if (!b) + goto out; + sprintf(name, "bank%i", bank); + err = sysfs_create_link(&per_cpu(device_threshold, cpu).kobj, + &b->kobj, name); + if (err) + goto out; + per_cpu(threshold_banks, cpu)[bank] = b; + goto out; + } +#endif + + b = kmalloc(sizeof(struct threshold_bank), GFP_KERNEL); + if (!b) { + err = -ENOMEM; + goto out; + } + memset(b, 0, sizeof(struct threshold_bank)); + + b->cpu = cpu; + b->bank = bank; + b->interrupt_enable = 0; + b->threshold_limit = THRESHOLD_MAX; + kobject_set_name(&b->kobj, "bank%i", bank); + b->kobj.parent = &per_cpu(device_threshold, cpu).kobj; + b->kobj.ktype = &threshold_ktype; + + err = kobject_register(&b->kobj); + if (err) { + kfree(b); + goto out; + } + per_cpu(threshold_banks, cpu)[bank] = b; + out: + return err; +} + +/* create dir/files for all valid threshold banks */ +static __cpuinit int threshold_create_device(unsigned int cpu) +{ + int bank; + int err = 0; + + per_cpu(device_threshold, cpu).id = cpu; + per_cpu(device_threshold, cpu).cls = &threshold_sysclass; + err = sysdev_register(&per_cpu(device_threshold, cpu)); + if (err) + goto out; + + for (bank = 0; bank < NR_BANKS; ++bank) { + if (!(per_cpu(bank_map, cpu) & 1 << bank)) + continue; + err = threshold_create_bank(cpu, bank); + if (err) + goto out; + } + out: + return err; +} + +#ifdef CONFIG_HOTPLUG_CPU +/* + * let's be hotplug friendly. + * in case of multiple core processors, the first core always takes ownership + * of shared sysfs dir/files, and rest of the cores will be symlinked to it. 
+ */ + +/* cpu hotplug call removes all symlinks before first core dies */ +static __cpuinit void threshold_remove_bank(unsigned int cpu, int bank) +{ + struct threshold_bank *b; + char name[16]; + + b = per_cpu(threshold_banks, cpu)[bank]; + if (!b) + return; + if (shared_bank[bank] && atomic_read(&b->kobj.kref.refcount) > 2) { + sprintf(name, "bank%i", bank); + sysfs_remove_link(&per_cpu(device_threshold, cpu).kobj, name); + per_cpu(threshold_banks, cpu)[bank] = 0; + } else { + kobject_unregister(&b->kobj); + kfree(per_cpu(threshold_banks, cpu)[bank]); + } +} + +static __cpuinit void threshold_remove_device(unsigned int cpu) +{ + int bank; + + for (bank = 0; bank < NR_BANKS; ++bank) { + if (!(per_cpu(bank_map, cpu) & 1 << bank)) + continue; + threshold_remove_bank(cpu, bank); + } + sysdev_unregister(&per_cpu(device_threshold, cpu)); +} + +/* link all existing siblings when first core comes up */ +static __cpuinit int threshold_create_symlinks(unsigned int cpu) +{ + int bank, err = 0; + unsigned int lcpu = 0; + + if (cpu_core_id[cpu]) + return 0; + for_each_cpu_mask(lcpu, cpu_core_map[cpu]) { + if (lcpu == cpu) + continue; + for (bank = 0; bank < NR_BANKS; ++bank) { + if (!(per_cpu(bank_map, cpu) & 1 << bank)) + continue; + if (!shared_bank[bank]) + continue; + err = threshold_create_bank(lcpu, bank); + } + } + return err; +} + +/* remove all symlinks before first core dies. */ +static __cpuinit void threshold_remove_symlinks(unsigned int cpu) +{ + int bank; + unsigned int lcpu = 0; + if (cpu_core_id[cpu]) + return; + for_each_cpu_mask(lcpu, cpu_core_map[cpu]) { + if (lcpu == cpu) + continue; + for (bank = 0; bank < NR_BANKS; ++bank) { + if (!(per_cpu(bank_map, cpu) & 1 << bank)) + continue; + if (!shared_bank[bank]) + continue; + threshold_remove_bank(lcpu, bank); + } + } +} +#else /* !CONFIG_HOTPLUG_CPU */ +static __cpuinit void threshold_create_symlinks(unsigned int cpu) +{ +} +static __cpuinit void threshold_remove_symlinks(unsigned int cpu) +{ +} +static void threshold_remove_device(unsigned int cpu) +{ +} +#endif + +/* get notified when a cpu comes on/off */ +static __cpuinit int threshold_cpu_callback(struct notifier_block *nfb, + unsigned long action, void *hcpu) +{ + /* cpu was unsigned int to begin with */ + unsigned int cpu = (unsigned long)hcpu; + + if (cpu >= NR_CPUS) + goto out; + + switch (action) { + case CPU_ONLINE: + threshold_create_device(cpu); + threshold_create_symlinks(cpu); + break; + case CPU_DOWN_PREPARE: + threshold_remove_symlinks(cpu); + break; + case CPU_DOWN_FAILED: + threshold_create_symlinks(cpu); + break; + case CPU_DEAD: + threshold_remove_device(cpu); + break; + default: + break; + } + out: + return NOTIFY_OK; +} + +static struct notifier_block threshold_cpu_notifier = { + .notifier_call = threshold_cpu_callback, +}; + +static __init int threshold_init_device(void) +{ + int err; + int lcpu = 0; + + err = sysdev_class_register(&threshold_sysclass); + if (err) + goto out; + + /* to hit CPUs online before the notifier is up */ + for_each_online_cpu(lcpu) { + err = threshold_create_device(lcpu); + if (err) + goto out; + } + register_cpu_notifier(&threshold_cpu_notifier); + + out: + return err; +} + +device_initcall(threshold_init_device); diff --git a/arch/x86_64/kernel/traps.c b/arch/x86_64/kernel/traps.c index b5e09e6b5536..4a836384dd0f 100644 --- a/arch/x86_64/kernel/traps.c +++ b/arch/x86_64/kernel/traps.c @@ -888,6 +888,10 @@ asmlinkage void __attribute__((weak)) smp_thermal_interrupt(void) { } +asmlinkage void __attribute__((weak)) 
mce_threshold_interrupt(void) +{ +} + /* * 'math_state_restore()' saves the current math information in the * old math state array, and gets the new ones from the current task -- cgit v1.2.3 From 6004e1b7effcbb385a6b7c790e4b8008682cf679 Mon Sep 17 00:00:00 2001 From: James Cleverdon Date: Sat, 5 Nov 2005 17:25:53 +0100 Subject: [PATCH] i386/x86-64: Share interrupt vectors when there is a large number of interrupt sources Here's a patch that builds on Natalie Protasevich's IRQ compression patch and tries to work for MPS boots as well as ACPI. It is meant for a 4-node IBM x460 NUMA box, which was dying because it had interrupt pins with GSI numbers > NR_IRQS and thus overflowed irq_desc. The problem is that this system has 270 GSIs (which are 1:1 mapped with I/O APIC RTEs) and an 8-node box would have 540. This is much bigger than NR_IRQS (224 for both i386 and x86_64). Also, there aren't enough vectors to go around. There are about 190 usable vectors, not counting the reserved ones and the unused vectors at 0x20 to 0x2F. So, my patch attempts to compress the GSI range and share vectors by sharing IRQs. Cc: "Protasevich, Natalie" Signed-off-by: Andi Kleen Signed-off-by: Linus Torvalds --- arch/i386/kernel/acpi/boot.c | 15 ++++----- arch/x86_64/kernel/io_apic.c | 80 ++++++++++++++++++++++++++++++++++++++++---- arch/x86_64/kernel/mpparse.c | 2 +- 3 files changed, 81 insertions(+), 16 deletions(-) (limited to 'arch') diff --git a/arch/i386/kernel/acpi/boot.c b/arch/i386/kernel/acpi/boot.c index b66c13c0cc0f..82754bb80e20 100644 --- a/arch/i386/kernel/acpi/boot.c +++ b/arch/i386/kernel/acpi/boot.c @@ -39,17 +39,14 @@ #ifdef CONFIG_X86_64 -static inline void acpi_madt_oem_check(char *oem_id, char *oem_table_id) -{ -} extern void __init clustered_apic_check(void); -static inline int ioapic_setup_disabled(void) -{ - return 0; -} +extern int gsi_irq_sharing(int gsi); #include +static inline int acpi_madt_oem_check(char *oem_id, char *oem_table_id) { return 0; } + + #else /* X86 */ #ifdef CONFIG_X86_LOCAL_APIC @@ -57,6 +54,8 @@ static inline int ioapic_setup_disabled(void) #include #endif /* CONFIG_X86_LOCAL_APIC */ +static inline int gsi_irq_sharing(int gsi) { return gsi; } + #endif /* X86 */ #define BAD_MADT_ENTRY(entry, end) ( \ @@ -459,7 +458,7 @@ int acpi_gsi_to_irq(u32 gsi, unsigned int *irq) *irq = IO_APIC_VECTOR(gsi); else #endif - *irq = gsi; + *irq = gsi_irq_sharing(gsi); return 0; } diff --git a/arch/x86_64/kernel/io_apic.c b/arch/x86_64/kernel/io_apic.c index c8eee20cd519..97154ab058b4 100644 --- a/arch/x86_64/kernel/io_apic.c +++ b/arch/x86_64/kernel/io_apic.c @@ -57,7 +57,7 @@ int nr_ioapic_registers[MAX_IO_APICS]; * Rough estimation of how many shared IRQs there are, can * be changed anytime. */ -#define MAX_PLUS_SHARED_IRQS NR_IRQS +#define MAX_PLUS_SHARED_IRQS NR_IRQ_VECTORS #define PIN_MAP_SIZE (MAX_PLUS_SHARED_IRQS + NR_IRQS) /* @@ -85,6 +85,7 @@ int vector_irq[NR_VECTORS] __read_mostly = { [0 ... NR_VECTORS - 1] = -1}; int pin; \ struct irq_pin_list *entry = irq_2_pin + irq; \ \ + BUG_ON(irq >= NR_IRQS); \ for (;;) { \ unsigned int reg; \ pin = entry->pin; \ @@ -127,6 +128,8 @@ static void set_ioapic_affinity_irq(unsigned int irq, cpumask_t mask) } #endif +static u8 gsi_2_irq[NR_IRQ_VECTORS] = { [0 ... NR_IRQ_VECTORS-1] = 0xFF }; + /* * The common case is 1:1 IRQ<->pin mappings. Sometimes there are * shared ISA-space IRQs, so we have to support them. 
We are super @@ -137,6 +140,7 @@ static void add_pin_to_irq(unsigned int irq, int apic, int pin) static int first_free_entry = NR_IRQS; struct irq_pin_list *entry = irq_2_pin + irq; + BUG_ON(irq >= NR_IRQS); while (entry->next) entry = irq_2_pin + entry->next; @@ -144,7 +148,7 @@ static void add_pin_to_irq(unsigned int irq, int apic, int pin) entry->next = first_free_entry; entry = irq_2_pin + entry->next; if (++first_free_entry >= PIN_MAP_SIZE) - panic("io_apic.c: whoops"); + panic("io_apic.c: ran out of irq_2_pin entries!"); } entry->apic = apic; entry->pin = pin; @@ -420,6 +424,7 @@ int IO_APIC_get_PCI_irq_vector(int bus, int slot, int pin) best_guess = irq; } } + BUG_ON(best_guess >= NR_IRQS); return best_guess; } @@ -610,6 +615,64 @@ static inline int irq_trigger(int idx) return MPBIOS_trigger(idx); } +static int next_irq = 16; + +/* + * gsi_irq_sharing -- Name overload! "irq" can be either a legacy IRQ + * in the range 0-15, a linux IRQ in the range 0-223, or a GSI number + * from ACPI, which can reach 800 in large boxen. + * + * Compact the sparse GSI space into a sequential IRQ series and reuse + * vectors if possible. + */ +int gsi_irq_sharing(int gsi) +{ + int i, tries, vector; + + BUG_ON(gsi >= NR_IRQ_VECTORS); + + if (platform_legacy_irq(gsi)) + return gsi; + + if (gsi_2_irq[gsi] != 0xFF) + return (int)gsi_2_irq[gsi]; + + tries = NR_IRQS; + try_again: + vector = assign_irq_vector(gsi); + + /* + * Sharing vectors means sharing IRQs, so scan irq_vectors for previous + * use of vector and if found, return that IRQ. However, we never want + * to share legacy IRQs, which usually have a different trigger mode + * than PCI. + */ + for (i = 0; i < NR_IRQS; i++) + if (IO_APIC_VECTOR(i) == vector) + break; + if (platform_legacy_irq(i)) { + if (--tries >= 0) { + IO_APIC_VECTOR(i) = 0; + goto try_again; + } + panic("gsi_irq_sharing: didn't find an IRQ using vector 0x%02X for GSI %d", vector, gsi); + } + if (i < NR_IRQS) { + gsi_2_irq[gsi] = i; + printk(KERN_INFO "GSI %d sharing vector 0x%02X and IRQ %d\n", + gsi, vector, i); + return i; + } + + i = next_irq++; + BUG_ON(i >= NR_IRQS); + gsi_2_irq[gsi] = i; + IO_APIC_VECTOR(i) = vector; + printk(KERN_INFO "GSI %d assigned vector 0x%02X and IRQ %d\n", + gsi, vector, i); + return i; +} + static int pin_2_irq(int idx, int apic, int pin) { int irq, i; @@ -639,6 +702,7 @@ static int pin_2_irq(int idx, int apic, int pin) while (i < apic) irq += nr_ioapic_registers[i++]; irq += pin; + irq = gsi_irq_sharing(irq); break; } default: @@ -648,6 +712,7 @@ static int pin_2_irq(int idx, int apic, int pin) break; } } + BUG_ON(irq >= NR_IRQS); /* * PCI IRQ command line redirection. Yes, limits are hardcoded. @@ -663,6 +728,7 @@ static int pin_2_irq(int idx, int apic, int pin) } } } + BUG_ON(irq >= NR_IRQS); return irq; } @@ -690,8 +756,8 @@ int assign_irq_vector(int irq) { static int current_vector = FIRST_DEVICE_VECTOR, offset = 0; - BUG_ON(irq >= NR_IRQ_VECTORS); - if (IO_APIC_VECTOR(irq) > 0) + BUG_ON(irq != AUTO_ASSIGN && (unsigned)irq >= NR_IRQ_VECTORS); + if (irq != AUTO_ASSIGN && IO_APIC_VECTOR(irq) > 0) return IO_APIC_VECTOR(irq); next: current_vector += 8; @@ -699,9 +765,8 @@ next: goto next; if (current_vector >= FIRST_SYSTEM_VECTOR) { - offset++; - if (!(offset%8)) - return -ENOSPC; + /* If we run out of vectors on large boxen, must share them. 
*/ + offset = (offset + 1) % 8; current_vector = FIRST_DEVICE_VECTOR + offset; } @@ -1917,6 +1982,7 @@ int io_apic_set_pci_routing (int ioapic, int pin, int irq, int edge_level, int a entry.polarity = active_high_low; entry.mask = 1; /* Disabled (masked) */ + irq = gsi_irq_sharing(irq); /* * IRQs < 16 are already in the irq_2_pin[] map */ diff --git a/arch/x86_64/kernel/mpparse.c b/arch/x86_64/kernel/mpparse.c index f16d38d09daf..8f6958e79455 100644 --- a/arch/x86_64/kernel/mpparse.c +++ b/arch/x86_64/kernel/mpparse.c @@ -218,7 +218,7 @@ static void __init MP_intsrc_info (struct mpc_config_intsrc *m) m->mpc_irqtype, m->mpc_irqflag & 3, (m->mpc_irqflag >> 2) & 3, m->mpc_srcbus, m->mpc_srcbusirq, m->mpc_dstapic, m->mpc_dstirq); - if (++mp_irq_entries == MAX_IRQ_SOURCES) + if (++mp_irq_entries >= MAX_IRQ_SOURCES) panic("Max # of irq sources exceeded!!\n"); } -- cgit v1.2.3 From 47492d3667ec519172ab978bd8231b8c7152fa9d Mon Sep 17 00:00:00 2001 From: Andi Kleen Date: Sat, 5 Nov 2005 17:25:53 +0100 Subject: [PATCH] x86_64: Use the DMA32 zone for dma_alloc_coherent()/pci_alloc_consistent Signed-off-by: Andi Kleen Signed-off-by: Linus Torvalds --- arch/x86_64/kernel/pci-gart.c | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) (limited to 'arch') diff --git a/arch/x86_64/kernel/pci-gart.c b/arch/x86_64/kernel/pci-gart.c index 88be97c96987..2e28e855ec3c 100644 --- a/arch/x86_64/kernel/pci-gart.c +++ b/arch/x86_64/kernel/pci-gart.c @@ -220,6 +220,12 @@ dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_handle, uses the normal dma_mask for alloc_coherent. */ dma_mask &= *dev->dma_mask; + /* Why <=? Even when the mask is smaller than 4GB it is often larger + than 16MB and in this case we have a chance of finding fitting memory + in the next higher zone first. If not retry with true GFP_DMA. -AK */ + if (dma_mask <= 0xffffffff) + gfp |= GFP_DMA32; + again: memory = dma_alloc_pages(dev, gfp, get_order(size)); if (memory == NULL) @@ -245,7 +251,7 @@ dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_handle, } if (!(gfp & GFP_DMA)) { - gfp |= GFP_DMA; + gfp = (gfp & ~GFP_DMA32) | GFP_DMA; goto again; } return NULL; -- cgit v1.2.3 From e9b59d834faf0305cb6214a358f65f72c57b9e99 Mon Sep 17 00:00:00 2001 From: Ashok Raj Date: Sat, 5 Nov 2005 17:25:53 +0100 Subject: [PATCH] x86_64: Remove duplicate __cpuinit define Remove duplicate __cpuinit in smp.c. Already defined in init.h which is already included. Signed-off-by: Ashok Raj Signed-off-by: Andi Kleen Signed-off-by: Linus Torvalds --- arch/x86_64/kernel/smp.c | 2 -- 1 file changed, 2 deletions(-) (limited to 'arch') diff --git a/arch/x86_64/kernel/smp.c b/arch/x86_64/kernel/smp.c index 9db9dda161b4..493a0d1f1279 100644 --- a/arch/x86_64/kernel/smp.c +++ b/arch/x86_64/kernel/smp.c @@ -28,8 +28,6 @@ #include #include -#define __cpuinit __init - /* * Smarter SMP flushing macros. * c/o Linus Torvalds. -- cgit v1.2.3 From f5f786d0455c359c554b8f74783f887c0a2c9fac Mon Sep 17 00:00:00 2001 From: Suresh Siddha Date: Sat, 5 Nov 2005 17:25:53 +0100 Subject: [PATCH] x86-64/i386: Fix CPU model for family 6 According to cpuid instruction in IA32 SDM-Vol2, when computing cpu model, we need to consider extended model ID for family 0x6 also. 
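For illustration only, here is a condensed sketch of the signature decoding that the hunks below apply; the standalone helper name is made up for this note, in the kernel the logic lives inline in early_cpu_detect()/early_identify_cpu():

	static void decode_cpuid_signature(unsigned int tfms,
	                                   unsigned int *family, unsigned int *model)
	{
		*family = (tfms >> 8) & 0xf;
		*model  = (tfms >> 4) & 0xf;
		if (*family == 0xf)
			*family += (tfms >> 20) & 0xff;		/* extended family */
		if (*family >= 0x6)
			*model += ((tfms >> 16) & 0xf) << 4;	/* extended model, now applied for family 6 too */
	}
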
AK: Also added fixes/simplifcation from Petr Vandrovec Signed-off-by: Suresh Siddha Signed-off-by: Andi Kleen Signed-off-by: Linus Torvalds --- arch/i386/kernel/cpu/common.c | 4 ++-- arch/x86_64/kernel/setup.c | 4 ++-- 2 files changed, 4 insertions(+), 4 deletions(-) (limited to 'arch') diff --git a/arch/i386/kernel/cpu/common.c b/arch/i386/kernel/cpu/common.c index 74145a33cb0f..35a67dab4a94 100644 --- a/arch/i386/kernel/cpu/common.c +++ b/arch/i386/kernel/cpu/common.c @@ -233,10 +233,10 @@ static void __init early_cpu_detect(void) cpuid(0x00000001, &tfms, &misc, &junk, &cap0); c->x86 = (tfms >> 8) & 15; c->x86_model = (tfms >> 4) & 15; - if (c->x86 == 0xf) { + if (c->x86 == 0xf) c->x86 += (tfms >> 20) & 0xff; + if (c->x86 >= 0x6) c->x86_model += ((tfms >> 16) & 0xF) << 4; - } c->x86_mask = tfms & 15; if (cap0 & (1<<19)) c->x86_cache_alignment = ((misc >> 8) & 0xff) * 8; diff --git a/arch/x86_64/kernel/setup.c b/arch/x86_64/kernel/setup.c index da0bc3e7bdf5..f27731ac95c5 100644 --- a/arch/x86_64/kernel/setup.c +++ b/arch/x86_64/kernel/setup.c @@ -1060,10 +1060,10 @@ void __cpuinit early_identify_cpu(struct cpuinfo_x86 *c) c->x86 = (tfms >> 8) & 0xf; c->x86_model = (tfms >> 4) & 0xf; c->x86_mask = tfms & 0xf; - if (c->x86 == 0xf) { + if (c->x86 == 0xf) c->x86 += (tfms >> 20) & 0xff; + if (c->x86 >= 0x6) c->x86_model += ((tfms >> 16) & 0xF) << 4; - } if (c->x86_capability[0] & (1<<19)) c->x86_clflush_size = ((misc >> 8) & 0xff) * 8; } else { -- cgit v1.2.3 From 50895c5d76e15d8af480eff1aaab5770cabbc2c2 Mon Sep 17 00:00:00 2001 From: Andi Kleen Date: Sat, 5 Nov 2005 17:25:53 +0100 Subject: [PATCH] x86_64: Fix gcc 4 warning in aperture.c Fix arch/x86_64/kernel/aperture.c: In function #iommu_hole_init#: arch/x86_64/kernel/aperture.c:199: warning: #aper_order# may be used uninitialized in this function Signed-off-by: Andi Kleen Signed-off-by: Linus Torvalds --- arch/x86_64/kernel/aperture.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'arch') diff --git a/arch/x86_64/kernel/aperture.c b/arch/x86_64/kernel/aperture.c index 962ad4823b6a..c7f4fdd20f05 100644 --- a/arch/x86_64/kernel/aperture.c +++ b/arch/x86_64/kernel/aperture.c @@ -196,7 +196,7 @@ static __u32 __init search_agp_bridge(u32 *order, int *valid_agp) void __init iommu_hole_init(void) { int fix, num; - u32 aper_size, aper_alloc = 0, aper_order, last_aper_order = 0; + u32 aper_size, aper_alloc = 0, aper_order = 0, last_aper_order = 0; u64 aper_base, last_aper_base = 0; int valid_agp = 0; -- cgit v1.2.3 From 69d81fcde7797342417591ba7affb372b9c86eae Mon Sep 17 00:00:00 2001 From: Andi Kleen Date: Sat, 5 Nov 2005 17:25:53 +0100 Subject: [PATCH] x86_64: Speed up numa_node_id by putting it directly into the PDA Not go from the CPU number to an mapping array. Mode number is often used now in fast paths. 
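For illustration, once numa_set_node() caches the node number in the PDA (see the hunks below), the fast path presumably reduces to a single per-CPU field read; the header side of the change is outside this 'arch'-limited diff, so this is only a sketch assuming the usual read_pda() accessor:

	/* sketch only, not the actual include change */
	#define numa_node_id()	read_pda(nodenumber)
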
This also adds a generic numa_node_id to all the topology includes Suggested by Eric Dumazet Signed-off-by: Andi Kleen Signed-off-by: Linus Torvalds --- arch/x86_64/kernel/setup.c | 4 ++-- arch/x86_64/mm/numa.c | 10 ++++++++-- arch/x86_64/mm/srat.c | 2 +- 3 files changed, 11 insertions(+), 5 deletions(-) (limited to 'arch') diff --git a/arch/x86_64/kernel/setup.c b/arch/x86_64/kernel/setup.c index f27731ac95c5..99cfa751949a 100644 --- a/arch/x86_64/kernel/setup.c +++ b/arch/x86_64/kernel/setup.c @@ -823,7 +823,7 @@ static void __init amd_detect_cmp(struct cpuinfo_x86 *c) if (!node_online(node)) node = nearby_node(apicid); } - cpu_to_node[cpu] = node; + numa_set_node(cpu, node); printk(KERN_INFO "CPU %d(%d) -> Node %d -> Core %d\n", cpu, c->x86_num_cores, node, cpu_core_id[cpu]); @@ -975,7 +975,7 @@ static void srat_detect_node(void) node = apicid_to_node[hard_smp_processor_id()]; if (node == NUMA_NO_NODE) node = 0; - cpu_to_node[cpu] = node; + numa_set_node(cpu, node); if (acpi_numa > 0) printk(KERN_INFO "CPU %d -> Node %d\n", cpu, node); diff --git a/arch/x86_64/mm/numa.c b/arch/x86_64/mm/numa.c index 18e86e2eac2d..4bf64583ba3b 100644 --- a/arch/x86_64/mm/numa.c +++ b/arch/x86_64/mm/numa.c @@ -156,7 +156,7 @@ void __init numa_init_array(void) for (i = 0; i < NR_CPUS; i++) { if (cpu_to_node[i] != NUMA_NO_NODE) continue; - cpu_to_node[i] = rr; + numa_set_node(i, rr); rr = next_node(rr, node_online_map); if (rr == MAX_NUMNODES) rr = first_node(node_online_map); @@ -242,7 +242,7 @@ void __init numa_initmem_init(unsigned long start_pfn, unsigned long end_pfn) nodes_clear(node_online_map); node_set_online(0); for (i = 0; i < NR_CPUS; i++) - cpu_to_node[i] = 0; + numa_set_node(i, 0); node_to_cpumask[0] = cpumask_of_cpu(0); setup_node_bootmem(0, start_pfn << PAGE_SHIFT, end_pfn << PAGE_SHIFT); } @@ -252,6 +252,12 @@ __cpuinit void numa_add_cpu(int cpu) set_bit(cpu, &node_to_cpumask[cpu_to_node(cpu)]); } +void __cpuinit numa_set_node(int cpu, int node) +{ + cpu_pda[cpu].nodenumber = node; + cpu_to_node[cpu] = node; +} + unsigned long __init numa_free_all_bootmem(void) { int i; diff --git a/arch/x86_64/mm/srat.c b/arch/x86_64/mm/srat.c index 4b2e844c15a7..c7aa08a58041 100644 --- a/arch/x86_64/mm/srat.c +++ b/arch/x86_64/mm/srat.c @@ -203,7 +203,7 @@ int __init acpi_scan_nodes(unsigned long start, unsigned long end) if (cpu_to_node[i] == NUMA_NO_NODE) continue; if (!node_isset(cpu_to_node[i], nodes_parsed)) - cpu_to_node[i] = NUMA_NO_NODE; + numa_set_node(i, NUMA_NO_NODE); } numa_init_array(); return 0; -- cgit v1.2.3 From f6c2e3330d3fdd5474bc3756da46fca889a30e33 Mon Sep 17 00:00:00 2001 From: "Siddha, Suresh B" Date: Sat, 5 Nov 2005 17:25:53 +0100 Subject: [PATCH] x86_64: Unmap NULL during early bootup We should zap the low mappings, as soon as possible, so that we can catch kernel bugs more effectively. Previously early boot had NULL mapped and didn't trap on NULL references. This patch introduces boot_level4_pgt, which will always have low identity addresses mapped. Druing boot, all the processors will use this as their level4 pgt. On BP, we will switch to init_level4_pgt as soon as we enter C code and zap the low mappings as soon as we are done with the usage of identity low mapped addresses. On AP's we will zap the low mappings as soon as we jump to C code. 
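Purely as an illustration of the effect described above (a hypothetical snippet, not code from this patch): with the low identity mapping zapped, a stray early dereference now page-faults and oopses instead of silently reading physical address 0.

	static int __init poke_null(void)	/* hypothetical example */
	{
		return *(int *)0;	/* traps once zap_low_mappings() has run */
	}
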
Signed-off-by: Suresh Siddha Signed-off-by: Ashok Raj Signed-off-by: Andi Kleen Signed-off-by: Linus Torvalds --- arch/i386/kernel/acpi/boot.c | 2 +- arch/x86_64/kernel/head.S | 37 +++++++++++++++++++++++-------------- arch/x86_64/kernel/head64.c | 8 ++++++++ arch/x86_64/kernel/mpparse.c | 2 +- arch/x86_64/kernel/setup.c | 2 ++ arch/x86_64/kernel/setup64.c | 2 +- arch/x86_64/kernel/smpboot.c | 3 --- arch/x86_64/mm/init.c | 28 +++++++++++++++++----------- 8 files changed, 53 insertions(+), 31 deletions(-) (limited to 'arch') diff --git a/arch/i386/kernel/acpi/boot.c b/arch/i386/kernel/acpi/boot.c index 82754bb80e20..f36677241ecd 100644 --- a/arch/i386/kernel/acpi/boot.c +++ b/arch/i386/kernel/acpi/boot.c @@ -542,7 +542,7 @@ acpi_scan_rsdp(unsigned long start, unsigned long length) * RSDP signature. */ for (offset = 0; offset < length; offset += 16) { - if (strncmp((char *)(start + offset), "RSD PTR ", sig_len)) + if (strncmp((char *)(phys_to_virt(start) + offset), "RSD PTR ", sig_len)) continue; return (start + offset); } diff --git a/arch/x86_64/kernel/head.S b/arch/x86_64/kernel/head.S index b92e5f45ed46..15290968e49d 100644 --- a/arch/x86_64/kernel/head.S +++ b/arch/x86_64/kernel/head.S @@ -12,6 +12,7 @@ #include #include +#include #include #include #include @@ -70,7 +71,7 @@ startup_32: movl %eax, %cr4 /* Setup early boot stage 4 level pagetables */ - movl $(init_level4_pgt - __START_KERNEL_map), %eax + movl $(boot_level4_pgt - __START_KERNEL_map), %eax movl %eax, %cr3 /* Setup EFER (Extended Feature Enable Register) */ @@ -113,7 +114,7 @@ startup_64: movq %rax, %cr4 /* Setup early boot stage 4 level pagetables. */ - movq $(init_level4_pgt - __START_KERNEL_map), %rax + movq $(boot_level4_pgt - __START_KERNEL_map), %rax movq %rax, %cr3 /* Check if nx is implemented */ @@ -240,20 +241,10 @@ ljumpvector: ENTRY(stext) ENTRY(_stext) - /* - * This default setting generates an ident mapping at address 0x100000 - * and a mapping for the kernel that precisely maps virtual address - * 0xffffffff80000000 to physical address 0x000000. (always using - * 2Mbyte large pages provided by PAE mode) - */ .org 0x1000 ENTRY(init_level4_pgt) - .quad 0x0000000000002007 + __PHYSICAL_START /* -> level3_ident_pgt */ - .fill 255,8,0 - .quad 0x000000000000a007 + __PHYSICAL_START - .fill 254,8,0 - /* (2^48-(2*1024*1024*1024))/(2^39) = 511 */ - .quad 0x0000000000003007 + __PHYSICAL_START /* -> level3_kernel_pgt */ + /* This gets initialized in x86_64_start_kernel */ + .fill 512,8,0 .org 0x2000 ENTRY(level3_ident_pgt) @@ -350,6 +341,24 @@ ENTRY(wakeup_level4_pgt) .quad 0x0000000000003007 + __PHYSICAL_START /* -> level3_kernel_pgt */ #endif +#ifndef CONFIG_HOTPLUG_CPU + __INITDATA +#endif + /* + * This default setting generates an ident mapping at address 0x100000 + * and a mapping for the kernel that precisely maps virtual address + * 0xffffffff80000000 to physical address 0x000000. 
(always using + * 2Mbyte large pages provided by PAE mode) + */ + .align PAGE_SIZE +ENTRY(boot_level4_pgt) + .quad 0x0000000000002007 + __PHYSICAL_START /* -> level3_ident_pgt */ + .fill 255,8,0 + .quad 0x000000000000a007 + __PHYSICAL_START + .fill 254,8,0 + /* (2^48-(2*1024*1024*1024))/(2^39) = 511 */ + .quad 0x0000000000003007 + __PHYSICAL_START /* -> level3_kernel_pgt */ + .data .align 16 diff --git a/arch/x86_64/kernel/head64.c b/arch/x86_64/kernel/head64.c index cf6ab147a2a5..e0f12d49db4c 100644 --- a/arch/x86_64/kernel/head64.c +++ b/arch/x86_64/kernel/head64.c @@ -19,6 +19,7 @@ #include #include #include +#include /* Don't add a printk in there. printk relies on the PDA which is not initialized yet. */ @@ -86,6 +87,13 @@ void __init x86_64_start_kernel(char * real_mode_data) set_intr_gate(i, early_idt_handler); asm volatile("lidt %0" :: "m" (idt_descr)); clear_bss(); + + /* + * switch to init_level4_pgt from boot_level4_pgt + */ + memcpy(init_level4_pgt, boot_level4_pgt, PTRS_PER_PGD*sizeof(pgd_t)); + asm volatile("movq %0,%%cr3" :: "r" (__pa_symbol(&init_level4_pgt))); + pda_init(0); copy_bootdata(real_mode_data); #ifdef CONFIG_SMP diff --git a/arch/x86_64/kernel/mpparse.c b/arch/x86_64/kernel/mpparse.c index 8f6958e79455..4a581d1cefbd 100644 --- a/arch/x86_64/kernel/mpparse.c +++ b/arch/x86_64/kernel/mpparse.c @@ -549,7 +549,7 @@ void __init get_smp_config (void) * Read the physical hardware table. Anything here will * override the defaults. */ - if (!smp_read_mpc((void *)(unsigned long)mpf->mpf_physptr)) { + if (!smp_read_mpc(phys_to_virt(mpf->mpf_physptr))) { smp_found_config = 0; printk(KERN_ERR "BIOS bug, MP table errors detected!...\n"); printk(KERN_ERR "... disabling SMP support. (tell your hw vendor)\n"); diff --git a/arch/x86_64/kernel/setup.c b/arch/x86_64/kernel/setup.c index 99cfa751949a..f76f3743f5d7 100644 --- a/arch/x86_64/kernel/setup.c +++ b/arch/x86_64/kernel/setup.c @@ -571,6 +571,8 @@ void __init setup_arch(char **cmdline_p) init_memory_mapping(0, (end_pfn_map << PAGE_SHIFT)); + zap_low_mappings(0); + #ifdef CONFIG_ACPI /* * Initialize the ACPI boot-time table parser (gets the RSDP and SDT). 
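/*
 * Editorial summary (an interpretation pieced together from the hunks in
 * this patch, not text taken from it):
 *  1. head.S starts both BP and APs on boot_level4_pgt, which keeps the
 *     low identity mapping.
 *  2. On the BP, x86_64_start_kernel() copies boot_level4_pgt into
 *     init_level4_pgt and switches %cr3 to it before pda_init(0).
 *  3. setup_arch() calls zap_low_mappings(0) once the identity mapping is
 *     no longer needed; each AP's cpu_init() calls zap_low_mappings(cpu),
 *     which simply reloads %cr3 with init_level4_pgt.
 *  4. mem_init() copies every pgd slot except the low identity one from
 *     init_level4_pgt back into boot_level4_pgt, so later AP bringup (and
 *     CPU hotplug) still has a usable boot page table.
 */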
diff --git a/arch/x86_64/kernel/setup64.c b/arch/x86_64/kernel/setup64.c index 79190891fbc5..06dc354375c3 100644 --- a/arch/x86_64/kernel/setup64.c +++ b/arch/x86_64/kernel/setup64.c @@ -141,7 +141,6 @@ void pda_init(int cpu) panic("cannot allocate irqstack for cpu %d", cpu); } - asm volatile("movq %0,%%cr3" :: "r" (__pa_symbol(&init_level4_pgt))); pda->irqstackptr += IRQSTACKSIZE-64; } @@ -197,6 +196,7 @@ void __cpuinit cpu_init (void) /* CPU 0 is initialised in head64.c */ if (cpu != 0) { pda_init(cpu); + zap_low_mappings(cpu); } else estacks = boot_exception_stacks; diff --git a/arch/x86_64/kernel/smpboot.c b/arch/x86_64/kernel/smpboot.c index 658a81b33f3b..3393fc08823b 100644 --- a/arch/x86_64/kernel/smpboot.c +++ b/arch/x86_64/kernel/smpboot.c @@ -1063,9 +1063,6 @@ int __cpuinit __cpu_up(unsigned int cpu) */ void __init smp_cpus_done(unsigned int max_cpus) { -#ifndef CONFIG_HOTPLUG_CPU - zap_low_mappings(); -#endif smp_cleanup_boot(); #ifdef CONFIG_X86_IO_APIC diff --git a/arch/x86_64/mm/init.c b/arch/x86_64/mm/init.c index 2b1d6c382396..be483a1d7b54 100644 --- a/arch/x86_64/mm/init.c +++ b/arch/x86_64/mm/init.c @@ -312,12 +312,19 @@ void __init init_memory_mapping(unsigned long start, unsigned long end) extern struct x8664_pda cpu_pda[NR_CPUS]; -/* Assumes all CPUs still execute in init_mm */ -void zap_low_mappings(void) +void __cpuinit zap_low_mappings(int cpu) { - pgd_t *pgd = pgd_offset_k(0UL); - pgd_clear(pgd); - flush_tlb_all(); + if (cpu == 0) { + pgd_t *pgd = pgd_offset_k(0UL); + pgd_clear(pgd); + } else { + /* + * For AP's, zap the low identity mappings by changing the cr3 + * to init_level4_pgt and doing local flush tlb all + */ + asm volatile("movq %0,%%cr3" :: "r" (__pa_symbol(&init_level4_pgt))); + } + __flush_tlb_all(); } /* Compute zone sizes for the DMA and DMA32 zones in a node. */ @@ -474,14 +481,13 @@ void __init mem_init(void) datasize >> 10, initsize >> 10); +#ifdef CONFIG_SMP /* - * Subtle. SMP is doing its boot stuff late (because it has to - * fork idle threads) - but it also needs low mappings for the - * protected-mode entry to work. We zap these entries only after - * the WP-bit has been tested. + * Sync boot_level4_pgt mappings with the init_level4_pgt + * except for the low identity mappings which are already zapped + * in init_level4_pgt. This sync-up is essential for AP's bringup */ -#ifndef CONFIG_SMP - zap_low_mappings(); + memcpy(boot_level4_pgt+1, init_level4_pgt+1, (PTRS_PER_PGD-1)*sizeof(pgd_t)); #endif } -- cgit v1.2.3 From 2bc0414ee04fd8bb798760801f5d7476dff44241 Mon Sep 17 00:00:00 2001 From: Andi Kleen Date: Sat, 5 Nov 2005 17:25:53 +0100 Subject: [PATCH] x86_64: Only use asm/sections.h to declare section symbols Adding __initdata_* to asm-generic/sections.h Replaces a lot of open coded externs in arch/x86_64/* I had to change __bss_end to __bss_stop to match the other architectures. Signed-off-by: Andi Kleen Signed-off-by: Linus Torvalds --- arch/x86_64/kernel/e820.c | 3 +-- arch/x86_64/kernel/head64.c | 6 ++---- arch/x86_64/kernel/setup.c | 2 +- arch/x86_64/kernel/vmlinux.lds.S | 2 +- arch/x86_64/mm/init.c | 10 ++-------- 5 files changed, 7 insertions(+), 16 deletions(-) (limited to 'arch') diff --git a/arch/x86_64/kernel/e820.c b/arch/x86_64/kernel/e820.c index ab3f87aaff70..17579a1a174b 100644 --- a/arch/x86_64/kernel/e820.c +++ b/arch/x86_64/kernel/e820.c @@ -23,8 +23,7 @@ #include #include #include - -extern char _end[]; +#include /* * PFN of last memory page. 
diff --git a/arch/x86_64/kernel/head64.c b/arch/x86_64/kernel/head64.c index e0f12d49db4c..b675c5add01e 100644 --- a/arch/x86_64/kernel/head64.c +++ b/arch/x86_64/kernel/head64.c @@ -20,14 +20,14 @@ #include #include #include +#include /* Don't add a printk in there. printk relies on the PDA which is not initialized yet. */ static void __init clear_bss(void) { - extern char __bss_start[], __bss_end[]; memset(__bss_start, 0, - (unsigned long) __bss_end - (unsigned long) __bss_start); + (unsigned long) __bss_stop - (unsigned long) __bss_start); } #define NEW_CL_POINTER 0x228 /* Relative to real mode data */ @@ -76,8 +76,6 @@ static void __init setup_boot_cpu_data(void) boot_cpu_data.x86_mask = eax & 0xf; } -extern char _end[]; - void __init x86_64_start_kernel(char * real_mode_data) { char *s; diff --git a/arch/x86_64/kernel/setup.c b/arch/x86_64/kernel/setup.c index f76f3743f5d7..2ad3556dda52 100644 --- a/arch/x86_64/kernel/setup.c +++ b/arch/x86_64/kernel/setup.c @@ -60,6 +60,7 @@ #include #include #include +#include /* * Machine setup.. @@ -103,7 +104,6 @@ struct edid_info edid_info; struct e820map e820; extern int root_mountflags; -extern char _text, _etext, _edata, _end; char command_line[COMMAND_LINE_SIZE]; diff --git a/arch/x86_64/kernel/vmlinux.lds.S b/arch/x86_64/kernel/vmlinux.lds.S index 6dd642cad2ef..58b19215b4b3 100644 --- a/arch/x86_64/kernel/vmlinux.lds.S +++ b/arch/x86_64/kernel/vmlinux.lds.S @@ -50,7 +50,7 @@ SECTIONS *(.bss.page_aligned) *(.bss) } - __bss_end = .; + __bss_stop = .; . = ALIGN(PAGE_SIZE); . = ALIGN(CONFIG_X86_L1_CACHE_BYTES); diff --git a/arch/x86_64/mm/init.c b/arch/x86_64/mm/init.c index be483a1d7b54..489e18df1fe9 100644 --- a/arch/x86_64/mm/init.c +++ b/arch/x86_64/mm/init.c @@ -36,6 +36,7 @@ #include #include #include +#include #ifndef Dprintk #define Dprintk(x...) @@ -45,8 +46,6 @@ extern int swiotlb; #endif -extern char _stext[]; - static unsigned long dma_reserve __initdata; DEFINE_PER_CPU(struct mmu_gather, mmu_gathers); @@ -88,9 +87,6 @@ void show_mem(void) /* References to section boundaries */ -extern char _text, _etext, _edata, __bss_start, _end[]; -extern char __init_begin, __init_end; - int after_bootmem; static void *spp_getpage(void) @@ -491,8 +487,6 @@ void __init mem_init(void) #endif } -extern char __initdata_begin[], __initdata_end[]; - void free_initmem(void) { unsigned long addr; @@ -506,7 +500,7 @@ void free_initmem(void) totalram_pages++; } memset(__initdata_begin, 0xba, __initdata_end - __initdata_begin); - printk ("Freeing unused kernel memory: %luk freed\n", (&__init_end - &__init_begin) >> 10); + printk ("Freeing unused kernel memory: %luk freed\n", (__init_end - __init_begin) >> 10); } #ifdef CONFIG_BLK_DEV_INITRD -- cgit v1.2.3 From 4d74dbd79a571b31f7cd2c69bb5e44368972bf93 Mon Sep 17 00:00:00 2001 From: Andi Kleen Date: Sat, 5 Nov 2005 17:25:53 +0100 Subject: [PATCH] x86_64: Replace cpu_pda extern with include Minor cleanup - remove obsolete extern Signed-off-by: Andi Kleen Signed-off-by: Linus Torvalds --- arch/x86_64/mm/init.c | 2 -- 1 file changed, 2 deletions(-) (limited to 'arch') diff --git a/arch/x86_64/mm/init.c b/arch/x86_64/mm/init.c index 489e18df1fe9..63f192cd7c1c 100644 --- a/arch/x86_64/mm/init.c +++ b/arch/x86_64/mm/init.c @@ -306,8 +306,6 @@ void __init init_memory_mapping(unsigned long start, unsigned long end) table_end< Date: Sat, 5 Nov 2005 17:25:53 +0100 Subject: [PATCH] x86_64: Replace swiotlb extern with include Minor victory on the continuous quest against all stray extern. 
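The cleanup follows the same pattern as the sections.h change above: the declaration moves into one shared header and every user includes it instead of open-coding an extern. A minimal stand-alone sketch of that pattern (the header and symbol names here are invented for illustration, not the actual kernel files):

```c
/* include/asm/foo.h (hypothetical header): one authoritative declaration */
extern int foo_enabled;

/*
 * user.c -- before the cleanup each file carried its own copy:
 *
 *     extern int foo_enabled;   // silently diverges if the real type changes
 *
 * after the cleanup it simply includes the shared header:
 *
 *     #include <asm/foo.h>
 */
int foo_enabled = 1;            /* the single definition, in exactly one .c file */

int foo_is_enabled(void)
{
        return foo_enabled;     /* every user sees the same declaration */
}
```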
Signed-off-by: Andi Kleen Signed-off-by: Linus Torvalds --- arch/x86_64/mm/init.c | 5 +---- 1 file changed, 1 insertion(+), 4 deletions(-) (limited to 'arch') diff --git a/arch/x86_64/mm/init.c b/arch/x86_64/mm/init.c index 63f192cd7c1c..854a41b8372b 100644 --- a/arch/x86_64/mm/init.c +++ b/arch/x86_64/mm/init.c @@ -22,6 +22,7 @@ #include #include #include +#include #include #include @@ -42,10 +43,6 @@ #define Dprintk(x...) #endif -#ifdef CONFIG_GART_IOMMU -extern int swiotlb; -#endif - static unsigned long dma_reserve __initdata; DEFINE_PER_CPU(struct mmu_gather, mmu_gathers); -- cgit v1.2.3 From 420f8f68c9c5148dddf946bebdbc7eacde2172cb Mon Sep 17 00:00:00 2001 From: Andi Kleen Date: Sat, 5 Nov 2005 17:25:54 +0100 Subject: [PATCH] x86_64: New heuristics to find out hotpluggable CPUs. With a NR_CPUS==128 kernel with CPU hotplug enabled we would waste 4MB on per CPU data of all possible CPUs. The reason was that HOTPLUG always set up possible map to NR_CPUS cpus and then we need to allocate that much (each per CPU data is roughly ~32k now) The underlying problem is that ACPI didn't tell us how many hotplug CPUs the platform supports. So the old code just assumed all, which would lead to this memory wastage. This implements some new heuristics: - If the BIOS specified disabled CPUs in the ACPI/mptables assume they can be enabled later (this is bending the ACPI specification a bit, but seems like a obvious extension) - The user can overwrite it with a new additionals_cpus=NUM option - Otherwise use half of the available CPUs or 2, whatever is more. Cc: ashok.raj@intel.com Cc: len.brown@intel.com Signed-off-by: Andi Kleen Signed-off-by: Linus Torvalds --- arch/x86_64/kernel/mpparse.c | 8 ++++++-- arch/x86_64/kernel/smpboot.c | 39 ++++++++++++++++++++++++++++++++++++--- 2 files changed, 42 insertions(+), 5 deletions(-) (limited to 'arch') diff --git a/arch/x86_64/kernel/mpparse.c b/arch/x86_64/kernel/mpparse.c index 4a581d1cefbd..1d61f10a92c6 100644 --- a/arch/x86_64/kernel/mpparse.c +++ b/arch/x86_64/kernel/mpparse.c @@ -65,7 +65,9 @@ unsigned long mp_lapic_addr = 0; /* Processor that is doing the boot up */ unsigned int boot_cpu_id = -1U; /* Internal processor count */ -static unsigned int num_processors = 0; +unsigned int num_processors __initdata = 0; + +unsigned disabled_cpus __initdata; /* Bitmask of physically existing CPUs */ physid_mask_t phys_cpu_present_map = PHYSID_MASK_NONE; @@ -109,8 +111,10 @@ static void __init MP_processor_info (struct mpc_config_processor *m) int ver, cpu; static int found_bsp=0; - if (!(m->mpc_cpuflag & CPU_ENABLED)) + if (!(m->mpc_cpuflag & CPU_ENABLED)) { + disabled_cpus++; return; + } printk(KERN_INFO "Processor #%d %d:%d APIC version %d\n", m->mpc_apicid, diff --git a/arch/x86_64/kernel/smpboot.c b/arch/x86_64/kernel/smpboot.c index 3393fc08823b..f74319a80659 100644 --- a/arch/x86_64/kernel/smpboot.c +++ b/arch/x86_64/kernel/smpboot.c @@ -880,6 +880,9 @@ static __init void disable_smp(void) } #ifdef CONFIG_HOTPLUG_CPU + +int additional_cpus __initdata = -1; + /* * cpu_possible_map should be static, it cannot change as cpu's * are onlined, or offlined. The reason is per-cpu data-structures @@ -888,14 +891,38 @@ static __init void disable_smp(void) * cpu_present_map on the other hand can change dynamically. * In case when cpu_hotplug is not compiled, then we resort to current * behaviour, which is cpu_possible == cpu_present. 
- * If cpu-hotplug is supported, then we need to preallocate for all - * those NR_CPUS, hence cpu_possible_map represents entire NR_CPUS range. * - Ashok Raj + * + * Three ways to find out the number of additional hotplug CPUs: + * - If the BIOS specified disabled CPUs in ACPI/mptables use that. + * - otherwise use half of the available CPUs or 2, whatever is more. + * - The user can overwrite it with additional_cpus=NUM + * We do this because additional CPUs waste a lot of memory. + * -AK */ __init void prefill_possible_map(void) { int i; - for (i = 0; i < NR_CPUS; i++) + int possible; + + if (additional_cpus == -1) { + if (disabled_cpus > 0) { + additional_cpus = disabled_cpus; + } else { + additional_cpus = num_processors / 2; + if (additional_cpus == 0) + additional_cpus = 2; + } + } + possible = num_processors + additional_cpus; + if (possible > NR_CPUS) + possible = NR_CPUS; + + printk(KERN_INFO "SMP: Allowing %d CPUs, %d hotplug CPUs\n", + possible, + max_t(int, possible - num_processors, 0)); + + for (i = 0; i < possible; i++) cpu_set(i, cpu_possible_map); } #endif @@ -1151,6 +1178,12 @@ void __cpu_die(unsigned int cpu) printk(KERN_ERR "CPU %u didn't die...\n", cpu); } +static __init int setup_additional_cpus(char *s) +{ + return get_option(&s, &additional_cpus); +} +__setup("additional_cpus=", setup_additional_cpus); + #else /* ... !CONFIG_HOTPLUG_CPU */ int __cpu_disable(void) -- cgit v1.2.3 From e4e5d324b9c5586f408a72d1534474c449dd5212 Mon Sep 17 00:00:00 2001 From: Bryan Ford Date: Sat, 5 Nov 2005 17:25:54 +0100 Subject: [PATCH] x86_64: Save/restore CS in 64bit signal handlers and force __USER_CS for CS This allows to run 64bit signal handlers in 64bit processes that run small code snippets in compat mode. Signed-off-by: Andi Kleen Signed-off-by: Linus Torvalds --- arch/x86_64/kernel/signal.c | 17 +++++++++++++++++ 1 file changed, 17 insertions(+) (limited to 'arch') diff --git a/arch/x86_64/kernel/signal.c b/arch/x86_64/kernel/signal.c index d642fbf3da29..5876df116c92 100644 --- a/arch/x86_64/kernel/signal.c +++ b/arch/x86_64/kernel/signal.c @@ -110,6 +110,15 @@ restore_sigcontext(struct pt_regs *regs, struct sigcontext __user *sc, unsigned COPY(r14); COPY(r15); + /* Kernel saves and restores only the CS segment register on signals, + * which is the bare minimum needed to allow mixed 32/64-bit code. + * App's signal handler can save/restore other segments if needed. */ + { + unsigned cs; + err |= __get_user(cs, &sc->cs); + regs->cs = cs | 3; /* Force into user mode */ + } + { unsigned int tmpflags; err |= __get_user(tmpflags, &sc->eflags); @@ -187,6 +196,7 @@ setup_sigcontext(struct sigcontext __user *sc, struct pt_regs *regs, unsigned lo { int err = 0; + err |= __put_user(regs->cs, &sc->cs); err |= __put_user(0, &sc->gs); err |= __put_user(0, &sc->fs); @@ -318,7 +328,14 @@ static int setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info, regs->rsp = (unsigned long)frame; + /* Set up the CS register to run signal handlers in 64-bit mode, + even if the handler happens to be interrupting 32-bit code. */ + regs->cs = __USER_CS; + + /* This, by contrast, has nothing to do with segment registers - + see include/asm-x86_64/uaccess.h for details. 
*/ set_fs(USER_DS); + regs->eflags &= ~TF_MASK; if (test_thread_flag(TIF_SINGLESTEP)) ptrace_notify(SIGTRAP); -- cgit v1.2.3 From 529a340402e419f935d411ce0a085e96fcaf3872 Mon Sep 17 00:00:00 2001 From: Eric Dumazet Date: Sat, 5 Nov 2005 17:25:54 +0100 Subject: [PATCH] x86_64: Optimize NUMA node hash function Compute the highest possible value for memnode_shift, in order to reduce footprint of memnodemap[] to the minimum, thus making all users (phys_to_nid(), kfree()), more cache friendly. Before the patch : Node 0 MemBase 0000000000000000 Limit 00000001ffffffff Node 1 MemBase 0000000200000000 Limit 00000003ffffffff Using 23 for the hash shift. Max adder is 3ffffffff After the patch : Node 0 MemBase 0000000000000000 Limit 00000001ffffffff Node 1 MemBase 0000000200000000 Limit 00000003ffffffff Using 33 for the hash shift. In this case, only 2 bytes of memnodemap[] are used, instead of 2048 Signed-off-by: Eric Dumazet Signed-off-by: Andi Kleen Signed-off-by: Linus Torvalds --- arch/x86_64/mm/numa.c | 67 +++++++++++++++++++++++++++++++++------------------ 1 file changed, 43 insertions(+), 24 deletions(-) (limited to 'arch') diff --git a/arch/x86_64/mm/numa.c b/arch/x86_64/mm/numa.c index 4bf64583ba3b..edd5559380d3 100644 --- a/arch/x86_64/mm/numa.c +++ b/arch/x86_64/mm/numa.c @@ -38,38 +38,57 @@ cpumask_t node_to_cpumask[MAX_NUMNODES] __read_mostly; int numa_off __initdata; -int __init compute_hash_shift(struct node *nodes, int numnodes) + +/* + * Given a shift value, try to populate memnodemap[] + * Returns : + * 1 if OK + * 0 if memnodmap[] too small (of shift too small) + * -1 if node overlap or lost ram (shift too big) + */ +static int __init populate_memnodemap( + const struct node *nodes, int numnodes, int shift) { int i; - int shift = 20; - unsigned long addr,maxend=0; - - for (i = 0; i < numnodes; i++) - if ((nodes[i].start != nodes[i].end) && (nodes[i].end > maxend)) - maxend = nodes[i].end; + int res = -1; + unsigned long addr, end; - while ((1UL << shift) < (maxend / NODEMAPSIZE)) - shift++; - - printk (KERN_DEBUG"Using %d for the hash shift. 
Max adder is %lx \n", - shift,maxend); - memset(memnodemap,0xff,sizeof(*memnodemap) * NODEMAPSIZE); + memset(memnodemap, 0xff, sizeof(memnodemap)); for (i = 0; i < numnodes; i++) { - if (nodes[i].start == nodes[i].end) + addr = nodes[i].start; + end = nodes[i].end; + if (addr >= end) continue; - for (addr = nodes[i].start; - addr < nodes[i].end; - addr += (1UL << shift)) { - if (memnodemap[addr >> shift] != 0xff) { - printk(KERN_INFO - "Your memory is not aligned you need to rebuild your kernel " - "with a bigger NODEMAPSIZE shift=%d adder=%lu\n", - shift,addr); + if ((end >> shift) >= NODEMAPSIZE) + return 0; + do { + if (memnodemap[addr >> shift] != 0xff) return -1; - } memnodemap[addr >> shift] = i; - } + addr += (1 << shift); + } while (addr < end); + res = 1; } + return res; +} + +int __init compute_hash_shift(struct node *nodes, int numnodes) +{ + int shift = 20; + + while (populate_memnodemap(nodes, numnodes, shift + 1) >= 0) + shift++; + + printk(KERN_DEBUG "Using %d for the hash shift.\n", + shift); + + if (populate_memnodemap(nodes, numnodes, shift) != 1) { + printk(KERN_INFO + "Your memory is not aligned you need to rebuild your kernel " + "with a bigger NODEMAPSIZE shift=%d\n", + shift); + return -1; + } return shift; } -- cgit v1.2.3 From af9c142de94ecf724a18700273bbba390873e072 Mon Sep 17 00:00:00 2001 From: Shaohua Li Date: Sat, 5 Nov 2005 17:25:54 +0100 Subject: [PATCH] x86_64: Force correct address space size for MTRR on some 64bit Intel Xeons They report 40bit, but only have 36bits of physical address space. This caused problems with setting up the correct masks for MTRR. CPUID workaround for steppings 0F33h(supporting x86) and 0F34h(supporting x86 and EM64T). Detail info can be found at: http://download.intel.com/design/Xeon/specupdt/30240216.pdf http://download.intel.com/design/Pentium4/specupdt/30235221.pdf Signed-off-by: Shaohua Li Signed-off-by: Andi Kleen Signed-off-by: Linus Torvalds --- arch/i386/kernel/cpu/mtrr/main.c | 8 ++++++++ arch/x86_64/kernel/setup.c | 5 +++++ 2 files changed, 13 insertions(+) (limited to 'arch') diff --git a/arch/i386/kernel/cpu/mtrr/main.c b/arch/i386/kernel/cpu/mtrr/main.c index dd4ebd6af7e4..1e9db198c440 100644 --- a/arch/i386/kernel/cpu/mtrr/main.c +++ b/arch/i386/kernel/cpu/mtrr/main.c @@ -626,6 +626,14 @@ void __init mtrr_bp_init(void) if (cpuid_eax(0x80000000) >= 0x80000008) { u32 phys_addr; phys_addr = cpuid_eax(0x80000008) & 0xff; + /* CPUID workaround for Intel 0F33/0F34 CPU */ + if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL && + boot_cpu_data.x86 == 0xF && + boot_cpu_data.x86_model == 0x3 && + (boot_cpu_data.x86_mask == 0x3 || + boot_cpu_data.x86_mask == 0x4)) + phys_addr = 36; + size_or_mask = ~((1 << (phys_addr - PAGE_SHIFT)) - 1); size_and_mask = ~size_or_mask & 0xfff00000; } else if (boot_cpu_data.x86_vendor == X86_VENDOR_CENTAUR && diff --git a/arch/x86_64/kernel/setup.c b/arch/x86_64/kernel/setup.c index 2ad3556dda52..476ee034fca2 100644 --- a/arch/x86_64/kernel/setup.c +++ b/arch/x86_64/kernel/setup.c @@ -995,6 +995,11 @@ static void __cpuinit init_intel(struct cpuinfo_x86 *c) unsigned eax = cpuid_eax(0x80000008); c->x86_virt_bits = (eax >> 8) & 0xff; c->x86_phys_bits = eax & 0xff; + /* CPUID workaround for Intel 0F34 CPU */ + if (c->x86_vendor == X86_VENDOR_INTEL && + c->x86 == 0xF && c->x86_model == 0x3 && + c->x86_mask == 0x4) + c->x86_phys_bits = 36; } if (c->x86 == 15) -- cgit v1.2.3 From ea0be473a1f0ee89024a24d8ea4b05fbf6efcee3 Mon Sep 17 00:00:00 2001 From: Andi Kleen Date: Sat, 5 Nov 2005 17:25:54 +0100 
Subject: [PATCH] x86_64: Allow modular build of ia32 aout loader Signed-off-by: Andi Kleen Signed-off-by: Linus Torvalds --- arch/x86_64/Kconfig | 2 +- arch/x86_64/ia32/ia32_aout.c | 3 --- arch/x86_64/ia32/ia32_binfmt.c | 4 +++- arch/x86_64/kernel/x8664_ksyms.c | 3 +++ 4 files changed, 7 insertions(+), 5 deletions(-) (limited to 'arch') diff --git a/arch/x86_64/Kconfig b/arch/x86_64/Kconfig index 504dc52e8bfa..fd10bf82f8d4 100644 --- a/arch/x86_64/Kconfig +++ b/arch/x86_64/Kconfig @@ -510,7 +510,7 @@ config IA32_EMULATION left. config IA32_AOUT - bool "IA32 a.out support" + tristate "IA32 a.out support" depends on IA32_EMULATION help Support old a.out binaries in the 32bit emulation. diff --git a/arch/x86_64/ia32/ia32_aout.c b/arch/x86_64/ia32/ia32_aout.c index 93c60f4aa47a..3bf58af98936 100644 --- a/arch/x86_64/ia32/ia32_aout.c +++ b/arch/x86_64/ia32/ia32_aout.c @@ -36,9 +36,6 @@ #undef WARN_OLD #undef CORE_DUMP /* probably broken */ -extern int ia32_setup_arg_pages(struct linux_binprm *bprm, - unsigned long stack_top, int exec_stack); - static int load_aout_binary(struct linux_binprm *, struct pt_regs * regs); static int load_aout_library(struct file*); diff --git a/arch/x86_64/ia32/ia32_binfmt.c b/arch/x86_64/ia32/ia32_binfmt.c index d9161e395978..830feb272eca 100644 --- a/arch/x86_64/ia32/ia32_binfmt.c +++ b/arch/x86_64/ia32/ia32_binfmt.c @@ -335,7 +335,8 @@ static void elf32_init(struct pt_regs *regs) me->thread.es = __USER_DS; } -int setup_arg_pages(struct linux_binprm *bprm, unsigned long stack_top, int executable_stack) +int ia32_setup_arg_pages(struct linux_binprm *bprm, unsigned long stack_top, + int executable_stack) { unsigned long stack_base; struct vm_area_struct *mpnt; @@ -389,6 +390,7 @@ int setup_arg_pages(struct linux_binprm *bprm, unsigned long stack_top, int exec return 0; } +EXPORT_SYMBOL(ia32_setup_arg_pages); static unsigned long elf32_map (struct file *filep, unsigned long addr, struct elf_phdr *eppnt, int prot, int type) diff --git a/arch/x86_64/kernel/x8664_ksyms.c b/arch/x86_64/kernel/x8664_ksyms.c index fd99ddd009bc..4a54221e10bc 100644 --- a/arch/x86_64/kernel/x8664_ksyms.c +++ b/arch/x86_64/kernel/x8664_ksyms.c @@ -203,3 +203,6 @@ EXPORT_SYMBOL(flush_tlb_page); #endif EXPORT_SYMBOL(cpu_khz); + +EXPORT_SYMBOL(load_gs_index); + -- cgit v1.2.3 From a88cde13bae3fffd6ecc812bdd02c91eafb6073e Mon Sep 17 00:00:00 2001 From: Andi Kleen Date: Sat, 5 Nov 2005 17:25:54 +0100 Subject: [PATCH] x86_64: Formatting fixes for arch/x86_64/kernel/process.c No functional changes. 
Signed-off-by: Andi Kleen Signed-off-by: Linus Torvalds --- arch/x86_64/kernel/process.c | 47 +++++++++++++++++++++++++------------------- 1 file changed, 27 insertions(+), 20 deletions(-) (limited to 'arch') diff --git a/arch/x86_64/kernel/process.c b/arch/x86_64/kernel/process.c index b5a89c0bdf59..bc44c92ca198 100644 --- a/arch/x86_64/kernel/process.c +++ b/arch/x86_64/kernel/process.c @@ -148,7 +148,8 @@ void cpu_idle_wait(void) do { ssleep(1); for_each_online_cpu(cpu) { - if (cpu_isset(cpu, map) && !per_cpu(cpu_idle_state, cpu)) + if (cpu_isset(cpu, map) && + !per_cpu(cpu_idle_state, cpu)) cpu_clear(cpu, map); } cpus_and(map, map, cpu_online_map); @@ -278,7 +279,8 @@ void __show_regs(struct pt_regs * regs) system_utsname.version); printk("RIP: %04lx:[<%016lx>] ", regs->cs & 0xffff, regs->rip); printk_address(regs->rip); - printk("\nRSP: %04lx:%016lx EFLAGS: %08lx\n", regs->ss, regs->rsp, regs->eflags); + printk("\nRSP: %04lx:%016lx EFLAGS: %08lx\n", regs->ss, regs->rsp, + regs->eflags); printk("RAX: %016lx RBX: %016lx RCX: %016lx\n", regs->rax, regs->rbx, regs->rcx); printk("RDX: %016lx RSI: %016lx RDI: %016lx\n", @@ -430,15 +432,14 @@ int copy_thread(int nr, unsigned long clone_flags, unsigned long rsp, struct pt_regs * childregs; struct task_struct *me = current; - childregs = ((struct pt_regs *) (THREAD_SIZE + (unsigned long) p->thread_info)) - 1; - + childregs = ((struct pt_regs *) + (THREAD_SIZE + (unsigned long) p->thread_info)) - 1; *childregs = *regs; childregs->rax = 0; childregs->rsp = rsp; - if (rsp == ~0UL) { + if (rsp == ~0UL) childregs->rsp = (unsigned long)childregs; - } p->thread.rsp = (unsigned long) childregs; p->thread.rsp0 = (unsigned long) (childregs+1); @@ -460,7 +461,8 @@ int copy_thread(int nr, unsigned long clone_flags, unsigned long rsp, p->thread.io_bitmap_max = 0; return -ENOMEM; } - memcpy(p->thread.io_bitmap_ptr, me->thread.io_bitmap_ptr, IO_BITMAP_BYTES); + memcpy(p->thread.io_bitmap_ptr, me->thread.io_bitmap_ptr, + IO_BITMAP_BYTES); } /* @@ -497,7 +499,8 @@ out: * - fold all the options into a flag word and test it with a single test. 
* - could test fs/gs bitsliced */ -struct task_struct *__switch_to(struct task_struct *prev_p, struct task_struct *next_p) +struct task_struct * +__switch_to(struct task_struct *prev_p, struct task_struct *next_p) { struct thread_struct *prev = &prev_p->thread, *next = &next_p->thread; @@ -568,7 +571,8 @@ struct task_struct *__switch_to(struct task_struct *prev_p, struct task_struct * prev->userrsp = read_pda(oldrsp); write_pda(oldrsp, next->userrsp); write_pda(pcurrent, next_p); - write_pda(kernelstack, (unsigned long)next_p->thread_info + THREAD_SIZE - PDA_STACKOFFSET); + write_pda(kernelstack, + (unsigned long)next_p->thread_info + THREAD_SIZE - PDA_STACKOFFSET); /* * Now maybe reload the debug registers @@ -649,7 +653,9 @@ asmlinkage long sys_fork(struct pt_regs *regs) return do_fork(SIGCHLD, regs->rsp, regs, 0, NULL, NULL); } -asmlinkage long sys_clone(unsigned long clone_flags, unsigned long newsp, void __user *parent_tid, void __user *child_tid, struct pt_regs *regs) +asmlinkage long +sys_clone(unsigned long clone_flags, unsigned long newsp, + void __user *parent_tid, void __user *child_tid, struct pt_regs *regs) { if (!newsp) newsp = regs->rsp; @@ -685,7 +691,8 @@ unsigned long get_wchan(struct task_struct *p) return 0; fp = *(u64 *)(p->thread.rsp); do { - if (fp < (unsigned long)stack || fp > (unsigned long)stack+THREAD_SIZE) + if (fp < (unsigned long)stack || + fp > (unsigned long)stack+THREAD_SIZE) return 0; rip = *(u64 *)(fp+8); if (!in_sched_functions(rip)) @@ -720,8 +727,8 @@ long do_arch_prctl(struct task_struct *task, int code, unsigned long addr) task->thread.gsindex = 0; task->thread.gs = addr; if (doit) { - load_gs_index(0); - ret = checking_wrmsrl(MSR_KERNEL_GS_BASE, addr); + load_gs_index(0); + ret = checking_wrmsrl(MSR_KERNEL_GS_BASE, addr); } } put_cpu(); @@ -738,7 +745,7 @@ long do_arch_prctl(struct task_struct *task, int code, unsigned long addr) set_32bit_tls(task, FS_TLS, addr); if (doit) { load_TLS(&task->thread, cpu); - asm volatile("movl %0,%%fs" :: "r" (FS_TLS_SEL)); + asm volatile("movl %0,%%fs" :: "r"(FS_TLS_SEL)); } task->thread.fsindex = FS_TLS_SEL; task->thread.fs = 0; @@ -748,8 +755,8 @@ long do_arch_prctl(struct task_struct *task, int code, unsigned long addr) if (doit) { /* set the selector to 0 to not confuse __switch_to */ - asm volatile("movl %0,%%fs" :: "r" (0)); - ret = checking_wrmsrl(MSR_FS_BASE, addr); + asm volatile("movl %0,%%fs" :: "r" (0)); + ret = checking_wrmsrl(MSR_FS_BASE, addr); } } put_cpu(); @@ -758,9 +765,9 @@ long do_arch_prctl(struct task_struct *task, int code, unsigned long addr) unsigned long base; if (task->thread.fsindex == FS_TLS_SEL) base = read_32bit_tls(task, FS_TLS); - else if (doit) { + else if (doit) rdmsrl(MSR_FS_BASE, base); - } else + else base = task->thread.fs; ret = put_user(base, (unsigned long __user *)addr); break; @@ -769,9 +776,9 @@ long do_arch_prctl(struct task_struct *task, int code, unsigned long addr) unsigned long base; if (task->thread.gsindex == GS_TLS_SEL) base = read_32bit_tls(task, GS_TLS); - else if (doit) { + else if (doit) rdmsrl(MSR_KERNEL_GS_BASE, base); - } else + else base = task->thread.gs; ret = put_user(base, (unsigned long __user *)addr); break; -- cgit v1.2.3 From 3506229ff9968e5dbc862a50285fbea0e8821b58 Mon Sep 17 00:00:00 2001 From: Andi Kleen Date: Sat, 5 Nov 2005 17:25:54 +0100 Subject: [PATCH] x86_64: Don't enable interrupt unconditionally in reboot path When they were disabled before (e.g. 
after a panic) it's better to keep them off, otherwise followon panics can happen from timer interrupt handlers etc. Drawback is that pageup in the console won't work anymore though. Signed-off-by: Andi Kleen Signed-off-by: Linus Torvalds --- arch/x86_64/kernel/reboot.c | 5 +++-- arch/x86_64/kernel/smp.c | 5 +++-- 2 files changed, 6 insertions(+), 4 deletions(-) (limited to 'arch') diff --git a/arch/x86_64/kernel/reboot.c b/arch/x86_64/kernel/reboot.c index 47f95687905f..97804bc2c0a0 100644 --- a/arch/x86_64/kernel/reboot.c +++ b/arch/x86_64/kernel/reboot.c @@ -77,6 +77,7 @@ static inline void kb_wait(void) void machine_shutdown(void) { + unsigned long flags; /* Stop the cpus and apics */ #ifdef CONFIG_SMP int reboot_cpu_id; @@ -98,7 +99,7 @@ void machine_shutdown(void) smp_send_stop(); #endif - local_irq_disable(); + local_irq_save(flags); #ifndef CONFIG_SMP disable_local_APIC(); @@ -106,7 +107,7 @@ void machine_shutdown(void) disable_IO_APIC(); - local_irq_enable(); + local_irq_restore(flags); } void machine_emergency_restart(void) diff --git a/arch/x86_64/kernel/smp.c b/arch/x86_64/kernel/smp.c index 493a0d1f1279..cfc3d9dccbd9 100644 --- a/arch/x86_64/kernel/smp.c +++ b/arch/x86_64/kernel/smp.c @@ -450,13 +450,14 @@ int smp_call_function (void (*func) (void *info), void *info, int nonatomic, void smp_stop_cpu(void) { + unsigned long flags; /* * Remove this CPU: */ cpu_clear(smp_processor_id(), cpu_online_map); - local_irq_disable(); + local_irq_save(flags); disable_local_APIC(); - local_irq_enable(); + local_irq_restore(flags); } static void smp_really_stop_cpu(void *dummy) -- cgit v1.2.3 From 94605eff572b727aaad9b4b29bc358b919096503 Mon Sep 17 00:00:00 2001 From: "Siddha, Suresh B" Date: Sat, 5 Nov 2005 17:25:54 +0100 Subject: [PATCH] x86-64/i386: Intel HT, Multi core detection fixes Fields obtained through cpuid vector 0x1(ebx[16:23]) and vector 0x4(eax[14:25], eax[26:31]) indicate the maximum values and might not always be the same as what is available and what OS sees. So make sure "siblings" and "cpu cores" values in /proc/cpuinfo reflect the values as seen by OS instead of what cpuid instruction says. This will also fix the buggy BIOS cases (for example where cpuid on a single core cpu says there are "2" siblings, even when HT is disabled in the BIOS. 
http://bugzilla.kernel.org/show_bug.cgi?id=4359) Signed-off-by: Suresh Siddha Signed-off-by: Andi Kleen Signed-off-by: Linus Torvalds --- arch/i386/kernel/cpu/amd.c | 12 +++--- arch/i386/kernel/cpu/common.c | 36 +++++++---------- arch/i386/kernel/cpu/intel.c | 2 +- arch/i386/kernel/cpu/intel_cacheinfo.c | 2 +- arch/i386/kernel/cpu/proc.c | 7 ++-- arch/i386/kernel/smpboot.c | 73 ++++++++++++++++++++++++---------- arch/x86_64/kernel/setup.c | 69 ++++++++++++++------------------ arch/x86_64/kernel/smpboot.c | 69 +++++++++++++++++++++++++------- 8 files changed, 162 insertions(+), 108 deletions(-) (limited to 'arch') diff --git a/arch/i386/kernel/cpu/amd.c b/arch/i386/kernel/cpu/amd.c index 53a1681cd964..e344ef88cfcd 100644 --- a/arch/i386/kernel/cpu/amd.c +++ b/arch/i386/kernel/cpu/amd.c @@ -206,9 +206,9 @@ static void __init init_amd(struct cpuinfo_x86 *c) display_cacheinfo(c); if (cpuid_eax(0x80000000) >= 0x80000008) { - c->x86_num_cores = (cpuid_ecx(0x80000008) & 0xff) + 1; - if (c->x86_num_cores & (c->x86_num_cores - 1)) - c->x86_num_cores = 1; + c->x86_max_cores = (cpuid_ecx(0x80000008) & 0xff) + 1; + if (c->x86_max_cores & (c->x86_max_cores - 1)) + c->x86_max_cores = 1; } #ifdef CONFIG_X86_HT @@ -217,15 +217,15 @@ static void __init init_amd(struct cpuinfo_x86 *c) * distingush the cores. Assumes number of cores is a power * of two. */ - if (c->x86_num_cores > 1) { + if (c->x86_max_cores > 1) { int cpu = smp_processor_id(); unsigned bits = 0; - while ((1 << bits) < c->x86_num_cores) + while ((1 << bits) < c->x86_max_cores) bits++; cpu_core_id[cpu] = phys_proc_id[cpu] & ((1<>= bits; printk(KERN_INFO "CPU %d(%d) -> Core %d\n", - cpu, c->x86_num_cores, cpu_core_id[cpu]); + cpu, c->x86_max_cores, cpu_core_id[cpu]); } #endif } diff --git a/arch/i386/kernel/cpu/common.c b/arch/i386/kernel/cpu/common.c index 35a67dab4a94..4e9c2e99b0a5 100644 --- a/arch/i386/kernel/cpu/common.c +++ b/arch/i386/kernel/cpu/common.c @@ -335,7 +335,7 @@ void __devinit identify_cpu(struct cpuinfo_x86 *c) c->x86_model = c->x86_mask = 0; /* So far unknown... 
*/ c->x86_vendor_id[0] = '\0'; /* Unset */ c->x86_model_id[0] = '\0'; /* Unset */ - c->x86_num_cores = 1; + c->x86_max_cores = 1; memset(&c->x86_capability, 0, sizeof c->x86_capability); if (!have_cpuid_p()) { @@ -446,52 +446,44 @@ void __devinit identify_cpu(struct cpuinfo_x86 *c) void __devinit detect_ht(struct cpuinfo_x86 *c) { u32 eax, ebx, ecx, edx; - int index_msb, tmp; + int index_msb, core_bits; int cpu = smp_processor_id(); + cpuid(1, &eax, &ebx, &ecx, &edx); + + c->apicid = phys_pkg_id((ebx >> 24) & 0xFF, 0); + if (!cpu_has(c, X86_FEATURE_HT) || cpu_has(c, X86_FEATURE_CMP_LEGACY)) return; - cpuid(1, &eax, &ebx, &ecx, &edx); smp_num_siblings = (ebx & 0xff0000) >> 16; if (smp_num_siblings == 1) { printk(KERN_INFO "CPU: Hyper-Threading is disabled\n"); } else if (smp_num_siblings > 1 ) { - index_msb = 31; if (smp_num_siblings > NR_CPUS) { printk(KERN_WARNING "CPU: Unsupported number of the siblings %d", smp_num_siblings); smp_num_siblings = 1; return; } - tmp = smp_num_siblings; - while ((tmp & 0x80000000 ) == 0) { - tmp <<=1 ; - index_msb--; - } - if (smp_num_siblings & (smp_num_siblings - 1)) - index_msb++; + + index_msb = get_count_order(smp_num_siblings); phys_proc_id[cpu] = phys_pkg_id((ebx >> 24) & 0xFF, index_msb); printk(KERN_INFO "CPU: Physical Processor ID: %d\n", phys_proc_id[cpu]); - smp_num_siblings = smp_num_siblings / c->x86_num_cores; + smp_num_siblings = smp_num_siblings / c->x86_max_cores; - tmp = smp_num_siblings; - index_msb = 31; - while ((tmp & 0x80000000) == 0) { - tmp <<=1 ; - index_msb--; - } + index_msb = get_count_order(smp_num_siblings) ; - if (smp_num_siblings & (smp_num_siblings - 1)) - index_msb++; + core_bits = get_count_order(c->x86_max_cores); - cpu_core_id[cpu] = phys_pkg_id((ebx >> 24) & 0xFF, index_msb); + cpu_core_id[cpu] = phys_pkg_id((ebx >> 24) & 0xFF, index_msb) & + ((1 << core_bits) - 1); - if (c->x86_num_cores > 1) + if (c->x86_max_cores > 1) printk(KERN_INFO "CPU: Processor Core ID: %d\n", cpu_core_id[cpu]); } diff --git a/arch/i386/kernel/cpu/intel.c b/arch/i386/kernel/cpu/intel.c index 43601de0f633..8d603ba28126 100644 --- a/arch/i386/kernel/cpu/intel.c +++ b/arch/i386/kernel/cpu/intel.c @@ -157,7 +157,7 @@ static void __devinit init_intel(struct cpuinfo_x86 *c) if ( p ) strcpy(c->x86_model_id, p); - c->x86_num_cores = num_cpu_cores(c); + c->x86_max_cores = num_cpu_cores(c); detect_ht(c); diff --git a/arch/i386/kernel/cpu/intel_cacheinfo.c b/arch/i386/kernel/cpu/intel_cacheinfo.c index 4dc42a189ae5..e66d14099564 100644 --- a/arch/i386/kernel/cpu/intel_cacheinfo.c +++ b/arch/i386/kernel/cpu/intel_cacheinfo.c @@ -307,7 +307,7 @@ static void __cpuinit cache_shared_cpu_map_setup(unsigned int cpu, int index) #ifdef CONFIG_X86_HT else if (num_threads_sharing == smp_num_siblings) this_leaf->shared_cpu_map = cpu_sibling_map[cpu]; - else if (num_threads_sharing == (c->x86_num_cores * smp_num_siblings)) + else if (num_threads_sharing == (c->x86_max_cores * smp_num_siblings)) this_leaf->shared_cpu_map = cpu_core_map[cpu]; else printk(KERN_DEBUG "Number of CPUs sharing cache didn't match " diff --git a/arch/i386/kernel/cpu/proc.c b/arch/i386/kernel/cpu/proc.c index 41b871ecf4b3..e7921315ae9d 100644 --- a/arch/i386/kernel/cpu/proc.c +++ b/arch/i386/kernel/cpu/proc.c @@ -94,12 +94,11 @@ static int show_cpuinfo(struct seq_file *m, void *v) if (c->x86_cache_size >= 0) seq_printf(m, "cache size\t: %d KB\n", c->x86_cache_size); #ifdef CONFIG_X86_HT - if (c->x86_num_cores * smp_num_siblings > 1) { + if (c->x86_max_cores * smp_num_siblings > 1) { 
seq_printf(m, "physical id\t: %d\n", phys_proc_id[n]); - seq_printf(m, "siblings\t: %d\n", - c->x86_num_cores * smp_num_siblings); + seq_printf(m, "siblings\t: %d\n", cpus_weight(cpu_core_map[n])); seq_printf(m, "core id\t\t: %d\n", cpu_core_id[n]); - seq_printf(m, "cpu cores\t: %d\n", c->x86_num_cores); + seq_printf(m, "cpu cores\t: %d\n", c->booted_cores); } #endif diff --git a/arch/i386/kernel/smpboot.c b/arch/i386/kernel/smpboot.c index 01b618e73ecd..0a9c64655236 100644 --- a/arch/i386/kernel/smpboot.c +++ b/arch/i386/kernel/smpboot.c @@ -74,9 +74,11 @@ EXPORT_SYMBOL(phys_proc_id); int cpu_core_id[NR_CPUS] __read_mostly = {[0 ... NR_CPUS-1] = BAD_APICID}; EXPORT_SYMBOL(cpu_core_id); +/* representing HT siblings of each logical CPU */ cpumask_t cpu_sibling_map[NR_CPUS] __read_mostly; EXPORT_SYMBOL(cpu_sibling_map); +/* representing HT and core siblings of each logical CPU */ cpumask_t cpu_core_map[NR_CPUS] __read_mostly; EXPORT_SYMBOL(cpu_core_map); @@ -444,35 +446,60 @@ static void __devinit smp_callin(void) static int cpucount; +/* representing cpus for which sibling maps can be computed */ +static cpumask_t cpu_sibling_setup_map; + static inline void set_cpu_sibling_map(int cpu) { int i; + struct cpuinfo_x86 *c = cpu_data; + + cpu_set(cpu, cpu_sibling_setup_map); if (smp_num_siblings > 1) { - for (i = 0; i < NR_CPUS; i++) { - if (!cpu_isset(i, cpu_callout_map)) - continue; - if (cpu_core_id[cpu] == cpu_core_id[i]) { + for_each_cpu_mask(i, cpu_sibling_setup_map) { + if (phys_proc_id[cpu] == phys_proc_id[i] && + cpu_core_id[cpu] == cpu_core_id[i]) { cpu_set(i, cpu_sibling_map[cpu]); cpu_set(cpu, cpu_sibling_map[i]); + cpu_set(i, cpu_core_map[cpu]); + cpu_set(cpu, cpu_core_map[i]); } } } else { cpu_set(cpu, cpu_sibling_map[cpu]); } - if (current_cpu_data.x86_num_cores > 1) { - for (i = 0; i < NR_CPUS; i++) { - if (!cpu_isset(i, cpu_callout_map)) - continue; - if (phys_proc_id[cpu] == phys_proc_id[i]) { - cpu_set(i, cpu_core_map[cpu]); - cpu_set(cpu, cpu_core_map[i]); - } - } - } else { + if (current_cpu_data.x86_max_cores == 1) { cpu_core_map[cpu] = cpu_sibling_map[cpu]; + c[cpu].booted_cores = 1; + return; + } + + for_each_cpu_mask(i, cpu_sibling_setup_map) { + if (phys_proc_id[cpu] == phys_proc_id[i]) { + cpu_set(i, cpu_core_map[cpu]); + cpu_set(cpu, cpu_core_map[i]); + /* + * Does this new cpu bringup a new core? 
+ */ + if (cpus_weight(cpu_sibling_map[cpu]) == 1) { + /* + * for each core in package, increment + * the booted_cores for this new cpu + */ + if (first_cpu(cpu_sibling_map[i]) == i) + c[cpu].booted_cores++; + /* + * increment the core count for all + * the other cpus in this package + */ + if (i != cpu) + c[i].booted_cores++; + } else if (i != cpu && !c[cpu].booted_cores) + c[cpu].booted_cores = c[i].booted_cores; + } } } @@ -1096,11 +1123,8 @@ static void __init smp_boot_cpus(unsigned int max_cpus) current_thread_info()->cpu = 0; smp_tune_scheduling(); - cpus_clear(cpu_sibling_map[0]); - cpu_set(0, cpu_sibling_map[0]); - cpus_clear(cpu_core_map[0]); - cpu_set(0, cpu_core_map[0]); + set_cpu_sibling_map(0); /* * If we couldn't find an SMP configuration at boot time, @@ -1279,15 +1303,24 @@ static void remove_siblinginfo(int cpu) { int sibling; + struct cpuinfo_x86 *c = cpu_data; + for_each_cpu_mask(sibling, cpu_core_map[cpu]) { + cpu_clear(cpu, cpu_core_map[sibling]); + /* + * last thread sibling in this cpu core going down + */ + if (cpus_weight(cpu_sibling_map[cpu]) == 1) + c[sibling].booted_cores--; + } + for_each_cpu_mask(sibling, cpu_sibling_map[cpu]) cpu_clear(cpu, cpu_sibling_map[sibling]); - for_each_cpu_mask(sibling, cpu_core_map[cpu]) - cpu_clear(cpu, cpu_core_map[sibling]); cpus_clear(cpu_sibling_map[cpu]); cpus_clear(cpu_core_map[cpu]); phys_proc_id[cpu] = BAD_APICID; cpu_core_id[cpu] = BAD_APICID; + cpu_clear(cpu, cpu_sibling_setup_map); } int __cpu_disable(void) diff --git a/arch/x86_64/kernel/setup.c b/arch/x86_64/kernel/setup.c index 476ee034fca2..40c77f6fe4b0 100644 --- a/arch/x86_64/kernel/setup.c +++ b/arch/x86_64/kernel/setup.c @@ -795,7 +795,7 @@ static void __init amd_detect_cmp(struct cpuinfo_x86 *c) #endif bits = 0; - while ((1 << bits) < c->x86_num_cores) + while ((1 << bits) < c->x86_max_cores) bits++; /* Low order bits define the core id (index of core in socket) */ @@ -828,7 +828,7 @@ static void __init amd_detect_cmp(struct cpuinfo_x86 *c) numa_set_node(cpu, node); printk(KERN_INFO "CPU %d(%d) -> Node %d -> Core %d\n", - cpu, c->x86_num_cores, node, cpu_core_id[cpu]); + cpu, c->x86_max_cores, node, cpu_core_id[cpu]); #endif #endif } @@ -877,9 +877,9 @@ static int __init init_amd(struct cpuinfo_x86 *c) display_cacheinfo(c); if (c->extended_cpuid_level >= 0x80000008) { - c->x86_num_cores = (cpuid_ecx(0x80000008) & 0xff) + 1; - if (c->x86_num_cores & (c->x86_num_cores - 1)) - c->x86_num_cores = 1; + c->x86_max_cores = (cpuid_ecx(0x80000008) & 0xff) + 1; + if (c->x86_max_cores & (c->x86_max_cores - 1)) + c->x86_max_cores = 1; amd_detect_cmp(c); } @@ -891,54 +891,44 @@ static void __cpuinit detect_ht(struct cpuinfo_x86 *c) { #ifdef CONFIG_SMP u32 eax, ebx, ecx, edx; - int index_msb, tmp; + int index_msb, core_bits; int cpu = smp_processor_id(); - + + cpuid(1, &eax, &ebx, &ecx, &edx); + + c->apicid = phys_pkg_id(0); + if (!cpu_has(c, X86_FEATURE_HT) || cpu_has(c, X86_FEATURE_CMP_LEGACY)) return; - cpuid(1, &eax, &ebx, &ecx, &edx); smp_num_siblings = (ebx & 0xff0000) >> 16; - + if (smp_num_siblings == 1) { printk(KERN_INFO "CPU: Hyper-Threading is disabled\n"); - } else if (smp_num_siblings > 1) { - index_msb = 31; - /* - * At this point we only support two siblings per - * processor package. 
- */ + } else if (smp_num_siblings > 1 ) { + if (smp_num_siblings > NR_CPUS) { printk(KERN_WARNING "CPU: Unsupported number of the siblings %d", smp_num_siblings); smp_num_siblings = 1; return; } - tmp = smp_num_siblings; - while ((tmp & 0x80000000 ) == 0) { - tmp <<=1 ; - index_msb--; - } - if (smp_num_siblings & (smp_num_siblings - 1)) - index_msb++; + + index_msb = get_count_order(smp_num_siblings); phys_proc_id[cpu] = phys_pkg_id(index_msb); - + printk(KERN_INFO "CPU: Physical Processor ID: %d\n", phys_proc_id[cpu]); - smp_num_siblings = smp_num_siblings / c->x86_num_cores; + smp_num_siblings = smp_num_siblings / c->x86_max_cores; - tmp = smp_num_siblings; - index_msb = 31; - while ((tmp & 0x80000000) == 0) { - tmp <<=1 ; - index_msb--; - } - if (smp_num_siblings & (smp_num_siblings - 1)) - index_msb++; + index_msb = get_count_order(smp_num_siblings) ; + + core_bits = get_count_order(c->x86_max_cores); - cpu_core_id[cpu] = phys_pkg_id(index_msb); + cpu_core_id[cpu] = phys_pkg_id(index_msb) & + ((1 << core_bits) - 1); - if (c->x86_num_cores > 1) + if (c->x86_max_cores > 1) printk(KERN_INFO "CPU: Processor Core ID: %d\n", cpu_core_id[cpu]); } @@ -1006,7 +996,7 @@ static void __cpuinit init_intel(struct cpuinfo_x86 *c) c->x86_cache_alignment = c->x86_clflush_size * 2; if (c->x86 >= 15) set_bit(X86_FEATURE_CONSTANT_TSC, &c->x86_capability); - c->x86_num_cores = intel_num_cpu_cores(c); + c->x86_max_cores = intel_num_cpu_cores(c); srat_detect_node(); } @@ -1044,7 +1034,7 @@ void __cpuinit early_identify_cpu(struct cpuinfo_x86 *c) c->x86_model_id[0] = '\0'; /* Unset */ c->x86_clflush_size = 64; c->x86_cache_alignment = c->x86_clflush_size; - c->x86_num_cores = 1; + c->x86_max_cores = 1; c->extended_cpuid_level = 0; memset(&c->x86_capability, 0, sizeof c->x86_capability); @@ -1278,13 +1268,12 @@ static int show_cpuinfo(struct seq_file *m, void *v) seq_printf(m, "cache size\t: %d KB\n", c->x86_cache_size); #ifdef CONFIG_SMP - if (smp_num_siblings * c->x86_num_cores > 1) { + if (smp_num_siblings * c->x86_max_cores > 1) { int cpu = c - cpu_data; seq_printf(m, "physical id\t: %d\n", phys_proc_id[cpu]); - seq_printf(m, "siblings\t: %d\n", - c->x86_num_cores * smp_num_siblings); + seq_printf(m, "siblings\t: %d\n", cpus_weight(cpu_core_map[cpu])); seq_printf(m, "core id\t\t: %d\n", cpu_core_id[cpu]); - seq_printf(m, "cpu cores\t: %d\n", c->x86_num_cores); + seq_printf(m, "cpu cores\t: %d\n", c->booted_cores); } #endif diff --git a/arch/x86_64/kernel/smpboot.c b/arch/x86_64/kernel/smpboot.c index f74319a80659..2b9ddba61b37 100644 --- a/arch/x86_64/kernel/smpboot.c +++ b/arch/x86_64/kernel/smpboot.c @@ -64,6 +64,7 @@ int smp_num_siblings = 1; /* Package ID of each logical CPU */ u8 phys_proc_id[NR_CPUS] __read_mostly = { [0 ... NR_CPUS-1] = BAD_APICID }; +/* core ID of each logical CPU */ u8 cpu_core_id[NR_CPUS] __read_mostly = { [0 ... 
NR_CPUS-1] = BAD_APICID }; EXPORT_SYMBOL(phys_proc_id); EXPORT_SYMBOL(cpu_core_id); @@ -89,7 +90,10 @@ struct cpuinfo_x86 cpu_data[NR_CPUS] __cacheline_aligned; /* Set when the idlers are all forked */ int smp_threads_ready; +/* representing HT siblings of each logical CPU */ cpumask_t cpu_sibling_map[NR_CPUS] __read_mostly; + +/* representing HT and core siblings of each logical CPU */ cpumask_t cpu_core_map[NR_CPUS] __read_mostly; EXPORT_SYMBOL(cpu_core_map); @@ -436,30 +440,59 @@ void __cpuinit smp_callin(void) cpu_set(cpuid, cpu_callin_map); } +/* representing cpus for which sibling maps can be computed */ +static cpumask_t cpu_sibling_setup_map; + static inline void set_cpu_sibling_map(int cpu) { int i; + struct cpuinfo_x86 *c = cpu_data; + + cpu_set(cpu, cpu_sibling_setup_map); if (smp_num_siblings > 1) { - for_each_cpu(i) { - if (cpu_core_id[cpu] == cpu_core_id[i]) { + for_each_cpu_mask(i, cpu_sibling_setup_map) { + if (phys_proc_id[cpu] == phys_proc_id[i] && + cpu_core_id[cpu] == cpu_core_id[i]) { cpu_set(i, cpu_sibling_map[cpu]); cpu_set(cpu, cpu_sibling_map[i]); + cpu_set(i, cpu_core_map[cpu]); + cpu_set(cpu, cpu_core_map[i]); } } } else { cpu_set(cpu, cpu_sibling_map[cpu]); } - if (current_cpu_data.x86_num_cores > 1) { - for_each_cpu(i) { - if (phys_proc_id[cpu] == phys_proc_id[i]) { - cpu_set(i, cpu_core_map[cpu]); - cpu_set(cpu, cpu_core_map[i]); - } - } - } else { + if (current_cpu_data.x86_max_cores == 1) { cpu_core_map[cpu] = cpu_sibling_map[cpu]; + c[cpu].booted_cores = 1; + return; + } + + for_each_cpu_mask(i, cpu_sibling_setup_map) { + if (phys_proc_id[cpu] == phys_proc_id[i]) { + cpu_set(i, cpu_core_map[cpu]); + cpu_set(cpu, cpu_core_map[i]); + /* + * Does this new cpu bringup a new core? + */ + if (cpus_weight(cpu_sibling_map[cpu]) == 1) { + /* + * for each core in package, increment + * the booted_cores for this new cpu + */ + if (first_cpu(cpu_sibling_map[i]) == i) + c[cpu].booted_cores++; + /* + * increment the core count for all + * the other cpus in this package + */ + if (i != cpu) + c[i].booted_cores++; + } else if (i != cpu && !c[cpu].booted_cores) + c[cpu].booted_cores = c[i].booted_cores; + } } } @@ -993,6 +1026,7 @@ void __init smp_prepare_cpus(unsigned int max_cpus) nmi_watchdog_default(); current_cpu_data = boot_cpu_data; current_thread_info()->cpu = 0; /* needed? 
*/ + set_cpu_sibling_map(0); if (smp_sanity_check(max_cpus) < 0) { printk(KERN_INFO "SMP disabled\n"); @@ -1036,8 +1070,6 @@ void __init smp_prepare_boot_cpu(void) int me = smp_processor_id(); cpu_set(me, cpu_online_map); cpu_set(me, cpu_callout_map); - cpu_set(0, cpu_sibling_map[0]); - cpu_set(0, cpu_core_map[0]); per_cpu(cpu_state, me) = CPU_ONLINE; } @@ -1106,15 +1138,24 @@ void __init smp_cpus_done(unsigned int max_cpus) static void remove_siblinginfo(int cpu) { int sibling; + struct cpuinfo_x86 *c = cpu_data; + for_each_cpu_mask(sibling, cpu_core_map[cpu]) { + cpu_clear(cpu, cpu_core_map[sibling]); + /* + * last thread sibling in this cpu core going down + */ + if (cpus_weight(cpu_sibling_map[cpu]) == 1) + c[sibling].booted_cores--; + } + for_each_cpu_mask(sibling, cpu_sibling_map[cpu]) cpu_clear(cpu, cpu_sibling_map[sibling]); - for_each_cpu_mask(sibling, cpu_core_map[cpu]) - cpu_clear(cpu, cpu_core_map[sibling]); cpus_clear(cpu_sibling_map[cpu]); cpus_clear(cpu_core_map[cpu]); phys_proc_id[cpu] = BAD_APICID; cpu_core_id[cpu] = BAD_APICID; + cpu_clear(cpu, cpu_sibling_setup_map); } void remove_cpu_from_maps(void) -- cgit v1.2.3 From 2b0918758dd68d6b8d01318a5200b65b9209760d Mon Sep 17 00:00:00 2001 From: "Siddha, Suresh B" Date: Sat, 5 Nov 2005 17:25:54 +0100 Subject: [PATCH] x86_64: x86_64/i386 fix Intel cache detection code assumption about threads sharing Fix the Intel cache detection code assumption that number of threads sharing the cache will either be equal to number of HT or core siblings. This also cleans up the code in general a bit. Signed-off-by: Suresh Siddha Signed-off-by: Andi Kleen Signed-off-by: Linus Torvalds --- arch/i386/kernel/cpu/intel_cacheinfo.c | 46 +++++++++++++++++++++++----------- 1 file changed, 32 insertions(+), 14 deletions(-) (limited to 'arch') diff --git a/arch/i386/kernel/cpu/intel_cacheinfo.c b/arch/i386/kernel/cpu/intel_cacheinfo.c index e66d14099564..fbfd374aa336 100644 --- a/arch/i386/kernel/cpu/intel_cacheinfo.c +++ b/arch/i386/kernel/cpu/intel_cacheinfo.c @@ -293,29 +293,45 @@ static struct _cpuid4_info *cpuid4_info[NR_CPUS]; #ifdef CONFIG_SMP static void __cpuinit cache_shared_cpu_map_setup(unsigned int cpu, int index) { - struct _cpuid4_info *this_leaf; + struct _cpuid4_info *this_leaf, *sibling_leaf; unsigned long num_threads_sharing; -#ifdef CONFIG_X86_HT - struct cpuinfo_x86 *c = cpu_data + cpu; -#endif + int index_msb, i; + struct cpuinfo_x86 *c = cpu_data; this_leaf = CPUID4_INFO_IDX(cpu, index); num_threads_sharing = 1 + this_leaf->eax.split.num_threads_sharing; if (num_threads_sharing == 1) cpu_set(cpu, this_leaf->shared_cpu_map); -#ifdef CONFIG_X86_HT - else if (num_threads_sharing == smp_num_siblings) - this_leaf->shared_cpu_map = cpu_sibling_map[cpu]; - else if (num_threads_sharing == (c->x86_max_cores * smp_num_siblings)) - this_leaf->shared_cpu_map = cpu_core_map[cpu]; - else - printk(KERN_DEBUG "Number of CPUs sharing cache didn't match " - "any known set of CPUs\n"); -#endif + else { + index_msb = get_count_order(num_threads_sharing); + + for_each_online_cpu(i) { + if (c[i].apicid >> index_msb == + c[cpu].apicid >> index_msb) { + cpu_set(i, this_leaf->shared_cpu_map); + if (i != cpu && cpuid4_info[i]) { + sibling_leaf = CPUID4_INFO_IDX(i, index); + cpu_set(cpu, sibling_leaf->shared_cpu_map); + } + } + } + } +} +static void __devinit cache_remove_shared_cpu_map(unsigned int cpu, int index) +{ + struct _cpuid4_info *this_leaf, *sibling_leaf; + int sibling; + + this_leaf = CPUID4_INFO_IDX(cpu, index); + for_each_cpu_mask(sibling, 
this_leaf->shared_cpu_map) { + sibling_leaf = CPUID4_INFO_IDX(sibling, index); + cpu_clear(cpu, sibling_leaf->shared_cpu_map); + } } #else static void __init cache_shared_cpu_map_setup(unsigned int cpu, int index) {} +static void __init cache_remove_shared_cpu_map(unsigned int cpu, int index) {} #endif static void free_cache_attributes(unsigned int cpu) @@ -574,8 +590,10 @@ static void __cpuexit cache_remove_dev(struct sys_device * sys_dev) unsigned int cpu = sys_dev->id; unsigned long i; - for (i = 0; i < num_cache_leaves; i++) + for (i = 0; i < num_cache_leaves; i++) { + cache_remove_shared_cpu_map(cpu, i); kobject_unregister(&(INDEX_KOBJECT_PTR(cpu,i)->kobj)); + } kobject_unregister(cache_kobject[cpu]); cpuid4_cache_sysfs_exit(cpu); return; -- cgit v1.2.3 From a6f5deb2be4c82f24fefadcbf7e448f540c05ae6 Mon Sep 17 00:00:00 2001 From: Andi Kleen Date: Sat, 5 Nov 2005 17:25:54 +0100 Subject: [PATCH] x86_64: Reduce number of retries for reset through keyboard controller Old code could retry for 10 seconds worst time. Only try it for one second now. Suggested by Yinghai Lu Cc: Yinghai.Lu@amd.com Signed-off-by: Andi Kleen Signed-off-by: Linus Torvalds --- arch/x86_64/kernel/reboot.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'arch') diff --git a/arch/x86_64/kernel/reboot.c b/arch/x86_64/kernel/reboot.c index 97804bc2c0a0..75235ed2b31b 100644 --- a/arch/x86_64/kernel/reboot.c +++ b/arch/x86_64/kernel/reboot.c @@ -121,7 +121,7 @@ void machine_emergency_restart(void) /* Could also try the reset bit in the Hammer NB */ switch (reboot_type) { case BOOT_KBD: - for (i=0; i<100; i++) { + for (i=0; i<10; i++) { kb_wait(); udelay(50); outb(0xfe,0x64); /* pulse reset low */ -- cgit v1.2.3 From a5b250a428aabc619ace872f8220a7d0b8f7d557 Mon Sep 17 00:00:00 2001 From: Andi Kleen Date: Sat, 5 Nov 2005 17:25:54 +0100 Subject: [PATCH] x86_64: Remove optimization for B stepping AMD K8 B stepping were the first shipping Opterons. memcpy/memset/copy_page/ clear_page had special optimized version for them. These are really old and in the minority now and the difference to the generic versions (using rep microcode) is not that big anyways. So just remove them. TODO: figure out optimized versions for Intel Netburst based EM64T Signed-off-by: Andi Kleen Signed-off-by: Linus Torvalds --- arch/x86_64/lib/clear_page.S | 38 ------------------ arch/x86_64/lib/copy_page.S | 87 ---------------------------------------- arch/x86_64/lib/memcpy.S | 93 +------------------------------------------ arch/x86_64/lib/memset.S | 94 -------------------------------------------- 4 files changed, 2 insertions(+), 310 deletions(-) (limited to 'arch') diff --git a/arch/x86_64/lib/clear_page.S b/arch/x86_64/lib/clear_page.S index 30a9da458c15..43d9fa136180 100644 --- a/arch/x86_64/lib/clear_page.S +++ b/arch/x86_64/lib/clear_page.S @@ -5,46 +5,8 @@ .globl clear_page .p2align 4 clear_page: - xorl %eax,%eax - movl $4096/64,%ecx - .p2align 4 -.Lloop: - decl %ecx -#define PUT(x) movq %rax,x*8(%rdi) - movq %rax,(%rdi) - PUT(1) - PUT(2) - PUT(3) - PUT(4) - PUT(5) - PUT(6) - PUT(7) - leaq 64(%rdi),%rdi - jnz .Lloop - nop - ret -clear_page_end: - - /* C stepping K8 run faster using the string instructions. - It is also a lot simpler. 
Use this when possible */ - -#include - - .section .altinstructions,"a" - .align 8 - .quad clear_page - .quad clear_page_c - .byte X86_FEATURE_K8_C - .byte clear_page_end-clear_page - .byte clear_page_c_end-clear_page_c - .previous - - .section .altinstr_replacement,"ax" -clear_page_c: movl $4096/8,%ecx xorl %eax,%eax rep stosq ret -clear_page_c_end: - .previous diff --git a/arch/x86_64/lib/copy_page.S b/arch/x86_64/lib/copy_page.S index dd3aa47b6bf5..621a19769406 100644 --- a/arch/x86_64/lib/copy_page.S +++ b/arch/x86_64/lib/copy_page.S @@ -8,94 +8,7 @@ .globl copy_page .p2align 4 copy_page: - subq $3*8,%rsp - movq %rbx,(%rsp) - movq %r12,1*8(%rsp) - movq %r13,2*8(%rsp) - - movl $(4096/64)-5,%ecx - .p2align 4 -.Loop64: - dec %rcx - - movq (%rsi), %rax - movq 8 (%rsi), %rbx - movq 16 (%rsi), %rdx - movq 24 (%rsi), %r8 - movq 32 (%rsi), %r9 - movq 40 (%rsi), %r10 - movq 48 (%rsi), %r11 - movq 56 (%rsi), %r12 - - prefetcht0 5*64(%rsi) - - movq %rax, (%rdi) - movq %rbx, 8 (%rdi) - movq %rdx, 16 (%rdi) - movq %r8, 24 (%rdi) - movq %r9, 32 (%rdi) - movq %r10, 40 (%rdi) - movq %r11, 48 (%rdi) - movq %r12, 56 (%rdi) - - leaq 64 (%rsi), %rsi - leaq 64 (%rdi), %rdi - - jnz .Loop64 - - movl $5,%ecx - .p2align 4 -.Loop2: - decl %ecx - - movq (%rsi), %rax - movq 8 (%rsi), %rbx - movq 16 (%rsi), %rdx - movq 24 (%rsi), %r8 - movq 32 (%rsi), %r9 - movq 40 (%rsi), %r10 - movq 48 (%rsi), %r11 - movq 56 (%rsi), %r12 - - movq %rax, (%rdi) - movq %rbx, 8 (%rdi) - movq %rdx, 16 (%rdi) - movq %r8, 24 (%rdi) - movq %r9, 32 (%rdi) - movq %r10, 40 (%rdi) - movq %r11, 48 (%rdi) - movq %r12, 56 (%rdi) - - leaq 64(%rdi),%rdi - leaq 64(%rsi),%rsi - - jnz .Loop2 - - movq (%rsp),%rbx - movq 1*8(%rsp),%r12 - movq 2*8(%rsp),%r13 - addq $3*8,%rsp - ret - - /* C stepping K8 run faster using the string copy instructions. - It is also a lot simpler. Use this when possible */ - -#include - - .section .altinstructions,"a" - .align 8 - .quad copy_page - .quad copy_page_c - .byte X86_FEATURE_K8_C - .byte copy_page_c_end-copy_page_c - .byte copy_page_c_end-copy_page_c - .previous - - .section .altinstr_replacement,"ax" -copy_page_c: movl $4096/8,%ecx rep movsq ret -copy_page_c_end: - .previous diff --git a/arch/x86_64/lib/memcpy.S b/arch/x86_64/lib/memcpy.S index c6c46494fef5..92dd80544602 100644 --- a/arch/x86_64/lib/memcpy.S +++ b/arch/x86_64/lib/memcpy.S @@ -11,6 +11,8 @@ * * Output: * rax original destination + * + * TODO: check best memcpy for PSC */ .globl __memcpy @@ -18,95 +20,6 @@ .p2align 4 __memcpy: memcpy: - pushq %rbx - movq %rdi,%rax - - movl %edx,%ecx - shrl $6,%ecx - jz .Lhandle_tail - - .p2align 4 -.Lloop_64: - decl %ecx - - movq (%rsi),%r11 - movq 8(%rsi),%r8 - - movq %r11,(%rdi) - movq %r8,1*8(%rdi) - - movq 2*8(%rsi),%r9 - movq 3*8(%rsi),%r10 - - movq %r9,2*8(%rdi) - movq %r10,3*8(%rdi) - - movq 4*8(%rsi),%r11 - movq 5*8(%rsi),%r8 - - movq %r11,4*8(%rdi) - movq %r8,5*8(%rdi) - - movq 6*8(%rsi),%r9 - movq 7*8(%rsi),%r10 - - movq %r9,6*8(%rdi) - movq %r10,7*8(%rdi) - - leaq 64(%rsi),%rsi - leaq 64(%rdi),%rdi - jnz .Lloop_64 - -.Lhandle_tail: - movl %edx,%ecx - andl $63,%ecx - shrl $3,%ecx - jz .Lhandle_7 - .p2align 4 -.Lloop_8: - decl %ecx - movq (%rsi),%r8 - movq %r8,(%rdi) - leaq 8(%rdi),%rdi - leaq 8(%rsi),%rsi - jnz .Lloop_8 - -.Lhandle_7: - movl %edx,%ecx - andl $7,%ecx - jz .Lende - .p2align 4 -.Lloop_1: - movb (%rsi),%r8b - movb %r8b,(%rdi) - incq %rdi - incq %rsi - decl %ecx - jnz .Lloop_1 - -.Lende: - popq %rbx - ret -.Lfinal: - - /* C stepping K8 run faster using the string copy instructions. 
- It is also a lot simpler. Use this when possible */ - - .section .altinstructions,"a" - .align 8 - .quad memcpy - .quad memcpy_c - .byte X86_FEATURE_K8_C - .byte .Lfinal-memcpy - .byte memcpy_c_end-memcpy_c - .previous - - .section .altinstr_replacement,"ax" - /* rdi destination - * rsi source - * rdx count - */ -memcpy_c: movq %rdi,%rax movl %edx,%ecx shrl $3,%ecx @@ -117,5 +30,3 @@ memcpy_c: rep movsb ret -memcpy_c_end: - .previous diff --git a/arch/x86_64/lib/memset.S b/arch/x86_64/lib/memset.S index 4b4c40638640..2aa48f24ed1e 100644 --- a/arch/x86_64/lib/memset.S +++ b/arch/x86_64/lib/memset.S @@ -13,98 +13,6 @@ .p2align 4 memset: __memset: - movq %rdi,%r10 - movq %rdx,%r11 - - /* expand byte value */ - movzbl %sil,%ecx - movabs $0x0101010101010101,%rax - mul %rcx /* with rax, clobbers rdx */ - - /* align dst */ - movl %edi,%r9d - andl $7,%r9d - jnz .Lbad_alignment -.Lafter_bad_alignment: - - movl %r11d,%ecx - shrl $6,%ecx - jz .Lhandle_tail - - .p2align 4 -.Lloop_64: - decl %ecx - movq %rax,(%rdi) - movq %rax,8(%rdi) - movq %rax,16(%rdi) - movq %rax,24(%rdi) - movq %rax,32(%rdi) - movq %rax,40(%rdi) - movq %rax,48(%rdi) - movq %rax,56(%rdi) - leaq 64(%rdi),%rdi - jnz .Lloop_64 - - /* Handle tail in loops. The loops should be faster than hard - to predict jump tables. */ - .p2align 4 -.Lhandle_tail: - movl %r11d,%ecx - andl $63&(~7),%ecx - jz .Lhandle_7 - shrl $3,%ecx - .p2align 4 -.Lloop_8: - decl %ecx - movq %rax,(%rdi) - leaq 8(%rdi),%rdi - jnz .Lloop_8 - -.Lhandle_7: - movl %r11d,%ecx - andl $7,%ecx - jz .Lende - .p2align 4 -.Lloop_1: - decl %ecx - movb %al,(%rdi) - leaq 1(%rdi),%rdi - jnz .Lloop_1 - -.Lende: - movq %r10,%rax - ret - -.Lbad_alignment: - cmpq $7,%r11 - jbe .Lhandle_7 - movq %rax,(%rdi) /* unaligned store */ - movq $8,%r8 - subq %r9,%r8 - addq %r8,%rdi - subq %r8,%r11 - jmp .Lafter_bad_alignment - - /* C stepping K8 run faster using the string instructions. - It is also a lot simpler. Use this when possible */ - -#include - - .section .altinstructions,"a" - .align 8 - .quad memset - .quad memset_c - .byte X86_FEATURE_K8_C - .byte memset_c_end-memset_c - .byte memset_c_end-memset_c - .previous - - .section .altinstr_replacement,"ax" - /* rdi destination - * rsi value - * rdx count - */ -memset_c: movq %rdi,%r9 movl %edx,%r8d andl $7,%r8d @@ -121,5 +29,3 @@ memset_c: stosb movq %r9,%rax ret -memset_c_end: - .previous -- cgit v1.2.3 From efbbdce94f6ea54cf06d9a06e4c95f6874ad64a8 Mon Sep 17 00:00:00 2001 From: Paolo 'Blaisorblade' Giarrusso Date: Sat, 5 Nov 2005 17:25:54 +0100 Subject: [PATCH] x86_64: Use common sys_time64 Keeping this function does not makes sense because it's a copied (and buggy) copy of sys_time. The only difference is that now.tv_sec (which is a time_t, i.e. a 64-bit long) is copied (and truncated) into a int (32-bit). The prototype is the same (they both take a long __user *), so let's drop this and redirect it to sys_time (and make sure it exists by defining __ARCH_WANT_SYS_TIME). Only disadvantage is that the sys_stime definition is also compiled (may be fixed if needed by adding a separate __ARCH_WANT_SYS_STIME macro, and defining it for all arch's defining __ARCH_WANT_SYS_TIME except x86_64). 
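The truncation described above can be reproduced in a small stand-alone program (plain userspace C, shown only to illustrate the bug, not kernel code): a 64-bit seconds value stored into an int loses its upper bits, whereas storing it into a long on x86_64 (LP64) does not.

```c
#include <stdio.h>

int main(void)
{
        /* a seconds-since-epoch value past 2038 that no longer fits in a
         * signed 32-bit int */
        long long secs = 4102444800LL;          /* 2100-01-01 00:00:00 UTC */

        int as_int = (int)secs;                 /* the removed sys_time64 pattern: tv_sec -> int */
        long as_long = (long)secs;              /* the sys_time pattern on 64-bit: tv_sec -> long */

        printf("original      : %lld\n", secs);
        printf("through an int: %d\n", as_int);     /* wraps/truncates */
        printf("through a long: %ld\n", as_long);   /* intact on LP64 */
        return 0;
}
```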
Acked-by: Andi Kleen Signed-off-by: Paolo 'Blaisorblade' Giarrusso Signed-off-by: Andrew Morton Signed-off-by: Andi Kleen Signed-off-by: Linus Torvalds --- arch/x86_64/kernel/sys_x86_64.c | 14 -------------- 1 file changed, 14 deletions(-) (limited to 'arch') diff --git a/arch/x86_64/kernel/sys_x86_64.c b/arch/x86_64/kernel/sys_x86_64.c index cc7821c68851..6449ea8fe756 100644 --- a/arch/x86_64/kernel/sys_x86_64.c +++ b/arch/x86_64/kernel/sys_x86_64.c @@ -154,17 +154,3 @@ asmlinkage long sys_uname(struct new_utsname __user * name) err |= copy_to_user(&name->machine, "i686", 5); return err ? -EFAULT : 0; } - -asmlinkage long sys_time64(long __user * tloc) -{ - struct timeval now; - int i; - - do_gettimeofday(&now); - i = now.tv_sec; - if (tloc) { - if (put_user(i,tloc)) - i = -EFAULT; - } - return i; -} -- cgit v1.2.3 From b0bd35e622ffbda2c01dc67a0381c6a18817a29a Mon Sep 17 00:00:00 2001 From: Ravikiran G Thirumalai Date: Sat, 5 Nov 2005 17:25:54 +0100 Subject: [PATCH] x86_64: Make ACPI NUMA and NUMA emulation peers of K8_NUMA in Kconfig On x86_64 arches, there is no way to choose ACPI_NUMA without having to choose K8_NUMA. CONFIG_K8_NUMA is not needed for Intel EM64T NUMA boxes. It also looks odd if you have to select ACPI_NUMA from the power management menu. This patch fixes those oddities. Patch does the following: 1. Makes NUMA a config option like other arches 2. Makes topology detection options like K8_NUMA dependent on NUMA 3. Choosing ACPI NUMA detection can be done from the standard "Processor type and features" menu AK: I fixed up the dependencies and changed the help texts a bit on top of Kiran's patch. Signed-off-by: Ravikiran Thirumalai Signed-off-by: Shai Fultheim Signed-off-by: Andi Kleen Signed-off-by: Linus Torvalds --- arch/x86_64/Kconfig | 47 ++++++++++++++++++++++++++++++++--------------- 1 file changed, 32 insertions(+), 15 deletions(-) (limited to 'arch') diff --git a/arch/x86_64/Kconfig b/arch/x86_64/Kconfig index fd10bf82f8d4..1d6242a5cd0a 100644 --- a/arch/x86_64/Kconfig +++ b/arch/x86_64/Kconfig @@ -226,22 +226,42 @@ config SCHED_SMT source "kernel/Kconfig.preempt" -config K8_NUMA - bool "K8 NUMA support" - select NUMA +config NUMA + bool "Non Uniform Memory Access (NUMA) Support" depends on SMP help - Enable NUMA (Non Unified Memory Architecture) support for - AMD Opteron Multiprocessor systems. The kernel will try to allocate - memory used by a CPU on the local memory controller of the CPU - and add some more NUMA awareness to the kernel. - This code is recommended on all multiprocessor Opteron systems - and normally doesn't hurt on others. + Enable NUMA (Non Uniform Memory Access) support. The kernel + will try to allocate memory used by a CPU on the local memory + controller of the CPU and add some more NUMA awareness to the kernel. + This code is recommended on all multiprocessor Opteron systems. + If the system is EM64T, you should say N unless your system is EM64T + NUMA. + +config K8_NUMA + bool "Old style AMD Opteron NUMA detection" + depends on NUMA + default y + help + Enable K8 NUMA node topology detection. You should say Y here if + you have a multi processor AMD K8 system. This uses an old + method to read the NUMA configurtion directly from the builtin + Northbridge of Opteron. It is recommended to use X86_64_ACPI_NUMA + instead, which also takes priority if both are compiled in. + +# Dummy CONFIG option to select ACPI_NUMA from drivers/acpi/Kconfig. 
+
+config X86_64_ACPI_NUMA
+       bool "ACPI NUMA detection"
+       depends on NUMA
+       select ACPI
+       select ACPI_NUMA
+       default y
+       help
+         Enable ACPI SRAT based node topology detection.

 config NUMA_EMU
-       bool "NUMA emulation support"
-       select NUMA
-       depends on SMP
+       bool "NUMA emulation"
+       depends on NUMA
        help
          Enable NUMA emulation. A flat machine will be split
          into virtual nodes when booted with "numa=fake=N", where N is the
@@ -252,9 +272,6 @@ config ARCH_DISCONTIGMEM_ENABLE
        depends on NUMA
        default y

-config NUMA
-       bool
-       default n

 config ARCH_DISCONTIGMEM_ENABLE
        def_bool y
-- cgit v1.2.3


From e583538f077d5f70191670b47a046ba436ec3428 Mon Sep 17 00:00:00 2001
From: Andi Kleen
Date: Sat, 5 Nov 2005 17:25:54 +0100
Subject: [PATCH] x86_64: Log machine checks from boot on Intel systems

The logging for boot errors was turned off because it was broken on
some AMD systems. But give Intel EM64T systems a chance because they are
supposed to be correct there. The advantage is that there is a chance to
actually log uncorrected machine checks after the reset.

Signed-off-by: Andi Kleen
Signed-off-by: Linus Torvalds
---
 arch/x86_64/kernel/mce.c | 14 +++++++++-----
 1 file changed, 9 insertions(+), 5 deletions(-)

(limited to 'arch')

diff --git a/arch/x86_64/kernel/mce.c b/arch/x86_64/kernel/mce.c
index cf8a76f0f47e..183dc6105429 100644
--- a/arch/x86_64/kernel/mce.c
+++ b/arch/x86_64/kernel/mce.c
@@ -37,7 +37,7 @@ static unsigned long bank[NR_BANKS] = { [0 ... NR_BANKS-1] = ~0UL };
 static unsigned long console_logged;
 static int notify_user;
 static int rip_msr;
-static int mce_bootlog;
+static int mce_bootlog = 1;

 /*
  * Lockless MCE logging infrastructure.
@@ -347,7 +347,11 @@ static void __cpuinit mce_cpu_quirks(struct cpuinfo_x86 *c)
                /* disable GART TBL walk error reporting, which trips off
                   incorrectly with the IOMMU & 3ware & Cerberus. */
                clear_bit(10, &bank[4]);
+               /* Lots of broken BIOS around that don't clear them
+                  by default and leave crap in there. Don't log. */
+               mce_bootlog = 0;
        }
+
 }

 static void __cpuinit mce_cpu_features(struct cpuinfo_x86 *c)
@@ -498,16 +502,16 @@ static int __init mcheck_disable(char *str)
 /* mce=off disables machine check. Note you can reenable it later
    using sysfs.
    mce=TOLERANCELEVEL (number, see above)
-   mce=bootlog Log MCEs from before booting. Disabled by default to work
-   around buggy BIOS that leave bogus MCEs. */
+   mce=bootlog Log MCEs from before booting. Disabled by default on AMD.
+   mce=nobootlog Don't log MCEs from before booting. */
 static int __init mcheck_enable(char *str)
 {
        if (*str == '=')
                str++;
        if (!strcmp(str, "off"))
                mce_dont_init = 1;
-       else if (!strcmp(str, "bootlog"))
-               mce_bootlog = 1;
+       else if (!strcmp(str, "bootlog") || !strcmp(str,"nobootlog"))
+               mce_bootlog = str[0] == 'b';
        else if (isdigit(str[0]))
                get_option(&str, &tolerant);
        else
-- cgit v1.2.3


From ffd10a2b77bca50dd05ba26acd5a6e68bcc8f61f Mon Sep 17 00:00:00 2001
From: Magnus Damm
Date: Sat, 5 Nov 2005 17:25:54 +0100
Subject: [PATCH] x86_64: Make node boundaries consistent

The current x86_64 NUMA memory code is inconsistent when it comes to node
memory ranges. The exact behaviour varies depending on which config option
is used.

setup_node_bootmem() has start and end as arguments, and these are used to
calculate the size of the node like this: (end - start). This is all fine
if end points to the first non-available byte. The problem is that the
current x86_64 code sometimes treats it as the last present byte and
sometimes as the first non-available byte.
The result is that some configurations might lose a page at the end of the range. This patch tries to fix CONFIG_ACPI_NUMA, CONFIG_K8_NUMA and CONFIG_NUMA_EMU so they all treat the end variable as the first non-available byte. This is the same way as the single node code. The patch is boot tested on dual x86_64 hardware with the above configurations, but maybe the removed code is needed as some workaround? Signed-off-by: Magnus Damm Signed-off-by: Andi Kleen Signed-off-by: Linus Torvalds --- arch/x86_64/mm/k8topology.c | 1 + arch/x86_64/mm/numa.c | 2 -- arch/x86_64/mm/srat.c | 4 ---- 3 files changed, 1 insertion(+), 6 deletions(-) (limited to 'arch') diff --git a/arch/x86_64/mm/k8topology.c b/arch/x86_64/mm/k8topology.c index 65417b040c1b..a5663e0bb01c 100644 --- a/arch/x86_64/mm/k8topology.c +++ b/arch/x86_64/mm/k8topology.c @@ -108,6 +108,7 @@ int __init k8_scan_nodes(unsigned long start, unsigned long end) limit >>= 16; limit <<= 24; limit |= (1<<24)-1; + limit++; if (limit > end_pfn << PAGE_SHIFT) limit = end_pfn << PAGE_SHIFT; diff --git a/arch/x86_64/mm/numa.c b/arch/x86_64/mm/numa.c index edd5559380d3..629ff0621b3d 100644 --- a/arch/x86_64/mm/numa.c +++ b/arch/x86_64/mm/numa.c @@ -209,8 +209,6 @@ static int numa_emulation(unsigned long start_pfn, unsigned long end_pfn) if (i == numa_fake-1) sz = (end_pfn<start = nd->end; } if (nd->end > end) { - if (!(end & 0xfff)) - end--; nd->end = end; if (nd->start > nd->end) nd->start = nd->end; @@ -166,8 +164,6 @@ acpi_numa_memory_affinity_init(struct acpi_table_memory_affinity *ma) if (nd->end < end) nd->end = end; } - if (!(nd->end & 0xfff)) - nd->end--; printk(KERN_INFO "SRAT: Node %u PXM %u %Lx-%Lx\n", node, pxm, nd->start, nd->end); } -- cgit v1.2.3 From 9e43e1b7c7c9872da032442d8e4bb112a02d16f4 Mon Sep 17 00:00:00 2001 From: Andi Kleen Date: Sat, 5 Nov 2005 17:25:54 +0100 Subject: [PATCH] x86_64: Remove CONFIG_CHECKING and add command line option for pagefault tracing CONFIG_CHECKING covered some debugging code used in the early times of the port. But it wasn't even SMP safe for quite some time and the bugs it checked for seem to be gone. This patch removes all the code to verify GS at kernel entry. There haven't been any new bugs in this area for a long time. Previously it also covered the sysctl for the page fault tracing. That didn't make much sense because that code was unconditionally compiled in. I made that a boot option now because it is typically only useful at boot. Signed-off-by: Andi Kleen Signed-off-by: Linus Torvalds --- arch/x86_64/Kconfig.debug | 9 --------- arch/x86_64/kernel/traps.c | 40 ---------------------------------------- arch/x86_64/mm/fault.c | 19 +++++++------------ arch/x86_64/mm/init.c | 4 ---- 4 files changed, 7 insertions(+), 65 deletions(-) (limited to 'arch') diff --git a/arch/x86_64/Kconfig.debug b/arch/x86_64/Kconfig.debug index 9cf1410d2f5a..3ccf6f4d1068 100644 --- a/arch/x86_64/Kconfig.debug +++ b/arch/x86_64/Kconfig.debug @@ -2,15 +2,6 @@ menu "Kernel hacking" source "lib/Kconfig.debug" -# !SMP for now because the context switch early causes GPF in segment reloading -# and the GS base checking does the wrong thing then, causing a hang. -config CHECKING - bool "Additional run-time checks" - depends on DEBUG_KERNEL && !SMP - help - Enables some internal consistency checks for kernel debugging. - You should normally say N. 
- config INIT_DEBUG bool "Debug __init statements" depends on DEBUG_KERNEL diff --git a/arch/x86_64/kernel/traps.c b/arch/x86_64/kernel/traps.c index 4a836384dd0f..bf337f493189 100644 --- a/arch/x86_64/kernel/traps.c +++ b/arch/x86_64/kernel/traps.c @@ -428,19 +428,6 @@ static void __kprobes do_trap(int trapnr, int signr, char *str, { conditional_sti(regs); -#ifdef CONFIG_CHECKING - { - unsigned long gs; - struct x8664_pda *pda = cpu_pda + safe_smp_processor_id(); - rdmsrl(MSR_GS_BASE, gs); - if (gs != (unsigned long)pda) { - wrmsrl(MSR_GS_BASE, pda); - printk("%s: wrong gs %lx expected %p rip %lx\n", str, gs, pda, - regs->rip); - } - } -#endif - if (user_mode(regs)) { struct task_struct *tsk = current; @@ -513,20 +500,6 @@ asmlinkage void __kprobes do_general_protection(struct pt_regs * regs, { conditional_sti(regs); -#ifdef CONFIG_CHECKING - { - unsigned long gs; - struct x8664_pda *pda = cpu_pda + safe_smp_processor_id(); - rdmsrl(MSR_GS_BASE, gs); - if (gs != (unsigned long)pda) { - wrmsrl(MSR_GS_BASE, pda); - oops_in_progress++; - printk("general protection handler: wrong gs %lx expected %p\n", gs, pda); - oops_in_progress--; - } - } -#endif - if (user_mode(regs)) { struct task_struct *tsk = current; @@ -665,19 +638,6 @@ asmlinkage void __kprobes do_debug(struct pt_regs * regs, struct task_struct *tsk = current; siginfo_t info; -#ifdef CONFIG_CHECKING - { - /* RED-PEN interaction with debugger - could destroy gs */ - unsigned long gs; - struct x8664_pda *pda = cpu_pda + safe_smp_processor_id(); - rdmsrl(MSR_GS_BASE, gs); - if (gs != (unsigned long)pda) { - wrmsrl(MSR_GS_BASE, pda); - printk("debug handler: wrong gs %lx expected %p\n", gs, pda); - } - } -#endif - get_debugreg(condition, 6); if (notify_die(DIE_DEBUG, "debug", regs, condition, error_code, diff --git a/arch/x86_64/mm/fault.c b/arch/x86_64/mm/fault.c index b75b872ec154..3a63707a698b 100644 --- a/arch/x86_64/mm/fault.c +++ b/arch/x86_64/mm/fault.c @@ -308,18 +308,6 @@ asmlinkage void __kprobes do_page_fault(struct pt_regs *regs, unsigned long flags; siginfo_t info; -#ifdef CONFIG_CHECKING - { - unsigned long gs; - struct x8664_pda *pda = cpu_pda + stack_smp_processor_id(); - rdmsrl(MSR_GS_BASE, gs); - if (gs != (unsigned long)pda) { - wrmsrl(MSR_GS_BASE, pda); - printk("page_fault: wrong gs %lx expected %p\n", gs, pda); - } - } -#endif - /* get the address */ __asm__("movq %%cr2,%0":"=r" (address)); if (notify_die(DIE_PAGE_FAULT, "page fault", regs, error_code, 14, @@ -571,3 +559,10 @@ do_sigbus: force_sig_info(SIGBUS, &info, tsk); return; } + +static int __init enable_pagefaulttrace(char *str) +{ + page_fault_trace = 1; + return 0; +} +__setup("pagefaulttrace", enable_pagefaulttrace); diff --git a/arch/x86_64/mm/init.c b/arch/x86_64/mm/init.c index 854a41b8372b..286f6a624c3a 100644 --- a/arch/x86_64/mm/init.c +++ b/arch/x86_64/mm/init.c @@ -565,10 +565,6 @@ extern int exception_trace, page_fault_trace; static ctl_table debug_table2[] = { { 99, "exception-trace", &exception_trace, sizeof(int), 0644, NULL, proc_dointvec }, -#ifdef CONFIG_CHECKING - { 100, "page-fault-trace", &page_fault_trace, sizeof(int), 0644, NULL, - proc_dointvec }, -#endif { 0, } }; -- cgit v1.2.3 From 8893166ff8694f36655009aa9bf8e7f2e1c9339f Mon Sep 17 00:00:00 2001 From: Andi Kleen Date: Sat, 5 Nov 2005 17:25:54 +0100 Subject: [PATCH] x86_64: Increase the maximum number of local APICs to the maximum This is needed for large multinode IBM systems which have a sparse APIC space in clustered mode, fully covering the available 8 bits. 
The previous kernels would limit the local APIC number to 127, which
caused some of the CPUs to be rejected at boot. I increased the maximum
and shrunk the apic_version array a bit to make up for that (the version
is only 8 bits, so a full int is not needed to store it).

Cc: Chris McDermott
Signed-off-by: Andi Kleen
Signed-off-by: Linus Torvalds
---
 arch/x86_64/kernel/mpparse.c | 11 +++++++----
 1 file changed, 7 insertions(+), 4 deletions(-)

(limited to 'arch')

diff --git a/arch/x86_64/kernel/mpparse.c b/arch/x86_64/kernel/mpparse.c
index 1d61f10a92c6..1105250bf02c 100644
--- a/arch/x86_64/kernel/mpparse.c
+++ b/arch/x86_64/kernel/mpparse.c
@@ -42,7 +42,7 @@ int acpi_found_madt;
  * Various Linux-internal data structures created from the
  * MP-table.
  */
-int apic_version [MAX_APICS];
+unsigned char apic_version [MAX_APICS];
 unsigned char mp_bus_id_to_type [MAX_MP_BUSSES] = { [0 ... MAX_MP_BUSSES-1] = -1 };
 int mp_bus_id_to_pci_bus [MAX_MP_BUSSES] = { [0 ... MAX_MP_BUSSES-1] = -1 };
@@ -108,7 +108,8 @@ static int __init mpf_checksum(unsigned char *mp, int len)
 static void __init MP_processor_info (struct mpc_config_processor *m)
 {
-       int ver, cpu;
+       int cpu;
+       unsigned char ver;
        static int found_bsp=0;

        if (!(m->mpc_cpuflag & CPU_ENABLED)) {
@@ -133,12 +134,14 @@ static void __init MP_processor_info (struct mpc_config_processor *m)
        }

        cpu = num_processors++;
-
-       if (m->mpc_apicid > MAX_APICS) {
+
+#if MAX_APICS < 255
+       if ((int)m->mpc_apicid > MAX_APICS) {
                printk(KERN_ERR "Processor #%d INVALID. (Max ID: %d).\n",
                        m->mpc_apicid, MAX_APICS);
                return;
        }
+#endif
        ver = m->mpc_apicver;

        physid_set(m->mpc_apicid, phys_cpu_present_map);
-- cgit v1.2.3


From d3ee871e63d0a0c70413dc0aa5534b8d6cd6ec37 Mon Sep 17 00:00:00 2001
From: Bob Picco
Date: Sat, 5 Nov 2005 17:25:54 +0100
Subject: [PATCH] x86_64: Fix sparse mem

Fix up booting with sparse mem enabled. Otherwise it would just cause an
early PANIC at boot.

Signed-off-by: Bob Picco
Signed-off-by: Andi Kleen
Signed-off-by: Linus Torvalds
---
 arch/x86_64/kernel/setup.c |  3 ---
 arch/x86_64/mm/numa.c      | 18 +++++++++++++++++-
 2 files changed, 17 insertions(+), 4 deletions(-)

(limited to 'arch')

diff --git a/arch/x86_64/kernel/setup.c b/arch/x86_64/kernel/setup.c
index 40c77f6fe4b0..750e01dcbdf4 100644
--- a/arch/x86_64/kernel/setup.c
+++ b/arch/x86_64/kernel/setup.c
@@ -412,7 +412,6 @@ contig_initmem_init(unsigned long start_pfn, unsigned long end_pfn)
 {
        unsigned long bootmap_size, bootmap;

-       memory_present(0, start_pfn, end_pfn);
 bootmap_size = bootmem_bootmap_pages(end_pfn)<> PAGE_SHIFT; end_pfn = end >> PAGE_SHIFT;
-       memory_present(nodeid, start_pfn, end_pfn);
        nodedata_phys = find_e820_area(start, end, pgdat_size);
        if (nodedata_phys == -1L)
                panic("Cannot find memory pgdat in node %d\n", nodeid);
@@ -285,9 +284,26 @@ unsigned long __init numa_free_all_bootmem(void)
        return pages;
 }

+#ifdef CONFIG_SPARSEMEM
+static void __init arch_sparse_init(void)
+{
+       int i;
+
+       for_each_online_node(i)
+               memory_present(i, node_start_pfn(i), node_end_pfn(i));
+
+       sparse_init();
+}
+#else
+#define arch_sparse_init() do {} while (0)
+#endif
+
 void __init paging_init(void)
 {
        int i;
+
+       arch_sparse_init();
+
        for_each_online_node(i) {
                setup_node_zones(i);
        }
-- cgit v1.2.3
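As a closing illustration of the boundary convention that the "Make node boundaries consistent" patch above settles on (a stand-alone sketch with made-up numbers, not kernel code): when end is the first non-available byte, ranges are half-open [start, end), the node size is simply end - start, and an inclusive hardware limit such as the K8 northbridge one only needs the single limit++ that the patch adds to k8_scan_nodes().

#include <stdio.h>

int main(void)
{
        unsigned long start = 0x00000000UL;
        unsigned long limit = 0x3fffffffUL;     /* inclusive: last present byte */
        unsigned long end   = limit + 1;        /* exclusive: first non-available byte */

        /* With the half-open [start, end) convention there is no off-by-one. */
        printf("node size: %lu bytes\n", end - start);
        return 0;
}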