author     Huang, Ying <ying.huang@intel.com>  2008-02-04 16:48:06 +0100
committer  Ingo Molnar <mingo@elte.hu>         2008-02-04 16:48:06 +0100
commit     1c083eb2cbdd917149f6acaa55efca129d05c2a9
tree       8d44b2d7daf85393994e71b5962e10c8ca9daa6f /arch/x86
parent     f56d005d30342a45d8af2b75ecccc82200f09600
x86: fix EFI mapping
The patch updates the EFI runtime memory mapping code by making the EFI areas explicitly executable.

Signed-off-by: Huang Ying <ying.huang@intel.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
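As background for the diff below, the following is a minimal, self-contained userspace sketch of the descriptor walk this patch introduces: for each EFI runtime descriptor it chooses between the existing direct mapping and a dedicated remapping, and marks runtime-service code executable. All names here (struct efi_md, map_region(), mark_executable(), direct_map_end, the sample descriptor values) are hypothetical stand-ins for illustration only; the real kernel code uses efi_memory_desc_t, __va(), efi_ioremap(), set_memory_x() and max_pfn_mapped as shown in the patch itself.

    #include <stdint.h>
    #include <stdio.h>

    /* Hypothetical, simplified stand-ins for the kernel's EFI types/constants. */
    #define EFI_RUNTIME_SERVICES_CODE 3
    #define EFI_MEMORY_RUNTIME        (1ULL << 63)
    #define EFI_PAGE_SHIFT            12

    struct efi_md {
            uint32_t type;
            uint64_t phys_addr;
            uint64_t num_pages;
            uint64_t attribute;
    };

    /* Mock of the two mapping paths: print the decision, return a fake VA. */
    static void *map_region(uint64_t phys, uint64_t size, int direct)
    {
            printf("%-10s phys 0x%llx size 0x%llx\n",
                   direct ? "direct-map" : "remap",
                   (unsigned long long)phys, (unsigned long long)size);
            return (void *)(uintptr_t)phys;   /* placeholder virtual address */
    }

    /* Mock of set_memory_x(): record that the range was made executable. */
    static void mark_executable(void *va, uint64_t size)
    {
            printf("executable va %p size 0x%llx\n", va, (unsigned long long)size);
    }

    int main(void)
    {
            /* Two fake descriptors: runtime code inside the direct-mapped
             * range, and another runtime area above it. */
            struct efi_md map[] = {
                    { EFI_RUNTIME_SERVICES_CODE,  0x00100000ULL,   16, EFI_MEMORY_RUNTIME },
                    { 0 /* other runtime area */, 0x1000000000ULL,  4, EFI_MEMORY_RUNTIME },
            };
            uint64_t direct_map_end = 1ULL << 32;   /* assumed direct-mapping limit */

            for (size_t i = 0; i < sizeof(map) / sizeof(map[0]); i++) {
                    struct efi_md *md = &map[i];
                    uint64_t size = md->num_pages << EFI_PAGE_SHIFT;
                    uint64_t end  = md->phys_addr + size;
                    void *va;

                    if (!(md->attribute & EFI_MEMORY_RUNTIME))
                            continue;

                    /* Reuse the direct mapping when the range is already covered,
                     * otherwise set up a dedicated remapping (efi_ioremap() in the patch). */
                    va = map_region(md->phys_addr, size, end <= direct_map_end);

                    /* The point of the patch: runtime-service code is made
                     * executable explicitly rather than left to mapping defaults. */
                    if (md->type == EFI_RUNTIME_SERVICES_CODE)
                            mark_executable(va, size);
            }
            return 0;
    }

The explicit set_memory_x() call matters once NX protection is in effect: runtime_code_page_mkexec() only runs when _PAGE_NX is supported, so the executable permission on the runtime-service code area no longer depends on how the range happened to be mapped.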
Diffstat (limited to 'arch/x86')
-rw-r--r--  arch/x86/kernel/efi.c     57
-rw-r--r--  arch/x86/kernel/efi_64.c  22
2 files changed, 41 insertions(+), 38 deletions(-)
diff --git a/arch/x86/kernel/efi.c b/arch/x86/kernel/efi.c
index 1411324a625c..32dd62b36ff7 100644
--- a/arch/x86/kernel/efi.c
+++ b/arch/x86/kernel/efi.c
@@ -379,11 +379,9 @@ void __init efi_init(void)
#endif
}
-#if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
static void __init runtime_code_page_mkexec(void)
{
efi_memory_desc_t *md;
- unsigned long end;
void *p;
if (!(__supported_pte_mask & _PAGE_NX))
@@ -392,18 +390,13 @@ static void __init runtime_code_page_mkexec(void)
/* Make EFI runtime service code area executable */
for (p = memmap.map; p < memmap.map_end; p += memmap.desc_size) {
md = p;
- end = md->phys_addr + (md->num_pages << EFI_PAGE_SHIFT);
- if (md->type == EFI_RUNTIME_SERVICES_CODE &&
- (end >> PAGE_SHIFT) <= max_pfn_mapped) {
- set_memory_x(md->virt_addr, md->num_pages);
- set_memory_uc(md->virt_addr, md->num_pages);
- }
+
+ if (md->type != EFI_RUNTIME_SERVICES_CODE)
+ continue;
+
+ set_memory_x(md->virt_addr, md->num_pages << EFI_PAGE_SHIFT);
}
- __flush_tlb_all();
}
-#else
-static inline void __init runtime_code_page_mkexec(void) { }
-#endif
/*
* This function will switch the EFI runtime services to virtual mode.
@@ -417,30 +410,40 @@ void __init efi_enter_virtual_mode(void)
{
efi_memory_desc_t *md;
efi_status_t status;
- unsigned long end;
- void *p;
+ unsigned long size;
+ u64 end, systab;
+ void *p, *va;
efi.systab = NULL;
for (p = memmap.map; p < memmap.map_end; p += memmap.desc_size) {
md = p;
if (!(md->attribute & EFI_MEMORY_RUNTIME))
continue;
- end = md->phys_addr + (md->num_pages << EFI_PAGE_SHIFT);
- if ((md->attribute & EFI_MEMORY_WB) &&
- ((end >> PAGE_SHIFT) <= max_pfn_mapped))
- md->virt_addr = (unsigned long)__va(md->phys_addr);
+
+ size = md->num_pages << EFI_PAGE_SHIFT;
+ end = md->phys_addr + size;
+
+ if ((end >> PAGE_SHIFT) <= max_pfn_mapped)
+ va = __va(md->phys_addr);
else
- md->virt_addr = (unsigned long)
- efi_ioremap(md->phys_addr,
- md->num_pages << EFI_PAGE_SHIFT);
- if (!md->virt_addr)
+ va = efi_ioremap(md->phys_addr, size);
+
+ if (md->attribute & EFI_MEMORY_WB)
+ set_memory_uc(md->virt_addr, size);
+
+ md->virt_addr = (u64) (unsigned long) va;
+
+ if (!va) {
printk(KERN_ERR PFX "ioremap of 0x%llX failed!\n",
(unsigned long long)md->phys_addr);
- if ((md->phys_addr <= (unsigned long)efi_phys.systab) &&
- ((unsigned long)efi_phys.systab < end))
- efi.systab = (efi_system_table_t *)(unsigned long)
- (md->virt_addr - md->phys_addr +
- (unsigned long)efi_phys.systab);
+ continue;
+ }
+
+ systab = (u64) (unsigned long) efi_phys.systab;
+ if (md->phys_addr <= systab && systab < end) {
+ systab += md->virt_addr - md->phys_addr;
+ efi.systab = (efi_system_table_t *) (unsigned long) systab;
+ }
}
BUG_ON(!efi.systab);
diff --git a/arch/x86/kernel/efi_64.c b/arch/x86/kernel/efi_64.c
index 674f2379480f..09d5c2330934 100644
--- a/arch/x86/kernel/efi_64.c
+++ b/arch/x86/kernel/efi_64.c
@@ -54,10 +54,10 @@ static void __init early_mapping_set_exec(unsigned long start,
else
set_pte(kpte, __pte((pte_val(*kpte) | _PAGE_NX) & \
__supported_pte_mask));
- if (level == 4)
- start = (start + PMD_SIZE) & PMD_MASK;
- else
+ if (level == PG_LEVEL_4K)
start = (start + PAGE_SIZE) & PAGE_MASK;
+ else
+ start = (start + PMD_SIZE) & PMD_MASK;
}
}
@@ -109,23 +109,23 @@ void __init efi_reserve_bootmem(void)
memmap.nr_map * memmap.desc_size);
}
-void __iomem * __init efi_ioremap(unsigned long offset,
- unsigned long size)
+void __iomem * __init efi_ioremap(unsigned long phys_addr, unsigned long size)
{
static unsigned pages_mapped;
- unsigned long last_addr;
unsigned i, pages;
- last_addr = offset + size - 1;
- offset &= PAGE_MASK;
- pages = (PAGE_ALIGN(last_addr) - offset) >> PAGE_SHIFT;
+ /* phys_addr and size must be page aligned */
+ if ((phys_addr & ~PAGE_MASK) || (size & ~PAGE_MASK))
+ return NULL;
+
+ pages = size >> PAGE_SHIFT;
if (pages_mapped + pages > MAX_EFI_IO_PAGES)
return NULL;
for (i = 0; i < pages; i++) {
__set_fixmap(FIX_EFI_IO_MAP_FIRST_PAGE - pages_mapped,
- offset, PAGE_KERNEL_EXEC_NOCACHE);
- offset += PAGE_SIZE;
+ phys_addr, PAGE_KERNEL);
+ phys_addr += PAGE_SIZE;
pages_mapped++;
}