author     Ingo Molnar <mingo@elte.hu>    2008-01-30 13:34:07 +0100
committer  Ingo Molnar <mingo@elte.hu>    2008-01-30 13:34:07 +0100
commit     f62d0f008e889915c93631c04d4c7d871f05bea7 (patch)
tree       36eb08ed99de278c77ef58df06b282736ff19b08
parent     d806e5ee20f62a892b09aa59559f143d465285db (diff)
x86: cpa: set_memory_notpresent()
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
-rw-r--r--   arch/x86/mm/init_64.c        | 17
-rw-r--r--   arch/x86/mm/pageattr.c       | 34
-rw-r--r--   include/asm-x86/cacheflush.h |  1
3 files changed, 42 insertions, 10 deletions
diff --git a/arch/x86/mm/init_64.c b/arch/x86/mm/init_64.c
index 05bb12db0b09..4757be7b5e55 100644
--- a/arch/x86/mm/init_64.c
+++ b/arch/x86/mm/init_64.c
@@ -559,8 +559,21 @@ void free_init_pages(char *what, unsigned long begin, unsigned long end)
 		free_page(addr);
 		totalram_pages++;
 	}
-	if (addr > __START_KERNEL_map)
-		global_flush_tlb();
+#ifdef CONFIG_DEBUG_RODATA
+	/*
+	 * This will make the __init pages not present and
+	 * not executable, so that any attempt to use a
+	 * __init function from now on will fault immediately
+	 * rather than spuriously later when memory gets reused.
+	 *
+	 * We only do this for DEBUG_RODATA to not break up the
+	 * 2Mb kernel mapping just for this debug feature.
+	 */
+	if (begin >= __START_KERNEL_map) {
+		set_memory_np(begin, (end - begin)/PAGE_SIZE);
+		set_memory_nx(begin, (end - begin)/PAGE_SIZE);
+	}
+#endif
 }
 
 void free_initmem(void)
diff --git a/arch/x86/mm/pageattr.c b/arch/x86/mm/pageattr.c
index fcd96125c5ae..e5910ac37e59 100644
--- a/arch/x86/mm/pageattr.c
+++ b/arch/x86/mm/pageattr.c
@@ -357,8 +357,6 @@ int change_page_attr_clear(unsigned long addr, int numpages, pgprot_t prot)
 	return change_page_attr_addr(addr, numpages, prot);
 }
 
-
-
 int set_memory_uc(unsigned long addr, int numpages)
 {
 	pgprot_t uncached;
@@ -402,7 +400,6 @@ int set_memory_ro(unsigned long addr, int numpages)
 	pgprot_val(rw) = _PAGE_RW;
 	return change_page_attr_clear(addr, numpages, rw);
 }
-EXPORT_SYMBOL(set_memory_ro);
 
 int set_memory_rw(unsigned long addr, int numpages)
 {
@@ -411,7 +408,14 @@ int set_memory_rw(unsigned long addr, int numpages)
 	pgprot_val(rw) = _PAGE_RW;
 	return change_page_attr_set(addr, numpages, rw);
 }
-EXPORT_SYMBOL(set_memory_rw);
+
+int set_memory_np(unsigned long addr, int numpages)
+{
+	pgprot_t present;
+
+	pgprot_val(present) = _PAGE_PRESENT;
+	return change_page_attr_clear(addr, numpages, present);
+}
 
 int set_pages_uc(struct page *page, int numpages)
 {
@@ -461,7 +465,6 @@ int set_pages_ro(struct page *page, int numpages)
 	pgprot_val(rw) = _PAGE_RW;
 	return change_page_attr_clear(addr, numpages, rw);
 }
-EXPORT_SYMBOL(set_pages_ro);
 
 int set_pages_rw(struct page *page, int numpages)
 {
@@ -471,8 +474,6 @@ int set_pages_rw(struct page *page, int numpages)
 	pgprot_val(rw) = _PAGE_RW;
 	return change_page_attr_set(addr, numpages, rw);
 }
-EXPORT_SYMBOL(set_pages_rw);
-
 
 void clflush_cache_range(void *addr, int size)
 {
@@ -503,6 +504,20 @@ void global_flush_tlb(void)
 EXPORT_SYMBOL(global_flush_tlb);
 
 #ifdef CONFIG_DEBUG_PAGEALLOC
+
+static int __set_pages_p(struct page *page, int numpages)
+{
+	unsigned long addr = (unsigned long)page_address(page);
+	return change_page_attr_set(addr, numpages,
+				    __pgprot(_PAGE_PRESENT | _PAGE_RW));
+}
+
+static int __set_pages_np(struct page *page, int numpages)
+{
+	unsigned long addr = (unsigned long)page_address(page);
+	return change_page_attr_clear(addr, numpages, __pgprot(_PAGE_PRESENT));
+}
+
 void kernel_map_pages(struct page *page, int numpages, int enable)
 {
 	if (PageHighMem(page))
@@ -522,7 +537,10 @@ void kernel_map_pages(struct page *page, int numpages, int enable)
 	 * The return value is ignored - the calls cannot fail,
 	 * large pages are disabled at boot time:
 	 */
-	change_page_attr(page, numpages, enable ? PAGE_KERNEL : __pgprot(0));
+	if (enable)
+		__set_pages_p(page, numpages);
+	else
+		__set_pages_np(page, numpages);
 
 	/*
 	 * We should perform an IPI and flush all tlbs,
diff --git a/include/asm-x86/cacheflush.h b/include/asm-x86/cacheflush.h
index e79159bc0987..a95afaf1240d 100644
--- a/include/asm-x86/cacheflush.h
+++ b/include/asm-x86/cacheflush.h
@@ -42,6 +42,7 @@ int set_memory_x(unsigned long addr, int numpages);
 int set_memory_nx(unsigned long addr, int numpages);
 int set_memory_ro(unsigned long addr, int numpages);
 int set_memory_rw(unsigned long addr, int numpages);
+int set_memory_np(unsigned long addr, int numpages);
 
 void clflush_cache_range(void *addr, int size);
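
As an illustration of the interface this patch adds, the sketch below pairs the new set_memory_np() with the existing set_memory_nx() to retire a kernel-virtual range, mirroring the DEBUG_RODATA path added to free_init_pages() above. It is a minimal sketch, not part of the patch: the helper name retire_kernel_range() is hypothetical, and only set_memory_np(), set_memory_nx() and PAGE_SIZE come from this patch and the existing x86 headers.

/*
 * Illustrative sketch only -- assumes the set_memory_np()/set_memory_nx()
 * declarations from include/asm-x86/cacheflush.h as extended above.
 */
#include <asm/cacheflush.h>
#include <asm/page.h>

/*
 * Hypothetical helper: make a kernel-virtual range not present and
 * non-executable, so any later use of it faults immediately rather
 * than silently touching memory that has been reused.
 */
static void retire_kernel_range(unsigned long begin, unsigned long end)
{
	int numpages = (end - begin) / PAGE_SIZE;

	set_memory_np(begin, numpages);	/* clear _PAGE_PRESENT */
	set_memory_nx(begin, numpages);	/* set _PAGE_NX */
}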