author     Zachary Amsden <zach@vmware.com>          2005-09-03 15:56:36 -0700
committer  Linus Torvalds <torvalds@evo.osdl.org>   2005-09-05 00:06:11 -0700
commit     4bb0d3ec3e5b1e9e2399cdc641b3b6521ac9cdaa (patch)
tree       5e8d7646f5c6a2cec990b6d591f230d496b20664 /arch/i386/mm
parent     2a0694d15d55d0deed928786a6393d5e45e37d76 (diff)
[PATCH] i386: inline asm cleanup
i386 inline asm cleanup. Use cr/dr accessor functions.

Also, a potential bugfix. Also, some CR accessors really should be volatile. Reads from CR0 (numeric state may change in an exception handler), writes to CR4 (flipping CR4.TSD) and reads from CR2 (page fault) prevent instruction re-ordering.

I did not add a memory clobber to the CR3 / CR4 / CR0 updates, as it was not there to begin with, and in no case should kernel memory be clobbered, except when doing a TLB flush, which already has a memory clobber.

I noticed that page invalidation does not have a memory clobber. I can't find a bug as a result, but there is definitely potential for a bug here:

	#define __flush_tlb_single(addr) \
		__asm__ __volatile__("invlpg %0": :"m" (*(char *) addr))

Signed-off-by: Zachary Amsden <zach@vmware.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
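[Editor's note] For reference, a minimal sketch of what such accessors can look like. The definitions this patch actually introduces live in include/asm-i386/processor.h, which is outside the arch/i386/mm diffstat shown below; only the names read_cr2() and read_cr3() are taken from the diff, the bodies here are illustrative:

	/* Illustrative sketch only -- not the patch's exact definitions.
	 * read_cr2() is volatile: CR2 holds the faulting address set by
	 * hardware on a page fault, so the read must not be reordered. */
	#define read_cr2() ({					\
		unsigned long __cr2;				\
		__asm__ __volatile__("movl %%cr2,%0"		\
				     : "=r" (__cr2));		\
		__cr2;						\
	})

	/* Per the message above, read_cr3() needs no volatile: CR3 only
	 * changes under explicit kernel control. */
	#define read_cr3() ({					\
		unsigned long __cr3;				\
		__asm__("movl %%cr3,%0" : "=r" (__cr3));	\
		__cr3;						\
	})

The potential invlpg bug the message points at would presumably be fixed by adding a "memory" clobber; a hypothetical fix, not part of this patch:

	/* Hypothetical fix: the "memory" clobber keeps the compiler from
	 * reordering stores to the page being invalidated past the invlpg. */
	#define __flush_tlb_single(addr) \
		__asm__ __volatile__("invlpg %0" : : "m" (*(char *) addr) : "memory")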
Diffstat (limited to 'arch/i386/mm')
-rw-r--r--	arch/i386/mm/fault.c	6
-rw-r--r--	arch/i386/mm/pageattr.c	2
2 files changed, 4 insertions(+), 4 deletions(-)
diff --git a/arch/i386/mm/fault.c b/arch/i386/mm/fault.c
index 61d9e34af5a6..411b8500ad1b 100644
--- a/arch/i386/mm/fault.c
+++ b/arch/i386/mm/fault.c
@@ -233,7 +233,7 @@ fastcall void do_page_fault(struct pt_regs *regs, unsigned long error_code)
 	int write, si_code;
 
 	/* get the address */
-	__asm__("movl %%cr2,%0":"=r" (address));
+	address = read_cr2();
 
 	if (notify_die(DIE_PAGE_FAULT, "page fault", regs, error_code, 14,
 					SIGSEGV) == NOTIFY_STOP)
@@ -453,7 +453,7 @@ no_context:
printk(" at virtual address %08lx\n",address);
printk(KERN_ALERT " printing eip:\n");
printk("%08lx\n", regs->eip);
- asm("movl %%cr3,%0":"=r" (page));
+ page = read_cr3();
page = ((unsigned long *) __va(page))[address >> 22];
printk(KERN_ALERT "*pde = %08lx\n", page);
/*
@@ -526,7 +526,7 @@ vmalloc_fault:
 		pmd_t *pmd, *pmd_k;
 		pte_t *pte_k;
 
-		asm("movl %%cr3,%0":"=r" (pgd_paddr));
+		pgd_paddr = read_cr3();
 		pgd = index + (pgd_t *)__va(pgd_paddr);
 		pgd_k = init_mm.pgd + index;
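
[Editor's note] The no_context hunk above walks the page directory by hand. A hypothetical standalone rendering of that lookup (not part of the patch; on 2-level i386 paging each of the 1024 page-directory entries maps 4 MB, so the top 10 bits of the address select the PDE):

	/* Hypothetical helper for illustration: fetch the PDE for a
	 * faulting address. read_cr3() returns the physical base of the
	 * page directory, __va() maps it into the kernel's linear
	 * mapping, and address >> 22 indexes the 1024-entry directory. */
	static unsigned long pde_for_address(unsigned long address)
	{
		unsigned long pgd_paddr = read_cr3();
		unsigned long *pgd = (unsigned long *) __va(pgd_paddr);

		return pgd[address >> 22];
	}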
diff --git a/arch/i386/mm/pageattr.c b/arch/i386/mm/pageattr.c
index cb3da6baa704..bce06a79eafa 100644
--- a/arch/i386/mm/pageattr.c
+++ b/arch/i386/mm/pageattr.c
@@ -62,7 +62,7 @@ static void flush_kernel_map(void *dummy)
 {
 	/* Could use CLFLUSH here if the CPU supports it (Hammer,P4) */
 	if (boot_cpu_data.x86_model >= 4)
-		asm volatile("wbinvd":::"memory");
+		wbinvd();
 	/* Flush all to work around Errata in early athlons regarding
 	 * large page flushing.
 	 */
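
[Editor's note] The wbinvd() helper keeps the semantics of the open-coded asm it replaces. A sketch of its likely shape (the real definition lives in include/asm-i386/system.h, which this diff does not touch):

	/* Sketch only: write back and invalidate all caches. The
	 * "memory" clobber from the open-coded version is preserved so
	 * the compiler cannot keep values cached across the flush. */
	#define wbinvd() \
		__asm__ __volatile__ ("wbinvd" : : : "memory")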