| author | Jeff Dike <jdike@addtoit.com> | 2007-10-16 01:27:00 -0700 |
|---|---|---|
| committer | Linus Torvalds <torvalds@woody.linux-foundation.org> | 2007-10-16 09:43:05 -0700 |
| commit | ba180fd437156f7fd8cfb2fdd021d949eeef08d6 (patch) | |
| tree | b9f38b9cdd7a5b1aacf00341d1948314663c5871 /arch/um/sys-i386 | |
| parent | 77bf4400319db9d2a8af6b00c2be6faa0f3d07cb (diff) | |
uml: style fixes pass 3
Formatting changes in the files which have been changed in the course
of folding foo_skas functions into their callers. These include:
- copyright updates
- header file trimming
- style fixes
- adding severity to printks
These changes should be entirely non-functional.
Signed-off-by: Jeff Dike <jdike@linux.intel.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
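
To make the "adding severity to printks" item concrete, here is a minimal before/after sketch of the pattern the series applies. The helper function name is hypothetical and exists only to keep the snippet self-contained; the headers and the UM_KERN_ERR constant are the ones the patched bugs.c below actually uses, so treat this as a sketch of the idiom rather than code taken from the patch.

```c
/*
 * Sketch only; not part of the patch. It assumes the context of UML's
 * userspace-side objects, which is where bugs.c is built.
 */
#include "kern_constants.h"	/* UM_KERN_ERR, UM_KERN_INFO, ... */
#include "user.h"		/* printk() as seen by UML userspace code */

/* Hypothetical helper, used here only to frame the before/after. */
static void report_cpuinfo_read_error(int err)
{
	/*
	 * Before: no severity prefix, so the message goes out at the
	 * default log level.
	 *
	 *	printk("Reading /proc/cpuinfo failed, err = %d\n", err);
	 */

	/* After: an explicit severity is prepended to the format string. */
	printk(UM_KERN_ERR "Reading /proc/cpuinfo failed, err = %d\n", err);
}
```

Kernel-proper files touched by the series (ldt.c, ptrace.c, signal.c) use KERN_ERR and KERN_WARNING directly, while the userspace-side files such as bugs.c use the UM_KERN_* equivalents.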
Diffstat (limited to 'arch/um/sys-i386')
| mode | file | lines changed |
|---|---|---|
| -rw-r--r-- | arch/um/sys-i386/bugs.c | 106 |
| -rw-r--r-- | arch/um/sys-i386/fault.c | 8 |
| -rw-r--r-- | arch/um/sys-i386/ldt.c | 217 |
| -rw-r--r-- | arch/um/sys-i386/ptrace.c | 109 |
| -rw-r--r-- | arch/um/sys-i386/signal.c | 62 |
| -rw-r--r-- | arch/um/sys-i386/tls.c | 74 |
6 files changed, 286 insertions, 290 deletions
diff --git a/arch/um/sys-i386/bugs.c b/arch/um/sys-i386/bugs.c index 25c1165d8093..806895d73bcc 100644 --- a/arch/um/sys-i386/bugs.c +++ b/arch/um/sys-i386/bugs.c @@ -1,18 +1,15 @@ /* - * Copyright (C) 2002 Jeff Dike (jdike@karaya.com) + * Copyright (C) 2002 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com) * Licensed under the GPL */ -#include <unistd.h> #include <errno.h> +#include <signal.h> #include <string.h> -#include <sys/signal.h> -#include <asm/ldt.h> -#include "kern_util.h" -#include "user.h" -#include "sysdep/ptrace.h" -#include "task.h" +#include "kern_constants.h" #include "os.h" +#include "task.h" +#include "user.h" #define MAXTOKEN 64 @@ -30,18 +27,20 @@ static char token(int fd, char *buf, int len, char stop) do { n = os_read_file(fd, ptr, sizeof(*ptr)); c = *ptr++; - if(n != sizeof(*ptr)){ - if(n == 0) + if (n != sizeof(*ptr)) { + if (n == 0) return 0; - printk("Reading /proc/cpuinfo failed, err = %d\n", -n); - if(n < 0) + printk(UM_KERN_ERR "Reading /proc/cpuinfo failed, " + "err = %d\n", -n); + if (n < 0) return n; else return -EIO; } - } while((c != '\n') && (c != stop) && (ptr < end)); + } while ((c != '\n') && (c != stop) && (ptr < end)); - if(ptr == end){ - printk("Failed to find '%c' in /proc/cpuinfo\n", stop); + if (ptr == end) { + printk(UM_KERN_ERR "Failed to find '%c' in /proc/cpuinfo\n", + stop); return -1; } *(ptr - 1) = '\0'; @@ -54,26 +53,27 @@ static int find_cpuinfo_line(int fd, char *key, char *scratch, int len) char c; scratch[len - 1] = '\0'; - while(1){ + while (1) { c = token(fd, scratch, len - 1, ':'); - if(c <= 0) + if (c <= 0) return 0; - else if(c != ':'){ - printk("Failed to find ':' in /proc/cpuinfo\n"); + else if (c != ':') { + printk(UM_KERN_ERR "Failed to find ':' in " + "/proc/cpuinfo\n"); return 0; } - if(!strncmp(scratch, key, strlen(key))) + if (!strncmp(scratch, key, strlen(key))) return 1; do { n = os_read_file(fd, &c, sizeof(c)); - if(n != sizeof(c)){ - printk("Failed to find newline in " + if (n != sizeof(c)) { + printk(UM_KERN_ERR "Failed to find newline in " "/proc/cpuinfo, err = %d\n", -n); return 0; } - } while(c != '\n'); + } while (c != '\n'); } return 0; } @@ -83,46 +83,50 @@ static int check_cpu_flag(char *feature, int *have_it) char buf[MAXTOKEN], c; int fd, len = ARRAY_SIZE(buf); - printk("Checking for host processor %s support...", feature); + printk(UM_KERN_INFO "Checking for host processor %s support...", + feature); fd = os_open_file("/proc/cpuinfo", of_read(OPENFLAGS()), 0); - if(fd < 0){ - printk("Couldn't open /proc/cpuinfo, err = %d\n", -fd); + if (fd < 0) { + printk(UM_KERN_ERR "Couldn't open /proc/cpuinfo, err = %d\n", + -fd); return 0; } *have_it = 0; - if(!find_cpuinfo_line(fd, "flags", buf, ARRAY_SIZE(buf))) + if (!find_cpuinfo_line(fd, "flags", buf, ARRAY_SIZE(buf))) goto out; c = token(fd, buf, len - 1, ' '); - if(c < 0) + if (c < 0) goto out; - else if(c != ' '){ - printk("Failed to find ' ' in /proc/cpuinfo\n"); + else if (c != ' ') { + printk(UM_KERN_ERR "Failed to find ' ' in /proc/cpuinfo\n"); goto out; } - while(1){ + while (1) { c = token(fd, buf, len - 1, ' '); - if(c < 0) + if (c < 0) goto out; - else if(c == '\n') break; + else if (c == '\n') + break; - if(!strcmp(buf, feature)){ + if (!strcmp(buf, feature)) { *have_it = 1; goto out; } } out: - if(*have_it == 0) + if (*have_it == 0) printk("No\n"); - else if(*have_it == 1) + else if (*have_it == 1) printk("Yes\n"); os_close_file(fd); return 1; } -#if 0 /* This doesn't work in tt mode, plus it's causing compilation problems +#if 0 /* + * This doesn't 
work in tt mode, plus it's causing compilation problems * for some people. */ static void disable_lcall(void) @@ -135,8 +139,9 @@ static void disable_lcall(void) ldt.base_addr = 0; ldt.limit = 0; err = modify_ldt(1, &ldt, sizeof(ldt)); - if(err) - printk("Failed to disable lcall7 - errno = %d\n", errno); + if (err) + printk(UM_KERN_ERR "Failed to disable lcall7 - errno = %d\n", + errno); } #endif @@ -151,14 +156,14 @@ void arch_check_bugs(void) { int have_it; - if(os_access("/proc/cpuinfo", OS_ACC_R_OK) < 0){ - printk("/proc/cpuinfo not available - skipping CPU capability " - "checks\n"); + if (os_access("/proc/cpuinfo", OS_ACC_R_OK) < 0) { + printk(UM_KERN_ERR "/proc/cpuinfo not available - skipping CPU " + "capability checks\n"); return; } - if(check_cpu_flag("cmov", &have_it)) + if (check_cpu_flag("cmov", &have_it)) host_has_cmov = have_it; - if(check_cpu_flag("xmm", &have_it)) + if (check_cpu_flag("xmm", &have_it)) host_has_xmm = have_it; } @@ -166,25 +171,26 @@ int arch_handle_signal(int sig, struct uml_pt_regs *regs) { unsigned char tmp[2]; - /* This is testing for a cmov (0x0f 0x4x) instruction causing a + /* + * This is testing for a cmov (0x0f 0x4x) instruction causing a * SIGILL in init. */ - if((sig != SIGILL) || (TASK_PID(get_current()) != 1)) + if ((sig != SIGILL) || (TASK_PID(get_current()) != 1)) return 0; if (copy_from_user_proc(tmp, (void *) UPT_IP(regs), 2)) panic("SIGILL in init, could not read instructions!\n"); - if((tmp[0] != 0x0f) || ((tmp[1] & 0xf0) != 0x40)) + if ((tmp[0] != 0x0f) || ((tmp[1] & 0xf0) != 0x40)) return 0; - if(host_has_cmov == 0) + if (host_has_cmov == 0) panic("SIGILL caused by cmov, which this processor doesn't " "implement, boot a filesystem compiled for older " "processors"); - else if(host_has_cmov == 1) + else if (host_has_cmov == 1) panic("SIGILL caused by cmov, which this processor claims to " "implement"); - else if(host_has_cmov == -1) + else if (host_has_cmov == -1) panic("SIGILL caused by cmov, couldn't tell if this processor " "implements it, boot a filesystem compiled for older " "processors"); diff --git a/arch/um/sys-i386/fault.c b/arch/um/sys-i386/fault.c index cc06a5737df0..d670f68532f4 100644 --- a/arch/um/sys-i386/fault.c +++ b/arch/um/sys-i386/fault.c @@ -1,5 +1,5 @@ /* - * Copyright (C) 2002 - 2004 Jeff Dike (jdike@addtoit.com) + * Copyright (C) 2002 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com) * Licensed under the GPL */ @@ -20,9 +20,9 @@ int arch_fixup(unsigned long address, struct uml_pt_regs *regs) const struct exception_table_entry *fixup; fixup = search_exception_tables(address); - if(fixup != 0){ + if (fixup != 0) { UPT_IP(regs) = fixup->fixup; - return(1); + return 1; } - return(0); + return 0; } diff --git a/arch/um/sys-i386/ldt.c b/arch/um/sys-i386/ldt.c index 906c2a4e7279..0bf7572a80a3 100644 --- a/arch/um/sys-i386/ldt.c +++ b/arch/um/sys-i386/ldt.c @@ -1,34 +1,26 @@ /* - * Copyright (C) 2001, 2002 Jeff Dike (jdike@karaya.com) + * Copyright (C) 2001 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com) * Licensed under the GPL */ -#include "linux/sched.h" -#include "linux/slab.h" -#include "linux/types.h" -#include "linux/errno.h" -#include "linux/spinlock.h" -#include "asm/uaccess.h" -#include "asm/smp.h" -#include "asm/ldt.h" +#include "linux/mm.h" #include "asm/unistd.h" -#include "kern.h" #include "os.h" - -extern int modify_ldt(int func, void *ptr, unsigned long bytecount); - +#include "proc_mm.h" #include "skas.h" #include "skas_ptrace.h" -#include "asm/mmu_context.h" -#include "proc_mm.h" +#include 
"sysdep/tls.h" + +extern int modify_ldt(int func, void *ptr, unsigned long bytecount); long write_ldt_entry(struct mm_id * mm_idp, int func, struct user_desc * desc, void **addr, int done) { long res; - if(proc_mm){ - /* This is a special handling for the case, that the mm to + if (proc_mm) { + /* + * This is a special handling for the case, that the mm to * modify isn't current->active_mm. * If this is called directly by modify_ldt, * (current->active_mm->context.skas.u == mm_idp) @@ -40,12 +32,12 @@ long write_ldt_entry(struct mm_id * mm_idp, int func, struct user_desc * desc, * * Note: I'm unsure: should interrupts be disabled here? */ - if(!current->active_mm || current->active_mm == &init_mm || - mm_idp != ¤t->active_mm->context.skas.id) + if (!current->active_mm || current->active_mm == &init_mm || + mm_idp != ¤t->active_mm->context.skas.id) __switch_mm(mm_idp); } - if(ptrace_ldt) { + if (ptrace_ldt) { struct ptrace_ldt ldt_op = (struct ptrace_ldt) { .func = func, .ptr = desc, @@ -53,7 +45,7 @@ long write_ldt_entry(struct mm_id * mm_idp, int func, struct user_desc * desc, u32 cpu; int pid; - if(!proc_mm) + if (!proc_mm) pid = mm_idp->u.pid; else { cpu = get_cpu(); @@ -62,7 +54,7 @@ long write_ldt_entry(struct mm_id * mm_idp, int func, struct user_desc * desc, res = os_ptrace_ldt(pid, 0, (unsigned long) &ldt_op); - if(proc_mm) + if (proc_mm) put_cpu(); } else { @@ -71,7 +63,7 @@ long write_ldt_entry(struct mm_id * mm_idp, int func, struct user_desc * desc, (sizeof(*desc) + sizeof(long) - 1) & ~(sizeof(long) - 1), addr, &stub_addr); - if(!res){ + if (!res) { unsigned long args[] = { func, (unsigned long)stub_addr, sizeof(*desc), @@ -81,12 +73,13 @@ long write_ldt_entry(struct mm_id * mm_idp, int func, struct user_desc * desc, } } - if(proc_mm){ - /* This is the second part of special handling, that makes + if (proc_mm) { + /* + * This is the second part of special handling, that makes * PTRACE_LDT possible to implement. 
*/ - if(current->active_mm && current->active_mm != &init_mm && - mm_idp != ¤t->active_mm->context.skas.id) + if (current->active_mm && current->active_mm != &init_mm && + mm_idp != ¤t->active_mm->context.skas.id) __switch_mm(¤t->active_mm->context.skas.id); } @@ -102,21 +95,22 @@ static long read_ldt_from_host(void __user * ptr, unsigned long bytecount) .ptr = kmalloc(bytecount, GFP_KERNEL)}; u32 cpu; - if(ptrace_ldt.ptr == NULL) + if (ptrace_ldt.ptr == NULL) return -ENOMEM; - /* This is called from sys_modify_ldt only, so userspace_pid gives + /* + * This is called from sys_modify_ldt only, so userspace_pid gives * us the right number */ cpu = get_cpu(); res = os_ptrace_ldt(userspace_pid[cpu], 0, (unsigned long) &ptrace_ldt); put_cpu(); - if(res < 0) + if (res < 0) goto out; n = copy_to_user(ptr, ptrace_ldt.ptr, res); - if(n != 0) + if (n != 0) res = -EFAULT; out: @@ -143,33 +137,32 @@ static int read_ldt(void __user * ptr, unsigned long bytecount) unsigned long size; uml_ldt_t * ldt = ¤t->mm->context.skas.ldt; - if(!ldt->entry_count) + if (!ldt->entry_count) goto out; - if(bytecount > LDT_ENTRY_SIZE*LDT_ENTRIES) + if (bytecount > LDT_ENTRY_SIZE*LDT_ENTRIES) bytecount = LDT_ENTRY_SIZE*LDT_ENTRIES; err = bytecount; - if(ptrace_ldt){ + if (ptrace_ldt) return read_ldt_from_host(ptr, bytecount); - } down(&ldt->semaphore); - if(ldt->entry_count <= LDT_DIRECT_ENTRIES){ + if (ldt->entry_count <= LDT_DIRECT_ENTRIES) { size = LDT_ENTRY_SIZE*LDT_DIRECT_ENTRIES; - if(size > bytecount) + if (size > bytecount) size = bytecount; - if(copy_to_user(ptr, ldt->u.entries, size)) + if (copy_to_user(ptr, ldt->u.entries, size)) err = -EFAULT; bytecount -= size; ptr += size; } else { - for(i=0; i<ldt->entry_count/LDT_ENTRIES_PER_PAGE && bytecount; - i++){ + for (i=0; i<ldt->entry_count/LDT_ENTRIES_PER_PAGE && bytecount; + i++) { size = PAGE_SIZE; - if(size > bytecount) + if (size > bytecount) size = bytecount; - if(copy_to_user(ptr, ldt->u.pages[i], size)){ + if (copy_to_user(ptr, ldt->u.pages[i], size)) { err = -EFAULT; break; } @@ -179,10 +172,10 @@ static int read_ldt(void __user * ptr, unsigned long bytecount) } up(&ldt->semaphore); - if(bytecount == 0 || err == -EFAULT) + if (bytecount == 0 || err == -EFAULT) goto out; - if(clear_user(ptr, bytecount)) + if (clear_user(ptr, bytecount)) err = -EFAULT; out: @@ -193,15 +186,16 @@ static int read_default_ldt(void __user * ptr, unsigned long bytecount) { int err; - if(bytecount > 5*LDT_ENTRY_SIZE) + if (bytecount > 5*LDT_ENTRY_SIZE) bytecount = 5*LDT_ENTRY_SIZE; err = bytecount; - /* UML doesn't support lcall7 and lcall27. + /* + * UML doesn't support lcall7 and lcall27. * So, we don't really have a default ldt, but emulate * an empty ldt of common host default ldt size. 
*/ - if(clear_user(ptr, bytecount)) + if (clear_user(ptr, bytecount)) err = -EFAULT; return err; @@ -217,52 +211,52 @@ static int write_ldt(void __user * ptr, unsigned long bytecount, int func) void *addr = NULL; err = -EINVAL; - if(bytecount != sizeof(ldt_info)) + if (bytecount != sizeof(ldt_info)) goto out; err = -EFAULT; - if(copy_from_user(&ldt_info, ptr, sizeof(ldt_info))) + if (copy_from_user(&ldt_info, ptr, sizeof(ldt_info))) goto out; err = -EINVAL; - if(ldt_info.entry_number >= LDT_ENTRIES) + if (ldt_info.entry_number >= LDT_ENTRIES) goto out; - if(ldt_info.contents == 3){ + if (ldt_info.contents == 3) { if (func == 1) goto out; if (ldt_info.seg_not_present == 0) goto out; } - if(!ptrace_ldt) - down(&ldt->semaphore); + if (!ptrace_ldt) + down(&ldt->semaphore); err = write_ldt_entry(mm_idp, func, &ldt_info, &addr, 1); - if(err) + if (err) goto out_unlock; - else if(ptrace_ldt) { - /* With PTRACE_LDT available, this is used as a flag only */ - ldt->entry_count = 1; - goto out; - } - - if(ldt_info.entry_number >= ldt->entry_count && - ldt_info.entry_number >= LDT_DIRECT_ENTRIES){ - for(i=ldt->entry_count/LDT_ENTRIES_PER_PAGE; - i*LDT_ENTRIES_PER_PAGE <= ldt_info.entry_number; - i++){ - if(i == 0) + else if (ptrace_ldt) { + /* With PTRACE_LDT available, this is used as a flag only */ + ldt->entry_count = 1; + goto out; + } + + if (ldt_info.entry_number >= ldt->entry_count && + ldt_info.entry_number >= LDT_DIRECT_ENTRIES) { + for (i=ldt->entry_count/LDT_ENTRIES_PER_PAGE; + i*LDT_ENTRIES_PER_PAGE <= ldt_info.entry_number; + i++) { + if (i == 0) memcpy(&entry0, ldt->u.entries, sizeof(entry0)); ldt->u.pages[i] = (struct ldt_entry *) __get_free_page(GFP_KERNEL|__GFP_ZERO); - if(!ldt->u.pages[i]){ + if (!ldt->u.pages[i]) { err = -ENOMEM; /* Undo the change in host */ memset(&ldt_info, 0, sizeof(ldt_info)); write_ldt_entry(mm_idp, 1, &ldt_info, &addr, 1); goto out_unlock; } - if(i == 0) { + if (i == 0) { memcpy(ldt->u.pages[0], &entry0, sizeof(entry0)); memcpy(ldt->u.pages[0]+1, ldt->u.entries+1, @@ -271,17 +265,17 @@ static int write_ldt(void __user * ptr, unsigned long bytecount, int func) ldt->entry_count = (i + 1) * LDT_ENTRIES_PER_PAGE; } } - if(ldt->entry_count <= ldt_info.entry_number) + if (ldt->entry_count <= ldt_info.entry_number) ldt->entry_count = ldt_info.entry_number + 1; - if(ldt->entry_count <= LDT_DIRECT_ENTRIES) + if (ldt->entry_count <= LDT_DIRECT_ENTRIES) ldt_p = ldt->u.entries + ldt_info.entry_number; else ldt_p = ldt->u.pages[ldt_info.entry_number/LDT_ENTRIES_PER_PAGE] + ldt_info.entry_number%LDT_ENTRIES_PER_PAGE; - if(ldt_info.base_addr == 0 && ldt_info.limit == 0 && - (func == 1 || LDT_empty(&ldt_info))){ + if (ldt_info.base_addr == 0 && ldt_info.limit == 0 && + (func == 1 || LDT_empty(&ldt_info))) { ldt_p->a = 0; ldt_p->b = 0; } @@ -332,7 +326,7 @@ static void ldt_get_host_info(void) spin_lock(&host_ldt_lock); - if(host_ldt_entries != NULL){ + if (host_ldt_entries != NULL) { spin_unlock(&host_ldt_lock); return; } @@ -340,49 +334,49 @@ static void ldt_get_host_info(void) spin_unlock(&host_ldt_lock); - for(i = LDT_PAGES_MAX-1, order=0; i; i>>=1, order++); + for (i = LDT_PAGES_MAX-1, order=0; i; i>>=1, order++) + ; ldt = (struct ldt_entry *) __get_free_pages(GFP_KERNEL|__GFP_ZERO, order); - if(ldt == NULL) { - printk("ldt_get_host_info: couldn't allocate buffer for host " - "ldt\n"); + if (ldt == NULL) { + printk(KERN_ERR "ldt_get_host_info: couldn't allocate buffer " + "for host ldt\n"); return; } ret = modify_ldt(0, ldt, (1<<order)*PAGE_SIZE); - if(ret < 0) { - 
printk("ldt_get_host_info: couldn't read host ldt\n"); + if (ret < 0) { + printk(KERN_ERR "ldt_get_host_info: couldn't read host ldt\n"); goto out_free; } - if(ret == 0) { + if (ret == 0) { /* default_ldt is active, simply write an empty entry 0 */ host_ldt_entries = dummy_list; goto out_free; } - for(i=0, size=0; i<ret/LDT_ENTRY_SIZE; i++){ - if(ldt[i].a != 0 || ldt[i].b != 0) + for (i=0, size=0; i<ret/LDT_ENTRY_SIZE; i++) { + if (ldt[i].a != 0 || ldt[i].b != 0) size++; } - if(size < ARRAY_SIZE(dummy_list)) + if (size < ARRAY_SIZE(dummy_list)) host_ldt_entries = dummy_list; else { size = (size + 1) * sizeof(dummy_list[0]); tmp = kmalloc(size, GFP_KERNEL); - if(tmp == NULL) { - printk("ldt_get_host_info: couldn't allocate host ldt " - "list\n"); + if (tmp == NULL) { + printk(KERN_ERR "ldt_get_host_info: couldn't allocate " + "host ldt list\n"); goto out_free; } host_ldt_entries = tmp; } - for(i=0, k=0; i<ret/LDT_ENTRY_SIZE; i++){ - if(ldt[i].a != 0 || ldt[i].b != 0) { + for (i=0, k=0; i<ret/LDT_ENTRY_SIZE; i++) { + if (ldt[i].a != 0 || ldt[i].b != 0) host_ldt_entries[k++] = i; - } } host_ldt_entries[k] = -1; @@ -401,15 +395,15 @@ long init_new_ldt(struct mmu_context_skas * new_mm, struct proc_mm_op copy; - if(!ptrace_ldt) + if (!ptrace_ldt) init_MUTEX(&new_mm->ldt.semaphore); - if(!from_mm){ + if (!from_mm) { memset(&desc, 0, sizeof(desc)); /* * We have to initialize a clean ldt. */ - if(proc_mm) { + if (proc_mm) { /* * If the new mm was created using proc_mm, host's * default-ldt currently is assigned, which normally @@ -417,8 +411,7 @@ long init_new_ldt(struct mmu_context_skas * new_mm, * To remove these gates, we simply write an empty * entry as number 0 to the host. */ - err = write_ldt_entry(&new_mm->id, 1, &desc, - &addr, 1); + err = write_ldt_entry(&new_mm->id, 1, &desc, &addr, 1); } else{ /* @@ -427,11 +420,11 @@ long init_new_ldt(struct mmu_context_skas * new_mm, * will be reset in the following loop */ ldt_get_host_info(); - for(num_p=host_ldt_entries; *num_p != -1; num_p++){ + for (num_p=host_ldt_entries; *num_p != -1; num_p++) { desc.entry_number = *num_p; err = write_ldt_entry(&new_mm->id, 1, &desc, &addr, *(num_p + 1) == -1); - if(err) + if (err) break; } } @@ -440,8 +433,9 @@ long init_new_ldt(struct mmu_context_skas * new_mm, goto out; } - if(proc_mm){ - /* We have a valid from_mm, so we now have to copy the LDT of + if (proc_mm) { + /* + * We have a valid from_mm, so we now have to copy the LDT of * from_mm to new_mm, because using proc_mm an new mm with * an empty/default LDT was created in new_mm() */ @@ -450,27 +444,27 @@ long init_new_ldt(struct mmu_context_skas * new_mm, { .copy_segments = from_mm->id.u.mm_fd } } ); i = os_write_file(new_mm->id.u.mm_fd, ©, sizeof(copy)); - if(i != sizeof(copy)) - printk("new_mm : /proc/mm copy_segments failed, " - "err = %d\n", -i); + if (i != sizeof(copy)) + printk(KERN_ERR "new_mm : /proc/mm copy_segments " + "failed, err = %d\n", -i); } - if(!ptrace_ldt) { - /* Our local LDT is used to supply the data for + if (!ptrace_ldt) { + /* + * Our local LDT is used to supply the data for * modify_ldt(READLDT), if PTRACE_LDT isn't available, * i.e., we have to use the stub for modify_ldt, which * can't handle the big read buffer of up to 64kB. 
*/ down(&from_mm->ldt.semaphore); - if(from_mm->ldt.entry_count <= LDT_DIRECT_ENTRIES){ + if (from_mm->ldt.entry_count <= LDT_DIRECT_ENTRIES) memcpy(new_mm->ldt.u.entries, from_mm->ldt.u.entries, sizeof(new_mm->ldt.u.entries)); - } - else{ + else { i = from_mm->ldt.entry_count / LDT_ENTRIES_PER_PAGE; - while(i-->0){ + while (i-->0) { page = __get_free_page(GFP_KERNEL|__GFP_ZERO); - if (!page){ + if (!page) { err = -ENOMEM; break; } @@ -493,11 +487,10 @@ void free_ldt(struct mmu_context_skas * mm) { int i; - if(!ptrace_ldt && mm->ldt.entry_count > LDT_DIRECT_ENTRIES){ + if (!ptrace_ldt && mm->ldt.entry_count > LDT_DIRECT_ENTRIES) { i = mm->ldt.entry_count / LDT_ENTRIES_PER_PAGE; - while(i-- > 0){ - free_page((long )mm->ldt.u.pages[i]); - } + while (i-- > 0) + free_page((long) mm->ldt.u.pages[i]); } mm->ldt.entry_count = 0; } diff --git a/arch/um/sys-i386/ptrace.c b/arch/um/sys-i386/ptrace.c index dcf0c6b310c8..178f894384f4 100644 --- a/arch/um/sys-i386/ptrace.c +++ b/arch/um/sys-i386/ptrace.c @@ -1,18 +1,11 @@ -/* - * Copyright (C) 2000, 2001, 2002 Jeff Dike (jdike@karaya.com) +/* + * Copyright (C) 2000 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com) * Licensed under the GPL */ -#include <linux/compiler.h> -#include "linux/sched.h" #include "linux/mm.h" -#include "asm/elf.h" -#include "asm/ptrace.h" +#include "linux/sched.h" #include "asm/uaccess.h" -#include "asm/unistd.h" -#include "sysdep/ptrace.h" -#include "sysdep/sigcontext.h" -#include "sysdep/sc.h" extern int arch_switch_tls(struct task_struct *from, struct task_struct *to); @@ -23,7 +16,8 @@ void arch_switch_to(struct task_struct *from, struct task_struct *to) return; if (err != -EINVAL) - printk(KERN_WARNING "arch_switch_tls failed, errno %d, not EINVAL\n", -err); + printk(KERN_WARNING "arch_switch_tls failed, errno %d, " + "not EINVAL\n", -err); else printk(KERN_WARNING "arch_switch_tls failed, errno = EINVAL\n"); } @@ -34,21 +28,21 @@ int is_syscall(unsigned long addr) int n; n = copy_from_user(&instr, (void __user *) addr, sizeof(instr)); - if(n){ + if (n) { /* access_process_vm() grants access to vsyscall and stub, * while copy_from_user doesn't. Maybe access_process_vm is * slow, but that doesn't matter, since it will be called only * in case of singlestepping, if copy_from_user failed. */ n = access_process_vm(current, addr, &instr, sizeof(instr), 0); - if(n != sizeof(instr)) { - printk("is_syscall : failed to read instruction from " - "0x%lx\n", addr); - return(1); + if (n != sizeof(instr)) { + printk(KERN_ERR "is_syscall : failed to read " + "instruction from 0x%lx\n", addr); + return 1; } } /* int 0x80 or sysenter */ - return((instr == 0x80cd) || (instr == 0x340f)); + return (instr == 0x80cd) || (instr == 0x340f); } /* determines which flags the user has access to. 
*/ @@ -92,21 +86,21 @@ int putreg(struct task_struct *child, int regno, unsigned long value) int poke_user(struct task_struct *child, long addr, long data) { - if ((addr & 3) || addr < 0) - return -EIO; - - if (addr < MAX_REG_OFFSET) - return putreg(child, addr, data); - - else if((addr >= offsetof(struct user, u_debugreg[0])) && - (addr <= offsetof(struct user, u_debugreg[7]))){ - addr -= offsetof(struct user, u_debugreg[0]); - addr = addr >> 2; - if((addr == 4) || (addr == 5)) return -EIO; - child->thread.arch.debugregs[addr] = data; - return 0; - } - return -EIO; + if ((addr & 3) || addr < 0) + return -EIO; + + if (addr < MAX_REG_OFFSET) + return putreg(child, addr, data); + else if ((addr >= offsetof(struct user, u_debugreg[0])) && + (addr <= offsetof(struct user, u_debugreg[7]))) { + addr -= offsetof(struct user, u_debugreg[0]); + addr = addr >> 2; + if ((addr == 4) || (addr == 5)) + return -EIO; + child->thread.arch.debugregs[addr] = data; + return 0; + } + return -EIO; } unsigned long getreg(struct task_struct *child, int regno) @@ -129,20 +123,20 @@ unsigned long getreg(struct task_struct *child, int regno) return retval; } +/* read the word at location addr in the USER area. */ int peek_user(struct task_struct *child, long addr, long data) { -/* read the word at location addr in the USER area. */ unsigned long tmp; if ((addr & 3) || addr < 0) return -EIO; tmp = 0; /* Default return condition */ - if(addr < MAX_REG_OFFSET){ + if (addr < MAX_REG_OFFSET) { tmp = getreg(child, addr); } - else if((addr >= offsetof(struct user, u_debugreg[0])) && - (addr <= offsetof(struct user, u_debugreg[7]))){ + else if ((addr >= offsetof(struct user, u_debugreg[0])) && + (addr <= offsetof(struct user, u_debugreg[7]))) { addr -= offsetof(struct user, u_debugreg[0]); addr = addr >> 2; tmp = child->thread.arch.debugregs[addr]; @@ -173,15 +167,15 @@ struct i387_fxsave_struct { static inline unsigned short twd_i387_to_fxsr( unsigned short twd ) { unsigned int tmp; /* to avoid 16 bit prefixes in the code */ - + /* Transform each pair of bits into 01 (valid) or 00 (empty) */ - tmp = ~twd; - tmp = (tmp | (tmp>>1)) & 0x5555; /* 0V0V0V0V0V0V0V0V */ - /* and move the valid bits to the lower byte. */ - tmp = (tmp | (tmp >> 1)) & 0x3333; /* 00VV00VV00VV00VV */ - tmp = (tmp | (tmp >> 2)) & 0x0f0f; /* 0000VVVV0000VVVV */ - tmp = (tmp | (tmp >> 4)) & 0x00ff; /* 00000000VVVVVVVV */ - return tmp; + tmp = ~twd; + tmp = (tmp | (tmp>>1)) & 0x5555; /* 0V0V0V0V0V0V0V0V */ + /* and move the valid bits to the lower byte. 
*/ + tmp = (tmp | (tmp >> 1)) & 0x3333; /* 00VV00VV00VV00VV */ + tmp = (tmp | (tmp >> 2)) & 0x0f0f; /* 0000VVVV0000VVVV */ + tmp = (tmp | (tmp >> 4)) & 0x00ff; /* 00000000VVVVVVVV */ + return tmp; } static inline unsigned long twd_fxsr_to_i387( struct i387_fxsave_struct *fxsave ) @@ -235,7 +229,7 @@ static inline int convert_fxsr_to_user(struct _fpstate __user *buf, return 0; } -static inline int convert_fxsr_from_user(struct pt_regs *regs, +static inline int convert_fxsr_from_user(struct pt_regs *regs, struct _fpstate __user *buf) { return 0; @@ -247,18 +241,20 @@ int get_fpregs(unsigned long buf, struct task_struct *child) err = convert_fxsr_to_user((struct _fpstate __user *) buf, &child->thread.regs); - if(err) return(-EFAULT); - else return(0); + if (err) + return -EFAULT; + return 0; } int set_fpregs(unsigned long buf, struct task_struct *child) { int err; - err = convert_fxsr_from_user(&child->thread.regs, + err = convert_fxsr_from_user(&child->thread.regs, (struct _fpstate __user *) buf); - if(err) return(-EFAULT); - else return(0); + if (err) + return -EFAULT; + return 0; } int get_fpxregs(unsigned long buf, struct task_struct *tsk) @@ -284,7 +280,7 @@ int dump_fpu(struct pt_regs *regs, elf_fpregset_t *fpu) fpu->fos = 0; memcpy(fpu->st_space, (void *) SC_FP_ST(PT_REGS_SC(regs)), sizeof(fpu->st_space)); - return(1); + return 1; } #endif @@ -292,14 +288,3 @@ int dump_fpu(struct pt_regs *regs, elf_fpregset_t *fpu ) { return 1; } - -/* - * Overrides for Emacs so that we follow Linus's tabbing style. - * Emacs will notice this stuff at the end of the file and automatically - * adjust the settings for this buffer only. This must remain at the end - * of the file. - * --------------------------------------------------------------------------- - * Local variables: - * c-file-style: "linux" - * End: - */ diff --git a/arch/um/sys-i386/signal.c b/arch/um/sys-i386/signal.c index c64d48734e3a..c82e5f562ec6 100644 --- a/arch/um/sys-i386/signal.c +++ b/arch/um/sys-i386/signal.c @@ -1,17 +1,13 @@ /* - * Copyright (C) 2004 Jeff Dike (jdike@addtoit.com) + * Copyright (C) 2004 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com) * Licensed under the GPL */ -#include "linux/signal.h" #include "linux/ptrace.h" -#include "asm/current.h" -#include "asm/ucontext.h" -#include "asm/uaccess.h" #include "asm/unistd.h" +#include "asm/uaccess.h" +#include "asm/ucontext.h" #include "frame_kern.h" -#include "sigcontext.h" -#include "registers.h" #include "skas.h" void copy_sc(struct uml_pt_regs *regs, void *from) @@ -39,21 +35,21 @@ void copy_sc(struct uml_pt_regs *regs, void *from) static int copy_sc_from_user(struct pt_regs *regs, struct sigcontext __user *from) { - struct sigcontext sc; + struct sigcontext sc; unsigned long fpregs[HOST_FP_SIZE]; int err; err = copy_from_user(&sc, from, sizeof(sc)); err |= copy_from_user(fpregs, sc.fpstate, sizeof(fpregs)); - if(err) + if (err) return err; copy_sc(®s->regs, &sc); err = restore_fp_registers(userspace_pid[0], fpregs); - if(err < 0) { - printk("copy_sc_from_user_skas - PTRACE_SETFPREGS failed, " - "errno = %d\n", -err); + if (err < 0) { + printk(KERN_ERR "copy_sc_from_user_skas - PTRACE_SETFPREGS " + "failed, errno = %d\n", -err); return err; } @@ -64,7 +60,7 @@ static int copy_sc_to_user(struct sigcontext __user *to, struct _fpstate __user *to_fp, struct pt_regs *regs, unsigned long sp) { - struct sigcontext sc; + struct sigcontext sc; unsigned long fpregs[HOST_FP_SIZE]; struct faultinfo * fi = ¤t->thread.arch.faultinfo; int err; @@ -86,28 +82,29 @@ static int 
copy_sc_to_user(struct sigcontext __user *to, sc.eflags = REGS_EFLAGS(regs->regs.regs); sc.esp_at_signal = regs->regs.regs[UESP]; sc.ss = regs->regs.regs[SS]; - sc.cr2 = fi->cr2; - sc.err = fi->error_code; - sc.trapno = fi->trap_no; + sc.cr2 = fi->cr2; + sc.err = fi->error_code; + sc.trapno = fi->trap_no; err = save_fp_registers(userspace_pid[0], fpregs); - if(err < 0){ - printk("copy_sc_to_user_skas - PTRACE_GETFPREGS failed, " - "errno = %d\n", err); + if (err < 0) { + printk(KERN_ERR "copy_sc_to_user_skas - PTRACE_GETFPREGS " + "failed, errno = %d\n", err); return 1; } to_fp = (to_fp ? to_fp : (struct _fpstate __user *) (to + 1)); sc.fpstate = to_fp; - if(err) + if (err) return err; return copy_to_user(to, &sc, sizeof(sc)) || copy_to_user(to_fp, fpregs, sizeof(fpregs)); } -static int copy_ucontext_to_user(struct ucontext __user *uc, struct _fpstate __user *fp, - sigset_t *set, unsigned long sp) +static int copy_ucontext_to_user(struct ucontext __user *uc, + struct _fpstate __user *fp, sigset_t *set, + unsigned long sp) { int err = 0; @@ -157,7 +154,7 @@ int setup_signal_stack_sc(unsigned long stack_top, int sig, return 1; restorer = frame->retcode; - if(ka->sa.sa_flags & SA_RESTORER) + if (ka->sa.sa_flags & SA_RESTORER) restorer = ka->sa.sa_restorer; /* Update SP now because the page fault handler refuses to extend @@ -189,7 +186,7 @@ int setup_signal_stack_sc(unsigned long stack_top, int sig, err |= __put_user(__NR_sigreturn, (int __user *)(frame->retcode+2)); err |= __put_user(0x80cd, (short __user *)(frame->retcode+6)); - if(err) + if (err) goto err; PT_REGS_SP(regs) = (unsigned long) frame; @@ -222,7 +219,7 @@ int setup_signal_stack_si(unsigned long stack_top, int sig, return 1; restorer = frame->retcode; - if(ka->sa.sa_flags & SA_RESTORER) + if (ka->sa.sa_flags & SA_RESTORER) restorer = ka->sa.sa_restorer; /* See comment above about why this is here */ @@ -247,7 +244,7 @@ int setup_signal_stack_si(unsigned long stack_top, int sig, err |= __put_user(__NR_rt_sigreturn, (int __user *)(frame->retcode+1)); err |= __put_user(0x80cd, (short __user *)(frame->retcode+5)); - if(err) + if (err) goto err; PT_REGS_IP(regs) = (unsigned long) ka->sa.sa_handler; @@ -274,8 +271,8 @@ long sys_sigreturn(struct pt_regs regs) unsigned long __user *extramask = frame->extramask; int sig_size = (_NSIG_WORDS - 1) * sizeof(unsigned long); - if(copy_from_user(&set.sig[0], oldmask, sizeof(set.sig[0])) || - copy_from_user(&set.sig[1], extramask, sig_size)) + if (copy_from_user(&set.sig[0], oldmask, sizeof(set.sig[0])) || + copy_from_user(&set.sig[1], extramask, sig_size)) goto segfault; sigdelsetmask(&set, ~_BLOCKABLE); @@ -285,7 +282,7 @@ long sys_sigreturn(struct pt_regs regs) recalc_sigpending(); spin_unlock_irq(¤t->sighand->siglock); - if(copy_sc_from_user(¤t->thread.regs, sc)) + if (copy_sc_from_user(¤t->thread.regs, sc)) goto segfault; /* Avoid ERESTART handling */ @@ -300,12 +297,13 @@ long sys_sigreturn(struct pt_regs regs) long sys_rt_sigreturn(struct pt_regs regs) { unsigned long sp = PT_REGS_SP(¤t->thread.regs); - struct rt_sigframe __user *frame = (struct rt_sigframe __user *) (sp - 4); + struct rt_sigframe __user *frame = + (struct rt_sigframe __user *) (sp - 4); sigset_t set; struct ucontext __user *uc = &frame->uc; int sig_size = _NSIG_WORDS * sizeof(unsigned long); - if(copy_from_user(&set, &uc->uc_sigmask, sig_size)) + if (copy_from_user(&set, &uc->uc_sigmask, sig_size)) goto segfault; sigdelsetmask(&set, ~_BLOCKABLE); @@ -315,7 +313,7 @@ long sys_rt_sigreturn(struct pt_regs regs) 
recalc_sigpending(); spin_unlock_irq(¤t->sighand->siglock); - if(copy_sc_from_user(¤t->thread.regs, &uc->uc_mcontext)) + if (copy_sc_from_user(¤t->thread.regs, &uc->uc_mcontext)) goto segfault; /* Avoid ERESTART handling */ diff --git a/arch/um/sys-i386/tls.c b/arch/um/sys-i386/tls.c index 6cb7cbd137a0..b02266ab5c55 100644 --- a/arch/um/sys-i386/tls.c +++ b/arch/um/sys-i386/tls.c @@ -3,19 +3,12 @@ * Licensed under the GPL */ -#include "linux/kernel.h" +#include "linux/percpu.h" #include "linux/sched.h" -#include "linux/slab.h" -#include "linux/types.h" #include "asm/uaccess.h" -#include "asm/ptrace.h" -#include "asm/segment.h" -#include "asm/smp.h" -#include "asm/desc.h" -#include "kern.h" -#include "kern_util.h" #include "os.h" #include "skas.h" +#include "sysdep/tls.h" /* * If needed we can detect when it's uninitialized. @@ -74,7 +67,8 @@ static inline void clear_user_desc(struct user_desc* info) /* Postcondition: LDT_empty(info) returns true. */ memset(info, 0, sizeof(*info)); - /* Check the LDT_empty or the i386 sys_get_thread_area code - we obtain + /* + * Check the LDT_empty or the i386 sys_get_thread_area code - we obtain * indeed an empty user_desc. */ info->read_exec_only = 1; @@ -89,10 +83,13 @@ static int load_TLS(int flags, struct task_struct *to) int idx; for (idx = GDT_ENTRY_TLS_MIN; idx < GDT_ENTRY_TLS_MAX; idx++) { - struct uml_tls_struct* curr = &to->thread.arch.tls_array[idx - GDT_ENTRY_TLS_MIN]; + struct uml_tls_struct* curr = + &to->thread.arch.tls_array[idx - GDT_ENTRY_TLS_MIN]; - /* Actually, now if it wasn't flushed it gets cleared and - * flushed to the host, which will clear it.*/ + /* + * Actually, now if it wasn't flushed it gets cleared and + * flushed to the host, which will clear it. + */ if (!curr->present) { if (!curr->flushed) { clear_user_desc(&curr->tls); @@ -116,7 +113,8 @@ out: return ret; } -/* Verify if we need to do a flush for the new process, i.e. if there are any +/* + * Verify if we need to do a flush for the new process, i.e. if there are any * present desc's, only if they haven't been flushed. */ static inline int needs_TLS_update(struct task_struct *task) @@ -125,10 +123,13 @@ static inline int needs_TLS_update(struct task_struct *task) int ret = 0; for (i = GDT_ENTRY_TLS_MIN; i < GDT_ENTRY_TLS_MAX; i++) { - struct uml_tls_struct* curr = &task->thread.arch.tls_array[i - GDT_ENTRY_TLS_MIN]; + struct uml_tls_struct* curr = + &task->thread.arch.tls_array[i - GDT_ENTRY_TLS_MIN]; - /* Can't test curr->present, we may need to clear a descriptor - * which had a value. */ + /* + * Can't test curr->present, we may need to clear a descriptor + * which had a value. + */ if (curr->flushed) continue; ret = 1; @@ -137,7 +138,8 @@ static inline int needs_TLS_update(struct task_struct *task) return ret; } -/* On a newly forked process, the TLS descriptors haven't yet been flushed. So +/* + * On a newly forked process, the TLS descriptors haven't yet been flushed. So * we mark them as such and the first switch_to will do the job. */ void clear_flushed_tls(struct task_struct *task) @@ -145,10 +147,13 @@ void clear_flushed_tls(struct task_struct *task) int i; for (i = GDT_ENTRY_TLS_MIN; i < GDT_ENTRY_TLS_MAX; i++) { - struct uml_tls_struct* curr = &task->thread.arch.tls_array[i - GDT_ENTRY_TLS_MIN]; + struct uml_tls_struct* curr = + &task->thread.arch.tls_array[i - GDT_ENTRY_TLS_MIN]; - /* Still correct to do this, if it wasn't present on the host it - * will remain as flushed as it was. 
*/ + /* + * Still correct to do this, if it wasn't present on the host it + * will remain as flushed as it was. + */ if (!curr->present) continue; @@ -156,23 +161,27 @@ void clear_flushed_tls(struct task_struct *task) } } -/* In SKAS0 mode, currently, multiple guest threads sharing the same ->mm have a +/* + * In SKAS0 mode, currently, multiple guest threads sharing the same ->mm have a * common host process. So this is needed in SKAS0 too. * * However, if each thread had a different host process (and this was discussed * for SMP support) this won't be needed. * * And this will not need be used when (and if) we'll add support to the host - * SKAS patch. */ + * SKAS patch. + */ int arch_switch_tls(struct task_struct *from, struct task_struct *to) { if (!host_supports_tls) return 0; - /* We have no need whatsoever to switch TLS for kernel threads; beyond + /* + * We have no need whatsoever to switch TLS for kernel threads; beyond * that, that would also result in us calling os_set_thread_area with - * userspace_pid[cpu] == 0, which gives an error. */ + * userspace_pid[cpu] == 0, which gives an error. + */ if (likely(to->mm)) return load_TLS(O_FORCE, to); @@ -232,17 +241,20 @@ static int get_tls_entry(struct task_struct* task, struct user_desc *info, int i *info = t->arch.tls_array[idx - GDT_ENTRY_TLS_MIN].tls; out: - /* Temporary debugging check, to make sure that things have been + /* + * Temporary debugging check, to make sure that things have been * flushed. This could be triggered if load_TLS() failed. */ - if (unlikely(task == current && !t->arch.tls_array[idx - GDT_ENTRY_TLS_MIN].flushed)) { + if (unlikely(task == current && + !t->arch.tls_array[idx - GDT_ENTRY_TLS_MIN].flushed)) { printk(KERN_ERR "get_tls_entry: task with pid %d got here " "without flushed TLS.", current->pid); } return 0; clear: - /* When the TLS entry has not been set, the values read to user in the + /* + * When the TLS entry has not been set, the values read to user in the * tls_array are 0 (because it's cleared at boot, see * arch/i386/kernel/head.S:cpu_gdt_table). Emulate that. */ @@ -344,8 +356,10 @@ out: } -/* XXX: This part is probably common to i386 and x86-64. Don't create a common - * file for now, do that when implementing x86-64 support.*/ +/* + * XXX: This part is probably common to i386 and x86-64. Don't create a common + * file for now, do that when implementing x86-64 support. + */ static int __init __setup_host_supports_tls(void) { check_host_supports_tls(&host_supports_tls, &host_gdt_entry_tls_min); |
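
As a closing aside, the host-capability probe that the reformatted bugs.c performs (check_cpu_flag() scanning the "flags" line of /proc/cpuinfo for "cmov" and "xmm") can be summarized in a short standalone sketch. This is not UML code: the real implementation streams the file through UML's os_open_file()/os_read_file() wrappers and its token()/find_cpuinfo_line() helpers and reports via printk(); plain libc and a fixed-size line buffer are used here only so the sketch compiles and runs on its own.

```c
/* Sketch only; mirrors the logic of check_cpu_flag() in bugs.c. */
#include <stdio.h>
#include <string.h>

static int host_has_flag(const char *feature)
{
	char line[4096];	/* bugs.c streams instead; a large buffer keeps this simple */
	FILE *f = fopen("/proc/cpuinfo", "r");

	if (f == NULL)
		return -1;	/* could not tell, like the "couldn't open" case in bugs.c */

	while (fgets(line, sizeof(line), f) != NULL) {
		char *tok;

		/* Only the "flags" line lists the feature tokens. */
		if (strncmp(line, "flags", strlen("flags")) != 0)
			continue;

		for (tok = strtok(line, " \t\n:"); tok != NULL;
		     tok = strtok(NULL, " \t\n:")) {
			if (strcmp(tok, feature) == 0) {
				fclose(f);
				return 1;
			}
		}
		break;	/* the first "flags" line is enough */
	}

	fclose(f);
	return 0;
}

int main(void)
{
	printf("cmov: %s\n", host_has_flag("cmov") == 1 ? "Yes" : "No");
	printf("xmm:  %s\n", host_has_flag("xmm") == 1 ? "Yes" : "No");
	return 0;
}
```

In the patched code, arch_check_bugs() records the results in host_has_cmov and host_has_xmm so that arch_handle_signal() can decide whether a SIGILL in init was caused by an unsupported cmov instruction.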