author | Ingo Molnar <mingo@elte.hu> | 2008-02-18 08:53:56 +0100
---|---|---
committer | Ingo Molnar <mingo@elte.hu> | 2008-04-17 17:40:47 +0200
commit | ca5d3f14915f5f8db75f7b0c198c0c154947fc5e (patch) |
tree | d5cef00bdd5b56adebcecfecf55636908358fbaf /arch/x86/lib |
parent | 04aaa7ba096c707a8df337b29303f1a5a65f0462 (diff) |
x86: clean up mmx_32.c
checkpatch.pl --file cleanups:
before:
total: 74 errors, 3 warnings, 386 lines checked
after:
total: 0 errors, 0 warnings, 377 lines checked
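(The totals above come from a whole-file checkpatch run; a reproduction sketch, assuming the usual in-tree script path and running from the top of the kernel tree:

```sh
# Whole-file checkpatch run against the cleaned source; after the
# patch this should report 0 errors, 0 warnings.
./scripts/checkpatch.pl --file arch/x86/lib/mmx_32.c
```
)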
no code changed:
arch/x86/lib/mmx_32.o:
text data bss dec hex filename
1323 0 8 1331 533 mmx_32.o.before
1323 0 8 1331 533 mmx_32.o.after
md5:
4cc39f1017dc40a5ebf02ce0ff7312bc mmx_32.o.before.asm
4cc39f1017dc40a5ebf02ce0ff7312bc mmx_32.o.after.asm
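(The size and md5 figures above can be reproduced along these lines; a sketch, assuming the before/after objects were saved as mmx_32.o.before and mmx_32.o.after — the exact objdump invocation used for the .asm files is not recorded in the commit:

```sh
# Section sizes must be identical (the text/data/bss table above).
size mmx_32.o.before mmx_32.o.after

# Disassemble both objects; matching md5 sums show the cleanup
# changed no generated code.
objdump -d mmx_32.o.before > mmx_32.o.before.asm
objdump -d mmx_32.o.after  > mmx_32.o.after.asm
md5sum mmx_32.o.before.asm mmx_32.o.after.asm
```
)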
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Diffstat (limited to 'arch/x86/lib')
-rw-r--r-- | arch/x86/lib/mmx_32.c | 197 |
1 file changed, 94 insertions(+), 103 deletions(-)
```diff
diff --git a/arch/x86/lib/mmx_32.c b/arch/x86/lib/mmx_32.c
index cc9b4a4450f3..c9f2d9ba8dd8 100644
--- a/arch/x86/lib/mmx_32.c
+++ b/arch/x86/lib/mmx_32.c
@@ -1,32 +1,30 @@
-#include <linux/types.h>
-#include <linux/string.h>
-#include <linux/sched.h>
-#include <linux/hardirq.h>
-#include <linux/module.h>
-
-#include <asm/asm.h>
-#include <asm/i387.h>
-
-
 /*
  *	MMX 3DNow! library helper functions
  *
  *	To do:
- *	We can use MMX just for prefetch in IRQ's. This may be a win. 
+ *	We can use MMX just for prefetch in IRQ's. This may be a win.
  *		(reported so on K6-III)
  *	We should use a better code neutral filler for the short jump
  *		leal ebx. [ebx] is apparently best for K6-2, but Cyrix ??
  *	We also want to clobber the filler register so we don't get any
- *		register forwarding stalls on the filler. 
+ *		register forwarding stalls on the filler.
  *
  *	Add *user handling. Checksums are not a win with MMX on any CPU
  *	tested so far for any MMX solution figured.
  *
- *	22/09/2000 - Arjan van de Ven 
- *		Improved for non-egineering-sample Athlons 
+ *	22/09/2000 - Arjan van de Ven
+ *		Improved for non-egineering-sample Athlons
  *
  */
-
+#include <linux/hardirq.h>
+#include <linux/string.h>
+#include <linux/module.h>
+#include <linux/sched.h>
+#include <linux/types.h>
+
+#include <asm/i387.h>
+#include <asm/asm.h>
+
 void *_mmx_memcpy(void *to, const void *from, size_t len)
 {
 	void *p;
@@ -51,12 +49,10 @@ void *_mmx_memcpy(void *to, const void *from, size_t len)
 		"3: movw $0x1AEB, 1b\n"	/* jmp on 26 bytes */
 		"   jmp 2b\n"
 		".previous\n"
-		_ASM_EXTABLE(1b,3b)
-		: : "r" (from) );
-
-
-	for(; i>5; i--)
-	{
+			_ASM_EXTABLE(1b, 3b)
+			: : "r" (from));
+
+	for ( ; i > 5; i--) {
 		__asm__ __volatile__ (
 		"1: prefetch 320(%0)\n"
 		"2: movq (%0), %%mm0\n"
@@ -79,14 +75,14 @@ void *_mmx_memcpy(void *to, const void *from, size_t len)
 		"3: movw $0x05EB, 1b\n"	/* jmp on 5 bytes */
 		"   jmp 2b\n"
 		".previous\n"
-		_ASM_EXTABLE(1b,3b)
-		: : "r" (from), "r" (to) : "memory");
-		from+=64;
-		to+=64;
+			_ASM_EXTABLE(1b, 3b)
+			: : "r" (from), "r" (to) : "memory");
+
+		from += 64;
+		to += 64;
 	}
 
-	for(; i>0; i--)
-	{
+	for ( ; i > 0; i--) {
 		__asm__ __volatile__ (
 		"   movq (%0), %%mm0\n"
 		"   movq 8(%0), %%mm1\n"
@@ -104,17 +100,20 @@ void *_mmx_memcpy(void *to, const void *from, size_t len)
 		"   movq %%mm1, 40(%1)\n"
 		"   movq %%mm2, 48(%1)\n"
 		"   movq %%mm3, 56(%1)\n"
-		: : "r" (from), "r" (to) : "memory");
-		from+=64;
-		to+=64;
+			: : "r" (from), "r" (to) : "memory");
+
+		from += 64;
+		to += 64;
 	}
 	/*
-	 * Now do the tail of the block
+	 * Now do the tail of the block:
 	 */
-	__memcpy(to, from, len&63);
+	__memcpy(to, from, len & 63);
 	kernel_fpu_end();
+
 	return p;
 }
+EXPORT_SYMBOL(_mmx_memcpy);
 
 #ifdef CONFIG_MK7
 
@@ -128,13 +127,12 @@ static void fast_clear_page(void *page)
 	int i;
 
 	kernel_fpu_begin();
-	
+
 	__asm__ __volatile__ (
 		"  pxor %%mm0, %%mm0\n" : :
 	);
 
-	for(i=0;i<4096/64;i++)
-	{
+	for (i = 0; i < 4096/64; i++) {
 		__asm__ __volatile__ (
 		"  movntq %%mm0, (%0)\n"
 		"  movntq %%mm0, 8(%0)\n"
@@ -145,14 +143,15 @@ static void fast_clear_page(void *page)
 		"  movntq %%mm0, 48(%0)\n"
 		"  movntq %%mm0, 56(%0)\n"
 		: : "r" (page) : "memory");
-		page+=64;
+		page += 64;
 	}
-	/* since movntq is weakly-ordered, a "sfence" is needed to become
-	 * ordered again.
+
+	/*
+	 * Since movntq is weakly-ordered, a "sfence" is needed to become
+	 * ordered again:
 	 */
-	__asm__ __volatile__ (
-		"  sfence \n" : :
-	);
+	__asm__ __volatile__("sfence\n"::);
+
 	kernel_fpu_end();
 }
 
@@ -162,10 +161,11 @@ static void fast_copy_page(void *to, void *from)
 
 	kernel_fpu_begin();
 
-	/* maybe the prefetch stuff can go before the expensive fnsave...
+	/*
+	 * maybe the prefetch stuff can go before the expensive fnsave...
 	 * but that is for later. -AV
 	 */
-	__asm__ __volatile__ (
+	__asm__ __volatile__(
 		"1: prefetch (%0)\n"
 		"   prefetch 64(%0)\n"
 		"   prefetch 128(%0)\n"
@@ -176,11 +176,9 @@ static void fast_copy_page(void *to, void *from)
 		"3: movw $0x1AEB, 1b\n"	/* jmp on 26 bytes */
 		"   jmp 2b\n"
 		".previous\n"
-		_ASM_EXTABLE(1b,3b)
-		: : "r" (from) );
+	_ASM_EXTABLE(1b, 3b) : : "r" (from));
 
-	for(i=0; i<(4096-320)/64; i++)
-	{
+	for (i = 0; i < (4096-320)/64; i++) {
 		__asm__ __volatile__ (
 		"1: prefetch 320(%0)\n"
 		"2: movq (%0), %%mm0\n"
@@ -203,13 +201,13 @@ static void fast_copy_page(void *to, void *from)
 		"3: movw $0x05EB, 1b\n"	/* jmp on 5 bytes */
 		"   jmp 2b\n"
 		".previous\n"
-		_ASM_EXTABLE(1b,3b)
-		: : "r" (from), "r" (to) : "memory");
-		from+=64;
-		to+=64;
+	_ASM_EXTABLE(1b, 3b) : : "r" (from), "r" (to) : "memory");
+
+		from += 64;
+		to += 64;
 	}
-	for(i=(4096-320)/64; i<4096/64; i++)
-	{
+
+	for (i = (4096-320)/64; i < 4096/64; i++) {
 		__asm__ __volatile__ (
 		"2: movq (%0), %%mm0\n"
 		"   movntq %%mm0, (%1)\n"
@@ -227,37 +225,34 @@ static void fast_copy_page(void *to, void *from)
 		"   movntq %%mm6, 48(%1)\n"
 		"   movq 56(%0), %%mm7\n"
 		"   movntq %%mm7, 56(%1)\n"
-		: : "r" (from), "r" (to) : "memory");
-		from+=64;
-		to+=64;
+			: : "r" (from), "r" (to) : "memory");
+		from += 64;
+		to += 64;
 	}
-	/* since movntq is weakly-ordered, a "sfence" is needed to become
-	 * ordered again.
+
+	/*
+	 * Since movntq is weakly-ordered, a "sfence" is needed to become
+	 * ordered again:
 	 */
-	__asm__ __volatile__ (
-		"  sfence \n" : :
-	);
+	__asm__ __volatile__("sfence \n"::);
 	kernel_fpu_end();
 }
 
-#else
+#else /* CONFIG_MK7 */
 
 /*
  *	Generic MMX implementation without K7 specific streaming
  */
-
 static void fast_clear_page(void *page)
 {
 	int i;
-	
+
 	kernel_fpu_begin();
-	
+
 	__asm__ __volatile__ (
 		"  pxor %%mm0, %%mm0\n" : :
 	);
 
-	for(i=0;i<4096/128;i++)
-	{
+	for (i = 0; i < 4096/128; i++) {
 		__asm__ __volatile__ (
 		"  movq %%mm0, (%0)\n"
 		"  movq %%mm0, 8(%0)\n"
@@ -275,8 +270,8 @@ static void fast_clear_page(void *page)
 		"  movq %%mm0, 104(%0)\n"
 		"  movq %%mm0, 112(%0)\n"
 		"  movq %%mm0, 120(%0)\n"
-		: : "r" (page) : "memory");
-		page+=128;
+			: : "r" (page) : "memory");
+		page += 128;
 	}
 
 	kernel_fpu_end();
@@ -285,8 +280,7 @@ static void fast_clear_page(void *page)
 static void fast_copy_page(void *to, void *from)
 {
 	int i;
-
-
+
 	kernel_fpu_begin();
 
 	__asm__ __volatile__ (
@@ -300,11 +294,9 @@ static void fast_copy_page(void *to, void *from)
 		"3: movw $0x1AEB, 1b\n"	/* jmp on 26 bytes */
 		"   jmp 2b\n"
 		".previous\n"
-		_ASM_EXTABLE(1b,3b)
-		: : "r" (from) );
+	_ASM_EXTABLE(1b, 3b) : : "r" (from));
 
-	for(i=0; i<4096/64; i++)
-	{
+	for (i = 0; i < 4096/64; i++) {
 		__asm__ __volatile__ (
 		"1: prefetch 320(%0)\n"
 		"2: movq (%0), %%mm0\n"
@@ -327,60 +319,59 @@ static void fast_copy_page(void *to, void *from)
 		"3: movw $0x05EB, 1b\n"	/* jmp on 5 bytes */
 		"   jmp 2b\n"
 		".previous\n"
-		_ASM_EXTABLE(1b,3b)
-		: : "r" (from), "r" (to) : "memory");
-		from+=64;
-		to+=64;
+			_ASM_EXTABLE(1b, 3b)
+			: : "r" (from), "r" (to) : "memory");
+
+		from += 64;
+		to += 64;
 	}
 
 	kernel_fpu_end();
 }
-
-#endif
+#endif /* !CONFIG_MK7 */
 
 /*
- * Favour MMX for page clear and copy.
+ * Favour MMX for page clear and copy:
  */
-
-static void slow_zero_page(void * page)
+static void slow_zero_page(void *page)
 {
 	int d0, d1;
-	__asm__ __volatile__( \
-		"cld\n\t" \
-		"rep ; stosl" \
-		: "=&c" (d0), "=&D" (d1)
-		:"a" (0),"1" (page),"0" (1024)
-		:"memory");
+
+	__asm__ __volatile__(
+		"cld\n\t"
+		"rep ; stosl"
+
+			: "=&c" (d0), "=&D" (d1)
+			:"a" (0), "1" (page), "0" (1024)
+			:"memory");
 }
-
-void mmx_clear_page(void * page)
+
+void mmx_clear_page(void *page)
 {
-	if(unlikely(in_interrupt()))
+	if (unlikely(in_interrupt()))
 		slow_zero_page(page);
 	else
 		fast_clear_page(page);
 }
+EXPORT_SYMBOL(mmx_clear_page);
 
 static void slow_copy_page(void *to, void *from)
 {
 	int d0, d1, d2;
-	__asm__ __volatile__( \
-		"cld\n\t" \
-		"rep ; movsl" \
-		: "=&c" (d0), "=&D" (d1), "=&S" (d2) \
-		: "0" (1024),"1" ((long) to),"2" ((long) from) \
+
+	__asm__ __volatile__(
+		"cld\n\t"
+		"rep ; movsl"
+		: "=&c" (d0), "=&D" (d1), "=&S" (d2)
+		: "0" (1024), "1" ((long) to), "2" ((long) from)
 		: "memory");
 }
-
 void mmx_copy_page(void *to, void *from)
 {
-	if(unlikely(in_interrupt()))
+	if (unlikely(in_interrupt()))
 		slow_copy_page(to, from);
 	else
 		fast_copy_page(to, from);
 }
-
-EXPORT_SYMBOL(_mmx_memcpy);
-EXPORT_SYMBOL(mmx_clear_page);
 EXPORT_SYMBOL(mmx_copy_page);
```