author    | Paul Mundt <lethal@linux-sh.org> | 2009-04-14 06:29:07 +0900
committer | Paul Mundt <lethal@linux-sh.org> | 2009-04-14 06:29:07 +0900
commit    | f499cae1e59d75d5eb24c23d47cf8986e6032c6d (patch)
tree      | 1af6235c18391212c40116eb90b01eae8938efee /arch/mn10300/include/asm/div64.h
parent    | fc3f55e672e1ed917dd9e215af81939cd3d717da (diff)
parent    | 80a04d3f2f94fb68b5df05e3ac6697130bc3467a (diff)
Merge branch 'master' of git://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux-2.6
Diffstat (limited to 'arch/mn10300/include/asm/div64.h')
-rw-r--r-- | arch/mn10300/include/asm/div64.h | 100
1 files changed, 100 insertions, 0 deletions
diff --git a/arch/mn10300/include/asm/div64.h b/arch/mn10300/include/asm/div64.h
new file mode 100644
index 000000000000..3a8329b3e869
--- /dev/null
+++ b/arch/mn10300/include/asm/div64.h
@@ -0,0 +1,100 @@
+/* MN10300 64-bit division
+ *
+ * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public Licence
+ * as published by the Free Software Foundation; either version
+ * 2 of the Licence, or (at your option) any later version.
+ */
+#ifndef _ASM_DIV64
+#define _ASM_DIV64
+
+#include <linux/types.h>
+
+extern void ____unhandled_size_in_do_div___(void);
+
+/*
+ * divide n by base, leaving the result in n and returning the remainder
+ * - we can do this quite efficiently on the MN10300 by cascading the divides
+ *   through the MDR register
+ */
+#define do_div(n, base)						\
+({								\
+	unsigned __rem = 0;					\
+	if (sizeof(n) <= 4) {					\
+		asm("mov	%1,mdr	\n"			\
+		    "divu	%2,%0	\n"			\
+		    "mov	mdr,%1	\n"			\
+		    : "+r"(n), "=d"(__rem)			\
+		    : "r"(base), "1"(__rem)			\
+		    : "cc"					\
+		    );						\
+	} else if (sizeof(n) <= 8) {				\
+		union {						\
+			unsigned long long l;			\
+			u32 w[2];				\
+		} __quot;					\
+		__quot.l = n;					\
+		asm("mov	%0,mdr	\n"	/* MDR = 0 */	\
+		    "divu	%3,%1	\n"			\
+		    /* __quot.MSL = __div.MSL / base, */	\
+		    /* MDR = MDR:__div.MSL % base */		\
+		    "divu	%3,%2	\n"			\
+		    /* __quot.LSL = MDR:__div.LSL / base, */	\
+		    /* MDR = MDR:__div.LSL % base */		\
+		    "mov	mdr,%0	\n"			\
+		    : "=d"(__rem), "=r"(__quot.w[1]), "=r"(__quot.w[0]) \
+		    : "r"(base), "0"(__rem), "1"(__quot.w[1]),	\
+		      "2"(__quot.w[0])				\
+		    : "cc"					\
+		    );						\
+		n = __quot.l;					\
+	} else {						\
+		____unhandled_size_in_do_div___();		\
+	}							\
+	__rem;							\
+})
+
+/*
+ * do an unsigned 32-bit multiply and divide with intermediate 64-bit product
+ * so as not to lose accuracy
+ * - we use the MDR register to hold the MSW of the product
+ */
+static inline __attribute__((const))
+unsigned __muldiv64u(unsigned val, unsigned mult, unsigned div)
+{
+	unsigned result;
+
+	asm("mulu	%2,%0	\n"	/* MDR:val = val*mult */
+	    "divu	%3,%0	\n"	/* val = MDR:val/div;
+					 * MDR = MDR:val%div */
+	    : "=r"(result)
+	    : "0"(val), "ir"(mult), "r"(div)
+	    );
+
+	return result;
+}
+
+/*
+ * do a signed 32-bit multiply and divide with intermediate 64-bit product so
+ * as not to lose accuracy
+ * - we use the MDR register to hold the MSW of the product
+ */
+static inline __attribute__((const))
+signed __muldiv64s(signed val, signed mult, signed div)
+{
+	signed result;
+
+	asm("mul	%2,%0	\n"	/* MDR:val = val*mult */
+	    "div	%3,%0	\n"	/* val = MDR:val/div;
+					 * MDR = MDR:val%div */
+	    : "=r"(result)
+	    : "0"(val), "ir"(mult), "r"(div)
+	    );
+
+	return result;
+}
+
+#endif /* _ASM_DIV64 */
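
For readers without the MN10300 manual to hand, the two cascaded `divu` instructions in the 64-bit branch of `do_div()` perform an ordinary long-division step: divide the high word by `base`, then carry its remainder in as the high half of the dividend for the low-word divide. The sketch below reproduces that cascade in portable C so the arithmetic can be checked on any host; `div64_by_32_sketch` and the test values are illustrative names only and are not part of the kernel header. Like the kernel's `do_div(n, base)`, it stores the quotient back into `n` and returns the remainder.

```c
#include <stdint.h>
#include <stdio.h>

/*
 * Portable sketch (not kernel code) of the cascaded 64/32 division that
 * the two "divu" instructions above chain through the MDR register.
 */
static uint32_t div64_by_32_sketch(uint64_t *n, uint32_t base)
{
	uint32_t hi = (uint32_t)(*n >> 32);
	uint32_t lo = (uint32_t)*n;

	/* first divide: high word of the quotient, remainder carried on */
	uint32_t q_hi = hi / base;
	uint32_t rem  = hi % base;

	/* second divide: dividend is rem:lo; its quotient fits in 32 bits
	 * because rem < base (the invariant the MDR cascade relies on) */
	uint64_t mid  = ((uint64_t)rem << 32) | lo;
	uint32_t q_lo = (uint32_t)(mid / base);

	rem = (uint32_t)(mid % base);

	*n = ((uint64_t)q_hi << 32) | q_lo;	/* quotient back into n */
	return rem;				/* remainder is the result */
}

int main(void)
{
	uint64_t n = 0x123456789abcdef0ULL;	/* arbitrary test value */
	uint32_t r = div64_by_32_sketch(&n, 1000000000u);

	/* same contract as do_div(): n now holds the quotient */
	printf("quotient=%llu remainder=%u\n", (unsigned long long)n, r);
	return 0;
}
```

The second quotient always fits in 32 bits because the carried-in remainder is strictly less than `base`; that is what lets the hardware chain the two `divu` operations through MDR without overflow, and it is the same reason `__muldiv64u()`/`__muldiv64s()` can keep the most-significant word of the intermediate product in MDR before the final divide.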