Diffstat (limited to 'include/asm-frv')
 -rw-r--r--  include/asm-frv/checksum.h   41
 -rw-r--r--  include/asm-frv/device.h      7
 -rw-r--r--  include/asm-frv/highmem.h    27
 3 files changed, 40 insertions, 35 deletions
diff --git a/include/asm-frv/checksum.h b/include/asm-frv/checksum.h
index 42bf0db2287a..9b1689850187 100644
--- a/include/asm-frv/checksum.h
+++ b/include/asm-frv/checksum.h
@@ -26,7 +26,7 @@
*
* it's best to have buff aligned on a 32-bit boundary
*/
-unsigned int csum_partial(const unsigned char * buff, int len, unsigned int sum);
+__wsum csum_partial(const void *buff, int len, __wsum sum);
/*
* the same as csum_partial, but copies from src while it
@@ -35,7 +35,7 @@ unsigned int csum_partial(const unsigned char * buff, int len, unsigned int sum)
* here even more important to align src and dst on a 32-bit (or even
* better 64-bit) boundary
*/
-unsigned int csum_partial_copy(const char *src, char *dst, int len, int sum);
+__wsum csum_partial_copy_nocheck(const void *src, void *dst, int len, __wsum sum);
/*
* the same as csum_partial_copy, but copies from user space.
@@ -43,11 +43,8 @@ unsigned int csum_partial_copy(const char *src, char *dst, int len, int sum);
* here even more important to align src and dst on a 32-bit (or even
* better 64-bit) boundary
*/
-extern unsigned int csum_partial_copy_from_user(const char __user *src, char *dst,
- int len, int sum, int *csum_err);
-
-#define csum_partial_copy_nocheck(src, dst, len, sum) \
- csum_partial_copy((src), (dst), (len), (sum))
+extern __wsum csum_partial_copy_from_user(const void __user *src, void *dst,
+ int len, __wsum sum, int *csum_err);
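
[Note: with the retyped prototype, the running checksum stays __wsum end to end
and a fault during the user copy is reported through *csum_err. A hypothetical
caller, not part of this patch, might look like:

        static __wsum copy_and_csum(const void __user *usrc, void *kdst, int len)
        {
                int err = 0;
                __wsum sum;

                sum = csum_partial_copy_from_user(usrc, kdst, len, 0, &err);
                if (err)
                        return 0;       /* sketch only: real callers must handle the fault */
                return sum;
        }
]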
/*
* This is a version of ip_compute_csum() optimized for IP headers,
@@ -55,7 +52,7 @@ extern unsigned int csum_partial_copy_from_user(const char __user *src, char *dst,
*
*/
static inline
-unsigned short ip_fast_csum(unsigned char *iph, unsigned int ihl)
+__sum16 ip_fast_csum(const void *iph, unsigned int ihl)
{
unsigned int tmp, inc, sum = 0;
@@ -81,13 +78,13 @@ unsigned short ip_fast_csum(unsigned char *iph, unsigned int ihl)
: "icc0", "icc1"
);
- return ~sum;
+ return (__force __sum16)~sum;
}
/*
* Fold a partial checksum
*/
-static inline unsigned int csum_fold(unsigned int sum)
+static inline __sum16 csum_fold(__wsum sum)
{
unsigned int tmp;
@@ -100,16 +97,16 @@ static inline unsigned int csum_fold(unsigned int sum)
: "0"(sum)
);
- return ~sum;
+ return (__force __sum16)~sum;
}
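
[Note: the asm above performs the standard end-around-carry fold of a 32-bit
partial sum into 16 bits, then complements it. A portable C rendering of the
same operation (a sketch with an illustrative name, not FRV code):

        static inline __sum16 csum_fold_c(__wsum csum)
        {
                u32 sum = (__force u32)csum;

                sum = (sum & 0xffff) + (sum >> 16);     /* fold high half into low */
                sum = (sum & 0xffff) + (sum >> 16);     /* absorb the carry-out */
                return (__force __sum16)~sum;
        }
]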
/*
* computes the checksum of the TCP/UDP pseudo-header
* returns a 16-bit checksum, already complemented
*/
-static inline unsigned int
-csum_tcpudp_nofold(unsigned long saddr, unsigned long daddr, unsigned short len,
- unsigned short proto, unsigned int sum)
+static inline __wsum
+csum_tcpudp_nofold(__be32 saddr, __be32 daddr, unsigned short len,
+ unsigned short proto, __wsum sum)
{
asm(" addcc %1,%0,%0,icc0 \n"
" addxcc %2,%0,%0,icc0 \n"
@@ -122,9 +119,9 @@ csum_tcpudp_nofold(unsigned long saddr, unsigned long daddr, unsigned short len,
return sum;
}
-static inline unsigned short int
-csum_tcpudp_magic(unsigned long saddr, unsigned long daddr, unsigned short len,
- unsigned short proto, unsigned int sum)
+static inline __sum16
+csum_tcpudp_magic(__be32 saddr, __be32 daddr, unsigned short len,
+ unsigned short proto, __wsum sum)
{
return csum_fold(csum_tcpudp_nofold(saddr,daddr,len,proto,sum));
}
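
[Note: csum_tcpudp_nofold() adds the pseudo-header fields (addresses, protocol,
length) into the running sum with end-around carry. On a big-endian machine
such as FRV, a portable equivalent (cf. lib/checksum.c; the name here is
illustrative) would be:

        static inline __wsum
        csum_tcpudp_nofold_c(__be32 saddr, __be32 daddr, unsigned short len,
                             unsigned short proto, __wsum sum)
        {
                u64 s = (__force u32)sum;

                s += (__force u32)saddr;
                s += (__force u32)daddr;
                s += proto + len;                       /* big-endian layout */
                s = (s & 0xffffffff) + (s >> 32);       /* fold 64 bits down to 32 */
                s = (s & 0xffffffff) + (s >> 32);
                return (__force __wsum)(u32)s;
        }
]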
@@ -133,12 +130,12 @@ csum_tcpudp_magic(unsigned long saddr, unsigned long daddr, unsigned short len,
* this routine is used for miscellaneous IP-like checksums, mainly
* in icmp.c
*/
-extern unsigned short ip_compute_csum(const unsigned char * buff, int len);
+extern __sum16 ip_compute_csum(const void *buff, int len);
#define _HAVE_ARCH_IPV6_CSUM
-static inline unsigned short int
-csum_ipv6_magic(struct in6_addr *saddr, struct in6_addr *daddr,
- __u32 len, unsigned short proto, unsigned int sum)
+static inline __sum16
+csum_ipv6_magic(const struct in6_addr *saddr, const struct in6_addr *daddr,
+ __u32 len, unsigned short proto, __wsum sum)
{
unsigned long tmp, tmp2;
@@ -177,7 +174,7 @@ csum_ipv6_magic(struct in6_addr *saddr, struct in6_addr *daddr,
: "icc0"
);
- return ~sum;
+ return (__force __sum16)~sum;
}
#endif /* _ASM_CHECKSUM_H */
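
[Note: taken together, the annotations let sparse track a checksum across a
whole computation: __wsum from csum_partial() feeds csum_tcpudp_magic(), which
folds the pseudo-header in and returns the final __sum16. A hypothetical UDP
example (udp_csum() is illustrative, not from the patch; assumes struct udphdr
from <linux/udp.h>):

        static __sum16 udp_csum(__be32 saddr, __be32 daddr,
                                const struct udphdr *uh, int len)
        {
                __wsum csum = csum_partial(uh, len, 0);

                return csum_tcpudp_magic(saddr, daddr, len, IPPROTO_UDP, csum);
        }
]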
diff --git a/include/asm-frv/device.h b/include/asm-frv/device.h
new file mode 100644
index 000000000000..d8f9872b0e2d
--- /dev/null
+++ b/include/asm-frv/device.h
@@ -0,0 +1,7 @@
+/*
+ * Arch specific extensions to struct device
+ *
+ * This file is released under the GPLv2
+ */
+#include <asm-generic/device.h>
+
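
[Note: this stub simply opts FRV into the generic definition. For reference,
the asm-generic header of this period defined only an empty arch-data
container, which architectures could later extend:

        /* from include/asm-generic/device.h of the same era */
        struct dev_archdata {
        };
]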
diff --git a/include/asm-frv/highmem.h b/include/asm-frv/highmem.h
index e2247c22a638..0f390f41f816 100644
--- a/include/asm-frv/highmem.h
+++ b/include/asm-frv/highmem.h
@@ -82,11 +82,11 @@ extern struct page *kmap_atomic_to_page(void *ptr);
dampr = paddr | xAMPRx_L | xAMPRx_M | xAMPRx_S | xAMPRx_SS_16Kb | xAMPRx_V; \
\
if (type != __KM_CACHE) \
- asm volatile("movgs %0,dampr"#ampr :: "r"(dampr)); \
+ asm volatile("movgs %0,dampr"#ampr :: "r"(dampr) : "memory"); \
else \
asm volatile("movgs %0,iampr"#ampr"\n" \
"movgs %0,dampr"#ampr"\n" \
- :: "r"(dampr) \
+ :: "r"(dampr) : "memory" \
); \
\
asm("movsg damlr"#ampr",%0" : "=r"(damlr)); \
@@ -104,7 +104,7 @@ extern struct page *kmap_atomic_to_page(void *ptr);
asm volatile("movgs %0,tplr \n" \
"movgs %1,tppr \n" \
"tlbpr %0,gr0,#2,#1" \
- : : "r"(damlr), "r"(dampr)); \
+ : : "r"(damlr), "r"(dampr) : "memory"); \
\
/*printk("TLB: SECN sl=%d L=%08lx P=%08lx\n", slot, damlr, dampr);*/ \
\
@@ -115,7 +115,7 @@ static inline void *kmap_atomic(struct page *page, enum km_type type)
{
unsigned long paddr;
- preempt_disable();
+ inc_preempt_count();
paddr = page_to_phys(page);
switch (type) {
@@ -138,16 +138,16 @@ static inline void *kmap_atomic(struct page *page, enum km_type type)
}
}
-#define __kunmap_atomic_primary(type, ampr) \
-do { \
- asm volatile("movgs gr0,dampr"#ampr"\n"); \
- if (type == __KM_CACHE) \
- asm volatile("movgs gr0,iampr"#ampr"\n"); \
+#define __kunmap_atomic_primary(type, ampr) \
+do { \
+ asm volatile("movgs gr0,dampr"#ampr"\n" ::: "memory"); \
+ if (type == __KM_CACHE) \
+ asm volatile("movgs gr0,iampr"#ampr"\n" ::: "memory"); \
} while(0)
-#define __kunmap_atomic_secondary(slot, vaddr) \
-do { \
- asm volatile("tlbpr %0,gr0,#4,#1" : : "r"(vaddr)); \
+#define __kunmap_atomic_secondary(slot, vaddr) \
+do { \
+ asm volatile("tlbpr %0,gr0,#4,#1" : : "r"(vaddr) : "memory"); \
} while(0)
static inline void kunmap_atomic(void *kvaddr, enum km_type type)
@@ -170,7 +170,8 @@ static inline void kunmap_atomic(void *kvaddr, enum km_type type)
default:
BUG();
}
- preempt_enable();
+ dec_preempt_count();
+ preempt_check_resched();
}
#endif /* !__ASSEMBLY__ */
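
[Note: the added "memory" clobbers matter because the movgs/tlbpr instructions
change what a virtual address maps to; without them GCC may reorder ordinary
loads and stores across the asm. The preemption change open-codes roughly what
preempt_enable() does, bumping the count on entry and re-checking for a pending
reschedule on exit. A hypothetical caller makes the ordering constraint
concrete (KM_USER0 is one of the km_type slots of this era):

        static void fill_highmem_page(struct page *page, const void *src, size_t len)
        {
                void *vaddr = kmap_atomic(page, KM_USER0);

                /* must not be hoisted above the mapping asm or sunk below
                 * the unmapping asm -- the "memory" clobbers enforce that */
                memcpy(vaddr, src, len);

                kunmap_atomic(vaddr, KM_USER0);
        }
]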