author    | Linus Torvalds <torvalds@ppc970.osdl.org> | 2005-04-16 15:20:36 -0700
committer | Linus Torvalds <torvalds@ppc970.osdl.org> | 2005-04-16 15:20:36 -0700
commit    | 1da177e4c3f41524e886b7f1b8a0c1fc7321cac2 (patch)
tree      | 0bba044c4ce775e45a88a51686b5d9f90697ea9d /include/asm-generic/unaligned.h
Linux-2.6.12-rc2, v2.6.12-rc2
Initial git repository build. I'm not bothering with the full history,
even though we have it. We can create a separate "historical" git
archive of that later if we want to, and in the meantime it's about
3.2GB when imported into git - space that would just make the early
git days unnecessarily complicated, when we don't have a lot of good
infrastructure for it.
Let it rip!
Diffstat (limited to 'include/asm-generic/unaligned.h')
-rw-r--r-- | include/asm-generic/unaligned.h | 121
1 file changed, 121 insertions, 0 deletions
diff --git a/include/asm-generic/unaligned.h b/include/asm-generic/unaligned.h
new file mode 100644
index 000000000000..c856a43e3b45
--- /dev/null
+++ b/include/asm-generic/unaligned.h
@@ -0,0 +1,121 @@
+#ifndef _ASM_GENERIC_UNALIGNED_H_
+#define _ASM_GENERIC_UNALIGNED_H_
+
+/*
+ * For the benefit of those who are trying to port Linux to another
+ * architecture, here are some C-language equivalents.
+ *
+ * This is based almost entirely upon Richard Henderson's
+ * asm-alpha/unaligned.h implementation. Some comments were
+ * taken from David Mosberger's asm-ia64/unaligned.h header.
+ */
+
+#include <linux/types.h>
+
+/*
+ * The main single-value unaligned transfer routines.
+ */
+#define get_unaligned(ptr) \
+	((__typeof__(*(ptr)))__get_unaligned((ptr), sizeof(*(ptr))))
+#define put_unaligned(x,ptr) \
+	__put_unaligned((unsigned long)(x), (ptr), sizeof(*(ptr)))
+
+/*
+ * This function doesn't actually exist. The idea is that when
+ * someone uses the macros below with an unsupported size (datatype),
+ * the linker will alert us to the problem via an unresolved reference
+ * error.
+ */
+extern void bad_unaligned_access_length(void) __attribute__((noreturn));
+
+struct __una_u64 { __u64 x __attribute__((packed)); };
+struct __una_u32 { __u32 x __attribute__((packed)); };
+struct __una_u16 { __u16 x __attribute__((packed)); };
+
+/*
+ * Elemental unaligned loads
+ */
+
+static inline unsigned long __uldq(const __u64 *addr)
+{
+	const struct __una_u64 *ptr = (const struct __una_u64 *) addr;
+	return ptr->x;
+}
+
+static inline unsigned long __uldl(const __u32 *addr)
+{
+	const struct __una_u32 *ptr = (const struct __una_u32 *) addr;
+	return ptr->x;
+}
+
+static inline unsigned long __uldw(const __u16 *addr)
+{
+	const struct __una_u16 *ptr = (const struct __una_u16 *) addr;
+	return ptr->x;
+}
+
+/*
+ * Elemental unaligned stores
+ */
+
+static inline void __ustq(__u64 val, __u64 *addr)
+{
+	struct __una_u64 *ptr = (struct __una_u64 *) addr;
+	ptr->x = val;
+}
+
+static inline void __ustl(__u32 val, __u32 *addr)
+{
+	struct __una_u32 *ptr = (struct __una_u32 *) addr;
+	ptr->x = val;
+}
+
+static inline void __ustw(__u16 val, __u16 *addr)
+{
+	struct __una_u16 *ptr = (struct __una_u16 *) addr;
+	ptr->x = val;
+}
+
+static inline unsigned long __get_unaligned(const void *ptr, size_t size)
+{
+	unsigned long val;
+	switch (size) {
+	case 1:
+		val = *(const __u8 *)ptr;
+		break;
+	case 2:
+		val = __uldw((const __u16 *)ptr);
+		break;
+	case 4:
+		val = __uldl((const __u32 *)ptr);
+		break;
+	case 8:
+		val = __uldq((const __u64 *)ptr);
+		break;
+	default:
+		bad_unaligned_access_length();
+	};
+	return val;
+}
+
+static inline void __put_unaligned(unsigned long val, void *ptr, size_t size)
+{
+	switch (size) {
+	case 1:
+		*(__u8 *)ptr = val;
+		break;
+	case 2:
+		__ustw(val, (__u16 *)ptr);
+		break;
+	case 4:
+		__ustl(val, (__u32 *)ptr);
+		break;
+	case 8:
+		__ustq(val, (__u64 *)ptr);
+		break;
+	default:
+		bad_unaligned_access_length();
+	};
+}
+
+#endif /* _ASM_GENERIC_UNALIGNED_H */
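For context (not part of the commit): these macros let code read or write a multi-byte value at an address whose alignment is unknown without faulting on architectures that trap unaligned accesses; the packed-struct helpers above push the compiler into emitting byte-safe loads and stores where needed. A minimal usage sketch follows, with invented function and field names; a real driver would normally include the architecture's <asm/unaligned.h>, which may in turn use this generic file.

/*
 * Illustrative sketch only -- not part of this commit.
 * frame_length()/set_frame_length() and the offset 3 are made up.
 */
#include <linux/types.h>
#include <asm/unaligned.h>

/* Read a 32-bit length that sits at byte offset 3 of a packet buffer,
 * i.e. at an address that is usually not 4-byte aligned. */
static u32 frame_length(const u8 *pkt)
{
	return get_unaligned((const u32 *)(pkt + 3));
}

/* Store it back the same way; put_unaligned() writes it byte-safely. */
static void set_frame_length(u8 *pkt, u32 len)
{
	put_unaligned(len, (u32 *)(pkt + 3));
}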