| author | Matt Fleming <matt.fleming@intel.com> | 2012-02-28 13:37:20 +0000 |
|---|---|---|
| committer | H. Peter Anvin <hpa@zytor.com> | 2012-02-28 10:22:51 -0800 |
| commit | a07f7672d7cf0ff0d6e548a9feb6e0bd016d9c6c (patch) | |
| tree | 656594c3fb97e31ed6a207d8ba9a5dda026aa04f /tools/include | |
| parent | 6b21d18ed50c7d145220b0724ea7f2613abf0f95 (diff) | |
tools/include: Add byteshift headers for endian access
There are various hostprogs in the kernel that are rolling their own
implementations of {get,put}_unaligned_le*(). Copy the byteshift
headers from include/linux/unaligned so that they can all use a single
implementation.
This requires changing some of the data types to the userspace-exported
ones (u32 -> __u32, etc.).
Signed-off-by: Matt Fleming <matt.fleming@intel.com>
Link: http://lkml.kernel.org/r/1330436245-24875-2-git-send-email-matt@console-pimps.org
Signed-off-by: H. Peter Anvin <hpa@zytor.com>
Diffstat (limited to 'tools/include')
-rw-r--r-- | tools/include/tools/be_byteshift.h | 70
-rw-r--r-- | tools/include/tools/le_byteshift.h | 70
2 files changed, 140 insertions, 0 deletions
```diff
diff --git a/tools/include/tools/be_byteshift.h b/tools/include/tools/be_byteshift.h
new file mode 100644
index 00000000000..f4912e2668b
--- /dev/null
+++ b/tools/include/tools/be_byteshift.h
@@ -0,0 +1,70 @@
+#ifndef _TOOLS_BE_BYTESHIFT_H
+#define _TOOLS_BE_BYTESHIFT_H
+
+#include <linux/types.h>
+
+static inline __u16 __get_unaligned_be16(const __u8 *p)
+{
+	return p[0] << 8 | p[1];
+}
+
+static inline __u32 __get_unaligned_be32(const __u8 *p)
+{
+	return p[0] << 24 | p[1] << 16 | p[2] << 8 | p[3];
+}
+
+static inline __u64 __get_unaligned_be64(const __u8 *p)
+{
+	return (__u64)__get_unaligned_be32(p) << 32 |
+	       __get_unaligned_be32(p + 4);
+}
+
+static inline void __put_unaligned_be16(__u16 val, __u8 *p)
+{
+	*p++ = val >> 8;
+	*p++ = val;
+}
+
+static inline void __put_unaligned_be32(__u32 val, __u8 *p)
+{
+	__put_unaligned_be16(val >> 16, p);
+	__put_unaligned_be16(val, p + 2);
+}
+
+static inline void __put_unaligned_be64(__u64 val, __u8 *p)
+{
+	__put_unaligned_be32(val >> 32, p);
+	__put_unaligned_be32(val, p + 4);
+}
+
+static inline __u16 get_unaligned_be16(const void *p)
+{
+	return __get_unaligned_be16((const __u8 *)p);
+}
+
+static inline __u32 get_unaligned_be32(const void *p)
+{
+	return __get_unaligned_be32((const __u8 *)p);
+}
+
+static inline __u64 get_unaligned_be64(const void *p)
+{
+	return __get_unaligned_be64((const __u8 *)p);
+}
+
+static inline void put_unaligned_be16(__u16 val, void *p)
+{
+	__put_unaligned_be16(val, p);
+}
+
+static inline void put_unaligned_be32(__u32 val, void *p)
+{
+	__put_unaligned_be32(val, p);
+}
+
+static inline void put_unaligned_be64(__u64 val, void *p)
+{
+	__put_unaligned_be64(val, p);
+}
+
+#endif /* _TOOLS_BE_BYTESHIFT_H */
diff --git a/tools/include/tools/le_byteshift.h b/tools/include/tools/le_byteshift.h
new file mode 100644
index 00000000000..c99d45a68bd
--- /dev/null
+++ b/tools/include/tools/le_byteshift.h
@@ -0,0 +1,70 @@
+#ifndef _TOOLS_LE_BYTESHIFT_H
+#define _TOOLS_LE_BYTESHIFT_H
+
+#include <linux/types.h>
+
+static inline __u16 __get_unaligned_le16(const __u8 *p)
+{
+	return p[0] | p[1] << 8;
+}
+
+static inline __u32 __get_unaligned_le32(const __u8 *p)
+{
+	return p[0] | p[1] << 8 | p[2] << 16 | p[3] << 24;
+}
+
+static inline __u64 __get_unaligned_le64(const __u8 *p)
+{
+	return (__u64)__get_unaligned_le32(p + 4) << 32 |
+	       __get_unaligned_le32(p);
+}
+
+static inline void __put_unaligned_le16(__u16 val, __u8 *p)
+{
+	*p++ = val;
+	*p++ = val >> 8;
+}
+
+static inline void __put_unaligned_le32(__u32 val, __u8 *p)
+{
+	__put_unaligned_le16(val >> 16, p + 2);
+	__put_unaligned_le16(val, p);
+}
+
+static inline void __put_unaligned_le64(__u64 val, __u8 *p)
+{
+	__put_unaligned_le32(val >> 32, p + 4);
+	__put_unaligned_le32(val, p);
+}
+
+static inline __u16 get_unaligned_le16(const void *p)
+{
+	return __get_unaligned_le16((const __u8 *)p);
+}
+
+static inline __u32 get_unaligned_le32(const void *p)
+{
+	return __get_unaligned_le32((const __u8 *)p);
+}
+
+static inline __u64 get_unaligned_le64(const void *p)
+{
+	return __get_unaligned_le64((const __u8 *)p);
+}
+
+static inline void put_unaligned_le16(__u16 val, void *p)
+{
+	__put_unaligned_le16(val, p);
+}
+
+static inline void put_unaligned_le32(__u32 val, void *p)
+{
+	__put_unaligned_le32(val, p);
+}
+
+static inline void put_unaligned_le64(__u64 val, void *p)
+{
+	__put_unaligned_le64(val, p);
+}
+
+#endif /* _TOOLS_LE_BYTESHIFT_H */
```
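For context only, and not part of the commit: a minimal sketch of how a hostprog might consume these helpers once `tools/include` is on the include path. The file name, build command, and buffer values below are illustrative assumptions, not taken from the kernel build system; it also assumes a userspace `<linux/types.h>` is available on the host.

```c
/* sketch.c - hypothetical host-side example using the new byteshift helpers.
 * Assumed build command (not from the commit): gcc -I tools/include sketch.c
 */
#include <stdio.h>
#include <tools/be_byteshift.h>
#include <tools/le_byteshift.h>

int main(void)
{
	unsigned char buf[8];

	/* Store 0xdeadbeef once little-endian and once big-endian.
	 * The helpers work byte by byte, so they are safe for unaligned
	 * offsets and independent of the host's native endianness. */
	put_unaligned_le32(0xdeadbeef, buf);
	put_unaligned_be32(0xdeadbeef, buf + 4);

	printf("le32: %#x\n", (unsigned)get_unaligned_le32(buf));     /* 0xdeadbeef */
	printf("be32: %#x\n", (unsigned)get_unaligned_be32(buf + 4)); /* 0xdeadbeef */
	printf("raw:  %02x %02x %02x %02x | %02x %02x %02x %02x\n",
	       buf[0], buf[1], buf[2], buf[3],
	       buf[4], buf[5], buf[6], buf[7]);
	/* Expected raw bytes: ef be ad de | de ad be ef */
	return 0;
}
```

Because the accessors are built from shifts and single-byte loads and stores rather than pointer casts, they avoid both unaligned-access faults and any dependence on the byte order of the machine running the hostprog, which is exactly why the hand-rolled copies in the various hostprogs can be replaced by this one implementation.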