mirror of https://git.kernel.org/pub/scm/linux/kernel/git/next/linux-next.git (synced 2025-01-16 05:26:07 +00:00)
cb8747b7d2
This macro (__always_inline) is defined by glibc itself, which makes
the issue go unnoticed on those systems. On non-glibc systems it causes
build failures in several utilities and libraries, like bpftool and
objtool.

Fixes: 1d509f2a6ebc ("x86/insn: Support big endian cross-compiles")
Fixes: 2d7ce0e8a704 ("tools/virtio: more stubs")
Fixes: 3fb321fde22d ("selftests/net: ipv6 flowlabel")
Fixes: 50b3ed57dee9 ("selftests/bpf: test bpf flow dissection")
Fixes: 9cacf81f8161 ("bpf: Remove extra lock_sock for TCP_ZEROCOPY_RECEIVE")
Fixes: a4b2061242ec ("tools include uapi: Grab a copy of linux/in.h")
Fixes: b12d6ec09730 ("bpf: btf: add btf print functionality")
Fixes: c0dd967818a2 ("tools, include: Grab a copy of linux/erspan.h")
Fixes: c4b6014e8bb0 ("tools: Add copy of perf_event.h to tools/include/linux/")
Signed-off-by: Ismael Luceno <ismael@iodev.co.uk>
Acked-by: Masami Hiramatsu <mhiramat@kernel.org>
Signed-off-by: Josh Poimboeuf <jpoimboe@redhat.com>
Link: https://lore.kernel.org/r/20211115134647.1921-1-ismael@iodev.co.uk
Cc: Martin Schwidefsky <schwidefsky@de.ibm.com>
Cc: Vasily Gorbik <gor@linux.ibm.com>
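For illustration, a minimal sketch of the failure mode (a hypothetical
repro.c, not part of the patch; musl stands in for any non-glibc libc).
glibc's <sys/cdefs.h>, pulled in by every libc header, defines
__always_inline, so code using the macro happens to compile there; on
musl the identifier is undefined and the declaration fails with an
error like "unknown type name '__always_inline'". The #ifndef block
below mirrors the fallback the UAPI <linux/stddef.h> provides, which is
why including that header fixes the listed builds:

/* repro.c: hypothetical example, not from the commit.
 * gcc -c repro.c        builds: glibc defines __always_inline
 * musl-gcc -c repro.c   fails without the fallback block below
 */
#include <stdio.h>

#ifndef __always_inline         /* the UAPI <linux/stddef.h> fallback */
#define __always_inline inline
#endif

static __always_inline unsigned short swap16(unsigned short x)
{
	return (unsigned short)((x << 8) | (x >> 8));
}

int main(void)
{
	printf("%#x\n", swap16(0x1234));        /* prints 0x3412 */
	return 0;
}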
108 lines
3.9 KiB
C
/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
#ifndef _UAPI_LINUX_BYTEORDER_LITTLE_ENDIAN_H
#define _UAPI_LINUX_BYTEORDER_LITTLE_ENDIAN_H

#ifndef __LITTLE_ENDIAN
#define __LITTLE_ENDIAN 1234
#endif
#ifndef __LITTLE_ENDIAN_BITFIELD
#define __LITTLE_ENDIAN_BITFIELD
#endif

#include <linux/stddef.h>
#include <linux/types.h>
#include <linux/swab.h>
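/*
 * <linux/stddef.h> supplies a fallback __always_inline definition for
 * libcs that do not provide one (glibc defines it in <sys/cdefs.h>).
 *
 * On a little-endian CPU the le<->cpu conversions below are pure type
 * casts, while the be<->cpu conversions byte-swap.  The __constant_*
 * forms expand to constant expressions and are usable in static
 * initializers.
 */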

#define __constant_htonl(x) ((__force __be32)___constant_swab32((x)))
#define __constant_ntohl(x) ___constant_swab32((__force __be32)(x))
#define __constant_htons(x) ((__force __be16)___constant_swab16((x)))
#define __constant_ntohs(x) ___constant_swab16((__force __be16)(x))
#define __constant_cpu_to_le64(x) ((__force __le64)(__u64)(x))
#define __constant_le64_to_cpu(x) ((__force __u64)(__le64)(x))
#define __constant_cpu_to_le32(x) ((__force __le32)(__u32)(x))
#define __constant_le32_to_cpu(x) ((__force __u32)(__le32)(x))
#define __constant_cpu_to_le16(x) ((__force __le16)(__u16)(x))
#define __constant_le16_to_cpu(x) ((__force __u16)(__le16)(x))
#define __constant_cpu_to_be64(x) ((__force __be64)___constant_swab64((x)))
#define __constant_be64_to_cpu(x) ___constant_swab64((__force __u64)(__be64)(x))
#define __constant_cpu_to_be32(x) ((__force __be32)___constant_swab32((x)))
#define __constant_be32_to_cpu(x) ___constant_swab32((__force __u32)(__be32)(x))
#define __constant_cpu_to_be16(x) ((__force __be16)___constant_swab16((x)))
#define __constant_be16_to_cpu(x) ___constant_swab16((__force __u16)(__be16)(x))
#define __cpu_to_le64(x) ((__force __le64)(__u64)(x))
#define __le64_to_cpu(x) ((__force __u64)(__le64)(x))
#define __cpu_to_le32(x) ((__force __le32)(__u32)(x))
#define __le32_to_cpu(x) ((__force __u32)(__le32)(x))
#define __cpu_to_le16(x) ((__force __le16)(__u16)(x))
#define __le16_to_cpu(x) ((__force __u16)(__le16)(x))
#define __cpu_to_be64(x) ((__force __be64)__swab64((x)))
#define __be64_to_cpu(x) __swab64((__force __u64)(__be64)(x))
#define __cpu_to_be32(x) ((__force __be32)__swab32((x)))
#define __be32_to_cpu(x) __swab32((__force __u32)(__be32)(x))
#define __cpu_to_be16(x) ((__force __be16)__swab16((x)))
#define __be16_to_cpu(x) __swab16((__force __u16)(__be16)(x))
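/*
 * Pointer variants: dereference and convert.  The le forms reduce to a
 * plain load; the be forms load and byte-swap via the __swab*p helpers
 * from <linux/swab.h>.
 */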

static __always_inline __le64 __cpu_to_le64p(const __u64 *p)
{
	return (__force __le64)*p;
}
static __always_inline __u64 __le64_to_cpup(const __le64 *p)
{
	return (__force __u64)*p;
}
static __always_inline __le32 __cpu_to_le32p(const __u32 *p)
{
	return (__force __le32)*p;
}
static __always_inline __u32 __le32_to_cpup(const __le32 *p)
{
	return (__force __u32)*p;
}
static __always_inline __le16 __cpu_to_le16p(const __u16 *p)
{
	return (__force __le16)*p;
}
static __always_inline __u16 __le16_to_cpup(const __le16 *p)
{
	return (__force __u16)*p;
}
static __always_inline __be64 __cpu_to_be64p(const __u64 *p)
{
	return (__force __be64)__swab64p(p);
}
static __always_inline __u64 __be64_to_cpup(const __be64 *p)
{
	return __swab64p((__u64 *)p);
}
static __always_inline __be32 __cpu_to_be32p(const __u32 *p)
{
	return (__force __be32)__swab32p(p);
}
static __always_inline __u32 __be32_to_cpup(const __be32 *p)
{
	return __swab32p((__u32 *)p);
}
static __always_inline __be16 __cpu_to_be16p(const __u16 *p)
{
	return (__force __be16)__swab16p(p);
}
static __always_inline __u16 __be16_to_cpup(const __be16 *p)
{
	return __swab16p((__u16 *)p);
}
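/*
 * In-place variants: the le forms are no-ops on this little-endian
 * layout, the be forms byte-swap the object in place.
 */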
#define __cpu_to_le64s(x) do { (void)(x); } while (0)
#define __le64_to_cpus(x) do { (void)(x); } while (0)
#define __cpu_to_le32s(x) do { (void)(x); } while (0)
#define __le32_to_cpus(x) do { (void)(x); } while (0)
#define __cpu_to_le16s(x) do { (void)(x); } while (0)
#define __le16_to_cpus(x) do { (void)(x); } while (0)
#define __cpu_to_be64s(x) __swab64s((x))
#define __be64_to_cpus(x) __swab64s((x))
#define __cpu_to_be32s(x) __swab32s((x))
#define __be32_to_cpus(x) __swab32s((x))
#define __cpu_to_be16s(x) __swab16s((x))
#define __be16_to_cpus(x) __swab16s((x))

#endif /* _UAPI_LINUX_BYTEORDER_LITTLE_ENDIAN_H */
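A short usage sketch (not part of the header; assumes installed kernel
headers, where these definitions normally arrive via <asm/byteorder.h>):
declare wire-format fields with the __be*/__le* types and convert at
the boundaries.

/* use.c: hypothetical example built against exported kernel headers.
 * Portable code includes <asm/byteorder.h>, which selects this header
 * on little-endian targets.
 */
#include <stdio.h>
#include <asm/byteorder.h>

struct wire_hdr {
	__be32 magic;   /* big-endian on the wire */
	__le16 len;     /* little-endian on the wire */
};

int main(void)
{
	struct wire_hdr h;

	h.magic = __cpu_to_be32(0xCAFEBABEu);   /* byte-swaps here */
	h.len = __cpu_to_le16(128);             /* identity cast here */

	printf("magic=%#x len=%u\n",
	       (unsigned int)__be32_to_cpu(h.magic),
	       (unsigned int)__le16_to_cpu(h.len));
	return 0;
}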