#define inline_memcpy(dest, src, n) memcpy(dest, src, n)
#endif
+/*
+ * LOAD_UNALIGNED_INT(type, p): read an integer of the given type from
+ * the possibly-unaligned pointer "p".  Going through a packed struct
+ * informs the compiler that the access may be unaligned, so it emits
+ * byte-wise (or otherwise safe) code on strict-alignment architectures
+ * instead of a plain load that could fault.  Implemented as a
+ * GCC/Clang statement expression so it yields the loaded value.
+ */
+#define LOAD_UNALIGNED_INT(type, p) \
+ ({ \
+ struct packed_struct { type __v; } __attribute__((packed)); \
+ (((const struct packed_struct *) (p))->__v); \
+ })
+
+/*
+ * STORE_UNALIGNED_INT(type, p, v): write integer value "v" of the
+ * given type to the possibly-unaligned pointer "p".  The packed-struct
+ * access tells the compiler the destination may be unaligned (see
+ * LOAD_UNALIGNED_INT).  Wrapped in do/while(0) so it behaves as a
+ * single statement.
+ */
+#define STORE_UNALIGNED_INT(type, p, v) \
+ do { \
+ struct packed_struct { type __v; } __attribute__((packed)); \
+ ((struct packed_struct *) (p))->__v = (v); \
+ } while (0)
+
+/*
+ * Copy from src into dest, assuming unaligned src and dest.
+ *
+ * Fast path for the common trace-field sizes (1/2/4/8 bytes): a single
+ * unaligned load/store pair through the packed-struct accessors, which
+ * is safe even when src/dest are not naturally aligned.  Any other
+ * length falls back to inline_memcpy().
+ *
+ * NOTE(review): the always_inline attribute is placed on a separate
+ * forward declaration preceding the definition — presumably because the
+ * attribute must appear on a declaration before use; confirm against
+ * the compilers this is built with.
+ */
+static inline
+void lttng_inline_memcpy(void *dest, const void *src,
+ unsigned long len)
+ __attribute__((always_inline));
+static inline
+void lttng_inline_memcpy(void *dest, const void *src,
+ unsigned long len)
+{
+ switch (len) {
+ case 1:
+ /* Single byte: alignment is irrelevant, plain copy. */
+ *(uint8_t *) dest = *(const uint8_t *) src;
+ break;
+ case 2:
+ STORE_UNALIGNED_INT(uint16_t, dest, LOAD_UNALIGNED_INT(uint16_t, src));
+ break;
+ case 4:
+ STORE_UNALIGNED_INT(uint32_t, dest, LOAD_UNALIGNED_INT(uint32_t, src));
+ break;
+ case 8:
+ STORE_UNALIGNED_INT(uint64_t, dest, LOAD_UNALIGNED_INT(uint64_t, src));
+ break;
+ default:
+ /* Uncommon length: defer to the configured memcpy implementation. */
+ inline_memcpy(dest, src, len);
+ }
+}
+
/*
* Use the architecture-specific memcpy implementation for constant-sized
* inputs, but rely on an inline memcpy for length statically unknown.
if (__builtin_constant_p(len)) \
memcpy(dest, src, __len); \
else \
- inline_memcpy(dest, src, __len); \
+ lttng_inline_memcpy(dest, src, __len); \
} while (0)
/*