From 8617eb9a6f9dd4e63f8cb649120b3b3ae79df4f0 Mon Sep 17 00:00:00 2001
From: Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
Date: Sun, 25 Sep 2016 10:43:22 -0400
Subject: [PATCH] Performance: mark lib_ring_buffer_write always inline

The underlying copy operation is more efficient if the size is a
constant, which only happens if this function is inlined in the caller.
Otherwise, we end up calling memcpy for each field.

Force inlining for performance reasons.

Signed-off-by: Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
---
 lib/ringbuffer/backend.h | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/lib/ringbuffer/backend.h b/lib/ringbuffer/backend.h
index 8f6d7d04..449d6635 100644
--- a/lib/ringbuffer/backend.h
+++ b/lib/ringbuffer/backend.h
@@ -83,7 +83,7 @@ lib_ring_buffer_read_offset_address(struct lib_ring_buffer_backend *bufb,
  * backend-specific memcpy() operation. Calls the slow path (_ring_buffer_write)
  * if copy is crossing a page boundary.
  */
-static inline
+static inline __attribute__((always_inline))
 void lib_ring_buffer_write(const struct lib_ring_buffer_config *config,
 			   struct lib_ring_buffer_ctx *ctx,
 			   const void *src, size_t len)
--
2.34.1
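
For context beyond the patch itself, here is a minimal standalone sketch of
the effect the commit message describes: once a copy helper is inlined into
its caller, a sizeof()-constant length lets the compiler lower memcpy() to a
few move instructions instead of emitting a function call per field. The
names below (copy_field, serialize_event) are hypothetical stand-ins, not
the actual ring buffer API.

/*
 * Sketch: forced inlining turns a runtime length into a compile-time
 * constant at each call site, so the compiler can replace memcpy()
 * with direct stores.
 */
#include <string.h>
#include <stdint.h>

static inline __attribute__((always_inline))
void copy_field(void *dest, const void *src, size_t len)
{
	/*
	 * When this body is inlined and len is a constant such as
	 * sizeof(uint64_t), compilers typically lower this memcpy()
	 * to a single store rather than a call.
	 */
	memcpy(dest, src, len);
}

void serialize_event(void *dest, const uint64_t *timestamp,
		     const uint32_t *payload)
{
	/* Each field is copied with a constant, known-at-compile-time size. */
	copy_field(dest, timestamp, sizeof(*timestamp));
	copy_field((char *)dest + sizeof(*timestamp),
		   payload, sizeof(*payload));
}

Building this with gcc -O2 and inspecting the assembly, with and without the
always_inline attribute, typically shows the memcpy() calls disappearing in
the inlined version; without inlining, len reaches copy_field() as a runtime
value and the out-of-line memcpy() call remains.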