From 9cccf98a6efe4d77d247610fbab29305159a9b46 Mon Sep 17 00:00:00 2001 From: Mathieu Desnoyers Date: Fri, 9 Dec 2011 09:22:30 -0500 Subject: [PATCH] lttng: cleanup one-bit signed bitfields * Dan Carpenter wrote: > Sparse complains that these signed bitfields look "dubious". The > problem is that instead of being either 0 or 1 like people would expect, > signed one bit variables like this are either 0 or -1. It doesn't cause > a problem in this case but it's ugly so lets fix them. * walter harms (wharms@bfs.de) wrote: > hi, > This patch looks ok to me but this design is ugly by itself. > It should be replaced by an uchar uint whatever or use a > real bool (obviously not preferred by this programmes). bool :1, uchar :1 or uint :1 could make sense. uchar:1/bool:1 won't save any space here, because the surrounding fields are either uint or pointers, so alignment will just add padding. I try to use int/uint whenever possible because x86 CPUs tend to get less register false-dependencies when using instructions modifying the whole register (generated by using int/uint types) rather than only part of it (uchar/char/bool). I only use char/uchar/bool when there is a clear wanted space gain. The reason why I never use the bool type within a structure when I want a compact representation is that bool takes a whole byte just to represent one bit: struct usebitfield { int a; unsigned int f:1, g:1, h:1, i:1, j:1; int b; }; struct usebool { int a; bool f, g, h, i, j; int b; }; struct useboolbf { int a; bool f:1, g:1, h:1, i:1, j:1; int b; }; int main() { printf("bitfield %zu bytes, bool %zu bytes, boolbitfield %zu bytes\n", sizeof(struct usebitfield), sizeof(struct usebool), sizeof(struct useboolbf)); } result: bitfield 12 bytes, bool 16 bytes, boolbitfield 12 bytes This is because each bool takes one byte, while the bitfields are put in units of "unsigned int" (or bool for the 3rd struct). 
So in this example, we need 5 bytes + 3 bytes alignment for the bool, but only 4 bytes to hold the "unsigned int" unit for the bitfields. The choice between bool and bitfields must also take into account the frequency of access to the variable, because bitfields require mask operations to access the selected bit(s). You will notice that none of these bitfields are accessed on the tracing fast-path: only in slow-paths. Therefore, space gain is more important than speed here. One might argue that I have so few of these fields here that it does not make an actual difference to go for bitfield or bool. I am just trying to choose types best suited for their intended purpose, ensuring they are future-proof and will allow simply adding more fields using the same type, as needed. So I guess I'll go for unsigned int :1 (rather than uint:1, so we keep source-level compatibility with user-space LTTng-UST). Reported-by: Dan Carpenter Signed-off-by: Mathieu Desnoyers --- lib/ringbuffer/backend_types.h | 4 ++-- lib/ringbuffer/frontend_types.h | 14 +++++++------- ltt-events.h | 10 +++++----- 3 files changed, 14 insertions(+), 14 deletions(-) diff --git a/lib/ringbuffer/backend_types.h b/lib/ringbuffer/backend_types.h index 1d301de2..3d1fcc06 100644 --- a/lib/ringbuffer/backend_types.h +++ b/lib/ringbuffer/backend_types.h @@ -53,7 +53,7 @@ struct lib_ring_buffer_backend { struct channel *chan; /* Associated channel */ int cpu; /* This buffer's cpu. -1 if global. */ union v_atomic records_read; /* Number of records read */ - unsigned int allocated:1; /* Bool: is buffer allocated ? */ + unsigned int allocated:1; /* is buffer allocated ? */ }; struct channel_backend { @@ -65,7 +65,7 @@ struct channel_backend { * for writer. */ unsigned int buf_size_order; /* Order of buffer size */ - int extra_reader_sb:1; /* Bool: has extra reader subbuffer */ + unsigned int extra_reader_sb:1; /* has extra reader subbuffer ? 
*/ struct lib_ring_buffer *buf; /* Channel per-cpu buffers */ unsigned long num_subbuf; /* Number of sub-buffers for writer */ diff --git a/lib/ringbuffer/frontend_types.h b/lib/ringbuffer/frontend_types.h index 5c7437f8..5029bc78 100644 --- a/lib/ringbuffer/frontend_types.h +++ b/lib/ringbuffer/frontend_types.h @@ -60,8 +60,8 @@ struct channel { struct notifier_block cpu_hp_notifier; /* CPU hotplug notifier */ struct notifier_block tick_nohz_notifier; /* CPU nohz notifier */ struct notifier_block hp_iter_notifier; /* hotplug iterator notifier */ - int cpu_hp_enable:1; /* Enable CPU hotplug notif. */ - int hp_iter_enable:1; /* Enable hp iter notif. */ + unsigned int cpu_hp_enable:1; /* Enable CPU hotplug notif. */ + unsigned int hp_iter_enable:1; /* Enable hp iter notif. */ wait_queue_head_t read_wait; /* reader wait queue */ wait_queue_head_t hp_wait; /* CPU hotplug wait queue */ int finalized; /* Has channel been finalized */ @@ -94,8 +94,8 @@ struct lib_ring_buffer_iter { ITER_NEXT_RECORD, ITER_PUT_SUBBUF, } state; - int allocated:1; - int read_open:1; /* Opened for reading ? */ + unsigned int allocated:1; + unsigned int read_open:1; /* Opened for reading ? 
*/ }; /* ring buffer state */ @@ -138,9 +138,9 @@ struct lib_ring_buffer { unsigned long get_subbuf_consumed; /* Read-side consumed */ unsigned long prod_snapshot; /* Producer count snapshot */ unsigned long cons_snapshot; /* Consumer count snapshot */ - int get_subbuf:1; /* Sub-buffer being held by reader */ - int switch_timer_enabled:1; /* Protected by ring_buffer_nohz_lock */ - int read_timer_enabled:1; /* Protected by ring_buffer_nohz_lock */ + unsigned int get_subbuf:1, /* Sub-buffer being held by reader */ + switch_timer_enabled:1, /* Protected by ring_buffer_nohz_lock */ + read_timer_enabled:1; /* Protected by ring_buffer_nohz_lock */ }; static inline diff --git a/ltt-events.h b/ltt-events.h index 36b281a7..0b90b45b 100644 --- a/ltt-events.h +++ b/ltt-events.h @@ -67,8 +67,8 @@ struct lttng_enum_entry { struct lttng_integer_type { unsigned int size; /* in bits */ unsigned short alignment; /* in bits */ - unsigned int signedness:1; - unsigned int reverse_byte_order:1; + unsigned int signedness:1, + reverse_byte_order:1; unsigned int base; /* 2, 8, 10, 16, for pretty print */ enum lttng_string_encodings encoding; }; @@ -191,7 +191,7 @@ struct ltt_event { } ftrace; } u; struct list_head list; /* Event list */ - int metadata_dumped:1; + unsigned int metadata_dumped:1; }; struct ltt_channel_ops { @@ -251,7 +251,7 @@ struct ltt_channel { struct ltt_event *sc_compat_unknown; struct ltt_event *sc_exit; /* for syscall exit */ int header_type; /* 0: unset, 1: compact, 2: large */ - int metadata_dumped:1; + unsigned int metadata_dumped:1; }; struct ltt_session { @@ -264,7 +264,7 @@ struct ltt_session { struct list_head list; /* Session list */ unsigned int free_chan_id; /* Next chan ID to allocate */ uuid_le uuid; /* Trace session unique ID */ - int metadata_dumped:1; + unsigned int metadata_dumped:1; }; struct ltt_session *ltt_session_create(void); -- 2.34.1