From d0bbd9c2e8322f036e0a0a70091cae98cad7e390 Mon Sep 17 00:00:00 2001
From: Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
Date: Mon, 21 May 2012 18:35:22 -0400
Subject: [PATCH] Cleanup: header comments coding style

Signed-off-by: Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
---
 urcu/arch/generic.h    | 14 +++++++-------
 urcu/system.h          | 10 ++++++----
 urcu/uatomic/generic.h |  6 ++++--
 urcu/uatomic/ppc.h     | 18 ++++++++++++------
 urcu/uatomic/x86.h     | 42 ++++++++++++++++++++++++++++--------------
 5 files changed, 57 insertions(+), 33 deletions(-)

diff --git a/urcu/arch/generic.h b/urcu/arch/generic.h
index 1ea7f59..5ec3a11 100644
--- a/urcu/arch/generic.h
+++ b/urcu/arch/generic.h
@@ -37,9 +37,9 @@ extern "C" {
 /*
  * Architectures with cache coherency must _not_ define cmm_mc/cmm_rmc/cmm_wmc.
  *
- * For them, cmm_mc/cmm_rmc/cmm_wmc are implemented with a simple compiler barrier;
- * in addition, we provide defaults for cmm_mb (using GCC builtins) as well as
- * cmm_rmb and cmm_wmb (defaulting to cmm_mb).
+ * For them, cmm_mc/cmm_rmc/cmm_wmc are implemented with a simple
+ * compiler barrier; in addition, we provide defaults for cmm_mb (using
+ * GCC builtins) as well as cmm_rmb and cmm_wmb (defaulting to cmm_mb).
  */
 
 #ifndef cmm_mb
@@ -61,14 +61,14 @@ extern "C" {
 /*
  * Architectures without cache coherency need something like the following:
  *
- * #define cmm_mc() arch_cache_flush()
+ * #define cmm_mc()	arch_cache_flush()
  * #define cmm_rmc()	arch_cache_flush_read()
  * #define cmm_wmc()	arch_cache_flush_write()
  *
- * Of these, only cmm_mc is mandatory. cmm_rmc and cmm_wmc default to cmm_mc.
- * cmm_mb/cmm_rmb/cmm_wmb use these definitions by default:
+ * Of these, only cmm_mc is mandatory. cmm_rmc and cmm_wmc default to
+ * cmm_mc. cmm_mb/cmm_rmb/cmm_wmb use these definitions by default:
  *
- * #define cmm_mb() cmm_mc()
+ * #define cmm_mb()	cmm_mc()
  * #define cmm_rmb()	cmm_rmc()
  * #define cmm_wmb()	cmm_wmc()
  */
diff --git a/urcu/system.h b/urcu/system.h
index 38e8ecc..acce7e9 100644
--- a/urcu/system.h
+++ b/urcu/system.h
@@ -23,7 +23,8 @@
 #include <urcu/arch.h>
 
 /*
- * Identify a shared load. A cmm_smp_rmc() or cmm_smp_mc() should come before the load.
+ * Identify a shared load. A cmm_smp_rmc() or cmm_smp_mc() should come
+ * before the load.
  */
 #define _CMM_LOAD_SHARED(p)	CMM_ACCESS_ONCE(p)
 
@@ -37,13 +38,14 @@
 })
 
 /*
- * Identify a shared store. A cmm_smp_wmc() or cmm_smp_mc() should follow the store.
+ * Identify a shared store. A cmm_smp_wmc() or cmm_smp_mc() should
+ * follow the store.
  */
 #define _CMM_STORE_SHARED(x, v)	({ CMM_ACCESS_ONCE(x) = (v); })
 
 /*
- * Store v into x, where x is located in shared memory. Performs the required
- * cache flush after writing. Returns v.
+ * Store v into x, where x is located in shared memory. Performs the
+ * required cache flush after writing. Returns v.
  */
 #define CMM_STORE_SHARED(x, v) \
 	({ \
diff --git a/urcu/uatomic/generic.h b/urcu/uatomic/generic.h
index 9e2e780..54d2a8c 100644
--- a/urcu/uatomic/generic.h
+++ b/urcu/uatomic/generic.h
@@ -41,8 +41,10 @@ static inline __attribute__((always_inline))
 void _uatomic_link_error()
 {
 #ifdef ILLEGAL_INSTR
-	/* generate an illegal instruction. Cannot catch this with linker tricks
-	 * when optimizations are disabled. */
+	/*
+	 * generate an illegal instruction. Cannot catch this with
+	 * linker tricks when optimizations are disabled.
+	 */
 	__asm__ __volatile__(ILLEGAL_INSTR);
 #else
 	__builtin_trap ();
diff --git a/urcu/uatomic/ppc.h b/urcu/uatomic/ppc.h
index ee3c516..57fcd7a 100644
--- a/urcu/uatomic/ppc.h
+++ b/urcu/uatomic/ppc.h
@@ -93,8 +93,10 @@ unsigned long _uatomic_exchange(void *addr, unsigned long val, int len)
 	}
 #endif
 	}
-	/* generate an illegal instruction. Cannot catch this with linker tricks
-	 * when optimizations are disabled. */
+	/*
+	 * generate an illegal instruction. Cannot catch this with
+	 * linker tricks when optimizations are disabled.
+	 */
 	__asm__ __volatile__(ILLEGAL_INSTR);
 	return 0;
 }
@@ -153,8 +155,10 @@ unsigned long _uatomic_cmpxchg(void *addr, unsigned long old,
 	}
 #endif
 	}
-	/* generate an illegal instruction. Cannot catch this with linker tricks
-	 * when optimizations are disabled. */
+	/*
+	 * generate an illegal instruction. Cannot catch this with
+	 * linker tricks when optimizations are disabled.
+	 */
 	__asm__ __volatile__(ILLEGAL_INSTR);
 	return 0;
 }
@@ -210,8 +214,10 @@ unsigned long _uatomic_add_return(void *addr, unsigned long val,
 	}
 #endif
 	}
-	/* generate an illegal instruction. Cannot catch this with linker tricks
-	 * when optimizations are disabled. */
+	/*
+	 * generate an illegal instruction. Cannot catch this with
+	 * linker tricks when optimizations are disabled.
+	 */
 	__asm__ __volatile__(ILLEGAL_INSTR);
 	return 0;
 }
diff --git a/urcu/uatomic/x86.h b/urcu/uatomic/x86.h
index c55ac4e..ce1ce5e 100644
--- a/urcu/uatomic/x86.h
+++ b/urcu/uatomic/x86.h
@@ -95,8 +95,10 @@ unsigned long __uatomic_cmpxchg(void *addr, unsigned long old,
 	}
 #endif
 	}
-	/* generate an illegal instruction. Cannot catch this with linker tricks
-	 * when optimizations are disabled. */
+	/*
+	 * generate an illegal instruction. Cannot catch this with
+	 * linker tricks when optimizations are disabled.
+	 */
 	__asm__ __volatile__("ud2");
 	return 0;
 }
@@ -157,8 +159,10 @@ unsigned long __uatomic_exchange(void *addr, unsigned long val, int len)
 	}
 #endif
 	}
-	/* generate an illegal instruction. Cannot catch this with linker tricks
-	 * when optimizations are disabled. */
+	/*
+	 * generate an illegal instruction. Cannot catch this with
+	 * linker tricks when optimizations are disabled.
+	 */
 	__asm__ __volatile__("ud2");
 	return 0;
 }
@@ -222,8 +226,10 @@ unsigned long __uatomic_add_return(void *addr, unsigned long val,
 	}
 #endif
 	}
-	/* generate an illegal instruction. Cannot catch this with linker tricks
-	 * when optimizations are disabled. */
+	/*
+	 * generate an illegal instruction. Cannot catch this with
+	 * linker tricks when optimizations are disabled.
+	 */
 	__asm__ __volatile__("ud2");
 	return 0;
 }
@@ -278,8 +284,10 @@ void __uatomic_and(void *addr, unsigned long val, int len)
 	}
 #endif
 	}
-	/* generate an illegal instruction. Cannot catch this with linker tricks
-	 * when optimizations are disabled. */
+	/*
+	 * generate an illegal instruction. Cannot catch this with
+	 * linker tricks when optimizations are disabled.
+	 */
 	__asm__ __volatile__("ud2");
 	return;
 }
@@ -332,8 +340,10 @@ void __uatomic_or(void *addr, unsigned long val, int len)
 	}
 #endif
 	}
-	/* generate an illegal instruction. Cannot catch this with linker tricks
-	 * when optimizations are disabled. */
+	/*
+	 * generate an illegal instruction. Cannot catch this with
+	 * linker tricks when optimizations are disabled.
+	 */
 	__asm__ __volatile__("ud2");
 	return;
 }
@@ -386,8 +396,10 @@ void __uatomic_add(void *addr, unsigned long val, int len)
 	}
 #endif
 	}
-	/* generate an illegal instruction. Cannot catch this with linker tricks
-	 * when optimizations are disabled. */
+	/*
+	 * generate an illegal instruction. Cannot catch this with
+	 * linker tricks when optimizations are disabled.
+	 */
 	__asm__ __volatile__("ud2");
 	return;
 }
@@ -494,8 +506,10 @@ void __uatomic_dec(void *addr, int len)
 	}
 #endif
 	}
-	/* generate an illegal instruction. Cannot catch this with linker tricks
-	 * when optimizations are disabled. */
+	/*
+	 * generate an illegal instruction. Cannot catch this with
+	 * linker tricks when optimizations are disabled.
+	 */
 	__asm__ __volatile__("ud2");
 	return;
 }
-- 
2.34.1
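
Note for readers skimming the patch: the comments reflowed in urcu/system.h document a real usage contract. CMM_LOAD_SHARED() issues the read-side cache flush (cmm_smp_rmc()) before the load, and CMM_STORE_SHARED() issues the write-side flush (cmm_smp_wmc()) after the store. A minimal sketch of that pairing follows; CMM_LOAD_SHARED() and CMM_STORE_SHARED() are the liburcu macros touched above, while the "ready" flag and the publish/observe helpers are illustrative only:

#include <urcu/system.h>

static int ready;	/* flag shared between a writer and a reader thread */

/*
 * Writer side: CMM_STORE_SHARED() performs the store, then the required
 * cache flush, per the urcu/system.h comment.
 */
static void publish(void)
{
	CMM_STORE_SHARED(ready, 1);
}

/* Reader side: CMM_LOAD_SHARED() issues cmm_smp_rmc() before the load. */
static int observe(void)
{
	return CMM_LOAD_SHARED(ready);
}

On cache-coherent architectures both flushes reduce to simple compiler barriers, as the urcu/arch/generic.h comment reflowed in this patch explains.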