+// SPDX-FileCopyrightText: 2010 Paolo Bonzini <pbonzini@redhat.com>
+//
+// SPDX-License-Identifier: LGPL-2.1-or-later
+
#ifndef _URCU_ARCH_GENERIC_H
#define _URCU_ARCH_GENERIC_H
/*
* arch_generic.h: common definitions for multiple architectures.
- *
- * Copyright (c) 2010 Paolo Bonzini <pbonzini@redhat.com>
- *
- * This library is free software; you can redistribute it and/or
- * modify it under the terms of the GNU Lesser General Public
- * License as published by the Free Software Foundation; either
- * version 2.1 of the License, or (at your option) any later version.
- *
- * This library is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * Lesser General Public License for more details.
- *
- * You should have received a copy of the GNU Lesser General Public
- * License along with this library; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#include <urcu/compiler.h>
/*
* For architectures with cache coherency, cmm_mc/cmm_rc/cmm_wc are simple
* compiler barriers; in addition, we provide defaults for cmm_mb (using
* GCC builtins) as well as cmm_rmb and cmm_wmb (defaulting to cmm_mb).
*/
+#ifdef CONFIG_RCU_USE_ATOMIC_BUILTINS
+
+# ifndef cmm_smp_mb
+# define cmm_smp_mb() __atomic_thread_fence(__ATOMIC_SEQ_CST)
+# endif
+
+#endif /* CONFIG_RCU_USE_ATOMIC_BUILTINS */
+
+
+/*
+ * cmm_mb() expands to __sync_synchronize() rather than __atomic_thread_fence()
+ * with __ATOMIC_SEQ_CST because the former "issues a full memory barrier",
+ * while the latter only "acts as a synchronization fence between threads",
+ * which is too weak for what we need here, for example when ordering accesses
+ * to I/O devices.
+ *
+ * Even though __sync_synchronize() currently appears to be an alias for a
+ * sequentially consistent atomic thread fence on every architecture supported
+ * by GCC and Clang, that equivalence is not guaranteed to hold in the future.
+ * cmm_mb() is therefore defined with __sync_synchronize() below, relying on
+ * the stronger documented guarantee.
+ *
+ * The quoted phrases above are taken from the GCC manual.
+ */
#ifndef cmm_mb
#define cmm_mb() __sync_synchronize()
#endif
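+
+/*
+ * Illustrative sketch (hypothetical caller, not part of this header): the
+ * distinction above matters when a store must reach an I/O device before a
+ * subsequent MMIO write.  shared_buf and doorbell stand for volatile device
+ * mappings and are made up for the example.
+ *
+ *	*shared_buf = data;
+ *	cmm_mb();	// full barrier: __sync_synchronize()
+ *	*doorbell = 1;	// device kick must not be reordered before the store
+ *
+ * cmm_smp_mb() only promises ordering with respect to other threads, which,
+ * per the note above, is not necessarily enough for device accesses.
+ */
+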
#ifndef HAS_CAA_GET_CYCLES
#define HAS_CAA_GET_CYCLES
-#ifdef CONFIG_RCU_HAVE_CLOCK_GETTIME
+#if defined(__APPLE__)
+#include <mach/mach.h>
+#include <mach/clock.h>
+#include <mach/mach_time.h>
#include <time.h>
#include <stdint.h>
static inline caa_cycles_t caa_get_cycles (void)
{
- struct timespec ts;
+ mach_timespec_t ts = { 0, 0 };
+ static clock_serv_t clock_service;
- if (caa_unlikely(clock_gettime(CLOCK_MONOTONIC, &ts)))
+ if (caa_unlikely(!clock_service)) {
+ if (host_get_clock_service(mach_host_self(),
+ SYSTEM_CLOCK, &clock_service))
+ return -1ULL;
+ }
+ if (caa_unlikely(clock_get_time(clock_service, &ts)))
return -1ULL;
return ((uint64_t) ts.tv_sec * 1000000000ULL) + ts.tv_nsec;
}
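+
+/*
+ * The Mach clock service port is looked up lazily on first use and cached in
+ * the function-local static above, so host_get_clock_service() is normally
+ * called only once per process.  Any failure, in the lookup or in
+ * clock_get_time(), is reported as -1ULL, like the clock_gettime() variant
+ * below.
+ */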
-#elif defined(__APPLE__)
+#elif defined(CONFIG_RCU_HAVE_CLOCK_GETTIME)
-#include <mach/mach.h>
-#include <mach/clock.h>
-#include <mach/mach_time.h>
#include <time.h>
#include <stdint.h>
static inline caa_cycles_t caa_get_cycles (void)
{
- mach_timespec_t ts = { 0, 0 };
- static clock_serv_t clock_service;
+ struct timespec ts;
- if (caa_unlikely(!clock_service)) {
- if (host_get_clock_service(mach_host_self(),
- SYSTEM_CLOCK, &clock_service))
- return -1ULL;
- }
- if (caa_unlikely(clock_get_time(clock_service, &ts)))
+ if (caa_unlikely(clock_gettime(CLOCK_MONOTONIC, &ts)))
return -1ULL;
return ((uint64_t) ts.tv_sec * 1000000000ULL) + ts.tv_nsec;
}
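+
+/*
+ * Usage sketch (hypothetical caller, not part of this header): timing an
+ * interval with caa_get_cycles().  Both variants above return monotonic
+ * nanoseconds, or -1ULL on error, so the difference of two successful
+ * readings is an elapsed time in nanoseconds.  Assumes <stdio.h> and
+ * <inttypes.h> in the caller; do_work() is made up for the example.
+ *
+ *	caa_cycles_t start, end;
+ *
+ *	start = caa_get_cycles();
+ *	do_work();
+ *	end = caa_get_cycles();
+ *	if (start != (caa_cycles_t) -1ULL && end != (caa_cycles_t) -1ULL)
+ *		printf("elapsed: %" PRIu64 " ns\n", (uint64_t) (end - start));
+ */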