+// SPDX-FileCopyrightText: 2009 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
+// SPDX-FileCopyrightText: 2009 Paul E. McKenney, IBM Corporation.
+//
+// SPDX-License-Identifier: LGPL-2.1-or-later
+
#ifndef _URCU_MEMB_STATIC_H
#define _URCU_MEMB_STATIC_H
/*
- * urcu-memb-static.h
- *
* Userspace RCU header.
*
* TO BE INCLUDED ONLY IN CODE THAT IS TO BE RECOMPILED ON EACH LIBURCU
* RELEASE. See urcu.h for linking dynamically with the userspace rcu library.
*
- * Copyright (c) 2009 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
- * Copyright (c) 2009 Paul E. McKenney, IBM Corporation.
- *
- * This library is free software; you can redistribute it and/or
- * modify it under the terms of the GNU Lesser General Public
- * License as published by the Free Software Foundation; either
- * version 2.1 of the License, or (at your option) any later version.
- *
- * This library is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * Lesser General Public License for more details.
- *
- * You should have received a copy of the GNU Lesser General Public
- * License along with this library; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
- *
* IBM's contributions to this file may be relicensed under LGPLv2 or later.
*/
#include <unistd.h>
#include <stdint.h>
+#include <urcu/annotate.h>
#include <urcu/debug.h>
#include <urcu/config.h>
#include <urcu/compiler.h>
/*
* Helper for _rcu_read_lock(). The format of urcu_memb_gp.ctr (as well as
- * the per-thread rcu_reader.ctr) has the upper bits containing a count of
- * _rcu_read_lock() nesting, and a lower-order bit that contains either zero
- * or URCU_GP_CTR_PHASE. The smp_mb_slave() ensures that the accesses in
+ * the per-thread rcu_reader.ctr) has the lower-order bits containing a count of
+ * _rcu_read_lock() nesting, and a single high-order URCU_GP_CTR_PHASE bit
+ * that is either set or cleared. The smp_mb_slave() ensures that the accesses in
* _rcu_read_lock() happen before the subsequent read-side critical section.
*/
static inline void _urcu_memb_read_lock_update(unsigned long tmp)
{
+ unsigned long *ctr = &URCU_TLS(urcu_memb_reader).ctr;
+
if (caa_likely(!(tmp & URCU_GP_CTR_NEST_MASK))) {
- _CMM_STORE_SHARED(URCU_TLS(urcu_memb_reader).ctr, _CMM_LOAD_SHARED(urcu_memb_gp.ctr));
+ unsigned long *pgctr = &urcu_memb_gp.ctr;
+ unsigned long gctr = uatomic_load(pgctr, CMM_RELAXED);
+
+ /* Paired with following mb slave. */
+ cmm_annotate_mem_acquire(pgctr);
+ uatomic_store(ctr, gctr, CMM_RELAXED);
+
urcu_memb_smp_mb_slave();
- } else
- _CMM_STORE_SHARED(URCU_TLS(urcu_memb_reader).ctr, tmp + URCU_GP_COUNT);
+ } else {
+ uatomic_store(ctr, tmp + URCU_GP_COUNT, CMM_RELAXED);
+ }
}
/*
*/
static inline void _urcu_memb_read_unlock_update_and_wakeup(unsigned long tmp)
{
+ unsigned long *ctr = &URCU_TLS(urcu_memb_reader).ctr;
+
if (caa_likely((tmp & URCU_GP_CTR_NEST_MASK) == URCU_GP_COUNT)) {
urcu_memb_smp_mb_slave();
- _CMM_STORE_SHARED(URCU_TLS(urcu_memb_reader).ctr, tmp - URCU_GP_COUNT);
+ cmm_annotate_mem_release(ctr);
+ uatomic_store(ctr, tmp - URCU_GP_COUNT, CMM_RELAXED);
urcu_memb_smp_mb_slave();
urcu_common_wake_up_gp(&urcu_memb_gp);
- } else
- _CMM_STORE_SHARED(URCU_TLS(urcu_memb_reader).ctr, tmp - URCU_GP_COUNT);
+ } else {
+ uatomic_store(ctr, tmp - URCU_GP_COUNT, CMM_RELAXED);
+ }
}
/*