cmm: provide lightweight smp_rmb/smp_wmb on PPC
[urcu.git] / urcu/arch/ppc.h
#ifndef _URCU_ARCH_PPC_H
#define _URCU_ARCH_PPC_H

/*
 * arch_ppc.h: trivial definitions for the powerpc architecture.
 *
 * Copyright (c) 2009 Paul E. McKenney, IBM Corporation.
 * Copyright (c) 2009 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include <urcu/compiler.h>
#include <urcu/config.h>

#ifdef __cplusplus
extern "C" {
#endif

/* Include size of POWER5+ L3 cache lines: 256 bytes */
#define CAA_CACHE_LINE_SIZE	256
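
/*
 * Illustrative sketch (not part of the original header; the struct name
 * is hypothetical): a constant like CAA_CACHE_LINE_SIZE is typically
 * used to align or pad hot data so that independently-updated fields do
 * not share a cache line (false sharing), e.g. with a GCC-style
 * attribute:
 *
 *	struct per_thread_count {
 *		unsigned long count;
 *	} __attribute__((aligned(CAA_CACHE_LINE_SIZE)));
 */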

/*
 * Use sync for all cmm_mb/rmb/wmb barriers because lwsync does not
 * preserve ordering of cacheable vs. non-cacheable accesses, so it
 * should not be used to order with respect to MMIO operations.  An
 * eieio+lwsync pair is also not enough for cmm_rmb, because it will
 * order cacheable and non-cacheable memory operations separately---i.e.
 * not the latter against the former.
 */
#define cmm_mb()	asm volatile("sync":::"memory")

/*
 * lwsync orders loads in cacheable memory with respect to other loads,
 * and stores in cacheable memory with respect to other stores.
 * Therefore, use it for barriers ordering accesses to cacheable memory
 * only.
 */
#define cmm_smp_rmb()	asm volatile("lwsync":::"memory")
#define cmm_smp_wmb()	asm volatile("lwsync":::"memory")
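
/*
 * Illustrative sketch (assumed usage, not part of the original header;
 * all names below are hypothetical): cmm_smp_wmb() on the writer side
 * pairs with cmm_smp_rmb() on the reader side, so that a reader seeing
 * ready == 1 is guaranteed to also see the payload store:
 *
 *	static int payload;
 *	static int ready;
 *
 *	static void writer(void)
 *	{
 *		payload = 42;
 *		cmm_smp_wmb();	// order payload store before flag store
 *		ready = 1;
 *	}
 *
 *	static int reader(void)
 *	{
 *		if (!ready)
 *			return -1;
 *		cmm_smp_rmb();	// order flag load before payload load
 *		return payload;
 *	}
 */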

/* Read the lower 32 bits of the time base register. */
#define mftbl()						\
	({						\
		unsigned long rval;			\
		asm volatile("mftbl %0" : "=r" (rval));	\
		rval;					\
	})

/* Read the upper 32 bits of the time base register. */
#define mftbu()						\
	({						\
		unsigned long rval;			\
		asm volatile("mftbu %0" : "=r" (rval));	\
		rval;					\
	})

/* Read the full 64-bit time base (64-bit processors only). */
#define mftb()						\
	({						\
		unsigned long long rval;		\
		asm volatile("mftb %0" : "=r" (rval));	\
		rval;					\
	})

typedef unsigned long long cycles_t;

#ifdef __powerpc64__
static inline cycles_t caa_get_cycles(void)
{
	return (cycles_t) mftb();
}
#else
static inline cycles_t caa_get_cycles(void)
{
	unsigned long h, l;

	/*
	 * On 32-bit, read the upper half, then the lower half, then
	 * re-read the upper half; retry until the upper half is stable,
	 * which guarantees a consistent 64-bit timestamp.
	 */
	for (;;) {
		h = mftbu();
		cmm_barrier();
		l = mftbl();
		cmm_barrier();
		if (mftbu() == h)
			return (((cycles_t) h) << 32) + l;
	}
}
#endif
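
/*
 * Illustrative sketch (not part of the original header; do_work() is a
 * hypothetical workload): cycles_t timestamps from caa_get_cycles() can
 * be subtracted to measure elapsed time-base ticks:
 *
 *	cycles_t start, end;
 *
 *	start = caa_get_cycles();
 *	do_work();
 *	end = caa_get_cycles();
 *	printf("elapsed: %llu time-base ticks\n",
 *	       (unsigned long long) (end - start));
 */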

#ifdef __cplusplus
}
#endif

#include <urcu/arch/generic.h>

#endif /* _URCU_ARCH_PPC_H */