Fix c99 compatibility: use __asm__ and __volatile__ in public headers
[urcu.git] / urcu/arch/ppc.h
#ifndef _URCU_ARCH_PPC_H
#define _URCU_ARCH_PPC_H

/*
 * arch_ppc.h: trivial definitions for the powerpc architecture.
 *
 * Copyright (c) 2009 Paul E. McKenney, IBM Corporation.
 * Copyright (c) 2009 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include <urcu/compiler.h>
#include <urcu/config.h>

#ifdef __cplusplus
extern "C" {
#endif

/* Include size of POWER5+ L3 cache lines: 256 bytes */
#define CAA_CACHE_LINE_SIZE	256
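
/*
 * Illustrative sketch (not part of the original header):
 * CAA_CACHE_LINE_SIZE is commonly used to align independently-updated
 * data on its own cache line so concurrent writers do not false-share.
 * "struct percpu_count" is a hypothetical name, not an urcu API.
 *
 *	struct percpu_count {
 *		unsigned long count;
 *	} __attribute__((__aligned__(CAA_CACHE_LINE_SIZE)));
 */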

#ifdef __NO_LWSYNC__
#define LWSYNC_OPCODE	"sync\n"
#else
#define LWSYNC_OPCODE	"lwsync\n"
#endif

/*
 * Use sync for all cmm_mb/rmb/wmb barriers because lwsync does not
 * preserve ordering of cacheable vs. non-cacheable accesses, so it
 * should not be used to order with respect to MMIO operations.  An
 * eieio+lwsync pair is also not enough for cmm_rmb, because it will
 * order cacheable and non-cacheable memory operations separately---i.e.
 * not the latter against the former.
 */
#define cmm_mb()	__asm__ __volatile__ ("sync":::"memory")
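
/*
 * Illustrative sketch (not part of the original header): cmm_mb() is
 * the barrier to use when cacheable stores must be observed before an
 * MMIO store, e.g. filling in a DMA descriptor before ringing a device
 * doorbell register.  "desc" and "doorbell" are hypothetical names.
 *
 *	desc->addr = dma_addr;			(cacheable stores)
 *	desc->len = len;
 *	cmm_mb();				(sync orders cacheable vs. MMIO)
 *	*(volatile uint32_t *) doorbell = 1;	(MMIO store)
 */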

/*
 * lwsync orders loads in cacheable memory with respect to other loads,
 * and stores in cacheable memory with respect to other stores.
 * Therefore, use it for barriers ordering accesses to cacheable memory
 * only.
 */
#define cmm_smp_rmb()	__asm__ __volatile__ (LWSYNC_OPCODE:::"memory")
#define cmm_smp_wmb()	__asm__ __volatile__ (LWSYNC_OPCODE:::"memory")
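
/*
 * Illustrative sketch (not part of the original header): the usual
 * publish/consume pairing over cacheable shared memory.  "data" and
 * "ready" are hypothetical shared variables.
 *
 *	producer:
 *		data = 42;
 *		cmm_smp_wmb();		(order data store before flag store)
 *		ready = 1;
 *
 *	consumer:
 *		while (!ready)
 *			;
 *		cmm_smp_rmb();		(order flag load before data load)
 *		assert(data == 42);
 */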

#define mftbl()							\
	({							\
		unsigned long rval;				\
		__asm__ __volatile__ ("mftbl %0" : "=r" (rval));	\
		rval;						\
	})

#define mftbu()							\
	({							\
		unsigned long rval;				\
		__asm__ __volatile__ ("mftbu %0" : "=r" (rval));	\
		rval;						\
	})

#define mftb()							\
	({							\
		unsigned long long rval;			\
		__asm__ __volatile__ ("mftb %0" : "=r" (rval));	\
		rval;						\
	})

typedef unsigned long long cycles_t;

#ifdef __powerpc64__
static inline cycles_t caa_get_cycles(void)
{
	return (cycles_t) mftb();
}
#else
/*
 * On 32-bit powerpc the 64-bit timebase cannot be read atomically:
 * read the upper half, then the lower half, and retry if the upper
 * half changed in the meantime (a carry from low to high word would
 * otherwise go unnoticed).
 */
static inline cycles_t caa_get_cycles(void)
{
	unsigned long h, l;

	for (;;) {
		h = mftbu();
		cmm_barrier();
		l = mftbl();
		cmm_barrier();
		if (mftbu() == h)
			return (((cycles_t) h) << 32) + l;
	}
}
#endif
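
/*
 * Usage sketch (not part of the original header): timing a code section
 * with caa_get_cycles().  Results are in timebase ticks; the timebase
 * frequency is platform-dependent, so convert before reporting seconds.
 * "do_work" is a hypothetical workload.
 *
 *	cycles_t start, end;
 *
 *	start = caa_get_cycles();
 *	do_work();
 *	end = caa_get_cycles();
 *	(end - start) is the elapsed time in timebase ticks.
 */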

#ifdef __cplusplus
}
#endif

#include <urcu/arch/generic.h>

#endif /* _URCU_ARCH_PPC_H */