Add ACCESS_ONCE to _STORE_SHARED
[urcu.git] / arch_ppc.h
/*
 * arch_ppc.h: Definitions for the PowerPC architecture, derived from Linux.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; but only version 2 of the License given
 * that this comes from the Linux kernel.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 * Copyright (c) 2009 Paul E. McKenney, IBM Corporation.
 */

#define CONFIG_HAVE_FENCE 1
#define CONFIG_HAVE_MEM_COHERENCY

/*
 * The PPC "sync" instruction is a full memory barrier, so all three
 * flavours map to it.
 */
#define mb()	asm volatile("sync":::"memory")
#define rmb()	asm volatile("sync":::"memory")
#define wmb()	asm volatile("sync":::"memory")

/*
 * Architectures without cache coherency need something like the following:
 *
 * #define mb()		mc()
 * #define rmb()	rmc()
 * #define wmb()	wmc()
 * #define mc()		arch_cache_flush()
 * #define rmc()	arch_cache_flush_read()
 * #define wmc()	arch_cache_flush_write()
 */

#define mc()	barrier()
#define rmc()	barrier()
#define wmc()	barrier()

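/*
 * Pairing sketch (illustrative only; "data" and "ready" are hypothetical
 * variables, not part of this header).  wmb() on the producer side orders
 * the data store before the flag store; rmb() on the consumer side orders
 * the flag load before the data load:
 *
 *	// producer			// consumer
 *	data = 42;			while (!ready)
 *	wmb();					;	// spin
 *	ready = 1;			rmb();
 *					assert(data == 42);
 */
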
static inline void cpu_relax(void)
{
	barrier();
}
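
/*
 * Usage sketch (illustrative only; "flag" is a hypothetical variable).
 * cpu_relax() is intended for busy-wait loops; since it is a compiler
 * barrier here, the load of "flag" cannot be hoisted out of the loop:
 *
 *	extern int flag;
 *
 *	while (!flag)
 *		cpu_relax();
 */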

/* Workaround hook for PPC405 erratum #77; empty because PPC405 is not targeted here. */
#define PPC405_ERR77(ra,rb)
/* lwsync gives release ordering before an atomic op; isync gives acquire ordering after it. */
#define LWSYNC_ON_SMP "\n\tlwsync\n"
#define ISYNC_ON_SMP "\n\tisync\n"

#ifndef _INCLUDE_API_H

/* Atomically increment *v; the lwarx/stwcx. pair retries if another CPU races. */
static __inline__ void atomic_inc(int *v)
{
	int t;

	__asm__ __volatile__(
"1:	lwarx	%0,0,%2		# atomic_inc\n\
	addic	%0,%0,1\n"
	PPC405_ERR77(0,%2)
"	stwcx.	%0,0,%2 \n\
	bne-	1b"
	: "=&r" (t), "+m" (*v)
	: "r" (v)
	: "cc", "xer");
}

#endif /* #ifndef _INCLUDE_API_H */
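
/*
 * Usage sketch (illustrative only; "nr_events" is a hypothetical
 * counter).  Because the reservation loop retries on contention, any
 * number of threads can bump a shared counter without a lock:
 *
 *	static int nr_events;
 *
 *	static void count_event(void)
 *	{
 *		atomic_inc(&nr_events);
 *	}
 */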

/*
 * Casting through this dummy struct tells the compiler that an xchg()
 * may touch the whole object behind the pointer, not just one word.
 */
struct __xchg_dummy {
	unsigned long a[100];
};
#define __xg(x) ((struct __xchg_dummy *)(x))

#ifndef _INCLUDE_API_H

/*
 * Atomic exchange
 *
 * Changes the memory location '*p' to be val and returns
 * the previous value stored there.
 */
static __always_inline unsigned long
__xchg_u32(volatile void *p, unsigned long val)
{
	unsigned long prev;

	__asm__ __volatile__(
	LWSYNC_ON_SMP
"1:	lwarx	%0,0,%2 \n"
	PPC405_ERR77(0,%2)
"	stwcx.	%3,0,%2 \n\
	bne-	1b"
	ISYNC_ON_SMP
	: "=&r" (prev), "+m" (*(volatile unsigned int *)p)
	: "r" (p), "r" (val)
	: "cc", "memory");

	return prev;
}

/*
 * This function doesn't exist, so you'll get a linker error
 * if something tries to do an invalid xchg().
 */
extern void __xchg_called_with_bad_pointer(void);

static __always_inline unsigned long
__xchg(volatile void *ptr, unsigned long x, unsigned int size)
{
	switch (size) {
	case 4:
		return __xchg_u32(ptr, x);
#ifdef CONFIG_PPC64
	case 8:	/* __xchg_u64() is provided only on 64-bit builds */
		return __xchg_u64(ptr, x);
#endif
	}
	__xchg_called_with_bad_pointer();
	return x;
}

#define xchg(ptr,x)	\
	({	\
		__typeof__(*(ptr)) _x_ = (x);	\
		(__typeof__(*(ptr))) __xchg((ptr), (unsigned long)_x_, sizeof(*(ptr))); \
	})

#endif /* #ifndef _INCLUDE_API_H */
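
/*
 * Usage sketch (illustrative only; the lock variable and helpers are
 * hypothetical).  xchg() returns the previous value and carries the
 * LWSYNC/ISYNC ordering above, which is enough for a test-and-set lock:
 *
 *	static unsigned int lock;	// 0 == free, 1 == held
 *
 *	static void spin_lock(unsigned int *l)
 *	{
 *		while (xchg(l, 1))	// old value 1 means it was already held
 *			cpu_relax();
 *	}
 *
 *	static void spin_unlock(unsigned int *l)
 *	{
 *		xchg(l, 0);
 *	}
 */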

/* Read the lower / upper 32 bits of the PPC timebase register. */
#define mftbl() ({unsigned long rval;	\
	asm volatile("mftbl %0" : "=r" (rval)); rval;})
#define mftbu() ({unsigned long rval;	\
	asm volatile("mftbu %0" : "=r" (rval)); rval;})

typedef unsigned long long cycles_t;

static inline cycles_t get_cycles (void)
{
	unsigned long h;
	unsigned long l;

	for (;;) {
		h = mftbu();
		smp_mb();
		l = mftbl();
		smp_mb();
		if (mftbu() == h)
			return (((cycles_t) h) << 32) + l;
	}
}
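
/*
 * Usage sketch (illustrative only; do_work() is hypothetical).  The
 * loop in get_cycles() re-reads the upper half until it is stable, so
 * the 64-bit timestamp is consistent even if the lower half wraps
 * between the two reads:
 *
 *	cycles_t start, stop;
 *
 *	start = get_cycles();
 *	do_work();
 *	stop = get_cycles();
 *	printf("%llu timebase ticks\n", (unsigned long long)(stop - start));
 */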