/*
 * kernelcompat.h — userspace (UST) compatibility shims for Linux kernel
 * APIs: error pointers, mutexes, allocation, RCU, atomic and local
 * counters, bit math, and the trace clock.
 */
1#ifndef KERNELCOMPAT_H
2#define KERNELCOMPAT_H
3
#include <kcompat.h>

#include "compiler.h"

#include <string.h>
#include <sys/time.h>
9
/*
 * container_of - cast a pointer to a struct member out to the
 * containing structure.
 * @ptr: pointer to the member
 * @type: type of the struct @ptr is embedded in
 * @member: name of the member within @type
 *
 * Mirror of the Linux kernel macro of the same name.
 */
#define container_of(ptr, type, member) ({ \
	const typeof( ((type *)0)->member ) *__mptr = (ptr); \
	(type *)( (char *)__mptr - offsetof(type,member) );})
13
/* printk() severity prefixes. In the kernel these expand to marker
 * strings; here printk() maps to printf() (see below), so the levels
 * are simply dropped by expanding to empty strings. */
#define KERN_DEBUG ""
#define KERN_NOTICE ""
#define KERN_INFO ""
#define KERN_ERR ""
#define KERN_ALERT ""
#define KERN_WARNING ""
20
/* ERROR OPS */

/* The topmost MAX_ERRNO addresses are reserved for encoding negative
 * errno values as pointers (kernel ERR_PTR convention). */
#define MAX_ERRNO 4095

/* Cast the argument explicitly so the comparison is performed on
 * unsigned long regardless of the argument's own type (matches the
 * hardened kernel definition). */
#define IS_ERR_VALUE(x) unlikely((unsigned long)(x) >= (unsigned long)-MAX_ERRNO)
26
/* Encode a negative errno value as an error pointer (see IS_ERR). */
static inline void *ERR_PTR(long error)
{
	void *encoded = (void *)error;

	return encoded;
}
31
/* Decode the errno value previously packed into a pointer by ERR_PTR(). */
static inline long PTR_ERR(const void *ptr)
{
	long err = (long)ptr;

	return err;
}
36
/* Return nonzero iff @ptr is actually an encoded errno (see ERR_PTR). */
static inline long IS_ERR(const void *ptr)
{
	return IS_ERR_VALUE((unsigned long)ptr);
}
41
42
43/* FIXED SIZE INTEGERS */
44
45//#include <stdint.h>
46
47//typedef uint8_t u8;
48//typedef uint16_t u16;
49//typedef uint32_t u32;
50//typedef uint64_t u64;
51
/*
 * Type-checked min/max: both operands are first converted to @type and
 * each argument is evaluated exactly once (GNU statement expression),
 * avoiding the classic double-evaluation macro hazard.
 */
#define min_t(type, x, y) ({ \
	type __min1 = (x); \
	type __min2 = (y); \
	__min1 < __min2 ? __min1: __min2; })

#define max_t(type, x, y) ({ \
	type __max1 = (x); \
	type __max2 = (y); \
	__max1 > __max2 ? __max1: __max2; })
61
62
/* MUTEXES */

#include <pthread.h>

/* Map the kernel mutex API onto pthreads.
 * NOTE(review): the trailing ';' in the two definitions below produces
 * a stray empty declaration when the invocation is itself followed by
 * ';' — kept as-is because existing callers may invoke them without
 * one. */
#define DEFINE_MUTEX(m) pthread_mutex_t (m) = PTHREAD_MUTEX_INITIALIZER;
#define DECLARE_MUTEX(m) extern pthread_mutex_t (m);

#define mutex_lock(m) pthread_mutex_lock(m)

#define mutex_unlock(m) pthread_mutex_unlock(m)
73
74
/* MALLOCATION */

#include <stdlib.h>

/*
 * Kernel allocation API mapped onto libc. The gfp flags argument (t)
 * is ignored — userspace allocation has no sleeping/atomic contexts.
 * The kfree() argument is parenthesized: the original free((void *)p)
 * bound the cast tighter than ',' or '?:' in the argument, so e.g.
 * kfree(cond ? a : b) expanded incorrectly.
 */
#define kmalloc(s, t) malloc(s)
#define kzalloc(s, t) zmalloc(s)
#define kfree(p) free((void *)(p))
#define kstrdup(s, t) strdup(s)

/* Zeroing allocator built on calloc(). */
#define zmalloc(s) calloc(1, s)

/* gfp flags are meaningless in userspace; expands to nothing. */
#define GFP_KERNEL
87
/* PRINTK */

#include <stdio.h>
/* printk() maps straight to printf(); the KERN_* severity prefixes are
 * empty strings. Uses the GNU named-variadic-macro extension, like the
 * rest of this header relies on GNU C. */
#define printk(fmt, args...) printf(fmt, ## args)

/* MEMORY BARRIERS */

/* The atomic shims below are not SMP-safe, so the matching barrier is
 * a no-op here. */
#define smp_mb__after_atomic_inc() do {} while(0)
96
97/* RCU */
98
99#include "urcu.h"
100#define call_rcu_sched(a,b) b(a); synchronize_rcu()
101#define rcu_barrier_sched() do {} while(0) /* this nop is ok if call_rcu_sched does a synchronize_rcu() */
102#define rcu_read_lock_sched_notrace() rcu_read_lock()
103#define rcu_read_unlock_sched_notrace() rcu_read_unlock()
104
105/* ATOMICITY */
106
107#include <signal.h>
108
109static inline int atomic_dec_and_test(atomic_t *p)
110{
111 (p->counter)--;
112 return !p->counter;
113}
114
/* Plain store of @v into the counter (no atomicity guarantee here). */
static inline void atomic_set(atomic_t *p, int v)
{
	p->counter=v;
}
119
120static inline void atomic_inc(atomic_t *p)
121{
122 p->counter++;
123}
124
125static int atomic_read(atomic_t *p)
126{
127 return p->counter;
128}
129
/* atomic_long_* degrade to the plain int-sized atomic_t shims. */
#define atomic_long_t atomic_t
#define atomic_long_set atomic_set
#define atomic_long_read atomic_read

//#define __xg(x) ((volatile long *)(x))

/* Generic compare-and-exchange dispatching on operand size to
 * __cmpxchg() — presumably provided by kcompat.h; verify there.
 * Returns the previous value, cast back to the pointee type. */
#define cmpxchg(ptr, o, n) \
	((__typeof__(*(ptr)))__cmpxchg((ptr), (unsigned long)(o), \
				       (unsigned long)(n), sizeof(*(ptr))))

//#define local_cmpxchg cmpxchg
/* local_t wraps an atomic_long_t named 'a' (see below); cmpxchg is
 * applied to its embedded counter. */
#define local_cmpxchg(l, o, n) (cmpxchg(&((l)->a.counter), (o), (n)))

#define atomic_long_cmpxchg(v, old, new) (cmpxchg(&((v)->counter), (old), (new)))
144
145
146/* LOCAL OPS */
147
//typedef int local_t;
/* Kernel per-CPU local counter type; in userspace it simply wraps an
 * atomic_long_t (itself an alias of atomic_t here). */
typedef struct
{
	atomic_long_t a;
} local_t;
153
154
155static inline void local_inc(local_t *l)
156{
157 (l->a.counter)++;
158}
159
/* Plain store of @v into the local counter. */
static inline void local_set(local_t *l, int v)
{
	l->a.counter = v;
}
164
165static inline void local_add(int v, local_t *l)
166{
167 l->a.counter += v;
168}
169
170static int local_add_return(int v, local_t *l)
171{
172 return l->a.counter += v;
173}
174
175static inline int local_read(local_t *l)
176{
177 return l->a.counter;
178}
179
180
/* ATTRIBUTES */

/* Kernel code annotations with no userspace equivalent: all no-ops. */
#define ____cacheline_aligned
#define __init
#define __exit
186
187/* MATH */
188
/*
 * hweight32 - population count: number of set bits in @w.
 * Kernighan's method: each pass clears the lowest set bit, so the loop
 * iterates once per set bit.
 */
static inline unsigned int hweight32(unsigned int w)
{
	unsigned int bits = 0;

	while (w) {
		w &= w - 1;
		bits++;
	}
	return bits;
}
197
/*
 * fls - find last (most significant) set bit.
 * Returns the 1-based index of the highest set bit, or 0 if @x == 0.
 * x86-only: BSR leaves its destination undefined when the source is 0,
 * so CMOVZ loads -1 in that case, making the final "r + 1" yield 0.
 * NOTE(review): unconditionally assumes CMOV is available; the
 * non-CMOV fallback is kept commented out below.
 */
static inline int fls(int x)
{
	int r;
//ust// #ifdef CONFIG_X86_CMOV
	asm("bsrl %1,%0\n\t"
	    "cmovzl %2,%0"
	    : "=&r" (r) : "rm" (x), "rm" (-1));
//ust// #else
//ust// asm("bsrl %1,%0\n\t"
//ust// "jnz 1f\n\t"
//ust// "movl $-1,%0\n"
//ust// "1:" : "=r" (r) : "rm" (x));
//ust// #endif
	return r + 1;
}
213
/* Return ceil(log2(count)): the order of the smallest power of two
 * that is >= @count. */
static __inline__ int get_count_order(unsigned int count)
{
	int order = fls(count);

	/* exact powers of two need one order less */
	if ((count & (count - 1)) == 0)
		order--;
	return order;
}
223
224
225
226
227#include <unistd.h>
228
/* Round @x up to the next multiple of @a (@a must be a power of two). */
#define ALIGN(x,a) __ALIGN_MASK(x,(typeof(x))(a)-1)
#define __ALIGN_MASK(x,mask) (((x)+(mask))&~(mask))
#define PAGE_ALIGN(addr) ALIGN(addr, PAGE_SIZE)
/* Runtime page size — not a compile-time constant as in the kernel. */
#define PAGE_SIZE sysconf(_SC_PAGE_SIZE)
/* NOTE(review): this is the INVERSE of the kernel's PAGE_MASK
 * (~(PAGE_SIZE-1)) — callers here apparently expect the low-bits mask;
 * verify before "fixing". */
#define PAGE_MASK (PAGE_SIZE-1)
234
235
236
237
/* ARRAYS */

/* Number of elements in a true array (invalid on pointers, which is
 * what array parameters decay to). */
#define ARRAY_SIZE(arr) (sizeof(arr) / sizeof((arr)[0]))
241
242/* TRACE CLOCK */
243
244//ust// static inline u64 trace_clock_read64(void)
245//ust// {
246//ust// uint32_t low;
247//ust// uint32_t high;
248//ust// uint64_t retval;
249//ust// __asm__ volatile ("rdtsc\n" : "=a" (low), "=d" (high));
250//ust//
251//ust// retval = high;
252//ust// retval <<= 32;
253//ust// return retval | low;
254//ust// }
255
256static inline u64 trace_clock_read64(void)
257{
258 struct timeval tv;
259 u64 retval;
260
261 gettimeofday(&tv, NULL);
262 retval = tv.tv_sec;
263 retval *= 1000000;
264 retval += tv.tv_usec;
265
266 return retval;
267}
268
269static inline u64 trace_clock_frequency(void)
270{
271 return 1000000LL;
272}
273
274static inline u32 trace_clock_freq_scale(void)
275{
276 return 1;
277}
278
279
/* LISTS */

/* Without kernel RCU readers to protect against, the RCU list variants
 * degrade to the plain list operations. */
#define list_add_rcu list_add
#define list_for_each_entry_rcu list_for_each_entry


/* Symbol export is a kernel concept; nothing to do in userspace. */
#define EXPORT_SYMBOL_GPL(a) /*nothing*/

/* No meaningful CPU number in userspace; -1 flags "unknown". */
#define smp_processor_id() (-1)
289
290#endif /* KERNELCOMPAT_H */
/* end of kernelcompat.h */