X-Git-Url: https://git.liburcu.org/?p=urcu.git;a=blobdiff_plain;f=compat_arch_x86.c;h=3e73f9c75953a8e967afe9064e3c79609dc0d2d5;hp=e08ac89f2748721682cdc3fe251534a0822a92cf;hb=84f4ccb4bcf32c0336a0c7a3a4ba76d90d6dea1b;hpb=d45599f4bfd3283dbab0f023d051b9b9b0639c0d

diff --git a/compat_arch_x86.c b/compat_arch_x86.c
index e08ac89..3e73f9c 100644
--- a/compat_arch_x86.c
+++ b/compat_arch_x86.c
@@ -3,7 +3,7 @@
  *
  * Userspace RCU library - x86 compatibility checks
  *
- * Copyright (c) 2009 Mathieu Desnoyers
+ * Copyright (c) 2009 Mathieu Desnoyers
  *
  * This library is free software; you can redistribute it and/or
  * modify it under the terms of the GNU Lesser General Public
@@ -24,41 +24,52 @@
 #include
 #include
 #include
-#include
+#include
+
+/*
+ * Using attribute "weak" for __rcu_cas_avail and
+ * __urcu_x86_compat_mutex. Those are globally visible by the entire
+ * program, even though many shared objects may have their own version.
+ * The first version that gets loaded will be used by the entire
+ * program (executable and all shared objects).
+ */
 
 /*
  * It does not really matter if the constructor is called before using
- * the library, as long as the caller checks if __urcu_cas_avail < 0 and calls
+ * the library, as long as the caller checks if __rcu_cas_avail < 0 and calls
  * compat_arch_init() explicitely if needed.
  */
-int __attribute__((constructor)) __urcu_cas_init(void);
-
-static pthread_mutex_t compat_mutex = PTHREAD_MUTEX_INITIALIZER;
+int __attribute__((constructor)) __rcu_cas_init(void);
 
 /*
  * -1: unknown
  * 1: available
  * 0: unavailable
  */
-int __urcu_cas_avail = -1;
+__attribute__((weak))
+int __rcu_cas_avail = -1;
+
+__attribute__((weak))
+pthread_mutex_t __urcu_x86_compat_mutex = PTHREAD_MUTEX_INITIALIZER;
 
 /*
- * Imported from glibc 2.3.5. linuxthreads/sysdeps/i386/pt-machine.h.
+ * get_eflags/set_eflags/compare_and_swap_is_available imported from glibc
+ * 2.3.5. linuxthreads/sysdeps/i386/pt-machine.h.
  */
-int get_eflags (void)
+static int get_eflags (void)
 {
   int res;
   __asm__ __volatile__ ("pushfl; popl %0" : "=r" (res) : );
   return res;
 }
 
-void set_eflags (int newflags)
+static void set_eflags (int newflags)
 {
   __asm__ __volatile__ ("pushl %0; popfl" : : "r" (newflags) : "cc");
 }
 
-int compare_and_swap_is_available (void)
+static int compare_and_swap_is_available (void)
 {
   int oldflags = get_eflags ();
   int changed;
@@ -73,57 +84,220 @@ int compare_and_swap_is_available (void)
   return changed != 0;
 }
 
-unsigned long _compat_uatomic_cmpxchg(void *addr, unsigned long old,
-		unsigned long _new, int len)
+static void mutex_lock_signal_save(pthread_mutex_t *mutex, sigset_t *oldmask)
 {
-	sigset_t newmask, oldmask;
+	sigset_t newmask;
 	int ret;
 
 	/* Disable signals */
-	ret = sigemptyset(&newmask);
+	ret = sigfillset(&newmask);
+	assert(!ret);
+	ret = pthread_sigmask(SIG_BLOCK, &newmask, oldmask);
+	assert(!ret);
+	ret = pthread_mutex_lock(&__urcu_x86_compat_mutex);
 	assert(!ret);
-	ret = pthread_sigmask(SIG_SETMASK, &newmask, &oldmask);
+}
+
+static void mutex_lock_signal_restore(pthread_mutex_t *mutex, sigset_t *oldmask)
+{
+	int ret;
+
+	ret = pthread_mutex_unlock(&__urcu_x86_compat_mutex);
 	assert(!ret);
-	ret = pthread_mutex_lock(&compat_mutex);
+	ret = pthread_sigmask(SIG_SETMASK, oldmask, NULL);
 	assert(!ret);
+}
+
+unsigned long _compat_uatomic_set(void *addr, unsigned long _new, int len)
+{
+	sigset_t mask;
+	unsigned long result;
 
+	mutex_lock_signal_save(&__urcu_x86_compat_mutex, &mask);
+	switch (len) {
+	case 1:
+		*(unsigned char *)addr = (unsigned char)_new;
+		result = *(unsigned char *)addr;
+		break;
+	case 2:
+		*(unsigned short *)addr = (unsigned short)_new;
+		result = *(unsigned short *)addr;
+		break;
+	case 4:
+		*(unsigned int *)addr = (unsigned int)_new;
+		result = *(unsigned int *)addr;
+		break;
+	default:
+		/*
+		 * generate an illegal instruction. Cannot catch this with
+		 * linker tricks when optimizations are disabled.
+		 */
+		result = 0;
+		__asm__ __volatile__("ud2");
+	}
+	mutex_lock_signal_restore(&__urcu_x86_compat_mutex, &mask);
+	return result;
+}
+
+unsigned long _compat_uatomic_xchg(void *addr, unsigned long _new, int len)
+{
+	sigset_t mask;
+	unsigned long retval;
+
+	mutex_lock_signal_save(&__urcu_x86_compat_mutex, &mask);
+	switch (len) {
+	case 1:
+		retval = *(unsigned char *)addr;
+		*(unsigned char *)addr = (unsigned char)_new;
+		break;
+	case 2:
+		retval = *(unsigned short *)addr;
+		*(unsigned short *)addr = (unsigned short)_new;
+		break;
+	case 4:
+		retval = *(unsigned int *)addr;
+		*(unsigned int *)addr = (unsigned int)_new;
+		break;
+	default:
+		/*
+		 * generate an illegal instruction. Cannot catch this with
+		 * linker tricks when optimizations are disabled.
+		 */
+		retval = 0; /* silence gcc warnings */
+		__asm__ __volatile__("ud2");
+	}
+	mutex_lock_signal_restore(&__urcu_x86_compat_mutex, &mask);
+	return retval;
+}
+
+unsigned long _compat_uatomic_cmpxchg(void *addr, unsigned long old,
+		unsigned long _new, int len)
+{
+	unsigned long retval;
+	sigset_t mask;
+
+	mutex_lock_signal_save(&__urcu_x86_compat_mutex, &mask);
 	switch (len) {
 	case 1:
 	{
 		unsigned char result = *(unsigned char *)addr;
-		if (result == old)
+		if (result == (unsigned char)old)
 			*(unsigned char *)addr = (unsigned char)_new;
-		return result;
+		retval = result;
+		break;
 	}
 	case 2:
 	{
 		unsigned short result = *(unsigned short *)addr;
-		if (result == old)
+		if (result == (unsigned short)old)
 			*(unsigned short *)addr = (unsigned short)_new;
-		return result;
+		retval = result;
+		break;
 	}
 	case 4:
 	{
 		unsigned int result = *(unsigned int *)addr;
-		if (result == old)
+		if (result == (unsigned int)old)
 			*(unsigned int *)addr = (unsigned int)_new;
-		return result;
+		retval = result;
+		break;
 	}
+	default:
+		/*
+		 * generate an illegal instruction. Cannot catch this with
+		 * linker tricks when optimizations are disabled.
+		 */
+		retval = 0; /* silence gcc warnings */
+		__asm__ __volatile__("ud2");
 	}
-	/* generate an illegal instruction. Cannot catch this with linker tricks
-	 * when optimizations are disabled. */
-	__asm__ __volatile__("ud2");
-	return 0;
+	mutex_lock_signal_restore(&__urcu_x86_compat_mutex, &mask);
+	return retval;
+}
 
-	ret = pthread_mutex_unlock(&compat_mutex);
-	assert(!ret);
-	ret = pthread_sigmask(SIG_SETMASK, &oldmask, NULL);
-	assert(!ret);
+void _compat_uatomic_or(void *addr, unsigned long v, int len)
+{
+	sigset_t mask;
+
+	mutex_lock_signal_save(&__urcu_x86_compat_mutex, &mask);
+	switch (len) {
+	case 1:
+		*(unsigned char *)addr |= (unsigned char)v;
+		break;
+	case 2:
+		*(unsigned short *)addr |= (unsigned short)v;
+		break;
+	case 4:
+		*(unsigned int *)addr |= (unsigned int)v;
+		break;
+	default:
+		/*
+		 * generate an illegal instruction. Cannot catch this with
+		 * linker tricks when optimizations are disabled.
+		 */
+		__asm__ __volatile__("ud2");
+	}
+	mutex_lock_signal_restore(&__urcu_x86_compat_mutex, &mask);
+}
+
+void _compat_uatomic_and(void *addr, unsigned long v, int len)
+{
+	sigset_t mask;
+
+	mutex_lock_signal_save(&__urcu_x86_compat_mutex, &mask);
+	switch (len) {
+	case 1:
+		*(unsigned char *)addr &= (unsigned char)v;
+		break;
+	case 2:
+		*(unsigned short *)addr &= (unsigned short)v;
+		break;
+	case 4:
+		*(unsigned int *)addr &= (unsigned int)v;
+		break;
+	default:
+		/*
+		 * generate an illegal instruction. Cannot catch this with
+		 * linker tricks when optimizations are disabled.
+		 */
+		__asm__ __volatile__("ud2");
+	}
+	mutex_lock_signal_restore(&__urcu_x86_compat_mutex, &mask);
+}
+
+unsigned long _compat_uatomic_add_return(void *addr, unsigned long v, int len)
+{
+	sigset_t mask;
+	unsigned long result;
+
+	mutex_lock_signal_save(&__urcu_x86_compat_mutex, &mask);
+	switch (len) {
+	case 1:
+		*(unsigned char *)addr += (unsigned char)v;
+		result = *(unsigned char *)addr;
+		break;
+	case 2:
+		*(unsigned short *)addr += (unsigned short)v;
+		result = *(unsigned short *)addr;
+		break;
+	case 4:
+		*(unsigned int *)addr += (unsigned int)v;
+		result = *(unsigned int *)addr;
+		break;
+	default:
+		/*
+		 * generate an illegal instruction. Cannot catch this with
+		 * linker tricks when optimizations are disabled.
+ */ + result = 0; /* silence gcc warnings */ + __asm__ __volatile__("ud2"); + } + mutex_lock_signal_restore(&__urcu_x86_compat_mutex, &mask); + return result; } -int __urcu_cas_init(void) +int __rcu_cas_init(void) { - if (__urcu_cas_avail < 0) - __urcu_cas_avail = compare_and_swap_is_available(); - return __urcu_cas_avail; + if (__rcu_cas_avail < 0) + __rcu_cas_avail = compare_and_swap_is_available(); + return __rcu_cas_avail; }
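
A few notes on the techniques used above follow; the C sketches are illustrative only and are not part of the patch.

compare_and_swap_is_available() is imported from glibc 2.3.5 linuxthreads (pt-machine.h) and decides at run time whether the CPU has cmpxchg. Its body sits outside this hunk, but the glibc version works by toggling the AC flag (bit 18, mask 0x40000) in EFLAGS: an i386 cannot change that bit, an i486 or later can, and cmpxchg first appeared with the i486. A minimal 32-bit sketch of that idea, assuming GCC inline assembly and reusing the get_eflags()/set_eflags() helpers shown in the diff:

/* Illustrative only: 386-vs-486 detection in the style of glibc's
 * pt-machine.h. Toggle EFLAGS.AC (0x40000); if the bit sticks, the CPU
 * is at least an i486 and therefore has cmpxchg. */
static int sketch_compare_and_swap_is_available(void)
{
	int oldflags = get_eflags();
	int changed;

	/* Try to flip the AC bit. */
	set_eflags(oldflags ^ 0x40000);
	/* See whether the bit actually changed. */
	changed = (get_eflags() ^ oldflags) & 0x40000;
	/* Put EFLAGS back the way it was. */
	set_eflags(oldflags);
	return changed != 0;
}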
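
The _compat_uatomic_*() functions emulate atomic operations on CPUs without cmpxchg by funnelling every access through one global lock: mutex_lock_signal_save() blocks all signals for the calling thread and then takes __urcu_x86_compat_mutex, and mutex_lock_signal_restore() releases the mutex and restores the caller's signal mask. Blocking signals first keeps a handler in the same thread from re-entering the emulated section and self-deadlocking on the non-recursive mutex. The scheme is only atomic with respect to code that goes through the same compat functions. A self-contained sketch of the same pattern, with hypothetical names (emu_lock, emu_unlock, emu_add_return) rather than the library's:

#include <assert.h>
#include <pthread.h>
#include <signal.h>

/* Hypothetical names; the patch uses __urcu_x86_compat_mutex together
 * with mutex_lock_signal_save()/mutex_lock_signal_restore(). */
static pthread_mutex_t emu_mutex = PTHREAD_MUTEX_INITIALIZER;

static void emu_lock(sigset_t *oldmask)
{
	sigset_t all;
	int ret;

	ret = sigfillset(&all);
	assert(!ret);
	/* Block every signal for this thread before taking the lock, so a
	 * handler cannot interrupt the emulated atomic section. */
	ret = pthread_sigmask(SIG_BLOCK, &all, oldmask);
	assert(!ret);
	ret = pthread_mutex_lock(&emu_mutex);
	assert(!ret);
}

static void emu_unlock(sigset_t *oldmask)
{
	int ret;

	ret = pthread_mutex_unlock(&emu_mutex);
	assert(!ret);
	/* Restore the signal mask the caller had on entry. */
	ret = pthread_sigmask(SIG_SETMASK, oldmask, NULL);
	assert(!ret);
}

/* Emulated add-and-return, atomic only against other users of emu_lock(). */
static int emu_add_return(int *addr, int v)
{
	sigset_t saved;
	int result;

	emu_lock(&saved);
	*addr += v;
	result = *addr;
	emu_unlock(&saved);
	return result;
}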
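
__rcu_cas_avail and __urcu_x86_compat_mutex are declared __attribute__((weak)), as the new comment explains, so that when the executable and several shared objects each carry their own copy of this compat code, the dynamic linker binds every reference to the first definition loaded and the whole process ends up sharing a single availability flag and a single lock. __rcu_cas_init() runs as a constructor and caches the detection result in that flag (-1 unknown, 1 available, 0 unavailable). Because constructor ordering is not guaranteed, a caller that may run first can re-check lazily; a sketch of such a guard (the wrapper name ensure_cas_detected() is hypothetical, and compat_arch_init() mentioned in the comment is not shown in this diff):

/* Declarations matching the patch; the weak definitions live in
 * compat_arch_x86.c. */
extern int __rcu_cas_avail;
int __rcu_cas_init(void);

/* Hypothetical helper: make sure detection has run before relying on
 * the flag, mirroring the check __rcu_cas_init() itself performs. */
static int ensure_cas_detected(void)
{
	if (__rcu_cas_avail < 0)	/* constructor has not run yet */
		return __rcu_cas_init();
	return __rcu_cas_avail;		/* 1: cmpxchg available, 0: not */
}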