config.h

skipping to change at line 14

 /* Defined when on a system that has memory fence instructions. */
 /* #undef CONFIG_URCU_HAVE_FENCE */

 /* Defined when on a system with futex support. */
 #define CONFIG_URCU_HAVE_FUTEX 1

 /* Enable SMP support. With SMP support enabled, uniprocessors are also
    supported. With SMP support disabled, UP systems work fine, but the
    behavior of SMP systems is undefined. */
 #define CONFIG_URCU_SMP 1

+/* Compatibility mode for i386 which lacks cmpxchg instruction. */
+/* #undef CONFIG_URCU_COMPAT_ARCH */

End of changes. 1 change block. 0 lines changed or deleted, 0 lines changed or added.
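
Note on the new option: CONFIG_URCU_COMPAT_ARCH gates the i386 (no cmpxchg) fallback introduced in the uatomic_arch.h diff below, while CONFIG_URCU_SMP decides whether the memory-barrier macros emit real fence instructions. As a rough illustration of how such a switch is typically consumed by the arch headers (a sketch under assumptions, not the exact liburcu code; mb() and barrier() are assumed helpers):

/* Sketch only: SMP builds need a real fence, UP builds only a compiler barrier. */
#ifdef CONFIG_URCU_SMP
#define smp_mb()        mb()            /* other CPUs may observe our accesses */
#else
#define smp_mb()        barrier()       /* uniprocessor: CPU reordering is not observable */
#endif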


uatomic_arch.h

skipping to change at line 24

  * for any purpose, provided the above notices are retained on all copies.
  * Permission to modify the code and to distribute modified code is granted,
  * provided the above notices are retained, and a notice that the code was
  * modified is included with the above copyright notice.
  *
  * Code inspired from libuatomic_ops-1.2, inherited in part from the
  * Boehm-Demers-Weiser conservative garbage collector.
  */

 #include <urcu/compiler.h>
+#include <urcu/system.h>

 #ifndef __SIZEOF_LONG__
 #if defined(__x86_64__) || defined(__amd64__)
 #define __SIZEOF_LONG__ 8
 #else
 #define __SIZEOF_LONG__ 4
 #endif
 #endif

 #ifndef BITS_PER_LONG

skipping to change at line 46 (old) / line 47 (new)

 /*
  * Derived from AO_compare_and_swap() and AO_test_and_set_full().
  */
 struct __uatomic_dummy {
         unsigned long v[10];
 };
 #define __hp(x)        ((struct __uatomic_dummy *)(x))

-#define uatomic_set(addr, v)                            \
-        do {                                            \
-                ACCESS_ONCE(*(addr)) = (v);             \
-        } while (0)
-
-#define uatomic_read(addr)      ACCESS_ONCE(*(addr))
+#define _uatomic_set(addr, v)   STORE_SHARED(*(addr), (v))
+#define _uatomic_read(addr)     LOAD_SHARED(*(addr))
 /* cmpxchg */

 static inline __attribute__((always_inline))
-unsigned long _uatomic_cmpxchg(void *addr, unsigned long old,
+unsigned long __uatomic_cmpxchg(void *addr, unsigned long old,
                 unsigned long _new, int len)
 {
         switch (len) {
         case 1:
         {
                 unsigned char result = old;
                 __asm__ __volatile__(
                 "lock; cmpxchgb %2, %1"
                         : "+a"(result), "+m"(*__hp(addr))

skipping to change at line 113 (old) / line 110 (new)

                 return result;
         }
 #endif
         }
         /* generate an illegal instruction. Cannot catch this with linker tricks
          * when optimizations are disabled. */
         __asm__ __volatile__("ud2");
         return 0;
 }

-#define uatomic_cmpxchg(addr, old, _new)                                       \
-        ((__typeof__(*(addr))) _uatomic_cmpxchg((addr), (unsigned long)(old),  \
-                                                (unsigned long)(_new),         \
-                                                sizeof(*(addr))))
+#define _uatomic_cmpxchg(addr, old, _new)                                      \
+        ((__typeof__(*(addr))) __uatomic_cmpxchg((addr), (unsigned long)(old), \
+                                                (unsigned long)(_new),         \
+                                                sizeof(*(addr))))
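
Reviewer note: the cmpxchg wrapper above returns the value that was actually found at the target address, so a caller that wants to apply an update retries until the returned value matches its expectation. A minimal, hypothetical caller sketch (not part of this patch; it only assumes the public uatomic_read()/uatomic_cmpxchg() macros exported by this header):

#include <urcu/uatomic_arch.h>  /* header name as installed by liburcu of this era */

/* Hypothetical example: add to a counter with a cmpxchg retry loop. */
static void counter_add(unsigned long *ctr, unsigned long inc)
{
        unsigned long old;

        do {
                old = uatomic_read(ctr);
                /* cmpxchg returns the previous value; a mismatch means another
                 * thread updated the counter between our read and the CAS. */
        } while (uatomic_cmpxchg(ctr, old, old + inc) != old);
}
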
 /* xchg */

 static inline __attribute__((always_inline))
-unsigned long _uatomic_exchange(void *addr, unsigned long val, int len)
+unsigned long __uatomic_exchange(void *addr, unsigned long val, int len)
 {
         /* Note: the "xchg" instruction does not need a "lock" prefix. */
         switch (len) {
         case 1:
         {
                 unsigned char result;
                 __asm__ __volatile__(
                 "xchgb %0, %1"
                         : "=q"(result), "+m"(*__hp(addr))
                         : "0" ((unsigned char)val)

skipping to change at line 174 (old) / line 171 (new)

                 return result;
         }
 #endif
         }
         /* generate an illegal instruction. Cannot catch this with linker tricks
          * when optimizations are disabled. */
         __asm__ __volatile__("ud2");
         return 0;
 }

-#define uatomic_xchg(addr, v)                                                  \
-        ((__typeof__(*(addr))) _uatomic_exchange((addr), (unsigned long)(v),   \
-                                                sizeof(*(addr))))
+#define _uatomic_xchg(addr, v)                                                 \
+        ((__typeof__(*(addr))) __uatomic_exchange((addr), (unsigned long)(v),  \
+                                                sizeof(*(addr))))
 /* uatomic_add_return, uatomic_sub_return */

 static inline __attribute__((always_inline))
-unsigned long _uatomic_add_return(void *addr, unsigned long val,
+unsigned long __uatomic_add_return(void *addr, unsigned long val,
                                  int len)
 {
         switch (len) {
         case 1:
         {
                 unsigned char result = val;
                 __asm__ __volatile__(
                 "lock; xaddb %1, %0"
                         : "+m"(*__hp(addr)), "+q" (result)

skipping to change at line 238 (old) / line 235 (new)

                 return result + (unsigned long)val;
         }
 #endif
         }
         /* generate an illegal instruction. Cannot catch this with linker tricks
          * when optimizations are disabled. */
         __asm__ __volatile__("ud2");
         return 0;
 }

-#define uatomic_add_return(addr, v)                             \
-        ((__typeof__(*(addr))) _uatomic_add_return((addr),      \
-                                                  (unsigned long)(v), \
-                                                  sizeof(*(addr))))
+#define _uatomic_add_return(addr, v)                            \
+        ((__typeof__(*(addr))) __uatomic_add_return((addr),     \
+                                                  (unsigned long)(v), \
+                                                  sizeof(*(addr))))

-#define uatomic_sub_return(addr, v)     uatomic_add_return((addr), -(v))
+#define _uatomic_sub_return(addr, v)    _uatomic_add_return((addr), -(v))
 /* uatomic_add, uatomic_sub */

 static inline __attribute__((always_inline))
-void _uatomic_add(void *addr, unsigned long val, int len)
+void __uatomic_add(void *addr, unsigned long val, int len)
 {
         switch (len) {
         case 1:
         {
                 __asm__ __volatile__(
                 "lock; addb %1, %0"
                         : "=m"(*__hp(addr))
                         : "iq" ((unsigned char)val)
                         : "memory");
                 return;

skipping to change at line 296 (old) / line 293 (new)

                 return;
         }
 #endif
         }
         /* generate an illegal instruction. Cannot catch this with linker tricks
          * when optimizations are disabled. */
         __asm__ __volatile__("ud2");
         return;
 }

-#define uatomic_add(addr, v)                                       \
-        (_uatomic_add((addr), (unsigned long)(v), sizeof(*(addr))))
+#define _uatomic_add(addr, v)                                      \
+        (__uatomic_add((addr), (unsigned long)(v), sizeof(*(addr))))

-#define uatomic_sub(addr, v)    uatomic_add((addr), -(v))
+#define _uatomic_sub(addr, v)   _uatomic_add((addr), -(v))
 /* uatomic_inc */

 static inline __attribute__((always_inline))
-void _uatomic_inc(void *addr, int len)
+void __uatomic_inc(void *addr, int len)
 {
         switch (len) {
         case 1:
         {
                 __asm__ __volatile__(
                 "lock; incb %0"
                         : "=m"(*__hp(addr))
                         :
                         : "memory");
                 return;

skipping to change at line 352 (old) / line 349 (new)

                 return;
         }
 #endif
         }
         /* generate an illegal instruction. Cannot catch this with linker tricks
          * when optimizations are disabled. */
         __asm__ __volatile__("ud2");
         return;
 }

-#define uatomic_inc(addr)       (_uatomic_inc((addr), sizeof(*(addr))))
+#define _uatomic_inc(addr)      (__uatomic_inc((addr), sizeof(*(addr))))
 /* uatomic_dec */

 static inline __attribute__((always_inline))
-void _uatomic_dec(void *addr, int len)
+void __uatomic_dec(void *addr, int len)
 {
         switch (len) {
         case 1:
         {
                 __asm__ __volatile__(
                 "lock; decb %0"
                         : "=m"(*__hp(addr))
                         :
                         : "memory");
                 return;

skipping to change at line 405 (old) / line 402 (new)

                 return;
         }
 #endif
         }
         /* generate an illegal instruction. Cannot catch this with linker tricks
          * when optimizations are disabled. */
         __asm__ __volatile__("ud2");
         return;
 }

-#define uatomic_dec(addr)       (_uatomic_dec((addr), sizeof(*(addr))))
+#define _uatomic_dec(addr)      (__uatomic_dec((addr), sizeof(*(addr))))
-#if (BITS_PER_LONG == 64)
-#define URCU_CAS_AVAIL()        1
-#define compat_uatomic_cmpxchg(ptr, old, _new)  uatomic_cmpxchg(ptr, old, _new)
-#else
+#if ((BITS_PER_LONG != 64) && defined(CONFIG_URCU_COMPAT_ARCH))
 extern int __urcu_cas_avail;
 extern int __urcu_cas_init(void);

-#define URCU_CAS_AVAIL()                                \
-                ((likely(__urcu_cas_avail > 0)) ?       \
-                        (1) :                           \
-                        ((unlikely(__urcu_cas_avail < 0) ?      \
-                                (__urcu_cas_init()) :           \
-                                (0))))
+#define UATOMIC_COMPAT(insn)                                    \
+        ((likely(__urcu_cas_avail > 0))                         \
+                ? (_uatomic_##insn)                             \
+                : ((unlikely(__urcu_cas_avail < 0)              \
+                        ? ((__urcu_cas_init() > 0)              \
+                                ? (_uatomic_##insn)             \
+                                : (compat_uatomic_##insn))      \
+                        : (compat_uatomic_##insn))))
+
+extern unsigned long _compat_uatomic_set(void *addr,
+                                         unsigned long _new, int len);
+#define compat_uatomic_set(addr, _new)                          \
+        ((__typeof__(*(addr))) _compat_uatomic_set((addr),      \
+                                                (unsigned long)(_new),  \
+                                                sizeof(*(addr))))
+
+extern unsigned long _compat_uatomic_xchg(void *addr,
+                                          unsigned long _new, int len);
+#define compat_uatomic_xchg(addr, _new)                         \
+        ((__typeof__(*(addr))) _compat_uatomic_xchg((addr),     \
+                                                (unsigned long)(_new),  \
+                                                sizeof(*(addr))))

 extern unsigned long _compat_uatomic_cmpxchg(void *addr, unsigned long old,
                                              unsigned long _new, int len);
+#define compat_uatomic_cmpxchg(addr, old, _new)                 \
+        ((__typeof__(*(addr))) _compat_uatomic_cmpxchg((addr),  \
+                                                (unsigned long)(old),   \
+                                                (unsigned long)(_new),  \
+                                                sizeof(*(addr))))

-#define compat_uatomic_cmpxchg(addr, old, _new)                                \
-        ((__typeof__(*(addr))) _uatomic_cmpxchg((addr), (unsigned long)(old),  \
-                                                (unsigned long)(_new),         \
-                                                sizeof(*(addr))))
+extern unsigned long _compat_uatomic_xchg(void *addr,
+                                          unsigned long _new, int len);
+#define compat_uatomic_add_return(addr, v)                      \
+        ((__typeof__(*(addr))) _compat_uatomic_add_return((addr),      \
+                                                (unsigned long)(v),     \
+                                                sizeof(*(addr))))
+
+#define compat_uatomic_sub_return(addr, v)                      \
+                compat_uatomic_add_return((addr), -(v))
+#define compat_uatomic_add(addr, v)                             \
+                ((void)compat_uatomic_add_return((addr), (v)))
+#define compat_uatomic_sub(addr, v)                             \
+                ((void)compat_uatomic_sub_return((addr), (v)))
+#define compat_uatomic_inc(addr)                                \
+                (compat_uatomic_add((addr), 1))
+#define compat_uatomic_dec(addr)                                \
+                (compat_uatomic_sub((addr), 1))
+#else
+#define UATOMIC_COMPAT(insn)    (_uatomic_##insn)
 #endif

+/* Read is atomic even in compat mode */
+#define uatomic_read(addr)      _uatomic_read(addr)
+
+#define uatomic_set(addr, v)                    \
+                UATOMIC_COMPAT(set(addr, v))
+#define uatomic_cmpxchg(addr, old, _new)        \
+                UATOMIC_COMPAT(cmpxchg(addr, old, _new))
+#define uatomic_xchg(addr, v)                   \
+                UATOMIC_COMPAT(xchg(addr, v))
+#define uatomic_add_return(addr, v)             \
+                UATOMIC_COMPAT(add_return(addr, v))
+#define uatomic_sub_return(addr, v)             \
+                UATOMIC_COMPAT(sub_return(addr, v))
+#define uatomic_add(addr, v)    UATOMIC_COMPAT(add(addr, v))
+#define uatomic_sub(addr, v)    UATOMIC_COMPAT(sub(addr, v))
+#define uatomic_inc(addr)       UATOMIC_COMPAT(inc(addr))
+#define uatomic_dec(addr)       UATOMIC_COMPAT(dec(addr))

 #endif /* _URCU_ARCH_UATOMIC_X86_H */

End of changes. 22 change blocks. 48 lines changed or deleted, 119 lines changed or added.
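
Summary of this file's change: the old URCU_CAS_AVAIL() / compat_uatomic_cmpxchg pair is replaced by a generic UATOMIC_COMPAT(insn) dispatcher. Each public uatomic_* macro now expands either to the native _uatomic_* implementation or to a compat_uatomic_* fallback, chosen at run time from __urcu_cas_avail (initialized on first use by __urcu_cas_init()). The following stand-alone sketch reproduces only the dispatch pattern; cas_avail, cas_init(), native_add() and fallback_add() are simplified stand-ins, not liburcu symbols:

/* Sketch of the run-time dispatch pattern behind UATOMIC_COMPAT(insn).
 * cas_avail plays the role of __urcu_cas_avail: -1 = unknown, 0 = no, 1 = yes. */
#include <stdio.h>

static int cas_avail = -1;              /* set once, then read on every call */

static int cas_init(void)               /* stand-in for __urcu_cas_init() */
{
        cas_avail = 1;                  /* e.g. probe CPUID for cmpxchg here */
        return cas_avail;
}

static void native_add(unsigned long *p, unsigned long v)   { *p += v; }
static void fallback_add(unsigned long *p, unsigned long v) { *p += v; }

/* Token pasting picks the implementation, exactly like UATOMIC_COMPAT(add(...)). */
#define COMPAT(insn)                                            \
        ((cas_avail > 0)                                        \
                ? (native_##insn)                               \
                : ((cas_avail < 0)                              \
                        ? ((cas_init() > 0)                     \
                                ? (native_##insn)               \
                                : (fallback_##insn))            \
                        : (fallback_##insn)))

int main(void)
{
        unsigned long x = 0;

        COMPAT(add(&x, 3));             /* first call triggers cas_init() */
        printf("%lu\n", x);             /* prints 3 */
        return 0;
}

The token-pasting trick keeps the public macro names stable while the run-time check costs a single, well-predicted branch once __urcu_cas_avail has been initialized.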


urcu-pointer-static.h

skipping to change at line 81

  * should not be freed !).
  */
 #define _rcu_cmpxchg_pointer(p, old, _new)                      \
         ({                                                      \
                 typeof(*p) _________pold = (old);               \
                 typeof(*p) _________pnew = (_new);              \
                 if (!__builtin_constant_p(_new) ||              \
                     ((_new) != NULL))                           \
                         wmb();                                  \
-                (likely(URCU_CAS_AVAIL()) ?                     \
-                        (uatomic_cmpxchg(p, _________pold, _________pnew)) :   \
-                        (compat_uatomic_cmpxchg(p, _________pold,       \
-                                                _________pnew)))        \
+                uatomic_cmpxchg(p, _________pold, _________pnew);       \
         })

 /**
  * _rcu_xchg_pointer - same as rcu_assign_pointer, but returns the previous
  * pointer to the data structure, which can be safely freed after waiting for a
  * quiescent state using synchronize_rcu().
  */
 #define _rcu_xchg_pointer(p, v)                                 \
         ({                                                      \

skipping to change at line 108 (old) / line 105 (new)

                         wmb();                                  \
                 uatomic_xchg(p, _________pv);                   \
         })

 #define _rcu_set_pointer(p, v)                                  \
         ({                                                      \
                 typeof(*p) _________pv = (v);                   \
                 if (!__builtin_constant_p(v) ||                 \
                     ((v) != NULL))                              \
                         wmb();                                  \
-                STORE_SHARED(*(p), _________pv);                \
+                uatomic_set(p, _________pv);                    \
         })

 /**
  * _rcu_assign_pointer - assign (publicize) a pointer to a new data structure
  * meant to be read by RCU read-side critical sections. Returns the assigned
  * value.
  *
  * Documents which pointers will be dereferenced by RCU read-side critical
  * sections and adds the required memory barriers on architectures requiring
  * them. It also makes sure the compiler does not reorder code initializing the

End of changes. 2 change blocks. 6 lines changed or deleted, 2 lines changed or added.
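
Reviewer note: after this change, _rcu_cmpxchg_pointer and _rcu_set_pointer go through the uatomic layer unconditionally (the run-time URCU_CAS_AVAIL() test now lives behind UATOMIC_COMPAT in uatomic_arch.h), while the wmb() before publication is still skipped only when the stored value is a compile-time NULL. A hedged usage sketch of the public pointer-publication API (the struct and the writer/reader functions are hypothetical; thread registration and RCU flavor selection are omitted):

#include <stdlib.h>
#include <urcu.h>

struct config {
        int a;
        int b;
};

static struct config *current_config;  /* shared, RCU-protected pointer */

static void writer_publish(int a, int b)
{
        struct config *c = malloc(sizeof(*c));  /* error handling omitted */

        c->a = a;
        c->b = b;
        /* barrier inside the macro: the fields above are visible to readers
         * before the pointer itself becomes visible */
        rcu_assign_pointer(current_config, c);
}

static int reader_get_a(void)
{
        struct config *c;
        int ret;

        rcu_read_lock();
        c = rcu_dereference(current_config);
        ret = c ? c->a : -1;
        rcu_read_unlock();
        return ret;
}

Freeing a replaced configuration would additionally require synchronize_rcu() (or call_rcu) before free(), which is exactly the constraint the _rcu_xchg_pointer comment above refers to.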

This html diff was produced by rfcdiff 1.41. The latest version is available from http://tools.ietf.org/tools/rfcdiff/