uatomic.h

skipping to change at line 98
unchanged:
		__asm__ __volatile__(
			"lock; cmpxchgq %2, %1"
			: "+a"(result), "+m"(*__hp(addr))
			: "r"((unsigned long)_new)
			: "memory");
		return result;
	}
#endif
	}

old:
	/* generate an illegal instruction. Cannot catch this with linker tricks
	 * when optimizations are disabled. */

new:
	/*
	 * generate an illegal instruction. Cannot catch this with
	 * linker tricks when optimizations are disabled.
	 */

unchanged:
	__asm__ __volatile__("ud2");
	return 0;
}
old:
#define _uatomic_cmpxchg(addr, old, _new)				      \
	((__typeof__(*(addr))) __uatomic_cmpxchg((addr), (unsigned long)(old),\
						(unsigned long)(_new),	      \
						sizeof(*(addr))))

new:
#define _uatomic_cmpxchg(addr, old, _new)				      \
	((__typeof__(*(addr))) __uatomic_cmpxchg((addr),		      \
						caa_cast_long_keep_sign(old), \
						caa_cast_long_keep_sign(_new),\
						sizeof(*(addr))))
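The new version routes the macro arguments through caa_cast_long_keep_sign() so that signed arguments are sign-extended consistently instead of being cast straight to unsigned long. As a usage sketch only (assuming the public uatomic_cmpxchg() and uatomic_read() wrappers that sit on top of this macro; the helper name is hypothetical), a typical compare-and-swap retry loop looks like this:

	#include <limits.h>
	#include <urcu/uatomic.h>	/* assumed umbrella header for the uatomic API */

	/* Hypothetical helper, not part of the header: saturating increment. */
	static int counter_inc_saturating(int *counter)
	{
		int old, newv;

		do {
			old = uatomic_read(counter);
			if (old == INT_MAX)
				return old;	/* already saturated, nothing to do */
			newv = old + 1;
			/* uatomic_cmpxchg() returns the value previously found at *counter. */
		} while (uatomic_cmpxchg(counter, old, newv) != old);
		return newv;
	}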
unchanged:
/* xchg */

static inline __attribute__((always_inline))
unsigned long __uatomic_exchange(void *addr, unsigned long val, int len)
{
	/* Note: the "xchg" instruction does not need a "lock" prefix. */
	switch (len) {
	case 1:

skipping to change at line 159 (old) / line 162 (new)
unchanged:
		unsigned long result;
		__asm__ __volatile__(
			"xchgq %0, %1"
			: "=r"(result), "+m"(*__hp(addr))
			: "0" ((unsigned long)val)
			: "memory");
		return result;
	}
#endif
	}

old:
	/* generate an illegal instruction. Cannot catch this with linker tricks
	 * when optimizations are disabled. */

new:
	/*
	 * generate an illegal instruction. Cannot catch this with
	 * linker tricks when optimizations are disabled.
	 */

unchanged:
	__asm__ __volatile__("ud2");
	return 0;
}
old:
#define _uatomic_xchg(addr, v)						      \
	((__typeof__(*(addr))) __uatomic_exchange((addr), (unsigned long)(v), \
						sizeof(*(addr))))

new:
#define _uatomic_xchg(addr, v)						      \
	((__typeof__(*(addr))) __uatomic_exchange((addr),		      \
						caa_cast_long_keep_sign(v),   \
						sizeof(*(addr))))
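A small usage sketch (assuming the public uatomic_xchg() wrapper; the helper is hypothetical): the exchange stores the new value and hands back the previous one in a single atomic step, which makes one-shot claiming of a flag straightforward.

	#include <urcu/uatomic.h>	/* assumed umbrella header */

	/* Hypothetical helper: claim a pending-work flag. On x86 the xchg
	 * instruction is implicitly locked, so no "lock" prefix is needed. */
	static int claim_pending(unsigned long *pending)
	{
		return uatomic_xchg(pending, 1UL) == 0UL;	/* true if we claimed it */
	}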
unchanged:
/* uatomic_add_return */

static inline __attribute__((always_inline))
unsigned long __uatomic_add_return(void *addr, unsigned long val,
				 int len)
{
	switch (len) {
	case 1:

skipping to change at line 223 (old) / line 229 (new)
unchanged:
		__asm__ __volatile__(
			"lock; xaddq %1, %0"
			: "+m"(*__hp(addr)), "+r" (result)
			:
			: "memory");
		return result + (unsigned long)val;
	}
#endif
	}

old:
	/* generate an illegal instruction. Cannot catch this with linker tricks
	 * when optimizations are disabled. */

new:
	/*
	 * generate an illegal instruction. Cannot catch this with
	 * linker tricks when optimizations are disabled.
	 */

unchanged:
	__asm__ __volatile__("ud2");
	return 0;
}
old:
#define _uatomic_add_return(addr, v)					    \
	((__typeof__(*(addr))) __uatomic_add_return((addr),		    \
						(unsigned long)(v),	    \
						sizeof(*(addr))))

new:
#define _uatomic_add_return(addr, v)					    \
	((__typeof__(*(addr))) __uatomic_add_return((addr),		    \
						caa_cast_long_keep_sign(v), \
						sizeof(*(addr))))
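A usage sketch only (assuming the public uatomic_add_return() wrapper; the helper is hypothetical): because the macro returns the value of the target after the addition, it is a natural fit for reference counting.

	#include <urcu/uatomic.h>	/* assumed umbrella header */

	/* Hypothetical helper: drop a reference and report whether this call
	 * released the last one. */
	static int refcount_put(long *refcount)
	{
		return uatomic_add_return(refcount, -1) == 0;
	}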
unchanged:
/* uatomic_and */

static inline __attribute__((always_inline))
void __uatomic_and(void *addr, unsigned long val, int len)
{
	switch (len) {
	case 1:
	{
		__asm__ __volatile__(

skipping to change at line 279 (old) / line 287 (new)
unchanged:
	{
		__asm__ __volatile__(
			"lock; andq %1, %0"
			: "=m"(*__hp(addr))
			: "er" ((unsigned long)val)
			: "memory");
		return;
	}
#endif
	}

old:
	/* generate an illegal instruction. Cannot catch this with linker tricks
	 * when optimizations are disabled. */

new:
	/*
	 * generate an illegal instruction. Cannot catch this with
	 * linker tricks when optimizations are disabled.
	 */

unchanged:
	__asm__ __volatile__("ud2");
	return;
}
old:
#define _uatomic_and(addr, v)						   \
	(__uatomic_and((addr), (unsigned long)(v), sizeof(*(addr))))

new:
#define _uatomic_and(addr, v)						   \
	(__uatomic_and((addr), caa_cast_long_keep_sign(v), sizeof(*(addr))))
unchanged:
/* uatomic_or */

static inline __attribute__((always_inline))
void __uatomic_or(void *addr, unsigned long val, int len)
{
	switch (len) {
	case 1:
	{
		__asm__ __volatile__(

skipping to change at line 333 (old) / line 343 (new)
unchanged:
	{
		__asm__ __volatile__(
			"lock; orq %1, %0"
			: "=m"(*__hp(addr))
			: "er" ((unsigned long)val)
			: "memory");
		return;
	}
#endif
	}

old:
	/* generate an illegal instruction. Cannot catch this with linker tricks
	 * when optimizations are disabled. */

new:
	/*
	 * generate an illegal instruction. Cannot catch this with
	 * linker tricks when optimizations are disabled.
	 */

unchanged:
	__asm__ __volatile__("ud2");
	return;
}
old:
#define _uatomic_or(addr, v)						   \
	(__uatomic_or((addr), (unsigned long)(v), sizeof(*(addr))))

new:
#define _uatomic_or(addr, v)						   \
	(__uatomic_or((addr), caa_cast_long_keep_sign(v), sizeof(*(addr))))
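The and/or pair is typically used for flag words. A usage sketch only (assuming the public uatomic_and()/uatomic_or() wrappers; the flag names and helpers are hypothetical):

	#include <urcu/uatomic.h>	/* assumed umbrella header */

	#define FLAG_DIRTY	(1UL << 0)	/* hypothetical flag bits */
	#define FLAG_CLOSING	(1UL << 1)

	static void mark_dirty(unsigned long *flags)
	{
		uatomic_or(flags, FLAG_DIRTY);		/* atomically set the bit */
	}

	static void clear_dirty(unsigned long *flags)
	{
		uatomic_and(flags, ~FLAG_DIRTY);	/* atomically clear the bit */
	}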
unchanged:
/* uatomic_add */

static inline __attribute__((always_inline))
void __uatomic_add(void *addr, unsigned long val, int len)
{
	switch (len) {
	case 1:
	{
		__asm__ __volatile__(

skipping to change at line 387 (old) / line 399 (new)
unchanged:
	{
		__asm__ __volatile__(
			"lock; addq %1, %0"
			: "=m"(*__hp(addr))
			: "er" ((unsigned long)val)
			: "memory");
		return;
	}
#endif
	}

old:
	/* generate an illegal instruction. Cannot catch this with linker tricks
	 * when optimizations are disabled. */

new:
	/*
	 * generate an illegal instruction. Cannot catch this with
	 * linker tricks when optimizations are disabled.
	 */

unchanged:
	__asm__ __volatile__("ud2");
	return;
}
old:
#define _uatomic_add(addr, v)						   \
	(__uatomic_add((addr), (unsigned long)(v), sizeof(*(addr))))

new:
#define _uatomic_add(addr, v)						   \
	(__uatomic_add((addr), caa_cast_long_keep_sign(v), sizeof(*(addr))))
unchanged:
/* uatomic_inc */

static inline __attribute__((always_inline))
void __uatomic_inc(void *addr, int len)
{
	switch (len) {
	case 1:
	{
		__asm__ __volatile__(

skipping to change at line 494 (old) / line 508 (new)
unchanged:
	{
		__asm__ __volatile__(
			"lock; decq %0"
			: "=m"(*__hp(addr))
			:
			: "memory");
		return;
	}
#endif
	}

old:
	/* generate an illegal instruction. Cannot catch this with linker tricks
	 * when optimizations are disabled. */

new:
	/*
	 * generate an illegal instruction. Cannot catch this with
	 * linker tricks when optimizations are disabled.
	 */

unchanged:
	__asm__ __volatile__("ud2");
	return;
}
unchanged:
#define _uatomic_dec(addr)	(__uatomic_dec((addr), sizeof(*(addr))))
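A usage sketch for the add/inc/dec family (assuming the public uatomic_inc()/uatomic_add() wrappers; the statistics structure is hypothetical):

	#include <urcu/uatomic.h>	/* assumed umbrella header */

	/* Hypothetical per-device statistics updated by several threads. */
	struct stats {
		unsigned long events;
		unsigned long bytes;
	};

	static void account(struct stats *s, unsigned long len)
	{
		uatomic_inc(&s->events);	/* events++ atomically */
		uatomic_add(&s->bytes, len);	/* bytes += len atomically */
	}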
unchanged:
#if ((CAA_BITS_PER_LONG != 64) && defined(CONFIG_RCU_COMPAT_ARCH))
extern int __rcu_cas_avail;
extern int __rcu_cas_init(void);

skipping to change at line 519 (old) / line 535 (new)
unchanged:
		: ((caa_unlikely(__rcu_cas_avail < 0)			\
			? ((__rcu_cas_init() > 0)			\
				? (_uatomic_##insn)			\
				: (compat_uatomic_##insn))		\
			: (compat_uatomic_##insn))))

extern unsigned long _compat_uatomic_set(void *addr,
					unsigned long _new, int len);
old:
#define compat_uatomic_set(addr, _new)				       \
	((__typeof__(*(addr))) _compat_uatomic_set((addr),	       \
						(unsigned long)(_new), \
						sizeof(*(addr))))

new:
#define compat_uatomic_set(addr, _new)				       \
	((__typeof__(*(addr))) _compat_uatomic_set((addr),	       \
						caa_cast_long_keep_sign(_new), \
						sizeof(*(addr))))
unchanged:
extern unsigned long _compat_uatomic_xchg(void *addr,
					unsigned long _new, int len);

old:
#define compat_uatomic_xchg(addr, _new)				       \
	((__typeof__(*(addr))) _compat_uatomic_xchg((addr),	       \
						(unsigned long)(_new), \
						sizeof(*(addr))))

new:
#define compat_uatomic_xchg(addr, _new)				       \
	((__typeof__(*(addr))) _compat_uatomic_xchg((addr),	       \
						caa_cast_long_keep_sign(_new), \
						sizeof(*(addr))))
unchanged:
extern unsigned long _compat_uatomic_cmpxchg(void *addr, unsigned long old,
					unsigned long _new, int len);

old:
#define compat_uatomic_cmpxchg(addr, old, _new)			       \
	((__typeof__(*(addr))) _compat_uatomic_cmpxchg((addr),	       \
						(unsigned long)(old),  \
						(unsigned long)(_new), \
						sizeof(*(addr))))

new:
#define compat_uatomic_cmpxchg(addr, old, _new)			       \
	((__typeof__(*(addr))) _compat_uatomic_cmpxchg((addr),	       \
						caa_cast_long_keep_sign(old),  \
						caa_cast_long_keep_sign(_new), \
						sizeof(*(addr))))
unchanged:
extern void _compat_uatomic_and(void *addr, unsigned long _new, int len);

old:
#define compat_uatomic_and(addr, v)				       \
	(_compat_uatomic_and((addr),				       \
			(unsigned long)(v),			       \
			sizeof(*(addr))))

new:
#define compat_uatomic_and(addr, v)				       \
	(_compat_uatomic_and((addr),				       \
			caa_cast_long_keep_sign(v),		       \
			sizeof(*(addr))))
unchanged:
extern void _compat_uatomic_or(void *addr, unsigned long _new, int len);

old:
#define compat_uatomic_or(addr, v)				       \
	(_compat_uatomic_or((addr),				       \
			(unsigned long)(v),			       \
			sizeof(*(addr))))

new:
#define compat_uatomic_or(addr, v)				       \
	(_compat_uatomic_or((addr),				       \
			caa_cast_long_keep_sign(v),		       \
			sizeof(*(addr))))
unchanged:
extern unsigned long _compat_uatomic_add_return(void *addr,
					unsigned long _new, int len);

old:
#define compat_uatomic_add_return(addr, v)			       \
	((__typeof__(*(addr))) _compat_uatomic_add_return((addr),     \
						(unsigned long)(v),    \
						sizeof(*(addr))))

new:
#define compat_uatomic_add_return(addr, v)			       \
	((__typeof__(*(addr))) _compat_uatomic_add_return((addr),     \
						caa_cast_long_keep_sign(v), \
						sizeof(*(addr))))
unchanged:
#define compat_uatomic_add(addr, v)				       \
		((void)compat_uatomic_add_return((addr), (v)))
#define compat_uatomic_inc(addr)				       \
		(compat_uatomic_add((addr), 1))
#define compat_uatomic_dec(addr)				       \
		(compat_uatomic_add((addr), -1))

#else
#define UATOMIC_COMPAT(insn) (_uatomic_##insn)
#endif

/* Read is atomic even in compat mode */
#define uatomic_set(addr, v)			\
		UATOMIC_COMPAT(set(addr, v))
unchanged:
#define uatomic_cmpxchg(addr, old, _new)	\
		UATOMIC_COMPAT(cmpxchg(addr, old, _new))
#define uatomic_xchg(addr, v)			\
		UATOMIC_COMPAT(xchg(addr, v))

#define uatomic_and(addr, v)		\
		UATOMIC_COMPAT(and(addr, v))
added in new version:
#define cmm_smp_mb__before_uatomic_and()	cmm_barrier()
#define cmm_smp_mb__after_uatomic_and()		cmm_barrier()

unchanged:
#define uatomic_or(addr, v)		\
		UATOMIC_COMPAT(or(addr, v))

added in new version:
#define cmm_smp_mb__before_uatomic_or()		cmm_barrier()
#define cmm_smp_mb__after_uatomic_or()		cmm_barrier()

unchanged:
#define uatomic_add_return(addr, v)		\
		UATOMIC_COMPAT(add_return(addr, v))

#define uatomic_add(addr, v)	UATOMIC_COMPAT(add(addr, v))

added in new version:
#define cmm_smp_mb__before_uatomic_add()	cmm_barrier()
#define cmm_smp_mb__after_uatomic_add()		cmm_barrier()

unchanged:
#define uatomic_inc(addr)	UATOMIC_COMPAT(inc(addr))

added in new version:
#define cmm_smp_mb__before_uatomic_inc()	cmm_barrier()
#define cmm_smp_mb__after_uatomic_inc()		cmm_barrier()

unchanged:
#define uatomic_dec(addr)	UATOMIC_COMPAT(dec(addr))

added in new version:
#define cmm_smp_mb__before_uatomic_dec()	cmm_barrier()
#define cmm_smp_mb__after_uatomic_dec()		cmm_barrier()
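These new helpers mirror the Linux kernel's smp_mb__before_atomic()/smp_mb__after_atomic() pattern: on x86 they expand to a plain compiler barrier because the lock-prefixed operations already imply a full memory barrier. A usage sketch only (the function and variables are hypothetical, assuming the wrappers shown above):

	#include <urcu/uatomic.h>	/* assumed umbrella header */

	/* Hypothetical completion path: the counter update must be ordered
	 * before the "pending" flag is cleared for other threads. */
	static void finish_work(unsigned long *completed, unsigned long *pending)
	{
		uatomic_add(completed, 1);
		cmm_smp_mb__after_uatomic_add();	/* compiler barrier on x86 */
		uatomic_set(pending, 0);
	}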
unchanged:
#ifdef __cplusplus
}
#endif

#include <urcu/uatomic/generic.h>

#endif /* _URCU_ARCH_UATOMIC_X86_H */

End of changes (uatomic.h): 25 change blocks, 46 lines changed or deleted, 78 lines changed or added.
urcu-pointer.h

skipping to change at line 50
unchanged:
/*
 * rcu_dereference(ptr)
 *
 * Fetch a RCU-protected pointer. Typically used to copy the variable ptr to a
 * local variable.
 */
#define rcu_dereference		_rcu_dereference
old:
/*
 * rcu_cmpxchg_pointer(type **ptr, type *new, type *old)
 * type *rcu_xchg_pointer(type **ptr, type *new)
 * type *rcu_set_pointer(type **ptr, type *new)
 *
 * RCU pointer updates.
 * @ptr: address of the pointer to modify
 * @new: new pointer value
 * @old: old pointer value (expected)
 *
 * return: old pointer value
 */

new:
/*
 * type *rcu_cmpxchg_pointer(type **ptr, type *new, type *old)
 * type *rcu_xchg_pointer(type **ptr, type *new)
 * void rcu_set_pointer(type **ptr, type *new)
 *
 * RCU pointer updates.
 * @ptr: address of the pointer to modify
 * @new: new pointer value
 * @old: old pointer value (expected)
 *
 * return: old pointer value
 */

unchanged:
#define rcu_cmpxchg_pointer	_rcu_cmpxchg_pointer
#define rcu_xchg_pointer	_rcu_xchg_pointer
skipping to change at line 97
unchanged:
extern void *rcu_xchg_pointer_sym(void **p, void *v);
#define rcu_xchg_pointer(p, v)						     \
	({								     \
		typeof(*(p)) _________pv = (v);				     \
		typeof(*(p)) _________p1 = URCU_FORCE_CAST(typeof(*(p)),    \
			rcu_xchg_pointer_sym(URCU_FORCE_CAST(void **, p),    \
					     _________pv));		     \
		(_________p1);						     \
	})
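Because this macro is a GNU statement expression, it yields the previous pointer value. A usage sketch only (assuming the non-_LGPL_SOURCE build shown here; the include path, list type and helper are hypothetical): atomically detaching a shared list head so the caller owns every node.

	#include <urcu.h>	/* assumed include; pulls in urcu-pointer.h */
	#include <stddef.h>

	struct item {
		struct item *next;
		int payload;
	};

	static struct item *pending_list;	/* hypothetical shared list head */

	/* Publish NULL and hand back the previous head in one atomic step. */
	static struct item *grab_all(void)
	{
		return rcu_xchg_pointer(&pending_list, NULL);
	}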
added in new version:
/*
 * Note: rcu_set_pointer_sym returns @v because we don't want to break
 * the ABI. At the API level, rcu_set_pointer() now returns void. Use of
 * the return value is therefore deprecated, and will cause a build
 * error.
 */

unchanged:
extern void *rcu_set_pointer_sym(void **p, void *v);
old:
#define rcu_set_pointer(p, v)						     \
	({								     \
		typeof(*(p)) _________pv = (v);				     \
		typeof(*(p)) _________p1 = URCU_FORCE_CAST(typeof(*(p)),    \
			rcu_set_pointer_sym(URCU_FORCE_CAST(void **, p),     \
					    _________pv));		     \
		(_________p1);						     \
	})

new:
#define rcu_set_pointer(p, v)						     \
	do {								     \
		typeof(*(p)) _________pv = (v);				     \
		(void) rcu_set_pointer_sym(URCU_FORCE_CAST(void **, p),     \
					    _________pv);		     \
	} while (0)
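The switch from a statement expression to do { ... } while (0) is what makes any use of the return value a compile error, matching the comment added above. A minimal before/after sketch of caller code (the type and function are hypothetical, the include path assumed):

	#include <urcu.h>	/* assumed include; the macro lives in urcu-pointer.h */

	struct node {
		int value;
	};

	static struct node *gp;	/* hypothetical RCU-protected global pointer */

	static void publish(struct node *p)
	{
		/* Before this change, "q = rcu_set_pointer(&gp, p);" compiled,
		 * because the macro yielded the stored value. With the
		 * do { ... } while (0) form it is a plain void statement: */
		rcu_set_pointer(&gp, p);
	}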
unchanged:
#endif /* !_LGPL_SOURCE */
old:
/*
 * rcu_assign_pointer(type *ptr, type *new)
 *
 * Same as rcu_set_pointer, but takes the pointer to assign to rather than its
 * address as first parameter. Provided for compatibility with the Linux kernel
 * RCU semantic.
 */

new:
/*
 * void rcu_assign_pointer(type *ptr, type *new)
 *
 * Same as rcu_set_pointer, but takes the pointer to assign to rather than its
 * address as first parameter. Provided for compatibility with the Linux kernel
 * RCU semantic.
 */

unchanged:
#define rcu_assign_pointer(p, v)	rcu_set_pointer((&p), (v))
unchanged:
#ifdef __cplusplus
}
#endif

End of changes (urcu-pointer.h): 6 change blocks, 13 lines changed or deleted, 15 lines changed or added.