arch.h

skipping to change at line 32
  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
  */
 #include <urcu/compiler.h>
 #include <urcu/config.h>
 #ifdef __cplusplus
 extern "C" {
 #endif
-#define CONFIG_HAVE_MEM_COHERENCY
 #define CACHE_LINE_SIZE 128
 #ifdef CONFIG_RCU_HAVE_FENCE
 #define mb()    asm volatile("mfence":::"memory")
 #define rmb()   asm volatile("lfence":::"memory")
 #define wmb()   asm volatile("sfence"::: "memory")
 #else
 /*
  * Some non-Intel clones support out of order store. wmb() ceases to be a
  * nop for these.
  */
 #define mb()    asm volatile("lock; addl $0,0(%%esp)":::"memory")
 #define rmb()   asm volatile("lock; addl $0,0(%%esp)":::"memory")
 #define wmb()   asm volatile("lock; addl $0,0(%%esp)"::: "memory")
 #endif
-/*
- * Architectures without cache coherency need something like the following:
- *
- * #define mb()  mc()
- * #define rmb() rmc()
- * #define wmb() wmc()
- * #define mc()  arch_cache_flush()
- * #define rmc() arch_cache_flush_read()
- * #define wmc() arch_cache_flush_write()
- */
-#define mc()  barrier()
-#define rmc() barrier()
-#define wmc() barrier()
-#ifdef CONFIG_RCU_SMP
-#define smp_mb()  mb()
-#define smp_rmb() rmb()
-#define smp_wmb() wmb()
-#define smp_mc()  mc()
-#define smp_rmc() rmc()
-#define smp_wmc() wmc()
-#else
-#define smp_mb()  barrier()
-#define smp_rmb() barrier()
-#define smp_wmb() barrier()
-#define smp_mc()  barrier()
-#define smp_rmc() barrier()
-#define smp_wmc() barrier()
-#endif
-/* Nop everywhere except on alpha. */
-#define smp_read_barrier_depends()
-static inline void rep_nop(void)
-{
-        asm volatile("rep; nop" : : : "memory");
-}
-static inline void cpu_relax(void)
-{
-        rep_nop();
-}
+#define cpu_relax() asm volatile("rep; nop" : : : "memory");
 /*
  * Serialize core instruction execution. Also acts as a compiler barrier.
+ * On PIC ebx cannot be clobbered
  */
 #ifdef __PIC__
-/*
- * Cannot use cpuid because it clobbers the ebx register and clashes
- * with -fPIC :
- * error: PIC register 'ebx' clobbered in 'asm'
- */
-static inline void sync_core(void)
-{
-        mb();
-}
-#else
-static inline void sync_core(void)
-{
+#define sync_core()                                             \
+        asm volatile("push %%ebx; cpuid; pop %%ebx"             \
+                     : : : "memory", "eax", "ecx", "edx");
+#endif
+#ifndef __PIC__
+#define sync_core()                                             \
         asm volatile("cpuid" : : : "memory", "eax", "ebx", "ecx", "edx");
-}
 #endif
 #define rdtscll(val)                                            \
         do {                                                    \
                 unsigned int __a, __d;                          \
                 asm volatile("rdtsc" : "=a" (__a), "=d" (__d)); \
                 (val) = ((unsigned long long)__a)               \
                         | (((unsigned long long)__d) << 32);    \
         } while(0)
skipping to change at line 136 (old) / line 86 (new)
         cycles_t ret = 0;
         rdtscll(ret);
         return ret;
 }
 #ifdef __cplusplus
 }
 #endif
+#include <urcu/arch_generic.h>
 #endif /* _URCU_ARCH_X86_H */

End of changes. 6 change blocks. 58 lines changed or deleted, 10 lines changed or added.
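Usage note (not part of the diff): the barrier and cpu_relax() primitives above are typically paired as in the following minimal sketch. Variable and function names are invented for illustration; ACCESS_ONCE() comes from urcu/compiler.h, which this header includes.

#include <assert.h>

static int data;
static int ready;

static void producer(void)
{
        data = 42;
        wmb();                  /* order the data store before the flag store */
        ready = 1;
}

static void consumer(void)
{
        while (!ACCESS_ONCE(ready))
                cpu_relax();    /* "rep; nop" pause hint while spinning */
        rmb();                  /* order the flag load before the data load */
        assert(data == 42);
}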


compiler.h

skipping to change at line 40
  * (prohibits merging and refetching). The compiler is also forbidden to reorder
  * successive instances of ACCESS_ONCE(), but only when the compiler is aware of
  * particular ordering. Compiler ordering can be ensured, for example, by
  * putting two ACCESS_ONCE() in separate C statements.
  *
  * This macro does absolutely -nothing- to prevent the CPU from reordering,
  * merging, or refetching absolutely anything at any time. Its main intended
  * use is to mediate communication between process-level code and irq/NMI
  * handlers, all running on the same CPU.
  */
-#define ACCESS_ONCE(x) (*(volatile typeof(x) *)&x)
+#define ACCESS_ONCE(x) (*(volatile typeof(x) *)&(x))
 #ifndef max
 #define max(a,b) ((a)>(b)?(a):(b))
 #endif
 #ifndef min
 #define min(a,b) ((a)<(b)?(a):(b))
 #endif
+#if defined(__SIZEOF_LONG__)
+#define BITS_PER_LONG (__SIZEOF_LONG__ * 8)
+#elif defined(_LP64)
+#define BITS_PER_LONG 64
+#else
+#define BITS_PER_LONG 32
+#endif
 #endif /* _URCU_COMPILER_H */

End of changes. 2 change blocks. 1 line changed or deleted, 9 lines changed or added.
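To illustrate the comment above, a minimal sketch (not part of the diff; the flag and helper names are invented): ACCESS_ONCE() forces the compiler to re-read a variable that another context may change behind its back.

static int stop_requested;

static void do_work(void)
{
        /* placeholder workload */
}

static void event_loop(void)
{
        /* The volatile cast forces a fresh load on every iteration; a
         * plain read could legally be hoisted out of the loop. */
        while (!ACCESS_ONCE(stop_requested))
                do_work();
}

static void request_stop(void) /* e.g. called from a signal handler */
{
        ACCESS_ONCE(stop_requested) = 1;
}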


hlist.h

skipping to change at line 12
 #define _KCOMPAT_HLIST_H
 /*
  * Kernel sourcecode compatible lightweight single pointer list head useful
  * for implementing hash tables
  *
  * Copyright (C) 2009 Novell Inc.
  *
  * Author: Jan Blunck <jblunck@suse.de>
  *
+ * Copyright (C) 2010 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
+ *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms of the GNU Lesser General Public License version 2.1 as
  * published by the Free Software Foundation.
  */
 struct hlist_head
 {
         struct hlist_node *next;
 };

End of changes. 1 change block. 0 lines changed or deleted, 2 lines changed or added.


list.h

skipping to change at line 87
 }
 /* delete from list, add to another list as head */
 static inline void
 list_move (list_t *elem, list_t *head)
 {
         __list_del (elem->prev, elem->next);
         list_add (elem, head);
 }
+/* replace an old entry.
+ */
+static inline void
+list_replace(list_t *old, list_t *_new)
+{
+        _new->next = old->next;
+        _new->prev = old->prev;
+        _new->prev->next = _new;
+        _new->next->prev = _new;
+}
 /* Join two lists. */
 static inline void
 list_splice (list_t *add, list_t *head)
 {
         /* Do nothing if the list which gets added is empty. */
         if (add != add->next)
         {
                 add->next->prev = head;
                 add->prev->next = head->next;
                 head->next->prev = add->prev;
skipping to change at line 142 (old) / line 153 (new)
                 p = list_entry(pos->member.next,typeof(*pos), member);  \
              &pos->member != (head);                                    \
              pos = p, p = list_entry(pos->member.next, typeof(*pos), member))
 static inline int list_empty(list_t *head)
 {
         return head == head->next;
 }
 static inline void list_replace_init(list_t *old,
-                                     list_t *new)
+                                     list_t *_new)
 {
         list_t *head = old->next;
         list_del(old);
-        list_add_tail(new, head);
+        list_add_tail(_new, head);
         INIT_LIST_HEAD(old);
 }
 #endif /* list.h */

End of changes. 3 change blocks. 2 lines changed or deleted, 13 lines changed or added.
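A hypothetical sketch of the newly added list_replace() (struct and function names are invented): the replacement node takes the old node's position, but the old node's own links are left untouched, so reinitialize it before reuse.

struct item {
        int value;
        list_t node;            /* embedded list head */
};

static void swap_item(struct item *old, struct item *fresh)
{
        list_replace(&old->node, &fresh->node);
        INIT_LIST_HEAD(&old->node); /* old's links still point into the list */
}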


rculist.h

 /* Copyright (C) 2002 Free Software Foundation, Inc.
    This file is part of the GNU C Library.
    Contributed by Ulrich Drepper <drepper@redhat.com>, 2002.
    Copyright (C) 2009 Pierre-Marc Fournier
    Conversion to RCU list.
+   Copyright (C) 2010 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
    The GNU C Library is free software; you can redistribute it and/or
    modify it under the terms of the GNU Lesser General Public
    License as published by the Free Software Foundation; either
    version 2.1 of the License, or (at your option) any later version.
    The GNU C Library is distributed in the hope that it will be useful,
    but WITHOUT ANY WARRANTY; without even the implied warranty of
    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
    Lesser General Public License for more details.
skipping to change at line 41 (old) / line 42 (new)
  */
 static inline void list_add_rcu(list_t *newp, list_t *head)
 {
         newp->next = head->next;
         newp->prev = head;
         smp_wmb();
         head->next->prev = newp;
         head->next = newp;
 }
+/* replace an old entry atomically.
+ */
+static inline void list_replace_rcu(list_t *old, list_t *_new)
+{
+        _new->next = old->next;
+        _new->prev = old->prev;
+        rcu_assign_pointer(_new->prev->next, _new);
+        _new->next->prev = _new;
+}
 /* Remove element from list. */
 static inline void list_del_rcu(list_t *elem)
 {
         elem->next->prev = elem->prev;
         elem->prev->next = elem->next;
 }
-/* Iterate through elements of the list.
- * This must be done while rcu_read_lock() is held.
- */
+/*
+ * Iteration through all elements of the list must be done while rcu_read_lock()
+ * is held.
+ */
+/* Iterate forward over the elements of the list. */
+#define list_for_each_rcu(pos, head)                             \
+        for (pos = rcu_dereference((head)->next); pos != (head); \
+             pos = rcu_dereference(pos->next))
+/* Iterate through elements of the list.
+ */
 #define list_for_each_entry_rcu(pos, head, member) \
         for (pos = list_entry(rcu_dereference((head)->next), typeof(*pos), member); \
              &pos->member != (head); \
              pos = list_entry(rcu_dereference(pos->member.next), typeof(*pos), member))
 #endif /* _URCU_RCULIST_H */

End of changes. 4 change blocks. 2 lines changed or deleted, 22 lines changed or added.
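The new list_replace_rcu() publishes the replacement through rcu_assign_pointer(), so a concurrent reader sees either the old node or the new one, never a half-linked list. A hypothetical usage sketch (the entry type, the table variable, and the exact urcu header names are assumptions for illustration):

#include <stdlib.h>
#include <urcu.h>

struct entry {
        int value;
        list_t node;
};

static list_t table;    /* assumed initialized with INIT_LIST_HEAD() */
static int sum;

static void reader(void)
{
        struct entry *e;

        rcu_read_lock();
        list_for_each_entry_rcu(e, &table, node)
                sum += e->value; /* safe: traversal under rcu_read_lock() */
        rcu_read_unlock();
}

static void replace_entry(struct entry *old, struct entry *fresh)
{
        list_replace_rcu(&old->node, &fresh->node);
        synchronize_rcu();      /* wait until no reader can still hold old */
        free(old);
}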


system.h

skipping to change at line 49
  * Identify a shared store. A smp_wmc() or smp_mc() should follow the store.
  */
 #define _STORE_SHARED(x, v)     ({ ACCESS_ONCE(x) = (v); })
 /*
  * Store v into x, where x is located in shared memory. Performs the required
  * cache flush after writing. Returns v.
  */
 #define STORE_SHARED(x, v)                          \
         ({                                          \
-                _STORE_SHARED(x, v);                \
+                typeof(x) _v = _STORE_SHARED(x, v); \
                 smp_wmc();                          \
-                (v);                                \
+                _v;                                 \
         })
 #endif /* _URCU_SYSTEM_H */

End of changes. 2 change blocks. 2 lines changed or deleted, 2 lines changed or added.
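This change is not cosmetic: the old body expanded to `_STORE_SHARED(x, v); smp_wmc(); (v);`, evaluating v twice, once for the store and once more (after the barrier) for the result. The temporary makes the macro single-evaluation. A minimal sketch of what the fix protects against (names invented for illustration):

static int shared_flag;
static int calls;

static int noisy_value(void)
{
        calls++;        /* side effect: ran twice under the old macro */
        return 1;
}

static void publish(void)
{
        int ret = STORE_SHARED(shared_flag, noisy_value());
        (void)ret;      /* ret == 1 and calls == 1 with the fixed macro */
}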


uatomic_arch.h

skipping to change at line 26
  * provided the above notices are retained, and a notice that the code was
  * modified is included with the above copyright notice.
  *
  * Code inspired from libuatomic_ops-1.2, inherited in part from the
  * Boehm-Demers-Weiser conservative garbage collector.
  */
 #include <urcu/compiler.h>
 #include <urcu/system.h>
+#define UATOMIC_HAS_ATOMIC_BYTE
+#define UATOMIC_HAS_ATOMIC_SHORT
 #ifdef __cplusplus
 extern "C" {
 #endif
-#ifndef __SIZEOF_LONG__
-#if defined(__x86_64__) || defined(__amd64__)
-#define __SIZEOF_LONG__ 8
-#else
-#define __SIZEOF_LONG__ 4
-#endif
-#endif
-#ifndef BITS_PER_LONG
-#define BITS_PER_LONG (__SIZEOF_LONG__ * 8)
-#endif
 /*
  * Derived from AO_compare_and_swap() and AO_test_and_set_full().
  */
 struct __uatomic_dummy {
         unsigned long v[10];
 };
 #define __hp(x) ((struct __uatomic_dummy *)(x))
 #define _uatomic_set(addr, v)   STORE_SHARED(*(addr), (v))
-#define _uatomic_read(addr)     LOAD_SHARED(*(addr))
 /* cmpxchg */
 static inline __attribute__((always_inline))
 unsigned long __uatomic_cmpxchg(void *addr, unsigned long old,
                                 unsigned long _new, int len)
 {
         switch (len) {
         case 1:
         {
skipping to change at line 179 (old) / line 169 (new)
         /* generate an illegal instruction. Cannot catch this with linker tricks
          * when optimizations are disabled. */
         __asm__ __volatile__("ud2");
         return 0;
 }
 #define _uatomic_xchg(addr, v)                                             \
         ((__typeof__(*(addr))) __uatomic_exchange((addr), (unsigned long)(v), \
                                                   sizeof(*(addr))))
-/* uatomic_add_return, uatomic_sub_return */
+/* uatomic_add_return */
 static inline __attribute__((always_inline))
 unsigned long __uatomic_add_return(void *addr, unsigned long val,
                                    int len)
 {
         switch (len) {
         case 1:
         {
                 unsigned char result = val;
skipping to change at line 244 (old) / line 234 (new)
          * when optimizations are disabled. */
         __asm__ __volatile__("ud2");
         return 0;
 }
 #define _uatomic_add_return(addr, v)                            \
         ((__typeof__(*(addr))) __uatomic_add_return((addr),     \
                                             (unsigned long)(v), \
                                             sizeof(*(addr))))
-#define _uatomic_sub_return(addr, v)    _uatomic_add_return((addr), -(v))
-/* uatomic_add, uatomic_sub */
+/* uatomic_add */
 static inline __attribute__((always_inline))
 void __uatomic_add(void *addr, unsigned long val, int len)
 {
         switch (len) {
         case 1:
         {
                 __asm__ __volatile__(
                         "lock; addb %1, %0"
                         : "=m"(*__hp(addr))
skipping to change at line 300 (old) / line 288 (new)
         }
         /* generate an illegal instruction. Cannot catch this with linker tricks
          * when optimizations are disabled. */
         __asm__ __volatile__("ud2");
         return;
 }
 #define _uatomic_add(addr, v)                                   \
         (__uatomic_add((addr), (unsigned long)(v), sizeof(*(addr))))
-#define _uatomic_sub(addr, v)   _uatomic_add((addr), -(v))
 /* uatomic_inc */
 static inline __attribute__((always_inline))
 void __uatomic_inc(void *addr, int len)
 {
         switch (len) {
         case 1:
         {
                 __asm__ __volatile__(
                         "lock; incb %0"
skipping to change at line 450 (old) / line 436 (new)
                                                (unsigned long)(_new), \
                                                sizeof(*(addr))))
 extern unsigned long _compat_uatomic_xchg(void *addr,
                                           unsigned long _new, int len);
 #define compat_uatomic_add_return(addr, v)                          \
         ((__typeof__(*(addr))) _compat_uatomic_add_return((addr),   \
                                                 (unsigned long)(v), \
                                                 sizeof(*(addr))))
-#define compat_uatomic_sub_return(addr, v)                          \
-        compat_uatomic_add_return((addr), -(v))
 #define compat_uatomic_add(addr, v)                                 \
         ((void)compat_uatomic_add_return((addr), (v)))
-#define compat_uatomic_sub(addr, v)                                 \
-        ((void)compat_uatomic_sub_return((addr), (v)))
 #define compat_uatomic_inc(addr)                                    \
         (compat_uatomic_add((addr), 1))
 #define compat_uatomic_dec(addr)                                    \
-        (compat_uatomic_sub((addr), 1))
+        (compat_uatomic_add((addr), -1))
 #else
 #define UATOMIC_COMPAT(insn)    (_uatomic_##insn)
 #endif
 /* Read is atomic even in compat mode */
-#define uatomic_read(addr)      _uatomic_read(addr)
 #define uatomic_set(addr, v)                    \
         UATOMIC_COMPAT(set(addr, v))
 #define uatomic_cmpxchg(addr, old, _new)        \
         UATOMIC_COMPAT(cmpxchg(addr, old, _new))
 #define uatomic_xchg(addr, v)                   \
         UATOMIC_COMPAT(xchg(addr, v))
 #define uatomic_add_return(addr, v)             \
         UATOMIC_COMPAT(add_return(addr, v))
-#define uatomic_sub_return(addr, v)             \
-        UATOMIC_COMPAT(sub_return(addr, v))
 #define uatomic_add(addr, v)    UATOMIC_COMPAT(add(addr, v))
-#define uatomic_sub(addr, v)    UATOMIC_COMPAT(sub(addr, v))
 #define uatomic_inc(addr)       UATOMIC_COMPAT(inc(addr))
 #define uatomic_dec(addr)       UATOMIC_COMPAT(dec(addr))
 #ifdef __cplusplus
 }
 #endif
+#include <urcu/uatomic_generic.h>
 #endif /* _URCU_ARCH_UATOMIC_X86_H */

End of changes. 14 change blocks. 31 lines changed or deleted, 10 lines changed or added.
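With the read/sub variants now supplied generically by the newly included urcu/uatomic_generic.h, the per-architecture surface is set/add/add_return/inc/dec/xchg/cmpxchg. A hypothetical sketch of the API on a shared counter (names invented; the header name follows this diff and may differ in other versions of the library):

#include <urcu/uatomic_arch.h>

static unsigned long refcount = 1;
static unsigned long lockvar;

static void get_ref(void)
{
        uatomic_inc(&refcount);
}

static int put_ref(void)
{
        /* add_return yields the post-decrement value atomically, so
         * exactly one caller observes 0 and may release the object. */
        return uatomic_add_return(&refcount, -1) == 0;
}

static int try_lock(void)
{
        /* cmpxchg returns the previous value: 0 means we took the lock. */
        return uatomic_cmpxchg(&lockvar, 0, 1) == 0;
}

static void unlock(void)
{
        uatomic_set(&lockvar, 0);
}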

This html diff was produced by rfcdiff 1.41. The latest version is available from http://tools.ietf.org/tools/rfcdiff/