_flow_graph_types_impl.h

@@ skipping to change at line 308 @@
 };
 template<typename T> struct size_of { static const int value = sizeof(T); };
 template<class T> struct alignment_of {
     typedef struct { char t; T padded; } test_alignment;
     static const size_t value = sizeof(test_alignment) - sizeof(T);
 };
 template< size_t N, class Tuple, template<class> class Selector > struct pick_tuple_max {
     typedef typename pick_tuple_max<N-1, Tuple, Selector>::type LeftMaxType;
-    typedef typename tuple_element<N-1, Tuple>::type ThisType;
+    typedef typename tbb::flow::tuple_element<N-1, Tuple>::type ThisType;
     typedef typename pick_max<Selector, LeftMaxType, ThisType>::type type;
 };
 template< class Tuple, template<class> class Selector > struct pick_tuple_max<0, Tuple, Selector> {
-    typedef typename tuple_element<0, Tuple>::type type;
+    typedef typename tbb::flow::tuple_element<0, Tuple>::type type;
 };
 // is the specified type included in a tuple?
 template<class U, class V> struct is_same_type      { static const bool value = false; };
 template<class W>          struct is_same_type<W,W> { static const bool value = true; };
 template<class Q, size_t N, class Tuple>
 struct is_element_of {
-    typedef typename tuple_element<N-1, Tuple>::type T_i;
+    typedef typename tbb::flow::tuple_element<N-1, Tuple>::type T_i;
     static const bool value = is_same_type<Q,T_i>::value || is_element_of<Q,N-1,Tuple>::value;
 };
 template<class Q, class Tuple>
 struct is_element_of<Q,0,Tuple> {
-    typedef typename tuple_element<0, Tuple>::type T_i;
+    typedef typename tbb::flow::tuple_element<0, Tuple>::type T_i;
     static const bool value = is_same_type<Q,T_i>::value;
 };
 // allow the construction of types that are listed in the tuple. If a disallowed type
 // construction is written, a method involving this type is created. The
 // type has no definition, so a syntax error is generated.
 template<typename T> struct ERROR_Type_Not_allowed_In_Tagged_Msg_Not_Member_Of_Tuple;
 template<typename T, bool BUILD_IT> struct do_if;
 template<typename T>

@@ skipping to change at line 365 @@
 // Wrapper, and how big Wrapper is.
 //
 // the object can only be tested for type, and a read-only reference can be fetched by cast_to<T>().
 using tbb::internal::punned_cast;
 struct tagged_null_type {};
 template<typename TagType, typename T0, typename T1=tagged_null_type, typename T2=tagged_null_type, typename T3=tagged_null_type,
          typename T4=tagged_null_type, typename T5=tagged_null_type, typename T6=tagged_null_type,
          typename T7=tagged_null_type, typename T8=tagged_null_type, typename T9=tagged_null_type>
 class tagged_msg {
-    typedef tuple<T0, T1, T2, T3, T4
+    typedef tbb::flow::tuple<T0, T1, T2, T3, T4
 #if __TBB_VARIADIC_MAX >= 6
     , T5
 #endif
 #if __TBB_VARIADIC_MAX >= 7
     , T6
 #endif
 #if __TBB_VARIADIC_MAX >= 8
     , T7
 #endif
 #if __TBB_VARIADIC_MAX >= 9
     , T8
 #endif
 #if __TBB_VARIADIC_MAX >= 10
     , T9
 #endif
     > Tuple;
 private:
     class variant {
-        static const size_t N = tuple_size<Tuple>::value;
+        static const size_t N = tbb::flow::tuple_size<Tuple>::value;
         typedef typename pick_tuple_max<N, Tuple, alignment_of>::type AlignType;
         typedef typename pick_tuple_max<N, Tuple, size_of>::type MaxSizeType;
         static const size_t MaxNBytes = (sizeof(Wrapper<MaxSizeType>)+sizeof(AlignType)-1);
         static const size_t MaxNElements = MaxNBytes/sizeof(AlignType);
         typedef typename tbb::aligned_space<AlignType, MaxNElements> SpaceType;
         SpaceType my_space;
         static const size_t MaxSize = sizeof(SpaceType);
     public:
         variant() { (void) new(&my_space) Wrapper<default_constructed>(default_constructed()); }

@@ skipping to change at line 446 @@
     TagType my_tag;
     variant my_msg;
 public:
     tagged_msg(): my_tag(TagType(~0)), my_msg(){}
     template<typename T, typename R>
     tagged_msg(T const &index, R const &value) : my_tag(index), my_msg(value) {}
+#if __TBB_CONST_REF_TO_ARRAY_TEMPLATE_PARAM_BROKEN
+    template<typename T, typename R, size_t N>
+    tagged_msg(T const &index, R (&value)[N]) : my_tag(index), my_msg(value) {}
+#endif
     void set_tag(TagType const &index) {my_tag = index;}
     TagType tag() const {return my_tag;}
     template<typename V>
     const V& cast_to() const {return my_msg.template variant_cast_to<V>();}
     template<typename V>
     bool is_a() const {return my_msg.template variant_is_a<V>();}
     bool is_default_constructed() const {return my_msg.variant_is_default_constructed();}

End of changes: 7 change blocks; 6 lines changed or deleted, 12 lines changed or added.
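For readers unfamiliar with this metaprogramming pattern, here is a standalone sketch of pick_tuple_max, substituting std::tuple and a std::conditional-based pick_max for the TBB internals (both substitutions are assumptions for illustration): it recursively selects the tuple element that maximizes Selector<T>::value, which is how tagged_msg sizes and aligns its internal storage.

    #include <tuple>
    #include <type_traits>
    #include <cstddef>

    // Selector: rank a type by its size.
    template<typename T> struct size_of { static const int value = sizeof(T); };

    // Hypothetical stand-in for TBB's pick_max: choose A or B by Selector rank.
    template<template<class> class Selector, class A, class B>
    struct pick_max {
        typedef typename std::conditional<
            (Selector<A>::value > Selector<B>::value), A, B >::type type;
    };

    // Walk the first N tuple elements, keeping the best-ranked type so far.
    template<std::size_t N, class Tuple, template<class> class Selector>
    struct pick_tuple_max {
        typedef typename pick_tuple_max<N-1, Tuple, Selector>::type LeftMaxType;
        typedef typename std::tuple_element<N-1, Tuple>::type ThisType;
        typedef typename pick_max<Selector, LeftMaxType, ThisType>::type type;
    };
    template<class Tuple, template<class> class Selector>
    struct pick_tuple_max<0, Tuple, Selector> {
        typedef typename std::tuple_element<0, Tuple>::type type;
    };

    // The largest element of tuple<char, double, int> is double.
    static_assert(
        std::is_same< pick_tuple_max<3, std::tuple<char, double, int>, size_of>::type,
                      double >::value, "size_of selects double" );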


aggregator.h

@@ skipping to change at line 191 @@
         // release the handler
         itt_store_word_with_release(handler_busy, uintptr_t(0));
     }
 };

 //! Basic aggregator interface
 class aggregator : private aggregator_ext<internal::basic_handler> {
 public:
     aggregator() : aggregator_ext<internal::basic_handler>(internal::basic_handler()) {}
-    //! BASIC INTERFACE: Enter a function for exclusvie execution by the aggregator.
+    //! BASIC INTERFACE: Enter a function for exclusive execution by the aggregator.
     /** The calling thread stores the function object in a basic_operation and
         places the operation in the aggregator's mailbox */
     template<typename Body>
     void execute(const Body& b) {
         internal::basic_operation<Body> op(b);
         this->execute_impl(op);
     }
 };

 } // namespace interface6

End of changes: 1 change block; 1 line changed or deleted, 1 line changed or added.
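As a usage illustration, a minimal sketch of the basic interface above (the preview macro, the shared container, and the lambda body are assumptions; execute() blocks until the operation has been run):

    #define TBB_PREVIEW_AGGREGATOR 1
    #include "tbb/aggregator.h"
    #include <queue>

    std::priority_queue<int> shared_pq;  // not thread-safe by itself
    tbb::aggregator agg;                 // serializes operations on shared_pq

    void concurrent_push(int v) {
        // Each thread wraps its work in a function object; the aggregator
        // executes queued operations one at a time, so no mutex is needed.
        agg.execute( [&]{ shared_pq.push(v); } );
    }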


aligned_space.h

@@ skipping to change at line 40 @@
 #define __TBB_aligned_space_H

 #include "tbb_stddef.h"
 #include "tbb_machine.h"

 namespace tbb {

 //! Block of space aligned sufficiently to construct an array T with N elements.
 /** The elements are not constructed or destroyed by this class.
     @ingroup memory_allocation */
-template<typename T,size_t N>
+template<typename T,size_t N=1>
 class aligned_space {
 private:
     typedef __TBB_TypeWithAlignmentAtLeastAsStrict(T) element_type;
     element_type array[(sizeof(T)*N+sizeof(element_type)-1)/sizeof(element_type)];
 public:
     //! Pointer to beginning of array
     T* begin() {return internal::punned_cast<T*>(this);}
     //! Pointer to one past last element in array.
     T* end() {return begin()+N;}

End of changes: 1 change block; 1 line changed or deleted, 1 line changed or added.
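A short sketch of the intended use, and of what the new default argument buys (Widget and the call sequence are illustrative assumptions): aligned_space supplies raw aligned storage only, so construction and destruction stay with the caller.

    #include "tbb/aligned_space.h"
    #include <new>

    struct Widget { int x; };

    void demo() {
        tbb::aligned_space<Widget> buf;           // N defaults to 1 after this change
        Widget* w = new( buf.begin() ) Widget();  // placement-construct in the storage
        w->x = 42;
        w->~Widget();                             // caller destroys; buf owns no object
    }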


atomic.h

@@ skipping to change at line 427 @@
     T operator=( T rhs ) {
         // "this" required here in strict ISO C++ because store_with_release is a dependent name
         return this->store_with_release(rhs);
     }
     atomic<T>& operator=( const atomic<T>& rhs ) {this->store_with_release(rhs); return *this;}
 };

 #if __TBB_ATOMIC_CTORS
     #define __TBB_DECL_ATOMIC(T) \
         template<> struct atomic<T>: internal::atomic_impl_with_arithmetic<T,T,char> { \
             atomic() = default; \
             constexpr atomic(T arg): internal::atomic_impl_with_arithmetic<T,T,char>(arg) {} \
             \
             T operator=( T rhs ) {return store_with_release(rhs);} \
             atomic<T>& operator=( const atomic<T>& rhs ) {store_with_release(rhs); return *this;} \
         };
 #else
     #define __TBB_DECL_ATOMIC(T) \
         template<> struct atomic<T>: internal::atomic_impl_with_arithmetic<T,T,char> { \
             T operator=( T rhs ) {return store_with_release(rhs);} \
             atomic<T>& operator=( const atomic<T>& rhs ) {store_with_release(rhs); return *this;} \

@@ skipping to change at line 460 @@
 #if _MSC_VER && !_WIN64
 #if __TBB_ATOMIC_CTORS
 /* Special version of __TBB_DECL_ATOMIC that avoids gratuitous warnings from cl /Wp64 option.
    It is identical to __TBB_DECL_ATOMIC(unsigned) except that it replaces operator=(T)
    with an operator=(U) that explicitly converts the U to a T. Types T and U should be
    type synonyms on the platform. Type U should be the wider variant of T from the
    perspective of /Wp64. */
 #define __TBB_DECL_ATOMIC_ALT(T,U) \
     template<> struct atomic<T>: internal::atomic_impl_with_arithmetic<T,T,char> { \
         atomic() = default; \
         constexpr atomic(T arg): internal::atomic_impl_with_arithmetic<T,T,char>(arg) {} \
         T operator=( U rhs ) {return store_with_release(T(rhs));} \
         atomic<T>& operator=( const atomic<T>& rhs ) {store_with_release(rhs); return *this;} \
     };
 #else
 #define __TBB_DECL_ATOMIC_ALT(T,U) \
     template<> struct atomic<T>: internal::atomic_impl_with_arithmetic<T,T,char> { \
         T operator=( U rhs ) {return store_with_release(T(rhs));} \
         atomic<T>& operator=( const atomic<T>& rhs ) {store_with_release(rhs); return *this;} \
     };

@@ skipping to change at line 532 @@
 // Helpers to workaround ugly syntax of calling template member function of a
 // template class with template argument dependent on template parameters.
 template <memory_semantics M, typename T>
 T load ( const atomic<T>& a ) { return a.template load<M>(); }

 template <memory_semantics M, typename T>
 void store ( atomic<T>& a, T value ) { return a.template store<M>(value); }

 namespace interface6{
-//! Make an atomic for use in an initialization (list), as an alternative to zero-initializaton or normal assignment.
+//! Make an atomic for use in an initialization (list), as an alternative to zero-initialization or normal assignment.
 template<typename T>
 atomic<T> make_atomic(T t) {
     atomic<T> a;
     store<relaxed>(a,t);
     return a;
 }
 }
 using interface6::make_atomic;

 namespace internal {

End of changes: 3 change blocks; 3 lines changed or deleted, 3 lines changed or added.
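A brief sketch of why make_atomic exists (the struct is an illustrative assumption): a tbb::atomic member can be given a well-defined initial value inside an initializer list, where plain assignment is not available.

    #include "tbb/atomic.h"

    struct Counters {
        tbb::atomic<int> hits;
        // make_atomic performs a relaxed store into a fresh atomic and returns
        // it, so 'hits' is copy-initialized with a known value rather than
        // left zero-initialized or assigned after construction.
        Counters() : hits( tbb::make_atomic(0) ) {}
    };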


concurrent_hash_map.h

@@ skipping to change at line 52 @@
 #include <cstring>      // Need std::memset

 #if !TBB_USE_EXCEPTIONS && _MSC_VER
     #pragma warning (pop)
 #endif

 #include "cache_aligned_allocator.h"
 #include "tbb_allocator.h"
 #include "spin_rw_mutex.h"
 #include "atomic.h"
-#include "aligned_space.h"
 #include "tbb_exception.h"
 #include "tbb_profiling.h"
 #include "internal/_concurrent_unordered_impl.h" // Need tbb_hasher
 #if __TBB_INITIALIZER_LISTS_PRESENT
 #include <initializer_list>
 #endif

 #if TBB_USE_PERFORMANCE_WARNINGS || __TBB_STATISTICS
 #include <typeinfo>
 #endif
 #if __TBB_STATISTICS

End of changes: 1 change block; 1 line changed or deleted, 0 lines changed or added.


enumerable_thread_specific.h

@@ skipping to change at line 598 @@
 // !=
 template<typename SegmentedContainer, typename T, typename U>
 bool operator!=( const segmented_iterator<SegmentedContainer,T>& i,
                  const segmented_iterator<SegmentedContainer,U>& j ) {
     return !(i==j);
 }

 template<typename T>
 struct destruct_only: tbb::internal::no_copy {
-    tbb::aligned_space<T,1> value;
+    tbb::aligned_space<T> value;
     ~destruct_only() {value.begin()[0].~T();}
 };

 template<typename T>
 struct construct_by_default: tbb::internal::no_assign {
     void construct(void*where) {new(where) T();} // C++ note: the ( ) in T() ensure zero initialization.
     construct_by_default( int ) {}
 };

 template<typename T>

End of changes: 1 change block; 1 line changed or deleted, 1 line changed or added.
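The "C++ note" on construct_by_default is worth a concrete illustration (Pod and construct_demo are assumptions): with placement new, T() with parentheses value-initializes, while a bare T leaves POD members indeterminate.

    #include <new>

    struct Pod { int x; };

    void construct_demo( void* where ) {
        Pod* zeroed = new(where) Pod();  // the ( ) value-initializes: x == 0
        zeroed->~Pod();
        Pod* raw = new(where) Pod;       // no ( ): x is indeterminate
        raw->~Pod();
    }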


flow_graph.h

@@ skipping to change at line 74 @@
 #include<list>
 #include<queue>

 /** @file
   \brief The graph related classes and functions

   There are some applications that best express dependencies as messages
   passed between nodes in a graph. These messages may contain data or
   simply act as signals that a predecessor has completed. The graph
   class and its associated node classes can be used to express such
-  applcations.
+  applications.
 */

 namespace tbb {
 namespace flow {

 //! An enumeration that provides the two most common concurrency levels: unlimited and serial
 enum concurrency { unlimited = 0, serial = 1 };

 namespace interface7 {

@@ skipping to change at line 1136 @@
     ~overwrite_node() {}

 #if TBB_PREVIEW_FLOW_GRAPH_TRACE
     /* override */ void set_name( const char *name ) {
         tbb::internal::fgt_node_desc( this, name );
     }
 #endif

     /* override */ bool register_successor( successor_type &s ) {
         spin_mutex::scoped_lock l( my_mutex );
-        if ( my_buffer_is_valid ) {
+        task* tp = this->my_graph.root_task(); // just to test if we are resetting
+        if (my_buffer_is_valid && tp) {
             // We have a valid value that must be forwarded immediately.
             if ( s.try_put( my_buffer ) || !s.register_predecessor( *this ) ) {
                 // We add the successor: it accepted our put or it rejected it but won't let us become a predecessor
                 my_successors.register_successor( s );
             } else {
                 // We don't add the successor: it rejected our put and we became its predecessor instead
                 return false;
             }
         } else {
             // No valid value yet, just add as successor

End of changes: 2 change blocks; 2 lines changed or deleted, 4 lines changed or added.
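To see the forwarding rule in register_successor in action, a minimal sketch (node names and the printf body are assumptions): an overwrite_node that already holds a valid value pushes it to a successor at the moment the edge is made.

    #include "tbb/flow_graph.h"
    #include <cstdio>

    int main() {
        tbb::flow::graph g;
        tbb::flow::overwrite_node<int> latest(g);
        tbb::flow::function_node<int> printer( g, tbb::flow::serial,
            []( const int& v ) -> tbb::flow::continue_msg {
                std::printf( "got %d\n", v );
                return tbb::flow::continue_msg();
            } );
        latest.try_put(42);                     // buffer becomes valid
        tbb::flow::make_edge(latest, printer);  // register_successor forwards 42
        g.wait_for_all();
        return 0;
    }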


mic_common.h

@@ skipping to change at line 50 @@
 #ifndef __TBB_PREFETCHING
 #define __TBB_PREFETCHING 1
 #endif
 #if __TBB_PREFETCHING
 #include <immintrin.h>
 #define __TBB_cl_prefetch(p) _mm_prefetch((const char*)p, _MM_HINT_T1)
 #define __TBB_cl_evict(p) _mm_clevict(p, _MM_HINT_T1)
 #endif

 /** Intel(R) Many Integrated Core Architecture does not support mfence and pause instructions **/
-#define __TBB_full_memory_fence() __asm__ __volatile__("lock; addl $0,(%%rsp)":::"memory")
+#if !TBB_USE_ICC_BUILTINS
+    #define __TBB_full_memory_fence() __asm__ __volatile__("lock; addl $0,(%%rsp)":::"memory")
+#endif
 #define __TBB_Pause(x) _mm_delay_32(16*(x))
 #define __TBB_STEALING_PAUSE 1500/16
 #include <sched.h>
 #define __TBB_Yield() sched_yield()

 // low-level timing intrinsic and its type
 #define __TBB_machine_time_stamp() _rdtsc()
 typedef uint64_t machine_tsc_t;

 /** Specifics **/

End of changes: 1 change block; 2 lines changed or deleted, 4 lines changed or added.


mutex.h

@@ skipping to change at line 158 @@
     // Mutex traits
     static const bool is_rw_mutex = false;
     static const bool is_recursive_mutex = false;
     static const bool is_fair_mutex = false;

     // ISO C++0x compatibility methods

     //! Acquire lock
     void lock() {
 #if TBB_USE_ASSERT
-        aligned_space<scoped_lock,1> tmp;
+        aligned_space<scoped_lock> tmp;
         new(tmp.begin()) scoped_lock(*this);
 #else
   #if _WIN32||_WIN64
         EnterCriticalSection(&impl);
   #else
         pthread_mutex_lock(&impl);
   #endif /* _WIN32||_WIN64 */
 #endif /* TBB_USE_ASSERT */
     }

     //! Try acquiring lock (non-blocking)
     /** Return true if lock acquired; false otherwise. */
     bool try_lock() {
 #if TBB_USE_ASSERT
-        aligned_space<scoped_lock,1> tmp;
+        aligned_space<scoped_lock> tmp;
         scoped_lock& s = *tmp.begin();
         s.my_mutex = NULL;
         return s.internal_try_acquire(*this);
 #else
   #if _WIN32||_WIN64
         return TryEnterCriticalSection(&impl)!=0;
   #else
         return pthread_mutex_trylock(&impl)==0;
   #endif /* _WIN32||_WIN64 */
 #endif /* TBB_USE_ASSERT */
     }

     //! Release lock
     void unlock() {
 #if TBB_USE_ASSERT
-        aligned_space<scoped_lock,1> tmp;
+        aligned_space<scoped_lock> tmp;
         scoped_lock& s = *tmp.begin();
         s.my_mutex = this;
         s.internal_release();
 #else
   #if _WIN32||_WIN64
         LeaveCriticalSection(&impl);
   #else
         pthread_mutex_unlock(&impl);
   #endif /* _WIN32||_WIN64 */
 #endif /* TBB_USE_ASSERT */

End of changes: 3 change blocks; 3 lines changed or deleted, 3 lines changed or added.
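For reference, both locking styles supported by the class above (the counter and functions are assumptions); the scoped_lock form is what the TBB_USE_ASSERT branches build internally via placement new.

    #include "tbb/mutex.h"

    tbb::mutex m;
    int counter = 0;

    void increment_raii() {
        tbb::mutex::scoped_lock guard(m);  // acquires; releases in its destructor
        ++counter;
    }

    void increment_iso() {
        m.lock();      // the ISO C++0x-compatible methods shown above
        ++counter;
        m.unlock();
    }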


parallel_invoke.h

@@ skipping to change at line 339 @@
     parallel_invoke<F0, F1, F2, F3, F4, F5>(f0, f1, f2, f3, f4, f5, context);
 }

 // seven arguments
 template<typename F0, typename F1, typename F2, typename F3, typename F4, typename F5, typename F6>
 void parallel_invoke(const F0& f0, const F1& f1, const F2& f2, const F3& f3, const F4& f4,
                      const F5& f5, const F6& f6)
 {
     task_group_context context;
     parallel_invoke<F0, F1, F2, F3, F4, F5, F6>(f0, f1, f2, f3, f4, f5, f6, context);
 }

-// eigth arguments
+// eight arguments
 template<typename F0, typename F1, typename F2, typename F3, typename F4,
          typename F5, typename F6, typename F7>
 void parallel_invoke(const F0& f0, const F1& f1, const F2& f2, const F3& f3, const F4& f4,
                      const F5& f5, const F6& f6, const F7& f7)
 {
     task_group_context context;
     parallel_invoke<F0, F1, F2, F3, F4, F5, F6, F7>(f0, f1, f2, f3, f4, f5, f6, f7, context);
 }

 // nine arguments
 template<typename F0, typename F1, typename F2, typename F3, typename F4,

End of changes: 1 change block; 1 line changed or deleted, 1 line changed or added.
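Usage of these overloads is uniform regardless of arity (the three functions are assumptions): each call creates its own task_group_context, runs all functors potentially in parallel, and returns once every one has finished.

    #include "tbb/parallel_invoke.h"

    void load_config();
    void warm_cache();
    void open_log();

    void startup() {
        // Three-argument overload; versions up to ten arguments exist as above.
        tbb::parallel_invoke( load_config, warm_cache, open_log );
    }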


parallel_reduce.h

@@ skipping to change at line 62 @@
         typedef char reduction_context;

         //! Task type used to combine the partial results of parallel_reduce.
         /** @ingroup algorithms */
         template<typename Body>
         class finish_reduce: public flag_task {
             //! Pointer to body, or NULL if the left child has not yet finished.
             bool has_right_zombie;
             const reduction_context my_context;
             Body* my_body;
-            aligned_space<Body,1> zombie_space;
+            aligned_space<Body> zombie_space;
             finish_reduce( reduction_context context_ ) :
                 has_right_zombie(false), // TODO: substitute by flag_task::child_stolen?
                 my_context(context_),
                 my_body(NULL)
             {
             }
             ~finish_reduce() {
                 if( has_right_zombie )
                     zombie_space.begin()->~Body();
             }

End of changes: 1 change block; 1 line changed or deleted, 1 line changed or added.
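For context, a sketch of the Body concept that finish_reduce manages (SumBody is an assumption): the splitting constructor creates the right-hand body, whose remains may live in zombie_space until join() folds it back into the left one.

    #include "tbb/parallel_reduce.h"
    #include "tbb/blocked_range.h"
    #include <cstddef>

    struct SumBody {
        long total;
        SumBody() : total(0) {}
        SumBody( SumBody&, tbb::split ) : total(0) {}  // splitting constructor
        void operator()( const tbb::blocked_range<const long*>& r ) {
            for( const long* p = r.begin(); p != r.end(); ++p ) total += *p;
        }
        void join( SumBody& rhs ) { total += rhs.total; }  // combine partial results
    };

    long parallel_sum( const long* first, std::size_t n ) {
        SumBody body;
        tbb::parallel_reduce( tbb::blocked_range<const long*>(first, first + n), body );
        return body.total;
    }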


parallel_scan.h

@@ skipping to change at line 61 @@
 //! @cond INTERNAL
 namespace internal {

     //! Performs final scan for a leaf
     /** @ingroup algorithms */
     template<typename Range, typename Body>
     class final_sum: public task {
     public:
         Body my_body;
     private:
-        aligned_space<Range,1> my_range;
+        aligned_space<Range> my_range;
         //! Where to put result of last subrange, or NULL if not last subrange.
         Body* my_stuff_last;
     public:
         final_sum( Body& body_ ) :
             my_body(body_,split())
         {
             poison_pointer(my_stuff_last);
         }
         ~final_sum() {
             my_range.begin()->~Range();

End of changes: 1 change block; 1 line changed or deleted, 1 line changed or added.


partitioner.h

@@ skipping to change at line 152 @@
 #else
         flag = true;
 #endif //TBB_USE_THREADING_TOOLS
     }
     static bool is_peer_stolen(task &t) {
         return static_cast<flag_task*>(t.parent())->my_child_stolen;
     }
 };

 //! Depth is a relative depth of recursive division inside a range pool. Relative depth allows
-//! infinite absolute depth of the recursion for heavily imbalanced workloads with range represented
+//! infinite absolute depth of the recursion for heavily unbalanced workloads with range represented
 //! by a number that cannot fit into machine word.
 typedef unsigned char depth_t;

 //! Range pool stores ranges of type T in a circular buffer with MaxCapacity
 template <typename T, depth_t MaxCapacity>
 class range_vector {
     depth_t my_head;
     depth_t my_tail;
     depth_t my_size;
     depth_t my_depth[MaxCapacity]; // relative depths of stored ranges

End of changes: 1 change block; 1 line changed or deleted, 1 line changed or added.


pipeline.h

@@ skipping to change at line 219 @@
         // item was processed
         success,
         // item is currently not available
         item_not_available,
         // there are no more items to process
         end_of_stream
     };
 protected:
     thread_bound_filter(mode filter_mode):
         filter(static_cast<mode>(filter_mode | filter::filter_is_bound))
-    {}
+    {
+        __TBB_ASSERT(filter_mode & filter::filter_is_serial, "thread-bound filters must be serial");
+    }
 public:
     //! If a data item is available, invoke operator() on that item.
     /** This interface is non-blocking.
         Returns 'success' if an item was processed.
         Returns 'item_not_available' if no item can be processed now
         but more may arrive in the future, or if token limit is reached.
         Returns 'end_of_stream' if there are no more items to process. */
     result_type __TBB_EXPORTED_METHOD try_process_item();

     //! Wait until a data item becomes available, and invoke operator() on that item.

End of changes: 1 change block; 1 line changed or deleted, 4 lines changed or added.
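A sketch of a conforming thread-bound filter (the class name and loop are assumptions): the new assertion requires a serial mode, and the bound thread services the filter explicitly rather than letting the pipeline's worker threads run it.

    #include "tbb/pipeline.h"
    #include <cstddef>

    class TokenSink : public tbb::thread_bound_filter {
    public:
        TokenSink() : tbb::thread_bound_filter( tbb::filter::serial_in_order ) {}
        /*override*/ void* operator()( void* item ) {
            // consume 'item' on the owning thread
            return NULL;
        }
    };

    void service_loop( TokenSink& sink ) {
        // The owning thread drives the filter until the pipeline is done.
        while( sink.process_item() != tbb::thread_bound_filter::end_of_stream )
            continue;
    }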


recursive_mutex.h

@@ skipping to change at line 168 @@
     // Mutex traits
     static const bool is_rw_mutex = false;
     static const bool is_recursive_mutex = true;
     static const bool is_fair_mutex = false;

     // C++0x compatibility interface

     //! Acquire lock
     void lock() {
 #if TBB_USE_ASSERT
-        aligned_space<scoped_lock,1> tmp;
+        aligned_space<scoped_lock> tmp;
         new(tmp.begin()) scoped_lock(*this);
 #else
   #if _WIN32||_WIN64
         EnterCriticalSection(&impl);
   #else
         pthread_mutex_lock(&impl);
   #endif /* _WIN32||_WIN64 */
 #endif /* TBB_USE_ASSERT */
     }

     //! Try acquiring lock (non-blocking)
     /** Return true if lock acquired; false otherwise. */
     bool try_lock() {
 #if TBB_USE_ASSERT
-        aligned_space<scoped_lock,1> tmp;
+        aligned_space<scoped_lock> tmp;
         return (new(tmp.begin()) scoped_lock)->internal_try_acquire(*this);
 #else
   #if _WIN32||_WIN64
         return TryEnterCriticalSection(&impl)!=0;
   #else
         return pthread_mutex_trylock(&impl)==0;
   #endif /* _WIN32||_WIN64 */
 #endif /* TBB_USE_ASSERT */
     }

     //! Release lock
     void unlock() {
 #if TBB_USE_ASSERT
-        aligned_space<scoped_lock,1> tmp;
+        aligned_space<scoped_lock> tmp;
         scoped_lock& s = *tmp.begin();
         s.my_mutex = this;
         s.internal_release();
 #else
   #if _WIN32||_WIN64
         LeaveCriticalSection(&impl);
   #else
         pthread_mutex_unlock(&impl);
   #endif /* _WIN32||_WIN64 */
 #endif /* TBB_USE_ASSERT */

End of changes: 3 change blocks; 3 lines changed or deleted, 3 lines changed or added.


runtime_loader.h

@@ skipping to change at line 89 @@
     The most noticeable piece of global state is the loaded TBB library.
     There are some implications:

     - Only one TBB library can be loaded per module.
     - If one object has already loaded TBB library, another object will not load TBB.
       If the loaded TBB library is suitable for the second object, both will use TBB
       cooperatively, otherwise the second object will report an error.
     - \c runtime_loader objects will not work (correctly) in parallel due to absence of
-      syncronization.
+      synchronization.
 */
 class runtime_loader : tbb::internal::no_copy {
 public:
     //! Error mode constants.
     enum error_mode {
         em_status, //!< Save status of operation and continue.

End of changes: 1 change block; 1 line changed or deleted, 1 line changed or added.


spin_mutex.h

@@ skipping to change at line 158 @@
     // Mutex traits
     static const bool is_rw_mutex = false;
     static const bool is_recursive_mutex = false;
     static const bool is_fair_mutex = false;

     // ISO C++0x compatibility methods

     //! Acquire lock
     void lock() {
 #if TBB_USE_THREADING_TOOLS
-        aligned_space<scoped_lock,1> tmp;
+        aligned_space<scoped_lock> tmp;
         new(tmp.begin()) scoped_lock(*this);
 #else
         __TBB_LockByte(flag);
 #endif /* TBB_USE_THREADING_TOOLS*/
     }

     //! Try acquiring lock (non-blocking)
     /** Return true if lock acquired; false otherwise. */
     bool try_lock() {
 #if TBB_USE_THREADING_TOOLS
-        aligned_space<scoped_lock,1> tmp;
+        aligned_space<scoped_lock> tmp;
         return (new(tmp.begin()) scoped_lock)->internal_try_acquire(*this);
 #else
         return __TBB_TryLockByte(flag);
 #endif /* TBB_USE_THREADING_TOOLS*/
     }

     //! Release lock
     void unlock() {
 #if TBB_USE_THREADING_TOOLS
-        aligned_space<scoped_lock,1> tmp;
+        aligned_space<scoped_lock> tmp;
         scoped_lock& s = *tmp.begin();
         s.my_mutex = this;
         s.internal_release();
 #else
         __TBB_store_with_release(flag, 0);
 #endif /* TBB_USE_THREADING_TOOLS */
     }

     friend class scoped_lock;
 }; // end of spin_mutex

End of changes: 3 change blocks; 3 lines changed or deleted, 3 lines changed or added.


task.h

@@ skipping to change at line 290 @@
 #if TBB_USE_CAPTURED_EXCEPTION
     class tbb_exception;
 #else
     namespace internal {
         class tbb_exception_ptr;
     }
 #endif /* !TBB_USE_CAPTURED_EXCEPTION */

 class task_scheduler_init;
+namespace interface7 { class task_arena; }

 //! Used to form groups of tasks
 /** @ingroup task_scheduling
     The context services explicit cancellation requests from user code, and unhandled
     exceptions intercepted during tasks execution. Intercepting an exception results
     in generating internal cancellation requests (which is processed in exactly the
     same way as external ones).

     The context is associated with one or more root tasks and defines the cancellation
     group that includes all the descendants of the corresponding root task(s). Association

@@ skipping to change at line 316 (old) / 317 (new) @@
     all the other tasks in this group and groups bound to it (as children) get cancelled too.

     IMPLEMENTATION NOTE:
     When adding new members to task_group_context or changing types of existing ones,
     update the size of both padding buffers (_leading_padding and _trailing_padding)
     appropriately. See also VERSIONING NOTE at the constructor definition below. **/
 class task_group_context : internal::no_copy {
 private:
     friend class internal::generic_scheduler;
     friend class task_scheduler_init;
+    friend class interface7::task_arena;

 #if TBB_USE_CAPTURED_EXCEPTION
     typedef tbb_exception exception_container_type;
 #else
     typedef internal::tbb_exception_ptr exception_container_type;
 #endif

     enum version_traits_word_layout {
         traits_offset = 16,
         version_mask = 0xFFFF,

End of changes: 2 change blocks; 0 lines changed or deleted, 2 lines changed or added.
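As a reminder of the cancellation semantics described above, a small sketch (the search loop and its data are assumptions): cancelling a task_group_context cancels every task in its group.

    #include "tbb/task.h"
    #include "tbb/parallel_for.h"
    #include "tbb/blocked_range.h"

    void search( const int* data, long n, int needle ) {
        tbb::task_group_context ctx;
        tbb::parallel_for( tbb::blocked_range<long>(0, n),
            [&]( const tbb::blocked_range<long>& r ) {
                for( long i = r.begin(); i != r.end(); ++i )
                    if( data[i] == needle )
                        ctx.cancel_group_execution();  // stops the whole group
            },
            ctx );
    }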


task_arena.h

@@ skipping to change at line 88 @@
     //! default context of the arena
     task_group_context *my_context;
 #endif

     //! Concurrency level for deferred initialization
     int my_max_concurrency;

     //! Reserved master slots
     unsigned my_master_slots;

-    //! Reserved for future use
-    intptr_t my_reserved;
+    //! Special settings
+    intptr_t my_version_and_traits;

+    enum {
+        default_flags = 0
+#if __TBB_TASK_GROUP_CONTEXT
+        | (task_group_context::default_traits & task_group_context::exact_exception) // 0 or 1 << 16
+        , exact_exception_flag = task_group_context::exact_exception // used to specify flag for context directly
+#endif
+    };

     task_arena_base(int max_concurrency, unsigned reserved_for_masters)
         : my_arena(0)
 #if __TBB_TASK_GROUP_CONTEXT
         , my_context(0)
 #endif
         , my_max_concurrency(max_concurrency)
         , my_master_slots(reserved_for_masters)
-        , my_reserved(0)
+        , my_version_and_traits(default_flags)
     {}

     void __TBB_EXPORTED_METHOD internal_initialize( );
     void __TBB_EXPORTED_METHOD internal_terminate( );
     void __TBB_EXPORTED_METHOD internal_enqueue( task&, intptr_t ) const;
     void __TBB_EXPORTED_METHOD internal_execute( delegate_base& ) const;
     void __TBB_EXPORTED_METHOD internal_wait() const;
     static int __TBB_EXPORTED_FUNC internal_current_slot();
 public:
     //! Typedef for number of threads that is automatic.

@@ skipping to change at line 243 (old) / 251 (new) @@
     //! Wait for all work in the arena to be completed
     //! Even submitted by other application threads
     //! Joins arena if/when possible (in the same way as execute())
     void debug_wait_until_empty() {
         initialize();
         internal_wait();
     }
 #endif //__TBB_EXTRA_DEBUG

     //! Returns the index, aka slot number, of the calling thread in its current arena
-    inline static int current_slot() {
+    inline static int current_thread_index() {
         return internal_current_slot();
     }
 };

 } // namespace interfaceX

 using interface7::task_arena;

 } // namespace tbb

End of changes: 3 change blocks; 4 lines changed or deleted, 14 lines changed or added.
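A short usage sketch reflecting the rename above (current_slot becomes current_thread_index; the preview macro and arena size are assumptions):

    #define TBB_PREVIEW_TASK_ARENA 1
    #include "tbb/task_arena.h"

    tbb::task_arena arena(4);  // at most 4 threads participate

    void submit() {
        arena.execute( []{
            // 0 is the reserved master slot; worker threads report higher indices.
            int idx = tbb::task_arena::current_thread_index();
            (void)idx;
        } );
    }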


tbb_config.h

@@ skipping to change at line 608 @@
     // of 64 bit atomics (e.g. atomic<long long>) use different tactics depending upon
     // whether the object is properly aligned or not.
     #define __TBB_FORCE_64BIT_ALIGNMENT_BROKEN 1
 #else
     #define __TBB_FORCE_64BIT_ALIGNMENT_BROKEN 0
 #endif

 #if __TBB_DEFAULTED_AND_DELETED_FUNC_PRESENT && __TBB_GCC_VERSION < 40700 && !defined(__INTEL_COMPILER) && !defined (__clang__)
     #define __TBB_ZERO_INIT_WITH_DEFAULTED_CTOR_BROKEN 1
 #endif

+#if _MSC_VER && _MSC_VER <= 1800 && !__INTEL_COMPILER
+    // With MSVC, when an array is passed by const reference to a template function,
+    // constness from the function parameter may get propagated to the template parameter.
+    #define __TBB_CONST_REF_TO_ARRAY_TEMPLATE_PARAM_BROKEN 1
+#endif

 /** End of __TBB_XXX_BROKEN macro section **/

 #if defined(_MSC_VER) && _MSC_VER>=1500 && !defined(__INTEL_COMPILER)
     // A macro to suppress erroneous or benign "unreachable code" MSVC warning (4702)
     #define __TBB_MSVC_UNREACHABLE_CODE_IGNORED 1
 #endif

 #define __TBB_ATOMIC_CTORS     (__TBB_CONSTEXPR_PRESENT && __TBB_DEFAULTED_AND_DELETED_FUNC_PRESENT && (!__TBB_ZERO_INIT_WITH_DEFAULTED_CTOR_BROKEN))
 #define __TBB_ALLOCATOR_CONSTRUCT_VARIADIC    (__TBB_CPP11_VARIADIC_TEMPLATES_PRESENT && __TBB_CPP11_RVALUE_REF_PRESENT)

End of changes: 1 change block; 0 lines changed or deleted, 8 lines changed or added.
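A hedged sketch of the workaround pattern this macro enables (take() is hypothetical; compare the tagged_msg constructor added in _flow_graph_types_impl.h above): on affected MSVC versions, an explicit array overload sidesteps the bad deduction.

    #include <cstddef>

    template<typename R>
    void take( R const& value ) { (void)value; }   // general case; MSVC <= 1800 may
                                                   // mis-deduce R with an extra const
                                                   // for array arguments

    #if __TBB_CONST_REF_TO_ARRAY_TEMPLATE_PARAM_BROKEN
    template<typename R, std::size_t N>
    void take( R (&value)[N] ) { (void)value; }    // array overload: R deduced cleanly
    #endif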


tbb_machine.h

@@ skipping to change at line 180 @@
 template<> struct atomic_selector<8> {
     typedef int64_t word;
     inline static word fetch_store ( volatile void* location, word value );
 };

 }} //< namespaces internal @endcond, tbb

 #define __TBB_MACHINE_DEFINE_STORE8_GENERIC_FENCED(M)                                        \
     inline void __TBB_machine_generic_store8##M(volatile void *ptr, int64_t value) {         \
         for(;;) {                                                                            \
             int64_t result = *(volatile int64_t *)ptr;                                       \
             if( __TBB_machine_cmpswp8##M(ptr,value,result)==result ) break;                  \
         }                                                                                    \
     }                                                                                        \

 #define __TBB_MACHINE_DEFINE_LOAD8_GENERIC_FENCED(M)                                         \
     inline int64_t __TBB_machine_generic_load8##M(const volatile void *ptr) {                \
         /* Comparand and new value may be anything, they only must be equal, and */          \
         /* the value should have a low probability to be actually found in 'location'.*/     \
         const int64_t anyvalue = 2305843009213693951LL;                                      \
         return __TBB_machine_cmpswp8##M(const_cast<volatile void *>(ptr),anyvalue,anyvalue); \

@@ skipping to change at line 234 @@
         #include "machine/msvc_armv7.h"
     #endif
     #ifdef _MANAGED
         #pragma managed(pop)
     #endif
 #elif __TBB_DEFINE_MIC
     #include "machine/mic_common.h"
-    //TODO: check if ICC atomic intrinsics are available for MIC
-    #include "machine/linux_intel64.h"
+    #if (TBB_USE_ICC_BUILTINS && __TBB_ICC_BUILTIN_ATOMICS_PRESENT)
+        #include "machine/icc_generic.h"
+    #else
+        #include "machine/linux_intel64.h"
+    #endif
 #elif __linux__ || __FreeBSD__ || __NetBSD__
     #if (TBB_USE_GCC_BUILTINS && __TBB_GCC_BUILTIN_ATOMICS_PRESENT)
         #include "machine/gcc_generic.h"
     #elif (TBB_USE_ICC_BUILTINS && __TBB_ICC_BUILTIN_ATOMICS_PRESENT)
         #include "machine/icc_generic.h"
     #elif __i386__
         #include "machine/linux_ia32.h"
     #elif __x86_64__

End of changes: 2 change blocks; 3 lines changed or deleted, 6 lines changed or added.
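The LOAD8 macro's comment deserves a plain-C++ restatement (cmpswp8 is a placeholder for the platform CAS primitive, an assumption): when the comparand and the new value are equal, a compare-and-swap never changes memory contents, so its return value acts as a pure 64-bit atomic load.

    #include <stdint.h>

    // Placeholder for the platform's 8-byte CAS; returns the previous value.
    extern int64_t cmpswp8( volatile void* ptr, int64_t value, int64_t comparand );

    inline int64_t generic_load8( const volatile void* ptr ) {
        // Any value works as long as both arguments match; an improbable value
        // makes a successful (cache-line-dirtying) CAS unlikely.
        const int64_t anyvalue = 2305843009213693951LL;
        return cmpswp8( const_cast<volatile void*>(ptr), anyvalue, anyvalue );
    }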


tbb_stddef.h

@@ skipping to change at line 37 @@
 */

 #ifndef __TBB_tbb_stddef_H
 #define __TBB_tbb_stddef_H

 // Marketing-driven product version
 #define TBB_VERSION_MAJOR 4
 #define TBB_VERSION_MINOR 2

 // Engineering-focused interface version
-#define TBB_INTERFACE_VERSION 7004
+#define TBB_INTERFACE_VERSION 7005
 #define TBB_INTERFACE_VERSION_MAJOR TBB_INTERFACE_VERSION/1000

 // The oldest major interface version still supported
 // To be used in SONAME, manifests, etc.
 #define TBB_COMPATIBLE_INTERFACE_VERSION 2

 #define __TBB_STRING_AUX(x) #x
 #define __TBB_STRING(x) __TBB_STRING_AUX(x)

 // We do not need defines below for resource processing on windows

End of changes: 1 change block; 1 line changed or deleted, 1 line changed or added.
