| _flow_graph_types_impl.h | | _flow_graph_types_impl.h | |
| | | | |
| skipping to change at line 167 | | skipping to change at line 167 | |
| PT<typename tbb::flow::tuple_element<4,TypeTuple>::type>, | | PT<typename tbb::flow::tuple_element<4,TypeTuple>::type>, | |
| PT<typename tbb::flow::tuple_element<5,TypeTuple>::type>, | | PT<typename tbb::flow::tuple_element<5,TypeTuple>::type>, | |
| PT<typename tbb::flow::tuple_element<6,TypeTuple>::type>, | | PT<typename tbb::flow::tuple_element<6,TypeTuple>::type>, | |
| PT<typename tbb::flow::tuple_element<7,TypeTuple>::type>, | | PT<typename tbb::flow::tuple_element<7,TypeTuple>::type>, | |
| PT<typename tbb::flow::tuple_element<8,TypeTuple>::type>, | | PT<typename tbb::flow::tuple_element<8,TypeTuple>::type>, | |
| PT<typename tbb::flow::tuple_element<9,TypeTuple>::type> > | | PT<typename tbb::flow::tuple_element<9,TypeTuple>::type> > | |
| type; | | type; | |
| }; | | }; | |
| #endif | | #endif | |
| | | | |
|
| | | #if TBB_PREVIEW_GRAPH_NODES | |
| | | // support for variant type | |
| | | // type we use when we're not storing a value | |
| | | struct default_constructed { }; | |
| | | | |
| | | // type which contains another type, tests for what type is contained, and references to it. | |
| | | // internal::Wrapper<T> | |
| | | // void CopyTo( void *newSpace) : builds a Wrapper<T> copy of itself in newSpace | |
| | | | |
| | | // struct to allow us to copy and test the type of objects | |
| | | struct WrapperBase { | |
| | | virtual ~WrapperBase() {} | |
| | | virtual void CopyTo(void* /*newSpace*/) const { } | |
| | | }; | |
| | | | |
| | | // Wrapper<T> contains a T, with the ability to test what T is.  The Wrapper<T> can be | |
| | | // constructed from a T, can be copy-constructed from another Wrapper<T>, and can be | |
| | | // examined via value(), but not modified. | |
| | | template<typename T> | |
| | | struct Wrapper: public WrapperBase { | |
| | | typedef T value_type; | |
| | | typedef T* pointer_type; | |
| | | private: | |
| | | T value_space; | |
| | | public: | |
| | | const value_type &value() const { return value_space; } | |
| | | | |
| | | private: | |
| | | Wrapper(); | |
| | | | |
| | | // on exception will ensure the Wrapper will contain only a trivially-constructed object | |
| | | struct _unwind_space { | |
| | | pointer_type space; | |
| | | _unwind_space(pointer_type p) : space(p) {} | |
| | | ~_unwind_space() { | |
| | | if(space) (void) new (space) Wrapper<default_constructed>(default_constructed()); | |
| | | } | |
| | | }; | |
| | | public: | |
| | | explicit Wrapper( const T& other ) : value_space(other) { } | |
| | | explicit Wrapper(const Wrapper& other) : value_space(other.value_space) { } | |
| | | | |
| | | /*override*/void CopyTo(void* newSpace) const { | |
| | | _unwind_space guard((pointer_type)newSpace); | |
| | | (void) new(newSpace) Wrapper(value_space); | |
| | | guard.space = NULL; | |
| | | } | |
| | | /*override*/~Wrapper() { } | |
| | | }; | |
| | | | |
| | | // specialization for array objects | |
| | | template<typename T, size_t N> | |
| | | struct Wrapper<T[N]> : public WrapperBase { | |
| | | typedef T value_type; | |
| | | typedef T* pointer_type; | |
| | | // space must be untyped. | |
| | | typedef T ArrayType[N]; | |
| | | private: | |
| | | // The space is not of type T[N] because when copy-constructing, it would be | |
| | | // default-initialized and then copied to in some fashion, resulting in two | |
| | | // constructions and one destruction per element.  If the type is char[], we | |
| | | // placement new into each element, resulting in one construction per element. | |
| | | static const size_t space_size = sizeof(ArrayType) / sizeof(char); | |
| | | char value_space[space_size]; | |
| | | | |
| | | // on exception will ensure the already-built objects will be destructed | |
| | | // (the value_space is a char array, so it is already trivially-destructible.) | |
| | | struct _unwind_class { | |
| | | pointer_type space; | |
| | | int already_built; | |
| | | _unwind_class(pointer_type p) : space(p), already_built(0) {} | |
| | | ~_unwind_class() { | |
| | | if(space) { | |
| | | for(size_t i = already_built; i > 0 ; --i ) space[i-1].~value_type(); | |
| | | (void) new(space) Wrapper<default_constructed>(default_constructed()); | |
| | | } | |
| | | } | |
| | | }; | |
| | | public: | |
| | | const ArrayType &value() const { | |
| | | char *vp = const_cast<char *>(value_space); | |
| | | return reinterpret_cast<ArrayType &>(*vp); | |
| | | } | |
| | | | |
| | | private: | |
| | | Wrapper(); | |
| | | public: | |
| | | // have to explicitly construct because other decays to a const value_type* | |
| | | explicit Wrapper(const ArrayType& other) { | |
| | | _unwind_class guard((pointer_type)value_space); | |
| | | pointer_type vp = reinterpret_cast<pointer_type>(&value_space); | |
| | | for(size_t i = 0; i < N; ++i ) { | |
| | | (void) new(vp++) value_type(other[i]); | |
| | | ++(guard.already_built); | |
| | | } | |
| | | guard.space = NULL; | |
| | | } | |
| | | explicit Wrapper(const Wrapper& other) : WrapperBase() { | |
| | | // we have to do the heavy lifting to copy contents | |
| | | _unwind_class guard((pointer_type)value_space); | |
| | | pointer_type dp = reinterpret_cast<pointer_type>(value_space); | |
| | | pointer_type sp = reinterpret_cast<pointer_type>(const_cast<char *>(other.value_space)); | |
| | | for(size_t i = 0; i < N; ++i, ++dp, ++sp) { | |
| | | (void) new(dp) value_type(*sp); | |
| | | ++(guard.already_built); | |
| | | } | |
| | | guard.space = NULL; | |
| | | } | |
| | | | |
| | | /*override*/void CopyTo(void* newSpace) const { | |
| | | (void) new(newSpace) Wrapper(*this);  // exceptions handled in copy constructor | |
| | | } | |
| | | | |
| | | /*override*/~Wrapper() { | |
| | | // have to destroy explicitly in reverse order | |
| | | pointer_type vp = reinterpret_cast<pointer_type>(&value_space); | |
| | | for(size_t i = N; i > 0 ; --i ) vp[i-1].~value_type(); | |
| | | } | |
| | | }; | |
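The Wrapper machinery added above is easiest to see in isolation. The following standalone sketch (not part of the header; names and storage choices are illustrative only) re-creates the idiom: a polymorphic WrapperBase whose CopyTo placement-constructs a copy into caller-supplied raw storage, with dynamic_cast used later to test which T is stored.

#include <cassert>
#include <new>
#include <string>

namespace sketch {
    // Minimal re-creation of the WrapperBase/Wrapper<T> idiom; not the header's code.
    struct WrapperBase {
        virtual ~WrapperBase() {}
        virtual void CopyTo(void* space) const = 0;
    };

    template<typename T>
    struct Wrapper : public WrapperBase {
        explicit Wrapper(const T& v) : value(v) {}
        /*override*/ void CopyTo(void* space) const { ::new (space) Wrapper(value); }
        T value;
    };
}

int main() {
    // Raw storage standing in for variant::my_space (size/alignment chosen for this T only).
    union { char buf[sizeof(sketch::Wrapper<std::string>)]; void* aligner; } space;

    sketch::Wrapper<std::string> original(std::string("hello"));
    original.CopyTo(&space);    // placement-copy of the wrapper into the raw buffer

    // The variant recovers the stored type with dynamic_cast through the base pointer.
    sketch::WrapperBase* base = reinterpret_cast<sketch::WrapperBase*>(&space);
    assert(dynamic_cast<sketch::Wrapper<std::string>*>(base) != 0);
    assert(dynamic_cast<sketch::Wrapper<int>*>(base) == 0);
    assert(dynamic_cast<sketch::Wrapper<std::string>*>(base)->value == "hello");

    base->~WrapperBase();       // explicit destruction, as in ~variant()
    return 0;
}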
| | | | |
| | | // given a tuple, return the type of the element that has the maximum alignment requirement. | |
| | | // Given a tuple and that type, return the number of elements of the object with the max | |
| | | // alignment requirement that is at least as big as the largest object in the tuple. | |
| | | | |
| | | template<bool, class T1, class T2> struct pick_one; | |
| | | template<class T1, class T2> struct pick_one<true , T1, T2> { typedef T1 type; }; | |
| | | template<class T1, class T2> struct pick_one<false, T1, T2> { typedef T2 type; }; | |
| | | | |
| | | template< template<class> class Selector, typename T1, typename T2 > | |
| | | struct pick_max { | |
| | | typedef typename pick_one< (Selector<T1>::value > Selector<T2>::value), T1, T2 >::type type; | |
| | | }; | |
| | | | |
| | | template<typename T> struct size_of { static const int value = sizeof(T); }; | |
| | | template<class T> struct alignment_of { | |
| | | typedef struct { char t; T padded; } test_alignment; | |
| | | static const size_t value = sizeof(test_alignment) - sizeof(T); | |
| | | }; | |
| | | | |
| | | template< size_t N, class Tuple, template<class> class Selector > struct pick_tuple_max { | |
| | | typedef typename pick_tuple_max<N-1, Tuple, Selector>::type LeftMaxType; | |
| | | typedef typename tuple_element<N-1, Tuple>::type ThisType; | |
| | | typedef typename pick_max<Selector, LeftMaxType, ThisType>::type type; | |
| | | }; | |
| | | | |
| | | template< class Tuple, template<class> class Selector > struct pick_tuple_max<0, Tuple, Selector> { | |
| | | typedef typename tuple_element<0, Tuple>::type type; | |
| | | }; | |
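The alignment_of trick used by the selection machinery above can be checked with a tiny standalone program. This is an illustrative sketch only (names are mine, and the "typical ABI" expectation in the comment is an assumption): padding inserted after a leading char equals the alignment requirement of the padded type.

#include <cstddef>

// Same trick as alignment_of above: in "struct { char c; T padded; }" the compiler
// inserts enough padding after c to align padded, so the size difference is alignof(T).
template<class T> struct alignment_probe {
    struct test { char c; T padded; };
    static const size_t value = sizeof(test) - sizeof(T);
};

int main() {
    // On a typical LP64 ABI: char aligns to 1, double to sizeof(double).
    return (alignment_probe<char>::value == 1 &&
            alignment_probe<double>::value == sizeof(double)) ? 0 : 1;
}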
| | | | |
| | | // is the specified type included in a tuple? | |
| | | | |
| | | template<class U, class V> struct is_same_type      { static const bool value = false; }; | |
| | | template<class W>          struct is_same_type<W,W> { static const bool value = true; }; | |
| | | | |
| | | template<class Q, size_t N, class Tuple> | |
| | | struct is_element_of { | |
| | | typedef typename tuple_element<N-1, Tuple>::type T_i; | |
| | | static const bool value = is_same_type<Q,T_i>::value || is_element_of<Q,N-1,Tuple>::value; | |
| | | }; | |
| | | | |
| | | template<class Q, class Tuple> | |
| | | struct is_element_of<Q,0,Tuple> { | |
| | | typedef typename tuple_element<0, Tuple>::type T_i; | |
| | | static const bool value = is_same_type<Q,T_i>::value; | |
| | | }; | |
| | | | |
| | | // allow the construction only of types that are listed in the tuple.  If a disallowed type | |
| | | // construction is written, a method involving this type is created.  The | |
| | | // type has no definition, so a syntax error is generated. | |
| | | template<typename T> struct ERROR_Type_Not_allowed_In_Tagged_Msg_Not_Member_Of_Tuple; | |
| | | | |
| | | template<typename T, bool BUILD_IT> struct do_if; | |
| | | template<typename T> | |
| | | struct do_if<T, true> { | |
| | | static void construct(void *mySpace, const T& x) { | |
| | | (void) new(mySpace) Wrapper<T>(x); | |
| | | } | |
| | | }; | |
| | | template<typename T> | |
| | | struct do_if<T, false> { | |
| | | static void construct(void * /*mySpace*/, const T& x) { | |
| | | // This method is instantiated when the type T does not match any of the | |
| | | // element types in the Tuple in variant<Tuple>. | |
| | | ERROR_Type_Not_allowed_In_Tagged_Msg_Not_Member_Of_Tuple<T>::bad_type(x); | |
| | | } | |
| | | }; | |
| | | | |
| | | // Tuple tells us the allowed types that variant can hold.  It determines the alignment of the space in | |
| | | // Wrapper, and how big Wrapper is. | |
| | | // | |
| | | // the object can only be tested for type, and a read-only reference can be fetched by cast_to<T>(). | |
| | | | |
| | | using tbb::internal::punned_cast; | |
| | | struct tagged_null_type {}; | |
| | | template<typename TagType, typename T0, typename T1=tagged_null_type, typename T2=tagged_null_type, typename T3=tagged_null_type, | |
| | | typename T4=tagged_null_type, typename T5=tagged_null_type, typename T6=tagged_null_type, | |
| | | typename T7=tagged_null_type, typename T8=tagged_null_type, typename T9=tagged_null_type> | |
| | | class tagged_msg { | |
| | | typedef tuple<T0, T1, T2, T3, T4 | |
| | | #if __TBB_VARIADIC_MAX >= 6 | |
| | | , T5 | |
| | | #endif | |
| | | #if __TBB_VARIADIC_MAX >= 7 | |
| | | , T6 | |
| | | #endif | |
| | | #if __TBB_VARIADIC_MAX >= 8 | |
| | | , T7 | |
| | | #endif | |
| | | #if __TBB_VARIADIC_MAX >= 9 | |
| | | , T8 | |
| | | #endif | |
| | | #if __TBB_VARIADIC_MAX >= 10 | |
| | | , T9 | |
| | | #endif | |
| | | > Tuple; | |
| | | | |
| | | private: | |
| | | class variant { | |
| | | static const size_t N = tuple_size<Tuple>::value; | |
| | | typedef typename pick_tuple_max<N, Tuple, alignment_of>::type AlignType; | |
| | | typedef typename pick_tuple_max<N, Tuple, size_of>::type MaxSizeType; | |
| | | static const size_t MaxNBytes = (sizeof(Wrapper<MaxSizeType>)+sizeof(AlignType)-1); | |
| | | static const size_t MaxNElements = MaxNBytes/sizeof(AlignType); | |
| | | typedef typename tbb::aligned_space<AlignType, MaxNElements> SpaceType; | |
| | | SpaceType my_space; | |
| | | static const size_t MaxSize = sizeof(SpaceType); | |
| | | | |
| | | public: | |
| | | variant() { (void) new(&my_space) Wrapper<default_constructed>(default_constructed()); } | |
| | | | |
| | | template<typename T> | |
| | | variant( const T& x ) { | |
| | | do_if<T, is_element_of<T, N, Tuple>::value>::construct(&my_space,x); | |
| | | } | |
| | | | |
| | | variant(const variant& other) { | |
| | | const WrapperBase * h = punned_cast<const WrapperBase *>(&(other.my_space)); | |
| | | h->CopyTo(&my_space); | |
| | | } | |
| | | | |
| | | // assignment must destroy and re-create the Wrapper type, as there is no way | |
| | | // to create a Wrapper-to-Wrapper assign even if we find they agree in type. | |
| | | void operator=( const variant& rhs ) { | |
| | | if(&rhs != this) { | |
| | | WrapperBase *h = punned_cast<WrapperBase *>(&my_space); | |
| | | h->~WrapperBase(); | |
| | | const WrapperBase *ch = punned_cast<const WrapperBase *>(&(rhs.my_space)); | |
| | | ch->CopyTo(&my_space); | |
| | | } | |
| | | } | |
| | | | |
| | | template<typename U> | |
| | | const U& variant_cast_to() const { | |
| | | const Wrapper<U> *h = dynamic_cast<const Wrapper<U>*>(punned_cast<const WrapperBase *>(&my_space)); | |
| | | if(!h) { | |
| | | tbb::internal::throw_exception(tbb::internal::eid_bad_tagged_msg_cast); | |
| | | } | |
| | | return h->value(); | |
| | | } | |
| | | template<typename U> | |
| | | bool variant_is_a() const { return dynamic_cast<const Wrapper<U>*>(punned_cast<const WrapperBase *>(&my_space)) != NULL; } | |
| | | | |
| | | bool variant_is_default_constructed() const {return variant_is_a<default_constructed>();} | |
| | | | |
| | | ~variant() { | |
| | | WrapperBase *h = punned_cast<WrapperBase *>(&my_space); | |
| | | h->~WrapperBase(); | |
| | | } | |
| | | }; //class variant | |
| | | | |
| | | TagType my_tag; | |
| | | variant my_msg; | |
| | | | |
| | | public: | |
| | | tagged_msg(): my_tag(TagType(~0)), my_msg(){} | |
| | | | |
| | | template<typename T, typename R> | |
| | | tagged_msg(T const &index, R const &value) : my_tag(index), my_msg(value) {} | |
| | | | |
| | | void set_tag(TagType const &index) {my_tag = index;} | |
| | | TagType tag() const {return my_tag;} | |
| | | | |
| | | template<typename V> | |
| | | const V& cast_to() const {return my_msg.template variant_cast_to<V>();} | |
| | | | |
| | | template<typename V> | |
| | | bool is_a() const {return my_msg.template variant_is_a<V>();} | |
| | | | |
| | | bool is_default_constructed() const {return my_msg.variant_is_default_constructed();} | |
| | | }; //class tagged_msg | |
| | | | |
| | | // template to simplify cast and test for tagged_msg in template contexts | |
| | | template<typename T, typename V> | |
| | | const T& cast_to(V const &v) { return v.template cast_to<T>(); } | |
| | | | |
| | | template<typename T, typename V> | |
| | | bool is_a(V const &v) { return v.template is_a<T>(); } | |
| | | | |
| | | #endif // TBB_PREVIEW_GRAPH_NODES | |
| } // namespace internal | | } // namespace internal | |
|
| | | | |
| #endif /* __TBB__flow_graph_types_impl_H */ | | #endif /* __TBB__flow_graph_types_impl_H */ | |
| | | | |
End of changes. 2 change blocks. |
| 0 lines changed or deleted | | 356 lines changed or added | |
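The block above adds the preview tagged_msg/variant machinery. A hedged usage sketch follows; it assumes a TBB 4.2 build with TBB_PREVIEW_GRAPH_NODES defined, that tbb/flow_graph.h pulls in this implementation header, and that the internal namespace nesting spelled out in the alias below is correct (it is an assumption for illustration only, since tagged_msg lives in an internal namespace).

// Illustrative only; the namespace alias and include path are assumptions.
#define TBB_PREVIEW_GRAPH_NODES 1
#include "tbb/flow_graph.h"

namespace fgi = tbb::flow::interface7::internal;   // assumed nesting

int main() {
    typedef fgi::tagged_msg<size_t, int, float, char> msg_t;

    msg_t m(size_t(1), 3.5f);              // tag 1, currently holds a float
    if (fgi::is_a<float>(m)) {             // type test (dynamic_cast inside variant)
        float f = fgi::cast_to<float>(m);  // read-only access; throws on a bad cast
        (void)f;
    }
    return m.tag() == 1 ? 0 : 1;
}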
|
| concurrent_vector.h | | concurrent_vector.h | |
| | | | |
| skipping to change at line 107 | | skipping to change at line 107 | |
| | | | |
| // Using enumerations due to Mac linking problems of static const variables | | // Using enumerations due to Mac linking problems of static const variables | |
| enum { | | enum { | |
| // Size constants | | // Size constants | |
| default_initial_segments = 1, // 2 initial items | | default_initial_segments = 1, // 2 initial items | |
| //! Number of slots for segment's pointers inside the class | | //! Number of slots for segment's pointers inside the class | |
| pointers_per_short_table = 3, // to fit into 8 words of entire structure | | pointers_per_short_table = 3, // to fit into 8 words of entire structure | |
| pointers_per_long_table = sizeof(segment_index_t) * 8 // one segment per bit | | pointers_per_long_table = sizeof(segment_index_t) * 8 // one segment per bit | |
| }; | | }; | |
| | | | |
|
| // Segment pointer. Can be zero-initialized | | struct segment_not_used {}; | |
| struct segment_t { | | struct segment_allocated {}; | |
| | | struct segment_allocation_failed {}; | |
| | | | |
| | | class segment_t; | |
| | | class segment_value_t { | |
| void* array; | | void* array; | |
|
| | | private: | |
| | | //TODO: More elegant way to grant access to selected functions _only_? | |
| | | friend class segment_t; | |
| | | segment_value_t(void* an_array):array(an_array) {} | |
| | | public: | |
| | | friend bool operator==(segment_value_t const& lhs, segment_not_used ) { return lhs.array == 0;} | |
| | | friend bool operator==(segment_value_t const& lhs, segment_allocated) { return lhs.array > internal::vector_allocation_error_flag;} | |
| | | friend bool operator==(segment_value_t const& lhs, segment_allocation_failed) { return lhs.array == internal::vector_allocation_error_flag;} | |
| | | template<typename argument_type> | |
| | | friend bool operator!=(segment_value_t const& lhs, argument_type arg) { return ! (lhs == arg);} | |
| | | | |
| | | template<typename T> | |
| | | T* pointer() const { return static_cast<T*>(const_cast<void*>(array)); } | |
| | | }; | |
| | | | |
| | | // Segment pointer. | |
| | | class segment_t { | |
| | | atomic<void*> array; | |
| | | public: | |
| | | segment_t(){ store<relaxed>(segment_not_used());} | |
| | | //Copy ctor and assignment operator are defined to ease using of stl algorithms. | |
| | | //These algorithms usually not a synchronization point, so, semantic is | |
| | | //intentionally relaxed here. | |
| | | segment_t(segment_t const& rhs ){ array.store<relaxed>(rhs.array.load<relaxed>());} | |
| | | | |
| | | void swap(segment_t & rhs ){ | |
| | | tbb::internal::swap<relaxed>(array, rhs.array); | |
| | | } | |
| | | | |
| | | segment_t& operator=(segment_t const& rhs ){ | |
| | | array.store<relaxed>(rhs.array.load<relaxed>()); | |
| | | return *this; | |
| | | } | |
| | | | |
| | | template<memory_semantics M> | |
| | | segment_value_t load() const { return segment_value_t(array.load<M>());} | |
| | | | |
| | | template<memory_semantics M> | |
| | | void store(segment_not_used) { | |
| | | array.store<M>(0); | |
| | | } | |
| | | | |
| | | template<memory_semantics M> | |
| | | void store(segment_allocation_failed) { | |
| | | __TBB_ASSERT(load<relaxed>() != segment_allocated(),"transition from \"allocated\" to \"allocation failed\" state looks non-logical"); | |
| | | array.store<M>(internal::vector_allocation_error_flag); | |
| | | } | |
| | | | |
| | | template<memory_semantics M> | |
| | | void store(void* allocated_segment_pointer) __TBB_NOEXCEPT(true) { | |
| | | __TBB_ASSERT(segment_value_t(allocated_segment_pointer) == segment_allocated(), | |
| | | "other overloads of store should be used for marking segment as not_used or allocation_failed" ); | |
| | | array.store<M>(allocated_segment_pointer); | |
| | | } | |
| | | | |
| #if TBB_USE_ASSERT | | #if TBB_USE_ASSERT | |
| ~segment_t() { | | ~segment_t() { | |
|
| __TBB_ASSERT( array <= internal::vector_allocation_error_flag, "should have been freed by clear" ); | | __TBB_ASSERT(load<relaxed>() != segment_allocated(), "should have been freed by clear" ); | |
| } | | } | |
| #endif /* TBB_USE_ASSERT */ | | #endif /* TBB_USE_ASSERT */ | |
| }; | | }; | |
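The new segment_value_t/segment_t pair above encodes three states of a segment slot (not used, allocated, allocation failed) as empty tag structs compared against the raw pointer. The standalone sketch below re-creates that idiom outside the header; the sentinel value chosen for the allocation-error flag is an assumption standing in for internal::vector_allocation_error_flag, whose real value is not shown in this diff.

#include <cassert>

namespace sketch {
    // Illustrative re-creation of the tag-type state test; not the header's classes.
    struct segment_not_used {};
    struct segment_allocated {};
    struct segment_allocation_failed {};

    static void* const allocation_error_flag = reinterpret_cast<void*>(1); // assumed sentinel

    class segment_value {
        void* array;
    public:
        explicit segment_value(void* p) : array(p) {}
        friend bool operator==(segment_value const& v, segment_not_used)          { return v.array == 0; }
        friend bool operator==(segment_value const& v, segment_allocation_failed) { return v.array == allocation_error_flag; }
        friend bool operator==(segment_value const& v, segment_allocated)         { return v.array > allocation_error_flag; }
    };
}

int main() {
    int storage[4];
    sketch::segment_value empty(0);          // a slot that was never allocated
    sketch::segment_value live(storage);     // a slot holding a real segment pointer
    assert(empty == sketch::segment_not_used());
    assert(live  == sketch::segment_allocated());
    return 0;
}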
|
| | | friend void swap(segment_t & , segment_t & ) __TBB_NOEXCEPT(true); | |
| | | | |
| // Data fields | | // Data fields | |
| | | | |
| //! allocator function pointer | | //! allocator function pointer | |
| void* (*vector_allocator_ptr)(concurrent_vector_base_v3 &, size_t); | | void* (*vector_allocator_ptr)(concurrent_vector_base_v3 &, size_t); | |
| | | | |
| //! count of segments in the first block | | //! count of segments in the first block | |
| atomic<size_type> my_first_block; | | atomic<size_type> my_first_block; | |
| | | | |
| //! Requested size of vector | | //! Requested size of vector | |
| | | | |
| skipping to change at line 137 | | skipping to change at line 197 | |
| | | | |
| //! Pointer to the segments table | | //! Pointer to the segments table | |
| atomic<segment_t*> my_segment; | | atomic<segment_t*> my_segment; | |
| | | | |
| //! embedded storage of segment pointers | | //! embedded storage of segment pointers | |
| segment_t my_storage[pointers_per_short_table]; | | segment_t my_storage[pointers_per_short_table]; | |
| | | | |
| // Methods | | // Methods | |
| | | | |
| concurrent_vector_base_v3() { | | concurrent_vector_base_v3() { | |
|
| my_early_size = 0; | | //Here the semantic is intentionally relaxed. | |
| my_first_block = 0; // here is not default_initial_segments | | //The reason this is next: | |
| for( segment_index_t i = 0; i < pointers_per_short_table; i++) | | //Object that is in middle of construction (i.e. its constructor is not yet finished) | |
| my_storage[i].array = NULL; | | //cannot be used concurrently until the construction is finished. | |
| my_segment = my_storage; | | //Thus to flag other threads that construction is finished, some synchronization with | |
| | | //acquire-release semantic should be done by the (external) code that uses the vector. | |
| | | //So, no need to do the synchronization inside the vector. | |
| | | | |
| | | my_early_size.store<relaxed>(0); | |
| | | my_first_block.store<relaxed>(0); // here is not default_initial_segments | |
| | | my_segment.store<relaxed>(my_storage); | |
| } | | } | |
| __TBB_EXPORTED_METHOD ~concurrent_vector_base_v3(); | | __TBB_EXPORTED_METHOD ~concurrent_vector_base_v3(); | |
| | | | |
| //these helpers methods use the fact that segments are allocated so | | //these helpers methods use the fact that segments are allocated so | |
| //that every segment size is a (increasing) power of 2. | | //that every segment size is a (increasing) power of 2. | |
| //with one exception 0 segment has size of 2 as well segment 1; | | //with one exception 0 segment has size of 2 as well segment 1; | |
| //e.g. size of segment with index of 3 is 2^3=8; | | //e.g. size of segment with index of 3 is 2^3=8; | |
| static segment_index_t segment_index_of( size_type index ) { | | static segment_index_t segment_index_of( size_type index ) { | |
| return segment_index_t( __TBB_Log2( index|1 ) ); | | return segment_index_t( __TBB_Log2( index|1 ) ); | |
| } | | } | |
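The comment above describes the power-of-two segment layout that segment_index_of relies on. The short sketch below works the example through with a portable floor-log2 standing in for __TBB_Log2 (which is an internal macro not shown here); the mapping it checks follows directly from the comment.

#include <cstddef>

// Element index i lives in segment floor(log2(i|1)), so segments 0 and 1 each
// hold 2 elements and segment k (k >= 1) holds 2^k elements.
static size_t log2_floor(size_t x) { size_t r = 0; while (x >>= 1) ++r; return r; }
static size_t segment_index_of_sketch(size_t index) { return log2_floor(index | 1); }

int main() {
    // indices 0,1 -> segment 0; 2,3 -> segment 1; 4..7 -> segment 2; 8..15 -> segment 3
    return (segment_index_of_sketch(0) == 0 &&
            segment_index_of_sketch(3) == 1 &&
            segment_index_of_sketch(8) == 3) ? 0 : 1;
}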
| | | | |
| skipping to change at line 185 | | skipping to change at line 251 | |
| | | | |
| //! An operation on an n-element array starting at begin. | | //! An operation on an n-element array starting at begin. | |
| typedef void (__TBB_EXPORTED_FUNC *internal_array_op1)(void* begin,
size_type n ); | | typedef void (__TBB_EXPORTED_FUNC *internal_array_op1)(void* begin,
size_type n ); | |
| | | | |
| //! An operation on n-element destination array and n-element sourc
e array. | | //! An operation on n-element destination array and n-element sourc
e array. | |
| typedef void (__TBB_EXPORTED_FUNC *internal_array_op2)(void* dst, c
onst void* src, size_type n ); | | typedef void (__TBB_EXPORTED_FUNC *internal_array_op2)(void* dst, c
onst void* src, size_type n ); | |
| | | | |
| //! Internal structure for compact() | | //! Internal structure for compact() | |
| struct internal_segments_table { | | struct internal_segments_table { | |
| segment_index_t first_block; | | segment_index_t first_block; | |
|
| void* table[pointers_per_long_table]; | | segment_t table[pointers_per_long_table]; | |
| }; | | }; | |
| | | | |
| void __TBB_EXPORTED_METHOD internal_reserve( size_type n, size_type
element_size, size_type max_size ); | | void __TBB_EXPORTED_METHOD internal_reserve( size_type n, size_type
element_size, size_type max_size ); | |
| size_type __TBB_EXPORTED_METHOD internal_capacity() const; | | size_type __TBB_EXPORTED_METHOD internal_capacity() const; | |
| void internal_grow( size_type start, size_type finish, size_type el
ement_size, internal_array_op2 init, const void *src ); | | void internal_grow( size_type start, size_type finish, size_type el
ement_size, internal_array_op2 init, const void *src ); | |
| size_type __TBB_EXPORTED_METHOD internal_grow_by( size_type delta,
size_type element_size, internal_array_op2 init, const void *src ); | | size_type __TBB_EXPORTED_METHOD internal_grow_by( size_type delta,
size_type element_size, internal_array_op2 init, const void *src ); | |
| void* __TBB_EXPORTED_METHOD internal_push_back( size_type element_s
ize, size_type& index ); | | void* __TBB_EXPORTED_METHOD internal_push_back( size_type element_s
ize, size_type& index ); | |
| segment_index_t __TBB_EXPORTED_METHOD internal_clear( internal_arra
y_op1 destroy ); | | segment_index_t __TBB_EXPORTED_METHOD internal_clear( internal_arra
y_op1 destroy ); | |
| void* __TBB_EXPORTED_METHOD internal_compact( size_type element_siz
e, void *table, internal_array_op1 destroy, internal_array_op2 copy ); | | void* __TBB_EXPORTED_METHOD internal_compact( size_type element_siz
e, void *table, internal_array_op1 destroy, internal_array_op2 copy ); | |
| void __TBB_EXPORTED_METHOD internal_copy( const concurrent_vector_b
ase_v3& src, size_type element_size, internal_array_op2 copy ); | | void __TBB_EXPORTED_METHOD internal_copy( const concurrent_vector_b
ase_v3& src, size_type element_size, internal_array_op2 copy ); | |
| | | | |
| skipping to change at line 218 | | skipping to change at line 284 | |
| private: | | private: | |
| //! Private functionality | | //! Private functionality | |
| class helper; | | class helper; | |
| friend class helper; | | friend class helper; | |
| | | | |
| template<typename Container, typename Value> | | template<typename Container, typename Value> | |
| friend class vector_iterator; | | friend class vector_iterator; | |
| | | | |
| }; | | }; | |
| | | | |
|
| | | inline void swap(concurrent_vector_base_v3::segment_t & lhs, concurrent_vector_base_v3::segment_t & rhs) __TBB_NOEXCEPT(true) { | |
| | | lhs.swap(rhs); | |
| | | } | |
| | | | |
| typedef concurrent_vector_base_v3 concurrent_vector_base; | | typedef concurrent_vector_base_v3 concurrent_vector_base; | |
| | | | |
| //! Meets requirements of a forward iterator for STL and a Value for a blocked_range.*/ | | //! Meets requirements of a forward iterator for STL and a Value for a blocked_range.*/ | |
| /** Value is either the T or const T type of the container. | | /** Value is either the T or const T type of the container. | |
| @ingroup containers */ | | @ingroup containers */ | |
| template<typename Container, typename Value> | | template<typename Container, typename Value> | |
| class vector_iterator | | class vector_iterator | |
| { | | { | |
| //! concurrent_vector over which we are iterating. | | //! concurrent_vector over which we are iterating. | |
| Container* my_vector; | | Container* my_vector; | |
| | | | |
| skipping to change at line 537 | | skipping to change at line 607 | |
| // STL compatible constructors & destructors | | // STL compatible constructors & destructors | |
| //------------------------------------------------------------------------ | | //------------------------------------------------------------------------ | |
| | | | |
| //! Construct empty vector. | | //! Construct empty vector. | |
| explicit concurrent_vector(const allocator_type &a = allocator_type()) | | explicit concurrent_vector(const allocator_type &a = allocator_type()) | |
| : internal::allocator_base<T, A>(a), internal::concurrent_vector_base() | | : internal::allocator_base<T, A>(a), internal::concurrent_vector_base() | |
| { | | { | |
| vector_allocator_ptr = &internal_allocator; | | vector_allocator_ptr = &internal_allocator; | |
| } | | } | |
| | | | |
|
| | | //Constructors are not required to have synchronization | |
| | | //(for more details see comment in the concurrent_vector_base constructor). | |
| #if __TBB_INITIALIZER_LISTS_PRESENT | | #if __TBB_INITIALIZER_LISTS_PRESENT | |
| //! Constructor from initializer_list | | //! Constructor from initializer_list | |
| concurrent_vector(std::initializer_list<T> init_list, const allocator_type &a = allocator_type()) | | concurrent_vector(std::initializer_list<T> init_list, const allocator_type &a = allocator_type()) | |
| : internal::allocator_base<T, A>(a), internal::concurrent_vector_base() | | : internal::allocator_base<T, A>(a), internal::concurrent_vector_base() | |
| { | | { | |
| vector_allocator_ptr = &internal_allocator; | | vector_allocator_ptr = &internal_allocator; | |
| __TBB_TRY { | | __TBB_TRY { | |
| internal_assign_iterators(init_list.begin(), init_list.end()); | | internal_assign_iterators(init_list.begin(), init_list.end()); | |
| } __TBB_CATCH(...) { | | } __TBB_CATCH(...) { | |
|
| segment_t *table = my_segment; | | segment_t *table = my_segment.load<relaxed>();; | |
| internal_free_segments( reinterpret_cast<void**>(table), intern | | internal_free_segments( table, internal_clear(&destroy_array), | |
| al_clear(&destroy_array), my_first_block ); | | my_first_block.load<relaxed>()); | |
| __TBB_RETHROW(); | | __TBB_RETHROW(); | |
| } | | } | |
| | | | |
| } | | } | |
| #endif //# __TBB_INITIALIZER_LISTS_PRESENT | | #endif //# __TBB_INITIALIZER_LISTS_PRESENT | |
| | | | |
| //! Copying constructor | | //! Copying constructor | |
| concurrent_vector( const concurrent_vector& vector, const allocator_type& a = allocator_type() ) | | concurrent_vector( const concurrent_vector& vector, const allocator_type& a = allocator_type() ) | |
| : internal::allocator_base<T, A>(a), internal::concurrent_vector_base() | | : internal::allocator_base<T, A>(a), internal::concurrent_vector_base() | |
| { | | { | |
| vector_allocator_ptr = &internal_allocator; | | vector_allocator_ptr = &internal_allocator; | |
| __TBB_TRY { | | __TBB_TRY { | |
| internal_copy(vector, sizeof(T), ©_array); | | internal_copy(vector, sizeof(T), ©_array); | |
| } __TBB_CATCH(...) { | | } __TBB_CATCH(...) { | |
|
| segment_t *table = my_segment; | | segment_t *table = my_segment.load<relaxed>(); | |
| internal_free_segments( reinterpret_cast<void**>(table), intern | | internal_free_segments( table, internal_clear(&destroy_array), | |
| al_clear(&destroy_array), my_first_block ); | | my_first_block.load<relaxed>()); | |
| __TBB_RETHROW(); | | __TBB_RETHROW(); | |
| } | | } | |
| } | | } | |
| | | | |
| //! Copying constructor for vector with different allocator type | | //! Copying constructor for vector with different allocator type | |
| template<class M> | | template<class M> | |
| concurrent_vector( const concurrent_vector<T, M>& vector, const allocat
or_type& a = allocator_type() ) | | concurrent_vector( const concurrent_vector<T, M>& vector, const allocat
or_type& a = allocator_type() ) | |
| : internal::allocator_base<T, A>(a), internal::concurrent_vector_ba
se() | | : internal::allocator_base<T, A>(a), internal::concurrent_vector_ba
se() | |
| { | | { | |
| vector_allocator_ptr = &internal_allocator; | | vector_allocator_ptr = &internal_allocator; | |
| __TBB_TRY { | | __TBB_TRY { | |
| internal_copy(vector.internal_vector_base(), sizeof(T), ©_a
rray); | | internal_copy(vector.internal_vector_base(), sizeof(T), ©_a
rray); | |
| } __TBB_CATCH(...) { | | } __TBB_CATCH(...) { | |
|
| segment_t *table = my_segment; | | segment_t *table = my_segment.load<relaxed>(); | |
| internal_free_segments( reinterpret_cast<void**>(table), intern | | internal_free_segments( table, internal_clear(&destroy_array), | |
| al_clear(&destroy_array), my_first_block ); | | my_first_block.load<relaxed>() ); | |
| __TBB_RETHROW(); | | __TBB_RETHROW(); | |
| } | | } | |
| } | | } | |
| | | | |
| //! Construction with initial size specified by argument n | | //! Construction with initial size specified by argument n | |
| explicit concurrent_vector(size_type n) | | explicit concurrent_vector(size_type n) | |
| { | | { | |
| vector_allocator_ptr = &internal_allocator; | | vector_allocator_ptr = &internal_allocator; | |
| __TBB_TRY { | | __TBB_TRY { | |
| internal_resize( n, sizeof(T), max_size(), NULL, &destroy_array
, &initialize_array ); | | internal_resize( n, sizeof(T), max_size(), NULL, &destroy_array
, &initialize_array ); | |
| } __TBB_CATCH(...) { | | } __TBB_CATCH(...) { | |
|
| segment_t *table = my_segment; | | segment_t *table = my_segment.load<relaxed>(); | |
| internal_free_segments( reinterpret_cast<void**>(table), intern | | internal_free_segments( table, internal_clear(&destroy_array), | |
| al_clear(&destroy_array), my_first_block ); | | my_first_block.load<relaxed>() ); | |
| __TBB_RETHROW(); | | __TBB_RETHROW(); | |
| } | | } | |
| } | | } | |
| | | | |
| //! Construction with initial size specified by argument n, initializat
ion by copying of t, and given allocator instance | | //! Construction with initial size specified by argument n, initializat
ion by copying of t, and given allocator instance | |
| concurrent_vector(size_type n, const_reference t, const allocator_type&
a = allocator_type()) | | concurrent_vector(size_type n, const_reference t, const allocator_type&
a = allocator_type()) | |
| : internal::allocator_base<T, A>(a) | | : internal::allocator_base<T, A>(a) | |
| { | | { | |
| vector_allocator_ptr = &internal_allocator; | | vector_allocator_ptr = &internal_allocator; | |
| __TBB_TRY { | | __TBB_TRY { | |
| internal_resize( n, sizeof(T), max_size(), static_cast<const vo
id*>(&t), &destroy_array, &initialize_array_by ); | | internal_resize( n, sizeof(T), max_size(), static_cast<const vo
id*>(&t), &destroy_array, &initialize_array_by ); | |
| } __TBB_CATCH(...) { | | } __TBB_CATCH(...) { | |
|
| segment_t *table = my_segment; | | segment_t *table = my_segment.load<relaxed>(); | |
| internal_free_segments( reinterpret_cast<void**>(table), intern | | internal_free_segments( table, internal_clear(&destroy_array), | |
| al_clear(&destroy_array), my_first_block ); | | my_first_block.load<relaxed>() ); | |
| __TBB_RETHROW(); | | __TBB_RETHROW(); | |
| } | | } | |
| } | | } | |
| | | | |
| //! Construction with copying iteration range and given allocator insta
nce | | //! Construction with copying iteration range and given allocator insta
nce | |
| template<class I> | | template<class I> | |
| concurrent_vector(I first, I last, const allocator_type &a = allocator_
type()) | | concurrent_vector(I first, I last, const allocator_type &a = allocator_
type()) | |
| : internal::allocator_base<T, A>(a) | | : internal::allocator_base<T, A>(a) | |
| { | | { | |
| vector_allocator_ptr = &internal_allocator; | | vector_allocator_ptr = &internal_allocator; | |
| __TBB_TRY { | | __TBB_TRY { | |
| internal_assign_range(first, last, static_cast<is_integer_tag<s
td::numeric_limits<I>::is_integer> *>(0) ); | | internal_assign_range(first, last, static_cast<is_integer_tag<s
td::numeric_limits<I>::is_integer> *>(0) ); | |
| } __TBB_CATCH(...) { | | } __TBB_CATCH(...) { | |
|
| segment_t *table = my_segment; | | segment_t *table = my_segment.load<relaxed>(); | |
| internal_free_segments( reinterpret_cast<void**>(table), intern | | internal_free_segments( table, internal_clear(&destroy_array), | |
| al_clear(&destroy_array), my_first_block ); | | my_first_block.load<relaxed>() ); | |
| __TBB_RETHROW(); | | __TBB_RETHROW(); | |
| } | | } | |
| } | | } | |
| | | | |
| //! Assignment | | //! Assignment | |
| concurrent_vector& operator=( const concurrent_vector& vector ) { | | concurrent_vector& operator=( const concurrent_vector& vector ) { | |
| if( this != &vector ) | | if( this != &vector ) | |
| internal_assign(vector, sizeof(T), &destroy_array, &assign_arra
y, ©_array); | | internal_assign(vector, sizeof(T), &destroy_array, &assign_arra
y, ©_array); | |
| return *this; | | return *this; | |
| } | | } | |
| | | | |
| skipping to change at line 655 | | skipping to change at line 727 | |
| concurrent_vector& operator=( const std::initializer_list<T> & init_lis
t) { | | concurrent_vector& operator=( const std::initializer_list<T> & init_lis
t) { | |
| internal_clear(&destroy_array); | | internal_clear(&destroy_array); | |
| internal_assign_iterators(init_list.begin(), init_list.end()); | | internal_assign_iterators(init_list.begin(), init_list.end()); | |
| return *this; | | return *this; | |
| } | | } | |
| #endif //#if __TBB_INITIALIZER_LISTS_PRESENT | | #endif //#if __TBB_INITIALIZER_LISTS_PRESENT | |
| | | | |
| //---------------------------------------------------------------------
--- | | //---------------------------------------------------------------------
--- | |
| // Concurrent operations | | // Concurrent operations | |
| //---------------------------------------------------------------------
--- | | //---------------------------------------------------------------------
--- | |
|
| //TODO: consider adding overload of grow_by accepting range of iterators: grow_by(iterator,iterator) | | | |
| //TODO: consider adding overload of grow_by accepting initializer_list: grow_by(std::initializer_list<T>), as a analogy to std::vector::insert(initializer_list) | | //TODO: consider adding overload of grow_by accepting initializer_list: grow_by(std::initializer_list<T>), as a analogy to std::vector::insert(initializer_list) | |
| //! Grow by "delta" elements. | | //! Grow by "delta" elements. | |
| #if TBB_DEPRECATED | | #if TBB_DEPRECATED | |
| /** Returns old size. */ | | /** Returns old size. */ | |
| size_type grow_by( size_type delta ) { | | size_type grow_by( size_type delta ) { | |
| return delta ? internal_grow_by( delta, sizeof(T), &initialize_arra
y, NULL ) : my_early_size.load(); | | return delta ? internal_grow_by( delta, sizeof(T), &initialize_arra
y, NULL ) : my_early_size.load(); | |
| } | | } | |
| #else | | #else | |
| /** Returns iterator pointing to the first new element. */ | | /** Returns iterator pointing to the first new element. */ | |
| iterator grow_by( size_type delta ) { | | iterator grow_by( size_type delta ) { | |
| | | | |
| skipping to change at line 681 | | skipping to change at line 752 | |
| #if TBB_DEPRECATED | | #if TBB_DEPRECATED | |
| /** Returns old size. */ | | /** Returns old size. */ | |
| size_type grow_by( size_type delta, const_reference t ) { | | size_type grow_by( size_type delta, const_reference t ) { | |
| return delta ? internal_grow_by( delta, sizeof(T), &initialize_arra
y_by, static_cast<const void*>(&t) ) : my_early_size.load(); | | return delta ? internal_grow_by( delta, sizeof(T), &initialize_arra
y_by, static_cast<const void*>(&t) ) : my_early_size.load(); | |
| } | | } | |
| #else | | #else | |
| /** Returns iterator pointing to the first new element. */ | | /** Returns iterator pointing to the first new element. */ | |
| iterator grow_by( size_type delta, const_reference t ) { | | iterator grow_by( size_type delta, const_reference t ) { | |
| return iterator(*this, delta ? internal_grow_by( delta, sizeof(T),
&initialize_array_by, static_cast<const void*>(&t) ) : my_early_size.load()
); | | return iterator(*this, delta ? internal_grow_by( delta, sizeof(T),
&initialize_array_by, static_cast<const void*>(&t) ) : my_early_size.load()
); | |
| } | | } | |
|
| | | | |
| | | /** Returns iterator pointing to the first new element. */ | |
| | | template<typename I> | |
| | | iterator grow_by( I first, I last ) { | |
| | | typename std::iterator_traits<I>::difference_type | |
| | | delta = std::distance(first, last); | |
| | | __TBB_ASSERT( delta > 0, NULL); | |
| | | | |
| | | return iterator(*this, internal_grow_by( delta, sizeof(T), &copy_range<I>, static_cast<const void*>(&first) )); | |
| | | } | |
| #endif | | #endif | |
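The new grow_by(first, last) overload added above appends a copy of an iterator range and returns an iterator to the first appended element. A hedged usage sketch follows; it assumes the TBB 4.2 header shown here is on the include path and that the non-deprecated interface (TBB_DEPRECATED not set) is in effect, since only that branch provides this overload.

#include "tbb/concurrent_vector.h"
#include <vector>

int main() {
    tbb::concurrent_vector<int> cv;
    std::vector<int> src;
    for (int i = 0; i < 5; ++i) src.push_back(i);

    // Appends copies of [src.begin(), src.end()); safe to call concurrently
    // with other growing operations on the same vector.
    tbb::concurrent_vector<int>::iterator it = cv.grow_by(src.begin(), src.end());
    return (*it == 0 && cv.size() == 5) ? 0 : 1;
}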
| | | | |
| //! Append minimal sequence of elements such that size()>=n. | | //! Append minimal sequence of elements such that size()>=n. | |
| #if TBB_DEPRECATED | | #if TBB_DEPRECATED | |
| /** The new elements are default constructed. Blocks until all element
s in range [0..n) are allocated. | | /** The new elements are default constructed. Blocks until all element
s in range [0..n) are allocated. | |
| May return while other elements are being constructed by other thre
ads. */ | | May return while other elements are being constructed by other thre
ads. */ | |
| void grow_to_at_least( size_type n ) { | | void grow_to_at_least( size_type n ) { | |
| if( n ) internal_grow_to_at_least_with_result( n, sizeof(T), &initi
alize_array, NULL ); | | if( n ) internal_grow_to_at_least_with_result( n, sizeof(T), &initi
alize_array, NULL ); | |
| }; | | }; | |
| #else | | #else | |
| | | | |
| skipping to change at line 703 | | skipping to change at line 784 | |
| Returns iterator that points to beginning of appended sequence. | | Returns iterator that points to beginning of appended sequence. | |
| If no elements were appended, returns iterator pointing to nth elem
ent. */ | | If no elements were appended, returns iterator pointing to nth elem
ent. */ | |
| iterator grow_to_at_least( size_type n ) { | | iterator grow_to_at_least( size_type n ) { | |
| size_type m=0; | | size_type m=0; | |
| if( n ) { | | if( n ) { | |
| m = internal_grow_to_at_least_with_result( n, sizeof(T), &initi
alize_array, NULL ); | | m = internal_grow_to_at_least_with_result( n, sizeof(T), &initi
alize_array, NULL ); | |
| if( m>n ) m=n; | | if( m>n ) m=n; | |
| } | | } | |
| return iterator(*this, m); | | return iterator(*this, m); | |
| }; | | }; | |
|
| | | | |
| | | /** Analogous to grow_to_at_least( size_type n ) with exception that the new | |
| | | elements are initialized by copying of t instead of default construction. */ | |
| | | iterator grow_to_at_least( size_type n, const_reference t ) { | |
| | | size_type m=0; | |
| | | if( n ) { | |
| | | m = internal_grow_to_at_least_with_result( n, sizeof(T), &initialize_array_by, &t); | |
| | | if( m>n ) m=n; | |
| | | } | |
| | | return iterator(*this, m); | |
| | | }; | |
| | | | |
| #endif | | #endif | |
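The other new overload above, grow_to_at_least(n, t), copy-constructs any appended elements from t instead of default-constructing them. A small usage sketch, under the same assumption that this TBB 4.2 header is in use:

#include "tbb/concurrent_vector.h"

int main() {
    tbb::concurrent_vector<int> cv;
    cv.push_back(1);
    // Ensure size() >= 4; elements appended by this call are copies of 7.
    cv.grow_to_at_least(4, 7);
    return cv.size() >= 4 ? 0 : 1;
}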
| | | | |
| //! Push item | | //! Push item | |
| #if TBB_DEPRECATED | | #if TBB_DEPRECATED | |
| size_type push_back( const_reference item ) | | size_type push_back( const_reference item ) | |
| #else | | #else | |
| /** Returns iterator pointing to the new element. */ | | /** Returns iterator pointing to the new element. */ | |
| iterator push_back( const_reference item ) | | iterator push_back( const_reference item ) | |
| #endif | | #endif | |
| { | | { | |
| | | | |
| skipping to change at line 831 | | skipping to change at line 924 | |
| const_reverse_iterator rbegin() const {return const_reverse_iterator(en
d());} | | const_reverse_iterator rbegin() const {return const_reverse_iterator(en
d());} | |
| //! reverse end const iterator | | //! reverse end const iterator | |
| const_reverse_iterator rend() const {return const_reverse_iterator(begi
n());} | | const_reverse_iterator rend() const {return const_reverse_iterator(begi
n());} | |
| //! reverse start const iterator | | //! reverse start const iterator | |
| const_reverse_iterator crbegin() const {return const_reverse_iterator(e
nd());} | | const_reverse_iterator crbegin() const {return const_reverse_iterator(e
nd());} | |
| //! reverse end const iterator | | //! reverse end const iterator | |
| const_reverse_iterator crend() const {return const_reverse_iterator(beg
in());} | | const_reverse_iterator crend() const {return const_reverse_iterator(beg
in());} | |
| //! the first item | | //! the first item | |
| reference front() { | | reference front() { | |
| __TBB_ASSERT( size()>0, NULL); | | __TBB_ASSERT( size()>0, NULL); | |
|
| return static_cast<T*>(my_segment[0].array)[0]; | | return (my_segment[0].template load<relaxed>().template pointer<T>(
))[0]; | |
| } | | } | |
| //! the first item const | | //! the first item const | |
| const_reference front() const { | | const_reference front() const { | |
| __TBB_ASSERT( size()>0, NULL); | | __TBB_ASSERT( size()>0, NULL); | |
| return static_cast<const T*>(my_segment[0].array)[0]; | | return static_cast<const T*>(my_segment[0].array)[0]; | |
| } | | } | |
| //! the last item | | //! the last item | |
| reference back() { | | reference back() { | |
| __TBB_ASSERT( size()>0, NULL); | | __TBB_ASSERT( size()>0, NULL); | |
| return internal_subscript( size()-1 ); | | return internal_subscript( size()-1 ); | |
| | | | |
| skipping to change at line 887 | | skipping to change at line 980 | |
| } | | } | |
| | | | |
| //! Clear container while keeping memory allocated. | | //! Clear container while keeping memory allocated. | |
| /** To free up the memory, use in conjunction with method compact(). No
t thread safe **/ | | /** To free up the memory, use in conjunction with method compact(). No
t thread safe **/ | |
| void clear() { | | void clear() { | |
| internal_clear(&destroy_array); | | internal_clear(&destroy_array); | |
| } | | } | |
| | | | |
| //! Clear and destroy vector. | | //! Clear and destroy vector. | |
| ~concurrent_vector() { | | ~concurrent_vector() { | |
|
| segment_t *table = my_segment; | | segment_t *table = my_segment.load<relaxed>(); | |
| internal_free_segments( reinterpret_cast<void**>(table), internal_clear(&destroy_array), my_first_block ); | | internal_free_segments( table, internal_clear(&destroy_array), my_first_block.load<relaxed>() ); | |
| // base class destructor call should be then | | // base class destructor call should be then | |
| } | | } | |
| | | | |
| const internal::concurrent_vector_base_v3 &internal_vector_base() const
{ return *this; } | | const internal::concurrent_vector_base_v3 &internal_vector_base() const
{ return *this; } | |
| private: | | private: | |
| //! Allocate k items | | //! Allocate k items | |
| static void *internal_allocator(internal::concurrent_vector_base_v3 &vb
, size_t k) { | | static void *internal_allocator(internal::concurrent_vector_base_v3 &vb
, size_t k) { | |
| return static_cast<concurrent_vector<T, A>&>(vb).my_allocator.alloc
ate(k); | | return static_cast<concurrent_vector<T, A>&>(vb).my_allocator.alloc
ate(k); | |
| } | | } | |
| //! Free k segments from table | | //! Free k segments from table | |
|
| void internal_free_segments(void *table[], segment_index_t k, segment_i
ndex_t first_block); | | void internal_free_segments(segment_t table[], segment_index_t k, segme
nt_index_t first_block); | |
| | | | |
| //! Get reference to element at given index. | | //! Get reference to element at given index. | |
| T& internal_subscript( size_type index ) const; | | T& internal_subscript( size_type index ) const; | |
| | | | |
| //! Get reference to element at given index with errors checks | | //! Get reference to element at given index with errors checks | |
| T& internal_subscript_with_exceptions( size_type index ) const; | | T& internal_subscript_with_exceptions( size_type index ) const; | |
| | | | |
| //! assign n items by copying t | | //! assign n items by copying t | |
| void internal_assign_n(size_type n, const_pointer p) { | | void internal_assign_n(size_type n, const_pointer p) { | |
| internal_resize( n, sizeof(T), max_size(), static_cast<const void*>
(p), &destroy_array, p? &initialize_array_by : &initialize_array ); | | internal_resize( n, sizeof(T), max_size(), static_cast<const void*>
(p), &destroy_array, p? &initialize_array_by : &initialize_array ); | |
| | | | |
| skipping to change at line 929 | | skipping to change at line 1022 | |
| } | | } | |
| //! inline proxy assign by iterators | | //! inline proxy assign by iterators | |
| template<class I> | | template<class I> | |
| void internal_assign_range(I first, I last, is_integer_tag<false> *) { | | void internal_assign_range(I first, I last, is_integer_tag<false> *) { | |
| internal_assign_iterators(first, last); | | internal_assign_iterators(first, last); | |
| } | | } | |
| //! assign by iterators | | //! assign by iterators | |
| template<class I> | | template<class I> | |
| void internal_assign_iterators(I first, I last); | | void internal_assign_iterators(I first, I last); | |
| | | | |
|
| | | //these functions are marked __TBB_EXPORTED_FUNC as they are called fro | |
| | | m within the library | |
| | | | |
| //! Construct n instances of T, starting at "begin". | | //! Construct n instances of T, starting at "begin". | |
| static void __TBB_EXPORTED_FUNC initialize_array( void* begin, const vo
id*, size_type n ); | | static void __TBB_EXPORTED_FUNC initialize_array( void* begin, const vo
id*, size_type n ); | |
| | | | |
| //! Construct n instances of T, starting at "begin". | | //! Construct n instances of T, starting at "begin". | |
| static void __TBB_EXPORTED_FUNC initialize_array_by( void* begin, const
void* src, size_type n ); | | static void __TBB_EXPORTED_FUNC initialize_array_by( void* begin, const
void* src, size_type n ); | |
| | | | |
| //! Construct n instances of T, starting at "begin". | | //! Construct n instances of T, starting at "begin". | |
| static void __TBB_EXPORTED_FUNC copy_array( void* dst, const void* src,
size_type n ); | | static void __TBB_EXPORTED_FUNC copy_array( void* dst, const void* src,
size_type n ); | |
| | | | |
|
| | | //! Construct n instances of T, starting at "begin". | |
| | | template<typename Iterator> | |
| | | static void __TBB_EXPORTED_FUNC copy_range( void* dst, const void* p_ty | |
| | | pe_erased_iterator, size_type n ); | |
| | | | |
| //! Assign n instances of T, starting at "begin". | | //! Assign n instances of T, starting at "begin". | |
| static void __TBB_EXPORTED_FUNC assign_array( void* dst, const void* sr
c, size_type n ); | | static void __TBB_EXPORTED_FUNC assign_array( void* dst, const void* sr
c, size_type n ); | |
| | | | |
| //! Destroy n instances of T, starting at "begin". | | //! Destroy n instances of T, starting at "begin". | |
| static void __TBB_EXPORTED_FUNC destroy_array( void* begin, size_type n
); | | static void __TBB_EXPORTED_FUNC destroy_array( void* begin, size_type n
); | |
| | | | |
| //! Exception-aware helper class for filling a segment by exception-dan
ger operators of user class | | //! Exception-aware helper class for filling a segment by exception-dan
ger operators of user class | |
| class internal_loop_guide : internal::no_copy { | | class internal_loop_guide : internal::no_copy { | |
| public: | | public: | |
| const pointer array; | | const pointer array; | |
| | | | |
| skipping to change at line 986 | | skipping to change at line 1085 | |
| if( old.first_block ) // free segment allocated for compacting. Onl
y for support of exceptions in ctor of user T[ype] | | if( old.first_block ) // free segment allocated for compacting. Onl
y for support of exceptions in ctor of user T[ype] | |
| internal_free_segments( old.table, 1, old.first_block ); | | internal_free_segments( old.table, 1, old.first_block ); | |
| __TBB_RETHROW(); | | __TBB_RETHROW(); | |
| } | | } | |
| } | | } | |
| #if defined(_MSC_VER) && !defined(__INTEL_COMPILER) | | #if defined(_MSC_VER) && !defined(__INTEL_COMPILER) | |
| #pragma warning (pop) | | #pragma warning (pop) | |
| #endif // warning 4701 is back | | #endif // warning 4701 is back | |
| | | | |
| template<typename T, class A> | | template<typename T, class A> | |
|
| void concurrent_vector<T, A>::internal_free_segments(void *table[], segment_index_t k, segment_index_t first_block) { | | void concurrent_vector<T, A>::internal_free_segments(segment_t table[], segment_index_t k, segment_index_t first_block) { | |
| // Free the arrays | | // Free the arrays | |
| while( k > first_block ) { | | while( k > first_block ) { | |
| --k; | | --k; | |
|
| T* array = static_cast<T*>(table[k]); | | segment_value_t segment_value = table[k].load<relaxed>(); | |
| table[k] = NULL; | | table[k].store<relaxed>(segment_not_used()); | |
| if( array > internal::vector_allocation_error_flag ) // check for c | | if( segment_value == segment_allocated() ) // check for correct seg | |
| orrect segment pointer | | ment pointer | |
| this->my_allocator.deallocate( array, segment_size(k) ); | | this->my_allocator.deallocate( (segment_value.pointer<T>()), se | |
| | | gment_size(k) ); | |
| } | | } | |
|
| T* array = static_cast<T*>(table[0]); | | segment_value_t segment_value = table[0].load<relaxed>(); | |
| if( array > internal::vector_allocation_error_flag ) { | | if( segment_value == segment_allocated() ) { | |
| __TBB_ASSERT( first_block > 0, NULL ); | | __TBB_ASSERT( first_block > 0, NULL ); | |
|
| while(k > 0) table[--k] = NULL; | | while(k > 0) table[--k].store<relaxed>(segment_not_used()); | |
| this->my_allocator.deallocate( array, segment_size(first_block) ); | | this->my_allocator.deallocate( (segment_value.pointer<T>()), segmen | |
| | | t_size(first_block) ); | |
| } | | } | |
| } | | } | |
| | | | |
| template<typename T, class A> | | template<typename T, class A> | |
| T& concurrent_vector<T, A>::internal_subscript( size_type index ) const { | | T& concurrent_vector<T, A>::internal_subscript( size_type index ) const { | |
|
| | | //TODO: unify both versions of internal_subscript | |
| __TBB_ASSERT( index < my_early_size, "index out of bounds" ); | | __TBB_ASSERT( index < my_early_size, "index out of bounds" ); | |
| size_type j = index; | | size_type j = index; | |
| segment_index_t k = segment_base_index_of( j ); | | segment_index_t k = segment_base_index_of( j ); | |
|
| __TBB_ASSERT( (segment_t*)my_segment != my_storage || k < pointers_per_ | | __TBB_ASSERT( my_segment.load<acquire>() != my_storage || k < pointers_ | |
| short_table, "index is being allocated" ); | | per_short_table, "index is being allocated" ); | |
| // no need in __TBB_load_with_acquire since thread works in own space o | | //no need in load with acquire (load<acquire>) since thread works in ow | |
| r gets | | n space or gets | |
| T* array = static_cast<T*>( tbb::internal::itt_hide_load_word(my_segmen | | //the information about added elements via some form of external synchr | |
| t[k].array)); | | onization | |
| __TBB_ASSERT( array != internal::vector_allocation_error_flag, "the ins | | //TODO: why not make a load of my_segment relaxed as well ? | |
| tance is broken by bad allocation. Use at() instead" ); | | //TODO: add an assertion that my_segment[k] is properly aligned to plea | |
| __TBB_ASSERT( array, "index is being allocated" ); | | se ITT | |
| return array[j]; | | segment_value_t segment_value = my_segment[k].template load<relaxed>() | |
| | | ; | |
| | | __TBB_ASSERT( segment_value != segment_allocation_failed(), "the instan | |
| | | ce is broken by bad allocation. Use at() instead" ); | |
| | | __TBB_ASSERT( segment_value != segment_not_used(), "index is being allo | |
| | | cated" ); | |
| | | return (( segment_value.pointer<T>()))[j]; | |
| } | | } | |
| | | | |
| template<typename T, class A> | | template<typename T, class A> | |
| T& concurrent_vector<T, A>::internal_subscript_with_exceptions( size_type i
ndex ) const { | | T& concurrent_vector<T, A>::internal_subscript_with_exceptions( size_type i
ndex ) const { | |
| if( index >= my_early_size ) | | if( index >= my_early_size ) | |
| internal::throw_exception(internal::eid_out_of_range); // throw std
::out_of_range | | internal::throw_exception(internal::eid_out_of_range); // throw std
::out_of_range | |
| size_type j = index; | | size_type j = index; | |
| segment_index_t k = segment_base_index_of( j ); | | segment_index_t k = segment_base_index_of( j ); | |
|
| if( (segment_t*)my_segment == my_storage && k >= pointers_per_short_tab | | //TODO: refactor this condition into separate helper function, e.g. fit | |
| le ) | | s_into_small_table | |
| | | if( my_segment.load<acquire>() == my_storage && k >= pointers_per_short | |
| | | _table ) | |
| internal::throw_exception(internal::eid_segment_range_error); // th
row std::range_error | | internal::throw_exception(internal::eid_segment_range_error); // th
row std::range_error | |
|
| void *array = my_segment[k].array; // no need in __TBB_load_with_acquir | | // no need in load with acquire (load<acquire>) since thread works in o | |
| e | | wn space or gets | |
| if( array <= internal::vector_allocation_error_flag ) // check for corr | | //the information about added elements via some form of external synchr | |
| ect segment pointer | | onization | |
| | | //TODO: why not make a load of my_segment relaxed as well ? | |
| | | //TODO: add an assertion that my_segment[k] is properly aligned to plea | |
| | | se ITT | |
| | | segment_value_t segment_value = my_segment[k].template load<relaxed>() | |
| | | ; | |
| | | if( segment_value != segment_allocated() ) // check for correct segment | |
| | | pointer | |
| internal::throw_exception(internal::eid_index_range_error); // thro
w std::range_error | | internal::throw_exception(internal::eid_index_range_error); // thro
w std::range_error | |
|
| return static_cast<T*>(array)[j]; | | return (segment_value.pointer<T>())[j]; | |
| } | | } | |
| | | | |
| template<typename T, class A> template<class I> | | template<typename T, class A> template<class I> | |
| void concurrent_vector<T, A>::internal_assign_iterators(I first, I last) { | | void concurrent_vector<T, A>::internal_assign_iterators(I first, I last) { | |
| __TBB_ASSERT(my_early_size == 0, NULL); | | __TBB_ASSERT(my_early_size == 0, NULL); | |
| size_type n = std::distance(first, last); | | size_type n = std::distance(first, last); | |
| if( !n ) return; | | if( !n ) return; | |
| internal_reserve(n, sizeof(T), max_size()); | | internal_reserve(n, sizeof(T), max_size()); | |
| my_early_size = n; | | my_early_size = n; | |
| segment_index_t k = 0; | | segment_index_t k = 0; | |
| size_type sz = segment_size( my_first_block ); | | size_type sz = segment_size( my_first_block ); | |
| while( sz < n ) { | | while( sz < n ) { | |
|
| internal_loop_guide loop(sz, my_segment[k].array); | | internal_loop_guide loop(sz, my_segment[k].template load<relaxed>()
.template pointer<void>()); | |
| loop.iterate(first); | | loop.iterate(first); | |
| n -= sz; | | n -= sz; | |
| if( !k ) k = my_first_block; | | if( !k ) k = my_first_block; | |
| else { ++k; sz <<= 1; } | | else { ++k; sz <<= 1; } | |
| } | | } | |
|
| internal_loop_guide loop(n, my_segment[k].array); | | internal_loop_guide loop(n, my_segment[k].template load<relaxed>().temp
late pointer<void>()); | |
| loop.iterate(first); | | loop.iterate(first); | |
| } | | } | |
| | | | |
| template<typename T, class A> | | template<typename T, class A> | |
| void concurrent_vector<T, A>::initialize_array( void* begin, const void *,
size_type n ) { | | void concurrent_vector<T, A>::initialize_array( void* begin, const void *,
size_type n ) { | |
| internal_loop_guide loop(n, begin); loop.init(); | | internal_loop_guide loop(n, begin); loop.init(); | |
| } | | } | |
| | | | |
| template<typename T, class A> | | template<typename T, class A> | |
| void concurrent_vector<T, A>::initialize_array_by( void* begin, const void
*src, size_type n ) { | | void concurrent_vector<T, A>::initialize_array_by( void* begin, const void
*src, size_type n ) { | |
| internal_loop_guide loop(n, begin); loop.init(src); | | internal_loop_guide loop(n, begin); loop.init(src); | |
| } | | } | |
| | | | |
| template<typename T, class A> | | template<typename T, class A> | |
| void concurrent_vector<T, A>::copy_array( void* dst, const void* src, size_
type n ) { | | void concurrent_vector<T, A>::copy_array( void* dst, const void* src, size_
type n ) { | |
| internal_loop_guide loop(n, dst); loop.copy(src); | | internal_loop_guide loop(n, dst); loop.copy(src); | |
| } | | } | |
| | | | |
| template<typename T, class A> | | template<typename T, class A> | |
|
| | | template<typename I> | |
| | | void concurrent_vector<T, A>::copy_range( void* dst, const void* p_type_era | |
| | | sed_iterator, size_type n ){ | |
| | | I & iterator ((*const_cast<I*>(static_cast<const I*>(p_type_erased_iter | |
| | | ator)))); | |
| | | internal_loop_guide loop(n, dst); loop.iterate(iterator); | |
| | | } | |
| | | | |
| | | template<typename T, class A> | |
| void concurrent_vector<T, A>::assign_array( void* dst, const void* src, siz
e_type n ) { | | void concurrent_vector<T, A>::assign_array( void* dst, const void* src, siz
e_type n ) { | |
| internal_loop_guide loop(n, dst); loop.assign(src); | | internal_loop_guide loop(n, dst); loop.assign(src); | |
| } | | } | |
| | | | |
| #if defined(_MSC_VER) && !defined(__INTEL_COMPILER) | | #if defined(_MSC_VER) && !defined(__INTEL_COMPILER) | |
| // Workaround for overzealous compiler warning | | // Workaround for overzealous compiler warning | |
| #pragma warning (push) | | #pragma warning (push) | |
| #pragma warning (disable: 4189) | | #pragma warning (disable: 4189) | |
| #endif | | #endif | |
| template<typename T, class A> | | template<typename T, class A> | |
| | | | |
End of changes. 34 change blocks. |
| 62 lines changed or deleted | | 216 lines changed or added | |
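The rewritten subscript paths above keep the existing contract: operator[] stays unchecked (its assertions fire only in debug builds), while at() validates the index and reports a segment broken by a failed allocation by throwing. A minimal usage sketch of that distinction, using only the public tbb::concurrent_vector interface (a sketch, not taken from the headers above):

    #include "tbb/concurrent_vector.h"
    #include <stdexcept>
    #include <iostream>

    void subscript_contract() {
        tbb::concurrent_vector<int> v;
        v.grow_by(10);                 // ten default-constructed elements
        v[5] = 42;                     // unchecked: caller guarantees 5 < v.size()
        try {
            int x = v.at(100);         // checked: index is out of range, so this throws
            (void)x;
        } catch ( const std::out_of_range& ) {
            std::cout << "at() rejected the bad index" << std::endl;
        }
        // at() is also the checked way to detect segments left broken by a
        // failed allocation; operator[] only asserts in debug builds.
    }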
|
| flow_graph.h | | flow_graph.h | |
| | | | |
| skipping to change at line 2339 | | skipping to change at line 2339 | |
| | | | |
| #if TBB_PREVIEW_FLOW_GRAPH_TRACE | | #if TBB_PREVIEW_FLOW_GRAPH_TRACE | |
| /* override */ void set_name( const char *name ) { | | /* override */ void set_name( const char *name ) { | |
| tbb::internal::fgt_node_desc( this, name ); | | tbb::internal::fgt_node_desc( this, name ); | |
| } | | } | |
| #endif | | #endif | |
| | | | |
| }; | | }; | |
| | | | |
| #if TBB_PREVIEW_GRAPH_NODES | | #if TBB_PREVIEW_GRAPH_NODES | |
|
| // or node | | // indexer node | |
| #include "internal/_flow_graph_or_impl.h" | | #include "internal/_flow_graph_indexer_impl.h" | |
| | | | |
|
| template<typename InputTuple> | | struct indexer_null_type {}; | |
| class or_node : public internal::unfolded_or_node<InputTuple> { | | | |
| | | template<typename T0, typename T1=indexer_null_type, typename T2=indexer_nu | |
| | | ll_type, typename T3=indexer_null_type, | |
| | | typename T4=indexer_null_type, typename T5=indexer_nu | |
| | | ll_type, typename T6=indexer_null_type, | |
| | | typename T7=indexer_null_type, typename T8=indexer_nu | |
| | | ll_type, typename T9=indexer_null_type> class indexer_node; | |
| | | | |
| | | //indexer node specializations | |
| | | template<typename T0> | |
| | | class indexer_node<T0> : public internal::unfolded_indexer_node<tuple<T0> > | |
| | | { | |
| | | private: | |
| | | static const int N = 1; | |
| | | public: | |
| | | typedef tuple<T0> InputTuple; | |
| | | typedef typename internal::tagged_msg<size_t, T0> output_type; | |
| | | typedef typename internal::unfolded_indexer_node<InputTuple> unfolded_t | |
| | | ype; | |
| | | indexer_node(graph& g) : unfolded_type(g) { | |
| | | tbb::internal::fgt_multiinput_node<InputTuple,N>( tbb::internal::FL | |
| | | OW_INDEXER_NODE, &this->my_graph, | |
| | | this->input_ports(), static_cast | |
| | | < sender< output_type > *>(this) ); | |
| | | } | |
| | | // Copy constructor | |
| | | indexer_node( const indexer_node& other ) : unfolded_type(other) { | |
| | | tbb::internal::fgt_multiinput_node<InputTuple,N>( tbb::internal::FL | |
| | | OW_INDEXER_NODE, &this->my_graph, | |
| | | this->input_ports(), static_cast | |
| | | < sender< output_type > *>(this) ); | |
| | | } | |
| | | | |
| | | #if TBB_PREVIEW_FLOW_GRAPH_TRACE | |
| | | void set_name( const char *name ) { | |
| | | tbb::internal::fgt_node_desc( this, name ); | |
| | | } | |
| | | #endif | |
| | | }; | |
| | | | |
| | | template<typename T0, typename T1> | |
| | | class indexer_node<T0, T1> : public internal::unfolded_indexer_node<tuple<T | |
| | | 0, T1> > { | |
| | | private: | |
| | | static const int N = 2; | |
| | | public: | |
| | | typedef tuple<T0, T1> InputTuple; | |
| | | typedef typename internal::tagged_msg<size_t, T0, T1> output_type; | |
| | | typedef typename internal::unfolded_indexer_node<InputTuple> unfolded_t | |
| | | ype; | |
| | | indexer_node(graph& g) : unfolded_type(g) { | |
| | | tbb::internal::fgt_multiinput_node<InputTuple,N>( tbb::internal::FL | |
| | | OW_INDEXER_NODE, &this->my_graph, | |
| | | this->input_ports(), static_cast | |
| | | < sender< output_type > *>(this) ); | |
| | | } | |
| | | // Copy constructor | |
| | | indexer_node( const indexer_node& other ) : unfolded_type(other) { | |
| | | tbb::internal::fgt_multiinput_node<InputTuple,N>( tbb::internal::FL | |
| | | OW_INDEXER_NODE, &this->my_graph, | |
| | | this->input_ports(), static_cast | |
| | | < sender< output_type > *>(this) ); | |
| | | } | |
| | | | |
| | | #if TBB_PREVIEW_FLOW_GRAPH_TRACE | |
| | | void set_name( const char *name ) { | |
| | | tbb::internal::fgt_node_desc( this, name ); | |
| | | } | |
| | | #endif | |
| | | }; | |
| | | | |
| | | template<typename T0, typename T1, typename T2> | |
| | | class indexer_node<T0, T1, T2> : public internal::unfolded_indexer_node<tup | |
| | | le<T0, T1, T2> > { | |
| private: | | private: | |
|
| static const int N = tbb::flow::tuple_size<InputTuple>::value; | | static const int N = 3; | |
| public: | | public: | |
|
| typedef typename internal::or_output_type<InputTuple>::type output_type | | typedef tuple<T0, T1, T2> InputTuple; | |
| ; | | typedef typename internal::tagged_msg<size_t, T0, T1, T2> output_type; | |
| typedef typename internal::unfolded_or_node<InputTuple> unfolded_type; | | typedef typename internal::unfolded_indexer_node<InputTuple> unfolded_t | |
| or_node(graph& g) : unfolded_type(g) { | | ype; | |
| tbb::internal::fgt_multiinput_node<InputTuple,N>( tbb::internal::FL | | indexer_node(graph& g) : unfolded_type(g) { | |
| OW_OR_NODE, &this->my_graph, | | tbb::internal::fgt_multiinput_node<InputTuple,N>( tbb::internal::FL | |
| | | OW_INDEXER_NODE, &this->my_graph, | |
| this->input_ports(), static_cast
< sender< output_type > *>(this) ); | | this->input_ports(), static_cast
< sender< output_type > *>(this) ); | |
| } | | } | |
| // Copy constructor | | // Copy constructor | |
|
| or_node( const or_node& other ) : unfolded_type(other) { | | indexer_node( const indexer_node& other ) : unfolded_type(other) { | |
| tbb::internal::fgt_multiinput_node<InputTuple,N>( tbb::internal::FL | | tbb::internal::fgt_multiinput_node<InputTuple,N>( tbb::internal::FL | |
| OW_OR_NODE, &this->my_graph, | | OW_INDEXER_NODE, &this->my_graph, | |
| | | this->input_ports(), static_cast | |
| | | < sender< output_type > *>(this) ); | |
| | | } | |
| | | | |
| | | #if TBB_PREVIEW_FLOW_GRAPH_TRACE | |
| | | void set_name( const char *name ) { | |
| | | tbb::internal::fgt_node_desc( this, name ); | |
| | | } | |
| | | #endif | |
| | | }; | |
| | | | |
| | | template<typename T0, typename T1, typename T2, typename T3> | |
| | | class indexer_node<T0, T1, T2, T3> : public internal::unfolded_indexer_node | |
| | | <tuple<T0, T1, T2, T3> > { | |
| | | private: | |
| | | static const int N = 4; | |
| | | public: | |
| | | typedef tuple<T0, T1, T2, T3> InputTuple; | |
| | | typedef typename internal::tagged_msg<size_t, T0, T1, T2, T3> output_ty | |
| | | pe; | |
| | | typedef typename internal::unfolded_indexer_node<InputTuple> unfolded_t | |
| | | ype; | |
| | | indexer_node(graph& g) : unfolded_type(g) { | |
| | | tbb::internal::fgt_multiinput_node<InputTuple,N>( tbb::internal::FL | |
| | | OW_INDEXER_NODE, &this->my_graph, | |
| | | this->input_ports(), static_cast | |
| | | < sender< output_type > *>(this) ); | |
| | | } | |
| | | // Copy constructor | |
| | | indexer_node( const indexer_node& other ) : unfolded_type(other) { | |
| | | tbb::internal::fgt_multiinput_node<InputTuple,N>( tbb::internal::FL | |
| | | OW_INDEXER_NODE, &this->my_graph, | |
| this->input_ports(), static_cast
< sender< output_type > *>(this) ); | | this->input_ports(), static_cast
< sender< output_type > *>(this) ); | |
| } | | } | |
| | | | |
| #if TBB_PREVIEW_FLOW_GRAPH_TRACE | | #if TBB_PREVIEW_FLOW_GRAPH_TRACE | |
| /* override */ void set_name( const char *name ) { | | /* override */ void set_name( const char *name ) { | |
| tbb::internal::fgt_node_desc( this, name ); | | tbb::internal::fgt_node_desc( this, name ); | |
| } | | } | |
| #endif | | #endif | |
|
| | | }; | |
| | | | |
|
| | | template<typename T0, typename T1, typename T2, typename T3, typename T4> | |
| | | class indexer_node<T0, T1, T2, T3, T4> : public internal::unfolded_indexer_ | |
| | | node<tuple<T0, T1, T2, T3, T4> > { | |
| | | private: | |
| | | static const int N = 5; | |
| | | public: | |
| | | typedef tuple<T0, T1, T2, T3, T4> InputTuple; | |
| | | typedef typename internal::tagged_msg<size_t, T0, T1, T2, T3, T4> outpu | |
| | | t_type; | |
| | | typedef typename internal::unfolded_indexer_node<InputTuple> unfolded_t | |
| | | ype; | |
| | | indexer_node(graph& g) : unfolded_type(g) { | |
| | | tbb::internal::fgt_multiinput_node<InputTuple,N>( tbb::internal::FL | |
| | | OW_INDEXER_NODE, &this->my_graph, | |
| | | this->input_ports(), static_cast | |
| | | < sender< output_type > *>(this) ); | |
| | | } | |
| | | // Copy constructor | |
| | | indexer_node( const indexer_node& other ) : unfolded_type(other) { | |
| | | tbb::internal::fgt_multiinput_node<InputTuple,N>( tbb::internal::FL | |
| | | OW_INDEXER_NODE, &this->my_graph, | |
| | | this->input_ports(), static_cast | |
| | | < sender< output_type > *>(this) ); | |
| | | } | |
| | | | |
| | | #if TBB_PREVIEW_FLOW_GRAPH_TRACE | |
| | | /* override */ void set_name( const char *name ) { | |
| | | tbb::internal::fgt_node_desc( this, name ); | |
| | | } | |
| | | #endif | |
| }; | | }; | |
|
| | | | |
| | | #if __TBB_VARIADIC_MAX >= 6 | |
| | | template<typename T0, typename T1, typename T2, typename T3, typename T4, t | |
| | | ypename T5> | |
| | | class indexer_node<T0, T1, T2, T3, T4, T5> : public internal::unfolded_inde | |
| | | xer_node<tuple<T0, T1, T2, T3, T4, T5> > { | |
| | | private: | |
| | | static const int N = 6; | |
| | | public: | |
| | | typedef tuple<T0, T1, T2, T3, T4, T5> InputTuple; | |
| | | typedef typename internal::tagged_msg<size_t, T0, T1, T2, T3, T4, T5> o | |
| | | utput_type; | |
| | | typedef typename internal::unfolded_indexer_node<InputTuple> unfolded_t | |
| | | ype; | |
| | | indexer_node(graph& g) : unfolded_type(g) { | |
| | | tbb::internal::fgt_multiinput_node<InputTuple,N>( tbb::internal::FL | |
| | | OW_INDEXER_NODE, &this->my_graph, | |
| | | this->input_ports(), static_cast | |
| | | < sender< output_type > *>(this) ); | |
| | | } | |
| | | // Copy constructor | |
| | | indexer_node( const indexer_node& other ) : unfolded_type(other) { | |
| | | tbb::internal::fgt_multiinput_node<InputTuple,N>( tbb::internal::FL | |
| | | OW_INDEXER_NODE, &this->my_graph, | |
| | | this->input_ports(), static_cast | |
| | | < sender< output_type > *>(this) ); | |
| | | } | |
| | | | |
| | | #if TBB_PREVIEW_FLOW_GRAPH_TRACE | |
| | | /* override */ void set_name( const char *name ) { | |
| | | tbb::internal::fgt_node_desc( this, name ); | |
| | | } | |
| | | #endif | |
| | | }; | |
| | | #endif //variadic max 6 | |
| | | | |
| | | #if __TBB_VARIADIC_MAX >= 7 | |
| | | template<typename T0, typename T1, typename T2, typename T3, typename T4, t | |
| | | ypename T5, | |
| | | typename T6> | |
| | | class indexer_node<T0, T1, T2, T3, T4, T5, T6> : public internal::unfolded_ | |
| | | indexer_node<tuple<T0, T1, T2, T3, T4, T5, T6> > { | |
| | | private: | |
| | | static const int N = 7; | |
| | | public: | |
| | | typedef tuple<T0, T1, T2, T3, T4, T5, T6> InputTuple; | |
| | | typedef typename internal::tagged_msg<size_t, T0, T1, T2, T3, T4, T5, T | |
| | | 6> output_type; | |
| | | typedef typename internal::unfolded_indexer_node<InputTuple> unfolded_t | |
| | | ype; | |
| | | indexer_node(graph& g) : unfolded_type(g) { | |
| | | tbb::internal::fgt_multiinput_node<InputTuple,N>( tbb::internal::FL | |
| | | OW_INDEXER_NODE, &this->my_graph, | |
| | | this->input_ports(), static_cast | |
| | | < sender< output_type > *>(this) ); | |
| | | } | |
| | | // Copy constructor | |
| | | indexer_node( const indexer_node& other ) : unfolded_type(other) { | |
| | | tbb::internal::fgt_multiinput_node<InputTuple,N>( tbb::internal::FL | |
| | | OW_INDEXER_NODE, &this->my_graph, | |
| | | this->input_ports(), static_cast | |
| | | < sender< output_type > *>(this) ); | |
| | | } | |
| | | | |
| | | #if TBB_PREVIEW_FLOW_GRAPH_TRACE | |
| | | /* override */ void set_name( const char *name ) { | |
| | | tbb::internal::fgt_node_desc( this, name ); | |
| | | } | |
| | | #endif | |
| | | }; | |
| | | #endif //variadic max 7 | |
| | | | |
| | | #if __TBB_VARIADIC_MAX >= 8 | |
| | | template<typename T0, typename T1, typename T2, typename T3, typename T4, t | |
| | | ypename T5, | |
| | | typename T6, typename T7> | |
| | | class indexer_node<T0, T1, T2, T3, T4, T5, T6, T7> : public internal::unfol | |
| | | ded_indexer_node<tuple<T0, T1, T2, T3, T4, T5, T6, T7> > { | |
| | | private: | |
| | | static const int N = 8; | |
| | | public: | |
| | | typedef tuple<T0, T1, T2, T3, T4, T5, T6, T7> InputTuple; | |
| | | typedef typename internal::tagged_msg<size_t, T0, T1, T2, T3, T4, T5, T | |
| | | 6, T7> output_type; | |
| | | typedef typename internal::unfolded_indexer_node<InputTuple> unfolded_t | |
| | | ype; | |
| | | indexer_node(graph& g) : unfolded_type(g) { | |
| | | tbb::internal::fgt_multiinput_node<InputTuple,N>( tbb::internal::FL | |
| | | OW_INDEXER_NODE, &this->my_graph, | |
| | | this->input_ports(), static_cast | |
| | | < sender< output_type > *>(this) ); | |
| | | } | |
| | | // Copy constructor | |
| | | indexer_node( const indexer_node& other ) : unfolded_type(other) { | |
| | | tbb::internal::fgt_multiinput_node<InputTuple,N>( tbb::internal::FL | |
| | | OW_INDEXER_NODE, &this->my_graph, | |
| | | this->input_ports(), static_cast | |
| | | < sender< output_type > *>(this) ); | |
| | | } | |
| | | | |
| | | #if TBB_PREVIEW_FLOW_GRAPH_TRACE | |
| | | /* override */ void set_name( const char *name ) { | |
| | | tbb::internal::fgt_node_desc( this, name ); | |
| | | } | |
| | | #endif | |
| | | }; | |
| | | #endif //variadic max 8 | |
| | | | |
| | | #if __TBB_VARIADIC_MAX >= 9 | |
| | | template<typename T0, typename T1, typename T2, typename T3, typename T4, t | |
| | | ypename T5, | |
| | | typename T6, typename T7, typename T8> | |
| | | class indexer_node<T0, T1, T2, T3, T4, T5, T6, T7, T8> : public internal::u | |
| | | nfolded_indexer_node<tuple<T0, T1, T2, T3, T4, T5, T6, T7, T8> > { | |
| | | private: | |
| | | static const int N = 9; | |
| | | public: | |
| | | typedef tuple<T0, T1, T2, T3, T4, T5, T6, T7, T8> InputTuple; | |
| | | typedef typename internal::tagged_msg<size_t, T0, T1, T2, T3, T4, T5, T | |
| | | 6, T7, T8> output_type; | |
| | | typedef typename internal::unfolded_indexer_node<InputTuple> unfolded_t | |
| | | ype; | |
| | | indexer_node(graph& g) : unfolded_type(g) { | |
| | | tbb::internal::fgt_multiinput_node<InputTuple,N>( tbb::internal::FL | |
| | | OW_INDEXER_NODE, &this->my_graph, | |
| | | this->input_ports(), static_cast | |
| | | < sender< output_type > *>(this) ); | |
| | | } | |
| | | // Copy constructor | |
| | | indexer_node( const indexer_node& other ) : unfolded_type(other) { | |
| | | tbb::internal::fgt_multiinput_node<InputTuple,N>( tbb::internal::FL | |
| | | OW_INDEXER_NODE, &this->my_graph, | |
| | | this->input_ports(), static_cast | |
| | | < sender< output_type > *>(this) ); | |
| | | } | |
| | | | |
| | | #if TBB_PREVIEW_FLOW_GRAPH_TRACE | |
| | | /* override */ void set_name( const char *name ) { | |
| | | tbb::internal::fgt_node_desc( this, name ); | |
| | | } | |
| | | #endif | |
| | | }; | |
| | | #endif //variadic max 9 | |
| | | | |
| | | #if __TBB_VARIADIC_MAX >= 10 | |
| | | template<typename T0, typename T1, typename T2, typename T3, typename T4, t | |
| | | ypename T5, | |
| | | typename T6, typename T7, typename T8, typename T9> | |
| | | class indexer_node/*default*/ : public internal::unfolded_indexer_node<tupl | |
| | | e<T0, T1, T2, T3, T4, T5, T6, T7, T8, T9> > { | |
| | | private: | |
| | | static const int N = 10; | |
| | | public: | |
| | | typedef tuple<T0, T1, T2, T3, T4, T5, T6, T7, T8, T9> InputTuple; | |
| | | typedef typename internal::tagged_msg<size_t, T0, T1, T2, T3, T4, T5, T | |
| | | 6, T7, T8, T9> output_type; | |
| | | typedef typename internal::unfolded_indexer_node<InputTuple> unfolded_t | |
| | | ype; | |
| | | indexer_node(graph& g) : unfolded_type(g) { | |
| | | tbb::internal::fgt_multiinput_node<InputTuple,N>( tbb::internal::FL | |
| | | OW_INDEXER_NODE, &this->my_graph, | |
| | | this->input_ports(), static_cast | |
| | | < sender< output_type > *>(this) ); | |
| | | } | |
| | | // Copy constructor | |
| | | indexer_node( const indexer_node& other ) : unfolded_type(other) { | |
| | | tbb::internal::fgt_multiinput_node<InputTuple,N>( tbb::internal::FL | |
| | | OW_INDEXER_NODE, &this->my_graph, | |
| | | this->input_ports(), static_cast | |
| | | < sender< output_type > *>(this) ); | |
| | | } | |
| | | | |
| | | #if TBB_PREVIEW_FLOW_GRAPH_TRACE | |
| | | /* override */ void set_name( const char *name ) { | |
| | | tbb::internal::fgt_node_desc( this, name ); | |
| | | } | |
| | | #endif | |
| | | }; | |
| | | #endif //variadic max 10 | |
| | | | |
| #endif // TBB_PREVIEW_GRAPH_NODES | | #endif // TBB_PREVIEW_GRAPH_NODES | |
| | | | |
| //! Makes an edge between a single predecessor and a single successor | | //! Makes an edge between a single predecessor and a single successor | |
| template< typename T > | | template< typename T > | |
| inline void make_edge( sender<T> &p, receiver<T> &s ) { | | inline void make_edge( sender<T> &p, receiver<T> &s ) { | |
| p.register_successor( s ); | | p.register_successor( s ); | |
| tbb::internal::fgt_make_edge( &p, &s ); | | tbb::internal::fgt_make_edge( &p, &s ); | |
| } | | } | |
| | | | |
| //! Makes an edge between a single predecessor and a single successor | | //! Makes an edge between a single predecessor and a single successor | |
| | | | |
| skipping to change at line 2403 | | skipping to change at line 2650 | |
| using interface7::sender; | | using interface7::sender; | |
| using interface7::receiver; | | using interface7::receiver; | |
| using interface7::continue_receiver; | | using interface7::continue_receiver; | |
| | | | |
| using interface7::source_node; | | using interface7::source_node; | |
| using interface7::function_node; | | using interface7::function_node; | |
| using interface7::multifunction_node; | | using interface7::multifunction_node; | |
| using interface7::split_node; | | using interface7::split_node; | |
| using interface7::internal::output_port; | | using interface7::internal::output_port; | |
| #if TBB_PREVIEW_GRAPH_NODES | | #if TBB_PREVIEW_GRAPH_NODES | |
|
| using interface7::or_node; | | using interface7::indexer_node; | |
| | | using interface7::internal::tagged_msg; | |
| | | using interface7::internal::cast_to; | |
| | | using interface7::internal::is_a; | |
| #endif | | #endif | |
| using interface7::continue_node; | | using interface7::continue_node; | |
| using interface7::overwrite_node; | | using interface7::overwrite_node; | |
| using interface7::write_once_node; | | using interface7::write_once_node; | |
| using interface7::broadcast_node; | | using interface7::broadcast_node; | |
| using interface7::buffer_node; | | using interface7::buffer_node; | |
| using interface7::queue_node; | | using interface7::queue_node; | |
| using interface7::sequencer_node; | | using interface7::sequencer_node; | |
| using interface7::priority_queue_node; | | using interface7::priority_queue_node; | |
| using interface7::limiter_node; | | using interface7::limiter_node; | |
| | | | |
End of changes. 9 change blocks. |
| 15 lines changed or deleted | | 335 lines changed or added | |
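With or_node renamed to indexer_node, the node's output is a tagged_msg whose tag records which input port produced the value, and cast_to and is_a are now exported from tbb::flow for examining it. A small usage sketch, assuming TBB_PREVIEW_GRAPH_NODES is defined before the include and a TBB build that ships these preview headers (the node and body names are illustrative):

    #define TBB_PREVIEW_GRAPH_NODES 1
    #include "tbb/flow_graph.h"
    #include <iostream>

    typedef tbb::flow::indexer_node<int, float> merge_t;
    typedef merge_t::output_type msg_t;            // tagged_msg<size_t, int, float>

    struct display {
        void operator()( const msg_t &m ) const {
            if ( tbb::flow::is_a<int>(m) )         // which input port produced the message?
                std::cout << "int: "   << tbb::flow::cast_to<int>(m)   << std::endl;
            else
                std::cout << "float: " << tbb::flow::cast_to<float>(m) << std::endl;
        }
    };

    int main() {
        tbb::flow::graph g;
        merge_t merge( g );
        tbb::flow::function_node<msg_t> report( g, tbb::flow::unlimited, display() );
        tbb::flow::make_edge( merge, report );
        tbb::flow::input_port<0>( merge ).try_put( 3 );     // arrives tagged with port 0
        tbb::flow::input_port<1>( merge ).try_put( 2.5f );  // arrives tagged with port 1
        g.wait_for_all();
        return 0;
    }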
|
| partitioner.h | | partitioner.h | |
| | | | |
| skipping to change at line 33 | | skipping to change at line 33 | |
| file does not by itself cause the resulting executable to be covered by | | file does not by itself cause the resulting executable to be covered by | |
| the GNU General Public License. This exception does not however | | the GNU General Public License. This exception does not however | |
| invalidate any other reasons why the executable file might be covered b
y | | invalidate any other reasons why the executable file might be covered b
y | |
| the GNU General Public License. | | the GNU General Public License. | |
| */ | | */ | |
| | | | |
| #ifndef __TBB_partitioner_H | | #ifndef __TBB_partitioner_H | |
| #define __TBB_partitioner_H | | #define __TBB_partitioner_H | |
| | | | |
| #ifndef __TBB_INITIAL_CHUNKS | | #ifndef __TBB_INITIAL_CHUNKS | |
|
| | | // initial task divisions per thread | |
| #define __TBB_INITIAL_CHUNKS 2 | | #define __TBB_INITIAL_CHUNKS 2 | |
| #endif | | #endif | |
| #ifndef __TBB_RANGE_POOL_CAPACITY | | #ifndef __TBB_RANGE_POOL_CAPACITY | |
|
| | | // maximum number of elements in range pool | |
| #define __TBB_RANGE_POOL_CAPACITY 8 | | #define __TBB_RANGE_POOL_CAPACITY 8 | |
| #endif | | #endif | |
| #ifndef __TBB_INIT_DEPTH | | #ifndef __TBB_INIT_DEPTH | |
|
| | | // initial value for depth of range pool | |
| #define __TBB_INIT_DEPTH 5 | | #define __TBB_INIT_DEPTH 5 | |
| #endif | | #endif | |
|
| | | #ifndef __TBB_DEMAND_DEPTH_ADD | |
 | | | // when an imbalance is detected, the range is split this many more times | |
| | | #define __TBB_DEMAND_DEPTH_ADD 2 | |
| | | #endif | |
| | | #ifndef __TBB_STATIC_THRESHOLD | |
 | | | // number of clock ticks required for the work to be distributed among | |
 | | | // all tasks | |
| | | #define __TBB_STATIC_THRESHOLD 40000 | |
| | | #endif | |
| | | #if __TBB_DEFINE_MIC | |
| | | #define __TBB_NONUNIFORM_TASK_CREATION 1 | |
| | | #ifdef __TBB_machine_time_stamp | |
| | | #define __TBB_USE_MACHINE_TIME_STAMPS 1 | |
| | | #define __TBB_task_duration() __TBB_STATIC_THRESHOLD | |
| | | #endif // __TBB_machine_time_stamp | |
| | | #endif // __TBB_DEFINE_MIC | |
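The chunk, depth, and threshold macros above, including the new __TBB_DEMAND_DEPTH_ADD and __TBB_STATIC_THRESHOLD, are all wrapped in #ifndef, so a build can tune them without patching the header. A hedged sketch (the values are illustrative, not recommendations):

    // On the compiler command line:
    //   -D__TBB_INITIAL_CHUNKS=4 -D__TBB_DEMAND_DEPTH_ADD=3
    // or before the first TBB include in a translation unit:
    #define __TBB_RANGE_POOL_CAPACITY 16   // deeper per-task range pool
    #include "tbb/partitioner.h"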
| | | | |
| #include "task.h" | | #include "task.h" | |
| #include "aligned_space.h" | | #include "aligned_space.h" | |
| #include "atomic.h" | | #include "atomic.h" | |
| | | | |
| #if defined(_MSC_VER) && !defined(__INTEL_COMPILER) | | #if defined(_MSC_VER) && !defined(__INTEL_COMPILER) | |
| // Workaround for overzealous compiler warnings | | // Workaround for overzealous compiler warnings | |
| #pragma warning (push) | | #pragma warning (push) | |
| #pragma warning (disable: 4244) | | #pragma warning (disable: 4244) | |
| #endif | | #endif | |
| | | | |
| namespace tbb { | | namespace tbb { | |
| | | | |
| class auto_partitioner; | | class auto_partitioner; | |
| class simple_partitioner; | | class simple_partitioner; | |
| class affinity_partitioner; | | class affinity_partitioner; | |
|
| namespace interface6 { | | namespace interface7 { | |
| namespace internal { | | namespace internal { | |
| class affinity_partition_type; | | class affinity_partition_type; | |
| } | | } | |
| } | | } | |
| | | | |
| namespace internal { //< @cond INTERNAL | | namespace internal { //< @cond INTERNAL | |
| size_t __TBB_EXPORTED_FUNC get_initial_auto_partitioner_divisor(); | | size_t __TBB_EXPORTED_FUNC get_initial_auto_partitioner_divisor(); | |
| | | | |
| //! Defines entry point for affinity partitioner into tbb run-time library. | | //! Defines entry point for affinity partitioner into tbb run-time library. | |
| class affinity_partitioner_base_v3: no_copy { | | class affinity_partitioner_base_v3: no_copy { | |
| friend class tbb::affinity_partitioner; | | friend class tbb::affinity_partitioner; | |
|
| friend class tbb::interface6::internal::affinity_partition_type; | | friend class tbb::interface7::internal::affinity_partition_type; | |
| //! Array that remembers affinities of tree positions to affinity_id. | | //! Array that remembers affinities of tree positions to affinity_id. | |
| /** NULL if my_size==0. */ | | /** NULL if my_size==0. */ | |
| affinity_id* my_array; | | affinity_id* my_array; | |
| //! Number of elements in my_array. | | //! Number of elements in my_array. | |
| size_t my_size; | | size_t my_size; | |
| //! Zeros the fields. | | //! Zeros the fields. | |
| affinity_partitioner_base_v3() : my_array(NULL), my_size(0) {} | | affinity_partitioner_base_v3() : my_array(NULL), my_size(0) {} | |
| //! Deallocates my_array. | | //! Deallocates my_array. | |
| ~affinity_partitioner_base_v3() {resize(0);} | | ~affinity_partitioner_base_v3() {resize(0);} | |
| //! Resize my_array. | | //! Resize my_array. | |
| | | | |
| skipping to change at line 101 | | skipping to change at line 119 | |
| void spawn_or_delay( bool, task& b ) { | | void spawn_or_delay( bool, task& b ) { | |
| task::spawn(b); | | task::spawn(b); | |
| } | | } | |
| }; | | }; | |
| | | | |
| template<typename Range, typename Body, typename Partitioner> class start_s
can; | | template<typename Range, typename Body, typename Partitioner> class start_s
can; | |
| | | | |
| } //< namespace internal @endcond | | } //< namespace internal @endcond | |
| | | | |
| namespace serial { | | namespace serial { | |
|
| namespace interface6 { | | namespace interface7 { | |
| template<typename Range, typename Body, typename Partitioner> class start_f
or; | | template<typename Range, typename Body, typename Partitioner> class start_f
or; | |
| } | | } | |
| } | | } | |
| | | | |
|
| namespace interface6 { | | namespace interface7 { | |
| //! @cond INTERNAL | | //! @cond INTERNAL | |
| namespace internal { | | namespace internal { | |
| using namespace tbb::internal; | | using namespace tbb::internal; | |
| template<typename Range, typename Body, typename Partitioner> class start_f
or; | | template<typename Range, typename Body, typename Partitioner> class start_f
or; | |
| template<typename Range, typename Body, typename Partitioner> class start_r
educe; | | template<typename Range, typename Body, typename Partitioner> class start_r
educe; | |
| | | | |
| //! Join task node that contains shared flag for stealing feedback | | //! Join task node that contains shared flag for stealing feedback | |
| class flag_task: public task { | | class flag_task: public task { | |
| public: | | public: | |
| tbb::atomic<bool> my_child_stolen; | | tbb::atomic<bool> my_child_stolen; | |
| | | | |
| skipping to change at line 151 | | skipping to change at line 169 | |
| depth_t my_head; | | depth_t my_head; | |
| depth_t my_tail; | | depth_t my_tail; | |
| depth_t my_size; | | depth_t my_size; | |
| depth_t my_depth[MaxCapacity]; // relative depths of stored ranges | | depth_t my_depth[MaxCapacity]; // relative depths of stored ranges | |
| tbb::aligned_space<T, MaxCapacity> my_pool; | | tbb::aligned_space<T, MaxCapacity> my_pool; | |
| | | | |
| public: | | public: | |
| //! initialize via first range in pool | | //! initialize via first range in pool | |
| range_vector(const T& elem) : my_head(0), my_tail(0), my_size(1) { | | range_vector(const T& elem) : my_head(0), my_tail(0), my_size(1) { | |
| my_depth[0] = 0; | | my_depth[0] = 0; | |
|
| new( my_pool.begin() ) T(elem);//TODO: std::move? | | new( static_cast<void *>(my_pool.begin()) ) T(elem);//TODO: std::mo
ve? | |
| } | | } | |
| ~range_vector() { | | ~range_vector() { | |
| while( !empty() ) pop_back(); | | while( !empty() ) pop_back(); | |
| } | | } | |
| bool empty() const { return my_size == 0; } | | bool empty() const { return my_size == 0; } | |
| depth_t size() const { return my_size; } | | depth_t size() const { return my_size; } | |
| //! Populates range pool via ranges up to max depth or while divisible | | //! Populates range pool via ranges up to max depth or while divisible | |
| //! max_depth starts from 0, e.g. value 2 makes 3 ranges in the pool up
to two 1/4 pieces | | //! max_depth starts from 0, e.g. value 2 makes 3 ranges in the pool up
to two 1/4 pieces | |
| void split_to_fill(depth_t max_depth) { | | void split_to_fill(depth_t max_depth) { | |
|
| while( my_size < MaxCapacity && my_depth[my_head] < max_depth | | while( my_size < MaxCapacity && is_divisible(max_depth) ) { | |
| && my_pool.begin()[my_head].is_divisible() ) { | | | |
| depth_t prev = my_head; | | depth_t prev = my_head; | |
| my_head = (my_head + 1) % MaxCapacity; | | my_head = (my_head + 1) % MaxCapacity; | |
| new(my_pool.begin()+my_head) T(my_pool.begin()[prev]); // copy
TODO: std::move? | | new(my_pool.begin()+my_head) T(my_pool.begin()[prev]); // copy
TODO: std::move? | |
| my_pool.begin()[prev].~T(); // instead of assignment | | my_pool.begin()[prev].~T(); // instead of assignment | |
| new(my_pool.begin()+prev) T(my_pool.begin()[my_head], split());
// do 'inverse' split | | new(my_pool.begin()+prev) T(my_pool.begin()[my_head], split());
// do 'inverse' split | |
| my_depth[my_head] = ++my_depth[prev]; | | my_depth[my_head] = ++my_depth[prev]; | |
| my_size++; | | my_size++; | |
| } | | } | |
| } | | } | |
| void pop_back() { | | void pop_back() { | |
| | | | |
| skipping to change at line 197 | | skipping to change at line 214 | |
| } | | } | |
| T& front() { | | T& front() { | |
| __TBB_ASSERT(my_size > 0, "range_vector::front() with empty size"); | | __TBB_ASSERT(my_size > 0, "range_vector::front() with empty size"); | |
| return my_pool.begin()[my_tail]; | | return my_pool.begin()[my_tail]; | |
| } | | } | |
| //! similarly to front(), returns depth of the first range in the pool | | //! similarly to front(), returns depth of the first range in the pool | |
| depth_t front_depth() { | | depth_t front_depth() { | |
| __TBB_ASSERT(my_size > 0, "range_vector::front_depth() with empty s
ize"); | | __TBB_ASSERT(my_size > 0, "range_vector::front_depth() with empty s
ize"); | |
| return my_depth[my_tail]; | | return my_depth[my_tail]; | |
| } | | } | |
|
| | | depth_t back_depth() { | |
| | | __TBB_ASSERT(my_size > 0, "range_vector::back_depth() with empty si | |
| | | ze"); | |
| | | return my_depth[my_head]; | |
| | | } | |
| | | bool is_divisible(depth_t max_depth) { | |
| | | return back_depth() < max_depth && back().is_divisible(); | |
| | | } | |
| }; | | }; | |
| | | | |
| //! Provides default methods for partition objects and common algorithm blo
cks. | | //! Provides default methods for partition objects and common algorithm blo
cks. | |
| template <typename Partition> | | template <typename Partition> | |
| struct partition_type_base { | | struct partition_type_base { | |
|
| | | typedef split split_type; | |
| // decision makers | | // decision makers | |
| void set_affinity( task & ) {} | | void set_affinity( task & ) {} | |
| void note_affinity( task::affinity_id ) {} | | void note_affinity( task::affinity_id ) {} | |
| bool check_being_stolen(task &) { return false; } // part of old should
_execute_range() | | bool check_being_stolen(task &) { return false; } // part of old should
_execute_range() | |
| bool check_for_demand(task &) { return false; } | | bool check_for_demand(task &) { return false; } | |
| bool is_divisible() { return true; } // part of old should_execute_rang
e() | | bool is_divisible() { return true; } // part of old should_execute_rang
e() | |
| depth_t max_depth() { return 0; } | | depth_t max_depth() { return 0; } | |
| void align_depth(depth_t) { } | | void align_depth(depth_t) { } | |
|
| | | template <typename Range> split_type get_split() { return split(); } | |
| | | | |
| // common function blocks | | // common function blocks | |
| Partition& self() { return *static_cast<Partition*>(this); } // CRTP he
lper | | Partition& self() { return *static_cast<Partition*>(this); } // CRTP he
lper | |
| template<typename StartType, typename Range> | | template<typename StartType, typename Range> | |
| void execute(StartType &start, Range &range) { | | void execute(StartType &start, Range &range) { | |
| // The algorithm in a few words ([]-denotes calls to decision metho
ds of partitioner): | | // The algorithm in a few words ([]-denotes calls to decision metho
ds of partitioner): | |
| // [If this task is stolen, adjust depth and divisions if necessary
, set flag]. | | // [If this task is stolen, adjust depth and divisions if necessary
, set flag]. | |
| // If range is divisible { | | // If range is divisible { | |
| // Spread the work while [initial divisions left]; | | // Spread the work while [initial divisions left]; | |
| // Create trap task [if necessary]; | | // Create trap task [if necessary]; | |
| // } | | // } | |
| // If not divisible or [max depth is reached], execute, else do the
range pool part | | // If not divisible or [max depth is reached], execute, else do the
range pool part | |
| if ( range.is_divisible() ) { | | if ( range.is_divisible() ) { | |
|
| if ( self().is_divisible() ) | | if ( self().is_divisible() ) { | |
| do start.offer_work( split() ); // split until is divisible | | do { // split until is divisible | |
| while ( range.is_divisible() && self().is_divisible() ); | | typename Partition::split_type split_obj = self().templ | |
| | | ate get_split<Range>(); | |
| | | start.offer_work( split_obj ); | |
| | | } while ( range.is_divisible() && self().is_divisible() ); | |
| | | } | |
| } | | } | |
| if( !range.is_divisible() || !self().max_depth() ) | | if( !range.is_divisible() || !self().max_depth() ) | |
| start.run_body( range ); // simple partitioner goes always here | | start.run_body( range ); // simple partitioner goes always here | |
| else { // do range pool | | else { // do range pool | |
| internal::range_vector<Range, Partition::range_pool_size> range
_pool(range); | | internal::range_vector<Range, Partition::range_pool_size> range
_pool(range); | |
| do { | | do { | |
| range_pool.split_to_fill(self().max_depth()); // fill range
pool | | range_pool.split_to_fill(self().max_depth()); // fill range
pool | |
| if( self().check_for_demand( start ) ) { | | if( self().check_for_demand( start ) ) { | |
| if( range_pool.size() > 1 ) { | | if( range_pool.size() > 1 ) { | |
| start.offer_work( range_pool.front(), range_pool.fr
ont_depth() ); | | start.offer_work( range_pool.front(), range_pool.fr
ont_depth() ); | |
| range_pool.pop_front(); | | range_pool.pop_front(); | |
| continue; | | continue; | |
| } | | } | |
|
| if( range_pool.back().is_divisible() ) // was not enoug | | if( range_pool.is_divisible(self().max_depth()) ) // wa | |
| h depth to fork a task | | s not enough depth to fork a task | |
| continue; // note: check_for_demand() should guaran | | continue; // note: next split_to_fill() should spli | |
| tee increasing max_depth() next time | | t range at least once | |
| } | | } | |
| start.run_body( range_pool.back() ); | | start.run_body( range_pool.back() ); | |
| range_pool.pop_back(); | | range_pool.pop_back(); | |
| } while( !range_pool.empty() && !start.is_cancelled() ); | | } while( !range_pool.empty() && !start.is_cancelled() ); | |
| } | | } | |
| } | | } | |
| }; | | }; | |
| | | | |
| //! Provides default methods for auto (adaptive) partition objects. | | //! Provides default methods for auto (adaptive) partition objects. | |
| template <typename Partition> | | template <typename Partition> | |
|
| struct auto_partition_type_base : partition_type_base<Partition> { | | struct adaptive_partition_type_base : partition_type_base<Partition> { | |
| size_t my_divisor; | | size_t my_divisor; | |
| depth_t my_max_depth; | | depth_t my_max_depth; | |
|
| auto_partition_type_base() : my_max_depth(__TBB_INIT_DEPTH) { | | adaptive_partition_type_base() : my_max_depth(__TBB_INIT_DEPTH) { | |
| my_divisor = tbb::internal::get_initial_auto_partitioner_divisor()* | | my_divisor = tbb::internal::get_initial_auto_partitioner_divisor() | |
| __TBB_INITIAL_CHUNKS/4; | | / 4; | |
| __TBB_ASSERT(my_divisor, "initial value of get_initial_auto_partiti
oner_divisor() is not valid"); | | __TBB_ASSERT(my_divisor, "initial value of get_initial_auto_partiti
oner_divisor() is not valid"); | |
| } | | } | |
|
| auto_partition_type_base(auto_partition_type_base &src, split) { | | adaptive_partition_type_base(adaptive_partition_type_base &src, split)
{ | |
| my_max_depth = src.my_max_depth; | | my_max_depth = src.my_max_depth; | |
|
| | | #if TBB_USE_ASSERT | |
| | | size_t old_divisor = src.my_divisor; | |
| | | #endif | |
| | | | |
| #if __TBB_INITIAL_TASK_IMBALANCE | | #if __TBB_INITIAL_TASK_IMBALANCE | |
| if( src.my_divisor <= 1 ) my_divisor = 0; | | if( src.my_divisor <= 1 ) my_divisor = 0; | |
|
| else my_divisor = src.my_divisor = (src.my_divisor+1u) / 2u; | | else my_divisor = src.my_divisor = (src.my_divisor + 1u) / 2u; | |
| #else | | #else | |
| my_divisor = src.my_divisor / 2u; | | my_divisor = src.my_divisor / 2u; | |
| src.my_divisor = src.my_divisor - my_divisor; // TODO: check the ef
fect separately | | src.my_divisor = src.my_divisor - my_divisor; // TODO: check the ef
fect separately | |
|
| if(my_divisor) src.my_max_depth += static_cast<depth_t>(__TBB_Log2(
src.my_divisor/my_divisor)); | | if (my_divisor) src.my_max_depth += static_cast<depth_t>(__TBB_Log2
(src.my_divisor / my_divisor)); | |
| #endif | | #endif | |
|
| | | // For affinity_partitioner, my_divisor indicates the number of aff | |
| | | inity array indices the task reserves. | |
| | | // A task which has only one index must produce the right split wit | |
| | | hout reserved index in order to avoid | |
| | | // it to be overwritten in note_affinity() of the created (right) t | |
| | | ask. | |
| | | // I.e. a task created deeper than the affinity array can remember | |
| | | must not save its affinity (LIFO order) | |
| | | __TBB_ASSERT( (old_divisor <= 1 && my_divisor == 0) || | |
| | | (old_divisor > 1 && my_divisor != 0), NULL); | |
| | | } | |
| | | adaptive_partition_type_base(adaptive_partition_type_base &src, const p | |
| | | roportional_split& split_obj) { | |
| | | my_max_depth = src.my_max_depth; | |
| | | my_divisor = size_t(float(src.my_divisor) * float(split_obj.right() | |
| | | ) | |
| | | / float(split_obj.left() + split_obj.right())); | |
| | | src.my_divisor -= my_divisor; | |
| } | | } | |
| bool check_being_stolen( task &t) { // part of old should_execute_range
() | | bool check_being_stolen( task &t) { // part of old should_execute_range
() | |
| if( !my_divisor ) { // if not from the top P tasks of binary tree | | if( !my_divisor ) { // if not from the top P tasks of binary tree | |
| my_divisor = 1; // TODO: replace by on-stack flag (partition_st
ate's member)? | | my_divisor = 1; // TODO: replace by on-stack flag (partition_st
ate's member)? | |
|
| if( t.is_stolen_task() ) { | | if( t.is_stolen_task() && t.parent()->ref_count() >= 2 ) { // r
uns concurrently with the left task | |
| #if TBB_USE_EXCEPTIONS | | #if TBB_USE_EXCEPTIONS | |
| // RTTI is available, check whether the cast is valid | | // RTTI is available, check whether the cast is valid | |
| __TBB_ASSERT(dynamic_cast<flag_task*>(t.parent()), 0); | | __TBB_ASSERT(dynamic_cast<flag_task*>(t.parent()), 0); | |
| // correctness of the cast relies on avoiding the root task
for which: | | // correctness of the cast relies on avoiding the root task
for which: | |
| // - initial value of my_divisor != 0 (protected by separat
e assertion) | | // - initial value of my_divisor != 0 (protected by separat
e assertion) | |
| // - is_stolen_task() always returns false for the root tas
k. | | // - is_stolen_task() always returns false for the root tas
k. | |
| #endif | | #endif | |
| flag_task::mark_task_stolen(t); | | flag_task::mark_task_stolen(t); | |
|
| my_max_depth++; | | if( !my_max_depth ) my_max_depth++; | |
| | | my_max_depth += __TBB_DEMAND_DEPTH_ADD; | |
| return true; | | return true; | |
| } | | } | |
| } | | } | |
| return false; | | return false; | |
| } | | } | |
|
| bool is_divisible() { // part of old should_execute_range() | | | |
| if( my_divisor > 1 ) return true; | | | |
| if( my_divisor && my_max_depth > 1 ) { // can split the task and on | | | |
| ce more internally. TODO: on-stack flag instead | | | |
| // keep same fragmentation while splitting for the local task p | | | |
| ool | | | |
| my_max_depth--; | | | |
| my_divisor = 0; // decrease max_depth once per task | | | |
| return true; | | | |
| } else return false; | | | |
| } | | | |
| bool check_for_demand(task &t) { | | | |
| if( flag_task::is_peer_stolen(t) ) { | | | |
| my_max_depth++; | | | |
| return true; | | | |
| } else return false; | | | |
| } | | | |
| void align_depth(depth_t base) { | | void align_depth(depth_t base) { | |
| __TBB_ASSERT(base <= my_max_depth, 0); | | __TBB_ASSERT(base <= my_max_depth, 0); | |
| my_max_depth -= base; | | my_max_depth -= base; | |
| } | | } | |
| depth_t max_depth() { return my_max_depth; } | | depth_t max_depth() { return my_max_depth; } | |
| }; | | }; | |
| | | | |
|
| | | //! Helper that enables one or the other code branches (see example in is_r | |
| | | ange_divisible_in_proportion) | |
| | | template<bool C, typename T = void> struct enable_if { typedef T type; }; | |
| | | template<typename T> struct enable_if<false, T> { }; | |
| | | | |
 | | | //! Class that determines whether the template parameter has a static | |
 | | | //! boolean constant 'is_divisible_in_proportion' initialized with the | |
 | | | //! value 'true' or not. | |
 | | | /** If the template parameter has such a field and it is initialized | |
 | | | * with a non-zero value, the 'value' member is set to 'true'; | |
 | | | * otherwise it is 'false' | |
| | | */ | |
| | | template <typename Range> | |
| | | class is_range_divisible_in_proportion { | |
| | | private: | |
| | | typedef char yes[1]; | |
| | | typedef char no [2]; | |
| | | | |
| | | template <typename range_type> static yes& decide(typename enable_if<ra | |
| | | nge_type::is_divisible_in_proportion>::type *); | |
| | | template <typename range_type> static no& decide(...); | |
| | | public: | |
 | | | // equal to 'true' if and only if the static const variable | |
 | | | // 'is_divisible_in_proportion' of the template parameter is | |
 | | | // initialized with the value 'true' | |
| | | static const bool value = (sizeof(decide<Range>(0)) == sizeof(yes)); | |
| | | }; | |
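This trait only inspects the Range type: a range opts into proportional splitting by exposing the is_divisible_in_proportion constant and by accepting a proportional_split in a splitting constructor, which the affinity partitioner then feeds with the left/right weights produced by get_split(). A hedged sketch of such an opt-in range (my_range and its members are illustrative, and the sketch assumes proportional_split is reachable as tbb::proportional_split, as it is used unqualified inside namespace tbb above):

    #include "tbb/partitioner.h"

    // Illustrative range over [my_begin, my_end) that honors the proportion
    // requested by the partitioner instead of always splitting in half.
    class my_range {
        size_t my_begin, my_end;
    public:
        static const bool is_divisible_in_proportion = true;  // examined by the trait above

        my_range( size_t b, size_t e ) : my_begin(b), my_end(e) {}
        bool empty() const { return my_begin == my_end; }
        bool is_divisible() const { return my_end - my_begin > 1; }

        // ordinary halving split, still required
        my_range( my_range &r, tbb::split )
            : my_begin( (r.my_begin + r.my_end) / 2 ), my_end( r.my_end )
        { r.my_end = my_begin; }

        // proportional split: this object takes roughly right/(left+right) of r
        my_range( my_range &r, const tbb::proportional_split &p )
            : my_end( r.my_end )
        {
            size_t n = r.my_end - r.my_begin;
            my_begin = r.my_begin + n * p.left() / ( p.left() + p.right() );
            r.my_end = my_begin;
        }
    };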
| | | | |
| //! Provides default methods for affinity (adaptive) partition objects. | | //! Provides default methods for affinity (adaptive) partition objects. | |
|
| class affinity_partition_type : public auto_partition_type_base<affinity_pa
rtition_type> { | | class affinity_partition_type : public adaptive_partition_type_base<affinit
y_partition_type> { | |
| static const unsigned factor_power = 4; | | static const unsigned factor_power = 4; | |
|
| static const unsigned factor = 1<<factor_power; | | static const unsigned factor = 1<<factor_power; // number of slots in | |
| bool my_delay; | | affinity array per task | |
| unsigned map_begin, map_end, map_mid; | | enum { | |
| | | start = 0, | |
| | | run, | |
| | | pass | |
| | | } my_delay; | |
| | | #ifdef __TBB_USE_MACHINE_TIME_STAMPS | |
| | | machine_tsc_t my_dst_tsc; | |
| | | #endif | |
| | | size_t my_begin; | |
| tbb::internal::affinity_id* my_array; | | tbb::internal::affinity_id* my_array; | |
|
| void set_mid() { | | | |
| unsigned d = (map_end - map_begin)/2; // we could add 1 but it is r | | | |
| ather for LIFO affinity | | | |
| if( d > factor ) | | | |
| d &= 0u-factor; | | | |
| map_mid = map_end - d; | | | |
| } | | | |
| public: | | public: | |
|
| affinity_partition_type( tbb::internal::affinity_partitioner_base_v3& a | | typedef proportional_split split_type; | |
| p ) { | | | |
| | | affinity_partition_type( tbb::internal::affinity_partitioner_base_v3& a | |
| | | p ) | |
| | | : adaptive_partition_type_base<affinity_partition_type>(), | |
| | | my_delay(start) | |
| | | #ifdef __TBB_USE_MACHINE_TIME_STAMPS | |
| | | , my_dst_tsc(0) | |
| | | #endif | |
| | | { | |
| __TBB_ASSERT( (factor&(factor-1))==0, "factor must be power of two"
); | | __TBB_ASSERT( (factor&(factor-1))==0, "factor must be power of two"
); | |
|
| | | my_divisor *= factor; | |
| ap.resize(factor); | | ap.resize(factor); | |
| my_array = ap.my_array; | | my_array = ap.my_array; | |
|
| map_begin = 0; | | my_begin = 0; | |
| map_end = unsigned(ap.my_size); | | my_max_depth = factor_power + 1; // the first factor_power ranges w | |
| set_mid(); | | ill be spawned, and >=1 ranges should be left | |
| my_delay = true; | | | |
| my_divisor /= __TBB_INITIAL_CHUNKS; // let exactly P tasks to be di | | | |
| stributed across workers | | | |
| my_max_depth = factor_power+1; // the first factor_power ranges wil | | | |
| l be spawned, and >=1 ranges should be left | | | |
| __TBB_ASSERT( my_max_depth < __TBB_RANGE_POOL_CAPACITY, 0 ); | | __TBB_ASSERT( my_max_depth < __TBB_RANGE_POOL_CAPACITY, 0 ); | |
| } | | } | |
| affinity_partition_type(affinity_partition_type& p, split) | | affinity_partition_type(affinity_partition_type& p, split) | |
|
| : auto_partition_type_base<affinity_partition_type>(p, split()), my | | : adaptive_partition_type_base<affinity_partition_type>(p, split()) | |
| _array(p.my_array) { | | , | |
| __TBB_ASSERT( p.map_end-p.map_begin<factor || (p.map_end-p.map_begi | | my_delay(pass), | |
| n)%factor==0, NULL ); | | #ifdef __TBB_USE_MACHINE_TIME_STAMPS | |
| map_end = p.map_end; | | my_dst_tsc(0), | |
| map_begin = p.map_end = p.map_mid; | | #endif | |
| set_mid(); p.set_mid(); | | my_array(p.my_array) { | |
| my_delay = p.my_delay; | | // the sum of the divisors represents original value of p.my_diviso | |
| | | r before split | |
| | | __TBB_ASSERT(my_divisor + p.my_divisor <= factor, NULL); | |
| | | my_begin = p.my_begin + p.my_divisor; | |
| | | } | |
| | | affinity_partition_type(affinity_partition_type& p, const proportional_ | |
| | | split& split_obj) | |
| | | : adaptive_partition_type_base<affinity_partition_type>(p, split_ob | |
| | | j), | |
| | | my_delay(start), | |
| | | #ifdef __TBB_USE_MACHINE_TIME_STAMPS | |
| | | my_dst_tsc(0), | |
| | | #endif | |
| | | my_array(p.my_array) { | |
| | | size_t total_divisor = my_divisor + p.my_divisor; | |
| | | __TBB_ASSERT(total_divisor % factor == 0, NULL); | |
| | | my_divisor = (my_divisor + factor/2) & (0u - factor); | |
| | | if (!my_divisor) | |
| | | my_divisor = factor; | |
| | | else if (my_divisor == total_divisor) | |
| | | my_divisor = total_divisor - factor; | |
| | | p.my_divisor = total_divisor - my_divisor; | |
| | | __TBB_ASSERT(my_divisor && p.my_divisor, NULL); | |
| | | my_begin = p.my_begin + p.my_divisor; | |
| } | | } | |
| void set_affinity( task &t ) { | | void set_affinity( task &t ) { | |
|
| if( map_begin<map_end ) | | if( my_divisor ) { | |
| t.set_affinity( my_array[map_begin] ); | | if( !my_array[my_begin] ) { | |
| | | // TODO: consider code reuse for static_paritioner | |
| | | my_array[my_begin] = affinity_id(my_begin / factor + 1); | |
| | | } | |
| | | t.set_affinity( my_array[my_begin] ); | |
| | | } | |
| } | | } | |
| void note_affinity( task::affinity_id id ) { | | void note_affinity( task::affinity_id id ) { | |
|
| if( map_begin<map_end ) | | if( my_divisor ) | |
| my_array[map_begin] = id; | | my_array[my_begin] = id; | |
| } | | } | |
| bool check_for_demand( task &t ) { | | bool check_for_demand( task &t ) { | |
|
| if( !my_delay ) { | | if( pass == my_delay ) { | |
| if( map_mid<map_end ) { | | if( my_divisor > 1 ) // produce affinitized tasks while they ha | |
| __TBB_ASSERT(my_max_depth>__TBB_Log2(map_end-map_mid), 0); | | ve slot in array | |
| return true;// do not do my_max_depth++ here, but be sure m | | return true; // do not do my_max_depth++ here, but be sure | |
| y_max_depth is big enough | | range_pool is splittable once more | |
| | | else if( my_divisor && my_max_depth ) { // make balancing task | |
| | | my_divisor = 0; // once for each task; depth will be decrea | |
| | | sed in align_depth() | |
| | | return true; | |
| } | | } | |
|
| if( flag_task::is_peer_stolen(t) ) { | | else if( flag_task::is_peer_stolen(t) ) { | |
| my_max_depth++; | | my_max_depth += __TBB_DEMAND_DEPTH_ADD; | |
| return true; | | return true; | |
| } | | } | |
|
| } else my_delay = false; | | } else if( start == my_delay ) { | |
| | | #ifndef __TBB_USE_MACHINE_TIME_STAMPS | |
| | | my_delay = pass; | |
| | | #else | |
| | | my_dst_tsc = __TBB_machine_time_stamp() + __TBB_task_duration() | |
| | | ; | |
| | | my_delay = run; | |
| | | } else if( run == my_delay ) { | |
| | | if( __TBB_machine_time_stamp() < my_dst_tsc ) { | |
| | | __TBB_ASSERT(my_max_depth > 0, NULL); | |
| | | return false; | |
| | | } | |
| | | my_delay = pass; | |
| | | return true; | |
| | | #endif // __TBB_USE_MACHINE_TIME_STAMPS | |
| | | } | |
| return false; | | return false; | |
| } | | } | |
|
| bool is_divisible() { | | bool is_divisible() { // part of old should_execute_range() | |
| return my_divisor > 1; | | return my_divisor > factor; | |
| } | | } | |
|
| | | | |
| | | #if _MSC_VER && !defined(__INTEL_COMPILER) | |
| | | // Suppress "conditional expression is constant" warning. | |
| | | #pragma warning( push ) | |
| | | #pragma warning( disable: 4127 ) | |
| | | #endif | |
| | | template <typename Range> | |
| | | split_type get_split() { | |
| | | if (is_range_divisible_in_proportion<Range>::value) { | |
| | | size_t size = my_divisor / factor; | |
| | | #if __TBB_NONUNIFORM_TASK_CREATION | |
| | | size_t right = (size + 2) / 3; | |
| | | #else | |
| | | size_t right = size / 2; | |
| | | #endif | |
| | | size_t left = size - right; | |
| | | return split_type(left, right); | |
| | | } else { | |
| | | return split_type(1, 1); | |
| | | } | |
| | | } | |
| | | #if _MSC_VER && !defined(__INTEL_COMPILER) | |
| | | #pragma warning( pop ) | |
| | | #endif // warning 4127 is back | |
| | | | |
| static const unsigned range_pool_size = __TBB_RANGE_POOL_CAPACITY; | | static const unsigned range_pool_size = __TBB_RANGE_POOL_CAPACITY; | |
| }; | | }; | |
| | | | |
|
| class auto_partition_type: public auto_partition_type_base<auto_partition_t
ype> { | | class auto_partition_type: public adaptive_partition_type_base<auto_partiti
on_type> { | |
| public: | | public: | |
|
| auto_partition_type( const auto_partitioner& ) {} | | auto_partition_type( const auto_partitioner& ) { | |
| | | my_divisor *= __TBB_INITIAL_CHUNKS; | |
| | | } | |
| auto_partition_type( auto_partition_type& src, split) | | auto_partition_type( auto_partition_type& src, split) | |
|
| : auto_partition_type_base<auto_partition_type>(src, split()) {} | | : adaptive_partition_type_base<auto_partition_type>(src, split()) {} | |
| | | | |
| | | bool is_divisible() { // part of old should_execute_range() | |
| | | if( my_divisor > 1 ) return true; | |
| | | if( my_divisor && my_max_depth ) { // can split the task. TODO: on- | |
| | | stack flag instead | |
| | | // keep same fragmentation while splitting for the local task p | |
| | | ool | |
| | | my_max_depth--; | |
| | | my_divisor = 0; // decrease max_depth once per task | |
| | | return true; | |
| | | } else return false; | |
| | | } | |
| | | bool check_for_demand(task &t) { | |
| | | if( flag_task::is_peer_stolen(t) ) { | |
| | | my_max_depth += __TBB_DEMAND_DEPTH_ADD; | |
| | | return true; | |
| | | } else return false; | |
| | | } | |
| | | | |
| static const unsigned range_pool_size = __TBB_RANGE_POOL_CAPACITY; | | static const unsigned range_pool_size = __TBB_RANGE_POOL_CAPACITY; | |
| }; | | }; | |
| | | | |
| class simple_partition_type: public partition_type_base<simple_partition_ty
pe> { | | class simple_partition_type: public partition_type_base<simple_partition_ty
pe> { | |
| public: | | public: | |
| simple_partition_type( const simple_partitioner& ) {} | | simple_partition_type( const simple_partitioner& ) {} | |
| simple_partition_type( const simple_partition_type&, split ) {} | | simple_partition_type( const simple_partition_type&, split ) {} | |
| //! simplified algorithm | | //! simplified algorithm | |
| template<typename StartType, typename Range> | | template<typename StartType, typename Range> | |
| void execute(StartType &start, Range &range) { | | void execute(StartType &start, Range &range) { | |
|
| | | split_type split_obj = split(); // start.offer_work accepts split_t
ype as reference | |
| while( range.is_divisible() ) | | while( range.is_divisible() ) | |
|
| start.offer_work( split() ); | | start.offer_work( split_obj ); | |
| start.run_body( range ); | | start.run_body( range ); | |
| } | | } | |
| //static const unsigned range_pool_size = 1; - not necessary because ex
ecute() is overridden | | //static const unsigned range_pool_size = 1; - not necessary because ex
ecute() is overridden | |
| }; | | }; | |
| | | | |
| //! Backward-compatible partition for auto and affinity partition objects. | | //! Backward-compatible partition for auto and affinity partition objects. | |
| class old_auto_partition_type: public tbb::internal::partition_type_base { | | class old_auto_partition_type: public tbb::internal::partition_type_base { | |
| size_t num_chunks; | | size_t num_chunks; | |
| static const size_t VICTIM_CHUNKS = 4; | | static const size_t VICTIM_CHUNKS = 4; | |
| public: | | public: | |
| | | | |
| skipping to change at line 421 | | skipping to change at line 571 | |
| //! @endcond | | //! @endcond | |
| } // namespace interfaceX | | } // namespace interfaceX | |
| | | | |
| //! A simple partitioner | | //! A simple partitioner | |
| /** Divides the range until the range is not divisible. | | /** Divides the range until the range is not divisible. | |
| @ingroup algorithms */ | | @ingroup algorithms */ | |
| class simple_partitioner { | | class simple_partitioner { | |
| public: | | public: | |
| simple_partitioner() {} | | simple_partitioner() {} | |
| private: | | private: | |
|
| template<typename Range, typename Body, typename Partitioner> friend cl | | template<typename Range, typename Body, typename Partitioner> friend cl | |
| ass serial::interface6::start_for; | | ass serial::interface7::start_for; | |
| template<typename Range, typename Body, typename Partitioner> friend cl | | template<typename Range, typename Body, typename Partitioner> friend cl | |
| ass interface6::internal::start_for; | | ass interface7::internal::start_for; | |
| template<typename Range, typename Body, typename Partitioner> friend cl | | template<typename Range, typename Body, typename Partitioner> friend cl | |
| ass interface6::internal::start_reduce; | | ass interface7::internal::start_reduce; | |
| template<typename Range, typename Body, typename Partitioner> friend cl
ass internal::start_scan; | | template<typename Range, typename Body, typename Partitioner> friend cl
ass internal::start_scan; | |
| // backward compatibility | | // backward compatibility | |
| class partition_type: public internal::partition_type_base { | | class partition_type: public internal::partition_type_base { | |
| public: | | public: | |
| bool should_execute_range(const task& ) {return false;} | | bool should_execute_range(const task& ) {return false;} | |
| partition_type( const simple_partitioner& ) {} | | partition_type( const simple_partitioner& ) {} | |
| partition_type( const partition_type&, split ) {} | | partition_type( const partition_type&, split ) {} | |
| }; | | }; | |
| // new implementation just extends existing interface | | // new implementation just extends existing interface | |
|
| typedef interface6::internal::simple_partition_type task_partition_type | | typedef interface7::internal::simple_partition_type task_partition_type | |
| ; | | ; | |
| | | | |
| | | // TODO: consider making split_type public | |
| | | typedef interface7::internal::simple_partition_type::split_type split_t | |
| | | ype; | |
| }; | | }; | |
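For orientation, a minimal usage sketch (not part of this header): simple_partitioner leaves chunk size entirely to the grainsize passed to the blocked_range, so every subrange is split down to that size. The function name and the grainsize of 1000 below are illustrative assumptions; the TBB calls themselves are the ordinary public API.

    #include <cstddef>
    #include "tbb/blocked_range.h"
    #include "tbb/parallel_for.h"
    #include "tbb/partitioner.h"

    // Every subrange is split until it is no longer divisible, i.e. until its
    // size drops to the grainsize supplied to the blocked_range.
    void scale_by_two(float* a, std::size_t n) {
        tbb::parallel_for(
            tbb::blocked_range<std::size_t>(0, n, /*grainsize=*/1000),
            [=](const tbb::blocked_range<std::size_t>& r) {
                for (std::size_t i = r.begin(); i != r.end(); ++i)
                    a[i] *= 2.0f;
            },
            tbb::simple_partitioner());
    }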
| | | | |
| //! An auto partitioner | | //! An auto partitioner | |
| /** The range is initially divided into several large chunks. | | /** The range is initially divided into several large chunks. | |
| Chunks are further subdivided into smaller pieces if demand is detected and they are divisible. | | Chunks are further subdivided into smaller pieces if demand is detected and they are divisible. | |
| @ingroup algorithms */ | | @ingroup algorithms */ | |
| class auto_partitioner { | | class auto_partitioner { | |
| public: | | public: | |
| auto_partitioner() {} | | auto_partitioner() {} | |
| | | | |
| private: | | private: | |
|
| template<typename Range, typename Body, typename Partitioner> friend cl | | template<typename Range, typename Body, typename Partitioner> friend cl | |
| ass serial::interface6::start_for; | | ass serial::interface7::start_for; | |
| template<typename Range, typename Body, typename Partitioner> friend cl | | template<typename Range, typename Body, typename Partitioner> friend cl | |
| ass interface6::internal::start_for; | | ass interface7::internal::start_for; | |
| template<typename Range, typename Body, typename Partitioner> friend cl | | template<typename Range, typename Body, typename Partitioner> friend cl | |
| ass interface6::internal::start_reduce; | | ass interface7::internal::start_reduce; | |
| template<typename Range, typename Body, typename Partitioner> friend cl
ass internal::start_scan; | | template<typename Range, typename Body, typename Partitioner> friend cl
ass internal::start_scan; | |
| // backward compatibility | | // backward compatibility | |
|
| typedef interface6::internal::old_auto_partition_type partition_type; | | typedef interface7::internal::old_auto_partition_type partition_type; | |
| // new implementation just extends existing interface | | // new implementation just extends existing interface | |
|
| typedef interface6::internal::auto_partition_type task_partition_type; | | typedef interface7::internal::auto_partition_type task_partition_type; | |
| | | | |
| | | // TODO: consider making split_type public | |
| | | typedef interface7::internal::auto_partition_type::split_type split_typ | |
| | | e; | |
| }; | | }; | |
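A hedged usage sketch for comparison (again not part of the header): with auto_partitioner the caller does not tune grainsize; the partitioner starts from a few large chunks and, as the is_divisible()/check_for_demand() logic earlier in this diff suggests, splits further only when idle threads create demand. The summation below is an arbitrary example.

    #include <cstddef>
    #include <functional>
    #include "tbb/blocked_range.h"
    #include "tbb/parallel_reduce.h"
    #include "tbb/partitioner.h"

    // Sum an array; chunking is left entirely to auto_partitioner.
    double sum(const double* a, std::size_t n) {
        return tbb::parallel_reduce(
            tbb::blocked_range<std::size_t>(0, n),
            0.0,
            [=](const tbb::blocked_range<std::size_t>& r, double acc) -> double {
                for (std::size_t i = r.begin(); i != r.end(); ++i)
                    acc += a[i];
                return acc;
            },
            std::plus<double>(),
            tbb::auto_partitioner());
    }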
| | | | |
| //! An affinity partitioner | | //! An affinity partitioner | |
| class affinity_partitioner: internal::affinity_partitioner_base_v3 { | | class affinity_partitioner: internal::affinity_partitioner_base_v3 { | |
| public: | | public: | |
| affinity_partitioner() {} | | affinity_partitioner() {} | |
| | | | |
| private: | | private: | |
|
| template<typename Range, typename Body, typename Partitioner> friend cl | | template<typename Range, typename Body, typename Partitioner> friend cl | |
| ass serial::interface6::start_for; | | ass serial::interface7::start_for; | |
| template<typename Range, typename Body, typename Partitioner> friend cl | | template<typename Range, typename Body, typename Partitioner> friend cl | |
| ass interface6::internal::start_for; | | ass interface7::internal::start_for; | |
| template<typename Range, typename Body, typename Partitioner> friend cl | | template<typename Range, typename Body, typename Partitioner> friend cl | |
| ass interface6::internal::start_reduce; | | ass interface7::internal::start_reduce; | |
| template<typename Range, typename Body, typename Partitioner> friend cl
ass internal::start_scan; | | template<typename Range, typename Body, typename Partitioner> friend cl
ass internal::start_scan; | |
| // backward compatibility - for parallel_scan only | | // backward compatibility - for parallel_scan only | |
|
| typedef interface6::internal::old_auto_partition_type partition_type; | | typedef interface7::internal::old_auto_partition_type partition_type; | |
| // new implementation just extends existing interface | | // new implementation just extends existing interface | |
|
| typedef interface6::internal::affinity_partition_type task_partition_ty | | typedef interface7::internal::affinity_partition_type task_partition_ty | |
| pe; | | pe; | |
| | | | |
| | | // TODO: consider making split_type public | |
| | | typedef interface7::internal::affinity_partition_type::split_type split | |
| | | _type; | |
| }; | | }; | |
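Unlike the other two partitioners, affinity_partitioner is stateful and is passed by non-const reference, so the same object should be reused across repeated loops over the same data. A minimal sketch under that assumption (the per-element update is arbitrary):

    #include <cstddef>
    #include "tbb/blocked_range.h"
    #include "tbb/parallel_for.h"
    #include "tbb/partitioner.h"

    // Reusing one affinity_partitioner lets TBB replay the thread-to-chunk
    // mapping recorded on earlier sweeps, which helps cache reuse.
    void smooth(float* x, std::size_t n, int sweeps) {
        tbb::affinity_partitioner ap;
        for (int s = 0; s < sweeps; ++s) {
            tbb::parallel_for(
                tbb::blocked_range<std::size_t>(0, n),
                [=](const tbb::blocked_range<std::size_t>& r) {
                    for (std::size_t i = r.begin(); i != r.end(); ++i)
                        x[i] = 0.5f * x[i] + 1.0f;
                },
                ap);
        }
    }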
| | | | |
| } // namespace tbb | | } // namespace tbb | |
| | | | |
| #if defined(_MSC_VER) && !defined(__INTEL_COMPILER) | | #if defined(_MSC_VER) && !defined(__INTEL_COMPILER) | |
| #pragma warning (pop) | | #pragma warning (pop) | |
| #endif // warning 4244 is back | | #endif // warning 4244 is back | |
| #undef __TBB_INITIAL_CHUNKS | | #undef __TBB_INITIAL_CHUNKS | |
| #undef __TBB_RANGE_POOL_CAPACITY | | #undef __TBB_RANGE_POOL_CAPACITY | |
| #undef __TBB_INIT_DEPTH | | #undef __TBB_INIT_DEPTH | |
| | | | |
End of changes. 53 change blocks. |
| 112 lines changed or deleted | | 290 lines changed or added | |
|
| tbb_config.h | | tbb_config.h | |
| | | | |
| skipping to change at line 120 | | skipping to change at line 120 | |
| #if (__has_feature(__cxx_generalized_initializers__) && __has_inclu
de(<initializer_list>)) | | #if (__has_feature(__cxx_generalized_initializers__) && __has_inclu
de(<initializer_list>)) | |
| #define __TBB_INITIALIZER_LISTS_PRESENT 1 | | #define __TBB_INITIALIZER_LISTS_PRESENT 1 | |
| #endif | | #endif | |
| #else | | #else | |
| /** TODO: when MSVC2013 is supported by the Intel C++ compiler, this will be enabled silently by the compiler, so the rule will need to be updated. **/ | | /** TODO: when MSVC2013 is supported by the Intel C++ compiler, this will be enabled silently by the compiler, so the rule will need to be updated. **/ | |
| #define __TBB_INITIALIZER_LISTS_PRESENT __INTEL_CXX11_MODE__
&& __INTEL_COMPILER >= 1400 && (_MSC_VER >= 1800 || __TBB_GCC_VERSION >= 40
400 || _LIBCPP_VERSION) | | #define __TBB_INITIALIZER_LISTS_PRESENT __INTEL_CXX11_MODE__
&& __INTEL_COMPILER >= 1400 && (_MSC_VER >= 1800 || __TBB_GCC_VERSION >= 40
400 || _LIBCPP_VERSION) | |
| #endif | | #endif | |
| | | | |
| #define __TBB_CONSTEXPR_PRESENT __INTEL_CXX11_MODE__
&& __INTEL_COMPILER >= 1400 | | #define __TBB_CONSTEXPR_PRESENT __INTEL_CXX11_MODE__
&& __INTEL_COMPILER >= 1400 | |
| #define __TBB_DEFAULTED_AND_DELETED_FUNC_PRESENT __INTEL_CXX11_MODE__
&& __INTEL_COMPILER >= 1200 | | #define __TBB_DEFAULTED_AND_DELETED_FUNC_PRESENT __INTEL_CXX11_MODE__
&& __INTEL_COMPILER >= 1200 | |
|
| | | /** ICC seems to disable support of noexcept even in C++11 when compiling in compatibility mode for gcc <4.6 **/ | |
| | | #define __TBB_NOEXCEPT_PRESENT __INTEL_CXX11_MODE__ && __INTEL_COMPILER >= 1300 && (__TBB_GCC_VERSION >= 40600 || _LIBCPP_VERSION || _MSC_VER) | |
| #elif __clang__ | | #elif __clang__ | |
| //TODO: these options need to be rechecked | | //TODO: these options need to be rechecked | |
| /** on OS X* the only way to get C++11 is to use clang. For library feature
s (e.g. exception_ptr) libc++ is also | | /** on OS X* the only way to get C++11 is to use clang. For library feature
s (e.g. exception_ptr) libc++ is also | |
| * required. So there is no need to check GCC version for clang**/ | | * required. So there is no need to check GCC version for clang**/ | |
| #define __TBB_CPP11_VARIADIC_TEMPLATES_PRESENT __has_feature(__cxx_
variadic_templates__) | | #define __TBB_CPP11_VARIADIC_TEMPLATES_PRESENT __has_feature(__cxx_
variadic_templates__) | |
| #define __TBB_CPP11_RVALUE_REF_PRESENT __has_feature(__cxx_
rvalue_references__) | | #define __TBB_CPP11_RVALUE_REF_PRESENT __has_feature(__cxx_
rvalue_references__) | |
| /** TODO: extend exception_ptr related conditions to cover libstdc++ **/ | | /** TODO: extend exception_ptr related conditions to cover libstdc++ **/ | |
| #define __TBB_EXCEPTION_PTR_PRESENT (__cplusplus >= 20110
3L && _LIBCPP_VERSION) | | #define __TBB_EXCEPTION_PTR_PRESENT (__cplusplus >= 20110
3L && _LIBCPP_VERSION) | |
| #define __TBB_MAKE_EXCEPTION_PTR_PRESENT (__cplusplus >= 20110
3L && _LIBCPP_VERSION) | | #define __TBB_MAKE_EXCEPTION_PTR_PRESENT (__cplusplus >= 20110
3L && _LIBCPP_VERSION) | |
| #define __TBB_STATIC_ASSERT_PRESENT __has_feature(__cxx_s
tatic_assert__) | | #define __TBB_STATIC_ASSERT_PRESENT __has_feature(__cxx_s
tatic_assert__) | |
| /** The Clang preprocessor has problems dealing with expressions that use __has_include in #if's | | /** The Clang preprocessor has problems dealing with expressions that use __has_include in #if's | |
| * used inside C++ code (at least the version that comes with OS X 10.8). **/ | | * used inside C++ code (at least the version that comes with OS X 10.8). **/ | |
| #if (__GXX_EXPERIMENTAL_CXX0X__ && __has_include(<tuple>)) | | #if (__GXX_EXPERIMENTAL_CXX0X__ && __has_include(<tuple>)) | |
| #define __TBB_CPP11_TUPLE_PRESENT 1 | | #define __TBB_CPP11_TUPLE_PRESENT 1 | |
| #endif | | #endif | |
| #if (__has_feature(__cxx_generalized_initializers__) && __has_include(<
initializer_list>)) | | #if (__has_feature(__cxx_generalized_initializers__) && __has_include(<
initializer_list>)) | |
| #define __TBB_INITIALIZER_LISTS_PRESENT 1 | | #define __TBB_INITIALIZER_LISTS_PRESENT 1 | |
| #endif | | #endif | |
| #define __TBB_CONSTEXPR_PRESENT __has_feature(__cxx_c
onstexpr__) | | #define __TBB_CONSTEXPR_PRESENT __has_feature(__cxx_c
onstexpr__) | |
| #define __TBB_DEFAULTED_AND_DELETED_FUNC_PRESENT (__has_feature(__cxx_
defaulted_functions__) && __has_feature(__cxx_deleted_functions__)) | | #define __TBB_DEFAULTED_AND_DELETED_FUNC_PRESENT (__has_feature(__cxx_
defaulted_functions__) && __has_feature(__cxx_deleted_functions__)) | |
|
| | | /** For some unknown reason __has_feature(__cxx_noexcept) does not yield true in all cases. A compiler bug? **/ | |
| | | #define __TBB_NOEXCEPT_PRESENT (__cplusplus >= 201103L) | |
| #elif __GNUC__ | | #elif __GNUC__ | |
| #define __TBB_CPP11_VARIADIC_TEMPLATES_PRESENT __GXX_EXPERIMENTAL_CX
X0X__ | | #define __TBB_CPP11_VARIADIC_TEMPLATES_PRESENT __GXX_EXPERIMENTAL_CX
X0X__ | |
| #define __TBB_CPP11_RVALUE_REF_PRESENT __GXX_EXPERIMENTAL_CX
X0X__ | | #define __TBB_CPP11_RVALUE_REF_PRESENT __GXX_EXPERIMENTAL_CX
X0X__ | |
| /** __GCC_HAVE_SYNC_COMPARE_AND_SWAP_4 here is a substitution for _GLIB
CXX_ATOMIC_BUILTINS_4, which is a prerequisite | | /** __GCC_HAVE_SYNC_COMPARE_AND_SWAP_4 here is a substitution for _GLIB
CXX_ATOMIC_BUILTINS_4, which is a prerequisite | |
| for exception_ptr but cannot be used in this file because it is def
ined in a header, not by the compiler. | | for exception_ptr but cannot be used in this file because it is def
ined in a header, not by the compiler. | |
| If the compiler has no atomic intrinsics, the C++ library should no
t expect those as well. **/ | | If the compiler has no atomic intrinsics, the C++ library should no
t expect those as well. **/ | |
| #define __TBB_EXCEPTION_PTR_PRESENT (__GXX_EXPERIMENTAL_C
XX0X__ && __TBB_GCC_VERSION >= 40404 && __GCC_HAVE_SYNC_COMPARE_AND_SWAP_4) | | #define __TBB_EXCEPTION_PTR_PRESENT (__GXX_EXPERIMENTAL_C
XX0X__ && __TBB_GCC_VERSION >= 40404 && __GCC_HAVE_SYNC_COMPARE_AND_SWAP_4) | |
| #define __TBB_MAKE_EXCEPTION_PTR_PRESENT (__GXX_EXPERIMENTAL_C
XX0X__ && __TBB_GCC_VERSION >= 40600) | | #define __TBB_MAKE_EXCEPTION_PTR_PRESENT (__GXX_EXPERIMENTAL_C
XX0X__ && __TBB_GCC_VERSION >= 40600) | |
| #define __TBB_STATIC_ASSERT_PRESENT (__GXX_EXPERIMENTAL_C
XX0X__ && __TBB_GCC_VERSION >= 40300) | | #define __TBB_STATIC_ASSERT_PRESENT (__GXX_EXPERIMENTAL_C
XX0X__ && __TBB_GCC_VERSION >= 40300) | |
| #define __TBB_CPP11_TUPLE_PRESENT (__GXX_EXPERIMENTAL_C
XX0X__ && __TBB_GCC_VERSION >= 40300) | | #define __TBB_CPP11_TUPLE_PRESENT (__GXX_EXPERIMENTAL_C
XX0X__ && __TBB_GCC_VERSION >= 40300) | |
| #define __TBB_INITIALIZER_LISTS_PRESENT (__GXX_EXPERIMENTAL_C
XX0X__ && __TBB_GCC_VERSION >= 40400) | | #define __TBB_INITIALIZER_LISTS_PRESENT (__GXX_EXPERIMENTAL_C
XX0X__ && __TBB_GCC_VERSION >= 40400) | |
| /** gcc seems to support constexpr from 4.4, but seemingly reasonable tests (in test_atomic) fail to compile prior to 4.6 **/ | | /** gcc seems to support constexpr from 4.4, but seemingly reasonable tests (in test_atomic) fail to compile prior to 4.6 **/ | |
| #define __TBB_CONSTEXPR_PRESENT (__GXX_EXPERIMENTAL_C
XX0X__ && __TBB_GCC_VERSION >= 40400) | | #define __TBB_CONSTEXPR_PRESENT (__GXX_EXPERIMENTAL_C
XX0X__ && __TBB_GCC_VERSION >= 40400) | |
| #define __TBB_DEFAULTED_AND_DELETED_FUNC_PRESENT (__GXX_EXPERIMENTAL_C
XX0X__ && __TBB_GCC_VERSION >= 40400) | | #define __TBB_DEFAULTED_AND_DELETED_FUNC_PRESENT (__GXX_EXPERIMENTAL_C
XX0X__ && __TBB_GCC_VERSION >= 40400) | |
|
| | | #define __TBB_NOEXCEPT_PRESENT (__GXX_EXPERIMENTAL_C
XX0X__ && __TBB_GCC_VERSION >= 40600) | |
| #elif _MSC_VER | | #elif _MSC_VER | |
| #define __TBB_CPP11_VARIADIC_TEMPLATES_PRESENT (_MSC_VER >= 1800) | | #define __TBB_CPP11_VARIADIC_TEMPLATES_PRESENT (_MSC_VER >= 1800) | |
| #define __TBB_CPP11_RVALUE_REF_PRESENT (_MSC_VER >= 1600) | | #define __TBB_CPP11_RVALUE_REF_PRESENT (_MSC_VER >= 1600) | |
| #define __TBB_EXCEPTION_PTR_PRESENT (_MSC_VER >= 1600) | | #define __TBB_EXCEPTION_PTR_PRESENT (_MSC_VER >= 1600) | |
| #define __TBB_STATIC_ASSERT_PRESENT (_MSC_VER >= 1600) | | #define __TBB_STATIC_ASSERT_PRESENT (_MSC_VER >= 1600) | |
| #define __TBB_MAKE_EXCEPTION_PTR_PRESENT (_MSC_VER >= 1700) | | #define __TBB_MAKE_EXCEPTION_PTR_PRESENT (_MSC_VER >= 1700) | |
| #define __TBB_CPP11_TUPLE_PRESENT (_MSC_VER >= 1600) | | #define __TBB_CPP11_TUPLE_PRESENT (_MSC_VER >= 1600) | |
| #define __TBB_INITIALIZER_LISTS_PRESENT (_MSC_VER >= 1800) | | #define __TBB_INITIALIZER_LISTS_PRESENT (_MSC_VER >= 1800) | |
| #define __TBB_CONSTEXPR_PRESENT 0 | | #define __TBB_CONSTEXPR_PRESENT 0 | |
| #define __TBB_DEFAULTED_AND_DELETED_FUNC_PRESENT (_MSC_VER >= 1800) | | #define __TBB_DEFAULTED_AND_DELETED_FUNC_PRESENT (_MSC_VER >= 1800) | |
|
| | | #define __TBB_NOEXCEPT_PRESENT 0 /*for _MSC_VER == 1
800*/ | |
| #else | | #else | |
| #define __TBB_CPP11_VARIADIC_TEMPLATES_PRESENT 0 | | #define __TBB_CPP11_VARIADIC_TEMPLATES_PRESENT 0 | |
| #define __TBB_CPP11_RVALUE_REF_PRESENT 0 | | #define __TBB_CPP11_RVALUE_REF_PRESENT 0 | |
| #define __TBB_EXCEPTION_PTR_PRESENT 0 | | #define __TBB_EXCEPTION_PTR_PRESENT 0 | |
| #define __TBB_STATIC_ASSERT_PRESENT 0 | | #define __TBB_STATIC_ASSERT_PRESENT 0 | |
| #define __TBB_MAKE_EXCEPTION_PTR_PRESENT 0 | | #define __TBB_MAKE_EXCEPTION_PTR_PRESENT 0 | |
| #define __TBB_CPP11_TUPLE_PRESENT 0 | | #define __TBB_CPP11_TUPLE_PRESENT 0 | |
| #define __TBB_INITIALIZER_LISTS_PRESENT 0 | | #define __TBB_INITIALIZER_LISTS_PRESENT 0 | |
| #define __TBB_CONSTEXPR_PRESENT 0 | | #define __TBB_CONSTEXPR_PRESENT 0 | |
| #define __TBB_DEFAULTED_AND_DELETED_FUNC_PRESENT 0 | | #define __TBB_DEFAULTED_AND_DELETED_FUNC_PRESENT 0 | |
|
| | | #define __TBB_NOEXCEPT_PRESENT 0 | |
| #endif | | #endif | |
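The __TBB_*_PRESENT macros in this chain are meant to be consumed by code that degrades gracefully on older compilers. A minimal sketch of that pattern, using a hypothetical wrapper name (EXAMPLE_NOEXCEPT is made up for illustration and is not a TBB macro):

    #include "tbb/tbb_config.h"

    // Expands to noexcept(...) only when the detection macro above reports
    // compiler support; otherwise it expands to nothing.
    #if __TBB_NOEXCEPT_PRESENT
        #define EXAMPLE_NOEXCEPT(expr) noexcept(expr)
    #else
        #define EXAMPLE_NOEXCEPT(expr)
    #endif

    struct buffer;
    void swap_buffers(buffer& a, buffer& b) EXAMPLE_NOEXCEPT(true);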
| | | | |
| //TODO: not clear how exactly this macro affects exception_ptr - investigat
e | | //TODO: not clear how exactly this macro affects exception_ptr - investigat
e | |
| // On linux ICC fails to find existing std::exception_ptr in libstdc++ with
out this define | | // On linux ICC fails to find existing std::exception_ptr in libstdc++ with
out this define | |
| #if __INTEL_COMPILER && __GNUC__ && __TBB_EXCEPTION_PTR_PRESENT && !defined
(__GCC_HAVE_SYNC_COMPARE_AND_SWAP_4) | | #if __INTEL_COMPILER && __GNUC__ && __TBB_EXCEPTION_PTR_PRESENT && !defined
(__GCC_HAVE_SYNC_COMPARE_AND_SWAP_4) | |
| #define __GCC_HAVE_SYNC_COMPARE_AND_SWAP_4 1 | | #define __GCC_HAVE_SYNC_COMPARE_AND_SWAP_4 1 | |
| #endif | | #endif | |
| | | | |
| // Work around a bug in MinGW32 | | // Work around a bug in MinGW32 | |
| #if __MINGW32__ && __TBB_EXCEPTION_PTR_PRESENT && !defined(_GLIBCXX_ATOMIC_
BUILTINS_4) | | #if __MINGW32__ && __TBB_EXCEPTION_PTR_PRESENT && !defined(_GLIBCXX_ATOMIC_
BUILTINS_4) | |
| | | | |
| skipping to change at line 322 | | skipping to change at line 329 | |
| #endif | | #endif | |
| | | | |
| /** __TBB_DYNAMIC_LOAD_ENABLED indicates whether the system allows loading shared libraries at run time **/ | | /** __TBB_DYNAMIC_LOAD_ENABLED indicates whether the system allows loading shared libraries at run time **/ | |
| #ifndef __TBB_DYNAMIC_LOAD_ENABLED | | #ifndef __TBB_DYNAMIC_LOAD_ENABLED | |
| #define __TBB_DYNAMIC_LOAD_ENABLED 1 | | #define __TBB_DYNAMIC_LOAD_ENABLED 1 | |
| #endif | | #endif | |
| | | | |
| /** __TBB_SOURCE_DIRECTLY_INCLUDED is a mode used in whitebox testing when | | /** __TBB_SOURCE_DIRECTLY_INCLUDED is a mode used in whitebox testing when | |
| it's necessary to test internal functions not exported from TBB DLLs | | it's necessary to test internal functions not exported from TBB DLLs | |
| **/ | | **/ | |
|
| #if (_WIN32||_WIN64) && __TBB_SOURCE_DIRECTLY_INCLUDED | | #if (_WIN32||_WIN64) && (__TBB_SOURCE_DIRECTLY_INCLUDED || TBB_USE_PREVIEW_
BINARY) | |
| #define __TBB_NO_IMPLICIT_LINKAGE 1 | | #define __TBB_NO_IMPLICIT_LINKAGE 1 | |
| #define __TBBMALLOC_NO_IMPLICIT_LINKAGE 1 | | #define __TBBMALLOC_NO_IMPLICIT_LINKAGE 1 | |
| #endif | | #endif | |
| | | | |
| #ifndef __TBB_COUNT_TASK_NODES | | #ifndef __TBB_COUNT_TASK_NODES | |
| #define __TBB_COUNT_TASK_NODES TBB_USE_ASSERT | | #define __TBB_COUNT_TASK_NODES TBB_USE_ASSERT | |
| #endif | | #endif | |
| | | | |
| #ifndef __TBB_TASK_GROUP_CONTEXT | | #ifndef __TBB_TASK_GROUP_CONTEXT | |
| #define __TBB_TASK_GROUP_CONTEXT 1 | | #define __TBB_TASK_GROUP_CONTEXT 1 | |
| #endif /* __TBB_TASK_GROUP_CONTEXT */ | | #endif /* __TBB_TASK_GROUP_CONTEXT */ | |
| | | | |
| #ifndef __TBB_SCHEDULER_OBSERVER | | #ifndef __TBB_SCHEDULER_OBSERVER | |
| #define __TBB_SCHEDULER_OBSERVER 1 | | #define __TBB_SCHEDULER_OBSERVER 1 | |
| #endif /* __TBB_SCHEDULER_OBSERVER */ | | #endif /* __TBB_SCHEDULER_OBSERVER */ | |
| | | | |
|
| | | #ifndef __TBB_FP_CONTEXT | |
| | | #define __TBB_FP_CONTEXT __TBB_TASK_GROUP_CONTEXT | |
| | | #endif /* __TBB_FP_CONTEXT */ | |
| | | | |
| | | #if __TBB_FP_CONTEXT && !__TBB_TASK_GROUP_CONTEXT | |
| | | #error __TBB_FP_CONTEXT requires __TBB_TASK_GROUP_CONTEXT to be enabled | |
| | | #endif | |
| | | | |
| #ifndef __TBB_TASK_ARENA | | #ifndef __TBB_TASK_ARENA | |
| #define __TBB_TASK_ARENA (__TBB_BUILD||TBB_PREVIEW_TASK_ARENA) | | #define __TBB_TASK_ARENA (__TBB_BUILD||TBB_PREVIEW_TASK_ARENA) | |
| #endif /* __TBB_TASK_ARENA */ | | #endif /* __TBB_TASK_ARENA */ | |
| #if __TBB_TASK_ARENA | | #if __TBB_TASK_ARENA | |
| #define __TBB_RECYCLE_TO_ENQUEUE __TBB_BUILD // keep non-official | | #define __TBB_RECYCLE_TO_ENQUEUE __TBB_BUILD // keep non-official | |
| #if !__TBB_SCHEDULER_OBSERVER | | #if !__TBB_SCHEDULER_OBSERVER | |
| #error TBB_PREVIEW_TASK_ARENA requires __TBB_SCHEDULER_OBSERVER to
be enabled | | #error TBB_PREVIEW_TASK_ARENA requires __TBB_SCHEDULER_OBSERVER to
be enabled | |
| #endif | | #endif | |
| #endif /* __TBB_TASK_ARENA */ | | #endif /* __TBB_TASK_ARENA */ | |
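Preview functionality such as task_arena is gated on a TBB_PREVIEW_* macro that must be defined before the first TBB include, otherwise the corresponding __TBB_* switch above stays 0. A minimal sketch, assuming this TBB version ships the preview header as "tbb/task_arena.h":

    // The preview macro must precede any TBB header.
    #define TBB_PREVIEW_TASK_ARENA 1
    #include "tbb/task_arena.h"

    void run_limited() {
        tbb::task_arena arena(2);   // cap this work at two worker threads
        arena.execute([]{ /* parallel work scoped to the arena */ });
    }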
| | | | |
|
| #if !defined(TBB_PREVIEW_LOCAL_OBSERVER) && __TBB_BUILD && __TBB_SCHEDULER_ | | #ifndef __TBB_ARENA_OBSERVER | |
| OBSERVER | | #define __TBB_ARENA_OBSERVER ((__TBB_BUILD||TBB_PREVIEW_LOCAL_OBSERVER) | |
| #define TBB_PREVIEW_LOCAL_OBSERVER 1 | | && __TBB_SCHEDULER_OBSERVER) | |
| #endif /* TBB_PREVIEW_LOCAL_OBSERVER */ | | #endif /* __TBB_ARENA_OBSERVER */ | |
| | | | |
| | | #ifndef __TBB_SLEEP_PERMISSION | |
| | | #define __TBB_SLEEP_PERMISSION ((__TBB_CPF_BUILD||TBB_PREVIEW_LOCAL_OBS | |
| | | ERVER)&& __TBB_SCHEDULER_OBSERVER) | |
| | | #endif /* __TBB_SLEEP_PERMISSION */ | |
| | | | |
| | | #if TBB_PREVIEW_FLOW_GRAPH_TRACE | |
| | | #define __TBB_NO_IMPLICIT_LINKAGE 1 | |
| | | #endif /* TBB_PREVIEW_FLOW_GRAPH_TRACE */ | |
| | | | |
| #ifndef __TBB_ITT_STRUCTURE_API | | #ifndef __TBB_ITT_STRUCTURE_API | |
| #define __TBB_ITT_STRUCTURE_API ( !__TBB_DEFINE_MIC && (__TBB_CPF_BUILD ||
TBB_PREVIEW_FLOW_GRAPH_TRACE) ) | | #define __TBB_ITT_STRUCTURE_API ( !__TBB_DEFINE_MIC && (__TBB_CPF_BUILD ||
TBB_PREVIEW_FLOW_GRAPH_TRACE) ) | |
| #endif | | #endif | |
| | | | |
| #if TBB_USE_EXCEPTIONS && !__TBB_TASK_GROUP_CONTEXT | | #if TBB_USE_EXCEPTIONS && !__TBB_TASK_GROUP_CONTEXT | |
| #error TBB_USE_EXCEPTIONS requires __TBB_TASK_GROUP_CONTEXT to be enabl
ed | | #error TBB_USE_EXCEPTIONS requires __TBB_TASK_GROUP_CONTEXT to be enabl
ed | |
| #endif | | #endif | |
| | | | |
| #ifndef __TBB_TASK_PRIORITY | | #ifndef __TBB_TASK_PRIORITY | |
|
| #define __TBB_TASK_PRIORITY (!__TBB_CPF_BUILD&&__TBB_TASK_GROUP_CONTEXT
) // TODO: it will be enabled for CPF in the next versions | | #define __TBB_TASK_PRIORITY (!(__TBB_CPF_BUILD||TBB_USE_PREVIEW_BINARY)
&&__TBB_TASK_GROUP_CONTEXT) // TODO: it will be enabled for CPF in the next
versions | |
| #endif /* __TBB_TASK_PRIORITY */ | | #endif /* __TBB_TASK_PRIORITY */ | |
| | | | |
| #if __TBB_TASK_PRIORITY && !__TBB_TASK_GROUP_CONTEXT | | #if __TBB_TASK_PRIORITY && !__TBB_TASK_GROUP_CONTEXT | |
| #error __TBB_TASK_PRIORITY requires __TBB_TASK_GROUP_CONTEXT to be enab
led | | #error __TBB_TASK_PRIORITY requires __TBB_TASK_GROUP_CONTEXT to be enab
led | |
| #endif | | #endif | |
| | | | |
| #if TBB_PREVIEW_WAITING_FOR_WORKERS || __TBB_BUILD | | #if TBB_PREVIEW_WAITING_FOR_WORKERS || __TBB_BUILD | |
| #define __TBB_SUPPORTS_WORKERS_WAITING_IN_TERMINATE 1 | | #define __TBB_SUPPORTS_WORKERS_WAITING_IN_TERMINATE 1 | |
| #endif | | #endif | |
| | | | |
| | | | |
| skipping to change at line 402 | | skipping to change at line 425 | |
| #define __TBB_VARIADIC_MAX 5 /* current VS11 setting, may change. */ | | #define __TBB_VARIADIC_MAX 5 /* current VS11 setting, may change. */ | |
| #else | | #else | |
| #define __TBB_VARIADIC_MAX 10 | | #define __TBB_VARIADIC_MAX 10 | |
| #endif | | #endif | |
| #endif | | #endif | |
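__TBB_VARIADIC_MAX caps the arity of the emulated variadic templates (for example flow-graph tuples) on compilers without true variadic support. Code that needs a wider tuple can check the limit up front rather than failing deep inside the templates; an illustrative guard, with 7 as an arbitrary required width:

    #include "tbb/tbb_config.h"

    #if __TBB_VARIADIC_MAX < 7
        #error This example needs __TBB_VARIADIC_MAX >= 7 (e.g. a non-VS11 configuration)
    #endif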
| | | | |
| #if !defined(TBB_PREVIEW_SPECULATIVE_SPIN_RW_MUTEX) | | #if !defined(TBB_PREVIEW_SPECULATIVE_SPIN_RW_MUTEX) | |
| #define TBB_PREVIEW_SPECULATIVE_SPIN_RW_MUTEX __TBB_CPF_BUILD | | #define TBB_PREVIEW_SPECULATIVE_SPIN_RW_MUTEX __TBB_CPF_BUILD | |
| #endif /* TBB_PREVIEW_SPECULATIVE_SPIN_RW_MUTEX */ | | #endif /* TBB_PREVIEW_SPECULATIVE_SPIN_RW_MUTEX */ | |
| | | | |
|
| | | #if TBB_PREVIEW_SPECULATIVE_SPIN_RW_MUTEX && !__TBB_CPF_BUILD | |
| | | #define __TBB_NO_IMPLICIT_LINKAGE 1 | |
| | | #endif /* TBB_PREVIEW_SPECULATIVE_SPIN_RW_MUTEX && !__TBB_CPF_BUILD */ | |
| | | | |
| /** __TBB_WIN8UI_SUPPORT enables support for Windows* 8 Store Apps and restricts run-time loading of | | /** __TBB_WIN8UI_SUPPORT enables support for Windows* 8 Store Apps and restricts run-time loading of | |
| shared libraries to the application container only **/ | | shared libraries to the application container only **/ | |
| #if defined(WINAPI_FAMILY) && WINAPI_FAMILY == WINAPI_FAMILY_APP | | #if defined(WINAPI_FAMILY) && WINAPI_FAMILY == WINAPI_FAMILY_APP | |
| #define __TBB_WIN8UI_SUPPORT 1 | | #define __TBB_WIN8UI_SUPPORT 1 | |
| #else | | #else | |
| #define __TBB_WIN8UI_SUPPORT 0 | | #define __TBB_WIN8UI_SUPPORT 0 | |
| #endif | | #endif | |
| | | | |
| // Define preprocessor symbols used to determine architecture | | // Define preprocessor symbols used to determine architecture | |
| #if _WIN32||_WIN64 | | #if _WIN32||_WIN64 | |
| | | | |
End of changes. 10 change blocks. |
| 6 lines changed or deleted | | 39 lines changed or added | |
|