| _flow_graph_join_impl.h | | _flow_graph_join_impl.h | |
| | | | |
| skipping to change at line 127 | | skipping to change at line 127 | |
| } | | } | |
| | | | |
| template< typename TagFuncTuple1, typename TagFuncTuple2> | | template< typename TagFuncTuple1, typename TagFuncTuple2> | |
| static inline void copy_tag_functors(TagFuncTuple1 &my_inputs, TagFuncTuple2 &other_inputs) { | | static inline void copy_tag_functors(TagFuncTuple1 &my_inputs, TagFuncTuple2 &other_inputs) { | |
| if(std::get<N-1>(other_inputs).my_original_func()) { | | if(std::get<N-1>(other_inputs).my_original_func()) { | |
| std::get<N-1>(my_inputs).set_my_tag_func(std::get<N-1>(other_inputs).my_original_func()->clone()); | | std::get<N-1>(my_inputs).set_my_tag_func(std::get<N-1>(other_inputs).my_original_func()->clone()); | |
| std::get<N-1>(my_inputs).set_my_original_tag_func(std::get<N-1>(other_inputs).my_original_func()->clone()); | | std::get<N-1>(my_inputs).set_my_original_tag_func(std::get<N-1>(other_inputs).my_original_func()->clone()); | |
| } | | } | |
| join_helper<N-1>::copy_tag_functors(my_inputs, other_inputs); | | join_helper<N-1>::copy_tag_functors(my_inputs, other_inputs); | |
| } | | } | |
| | | | |
| | | template<typename InputTuple> | |
| | | static inline void reset_inputs(InputTuple &my_input) { | |
| | | join_helper<N-1>::reset_inputs(my_input); | |
| | | std::get<N-1>(my_input).reinitialize_port(); | |
| | | } | |
| }; | | }; | |
| | | | |
| template< > | | template< > | |
| struct join_helper<1> { | | struct join_helper<1> { | |
| | | | |
| template< typename TupleType, typename PortType > | | template< typename TupleType, typename PortType > | |
| static inline void set_join_node_pointer(TupleType &my_input, PortType *port) { | | static inline void set_join_node_pointer(TupleType &my_input, PortType *port) { | |
| std::get<0>( my_input ).set_join_node_pointer(port); | | std::get<0>( my_input ).set_join_node_pointer(port); | |
| } | | } | |
| | | | |
| | | | |
| skipping to change at line 191 | | skipping to change at line 197 | |
| std::get<0>(my_tag_funcs) = NULL; | | std::get<0>(my_tag_funcs) = NULL; | |
| } | | } | |
| | | | |
| template< typename TagFuncTuple1, typename TagFuncTuple2> | | template< typename TagFuncTuple1, typename TagFuncTuple2> | |
| static inline void copy_tag_functors(TagFuncTuple1 &my_inputs, TagFuncTuple2 &other_inputs) { | | static inline void copy_tag_functors(TagFuncTuple1 &my_inputs, TagFuncTuple2 &other_inputs) { | |
| if(std::get<0>(other_inputs).my_original_func()) { | | if(std::get<0>(other_inputs).my_original_func()) { | |
| std::get<0>(my_inputs).set_my_tag_func(std::get<0>(other_inputs).my_original_func()->clone()); | | std::get<0>(my_inputs).set_my_tag_func(std::get<0>(other_inputs).my_original_func()->clone()); | |
| std::get<0>(my_inputs).set_my_original_tag_func(std::get<0>(other_inputs).my_original_func()->clone()); | | std::get<0>(my_inputs).set_my_original_tag_func(std::get<0>(other_inputs).my_original_func()->clone()); | |
| } | | } | |
| } | | } | |
| | | template<typename InputTuple> | |
| | | static inline void reset_inputs(InputTuple &my_input) { | |
| | | std::get<0>(my_input).reinitialize_port(); | |
| | | } | |
| }; | | }; | |
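The join_helper<N> specializations above visit every element of the input-port tuple by compile-time recursion: the general case handles element N-1 and delegates the remaining elements to join_helper<N-1>, and the join_helper<1> specialization terminates the recursion, which is how the newly added reset_inputs reaches each port's reinitialize_port(). A minimal standalone sketch of that pattern, with illustrative names that are not part of the TBB headers:

    #include <cstdio>
    #include <tuple>

    struct port { int value; void reset() { value = 0; } };

    // Mirrors the join_helper<N> recursion: recurse for elements 0..N-2, then handle N-1.
    template<int N>
    struct apply_helper {
        template<typename Tuple>
        static void reset_all(Tuple &t) {
            apply_helper<N-1>::reset_all(t);   // elements 0 .. N-2 first
            std::get<N-1>(t).reset();          // then element N-1, like reset_inputs
        }
    };

    template<>
    struct apply_helper<1> {                   // base case ends the recursion
        template<typename Tuple>
        static void reset_all(Tuple &t) { std::get<0>(t).reset(); }
    };

    int main() {
        std::tuple<port, port, port> ports;
        std::get<0>(ports).value = 1; std::get<1>(ports).value = 2; std::get<2>(ports).value = 3;
        apply_helper<3>::reset_all(ports);     // visits every tuple element
        std::printf("%d %d %d\n", std::get<0>(ports).value,
                    std::get<1>(ports).value, std::get<2>(ports).value);  // prints 0 0 0
        return 0;
    }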
| | | | |
| //! The two-phase join port | | //! The two-phase join port | |
| template< typename T > | | template< typename T > | |
| class reserving_port : public receiver<T> { | | class reserving_port : public receiver<T> { | |
| public: | | public: | |
| typedef T input_type; | | typedef T input_type; | |
| typedef sender<T> predecessor_type; | | typedef sender<T> predecessor_type; | |
| private: | | private: | |
| // ----------- Aggregator ------------ | | // ----------- Aggregator ------------ | |
| | | | |
| skipping to change at line 330 | | skipping to change at line 340 | |
| reserving_port_operation op_data(rel_res); | | reserving_port_operation op_data(rel_res); | |
| my_aggregator.execute(&op_data); | | my_aggregator.execute(&op_data); | |
| } | | } | |
| | | | |
| //! Complete use of the port | | //! Complete use of the port | |
| void consume( ) { | | void consume( ) { | |
| reserving_port_operation op_data(con_res); | | reserving_port_operation op_data(con_res); | |
| my_aggregator.execute(&op_data); | | my_aggregator.execute(&op_data); | |
| } | | } | |
| | | | |
| | | void reinitialize_port() { | |
| | | my_predecessors.reset(); | |
| | | reserved = false; | |
| | | } | |
| | | | |
| | | protected: | |
| | | | |
| | | /*override*/void reset_receiver() { | |
| | | my_predecessors.reset(); | |
| | | } | |
| | | | |
| private: | | private: | |
| forwarding_base *my_join; | | forwarding_base *my_join; | |
| reservable_predecessor_cache< T, null_mutex > my_predecessors; | | reservable_predecessor_cache< T, null_mutex > my_predecessors; | |
| bool reserved; | | bool reserved; | |
| }; | | }; | |
| | | | |
| //! queueing join_port | | //! queueing join_port | |
| template<typename T> | | template<typename T> | |
| class queueing_port : public receiver<T>, public item_buffer<T> { | | class queueing_port : public receiver<T>, public item_buffer<T> { | |
| public: | | public: | |
| | | | |
| skipping to change at line 443 | | skipping to change at line 464 | |
| } | | } | |
| | | | |
| // reset_port is called when item is accepted by successor, but | | // reset_port is called when item is accepted by successor, but | |
| // is initiated by join_node. | | // is initiated by join_node. | |
| void reset_port() { | | void reset_port() { | |
| queueing_port_operation op_data(res_port); | | queueing_port_operation op_data(res_port); | |
| my_aggregator.execute(&op_data); | | my_aggregator.execute(&op_data); | |
| return; | | return; | |
| } | | } | |
| | | | |
| | | void reinitialize_port() { | |
| | | item_buffer<T>::reset(); | |
| | | } | |
| | | | |
| | | protected: | |
| | | | |
| | | /*override*/void reset_receiver() { | |
| | | // nothing to do. We queue, so no predecessor cache | |
| | | } | |
| | | | |
| private: | | private: | |
| forwarding_base *my_join; | | forwarding_base *my_join; | |
| }; | | }; | |
| | | | |
| #include "_flow_graph_tagged_buffer_impl.h" | | #include "_flow_graph_tagged_buffer_impl.h" | |
| | | | |
| template< typename T > | | template< typename T > | |
| class tag_matching_port : public receiver<T>, public tagged_buffer< tag_value, T, NO_TAG > { | | class tag_matching_port : public receiver<T>, public tagged_buffer< tag_value, T, NO_TAG > { | |
| public: | | public: | |
| typedef T input_type; | | typedef T input_type; | |
| typedef sender<T> predecessor_type; | | typedef sender<T> predecessor_type; | |
| typedef tag_matching_port<T> my_node_type; // for forwarding, if needed | | typedef tag_matching_port<T> my_node_type; // for forwarding, if needed | |
| typedef function_body<input_type, tag_value> my_tag_func_type; | | typedef function_body<input_type, tag_value> my_tag_func_type; | |
| | | typedef tagged_buffer<tag_value,T,NO_TAG> my_buffer_type; | |
| private: | | private: | |
| // ----------- Aggregator ------------ | | // ----------- Aggregator ------------ | |
| private: | | private: | |
| enum op_type { try__put, get__item, res_port }; | | enum op_type { try__put, get__item, res_port }; | |
| enum op_stat {WAIT=0, SUCCEEDED, FAILED}; | | enum op_stat {WAIT=0, SUCCEEDED, FAILED}; | |
| typedef tag_matching_port<T> my_class; | | typedef tag_matching_port<T> my_class; | |
| | | | |
| class tag_matching_port_operation : public aggregated_operation<tag_matching_port_operation> { | | class tag_matching_port_operation : public aggregated_operation<tag_matching_port_operation> { | |
| public: | | public: | |
| char type; | | char type; | |
| | | | |
| skipping to change at line 581 | | skipping to change at line 613 | |
| // is initiated by join_node. | | // is initiated by join_node. | |
| void reset_port() { | | void reset_port() { | |
| tag_matching_port_operation op_data(res_port); | | tag_matching_port_operation op_data(res_port); | |
| my_aggregator.execute(&op_data); | | my_aggregator.execute(&op_data); | |
| return; | | return; | |
| } | | } | |
| | | | |
| my_tag_func_type *my_func() { return my_tag_func; } | | my_tag_func_type *my_func() { return my_tag_func; } | |
| my_tag_func_type *my_original_func() { return my_original_tag_func; } | | my_tag_func_type *my_original_func() { return my_original_tag_func; } | |
| | | | |
| | | void reinitialize_port() { | |
| | | my_buffer_type::reset(); | |
| | | } | |
| | | | |
| | | protected: | |
| | | | |
| | | /*override*/void reset_receiver() { | |
| | | // nothing to do. We queue, so no predecessor cache | |
| | | } | |
| | | | |
| private: | | private: | |
| // need map of tags to values | | // need map of tags to values | |
| forwarding_base *my_join; | | forwarding_base *my_join; | |
| my_tag_func_type *my_tag_func; | | my_tag_func_type *my_tag_func; | |
| my_tag_func_type *my_original_tag_func; | | my_tag_func_type *my_original_tag_func; | |
| }; // tag_matching_port | | }; // tag_matching_port | |
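tag_matching_port stores incoming items keyed by a tag computed with the per-port function body (my_tag_func), and the join's front end emits a tuple once every port holds an item carrying the same tag. A hedged sketch of how such a join is typically assembled from user code, assuming a C++11 compiler and the public tbb::flow::join_node API; the message types and tag bodies here are illustrative:

    #include "tbb/flow_graph.h"
    #include <tuple>

    using namespace tbb::flow;

    struct msg_a { int id; double payload; };
    struct msg_b { int id; char payload; };

    int main() {
        graph g;

        // One tag body per port: each maps an incoming message to its tag_value.
        join_node< std::tuple<msg_a, msg_b>, tag_matching > j( g,
            [](const msg_a &m) -> tag_value { return tag_value(m.id); },
            [](const msg_b &m) -> tag_value { return tag_value(m.id); } );

        function_node< std::tuple<msg_a, msg_b>, continue_msg > consume( g, unlimited,
            [](const std::tuple<msg_a, msg_b> &) { return continue_msg(); } );

        make_edge(j, consume);

        msg_a a = { 1, 3.14 };
        msg_b b = { 1, 'x' };
        input_port<0>(j).try_put(a);
        input_port<1>(j).try_put(b);   // same tag, so a completed tuple is forwarded
        g.wait_for_all();
        return 0;
    }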
| | | | |
| using namespace graph_policy_namespace; | | using namespace graph_policy_namespace; | |
| | | | |
| template<graph_buffer_policy JP, typename InputTuple, typename OutputTuple> | | template<graph_buffer_policy JP, typename InputTuple, typename OutputTuple> | |
| | | | |
| skipping to change at line 630 | | skipping to change at line 672 | |
| | | | |
| // if all input_ports have predecessors, spawn forward to try and consume tuples | | // if all input_ports have predecessors, spawn forward to try and consume tuples | |
| void decrement_port_count() { | | void decrement_port_count() { | |
| if(ports_with_no_inputs.fetch_and_decrement() == 1) { | | if(ports_with_no_inputs.fetch_and_decrement() == 1) { | |
| task::enqueue( * new ( task::allocate_additional_child_of( *(this->my_root_task) ) ) | | task::enqueue( * new ( task::allocate_additional_child_of( *(this->my_root_task) ) ) | |
| forward_task<my_node_type>(*my_node) ); | | forward_task<my_node_type>(*my_node) ); | |
| } | | } | |
| } | | } | |
| | | | |
| input_type &input_ports() { return my_inputs; } | | input_type &input_ports() { return my_inputs; } | |
| | | | |
| protected: | | protected: | |
| | | | |
| | | void reset() { | |
| | | // called outside of parallel contexts | |
| | | ports_with_no_inputs = N; | |
| | | join_helper<N>::reset_inputs(my_inputs); | |
| | | } | |
| | | | |
| // all methods on input ports should be called under mutual exclusion from join_node_base. | | // all methods on input ports should be called under mutual exclusion from join_node_base. | |
| | | | |
| bool tuple_build_may_succeed() { | | bool tuple_build_may_succeed() { | |
| return !ports_with_no_inputs; | | return !ports_with_no_inputs; | |
| } | | } | |
| | | | |
| bool try_to_make_tuple(output_type &out) { | | bool try_to_make_tuple(output_type &out) { | |
| if(ports_with_no_inputs) return false; | | if(ports_with_no_inputs) return false; | |
| return join_helper<N>::reserve(my_inputs, out); | | return join_helper<N>::reserve(my_inputs, out); | |
| } | | } | |
| | | | |
| skipping to change at line 690 | | skipping to change at line 740 | |
| void decrement_port_count() { | | void decrement_port_count() { | |
| if(ports_with_no_items.fetch_and_decrement() == 1) { | | if(ports_with_no_items.fetch_and_decrement() == 1) { | |
| task::enqueue( * new ( task::allocate_additional_child_of( *(this->my_root_task) ) ) | | task::enqueue( * new ( task::allocate_additional_child_of( *(this->my_root_task) ) ) | |
| forward_task<my_node_type>(*my_node) ); | | forward_task<my_node_type>(*my_node) ); | |
| } | | } | |
| } | | } | |
| | | | |
| void increment_port_count() { __TBB_ASSERT(false, NULL); } // should never be called | | void increment_port_count() { __TBB_ASSERT(false, NULL); } // should never be called | |
| | | | |
| input_type &input_ports() { return my_inputs; } | | input_type &input_ports() { return my_inputs; } | |
| | | | |
| protected: | | protected: | |
| | | | |
| | | void reset() { | |
| | | reset_port_count(); | |
| | | join_helper<N>::reset_inputs(my_inputs); | |
| | | } | |
| | | | |
| // all methods on input ports should be called under mutual exclusion from join_node_base. | | // all methods on input ports should be called under mutual exclusion from join_node_base. | |
| | | | |
| bool tuple_build_may_succeed() { | | bool tuple_build_may_succeed() { | |
| return !ports_with_no_items; | | return !ports_with_no_items; | |
| } | | } | |
| | | | |
| bool try_to_make_tuple(output_type &out) { | | bool try_to_make_tuple(output_type &out) { | |
| if(ports_with_no_items) return false; | | if(ports_with_no_items) return false; | |
| return join_helper<N>::get_items(my_inputs, out); | | return join_helper<N>::get_items(my_inputs, out); | |
| } | | } | |
| | | | |
| skipping to change at line 865 | | skipping to change at line 922 | |
| tag_matching_FE_operation op_data(t, inc_count); | | tag_matching_FE_operation op_data(t, inc_count); | |
| my_aggregator.execute(&op_data); | | my_aggregator.execute(&op_data); | |
| return; | | return; | |
| } | | } | |
| | | | |
| void decrement_port_count() { __TBB_ASSERT(false, NULL); } | | void decrement_port_count() { __TBB_ASSERT(false, NULL); } | |
| | | | |
| void increment_port_count() { __TBB_ASSERT(false, NULL); } // should never be called | | void increment_port_count() { __TBB_ASSERT(false, NULL); } // should never be called | |
| | | | |
| input_type &input_ports() { return my_inputs; } | | input_type &input_ports() { return my_inputs; } | |
| | | | |
| protected: | | protected: | |
| | | | |
| | | void reset() { | |
| | | // called outside of parallel contexts | |
| | | join_helper<N>::reset_inputs(my_inputs); | |
| | | | |
| | | my_tag_buffer::reset(); // have to reset the tag counts | |
| | | output_buffer_type::reset(); // also the queue of outputs | |
| | | my_node->current_tag = NO_TAG; | |
| | | } | |
| | | | |
| // all methods on input ports should be called under mutual exclusion from join_node_base. | | // all methods on input ports should be called under mutual exclusion from join_node_base. | |
| | | | |
| bool tuple_build_may_succeed() { // called from back-end | | bool tuple_build_may_succeed() { // called from back-end | |
| tag_matching_FE_operation op_data(may_succeed); | | tag_matching_FE_operation op_data(may_succeed); | |
| my_aggregator.execute(&op_data); | | my_aggregator.execute(&op_data); | |
| return op_data.status == SUCCEEDED; | | return op_data.status == SUCCEEDED; | |
| } | | } | |
| | | | |
| // cannot lock while calling back to input_ports. current_tag will only be set | | // cannot lock while calling back to input_ports. current_tag will only be set | |
| // and reset under the aggregator, so it will remain consistent. | | // and reset under the aggregator, so it will remain consistent. | |
| | | | |
| skipping to change at line 1028 | | skipping to change at line 1096 | |
| my_aggregator.execute(&op_data); | | my_aggregator.execute(&op_data); | |
| return op_data.status == SUCCEEDED; | | return op_data.status == SUCCEEDED; | |
| } | | } | |
| | | | |
| bool try_get( output_type &v) { | | bool try_get( output_type &v) { | |
| join_node_base_operation op_data(v, try__get); | | join_node_base_operation op_data(v, try__get); | |
| my_aggregator.execute(&op_data); | | my_aggregator.execute(&op_data); | |
| return op_data.status == SUCCEEDED; | | return op_data.status == SUCCEEDED; | |
| } | | } | |
| | | | |
| | | protected: | |
| | | | |
| | | /*override*/void reset() { | |
| | | input_ports_type::reset(); | |
| | | } | |
| | | | |
| private: | | private: | |
| broadcast_cache<output_type, null_rw_mutex> my_successors; | | broadcast_cache<output_type, null_rw_mutex> my_successors; | |
| | | | |
| friend class forward_task< join_node_base<JP, InputTuple, OutputTuple> >; | | friend class forward_task< join_node_base<JP, InputTuple, OutputTuple> >; | |
| | | | |
| void forward() { | | void forward() { | |
| join_node_base_operation op_data(do_fwrd); | | join_node_base_operation op_data(do_fwrd); | |
| my_aggregator.execute(&op_data); | | my_aggregator.execute(&op_data); | |
| } | | } | |
| }; | | }; | |
| | | | |
End of changes. 13 change blocks. 0 lines changed or deleted, 74 lines changed or added.
| flow_graph.h | | flow_graph.h | |
| | | | |
| skipping to change at line 43 | | skipping to change at line 43 | |
| #include "atomic.h" | | #include "atomic.h" | |
| #include "spin_mutex.h" | | #include "spin_mutex.h" | |
| #include "null_mutex.h" | | #include "null_mutex.h" | |
| #include "spin_rw_mutex.h" | | #include "spin_rw_mutex.h" | |
| #include "null_rw_mutex.h" | | #include "null_rw_mutex.h" | |
| #include "task.h" | | #include "task.h" | |
| #include "concurrent_vector.h" | | #include "concurrent_vector.h" | |
| #include "internal/_aggregator_impl.h" | | #include "internal/_aggregator_impl.h" | |
| | | | |
| // use the VC10 or gcc version of tuple if it is available. | | // use the VC10 or gcc version of tuple if it is available. | |
| #if TBB_IMPLEMENT_CPP0X && (!defined(_MSC_VER) || _MSC_VER < 1600) | | #if __TBB_CPP11_TUPLE_PRESENT | |
| #define TBB_PREVIEW_TUPLE 1 | | #include <tuple> | |
| #include "compat/tuple" | | | |
| #else | | #else | |
| #include <tuple> | | #include "compat/tuple" | |
| #endif | | #endif | |
| | | | |
| #include<list> | | #include<list> | |
| #include<queue> | | #include<queue> | |
| | | | |
| /** @file | | /** @file | |
| \brief The graph related classes and functions | | \brief The graph related classes and functions | |
| | | | |
| There are some applications that best express dependencies as messages | | There are some applications that best express dependencies as messages | |
| passed between nodes in a graph. These messages may contain data or | | passed between nodes in a graph. These messages may contain data or | |
| | | | |
| skipping to change at line 71 | | skipping to change at line 70 | |
| */ | | */ | |
| | | | |
| namespace tbb { | | namespace tbb { | |
| namespace flow { | | namespace flow { | |
| | | | |
| //! An enumeration that provides the two most common concurrency levels: unlimited and serial | | //! An enumeration that provides the two most common concurrency levels: unlimited and serial | |
| enum concurrency { unlimited = 0, serial = 1 }; | | enum concurrency { unlimited = 0, serial = 1 }; | |
| | | | |
| namespace interface6 { | | namespace interface6 { | |
| | | | |
| | | namespace internal { | |
| | | template<typename T, typename M> | |
| | | class successor_cache; | |
| | | } | |
| | | | |
| //! An empty class used for messages that mean "I'm done" | | //! An empty class used for messages that mean "I'm done" | |
| class continue_msg {}; | | class continue_msg {}; | |
| | | | |
| template< typename T > class sender; | | template< typename T > class sender; | |
| template< typename T > class receiver; | | template< typename T > class receiver; | |
| class continue_receiver; | | class continue_receiver; | |
| | | | |
| //! Pure virtual template class that defines a sender of messages of type T | | //! Pure virtual template class that defines a sender of messages of type T | |
| template< typename T > | | template< typename T > | |
| class sender { | | class sender { | |
| | | | |
| skipping to change at line 109 | | skipping to change at line 113 | |
| //! Reserves an item in the sender | | //! Reserves an item in the sender | |
| virtual bool try_reserve( T & ) { return false; } | | virtual bool try_reserve( T & ) { return false; } | |
| | | | |
| //! Releases the reserved item | | //! Releases the reserved item | |
| virtual bool try_release( ) { return false; } | | virtual bool try_release( ) { return false; } | |
| | | | |
| //! Consumes the reserved item | | //! Consumes the reserved item | |
| virtual bool try_consume( ) { return false; } | | virtual bool try_consume( ) { return false; } | |
| }; | | }; | |
| | | | |
| | | template< typename T > class limiter_node; // needed for resetting decrementer | |
| | | | |
| //! Pure virtual template class that defines a receiver of messages of type T | | //! Pure virtual template class that defines a receiver of messages of type T | |
| template< typename T > | | template< typename T > | |
| class receiver { | | class receiver { | |
| public: | | public: | |
| //! The input type of this receiver | | //! The input type of this receiver | |
| typedef T input_type; | | typedef T input_type; | |
| | | | |
| //! The predecessor type for this node | | //! The predecessor type for this node | |
| typedef sender<T> predecessor_type; | | typedef sender<T> predecessor_type; | |
| | | | |
| | | | |
| skipping to change at line 130 | | skipping to change at line 136 | |
| virtual ~receiver() {} | | virtual ~receiver() {} | |
| | | | |
| //! Put an item to the receiver | | //! Put an item to the receiver | |
| virtual bool try_put( const T& t ) = 0; | | virtual bool try_put( const T& t ) = 0; | |
| | | | |
| //! Add a predecessor to the node | | //! Add a predecessor to the node | |
| virtual bool register_predecessor( predecessor_type & ) { return false; } | | virtual bool register_predecessor( predecessor_type & ) { return false; } | |
| | | | |
| //! Remove a predecessor from the node | | //! Remove a predecessor from the node | |
| virtual bool remove_predecessor( predecessor_type & ) { return false; } | | virtual bool remove_predecessor( predecessor_type & ) { return false; } | |
| | | | |
| | | protected: | |
| | | //! put receiver back in initial state | |
| | | template<typename U> friend class limiter_node; | |
| | | virtual void reset_receiver() = 0; | |
| | | template<typename TT, typename M> | |
| | | friend class internal::successor_cache; | |
| | | virtual bool is_continue_receiver() { return false; } | |
| }; | | }; | |
| | | | |
| //! Base class for receivers of completion messages | | //! Base class for receivers of completion messages | |
| /** These receivers automatically reset, but cannot be explicitly waited on */ | | /** These receivers automatically reset, but cannot be explicitly waited on */ | |
| class continue_receiver : public receiver< continue_msg > { | | class continue_receiver : public receiver< continue_msg > { | |
| public: | | public: | |
| | | | |
| //! The input type | | //! The input type | |
| typedef continue_msg input_type; | | typedef continue_msg input_type; | |
| | | | |
| | | | |
| skipping to change at line 195 | | skipping to change at line 209 | |
| } | | } | |
| execute(); | | execute(); | |
| return true; | | return true; | |
| } | | } | |
| | | | |
| protected: | | protected: | |
| spin_mutex my_mutex; | | spin_mutex my_mutex; | |
| int my_predecessor_count; | | int my_predecessor_count; | |
| int my_current_count; | | int my_current_count; | |
| int my_initial_predecessor_count; | | int my_initial_predecessor_count; | |
| | | // the friend declaration in the base class did not eliminate the "protected class" | |
| | | // error in gcc 4.1.2 | |
| | | template<typename U> friend class limiter_node; | |
| | | /*override*/void reset_receiver() { | |
| | | my_current_count = 0; | |
| | | } | |
| | | | |
| //! Does whatever should happen when the threshold is reached | | //! Does whatever should happen when the threshold is reached | |
| /** This should be very fast or else spawn a task. This is | | /** This should be very fast or else spawn a task. This is | |
| called while the sender is blocked in the try_put(). */ | | called while the sender is blocked in the try_put(). */ | |
| virtual void execute() = 0; | | virtual void execute() = 0; | |
| | | template<typename TT, typename M> | |
| | | friend class internal::successor_cache; | |
| | | /*override*/ bool is_continue_receiver() { return true; } | |
| }; | | }; | |
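A continue_receiver simply counts the continue_msg's it receives and calls execute() once the count reaches the number of registered predecessors; the reset_receiver() added above just zeroes that running count. User code normally gets this behavior through continue_node, whose input side is a continue_receiver. A small sketch, assuming C++11 and the public tbb::flow API:

    #include "tbb/flow_graph.h"
    #include <cstdio>

    using namespace tbb::flow;

    int main() {
        graph g;
        broadcast_node<continue_msg> start(g);

        continue_node<continue_msg> a(g,
            [](const continue_msg &) { std::printf("a done\n"); return continue_msg(); });
        continue_node<continue_msg> b(g,
            [](const continue_msg &) { std::printf("b done\n"); return continue_msg(); });

        // c has two predecessors, so its internal continue_receiver runs its
        // body only after both a and b have sent their completion messages.
        continue_node<continue_msg> c(g,
            [](const continue_msg &) { std::printf("c after a and b\n"); return continue_msg(); });

        make_edge(start, a);  make_edge(start, b);
        make_edge(a, c);      make_edge(b, c);

        start.try_put(continue_msg());
        g.wait_for_all();
        return 0;
    }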
| | | | |
| #include "internal/_flow_graph_impl.h" | | #include "internal/_flow_graph_impl.h" | |
| using namespace internal::graph_policy_namespace; | | using namespace internal::graph_policy_namespace; | |
| | | | |
| class graph; | | class graph; | |
| class graph_node; | | class graph_node; | |
| | | | |
| template <typename GraphContainerType, typename GraphNodeType> | | template <typename GraphContainerType, typename GraphNodeType> | |
| class graph_iterator { | | class graph_iterator { | |
| | | | |
| skipping to change at line 390 | | skipping to change at line 413 | |
| #if TBB_USE_EXCEPTIONS | | #if TBB_USE_EXCEPTIONS | |
| } | | } | |
| catch(...) { | | catch(...) { | |
| my_root_task->set_ref_count(1); | | my_root_task->set_ref_count(1); | |
| my_context->reset(); | | my_context->reset(); | |
| caught_exception = true; | | caught_exception = true; | |
| cancelled = true; | | cancelled = true; | |
| throw; | | throw; | |
| } | | } | |
| #endif | | #endif | |
| | | my_context->reset(); // consistent with behavior in catch() | |
| my_root_task->set_ref_count(1); | | my_root_task->set_ref_count(1); | |
| } | | } | |
| } | | } | |
| | | | |
| //! Returns the root task of the graph | | //! Returns the root task of the graph | |
| task * root_task() { | | task * root_task() { | |
| return my_root_task; | | return my_root_task; | |
| } | | } | |
| | | | |
| // ITERATORS | | // ITERATORS | |
| | | | |
| skipping to change at line 425 | | skipping to change at line 449 | |
| const_iterator end() const { return const_iterator(this, false); } | | const_iterator end() const { return const_iterator(this, false); } | |
| //! start const iterator | | //! start const iterator | |
| const_iterator cbegin() const { return const_iterator(this, true); } | | const_iterator cbegin() const { return const_iterator(this, true); } | |
| //! end const iterator | | //! end const iterator | |
| const_iterator cend() const { return const_iterator(this, false); } | | const_iterator cend() const { return const_iterator(this, false); } | |
| | | | |
| //! return status of graph execution | | //! return status of graph execution | |
| bool is_cancelled() { return cancelled; } | | bool is_cancelled() { return cancelled; } | |
| bool exception_thrown() { return caught_exception; } | | bool exception_thrown() { return caught_exception; } | |
| | | | |
| | | // un-thread-safe state reset. | |
| | | void reset(); | |
| | | | |
| private: | | private: | |
| task *my_root_task; | | task *my_root_task; | |
| task_group_context *my_context; | | task_group_context *my_context; | |
| bool own_context; | | bool own_context; | |
| bool cancelled; | | bool cancelled; | |
| bool caught_exception; | | bool caught_exception; | |
| | | | |
| graph_node *my_nodes, *my_nodes_last; | | graph_node *my_nodes, *my_nodes_last; | |
| | | | |
| spin_mutex nodelist_mutex; | | spin_mutex nodelist_mutex; | |
| | | | |
| skipping to change at line 478 | | skipping to change at line 505 | |
| protected: | | protected: | |
| graph& my_graph; | | graph& my_graph; | |
| graph_node *next, *prev; | | graph_node *next, *prev; | |
| public: | | public: | |
| graph_node(graph& g) : my_graph(g) { | | graph_node(graph& g) : my_graph(g) { | |
| my_graph.register_node(this); | | my_graph.register_node(this); | |
| } | | } | |
| virtual ~graph_node() { | | virtual ~graph_node() { | |
| my_graph.remove_node(this); | | my_graph.remove_node(this); | |
| } | | } | |
| | | | |
| | | protected: | |
| | | virtual void reset() = 0; | |
| }; | | }; | |
| | | | |
| inline void graph::register_node(graph_node *n) { | | inline void graph::register_node(graph_node *n) { | |
| n->next = NULL; | | n->next = NULL; | |
| { | | { | |
| spin_mutex::scoped_lock lock(nodelist_mutex); | | spin_mutex::scoped_lock lock(nodelist_mutex); | |
| n->prev = my_nodes_last; | | n->prev = my_nodes_last; | |
| if (my_nodes_last) my_nodes_last->next = n; | | if (my_nodes_last) my_nodes_last->next = n; | |
| my_nodes_last = n; | | my_nodes_last = n; | |
| if (!my_nodes) my_nodes = n; | | if (!my_nodes) my_nodes = n; | |
| | | | |
| skipping to change at line 503 | | skipping to change at line 533 | |
| spin_mutex::scoped_lock lock(nodelist_mutex); | | spin_mutex::scoped_lock lock(nodelist_mutex); | |
| __TBB_ASSERT(my_nodes && my_nodes_last, "graph::remove_node: Error: no registered nodes"); | | __TBB_ASSERT(my_nodes && my_nodes_last, "graph::remove_node: Error: no registered nodes"); | |
| if (n->prev) n->prev->next = n->next; | | if (n->prev) n->prev->next = n->next; | |
| if (n->next) n->next->prev = n->prev; | | if (n->next) n->next->prev = n->prev; | |
| if (my_nodes_last == n) my_nodes_last = n->prev; | | if (my_nodes_last == n) my_nodes_last = n->prev; | |
| if (my_nodes == n) my_nodes = n->next; | | if (my_nodes == n) my_nodes = n->next; | |
| } | | } | |
| n->prev = n->next = NULL; | | n->prev = n->next = NULL; | |
| } | | } | |
| | | | |
| | | inline void graph::reset() { | |
| | | // reset context | |
| | | if(my_context) my_context->reset(); | |
| | | cancelled = false; | |
| | | caught_exception = false; | |
| | | // reset all the nodes comprising the graph | |
| | | for(iterator ii = begin(); ii != end(); ++ii) { | |
| | | graph_node *my_p = &(*ii); | |
| | | my_p->reset(); | |
| | | } | |
| | | } | |
| | | | |
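The graph::reset() added above clears the cancellation and exception flags, resets the task_group_context, and then walks the registered node list calling each node's reset() override; as its comment says it is not thread-safe, so it should only be called while the graph is idle. A hedged usage sketch, assuming C++11 and the public API; how much buffered state each node discards depends on the per-node reset() overrides introduced elsewhere in this change:

    #include "tbb/flow_graph.h"

    using namespace tbb::flow;

    int main() {
        graph g;
        queue_node<int> q(g);
        function_node<int, int> twice(g, serial, [](int v) { return 2 * v; });
        make_edge(q, twice);

        q.try_put(1);
        q.try_put(2);
        g.wait_for_all();   // first run finishes, graph is now idle

        g.reset();          // put the graph and its nodes back in their initial state
        q.try_put(3);       // the same graph object can then be run again
        g.wait_for_all();
        return 0;
    }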
| #include "internal/_flow_graph_node_impl.h" | | #include "internal/_flow_graph_node_impl.h" | |
| | | | |
| //! An executable node that acts as a source, i.e. it has no predecessors | | //! An executable node that acts as a source, i.e. it has no predecessors | |
| template < typename Output > | | template < typename Output > | |
| class source_node : public graph_node, public sender< Output > { | | class source_node : public graph_node, public sender< Output > { | |
| using graph_node::my_graph; | | using graph_node::my_graph; | |
| public: | | public: | |
| //! The type of the output message, which is complete | | //! The type of the output message, which is complete | |
| typedef Output output_type; | | typedef Output output_type; | |
| | | | |
| | | | |
| skipping to change at line 564 | | skipping to change at line 606 | |
| | | | |
| //! Request an item from the node | | //! Request an item from the node | |
| /*override */ bool try_get( output_type &v ) { | | /*override */ bool try_get( output_type &v ) { | |
| spin_mutex::scoped_lock lock(my_mutex); | | spin_mutex::scoped_lock lock(my_mutex); | |
| if ( my_reserved ) | | if ( my_reserved ) | |
| return false; | | return false; | |
| | | | |
| if ( my_has_cached_item ) { | | if ( my_has_cached_item ) { | |
| v = my_cached_item; | | v = my_cached_item; | |
| my_has_cached_item = false; | | my_has_cached_item = false; | |
| } else if ( (*my_body)(v) == false ) { | | return true; | |
| return false; | | | |
| } | | } | |
| return true; | | return false; | |
| } | | } | |
| | | | |
| //! Reserves an item. | | //! Reserves an item. | |
| /* override */ bool try_reserve( output_type &v ) { | | /* override */ bool try_reserve( output_type &v ) { | |
| spin_mutex::scoped_lock lock(my_mutex); | | spin_mutex::scoped_lock lock(my_mutex); | |
| if ( my_reserved ) { | | if ( my_reserved ) { | |
| return false; | | return false; | |
| } | | } | |
| | | | |
|
| if ( !my_has_cached_item && (*my_body)(my_cached_item) ) | | | |
| my_has_cached_item = true; | | | |
| | | | |
| if ( my_has_cached_item ) { | | if ( my_has_cached_item ) { | |
| v = my_cached_item; | | v = my_cached_item; | |
| my_reserved = true; | | my_reserved = true; | |
| return true; | | return true; | |
| } else { | | } else { | |
| return false; | | return false; | |
| } | | } | |
| } | | } | |
| | | | |
| //! Release a reserved item. | | //! Release a reserved item. | |
| | | | |
| skipping to change at line 620 | | skipping to change at line 658 | |
| } | | } | |
| | | | |
| //! Activates a node that was created in the inactive state | | //! Activates a node that was created in the inactive state | |
| void activate() { | | void activate() { | |
| spin_mutex::scoped_lock lock(my_mutex); | | spin_mutex::scoped_lock lock(my_mutex); | |
| my_active = true; | | my_active = true; | |
| if ( !my_successors.empty() ) | | if ( !my_successors.empty() ) | |
| spawn_put(); | | spawn_put(); | |
| } | | } | |
| | | | |
| | | template<class Body> | |
| | | Body copy_function_object() { | |
| | | internal::source_body<output_type> &body_ref = *this->my_body; | |
| | | return dynamic_cast< internal::source_body_leaf<output_type, Body> | |
| | | & >(body_ref).get_body(); | |
| | | } | |
| | | | |
| | | protected: | |
| | | | |
| | | //! resets the node to its initial state | |
| | | void reset() { | |
| | | my_active = init_my_active; | |
| | | my_reserved =false; | |
| | | if(my_has_cached_item) { | |
| | | my_has_cached_item = false; | |
| | | } | |
| | | } | |
| | | | |
| private: | | private: | |
| task *my_root_task; | | task *my_root_task; | |
| spin_mutex my_mutex; | | spin_mutex my_mutex; | |
| bool my_active; | | bool my_active; | |
| bool init_my_active; | | bool init_my_active; | |
| internal::source_body<output_type> *my_body; | | internal::source_body<output_type> *my_body; | |
| internal::broadcast_cache< output_type > my_successors; | | internal::broadcast_cache< output_type > my_successors; | |
| bool my_reserved; | | bool my_reserved; | |
| bool my_has_cached_item; | | bool my_has_cached_item; | |
| output_type my_cached_item; | | output_type my_cached_item; | |
| | | | |
| friend class internal::source_task< source_node< output_type > >; | | friend class internal::source_task< source_node< output_type > >; | |
| | | | |
| | | // used by apply_body, can invoke body of node. | |
| | | | |
| | | bool try_reserve_apply_body(output_type &v) { | |
| | | spin_mutex::scoped_lock lock(my_mutex); | |
| | | if ( my_reserved ) { | |
| | | return false; | |
| | | } | |
| | | | |
| | | if ( !my_has_cached_item && (*my_body)(my_cached_item) ) | |
| | | my_has_cached_item = true; | |
| | | | |
| | | if ( my_has_cached_item ) { | |
| | | v = my_cached_item; | |
| | | my_reserved = true; | |
| | | return true; | |
| | | } else { | |
| | | return false; | |
| | | } | |
| | | } | |
| | | | |
| //! Applies the body | | //! Applies the body | |
| /* override */ void apply_body( ) { | | /* override */ void apply_body( ) { | |
| output_type v; | | output_type v; | |
| if ( try_reserve(v) == false ) | | if ( !try_reserve_apply_body(v) ) | |
| return; | | return; | |
| | | | |
| if ( my_successors.try_put( v ) ) | | if ( my_successors.try_put( v ) ) | |
| try_consume(); | | try_consume(); | |
| else | | else | |
| try_release(); | | try_release(); | |
| } | | } | |
| | | | |
| //! Spawns a task that applies the body | | //! Spawns a task that applies the body | |
| /* override */ void spawn_put( ) { | | /* override */ void spawn_put( ) { | |
| task::enqueue( * new ( task::allocate_additional_child_of( *my_root_task ) ) | | task::enqueue( * new ( task::allocate_additional_child_of( *my_root_task ) ) | |
| internal::source_task< source_node< output_type > >( *this ) ); | | internal::source_task< source_node< output_type > >( *this ) ); | |
| } | | } | |
| }; | | }; // source_node | |
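The try_reserve_apply_body() and copy_function_object() additions above separate "run the user body" from "hand out the cached item", so the new reset() can drop a cached item and restore the initial active state without invoking the body again. From user code a source_node is driven the same way as before; a sketch assuming the usual public constructor source_node(graph &, body, is_active):

    #include "tbb/flow_graph.h"
    #include <cstdio>

    using namespace tbb::flow;

    // source_node body: fill 'out' and return true while there is more data.
    struct counter_body {
        int next;
        counter_body() : next(0) {}
        bool operator()(int &out) {
            if (next >= 5) return false;   // no more items to emit
            out = next++;
            return true;
        }
    };

    int main() {
        graph g;
        source_node<int> src(g, counter_body(), false);   // created inactive
        function_node<int, continue_msg> sink(g, unlimited,
            [](int v) { std::printf("%d\n", v); return continue_msg(); });
        make_edge(src, sink);

        src.activate();      // start emitting 0 .. 4
        g.wait_for_all();
        return 0;
    }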
| | | | |
| //! Implements a function node that supports Input -> Output | | //! Implements a function node that supports Input -> Output | |
| template < typename Input, typename Output = continue_msg, graph_buffer_policy = queueing, typename Allocator=cache_aligned_allocator<Input> > | | template < typename Input, typename Output = continue_msg, graph_buffer_policy = queueing, typename Allocator=cache_aligned_allocator<Input> > | |
| class function_node : public graph_node, public internal::function_input<Input,Output,Allocator>, public internal::function_output<Output> { | | class function_node : public graph_node, public internal::function_input<Input,Output,Allocator>, public internal::function_output<Output> { | |
| using graph_node::my_graph; | | using graph_node::my_graph; | |
| public: | | public: | |
| typedef Input input_type; | | typedef Input input_type; | |
| typedef Output output_type; | | typedef Output output_type; | |
| typedef sender< input_type > predecessor_type; | | typedef sender< input_type > predecessor_type; | |
| typedef receiver< output_type > successor_type; | | typedef receiver< output_type > successor_type; | |
| | | | |
| skipping to change at line 679 | | skipping to change at line 754 | |
| | | | |
| //! Copy constructor | | //! Copy constructor | |
| function_node( const function_node& src ) : | | function_node( const function_node& src ) : | |
| graph_node(src.my_graph), internal::function_input<input_type,output_type,Allocator>( src ), | | graph_node(src.my_graph), internal::function_input<input_type,output_type,Allocator>( src ), | |
| fOutput_type() | | fOutput_type() | |
| {} | | {} | |
| | | | |
| bool try_put(const input_type &i) { return fInput_type::try_put(i); } | | bool try_put(const input_type &i) { return fInput_type::try_put(i); } | |
| | | | |
| protected: | | protected: | |
| | | | |
| | | // override of graph_node's reset. | |
| | | /*override*/void reset() {fInput_type::reset_function_input(); } | |
| | | | |
| /* override */ internal::broadcast_cache<output_type> &successors () { return fOutput_type::my_successors; } | | /* override */ internal::broadcast_cache<output_type> &successors () { return fOutput_type::my_successors; } | |
| }; | | }; | |
| | | | |
| //! Implements a function node that supports Input -> Output | | //! Implements a function node that supports Input -> Output | |
| template < typename Input, typename Output, typename Allocator > | | template < typename Input, typename Output, typename Allocator > | |
| class function_node<Input,Output,queueing,Allocator> : public graph_node, public internal::function_input<Input,Output,Allocator>, public internal::function_output<Output> { | | class function_node<Input,Output,queueing,Allocator> : public graph_node, public internal::function_input<Input,Output,Allocator>, public internal::function_output<Output> { | |
| using graph_node::my_graph; | | using graph_node::my_graph; | |
| public: | | public: | |
| typedef Input input_type; | | typedef Input input_type; | |
| typedef Output output_type; | | typedef Output output_type; | |
| | | | |
| skipping to change at line 709 | | skipping to change at line 788 | |
| {} | | {} | |
| | | | |
| //! Copy constructor | | //! Copy constructor | |
| function_node( const function_node& src ) : | | function_node( const function_node& src ) : | |
| graph_node(src.my_graph), fInput_type( src, new queue_type() ), fOutput_type() | | graph_node(src.my_graph), fInput_type( src, new queue_type() ), fOutput_type() | |
| {} | | {} | |
| | | | |
| bool try_put(const input_type &i) { return fInput_type::try_put(i); } | | bool try_put(const input_type &i) { return fInput_type::try_put(i); } | |
| | | | |
| protected: | | protected: | |
| | | | |
| | | /*override*/void reset() { fInput_type::reset_function_input(); } | |
| | | | |
| /* override */ internal::broadcast_cache<output_type> &successors () { return fOutput_type::my_successors; } | | /* override */ internal::broadcast_cache<output_type> &successors () { return fOutput_type::my_successors; } | |
| }; | | }; | |
| | | | |
| #include "tbb/internal/_flow_graph_types_impl.h" | | #include "tbb/internal/_flow_graph_types_impl.h" | |
| | | | |
| //! implements a function node that supports Input -> (set of outputs) | | //! implements a function node that supports Input -> (set of outputs) | |
| // Output is a tuple of output types. | | // Output is a tuple of output types. | |
| template < typename Input, typename Output, graph_buffer_policy = queueing, typename Allocator=cache_aligned_allocator<Input> > | | template < typename Input, typename Output, graph_buffer_policy = queueing, typename Allocator=cache_aligned_allocator<Input> > | |
| class multifunction_node : | | class multifunction_node : | |
| public graph_node, | | public graph_node, | |
| | | | |
| skipping to change at line 747 | | skipping to change at line 829 | |
| typedef typename internal::function_input_queue<input_type,Allocator> queue_type; | | typedef typename internal::function_input_queue<input_type,Allocator> queue_type; | |
| public: | | public: | |
| template<typename Body> | | template<typename Body> | |
| multifunction_node( graph &g, size_t concurrency, Body body ) : | | multifunction_node( graph &g, size_t concurrency, Body body ) : | |
| graph_node(g), base_type(g,concurrency, body) | | graph_node(g), base_type(g,concurrency, body) | |
| {} | | {} | |
| multifunction_node( const multifunction_node &other) : | | multifunction_node( const multifunction_node &other) : | |
| graph_node(other.my_graph), base_type(other) | | graph_node(other.my_graph), base_type(other) | |
| {} | | {} | |
| // all the guts are in multifunction_input... | | // all the guts are in multifunction_input... | |
| | | protected: | |
| | | /*override*/void reset() { base_type::reset(); } | |
| }; // multifunction_node | | }; // multifunction_node | |
| | | | |
| template < typename Input, typename Output, typename Allocator > | | template < typename Input, typename Output, typename Allocator > | |
| class multifunction_node<Input,Output,queueing,Allocator> : public graph_node, public internal::multifunction_input<Input, | | class multifunction_node<Input,Output,queueing,Allocator> : public graph_node, public internal::multifunction_input<Input, | |
| typename internal::wrap_tuple_elements<std::tuple_size<Output>::value, internal::function_output, Output>::type, Allocator> { | | typename internal::wrap_tuple_elements<std::tuple_size<Output>::value, internal::function_output, Output>::type, Allocator> { | |
| using graph_node::my_graph; | | using graph_node::my_graph; | |
| static const int N = std::tuple_size<Output>::value; | | static const int N = std::tuple_size<Output>::value; | |
| public: | | public: | |
| typedef Input input_type; | | typedef Input input_type; | |
| typedef typename internal::wrap_tuple_elements<N, internal::function_output, Output>::type output_ports_type; | | typedef typename internal::wrap_tuple_elements<N, internal::function_output, Output>::type output_ports_type; | |
| | | | |
| skipping to change at line 768 | | skipping to change at line 852 | |
| typedef typename internal::multifunction_input<input_type, output_ports_type, Allocator> base_type; | | typedef typename internal::multifunction_input<input_type, output_ports_type, Allocator> base_type; | |
| typedef typename internal::function_input_queue<input_type,Allocator> queue_type; | | typedef typename internal::function_input_queue<input_type,Allocator> queue_type; | |
| public: | | public: | |
| template<typename Body> | | template<typename Body> | |
| multifunction_node( graph &g, size_t concurrency, Body body) : | | multifunction_node( graph &g, size_t concurrency, Body body) : | |
| graph_node(g), base_type(g,concurrency, body, new queue_type()) | | graph_node(g), base_type(g,concurrency, body, new queue_type()) | |
| {} | | {} | |
| multifunction_node( const multifunction_node &other) : | | multifunction_node( const multifunction_node &other) : | |
| graph_node(other.my_graph), base_type(other, new queue_type()) | | graph_node(other.my_graph), base_type(other, new queue_type()) | |
| {} | | {} | |
| | | // all the guts are in multifunction_input... | |
| | | protected: | |
| | | /*override*/void reset() { base_type::reset(); } | |
| }; // multifunction_node | | }; // multifunction_node | |
| | | | |
| //! split_node: accepts a tuple as input, forwards each element of the tuple to its | | //! split_node: accepts a tuple as input, forwards each element of the tuple to its | |
| // successors. The node has unlimited concurrency, so though it is marked as | | // successors. The node has unlimited concurrency, so though it is marked as | |
| // "rejecting" it does not reject inputs. | | // "rejecting" it does not reject inputs. | |
| template<typename TupleType, typename Allocator=cache_aligned_allocator<TupleType> > | | template<typename TupleType, typename Allocator=cache_aligned_allocator<TupleType> > | |
| class split_node : public multifunction_node<TupleType, TupleType, rejecting, Allocator> { | | class split_node : public multifunction_node<TupleType, TupleType, rejecting, Allocator> { | |
| static const int N = std::tuple_size<TupleType>::value; | | static const int N = std::tuple_size<TupleType>::value; | |
| typedef multifunction_node<TupleType,TupleType,rejecting,Allocator> base_type; | | typedef multifunction_node<TupleType,TupleType,rejecting,Allocator> base_type; | |
| public: | | public: | |
| | | | |
| skipping to change at line 824 | | skipping to change at line 911 | |
| | | | |
| //! Copy constructor | | //! Copy constructor | |
| continue_node( const continue_node& src ) : | | continue_node( const continue_node& src ) : | |
| graph_node(src.my_graph), internal::continue_input<output_type>(src), | | graph_node(src.my_graph), internal::continue_input<output_type>(src), | |
| internal::function_output<Output>() | | internal::function_output<Output>() | |
| {} | | {} | |
| | | | |
| bool try_put(const input_type &i) { return internal::continue_input<Output>::try_put(i); } | | bool try_put(const input_type &i) { return internal::continue_input<Output>::try_put(i); } | |
| | | | |
| protected: | | protected: | |
| | | /*override*/void reset() { internal::continue_input<Output>::reset_receiver(); } | |
| | | | |
| /* override */ internal::broadcast_cache<output_type> &successors () { return fOutput_type::my_successors; } | | /* override */ internal::broadcast_cache<output_type> &successors () { return fOutput_type::my_successors; } | |
| }; | | }; | |
| | | | |
| template< typename T > | | template< typename T > | |
| class overwrite_node : public graph_node, public receiver<T>, public sender<T> { | | class overwrite_node : public graph_node, public receiver<T>, public sender<T> { | |
| using graph_node::my_graph; | | using graph_node::my_graph; | |
| public: | | public: | |
| typedef T input_type; | | typedef T input_type; | |
| typedef T output_type; | | typedef T output_type; | |
| typedef sender< input_type > predecessor_type; | | typedef sender< input_type > predecessor_type; | |
| | | | |
| skipping to change at line 903 | | skipping to change at line 992 | |
| spin_mutex::scoped_lock l( my_mutex ); | | spin_mutex::scoped_lock l( my_mutex ); | |
| return my_buffer_is_valid; | | return my_buffer_is_valid; | |
| } | | } | |
| | | | |
| void clear() { | | void clear() { | |
| spin_mutex::scoped_lock l( my_mutex ); | | spin_mutex::scoped_lock l( my_mutex ); | |
| my_buffer_is_valid = false; | | my_buffer_is_valid = false; | |
| } | | } | |
| | | | |
| protected: | | protected: | |
| | | | |
| | | /*override*/void reset() { my_buffer_is_valid = false; } | |
| | | | |
| spin_mutex my_mutex; | | spin_mutex my_mutex; | |
| internal::broadcast_cache< T, null_rw_mutex > my_successors; | | internal::broadcast_cache< T, null_rw_mutex > my_successors; | |
| T my_buffer; | | T my_buffer; | |
| bool my_buffer_is_valid; | | bool my_buffer_is_valid; | |
| | | /*override*/void reset_receiver() {} | |
| }; | | }; | |
| | | | |
| template< typename T > | | template< typename T > | |
| class write_once_node : public overwrite_node<T> { | | class write_once_node : public overwrite_node<T> { | |
| public: | | public: | |
| typedef T input_type; | | typedef T input_type; | |
| typedef T output_type; | | typedef T output_type; | |
| typedef sender< input_type > predecessor_type; | | typedef sender< input_type > predecessor_type; | |
| typedef receiver< output_type > successor_type; | | typedef receiver< output_type > successor_type; | |
| | | | |
| | | | |
| skipping to change at line 974 | | skipping to change at line 1067 | |
| //! Removes s as a successor | | //! Removes s as a successor | |
| virtual bool remove_successor( receiver<T> &r ) { | | virtual bool remove_successor( receiver<T> &r ) { | |
| my_successors.remove_successor( r ); | | my_successors.remove_successor( r ); | |
| return true; | | return true; | |
| } | | } | |
| | | | |
| /* override */ bool try_put( const T &t ) { | | /* override */ bool try_put( const T &t ) { | |
| my_successors.try_put(t); | | my_successors.try_put(t); | |
| return true; | | return true; | |
| } | | } | |
| | | protected: | |
| | | /*override*/void reset() {} | |
| | | /*override*/void reset_receiver() {} | |
| }; | | }; | |
| | | | |
| #include "internal/_flow_graph_item_buffer_impl.h" | | #include "internal/_flow_graph_item_buffer_impl.h" | |
| | | | |
| //! Forwards messages in arbitrary order | | //! Forwards messages in arbitrary order | |
| template <typename T, typename A=cache_aligned_allocator<T> > | | template <typename T, typename A=cache_aligned_allocator<T> > | |
| class buffer_node : public graph_node, public reservable_item_buffer<T, A>, public receiver<T>, public sender<T> { | | class buffer_node : public graph_node, public reservable_item_buffer<T, A>, public receiver<T>, public sender<T> { | |
| using graph_node::my_graph; | | using graph_node::my_graph; | |
| public: | | public: | |
| typedef T input_type; | | typedef T input_type; | |
| | | | |
| skipping to change at line 1201 | | skipping to change at line 1297 | |
| return true; | | return true; | |
| } | | } | |
| | | | |
| //! Receive an item | | //! Receive an item | |
| /** true is always returned */ | | /** true is always returned */ | |
| /* override */ bool try_put(const T &t) { | | /* override */ bool try_put(const T &t) { | |
| buffer_operation op_data(t, put_item); | | buffer_operation op_data(t, put_item); | |
| my_aggregator.execute(&op_data); | | my_aggregator.execute(&op_data); | |
| return true; | | return true; | |
| } | | } | |
| | | | |
| | | protected: | |
| | | | |
| | | /*override*/void reset() { | |
| | | reservable_item_buffer<T, A>::reset(); | |
| | | forwarder_busy = false; | |
| | | } | |
| | | | |
| | | /*override*/void reset_receiver() { | |
| | | // nothing to do; no predecessor_cache | |
| | | } | |
| | | | |
| }; | | }; | |
| | | | |
| //! Forwards messages in FIFO order | | //! Forwards messages in FIFO order | |
| template <typename T, typename A=cache_aligned_allocator<T> > | | template <typename T, typename A=cache_aligned_allocator<T> > | |
| class queue_node : public buffer_node<T, A> { | | class queue_node : public buffer_node<T, A> { | |
| protected: | | protected: | |
| typedef typename buffer_node<T, A>::size_type size_type; | | typedef typename buffer_node<T, A>::size_type size_type; | |
| typedef typename buffer_node<T, A>::buffer_operation queue_operation; | | typedef typename buffer_node<T, A>::buffer_operation queue_operation; | |
| | | | |
| enum op_stat {WAIT=0, SUCCEEDED, FAILED}; | | enum op_stat {WAIT=0, SUCCEEDED, FAILED}; | |
| | | | |
| skipping to change at line 1324 | | skipping to change at line 1432 | |
| __TBB_store_with_release(op->status, SUCCEEDED); | | __TBB_store_with_release(op->status, SUCCEEDED); | |
| } | | } | |
| }; | | }; | |
| | | | |
| //! Forwards messages in priority order | | //! Forwards messages in priority order | |
| template< typename T, typename Compare = std::less<T>, typename A=cache_aligned_allocator<T> > | | template< typename T, typename Compare = std::less<T>, typename A=cache_aligned_allocator<T> > | |
| class priority_queue_node : public buffer_node<T, A> { | | class priority_queue_node : public buffer_node<T, A> { | |
| public: | | public: | |
| typedef T input_type; | | typedef T input_type; | |
| typedef T output_type; | | typedef T output_type; | |
| | | typedef buffer_node<T,A> base_type; | |
| typedef sender< input_type > predecessor_type; | | typedef sender< input_type > predecessor_type; | |
| typedef receiver< output_type > successor_type; | | typedef receiver< output_type > successor_type; | |
| | | | |
| //! Constructor | | //! Constructor | |
| priority_queue_node( graph &g ) : buffer_node<T, A>(g), mark(0) {} | | priority_queue_node( graph &g ) : buffer_node<T, A>(g), mark(0) {} | |
| | | | |
| //! Copy constructor | | //! Copy constructor | |
| priority_queue_node( const priority_queue_node &src ) : buffer_node<T, A>(src), mark(0) {} | | priority_queue_node( const priority_queue_node &src ) : buffer_node<T, A>(src), mark(0) {} | |
| | | | |
| protected: | | protected: | |
| | | | |
| | | /*override*/void reset() { | |
| | | mark = 0; | |
| | | base_type::reset(); | |
| | | } | |
| | | | |
| typedef typename buffer_node<T, A>::size_type size_type; | | typedef typename buffer_node<T, A>::size_type size_type; | |
| typedef typename buffer_node<T, A>::item_type item_type; | | typedef typename buffer_node<T, A>::item_type item_type; | |
| typedef typename buffer_node<T, A>::buffer_operation prio_operation; | | typedef typename buffer_node<T, A>::buffer_operation prio_operation; | |
| | | | |
| enum op_stat {WAIT=0, SUCCEEDED, FAILED}; | | enum op_stat {WAIT=0, SUCCEEDED, FAILED}; | |
| | | | |
| /* override */ void handle_operations(prio_operation *op_list) { | | /* override */ void handle_operations(prio_operation *op_list) { | |
| prio_operation *tmp /*, *pop_list*/ ; | | prio_operation *tmp /*, *pop_list*/ ; | |
| bool try_forwarding=false; | | bool try_forwarding=false; | |
| while (op_list) { | | while (op_list) { | |
| | | | |
| skipping to change at line 1630 | | skipping to change at line 1745 | |
| task::enqueue( * new ( task::allocate_additional_child_of( *my_root_task ) ) | | task::enqueue( * new ( task::allocate_additional_child_of( *my_root_task ) ) | |
| internal::forward_task< limiter_node<T> >( *this ) ); | | internal::forward_task< limiter_node<T> >( *this ) ); | |
| return true; | | return true; | |
| } | | } | |
| | | | |
| //! Removes src from the list of cached predecessors. | | //! Removes src from the list of cached predecessors. | |
| /* override */ bool remove_predecessor( predecessor_type &src ) { | | /* override */ bool remove_predecessor( predecessor_type &src ) { | |
| my_predecessors.remove( src ); | | my_predecessors.remove( src ); | |
| return true; | | return true; | |
| } | | } | |
| | | | |
| | | protected: | |
| | | | |
| | | /*override*/void reset() { | |
| | | my_count = 0; | |
| | | my_predecessors.reset(); | |
| | | decrement.reset_receiver(); | |
| | | } | |
| | | | |
| | | /*override*/void reset_receiver() { my_predecessors.reset(); } | |
| }; | | }; | |
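The limiter_node reset() above clears my_count, the predecessor cache, and the embedded decrement receiver (the continue_receiver whose reset_receiver() appears earlier in this change). In normal use that decrement port is how downstream completion re-opens the limiter. A hedged sketch assuming the public limiter_node API, with a buffering queue_node in front so that items rejected while the limit is reached can be pulled later rather than lost:

    #include "tbb/flow_graph.h"
    #include <cstdio>

    using namespace tbb::flow;

    int main() {
        graph g;
        queue_node<int> q(g);            // buffers items the limiter rejects
        limiter_node<int> limit(g, 2);   // at most 2 items in flight downstream

        function_node<int, continue_msg> work(g, unlimited, [](int v) {
            std::printf("processing %d\n", v);
            return continue_msg();       // completion signal
        });

        make_edge(q, limit);
        make_edge(limit, work);
        make_edge(work, limit.decrement);   // each completion admits one more item

        for (int i = 0; i < 10; ++i) q.try_put(i);
        g.wait_for_all();
        return 0;
    }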
| | | | |
| #include "internal/_flow_graph_join_impl.h" | | #include "internal/_flow_graph_join_impl.h" | |
| | | | |
| using internal::reserving_port; | | using internal::reserving_port; | |
| using internal::queueing_port; | | using internal::queueing_port; | |
| using internal::tag_matching_port; | | using internal::tag_matching_port; | |
| using internal::input_port; | | using internal::input_port; | |
| using internal::tag_value; | | using internal::tag_value; | |
| using internal::NO_TAG; | | using internal::NO_TAG; | |
| | | | |
End of changes. 30 change blocks. 12 lines changed or deleted, 141 lines changed or added.
| parallel_reduce.h | | parallel_reduce.h | |
| | | | |
| skipping to change at line 177 | | skipping to change at line 177 | |
| } else __TBB_ASSERT(my_context==root_task,NULL);// because left leaf spawns right leafs without recycling | | } else __TBB_ASSERT(my_context==root_task,NULL);// because left leaf spawns right leafs without recycling | |
| my_partition.execute(*this, my_range); | | my_partition.execute(*this, my_range); | |
| if( my_context==left_child ) { | | if( my_context==left_child ) { | |
| finish_type* parent_ptr = static_cast<finish_type*>(parent()); | | finish_type* parent_ptr = static_cast<finish_type*>(parent()); | |
| __TBB_ASSERT(my_body!=parent_ptr->zombie_space.begin(),NULL); | | __TBB_ASSERT(my_body!=parent_ptr->zombie_space.begin(),NULL); | |
| itt_store_word_with_release(parent_ptr->my_body, my_body ); | | itt_store_word_with_release(parent_ptr->my_body, my_body ); | |
| } | | } | |
| return NULL; | | return NULL; | |
| } | | } | |
| | | | |
| #if TBB_PREVIEW_DETERMINISTIC_REDUCE | | | |
| //! Task type used to combine the partial results of parallel_deterministic_reduce. | | //! Task type used to combine the partial results of parallel_deterministic_reduce. | |
| /** @ingroup algorithms */ | | /** @ingroup algorithms */ | |
| template<typename Body> | | template<typename Body> | |
| class finish_deterministic_reduce: public task { | | class finish_deterministic_reduce: public task { | |
| Body &my_left_body; | | Body &my_left_body; | |
| Body my_right_body; | | Body my_right_body; | |
| | | | |
| finish_deterministic_reduce( Body &body ) : | | finish_deterministic_reduce( Body &body ) : | |
| my_left_body( body ), | | my_left_body( body ), | |
| my_right_body( body, split() ) | | my_right_body( body, split() ) | |
| | | | |
| skipping to change at line 256 | | skipping to change at line 255 | |
| return NULL; | | return NULL; | |
| } else { | | } else { | |
| finish_type& c = *new( allocate_continuation() ) finish_type( m
y_body ); | | finish_type& c = *new( allocate_continuation() ) finish_type( m
y_body ); | |
| recycle_as_child_of(c); | | recycle_as_child_of(c); | |
| c.set_ref_count(2); | | c.set_ref_count(2); | |
| start_deterministic_reduce& b = *new( c.allocate_child() ) star
t_deterministic_reduce( *this, c ); | | start_deterministic_reduce& b = *new( c.allocate_child() ) star
t_deterministic_reduce( *this, c ); | |
| task::spawn(b); | | task::spawn(b); | |
| return this; | | return this; | |
| } | | } | |
| } | | } | |
|
| #endif /* TBB_PREVIEW_DETERMINISTIC_REDUCE */ | | | |
| } // namespace internal | | } // namespace internal | |
| //! @endcond | | //! @endcond | |
| } //namespace interfaceX | | } //namespace interfaceX | |
| | | | |
| //! @cond INTERNAL | | //! @cond INTERNAL | |
| namespace internal { | | namespace internal { | |
| using interface6::internal::start_reduce; | | using interface6::internal::start_reduce; | |
|
| #if TBB_PREVIEW_DETERMINISTIC_REDUCE | | | |
| using interface6::internal::start_deterministic_reduce; | | using interface6::internal::start_deterministic_reduce; | |
|
| #endif | | | |
| //! Auxiliary class for parallel_reduce; for internal use only. | | //! Auxiliary class for parallel_reduce; for internal use only. | |
| /** The adaptor class that implements \ref parallel_reduce_body_req "pa
rallel_reduce Body" | | /** The adaptor class that implements \ref parallel_reduce_body_req "pa
rallel_reduce Body" | |
| using given \ref parallel_reduce_lambda_req "anonymous function obj
ects". | | using given \ref parallel_reduce_lambda_req "anonymous function obj
ects". | |
| **/ | | **/ | |
| /** @ingroup algorithms */ | | /** @ingroup algorithms */ | |
| template<typename Range, typename Value, typename RealBody, typename Re
duction> | | template<typename Range, typename Value, typename RealBody, typename Re
duction> | |
| class lambda_reduce_body { | | class lambda_reduce_body { | |
| | | | |
| //FIXME: decide if my_real_body, my_reduction, and identity_element should
be copied or referenced | | //FIXME: decide if my_real_body, my_reduction, and identity_element should
be copied or referenced | |
| // (might require some performance measurements) | | // (might require some performance measurements) | |
| | | | |
| skipping to change at line 469 | | skipping to change at line 465 | |
| template<typename Range, typename Value, typename RealBody, typename Reduct
ion> | | template<typename Range, typename Value, typename RealBody, typename Reduct
ion> | |
| Value parallel_reduce( const Range& range, const Value& identity, const Rea
lBody& real_body, const Reduction& reduction, | | Value parallel_reduce( const Range& range, const Value& identity, const Rea
lBody& real_body, const Reduction& reduction, | |
| affinity_partitioner& partitioner, task_group_contex
t& context ) { | | affinity_partitioner& partitioner, task_group_contex
t& context ) { | |
| internal::lambda_reduce_body<Range,Value,RealBody,Reduction> body(ident
ity, real_body, reduction); | | internal::lambda_reduce_body<Range,Value,RealBody,Reduction> body(ident
ity, real_body, reduction); | |
| internal::start_reduce<Range,internal::lambda_reduce_body<Range,Value,R
ealBody,Reduction>,affinity_partitioner> | | internal::start_reduce<Range,internal::lambda_reduce_body<Range,Value,R
ealBody,Reduction>,affinity_partitioner> | |
| ::run( range, body, partitioner, co
ntext ); | | ::run( range, body, partitioner, co
ntext ); | |
| return body.result(); | | return body.result(); | |
| } | | } | |
| #endif /* __TBB_TASK_GROUP_CONTEXT */ | | #endif /* __TBB_TASK_GROUP_CONTEXT */ | |
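The overloads above are the functional form of parallel_reduce: the user-supplied identity, partial-reduction callable, and combiner are wrapped in lambda_reduce_body and handed to start_reduce. As a hedged usage sketch (the vector name data is illustrative), summing a sequence with this form might look like:

    // Illustrative sketch of the lambda form of tbb::parallel_reduce.
    #include <functional>
    #include <vector>
    #include "tbb/parallel_reduce.h"
    #include "tbb/blocked_range.h"

    double parallel_sum( const std::vector<double>& data ) {
        return tbb::parallel_reduce(
            tbb::blocked_range<size_t>( 0, data.size() ),
            0.0,                                            // identity element
            [&]( const tbb::blocked_range<size_t>& r, double running ) {
                for( size_t i = r.begin(); i != r.end(); ++i )
                    running += data[i];
                return running;                             // partial result for this subrange
            },
            std::plus<double>() );                          // combines two partial results
    }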
| | | | |
|
| #if TBB_PREVIEW_DETERMINISTIC_REDUCE | | | |
| //! Parallel iteration with deterministic reduction and default partitioner
. | | //! Parallel iteration with deterministic reduction and default partitioner
. | |
| /** @ingroup algorithms **/ | | /** @ingroup algorithms **/ | |
| template<typename Range, typename Body> | | template<typename Range, typename Body> | |
| void parallel_deterministic_reduce( const Range& range, Body& body ) { | | void parallel_deterministic_reduce( const Range& range, Body& body ) { | |
| internal::start_deterministic_reduce<Range,Body>::run( range, body ); | | internal::start_deterministic_reduce<Range,Body>::run( range, body ); | |
| } | | } | |
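With the TBB_PREVIEW_DETERMINISTIC_REDUCE guards removed in the new revision, this entry point is compiled unconditionally. Below is a hedged sketch of the imperative (Body) form; SumBody is a hypothetical type written against the parallel_reduce Body requirements (splitting constructor, operator(), join), not something defined by the library.

    // Illustrative sketch only; SumBody is hypothetical.
    #include "tbb/parallel_reduce.h"
    #include "tbb/blocked_range.h"

    struct SumBody {
        const double* my_data;
        double my_sum;
        SumBody( const double* data ) : my_data( data ), my_sum( 0 ) {}
        SumBody( SumBody& other, tbb::split ) : my_data( other.my_data ), my_sum( 0 ) {}
        void operator()( const tbb::blocked_range<size_t>& r ) {
            for( size_t i = r.begin(); i != r.end(); ++i )
                my_sum += my_data[i];
        }
        void join( SumBody& rhs ) { my_sum += rhs.my_sum; }   // partial results combined in a fixed order
    };

    double deterministic_sum( const double* data, size_t n ) {
        SumBody body( data );
        // Deterministic splitting/joining makes floating-point results reproducible run to run.
        tbb::parallel_deterministic_reduce( tbb::blocked_range<size_t>( 0, n ), body );
        return body.my_sum;
    }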
| | | | |
| #if __TBB_TASK_GROUP_CONTEXT | | #if __TBB_TASK_GROUP_CONTEXT | |
| //! Parallel iteration with deterministic reduction, simple partitioner and
user-supplied context. | | //! Parallel iteration with deterministic reduction, simple partitioner and
user-supplied context. | |
| /** @ingroup algorithms **/ | | /** @ingroup algorithms **/ | |
| | | | |
| skipping to change at line 511 | | skipping to change at line 506 | |
| /** @ingroup algorithms **/ | | /** @ingroup algorithms **/ | |
| template<typename Range, typename Value, typename RealBody, typename Reduct
ion> | | template<typename Range, typename Value, typename RealBody, typename Reduct
ion> | |
| Value parallel_deterministic_reduce( const Range& range, const Value& ident
ity, const RealBody& real_body, const Reduction& reduction, | | Value parallel_deterministic_reduce( const Range& range, const Value& ident
ity, const RealBody& real_body, const Reduction& reduction, | |
| task_group_context& context ) { | | task_group_context& context ) { | |
| internal::lambda_reduce_body<Range,Value,RealBody,Reduction> body(ident
ity, real_body, reduction); | | internal::lambda_reduce_body<Range,Value,RealBody,Reduction> body(ident
ity, real_body, reduction); | |
| internal::start_deterministic_reduce<Range,internal::lambda_reduce_body
<Range,Value,RealBody,Reduction> > | | internal::start_deterministic_reduce<Range,internal::lambda_reduce_body
<Range,Value,RealBody,Reduction> > | |
| ::run( range, body, context ); | | ::run( range, body, context ); | |
| return body.result(); | | return body.result(); | |
| } | | } | |
| #endif /* __TBB_TASK_GROUP_CONTEXT */ | | #endif /* __TBB_TASK_GROUP_CONTEXT */ | |
|
| #endif /* TBB_PREVIEW_DETERMINISTIC_REDUCE */ | | | |
| //@} | | //@} | |
| | | | |
| } // namespace tbb | | } // namespace tbb | |
| | | | |
| #endif /* __TBB_parallel_reduce_H */ | | #endif /* __TBB_parallel_reduce_H */ | |
| | | | |
End of changes. 6 change blocks. |
| 6 lines changed or deleted | | 0 lines changed or added | |
|
| tbb_config.h | | tbb_config.h | |
| | | | |
| skipping to change at line 42 | | skipping to change at line 42 | |
| /** This header is supposed to contain macro definitions and C style commen
ts only. | | /** This header is supposed to contain macro definitions and C style commen
ts only. | |
| The macros defined here are intended to control such aspects of TBB bui
ld as | | The macros defined here are intended to control such aspects of TBB bui
ld as | |
| - presence of compiler features | | - presence of compiler features | |
| - compilation modes | | - compilation modes | |
| - feature sets | | - feature sets | |
| - known compiler/platform issues | | - known compiler/platform issues | |
| **/ | | **/ | |
| | | | |
| #define __TBB_GCC_VERSION (__GNUC__ * 10000 + __GNUC_MINOR__ * 100 + __GNUC
_PATCHLEVEL__) | | #define __TBB_GCC_VERSION (__GNUC__ * 10000 + __GNUC_MINOR__ * 100 + __GNUC
_PATCHLEVEL__) | |
| #if __clang__ | | #if __clang__ | |
|
| #define __TBB_CLANG_VERSION (__clang_major__ * 10000 + __clang_minor__ * 10
0 + __clang_patchlevel__) | | #define __TBB_CLANG_VERSION (__clang_major__ * 10000 + __clang_minor__
* 100 + __clang_patchlevel__) | |
| #endif | | #endif | |
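Both macros pack the three version components into a single integer so that compiler versions can be compared with one arithmetic test. A worked example (illustrative, not taken from the header):

    /* GCC 4.6.3   -> __TBB_GCC_VERSION   = 4*10000 + 6*100 + 3 = 40603
       clang 3.1.0 -> __TBB_CLANG_VERSION = 3*10000 + 1*100 + 0 = 30100
       so a check such as (__TBB_GCC_VERSION >= 40400) reads "GCC 4.4 or newer". */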
| | | | |
| /** Presence of compiler features **/ | | /** Presence of compiler features **/ | |
| | | | |
|
| | | #if __INTEL_COMPILER == 9999 && __INTEL_COMPILER_BUILD_DATE == 20110811 | |
| | | /* Intel Composer XE 2011 Update 6 incorrectly sets __INTEL_COMPILER. Fix i | |
| | | t. */ | |
| | | #undef __INTEL_COMPILER | |
| | | #define __INTEL_COMPILER 1210 | |
| | | #endif | |
| | | | |
| #if (__TBB_GCC_VERSION >= 40400) && !defined(__INTEL_COMPILER) | | #if (__TBB_GCC_VERSION >= 40400) && !defined(__INTEL_COMPILER) | |
| /** warning suppression pragmas available in GCC since 4.4 **/ | | /** warning suppression pragmas available in GCC since 4.4 **/ | |
| #define __TBB_GCC_WARNING_SUPPRESSION_PRESENT 1 | | #define __TBB_GCC_WARNING_SUPPRESSION_PRESENT 1 | |
| #endif | | #endif | |
| | | | |
| /* Select particular features of C++11 based on compiler version. | | /* Select particular features of C++11 based on compiler version. | |
| ICC 12.1 (Linux), GCC 4.3 and higher, clang 2.9 and higher | | ICC 12.1 (Linux), GCC 4.3 and higher, clang 2.9 and higher | |
| set __GXX_EXPERIMENTAL_CXX0X__ in c++11 mode. | | set __GXX_EXPERIMENTAL_CXX0X__ in c++11 mode. | |
| | | | |
| Compilers that mimic other compilers (ICC, clang) must be processed before | | Compilers that mimic other compilers (ICC, clang) must be processed before | |
|
| compilers they mimic. | | compilers they mimic (GCC, MSVC). | |
| | | | |
| TODO: The following conditions should be extended when new compilers/run
times | | TODO: The following conditions should be extended when new compilers/run
times | |
| support added. | | support added. | |
| */ | | */ | |
| | | | |
| #if __INTEL_COMPILER | | #if __INTEL_COMPILER | |
|
| | | /** In a Windows environment, when the Intel C++ compiler is used with Visual Studio 2010*, | |
| | | the C++0x features supported by Visual C++ 2010 are enabled by default. | |
| | | TODO: find a way to detect whether c++0x mode is specified on the command line on Windows **/ | |
| #define __TBB_CPP11_VARIADIC_TEMPLATES_PRESENT __GXX_EXPERIMENTAL_CXX0X
__ && __VARIADIC_TEMPLATES | | #define __TBB_CPP11_VARIADIC_TEMPLATES_PRESENT __GXX_EXPERIMENTAL_CXX0X
__ && __VARIADIC_TEMPLATES | |
|
| #define __TBB_CPP11_RVALUE_REF_PRESENT (__GXX_EXPERIMENTAL_CXX0X__ || _ | | #define __TBB_CPP11_RVALUE_REF_PRESENT (__GXX_EXPERIMENTAL_CXX0 | |
| MSC_VER >= 1600) && (__INTEL_COMPILER >= 1200) | | X__ || _MSC_VER >= 1600) && (__INTEL_COMPILER >= 1200) | |
| #define __TBB_EXCEPTION_PTR_PRESENT 0 | | #if _MSC_VER >= 1600 | |
| | | #define __TBB_EXCEPTION_PTR_PRESENT __INTEL_COMPILER > 1300 | |
| | | \ | |
| | | /*ICC 12.1 Upd 10 and 13 | |
| | | beta Upd 2 fixed exception_ptr linking issue*/ \ | |
| | | || (__INTEL_COMPILER == | |
| | | 1300 && __INTEL_COMPILER_BUILD_DATE >= 20120530) \ | |
| | | || (__INTEL_COMPILER == | |
| | | 1210 && __INTEL_COMPILER_BUILD_DATE >= 20120410) | |
| | | /** libstdc++ that comes with GCC 4.6 uses C++ features not yet supported | |
| | | by the current ICC (12.1) **/ | |
| | | #elif (__TBB_GCC_VERSION >= 40404) && (__TBB_GCC_VERSION < 40600) | |
| | | #define __TBB_EXCEPTION_PTR_PRESENT __GXX_EXPERIMENTAL_CXX0X | |
| | | __ && __INTEL_COMPILER >= 1200 | |
| | | #else | |
| | | #define __TBB_EXCEPTION_PTR_PRESENT 0 | |
| | | #endif | |
| | | #define __TBB_MAKE_EXCEPTION_PTR_PRESENT (_MSC_VER >= 1700 || (__ | |
| | | GXX_EXPERIMENTAL_CXX0X__ && __TBB_GCC_VERSION >= 40600)) | |
| | | #define __TBB_CPP11_TUPLE_PRESENT (_MSC_VER >= 1600) || (( | |
| | | __GXX_EXPERIMENTAL_CXX0X__) && (__TBB_GCC_VERSION >= 40300)) | |
| #elif __clang__ | | #elif __clang__ | |
|
| | | //TODO: these options need to be rechecked | |
| #define __TBB_CPP11_VARIADIC_TEMPLATES_PRESENT (__GXX_EXPERIMENTAL_CXX0
X__ && __TBB_CLANG_VERSION >= 20900) | | #define __TBB_CPP11_VARIADIC_TEMPLATES_PRESENT (__GXX_EXPERIMENTAL_CXX0
X__ && __TBB_CLANG_VERSION >= 20900) | |
|
| #define __TBB_CPP11_RVALUE_REF_PRESENT (__GXX_EXPERIMENTAL_CXX0X__ && _ | | #define __TBB_CPP11_RVALUE_REF_PRESENT (__GXX_EXPERIMENTAL_CXX0 | |
| _TBB_CLANG_VERSION >= 20900) | | X__ && __TBB_CLANG_VERSION >= 20900) | |
| #define __TBB_EXCEPTION_PTR_PRESENT __GXX_EXPERIMENTAL_CXX0X__ | | #define __TBB_EXCEPTION_PTR_PRESENT __GXX_EXPERIMENTAL_CXX0X | |
| | | __ | |
| | | #define __TBB_MAKE_EXCEPTION_PTR_PRESENT (__GXX_EXPERIMENTAL_CXX0 | |
| | | X__ && __TBB_CLANG_VERSION > 30100)// TODO: check version | |
| | | #define __TBB_CPP11_TUPLE_PRESENT ((__GXX_EXPERIMENTAL_CXX | |
| | | 0X__) && (__TBB_GCC_VERSION >= 40300)) | |
| #elif __GNUC__ | | #elif __GNUC__ | |
| #define __TBB_CPP11_VARIADIC_TEMPLATES_PRESENT __GXX_EXPERIMENTAL_CXX0X
__ | | #define __TBB_CPP11_VARIADIC_TEMPLATES_PRESENT __GXX_EXPERIMENTAL_CXX0X
__ | |
|
| #define __TBB_CPP11_RVALUE_REF_PRESENT __GXX_EXPERIMENTAL_CXX0X__ | | #define __TBB_CPP11_RVALUE_REF_PRESENT __GXX_EXPERIMENTAL_CXX0X | |
| #define __TBB_EXCEPTION_PTR_PRESENT __GXX_EXPERIMENTAL_CXX0X__ | | __ | |
| | | /** __GCC_HAVE_SYNC_COMPARE_AND_SWAP_4 here is a substitution for _GLIB | |
| | | CXX_ATOMIC_BUILTINS_4, which is a prerequisite | |
| | | for exception_ptr but cannot be used in this file because it is def | |
| | | ined in a header, not by the compiler. | |
| | | If the compiler has no atomic intrinsics, the C++ library should not | |
| | | expect them either. **/ | |
| | | #define __TBB_EXCEPTION_PTR_PRESENT (__GXX_EXPERIMENTAL_CXX0 | |
| | | X__ && (__TBB_GCC_VERSION >= 40404) && __GCC_HAVE_SYNC_COMPARE_AND_SWAP_4) | |
| | | #define __TBB_MAKE_EXCEPTION_PTR_PRESENT (__GXX_EXPERIMENTAL_CXX0 | |
| | | X__ && __TBB_GCC_VERSION >= 40600) | |
| | | #define __TBB_CPP11_TUPLE_PRESENT ((__GXX_EXPERIMENTAL_CXX | |
| | | 0X__) && (__TBB_GCC_VERSION >= 40300)) | |
| #elif _MSC_VER | | #elif _MSC_VER | |
| #define __TBB_CPP11_VARIADIC_TEMPLATES_PRESENT 0 | | #define __TBB_CPP11_VARIADIC_TEMPLATES_PRESENT 0 | |
|
| #define __TBB_CPP11_RVALUE_REF_PRESENT 0 | | #define __TBB_CPP11_RVALUE_REF_PRESENT 0 | |
| #define __TBB_EXCEPTION_PTR_PRESENT (_MSC_VER >= 1600) | | #define __TBB_EXCEPTION_PTR_PRESENT (_MSC_VER >= 1600) | |
| | | #define __TBB_MAKE_EXCEPTION_PTR_PRESENT (_MSC_VER >= 1700) | |
| | | #define __TBB_CPP11_TUPLE_PRESENT (_MSC_VER >= 1600) | |
| #else | | #else | |
| #define __TBB_CPP11_VARIADIC_TEMPLATES_PRESENT 0 | | #define __TBB_CPP11_VARIADIC_TEMPLATES_PRESENT 0 | |
|
| #define __TBB_CPP11_RVALUE_REF_PRESENT 0 | | #define __TBB_CPP11_RVALUE_REF_PRESENT 0 | |
| #define __TBB_EXCEPTION_PTR_PRESENT 0 | | #define __TBB_EXCEPTION_PTR_PRESENT 0 | |
| | | #define __TBB_MAKE_EXCEPTION_PTR_PRESENT 0 | |
| | | #define __TBB_CPP11_TUPLE_PRESENT 0 | |
| | | #endif | |
| | | | |
| | | //TODO: not clear how exactly this macro affects exception_ptr - investigat | |
| | | e | |
| | | // On linux ICC fails to find existing std::exception_ptr in libstdc++ with | |
| | | out this define | |
| | | #if __INTEL_COMPILER && __GNUC__ && __TBB_EXCEPTION_PTR_PRESENT && !defined | |
| | | (__GCC_HAVE_SYNC_COMPARE_AND_SWAP_4) | |
| | | #define __GCC_HAVE_SYNC_COMPARE_AND_SWAP_4 1 | |
| #endif | | #endif | |
| | | | |
| // Work around a bug in MinGW32 | | // Work around a bug in MinGW32 | |
| #if __MINGW32__ && __TBB_EXCEPTION_PTR_PRESENT && !defined(_GLIBCXX_ATOMIC_
BUILTINS_4) | | #if __MINGW32__ && __TBB_EXCEPTION_PTR_PRESENT && !defined(_GLIBCXX_ATOMIC_
BUILTINS_4) | |
| #define _GLIBCXX_ATOMIC_BUILTINS_4 | | #define _GLIBCXX_ATOMIC_BUILTINS_4 | |
| #endif | | #endif | |
| | | | |
| #if __GNUC__ || __SUNPRO_CC || __IBMCPP__ | | #if __GNUC__ || __SUNPRO_CC || __IBMCPP__ | |
| /* ICC defines __GNUC__ and so is covered */ | | /* ICC defines __GNUC__ and so is covered */ | |
| #define __TBB_ATTRIBUTE_ALIGNED_PRESENT 1 | | #define __TBB_ATTRIBUTE_ALIGNED_PRESENT 1 | |
| | | | |
| skipping to change at line 170 | | skipping to change at line 209 | |
| | | | |
| #ifndef TBB_IMPLEMENT_CPP0X | | #ifndef TBB_IMPLEMENT_CPP0X | |
| /** By default, use C++0x classes if available **/ | | /** By default, use C++0x classes if available **/ | |
| #if __GNUC__==4 && __GNUC_MINOR__>=4 && __GXX_EXPERIMENTAL_CXX0X__ | | #if __GNUC__==4 && __GNUC_MINOR__>=4 && __GXX_EXPERIMENTAL_CXX0X__ | |
| #define TBB_IMPLEMENT_CPP0X 0 | | #define TBB_IMPLEMENT_CPP0X 0 | |
| #else | | #else | |
| #define TBB_IMPLEMENT_CPP0X 1 | | #define TBB_IMPLEMENT_CPP0X 1 | |
| #endif | | #endif | |
| #endif /* TBB_IMPLEMENT_CPP0X */ | | #endif /* TBB_IMPLEMENT_CPP0X */ | |
| | | | |
|
| | | /* TBB_USE_CAPTURED_EXCEPTION should be explicitly set to either 0 or 1, as
it is used as C++ const */ | |
| #ifndef TBB_USE_CAPTURED_EXCEPTION | | #ifndef TBB_USE_CAPTURED_EXCEPTION | |
|
| #if __TBB_EXCEPTION_PTR_PRESENT | | /** linux pre-built TBB binary does not support exception_ptr. **/ | |
| | | #if __TBB_EXCEPTION_PTR_PRESENT && !defined(__GNUC__) | |
| #define TBB_USE_CAPTURED_EXCEPTION 0 | | #define TBB_USE_CAPTURED_EXCEPTION 0 | |
| #else | | #else | |
| #define TBB_USE_CAPTURED_EXCEPTION 1 | | #define TBB_USE_CAPTURED_EXCEPTION 1 | |
| #endif | | #endif | |
| #else /* defined TBB_USE_CAPTURED_EXCEPTION */ | | #else /* defined TBB_USE_CAPTURED_EXCEPTION */ | |
| #if !TBB_USE_CAPTURED_EXCEPTION && !__TBB_EXCEPTION_PTR_PRESENT | | #if !TBB_USE_CAPTURED_EXCEPTION && !__TBB_EXCEPTION_PTR_PRESENT | |
| #error Current runtime does not support std::exception_ptr. Set TBB
_USE_CAPTURED_EXCEPTION and make sure that your code is ready to catch tbb:
:captured_exception. | | #error Current runtime does not support std::exception_ptr. Set TBB
_USE_CAPTURED_EXCEPTION and make sure that your code is ready to catch tbb:
:captured_exception. | |
| #endif | | #endif | |
| #endif /* defined TBB_USE_CAPTURED_EXCEPTION */ | | #endif /* defined TBB_USE_CAPTURED_EXCEPTION */ | |
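TBB_USE_CAPTURED_EXCEPTION chooses between exact exception propagation through std::exception_ptr and the approximate tbb::captured_exception, which carries only the name and what() text of the original exception. A hedged catch-site sketch, assuming tbb::captured_exception from tbb/tbb_exception.h and TBB_USE_EXCEPTIONS enabled:

    // Illustrative sketch of handling both propagation modes.
    #include <iostream>
    #include <stdexcept>
    #include "tbb/tbb_exception.h"
    #include "tbb/parallel_for.h"
    #include "tbb/blocked_range.h"

    void run_loop() {
        try {
            tbb::parallel_for( tbb::blocked_range<int>( 0, 1000 ),
                               []( const tbb::blocked_range<int>& ) {
                                   throw std::runtime_error( "worker failure" );
                               } );
        } catch( tbb::captured_exception& e ) {
            // Approximate propagation: the original type is lost, name/what survive.
            std::cerr << e.name() << ": " << e.what() << std::endl;
        } catch( std::exception& e ) {
            // Exact propagation: the original exception type is rethrown.
            std::cerr << e.what() << std::endl;
        }
    }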
| | | | |
| | | | |
| skipping to change at line 213 | | skipping to change at line 254 | |
| #endif | | #endif | |
| | | | |
| #ifndef __TBB_COUNT_TASK_NODES | | #ifndef __TBB_COUNT_TASK_NODES | |
| #define __TBB_COUNT_TASK_NODES TBB_USE_ASSERT | | #define __TBB_COUNT_TASK_NODES TBB_USE_ASSERT | |
| #endif | | #endif | |
| | | | |
| #ifndef __TBB_TASK_GROUP_CONTEXT | | #ifndef __TBB_TASK_GROUP_CONTEXT | |
| #define __TBB_TASK_GROUP_CONTEXT 1 | | #define __TBB_TASK_GROUP_CONTEXT 1 | |
| #endif /* __TBB_TASK_GROUP_CONTEXT */ | | #endif /* __TBB_TASK_GROUP_CONTEXT */ | |
| | | | |
|
| #if TBB_USE_EXCEPTIONS && !__TBB_TASK_GROUP_CONTEXT | | | |
| #error TBB_USE_EXCEPTIONS requires __TBB_TASK_GROUP_CONTEXT to be enabl | | | |
| ed | | | |
| #endif | | | |
| | | | |
| #ifndef __TBB_SCHEDULER_OBSERVER | | #ifndef __TBB_SCHEDULER_OBSERVER | |
| #define __TBB_SCHEDULER_OBSERVER 1 | | #define __TBB_SCHEDULER_OBSERVER 1 | |
| #endif /* __TBB_SCHEDULER_OBSERVER */ | | #endif /* __TBB_SCHEDULER_OBSERVER */ | |
| | | | |
|
| | | #if !defined(TBB_PREVIEW_TASK_ARENA) && __TBB_BUILD | |
| | | #define TBB_PREVIEW_TASK_ARENA __TBB_CPF_BUILD | |
| | | #endif /* TBB_PREVIEW_TASK_ARENA */ | |
| | | #define __TBB_TASK_ARENA TBB_PREVIEW_TASK_ARENA | |
| | | #if TBB_PREVIEW_TASK_ARENA | |
| | | #define TBB_PREVIEW_LOCAL_OBSERVER 1 | |
| | | #define __TBB_NO_IMPLICIT_LINKAGE 1 | |
| | | #define __TBB_TASK_PRIORITY 0 // TODO: it will be removed in next versi | |
| | | ons | |
| | | #if !__TBB_SCHEDULER_OBSERVER | |
| | | #error TBB_PREVIEW_TASK_ARENA requires __TBB_SCHEDULER_OBSERVER to | |
| | | be enabled | |
| | | #endif | |
| | | #endif /* TBB_PREVIEW_TASK_ARENA */ | |
| | | | |
| | | #if !defined(TBB_PREVIEW_LOCAL_OBSERVER) && __TBB_BUILD && __TBB_SCHEDULER_ | |
| | | OBSERVER | |
| | | #define TBB_PREVIEW_LOCAL_OBSERVER 1 | |
| | | #endif /* TBB_PREVIEW_LOCAL_OBSERVER */ | |
| | | | |
| | | #if TBB_USE_EXCEPTIONS && !__TBB_TASK_GROUP_CONTEXT | |
| | | #error TBB_USE_EXCEPTIONS requires __TBB_TASK_GROUP_CONTEXT to be enabl | |
| | | ed | |
| | | #endif | |
| | | | |
| #ifndef __TBB_TASK_PRIORITY | | #ifndef __TBB_TASK_PRIORITY | |
| #define __TBB_TASK_PRIORITY __TBB_TASK_GROUP_CONTEXT | | #define __TBB_TASK_PRIORITY __TBB_TASK_GROUP_CONTEXT | |
| #endif /* __TBB_TASK_PRIORITY */ | | #endif /* __TBB_TASK_PRIORITY */ | |
| | | | |
| #if __TBB_TASK_PRIORITY && !__TBB_TASK_GROUP_CONTEXT | | #if __TBB_TASK_PRIORITY && !__TBB_TASK_GROUP_CONTEXT | |
| #error __TBB_TASK_PRIORITY requires __TBB_TASK_GROUP_CONTEXT to be enab
led | | #error __TBB_TASK_PRIORITY requires __TBB_TASK_GROUP_CONTEXT to be enab
led | |
| #endif | | #endif | |
| | | | |
|
| #if !defined(__TBB_SURVIVE_THREAD_SWITCH) && (_WIN32 || _WIN64 || __linux__ | | #if !defined(__TBB_SURVIVE_THREAD_SWITCH) && \ | |
| ) | | (_WIN32 || _WIN64 || __APPLE__ || __linux__) | |
| #define __TBB_SURVIVE_THREAD_SWITCH 1 | | #define __TBB_SURVIVE_THREAD_SWITCH 1 | |
| #endif /* __TBB_SURVIVE_THREAD_SWITCH */ | | #endif /* __TBB_SURVIVE_THREAD_SWITCH */ | |
| | | | |
| #ifndef __TBB_DEFAULT_PARTITIONER | | #ifndef __TBB_DEFAULT_PARTITIONER | |
| #if TBB_DEPRECATED | | #if TBB_DEPRECATED | |
| /** Default partitioner for parallel loop templates in TBB 1.0-2.1 */ | | /** Default partitioner for parallel loop templates in TBB 1.0-2.1 */ | |
| #define __TBB_DEFAULT_PARTITIONER tbb::simple_partitioner | | #define __TBB_DEFAULT_PARTITIONER tbb::simple_partitioner | |
| #else | | #else | |
| /** Default partitioner for parallel loop templates since TBB 2.2 */ | | /** Default partitioner for parallel loop templates since TBB 2.2 */ | |
| #define __TBB_DEFAULT_PARTITIONER tbb::auto_partitioner | | #define __TBB_DEFAULT_PARTITIONER tbb::auto_partitioner | |
| #endif /* TBB_DEPRECATED */ | | #endif /* TBB_DEPRECATED */ | |
| #endif /* !defined(__TBB_DEFAULT_PARTITIONER */ | | #endif /* !defined(__TBB_DEFAULT_PARTITIONER */ | |
| | | | |
| #ifdef _VARIADIC_MAX | | #ifdef _VARIADIC_MAX | |
| #define __TBB_VARIADIC_MAX _VARIADIC_MAX | | #define __TBB_VARIADIC_MAX _VARIADIC_MAX | |
| #else | | #else | |
| #if _MSC_VER >= 1700 | | #if _MSC_VER >= 1700 | |
|
| #define __TBB_VARIADIC_MAX 5 // current VS11 setting, may change. | | #define __TBB_VARIADIC_MAX 5 /* current VS11 setting, may change. */ | |
| #else | | #else | |
| #define __TBB_VARIADIC_MAX 10 | | #define __TBB_VARIADIC_MAX 10 | |
| #endif | | #endif | |
| #endif | | #endif | |
| | | | |
| /** Macros of the form __TBB_XXX_BROKEN denote known issues that are caused
by | | /** Macros of the form __TBB_XXX_BROKEN denote known issues that are caused
by | |
| the bugs in compilers, standard or OS specific libraries. They should b
e | | the bugs in compilers, standard or OS specific libraries. They should b
e | |
| removed as soon as the corresponding bugs are fixed or the buggy OS/com
piler | | removed as soon as the corresponding bugs are fixed or the buggy OS/com
piler | |
| versions go out of the support list. | | versions go out of the support list. | |
| **/ | | **/ | |
| | | | |
| skipping to change at line 277 | | skipping to change at line 336 | |
| #define __TBB_DEFAULT_DTOR_THROW_SPEC_BROKEN 1 | | #define __TBB_DEFAULT_DTOR_THROW_SPEC_BROKEN 1 | |
| #endif | | #endif | |
| | | | |
| #if defined(_MSC_VER) && _MSC_VER < 1500 && !defined(__INTEL_COMPILER) | | #if defined(_MSC_VER) && _MSC_VER < 1500 && !defined(__INTEL_COMPILER) | |
| /** VS2005 and earlier do not allow declaring template class as a frien
d | | /** VS2005 and earlier do not allow declaring template class as a frien
d | |
| of classes defined in other namespaces. **/ | | of classes defined in other namespaces. **/ | |
| #define __TBB_TEMPLATE_FRIENDS_BROKEN 1 | | #define __TBB_TEMPLATE_FRIENDS_BROKEN 1 | |
| #endif | | #endif | |
| | | | |
| #if __GLIBC__==2 && __GLIBC_MINOR__==3 || __MINGW32__ || (__APPLE__ && __IN
TEL_COMPILER==1200 && !TBB_USE_DEBUG) | | #if __GLIBC__==2 && __GLIBC_MINOR__==3 || __MINGW32__ || (__APPLE__ && __IN
TEL_COMPILER==1200 && !TBB_USE_DEBUG) | |
|
| //! Macro controlling EH usages in TBB tests | | /** Macro controlling EH usages in TBB tests. | |
| /** Some older versions of glibc crash when exception handling happens | | Some older versions of glibc crash when exception handling happens | |
| concurrently. **/ | | concurrently. **/ | |
| #define __TBB_THROW_ACROSS_MODULE_BOUNDARY_BROKEN 1 | | #define __TBB_THROW_ACROSS_MODULE_BOUNDARY_BROKEN 1 | |
|
| | | #else | |
| | | #define __TBB_THROW_ACROSS_MODULE_BOUNDARY_BROKEN 0 | |
| #endif | | #endif | |
| | | | |
| #if (_WIN32||_WIN64) && __INTEL_COMPILER == 1110 | | #if (_WIN32||_WIN64) && __INTEL_COMPILER == 1110 | |
| /** That's a bug in Intel compiler 11.1.044/IA-32/Windows, that leads t
o a worker thread crash on the thread's startup. **/ | | /** That's a bug in Intel compiler 11.1.044/IA-32/Windows, that leads t
o a worker thread crash on the thread's startup. **/ | |
| #define __TBB_ICL_11_1_CODE_GEN_BROKEN 1 | | #define __TBB_ICL_11_1_CODE_GEN_BROKEN 1 | |
| #endif | | #endif | |
| | | | |
| #if __clang__ || (__GNUC__==3 && __GNUC_MINOR__==3 && !defined(__INTEL_COMP
ILER)) | | #if __clang__ || (__GNUC__==3 && __GNUC_MINOR__==3 && !defined(__INTEL_COMP
ILER)) | |
| /** Bugs with access to nested classes declared in protected area */ | | /** Bugs with access to nested classes declared in protected area */ | |
| #define __TBB_PROTECTED_NESTED_CLASS_BROKEN 1 | | #define __TBB_PROTECTED_NESTED_CLASS_BROKEN 1 | |
| #endif | | #endif | |
| | | | |
| #if __MINGW32__ && (__GNUC__<4 || __GNUC__==4 && __GNUC_MINOR__<2) | | #if __MINGW32__ && (__GNUC__<4 || __GNUC__==4 && __GNUC_MINOR__<2) | |
| /** MinGW has a bug with stack alignment for routines invoked from MS R
TLs. | | /** MinGW has a bug with stack alignment for routines invoked from MS R
TLs. | |
| Since GCC 4.2, the bug can be worked around via a special attribute
. **/ | | Since GCC 4.2, the bug can be worked around via a special attribute
. **/ | |
| #define __TBB_SSE_STACK_ALIGNMENT_BROKEN 1 | | #define __TBB_SSE_STACK_ALIGNMENT_BROKEN 1 | |
|
| | | #else | |
| | | #define __TBB_SSE_STACK_ALIGNMENT_BROKEN 0 | |
| #endif | | #endif | |
| | | | |
| #if __GNUC__==4 && __GNUC_MINOR__==3 && __GNUC_PATCHLEVEL__==0 | | #if __GNUC__==4 && __GNUC_MINOR__==3 && __GNUC_PATCHLEVEL__==0 | |
|
| // GCC of this version may rashly ignore control dependencies | | /* GCC of this version may rashly ignore control dependencies */ | |
| #define __TBB_GCC_OPTIMIZER_ORDERING_BROKEN 1 | | #define __TBB_GCC_OPTIMIZER_ORDERING_BROKEN 1 | |
| #endif | | #endif | |
| | | | |
| #if __FreeBSD__ | | #if __FreeBSD__ | |
| /** A bug in FreeBSD 8.0 results in kernel panic when there is contenti
on | | /** A bug in FreeBSD 8.0 results in kernel panic when there is contenti
on | |
| on a mutex created with this attribute. **/ | | on a mutex created with this attribute. **/ | |
| #define __TBB_PRIO_INHERIT_BROKEN 1 | | #define __TBB_PRIO_INHERIT_BROKEN 1 | |
| | | | |
| /** A bug in FreeBSD 8.0 results in test hanging when an exception occu
rs | | /** A bug in FreeBSD 8.0 results in test hanging when an exception occu
rs | |
| during (concurrent?) object construction by means of placement new
operator. **/ | | during (concurrent?) object construction by means of placement new
operator. **/ | |
| | | | |
| skipping to change at line 336 | | skipping to change at line 399 | |
| #define __TBB_CPP11_STD_FORWARD_BROKEN 1 | | #define __TBB_CPP11_STD_FORWARD_BROKEN 1 | |
| #else | | #else | |
| #define __TBB_CPP11_STD_FORWARD_BROKEN 0 | | #define __TBB_CPP11_STD_FORWARD_BROKEN 0 | |
| #endif | | #endif | |
| | | | |
| #if __TBB_DEFINE_MIC | | #if __TBB_DEFINE_MIC | |
| /** Main thread and user's thread have different default thread affinit
y masks. **/ | | /** Main thread and user's thread have different default thread affinit
y masks. **/ | |
| #define __TBB_MAIN_THREAD_AFFINITY_BROKEN 1 | | #define __TBB_MAIN_THREAD_AFFINITY_BROKEN 1 | |
| #endif | | #endif | |
| | | | |
|
| | | #if !defined(__EXCEPTIONS) && __GNUC__==4 && (__GNUC_MINOR__==4 ||__GNUC_MI | |
| | | NOR__==5) && defined(__GXX_EXPERIMENTAL_CXX0X__) | |
| | | /* There is an issue with specific GCC toolchains when C++11 is enabled | |
| | | and exceptions are disabled: | |
| | | exception_ptr.h/nested_exception.h use throw unconditionally. | |
| | | */ | |
| | | #define __TBB_LIBSTDCPP_EXCEPTION_HEADERS_BROKEN 1 | |
| | | #else | |
| | | #define __TBB_LIBSTDCPP_EXCEPTION_HEADERS_BROKEN 0 | |
| | | #endif | |
| | | | |
| #endif /* __TBB_tbb_config_H */ | | #endif /* __TBB_tbb_config_H */ | |
| | | | |
End of changes. 21 change blocks. |
| 27 lines changed or deleted | | 128 lines changed or added | |
|