_concurrent_unordered_impl.h   _concurrent_unordered_impl.h 
skipping to change at line 61 skipping to change at line 61
#if !TBB_USE_EXCEPTIONS && _MSC_VER #if !TBB_USE_EXCEPTIONS && _MSC_VER
#pragma warning (pop) #pragma warning (pop)
#endif #endif
#include "../atomic.h" #include "../atomic.h"
#include "../tbb_exception.h" #include "../tbb_exception.h"
#include "../tbb_allocator.h" #include "../tbb_allocator.h"
#include "tbb/atomic.h" #include "tbb/atomic.h"
#if __TBB_INITIALIZER_LISTS_PRESENT
#include <initializer_list>
#endif
namespace tbb { namespace tbb {
namespace interface5 { namespace interface5 {
//! @cond INTERNAL //! @cond INTERNAL
namespace internal { namespace internal {
template <typename T, typename Allocator> template <typename T, typename Allocator>
class split_ordered_list; class split_ordered_list;
template <typename Traits> template <typename Traits>
class concurrent_unordered_base; class concurrent_unordered_base;
skipping to change at line 719 skipping to change at line 723
internal_init(); internal_init();
internal_copy(right); internal_copy(right);
} }
concurrent_unordered_base& operator=(const concurrent_unordered_base& right) { concurrent_unordered_base& operator=(const concurrent_unordered_base& right) {
if (this != &right) if (this != &right)
internal_copy(right); internal_copy(right);
return (*this); return (*this);
} }
#if __TBB_INITIALIZER_LISTS_PRESENT
//! assignment operator from initializer_list
concurrent_unordered_base& operator=(std::initializer_list<value_type> const& il)
{
this->clear();
this->insert(il.begin(),il.end());
return (*this);
}
#endif //# __TBB_INITIALIZER_LISTS_PRESENT
~concurrent_unordered_base() { ~concurrent_unordered_base() {
// Delete all node segments // Delete all node segments
internal_clear(); internal_clear();
} }
public: public:
allocator_type get_allocator() const { allocator_type get_allocator() const {
return my_solist.get_allocator(); return my_solist.get_allocator();
} }
 End of changes. 2 change blocks. 
0 lines changed or deleted 15 lines changed or added
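The new assignment operator is implemented as clear() followed by insert(il.begin(), il.end()), so like the copy assignment above it, it is not meant to run concurrently with other operations on the container. A minimal usage sketch through the public concurrent_unordered_map wrapper (hypothetical example, assuming a compiler for which __TBB_INITIALIZER_LISTS_PRESENT is set):

    #include "tbb/concurrent_unordered_map.h"
    #include <cassert>

    int main() {
        tbb::concurrent_unordered_map<int, int> m;
        m[7] = 70;
    #if __TBB_INITIALIZER_LISTS_PRESENT
        m = { {1, 10}, {2, 20} };   // clear() + insert(il.begin(), il.end())
        assert(m.size() == 2);      // the old element with key 7 is gone
    #endif
        return 0;
    }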


 _flow_graph_join_impl.h   _flow_graph_join_impl.h 
skipping to change at line 692 skipping to change at line 692
++ports_with_no_inputs; ++ports_with_no_inputs;
} }
// if all input_ports have predecessors, spawn forward to try and consume tuples // if all input_ports have predecessors, spawn forward to try and consume tuples
task * decrement_port_count(bool handle_task) { task * decrement_port_count(bool handle_task) {
if(ports_with_no_inputs.fetch_and_decrement() == 1) { if(ports_with_no_inputs.fetch_and_decrement() == 1) {
task *rtask = new ( task::allocate_additional_child_of( *(this->my_root_task) ) ) task *rtask = new ( task::allocate_additional_child_of( *(this->my_root_task) ) )
forward_task_bypass forward_task_bypass
<my_node_type>(*my_node); <my_node_type>(*my_node);
if(!handle_task) return rtask; if(!handle_task) return rtask;
task::enqueue(*rtask); FLOW_SPAWN(*rtask);
} }
return NULL; return NULL;
} }
input_type &input_ports() { return my_inputs; } input_type &input_ports() { return my_inputs; }
protected: protected:
void reset() { void reset() {
// called outside of parallel contexts // called outside of parallel contexts
skipping to change at line 763 skipping to change at line 763
} }
// if all input_ports have items, spawn forward to try and consume tuples // if all input_ports have items, spawn forward to try and consume tuples
task * decrement_port_count(bool handle_task) task * decrement_port_count(bool handle_task)
{ {
if(ports_with_no_items.fetch_and_decrement() == 1) { if(ports_with_no_items.fetch_and_decrement() == 1) {
task *rtask = new ( task::allocate_additional_child_of( *(this->my_root_task) ) ) task *rtask = new ( task::allocate_additional_child_of( *(this->my_root_task) ) )
forward_task_bypass forward_task_bypass
<my_node_type>(*my_node); <my_node_type>(*my_node);
if(!handle_task) return rtask; if(!handle_task) return rtask;
task::enqueue( *rtask); FLOW_SPAWN( *rtask);
} }
return NULL; return NULL;
} }
void increment_port_count() { __TBB_ASSERT(false, NULL); } // should never be called void increment_port_count() { __TBB_ASSERT(false, NULL); } // should never be called
input_type &input_ports() { return my_inputs; } input_type &input_ports() { return my_inputs; }
protected: protected:
skipping to change at line 862 skipping to change at line 862
task *rtask = NULL; task *rtask = NULL;
bool do_fwd = should_enqueue && this->buffer_empty(); bool do_fwd = should_enqueue && this->buffer_empty();
while(find_value_tag(this->current_tag,N)) { // while there are completed items while(find_value_tag(this->current_tag,N)) { // while there are completed items
this->tagged_delete(this->current_tag); // remove the tag this->tagged_delete(this->current_tag); // remove the tag
if(join_helper<N>::get_items(my_inputs, l_out)) { // <== call back if(join_helper<N>::get_items(my_inputs, l_out)) { // <== call back
this->push_back(l_out); this->push_back(l_out);
if(do_fwd) { // we enqueue if receiving an item from predecessor, not if successor asks for item if(do_fwd) { // we enqueue if receiving an item from predecessor, not if successor asks for item
rtask = new ( task::allocate_additional_child_of( *(this->my_root_task) ) ) rtask = new ( task::allocate_additional_child_of( *(this->my_root_task) ) )
forward_task_bypass<my_node_type>(*my_node); forward_task_bypass<my_node_type>(*my_node);
if(handle_task) { if(handle_task) {
task::enqueue(*rtask); FLOW_SPAWN(*rtask);
rtask = NULL; rtask = NULL;
} }
do_fwd = false; do_fwd = false;
} }
// retire the input values // retire the input values
join_helper<N>::reset_ports(my_inputs); // <== call back join_helper<N>::reset_ports(my_inputs); // <== call back
this->current_tag = NO_TAG; this->current_tag = NO_TAG;
} }
else { else {
__TBB_ASSERT(false, "should have had something to push"); __TBB_ASSERT(false, "should have had something to push");
skipping to change at line 1061 skipping to change at line 1061
while(op_list) { while(op_list) {
current = op_list; current = op_list;
op_list = op_list->next; op_list = op_list->next;
switch(current->type) { switch(current->type) {
case reg_succ: case reg_succ:
my_successors.register_successor(*(current->my_succ)); my_successors.register_successor(*(current->my_succ));
if(tuple_build_may_succeed() && !forwarder_busy) { if(tuple_build_may_succeed() && !forwarder_busy) {
task *rtask = new ( task::allocate_additional_child_of(*(this->my_root_task)) ) task *rtask = new ( task::allocate_additional_child_of(*(this->my_root_task)) )
forward_task_bypass forward_task_bypass
<join_node_base<JP,InputTuple,OutputTuple> >(*this); <join_node_base<JP,InputTuple,OutputTuple> >(*this);
task::enqueue(*rtask); FLOW_SPAWN(*rtask);
forwarder_busy = true; forwarder_busy = true;
} }
__TBB_store_with_release(current->status, SUCCEEDED); __TBB_store_with_release(current->status, SUCCEEDED);
break; break;
case rem_succ: case rem_succ:
my_successors.remove_successor(*(current->my_succ)); my_successors.remove_successor(*(current->my_succ));
__TBB_store_with_release(current->status, SUCCEEDED); __TBB_store_with_release(current->status, SUCCEEDED);
break; break;
case try__get: case try__get:
if(tuple_build_may_succeed()) { if(tuple_build_may_succeed()) {
 End of changes. 4 change blocks. 
4 lines changed or deleted 4 lines changed or added
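Throughout this and the following flow-graph headers, every task::enqueue of a forwarding task is replaced by the FLOW_SPAWN macro, which is defined in the flow_graph.h hunk later in this report. A brief sketch of the mechanism (the macro definition is copied from that hunk; roughly, tbb::task::spawn feeds the calling thread's local task pool while tbb::task::enqueue goes through the scheduler's shared queue):

    // Define TBB_DEPRECATED_FLOW_ENQUEUE before including flow_graph.h
    // to restore the previous enqueue-based behavior.
    #if TBB_DEPRECATED_FLOW_ENQUEUE
    #define FLOW_SPAWN(a) tbb::task::enqueue((a))
    #else
    #define FLOW_SPAWN(a) tbb::task::spawn((a))
    #endif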


 _flow_graph_node_impl.h   _flow_graph_node_impl.h 
skipping to change at line 274 skipping to change at line 274
__TBB_store_with_release(op->status, FAILED); __TBB_store_with_release(op->status, FAILED);
forwarder_busy = false; forwarder_busy = false;
} }
//! Applies the body to the provided input //! Applies the body to the provided input
// then decides if more work is available // then decides if more work is available
void apply_body( input_type &i ) { void apply_body( input_type &i ) {
task *new_task = apply_body_bypass(i); task *new_task = apply_body_bypass(i);
if(!new_task) return; if(!new_task) return;
if(new_task == SUCCESSFULLY_ENQUEUED) return; if(new_task == SUCCESSFULLY_ENQUEUED) return;
task::enqueue(*new_task); FLOW_SPAWN(*new_task);
return; return;
} }
//! Applies the body to the provided input //! Applies the body to the provided input
// then decides if more work is available // then decides if more work is available
task * apply_body_bypass( input_type &i ) { task * apply_body_bypass( input_type &i ) {
task * new_task = static_cast<ImplType *>(this)->apply_body_impl_bypass(i); task * new_task = static_cast<ImplType *>(this)->apply_body_impl_bypass(i);
if ( my_max_concurrency != 0 ) { if ( my_max_concurrency != 0 ) {
my_operation op_data(app_body_bypass); // tries to pop an item or get_item, enqueues another apply_body my_operation op_data(app_body_bypass); // tries to pop an item or get_item, enqueues another apply_body
my_aggregator.execute(&op_data); my_aggregator.execute(&op_data);
skipping to change at line 299 skipping to change at line 299
} }
//! allocates a task to call apply_body( input ) //! allocates a task to call apply_body( input )
inline task * create_body_task( const input_type &input ) { inline task * create_body_task( const input_type &input ) {
return new(task::allocate_additional_child_of(*my_root_task)) return new(task::allocate_additional_child_of(*my_root_task))
apply_body_task_bypass < my_class, input_type >(*this, input); apply_body_task_bypass < my_class, input_type >(*this, input);
} }
//! Spawns a task that calls apply_body( input ) //! Spawns a task that calls apply_body( input )
inline void spawn_body_task( const input_type &input ) { inline void spawn_body_task( const input_type &input ) {
task::enqueue(*create_body_task(input)); FLOW_SPAWN(*create_body_task(input));
} }
//! This is executed by an enqueued task, the "forwarder" //! This is executed by an enqueued task, the "forwarder"
task *forward_task() { task *forward_task() {
my_operation op_data(try_fwd); my_operation op_data(try_fwd);
task *rval = NULL; task *rval = NULL;
do { do {
op_data.status = WAIT; op_data.status = WAIT;
my_aggregator.execute(&op_data); my_aggregator.execute(&op_data);
if(op_data.status == SUCCEEDED) { if(op_data.status == SUCCEEDED) {
skipping to change at line 324 skipping to change at line 324
return rval; return rval;
} }
inline task *create_forward_task() { inline task *create_forward_task() {
task *rval = new(task::allocate_additional_child_of(*my_root_task)) forward_task_bypass< my_class >(*this); task *rval = new(task::allocate_additional_child_of(*my_root_task)) forward_task_bypass< my_class >(*this);
return rval; return rval;
} }
//! Spawns a task that calls forward() //! Spawns a task that calls forward()
inline void spawn_forward_task() { inline void spawn_forward_task() {
task::enqueue(*create_forward_task()); FLOW_SPAWN(*create_forward_task());
} }
}; // function_input_base }; // function_input_base
//! Implements methods for a function node that takes a type Input as input and sends //! Implements methods for a function node that takes a type Input as input and sends
// a type Output to its successors. // a type Output to its successors.
template< typename Input, typename Output, typename A> template< typename Input, typename Output, typename A>
class function_input : public function_input_base<Input, A, function_input<Input,Output,A> > { class function_input : public function_input_base<Input, A, function_input<Input,Output,A> > {
public: public:
typedef Input input_type; typedef Input input_type;
typedef Output output_type; typedef Output output_type;
skipping to change at line 570 skipping to change at line 570
typedef Output output_type; typedef Output output_type;
typedef function_output<output_type> base_type; typedef function_output<output_type> base_type;
using base_type::my_successors; using base_type::my_successors;
multifunction_output() : base_type() {my_successors.set_owner(this);} multifunction_output() : base_type() {my_successors.set_owner(this);}
multifunction_output( const multifunction_output &/*other*/) : base_type() { my_successors.set_owner(this); } multifunction_output( const multifunction_output &/*other*/) : base_type() { my_successors.set_owner(this); }
bool try_put(const output_type &i) { bool try_put(const output_type &i) {
task *res = my_successors.try_put_task(i); task *res = my_successors.try_put_task(i);
if(!res) return false; if(!res) return false;
if(res != SUCCESSFULLY_ENQUEUED) task::enqueue(*res); if(res != SUCCESSFULLY_ENQUEUED) FLOW_SPAWN(*res);
return true; return true;
} }
}; };
} // internal } // internal
#endif // __TBB__flow_graph_node_impl_H #endif // __TBB__flow_graph_node_impl_H
 End of changes. 4 change blocks. 
4 lines changed or deleted 4 lines changed or added


 concurrent_hash_map.h   concurrent_hash_map.h 
skipping to change at line 56 skipping to change at line 56
#endif #endif
#include "cache_aligned_allocator.h" #include "cache_aligned_allocator.h"
#include "tbb_allocator.h" #include "tbb_allocator.h"
#include "spin_rw_mutex.h" #include "spin_rw_mutex.h"
#include "atomic.h" #include "atomic.h"
#include "aligned_space.h" #include "aligned_space.h"
#include "tbb_exception.h" #include "tbb_exception.h"
#include "tbb_profiling.h" #include "tbb_profiling.h"
#include "internal/_concurrent_unordered_impl.h" // Need tbb_hasher #include "internal/_concurrent_unordered_impl.h" // Need tbb_hasher
#if __TBB_INITIALIZER_LISTS_PRESENT
#include <initializer_list>
#endif
#if TBB_USE_PERFORMANCE_WARNINGS || __TBB_STATISTICS #if TBB_USE_PERFORMANCE_WARNINGS || __TBB_STATISTICS
#include <typeinfo> #include <typeinfo>
#endif #endif
#if __TBB_STATISTICS #if __TBB_STATISTICS
#include <stdio.h> #include <stdio.h>
#endif #endif
namespace tbb { namespace tbb {
//! hash_compare that is default argument for concurrent_hash_map //! hash_compare that is default argument for concurrent_hash_map
skipping to change at line 764 skipping to change at line 767
//! Construction with copying iteration range and given allocator instance //! Construction with copying iteration range and given allocator instance
template<typename I> template<typename I>
concurrent_hash_map(I first, I last, const allocator_type &a = allocator_type()) concurrent_hash_map(I first, I last, const allocator_type &a = allocator_type())
: my_allocator(a) : my_allocator(a)
{ {
reserve( std::distance(first, last) ); // TODO: load_factor? reserve( std::distance(first, last) ); // TODO: load_factor?
internal_copy(first, last); internal_copy(first, last);
} }
#if __TBB_INITIALIZER_LISTS_PRESENT
//! Construct table from an initializer_list. The list size serves also as initial concurrency level.
concurrent_hash_map(const std::initializer_list<value_type> &il, const allocator_type &a = allocator_type())
: my_allocator(a)
{
reserve(il.size());
internal_copy(il.begin(), il.end());
}
#endif //__TBB_INITIALIZER_LISTS_PRESENT
//! Assignment //! Assignment
concurrent_hash_map& operator=( const concurrent_hash_map& table ) { concurrent_hash_map& operator=( const concurrent_hash_map& table ) {
if( this!=&table ) { if( this!=&table ) {
clear(); clear();
internal_copy(table); internal_copy(table);
} }
return *this; return *this;
} }
#if __TBB_INITIALIZER_LISTS_PRESENT
//! Assignment
concurrent_hash_map& operator=( const std::initializer_list<value_type> &il ) {
clear();
reserve(il.size());
internal_copy(il.begin(), il.end());
return *this;
}
#endif //__TBB_INITIALIZER_LISTS_PRESENT
//! Rehashes and optionally resizes the whole table. //! Rehashes and optionally resizes the whole table.
/** Useful to optimize performance before or after concurrent operations. /** Useful to optimize performance before or after concurrent operations.
Also enables using of find() and count() concurrent methods in serial context. */ Also enables using of find() and count() concurrent methods in serial context. */
void rehash(size_type n = 0); void rehash(size_type n = 0);
//! Clear table //! Clear table
void clear(); void clear();
//! Clear table and destroy it. //! Clear table and destroy it.
~concurrent_hash_map() { clear(); } ~concurrent_hash_map() { clear(); }
 End of changes. 3 change blocks. 
0 lines changed or deleted 27 lines changed or added
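A usage sketch of the two new concurrent_hash_map members (hypothetical example; note that both reserve il.size() buckets before copying):

    #include "tbb/concurrent_hash_map.h"
    #include <string>

    int main() {
    #if __TBB_INITIALIZER_LISTS_PRESENT
        // Constructor: reserve(il.size()) then internal_copy(il.begin(), il.end()).
        tbb::concurrent_hash_map<int, std::string> table = { {1, "one"}, {2, "two"} };
        // Assignment: clear(), reserve(il.size()), then the same copy.
        table = { {3, "three"} };
    #endif
        return 0;
    }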


 concurrent_unordered_map.h   concurrent_unordered_map.h 
skipping to change at line 144 skipping to change at line 144
template <typename Iterator> template <typename Iterator>
concurrent_unordered_map(Iterator first, Iterator last, size_type n_of_buckets = 8, concurrent_unordered_map(Iterator first, Iterator last, size_type n_of_buckets = 8,
const hasher& _Hasher = hasher(), const key_equal& _Key_equality = key_equal(), const hasher& _Hasher = hasher(), const key_equal& _Key_equality = key_equal(),
const allocator_type& a = allocator_type()) const allocator_type& a = allocator_type())
: base_type(n_of_buckets, key_compare(_Hasher, _Key_equality), a) : base_type(n_of_buckets, key_compare(_Hasher, _Key_equality), a)
{ {
for (; first != last; ++first) for (; first != last; ++first)
base_type::insert(*first); base_type::insert(*first);
} }
#if __TBB_INITIALIZER_LISTS_PRESENT
//! Constructor from initializer_list
concurrent_unordered_map(std::initializer_list<value_type> const& il, size_type n_of_buckets = 8,
    const hasher& _Hasher = hasher(), const key_equal& _Key_equality = key_equal(),
const allocator_type& a = allocator_type())
: base_type(n_of_buckets, key_compare(_Hasher, _Key_equality), a)
{
this->insert(il.begin(),il.end());
}
#endif //# __TBB_INITIALIZER_LISTS_PRESENT
concurrent_unordered_map(const concurrent_unordered_map& table) : base_type(table) concurrent_unordered_map(const concurrent_unordered_map& table) : base_type(table)
{ {
} }
concurrent_unordered_map(const concurrent_unordered_map& table, const Allocator& a) concurrent_unordered_map(const concurrent_unordered_map& table, const Allocator& a)
: base_type(table, a) : base_type(table, a)
{ {
} }
concurrent_unordered_map& operator=(const concurrent_unordered_map& table) concurrent_unordered_map& operator=(const concurrent_unordered_map& table)
{ {
base_type::operator=(table); base_type::operator=(table);
return (*this); return (*this);
} }
#if __TBB_INITIALIZER_LISTS_PRESENT
//! assignment operator from initializer_list
concurrent_unordered_map& operator=(std::initializer_list<value_type> const& il)
{
base_type::operator=(il);
return (*this);
}
#endif //# __TBB_INITIALIZER_LISTS_PRESENT
iterator unsafe_erase(const_iterator where) iterator unsafe_erase(const_iterator where)
{ {
return base_type::unsafe_erase(where); return base_type::unsafe_erase(where);
} }
size_type unsafe_erase(const key_type& key) size_type unsafe_erase(const key_type& key)
{ {
return base_type::unsafe_erase(key); return base_type::unsafe_erase(key);
} }
skipping to change at line 291 skipping to change at line 311
template <typename Iterator> template <typename Iterator>
concurrent_unordered_multimap(Iterator first, Iterator last, size_type n_of_buckets = 8, concurrent_unordered_multimap(Iterator first, Iterator last, size_type n_of_buckets = 8,
const hasher& _Hasher = hasher(), const key_equal& _Key_equality = key_equal(), const hasher& _Hasher = hasher(), const key_equal& _Key_equality = key_equal(),
const allocator_type& a = allocator_type()) const allocator_type& a = allocator_type())
: base_type(n_of_buckets,key_compare(_Hasher,_Key_equality), a) : base_type(n_of_buckets,key_compare(_Hasher,_Key_equality), a)
{ {
for (; first != last; ++first) for (; first != last; ++first)
base_type::insert(*first); base_type::insert(*first);
} }
#if __TBB_INITIALIZER_LISTS_PRESENT
//! Constructor from initializer_list
concurrent_unordered_multimap(std::initializer_list<value_type> const& il, size_type n_of_buckets = 8,
    const hasher& _Hasher = hasher(), const key_equal& _Key_equality = key_equal(),
const allocator_type& a = allocator_type())
: base_type(n_of_buckets, key_compare(_Hasher, _Key_equality), a)
{
this->insert(il.begin(),il.end());
}
#endif //# __TBB_INITIALIZER_LISTS_PRESENT
concurrent_unordered_multimap(const concurrent_unordered_multimap& table) : base_type(table) concurrent_unordered_multimap(const concurrent_unordered_multimap& table) : base_type(table)
{ {
} }
concurrent_unordered_multimap(const concurrent_unordered_multimap& table, const Allocator& a) concurrent_unordered_multimap(const concurrent_unordered_multimap& table, const Allocator& a)
: base_type(table, a) : base_type(table, a)
{ {
} }
concurrent_unordered_multimap& operator=(const concurrent_unordered_multimap& table) concurrent_unordered_multimap& operator=(const concurrent_unordered_multimap& table)
{ {
base_type::operator=(table); base_type::operator=(table);
return (*this); return (*this);
} }
#if __TBB_INITIALIZER_LISTS_PRESENT
//! assignment operator from initializer_list
concurrent_unordered_multimap& operator=(std::initializer_list<value_type> const& il)
{
base_type::operator=(il);
return (*this);
}
#endif //# __TBB_INITIALIZER_LISTS_PRESENT
iterator unsafe_erase(const_iterator where) iterator unsafe_erase(const_iterator where)
{ {
return base_type::unsafe_erase(where); return base_type::unsafe_erase(where);
} }
size_type unsafe_erase(const key_type& key) size_type unsafe_erase(const key_type& key)
{ {
return base_type::unsafe_erase(key); return base_type::unsafe_erase(key);
} }
 End of changes. 4 change blocks. 
0 lines changed or deleted 46 lines changed or added
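The map and multimap overloads differ only in the duplicate-key policy inherited from the base type, as this sketch illustrates (hypothetical example):

    #include "tbb/concurrent_unordered_map.h"
    #include <cassert>

    int main() {
    #if __TBB_INITIALIZER_LISTS_PRESENT
        tbb::concurrent_unordered_map<int, char> m = { {1, 'a'}, {1, 'b'}, {2, 'c'} };
        assert(m.size() == 2);   // the second pair with key 1 is rejected
        tbb::concurrent_unordered_multimap<int, char> mm = { {1, 'a'}, {1, 'b'}, {2, 'c'} };
        assert(mm.size() == 3);  // the multimap keeps all three pairs
    #endif
        return 0;
    }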


 concurrent_unordered_set.h   concurrent_unordered_set.h 
skipping to change at line 124 skipping to change at line 124
template <typename Iterator> template <typename Iterator>
concurrent_unordered_set(Iterator first, Iterator last, size_type n_of_buckets = 8, const hasher& a_hasher = hasher(), concurrent_unordered_set(Iterator first, Iterator last, size_type n_of_buckets = 8, const hasher& a_hasher = hasher(),
const key_equal& a_keyeq = key_equal(), const allocator_type& a = allocator_type()) const key_equal& a_keyeq = key_equal(), const allocator_type& a = allocator_type())
: base_type(n_of_buckets, key_compare(a_hasher, a_keyeq), a) : base_type(n_of_buckets, key_compare(a_hasher, a_keyeq), a)
{ {
for (; first != last; ++first) for (; first != last; ++first)
base_type::insert(*first); base_type::insert(*first);
} }
#if __TBB_INITIALIZER_LISTS_PRESENT
//! Constructor from initializer_list
concurrent_unordered_set(std::initializer_list<value_type> const& il, size_type n_of_buckets = 8, const hasher& a_hasher = hasher(),
    const key_equal& a_keyeq = key_equal(), const allocator_type& a = allocator_type())
: base_type(n_of_buckets, key_compare(a_hasher, a_keyeq), a)
{
this->insert(il.begin(),il.end());
}
#endif //# __TBB_INITIALIZER_LISTS_PRESENT
concurrent_unordered_set(const concurrent_unordered_set& table) : base_type(table) concurrent_unordered_set(const concurrent_unordered_set& table) : base_type(table)
{ {
} }
concurrent_unordered_set(const concurrent_unordered_set& table, const Allocator& a) concurrent_unordered_set(const concurrent_unordered_set& table, const Allocator& a)
: base_type(table, a) : base_type(table, a)
{ {
} }
concurrent_unordered_set& operator=(const concurrent_unordered_set& table) concurrent_unordered_set& operator=(const concurrent_unordered_set& table)
{ {
base_type::operator=(table); base_type::operator=(table);
return (*this); return (*this);
} }
#if __TBB_INITIALIZER_LISTS_PRESENT
//! assignment operator from initializer_list
concurrent_unordered_set& operator=(std::initializer_list<value_type> const& il)
{
base_type::operator=(il);
return (*this);
}
#endif //# __TBB_INITIALIZER_LISTS_PRESENT
iterator unsafe_erase(const_iterator where) iterator unsafe_erase(const_iterator where)
{ {
return base_type::unsafe_erase(where); return base_type::unsafe_erase(where);
} }
size_type unsafe_erase(const key_type& key) size_type unsafe_erase(const key_type& key)
{ {
return base_type::unsafe_erase(key); return base_type::unsafe_erase(key);
} }
skipping to change at line 231 skipping to change at line 250
const hasher& _Hasher = hasher(), const key_equal& _Key_equality = key_equal(), const hasher& _Hasher = hasher(), const key_equal& _Key_equality = key_equal(),
const allocator_type& a = allocator_type()) const allocator_type& a = allocator_type())
: base_type(n_of_buckets, key_compare(_Hasher, _Key_equality), a) : base_type(n_of_buckets, key_compare(_Hasher, _Key_equality), a)
{ {
for (; first != last; ++first) for (; first != last; ++first)
{ {
base_type::insert(*first); base_type::insert(*first);
} }
} }
#if __TBB_INITIALIZER_LISTS_PRESENT
//! Constructor from initializer_list
concurrent_unordered_multiset(std::initializer_list<value_type> const& il, size_type n_of_buckets = 8, const hasher& a_hasher = hasher(),
    const key_equal& a_keyeq = key_equal(), const allocator_type& a = allocator_type())
: base_type(n_of_buckets, key_compare(a_hasher, a_keyeq), a)
{
this->insert(il.begin(),il.end());
}
#endif //# __TBB_INITIALIZER_LISTS_PRESENT
concurrent_unordered_multiset(const concurrent_unordered_multiset& table) : base_type(table) concurrent_unordered_multiset(const concurrent_unordered_multiset& table) : base_type(table)
{ {
} }
concurrent_unordered_multiset(const concurrent_unordered_multiset& table, const Allocator& a) : base_type(table, a) concurrent_unordered_multiset(const concurrent_unordered_multiset& table, const Allocator& a) : base_type(table, a)
{ {
} }
concurrent_unordered_multiset& operator=(const concurrent_unordered_multiset& table) concurrent_unordered_multiset& operator=(const concurrent_unordered_multiset& table)
{ {
base_type::operator=(table); base_type::operator=(table);
return (*this); return (*this);
} }
#if __TBB_INITIALIZER_LISTS_PRESENT
//! assignment operator from initializer_list
concurrent_unordered_multiset& operator=(std::initializer_list<value_type> const& il)
{
base_type::operator=(il);
return (*this);
}
#endif //# __TBB_INITIALIZER_LISTS_PRESENT
// Modifiers // Modifiers
std::pair<iterator, bool> insert(const value_type& value) std::pair<iterator, bool> insert(const value_type& value)
{ {
return base_type::insert(value); return base_type::insert(value);
} }
iterator insert(const_iterator where, const value_type& value) iterator insert(const_iterator where, const value_type& value)
{ {
return base_type::insert(where, value); return base_type::insert(where, value);
} }
 End of changes. 4 change blocks. 
0 lines changed or deleted 44 lines changed or added
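The set and multiset constructors behave analogously (hypothetical example):

    #include "tbb/concurrent_unordered_set.h"
    #include <cassert>

    int main() {
    #if __TBB_INITIALIZER_LISTS_PRESENT
        tbb::concurrent_unordered_set<int> s = { 1, 2, 2, 3 };        // duplicate 2 dropped
        tbb::concurrent_unordered_multiset<int> ms = { 1, 2, 2, 3 };  // duplicate 2 kept
        assert(s.size() == 3 && ms.size() == 4);
    #endif
        return 0;
    }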


 concurrent_vector.h   concurrent_vector.h 
skipping to change at line 661 skipping to change at line 661
//------------------------------------------------------------------------ //------------------------------------------------------------------------
// Concurrent operations // Concurrent operations
//------------------------------------------------------------------------ //------------------------------------------------------------------------
//TODO: consider adding overload of grow_by accepting range of iterators: grow_by(iterator,iterator) //TODO: consider adding overload of grow_by accepting range of iterators: grow_by(iterator,iterator)
//TODO: consider adding overload of grow_by accepting initializer_list: grow_by(std::initializer_list<T>), as an analogy to std::vector::insert(initializer_list) //TODO: consider adding overload of grow_by accepting initializer_list: grow_by(std::initializer_list<T>), as an analogy to std::vector::insert(initializer_list)
//! Grow by "delta" elements. //! Grow by "delta" elements.
#if TBB_DEPRECATED #if TBB_DEPRECATED
/** Returns old size. */ /** Returns old size. */
size_type grow_by( size_type delta ) { size_type grow_by( size_type delta ) {
return delta ? internal_grow_by( delta, sizeof(T), &initialize_array, NULL ) : my_early_size; return delta ? internal_grow_by( delta, sizeof(T), &initialize_array, NULL ) : my_early_size.load();
} }
#else #else
/** Returns iterator pointing to the first new element. */ /** Returns iterator pointing to the first new element. */
iterator grow_by( size_type delta ) { iterator grow_by( size_type delta ) {
return iterator(*this, delta ? internal_grow_by( delta, sizeof(T), &initialize_array, NULL ) : my_early_size); return iterator(*this, delta ? internal_grow_by( delta, sizeof(T), &initialize_array, NULL ) : my_early_size.load());
} }
#endif #endif
//! Grow by "delta" elements using copying constructor. //! Grow by "delta" elements using copying constructor.
#if TBB_DEPRECATED #if TBB_DEPRECATED
/** Returns old size. */ /** Returns old size. */
size_type grow_by( size_type delta, const_reference t ) { size_type grow_by( size_type delta, const_reference t ) {
return delta ? internal_grow_by( delta, sizeof(T), &initialize_array_by, static_cast<const void*>(&t) ) : my_early_size; return delta ? internal_grow_by( delta, sizeof(T), &initialize_array_by, static_cast<const void*>(&t) ) : my_early_size.load();
} }
#else #else
/** Returns iterator pointing to the first new element. */ /** Returns iterator pointing to the first new element. */
iterator grow_by( size_type delta, const_reference t ) { iterator grow_by( size_type delta, const_reference t ) {
return iterator(*this, delta ? internal_grow_by( delta, sizeof(T), &initialize_array_by, static_cast<const void*>(&t) ) : my_early_size); return iterator(*this, delta ? internal_grow_by( delta, sizeof(T), &initialize_array_by, static_cast<const void*>(&t) ) : my_early_size.load() );
} }
#endif #endif
//! Append minimal sequence of elements such that size()>=n. //! Append minimal sequence of elements such that size()>=n.
#if TBB_DEPRECATED #if TBB_DEPRECATED
/** The new elements are default constructed. Blocks until all elements in range [0..n) are allocated. /** The new elements are default constructed. Blocks until all elements in range [0..n) are allocated.
May return while other elements are being constructed by other threads. */ May return while other elements are being constructed by other threads. */
void grow_to_at_least( size_type n ) { void grow_to_at_least( size_type n ) {
if( n ) internal_grow_to_at_least_with_result( n, sizeof(T), &initialize_array, NULL ); if( n ) internal_grow_to_at_least_with_result( n, sizeof(T), &initialize_array, NULL );
}; };
 End of changes. 4 change blocks. 
4 lines changed or deleted 4 lines changed or added
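The change above makes the read of the atomic my_early_size an explicit load() in the delta == 0 fast path; behavior is unchanged. For reference, a sketch of the non-deprecated grow_by overload being touched (hypothetical example):

    #include "tbb/concurrent_vector.h"

    int main() {
        tbb::concurrent_vector<int> v;
        // Returns an iterator to the first new element; elements are default-initialized.
        tbb::concurrent_vector<int>::iterator it = v.grow_by(2);
        *it = 42;
        // With delta == 0 nothing is allocated; the current size is simply read
        // (now via my_early_size.load() internally).
        v.grow_by(0);
        return 0;
    }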


 flow_graph.h   flow_graph.h 
skipping to change at line 42 skipping to change at line 42
#include "tbb_stddef.h" #include "tbb_stddef.h"
#include "atomic.h" #include "atomic.h"
#include "spin_mutex.h" #include "spin_mutex.h"
#include "null_mutex.h" #include "null_mutex.h"
#include "spin_rw_mutex.h" #include "spin_rw_mutex.h"
#include "null_rw_mutex.h" #include "null_rw_mutex.h"
#include "task.h" #include "task.h"
#include "concurrent_vector.h" #include "concurrent_vector.h"
#include "internal/_aggregator_impl.h" #include "internal/_aggregator_impl.h"
#if TBB_DEPRECATED_FLOW_ENQUEUE
#define FLOW_SPAWN(a) tbb::task::enqueue((a))
#else
#define FLOW_SPAWN(a) tbb::task::spawn((a))
#endif
// use the VC10 or gcc version of tuple if it is available. // use the VC10 or gcc version of tuple if it is available.
#if __TBB_CPP11_TUPLE_PRESENT #if __TBB_CPP11_TUPLE_PRESENT
#include <tuple> #include <tuple>
namespace tbb { namespace tbb {
namespace flow { namespace flow {
using std::tuple; using std::tuple;
using std::tuple_size; using std::tuple_size;
using std::tuple_element; using std::tuple_element;
using std::get; using std::get;
} }
skipping to change at line 137 skipping to change at line 143
// enqueue left task if necessary. Returns the non-enqueued task if there is one. // enqueue left task if necessary. Returns the non-enqueued task if there is one.
static inline tbb::task *combine_tasks( tbb::task * left, tbb::task * right ) { static inline tbb::task *combine_tasks( tbb::task * left, tbb::task * right ) {
// if no RHS task, don't change left. // if no RHS task, don't change left.
if(right == NULL) return left; if(right == NULL) return left;
// right != NULL // right != NULL
if(left == NULL) return right; if(left == NULL) return right;
if(left == SUCCESSFULLY_ENQUEUED) return right; if(left == SUCCESSFULLY_ENQUEUED) return right;
// left contains a task // left contains a task
if(right != SUCCESSFULLY_ENQUEUED) { if(right != SUCCESSFULLY_ENQUEUED) {
// both are valid tasks // both are valid tasks
tbb::task::enqueue(*left); FLOW_SPAWN(*left);
return right; return right;
} }
return left; return left;
} }
//! Pure virtual template class that defines a receiver of messages of type T //! Pure virtual template class that defines a receiver of messages of type T
template< typename T > template< typename T >
class receiver { class receiver {
public: public:
//! The input type of this receiver //! The input type of this receiver
skipping to change at line 160 skipping to change at line 166
//! The predecessor type for this node //! The predecessor type for this node
typedef sender<T> predecessor_type; typedef sender<T> predecessor_type;
//! Destructor //! Destructor
virtual ~receiver() {} virtual ~receiver() {}
//! Put an item to the receiver //! Put an item to the receiver
bool try_put( const T& t ) { bool try_put( const T& t ) {
task *res = try_put_task(t); task *res = try_put_task(t);
if(!res) return false; if(!res) return false;
if (res != SUCCESSFULLY_ENQUEUED) task::enqueue(*res); if (res != SUCCESSFULLY_ENQUEUED) FLOW_SPAWN(*res);
return true; return true;
} }
//! put item to successor; return task to run the successor if possible. //! put item to successor; return task to run the successor if possible.
protected: protected:
template< typename R, typename B > friend class run_and_put_task; template< typename R, typename B > friend class run_and_put_task;
template<typename X, typename Y> friend class internal::broadcast_cache; template<typename X, typename Y> friend class internal::broadcast_cache;
template<typename X, typename Y> friend class internal::round_robin_cache; template<typename X, typename Y> friend class internal::round_robin_cache;
virtual task *try_put_task(const T& t) = 0; virtual task *try_put_task(const T& t) = 0;
public: public:
skipping to change at line 424 skipping to change at line 430
void decrement_wait_count() { void decrement_wait_count() {
if (my_root_task) if (my_root_task)
my_root_task->decrement_ref_count(); my_root_task->decrement_ref_count();
} }
//! Spawns a task that runs a body and puts its output to a specific receiver //! Spawns a task that runs a body and puts its output to a specific receiver
/** The task is spawned as a child of the graph. This is useful for running tasks /** The task is spawned as a child of the graph. This is useful for running tasks
that need to block a wait_for_all() on the graph. For example a one-off source. */ that need to block a wait_for_all() on the graph. For example a one-off source. */
template< typename Receiver, typename Body > template< typename Receiver, typename Body >
void run( Receiver &r, Body body ) { void run( Receiver &r, Body body ) {
task::enqueue( * new ( task::allocate_additional_child_of( *my_root_task ) ) run_and_put_task< Receiver, Body >( r, body ) ); FLOW_SPAWN( (* new ( task::allocate_additional_child_of( *my_root_task ) ) run_and_put_task< Receiver, Body >( r, body )) );
} }
//! Spawns a task that runs a function object //! Spawns a task that runs a function object
/** The task is spawned as a child of the graph. This is useful for running tasks /** The task is spawned as a child of the graph. This is useful for running tasks
that need to block a wait_for_all() on the graph. For example a one-off source. */ that need to block a wait_for_all() on the graph. For example a one-off source. */
template< typename Body > template< typename Body >
void run( Body body ) { void run( Body body ) {
task::enqueue( * new ( task::allocate_additional_child_of( *my_root_task ) ) run_task< Body >( body ) ); FLOW_SPAWN( * new ( task::allocate_additional_child_of( *my_root_task ) ) run_task< Body >( body ) );
} }
//! Wait until graph is idle and decrement_wait_count calls equals increment_wait_count calls. //! Wait until graph is idle and decrement_wait_count calls equals increment_wait_count calls.
/** The waiting thread will go off and steal work while it is blocked in the wait_for_all. */ /** The waiting thread will go off and steal work while it is blocked in the wait_for_all. */
void wait_for_all() { void wait_for_all() {
cancelled = false; cancelled = false;
caught_exception = false; caught_exception = false;
if (my_root_task) { if (my_root_task) {
#if TBB_USE_EXCEPTIONS #if TBB_USE_EXCEPTIONS
try { try {
skipping to change at line 754 skipping to change at line 759
v = my_cached_item; v = my_cached_item;
my_reserved = true; my_reserved = true;
return true; return true;
} else { } else {
return false; return false;
} }
} }
//! Spawns a task that applies the body //! Spawns a task that applies the body
/* override */ void spawn_put( ) { /* override */ void spawn_put( ) {
task::enqueue( * new ( task::allocate_additional_child_of( *my_root_task ) ) internal:: source_task_bypass < source_node< output_type > >( *this ) ); FLOW_SPAWN( (* new ( task::allocate_additional_child_of( *my_root_task ) ) internal:: source_task_bypass < source_node< output_type > >( *this ) ) );
} }
friend class internal::source_task_bypass< source_node< output_type > > ; friend class internal::source_task_bypass< source_node< output_type > > ;
//! Applies the body. Returning SUCCESSFULLY_ENQUEUED okay; forward_task_bypass will handle it. //! Applies the body. Returning SUCCESSFULLY_ENQUEUED okay; forward_task_bypass will handle it.
/* override */ task * apply_body_bypass( ) { /* override */ task * apply_body_bypass( ) {
output_type v; output_type v;
if ( !try_reserve_apply_body(v) ) if ( !try_reserve_apply_body(v) )
return NULL; return NULL;
task *last_task = my_successors.try_put_task(v); task *last_task = my_successors.try_put_task(v);
skipping to change at line 1225 skipping to change at line 1230
} }
} }
inline task *grab_forwarding_task( buffer_operation &op_data) { inline task *grab_forwarding_task( buffer_operation &op_data) {
return op_data.ltask; return op_data.ltask;
} }
inline bool enqueue_forwarding_task(buffer_operation &op_data) { inline bool enqueue_forwarding_task(buffer_operation &op_data) {
task *ft = grab_forwarding_task(op_data); task *ft = grab_forwarding_task(op_data);
if(ft) { if(ft) {
task::enqueue(*ft); FLOW_SPAWN(*ft);
return true; return true;
} }
return false; return false;
} }
//! This is executed by an enqueued task, the "forwarder" //! This is executed by an enqueued task, the "forwarder"
virtual task *forward_task() { virtual task *forward_task() {
buffer_operation op_data(try_fwd_task); buffer_operation op_data(try_fwd_task);
task *last_task = NULL; task *last_task = NULL;
do { do {
skipping to change at line 1805 skipping to change at line 1810
void forward() { void forward() {
{ {
spin_mutex::scoped_lock lock(my_mutex); spin_mutex::scoped_lock lock(my_mutex);
if ( my_count < my_threshold ) if ( my_count < my_threshold )
++my_count; ++my_count;
else else
return; return;
} }
task * rtask = decrement_counter(); task * rtask = decrement_counter();
if(rtask) task::enqueue(*rtask); if(rtask) FLOW_SPAWN(*rtask);
} }
task *forward_task() { task *forward_task() {
spin_mutex::scoped_lock lock(my_mutex); spin_mutex::scoped_lock lock(my_mutex);
if ( my_count >= my_threshold ) if ( my_count >= my_threshold )
return NULL; return NULL;
++my_count; ++my_count;
task * rtask = decrement_counter(); task * rtask = decrement_counter();
return rtask; return rtask;
} }
skipping to change at line 1863 skipping to change at line 1868
r.remove_predecessor(*this); r.remove_predecessor(*this);
my_successors.remove_successor(r); my_successors.remove_successor(r);
return true; return true;
} }
//! Adds src to the list of cached predecessors. //! Adds src to the list of cached predecessors.
/* override */ bool register_predecessor( predecessor_type &src ) { /* override */ bool register_predecessor( predecessor_type &src ) {
spin_mutex::scoped_lock lock(my_mutex); spin_mutex::scoped_lock lock(my_mutex);
my_predecessors.add( src ); my_predecessors.add( src );
if ( my_count < my_threshold && !my_successors.empty() ) { if ( my_count < my_threshold && !my_successors.empty() ) {
task::enqueue( * new ( task::allocate_additional_child_of( *my_root_task ) ) internal::forward_task_bypass< limiter_node<T> >( *this ) ); FLOW_SPAWN( (* new ( task::allocate_additional_child_of( *my_root_task ) ) internal::forward_task_bypass < limiter_node<T> >( *this ) ) );
} }
return true; return true;
} }
//! Removes src from the list of cached predecessors. //! Removes src from the list of cached predecessors.
/* override */ bool remove_predecessor( predecessor_type &src ) { /* override */ bool remove_predecessor( predecessor_type &src ) {
my_predecessors.remove( src ); my_predecessors.remove( src );
return true; return true;
} }
 End of changes. 9 change blocks. 
19 lines changed or deleted 23 lines changed or added
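Since graph::run now spawns rather than enqueues its child task, bodies passed to run() execute with ordinary spawn scheduling but are still accounted for by wait_for_all(). A usage sketch (hypothetical example, assuming a C++11 compiler for the lambda):

    #include "tbb/flow_graph.h"
    #include <iostream>

    int main() {
        tbb::flow::graph g;
        g.run([] { std::cout << "ran as a child task of the graph\n"; });
        g.wait_for_all();   // waits for the spawned body to finish
        return 0;
    }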


 gcc_generic.h   gcc_generic.h 
skipping to change at line 135 skipping to change at line 135
#define __TBB_Log2(V) __TBB_machine_lg(V) #define __TBB_Log2(V) __TBB_machine_lg(V)
#define __TBB_USE_GENERIC_FETCH_STORE 1 #define __TBB_USE_GENERIC_FETCH_STORE 1
#define __TBB_USE_GENERIC_HALF_FENCED_LOAD_STORE 1 #define __TBB_USE_GENERIC_HALF_FENCED_LOAD_STORE 1
#define __TBB_USE_GENERIC_RELAXED_LOAD_STORE 1 #define __TBB_USE_GENERIC_RELAXED_LOAD_STORE 1
#define __TBB_USE_GENERIC_SEQUENTIAL_CONSISTENCY_LOAD_STORE 1 #define __TBB_USE_GENERIC_SEQUENTIAL_CONSISTENCY_LOAD_STORE 1
#if __TBB_WORDSIZE==4 #if __TBB_WORDSIZE==4
#define __TBB_USE_GENERIC_DWORD_LOAD_STORE 1 #define __TBB_USE_GENERIC_DWORD_LOAD_STORE 1
#endif #endif
#if __TBB_x86_32 || __TBB_x86_64
#include "gcc_itsx.h"
#endif
 End of changes. 1 change blocks. 
0 lines changed or deleted 0 lines changed or added


 gcc_ia32_common.h   gcc_ia32_common.h 
skipping to change at line 97 skipping to change at line 97
} }
inline void __TBB_set_cpu_ctl_env ( const __TBB_cpu_ctl_env_t* ctl ) { inline void __TBB_set_cpu_ctl_env ( const __TBB_cpu_ctl_env_t* ctl ) {
__asm__ __volatile__ ( __asm__ __volatile__ (
"ldmxcsr %0\n\t" "ldmxcsr %0\n\t"
"fldcw %1" "fldcw %1"
: : "m"(ctl->mxcsr), "m"(ctl->x87cw) : : "m"(ctl->mxcsr), "m"(ctl->x87cw)
); );
} }
#endif /* !__TBB_CPU_CTL_ENV_PRESENT */ #endif /* !__TBB_CPU_CTL_ENV_PRESENT */
#include "gcc_itsx.h"
#endif /* __TBB_machine_gcc_ia32_common_H */ #endif /* __TBB_machine_gcc_ia32_common_H */
 End of changes. 1 change blocks. 
0 lines changed or deleted 2 lines changed or added


 icc_generic.h   icc_generic.h 
skipping to change at line 34 skipping to change at line 34
the GNU General Public License. This exception does not however the GNU General Public License. This exception does not however
invalidate any other reasons why the executable file might be covered by invalidate any other reasons why the executable file might be covered by
the GNU General Public License. the GNU General Public License.
*/ */
#if !defined(__TBB_machine_H) || defined(__TBB_machine_icc_generic_H) #if !defined(__TBB_machine_H) || defined(__TBB_machine_icc_generic_H)
#error Do not #include this internal file directly; use public TBB headers instead. #error Do not #include this internal file directly; use public TBB headers instead.
#endif #endif
#if ! __TBB_ICC_BUILTIN_ATOMICS_PRESENT #if ! __TBB_ICC_BUILTIN_ATOMICS_PRESENT
#error "Intel C++ Compiler of at least 12.1 version is needed to use IC C intrinsics port" #error "Intel C++ Compiler of at least 12.0 version is needed to use IC C intrinsics port"
#endif #endif
#define __TBB_machine_icc_generic_H #define __TBB_machine_icc_generic_H
//ICC mimics the "native" target compiler //ICC mimics the "native" target compiler
#if _MSC_VER #if _MSC_VER
#include "msvc_ia32_common.h" #include "msvc_ia32_common.h"
#else #else
#include "gcc_ia32_common.h" #include "gcc_ia32_common.h"
#endif #endif
 End of changes. 1 change blocks. 
1 lines changed or deleted 1 lines changed or added


 memory_pool.h   memory_pool.h 
skipping to change at line 266 skipping to change at line 266
#if _MSC_VER==1700 && !defined(__INTEL_COMPILER) #if _MSC_VER==1700 && !defined(__INTEL_COMPILER)
#pragma warning (pop) #pragma warning (pop)
#endif #endif
inline fixed_pool::fixed_pool(void *buf, size_t size) : my_buffer(buf), my_size(size) { inline fixed_pool::fixed_pool(void *buf, size_t size) : my_buffer(buf), my_size(size) {
rml::MemPoolPolicy args(allocate_request, 0, size, /*fixedPool=*/true); rml::MemPoolPolicy args(allocate_request, 0, size, /*fixedPool=*/true);
rml::MemPoolError res = rml::pool_create_v1(intptr_t(this), &args, &my_ pool); rml::MemPoolError res = rml::pool_create_v1(intptr_t(this), &args, &my_ pool);
if( res!=rml::POOL_OK ) __TBB_THROW(std::bad_alloc()); if( res!=rml::POOL_OK ) __TBB_THROW(std::bad_alloc());
} }
inline void *fixed_pool::allocate_request(intptr_t pool_id, size_t & bytes) { inline void *fixed_pool::allocate_request(intptr_t pool_id, size_t & bytes) {
fixed_pool &self = *reinterpret_cast<fixed_pool*>(pool_id); fixed_pool &self = *reinterpret_cast<fixed_pool*>(pool_id);
// TODO: we can implement "buffer for fixed pools used only once" polic
y
// on low-level side, thus eliminate atomics here
if( !tbb::internal::as_atomic(self.my_size).compare_and_swap(0, (bytes= self.my_size)) ) if( !tbb::internal::as_atomic(self.my_size).compare_and_swap(0, (bytes= self.my_size)) )
return 0; // all the memory was given already return 0; // all the memory was given already
return self.my_buffer; return self.my_buffer;
} }
} //namespace interface6 } //namespace interface6
using interface6::memory_pool_allocator; using interface6::memory_pool_allocator;
using interface6::memory_pool; using interface6::memory_pool;
using interface6::fixed_pool; using interface6::fixed_pool;
} //namespace tbb } //namespace tbb
 End of changes. 1 change blocks. 
0 lines changed or deleted 3 lines changed or added


 mic_common.h   mic_common.h 
skipping to change at line 49 skipping to change at line 49
#ifndef __TBB_PREFETCHING #ifndef __TBB_PREFETCHING
#define __TBB_PREFETCHING 1 #define __TBB_PREFETCHING 1
#endif #endif
#if __TBB_PREFETCHING #if __TBB_PREFETCHING
#include <immintrin.h> #include <immintrin.h>
#define __TBB_cl_prefetch(p) _mm_prefetch((const char*)p, _MM_HINT_T1) #define __TBB_cl_prefetch(p) _mm_prefetch((const char*)p, _MM_HINT_T1)
#define __TBB_cl_evict(p) _mm_clevict(p, _MM_HINT_T1) #define __TBB_cl_evict(p) _mm_clevict(p, _MM_HINT_T1)
#endif #endif
/** Early Intel(R) Many Integrated Core Architecture does not support mfence and pause instructions **/ /** Intel(R) Many Integrated Core Architecture does not support mfence and pause instructions **/
#define __TBB_full_memory_fence __TBB_release_consistency_helper #define __TBB_full_memory_fence() __asm__ __volatile__("lock; addl $0,(%%rsp)":::"memory")
#define __TBB_Pause(x) _mm_delay_32(16*(x)) #define __TBB_Pause(x) _mm_delay_32(16*(x))
#define __TBB_STEALING_PAUSE 1500/16 #define __TBB_STEALING_PAUSE 1500/16
#include <sched.h> #include <sched.h>
#define __TBB_Yield() sched_yield() #define __TBB_Yield() sched_yield()
/** FPU control setting **/ /** FPU control setting **/
#define __TBB_CPU_CTL_ENV_PRESENT 0 #define __TBB_CPU_CTL_ENV_PRESENT 0
/** Specifics **/ /** Specifics **/
#define __TBB_STEALING_ABORT_ON_CONTENTION 1 #define __TBB_STEALING_ABORT_ON_CONTENTION 1
 End of changes. 1 change blocks. 
3 lines changed or deleted 4 lines changed or added


 msvc_ia32_common.h   msvc_ia32_common.h 
skipping to change at line 182 skipping to change at line 182
#else #else
#include<thread> #include<thread>
#define __TBB_Yield() std::this_thread::yield() #define __TBB_Yield() std::this_thread::yield()
#endif #endif
#define __TBB_Pause(V) __TBB_machine_pause(V) #define __TBB_Pause(V) __TBB_machine_pause(V)
#define __TBB_Log2(V) __TBB_machine_lg(V) #define __TBB_Log2(V) __TBB_machine_lg(V)
#undef __TBB_r #undef __TBB_r
extern "C" {
__int8 __TBB_EXPORTED_FUNC __TBB_machine_try_lock_elided (volatile void* ptr);
void __TBB_EXPORTED_FUNC __TBB_machine_unlock_elided (volatile void* ptr);
// 'pause' instruction aborts HLE/RTM transactions
#if __TBB_PAUSE_USE_INTRINSIC
inline static void __TBB_machine_try_lock_elided_cancel() { _mm_pause(); }
#else
inline static void __TBB_machine_try_lock_elided_cancel() { _asm pause; }
#endif
}
#endif /* __TBB_machine_msvc_ia32_common_H */ #endif /* __TBB_machine_msvc_ia32_common_H */
 End of changes. 1 change blocks. 
0 lines changed or deleted 16 lines changed or added


 parallel_reduce.h   parallel_reduce.h 
skipping to change at line 69 skipping to change at line 69
bool has_right_zombie; bool has_right_zombie;
const reduction_context my_context; const reduction_context my_context;
Body* my_body; Body* my_body;
aligned_space<Body,1> zombie_space; aligned_space<Body,1> zombie_space;
finish_reduce( reduction_context context_ ) : finish_reduce( reduction_context context_ ) :
has_right_zombie(false), // TODO: substitute by flag_task::child_stolen? has_right_zombie(false), // TODO: substitute by flag_task::child_stolen?
my_context(context_), my_context(context_),
my_body(NULL) my_body(NULL)
{ {
} }
~finish_reduce() {
if( has_right_zombie )
zombie_space.begin()->~Body();
}
task* execute() { task* execute() {
if( has_right_zombie ) { if( has_right_zombie ) {
// Right child was stolen. // Right child was stolen.
Body* s = zombie_space.begin(); Body* s = zombie_space.begin();
my_body->join( *s ); my_body->join( *s );
s->~Body(); // Body::join() won't be called if canceled. Defer destruction to destructor
} }
if( my_context==left_child ) if( my_context==left_child )
itt_store_word_with_release( static_cast<finish_reduce*>(parent())->my_body, my_body ); itt_store_word_with_release( static_cast<finish_reduce*>(parent())->my_body, my_body );
return NULL; return NULL;
} }
template<typename Range,typename Body_, typename Partitioner> template<typename Range,typename Body_, typename Partitioner>
friend class start_reduce; friend class start_reduce;
}; };
//! Task type used to split the work of parallel_reduce. //! Task type used to split the work of parallel_reduce.
 End of changes. 2 change blocks. 
1 lines changed or deleted 5 lines changed or added


 scalable_allocator.h   scalable_allocator.h 
skipping to change at line 91 skipping to change at line 91
/** The "_aligned_free" analogue. /** The "_aligned_free" analogue.
@ingroup memory_allocation */ @ingroup memory_allocation */
void __TBB_EXPORTED_FUNC scalable_aligned_free (void* ptr); void __TBB_EXPORTED_FUNC scalable_aligned_free (void* ptr);
/** The analogue of _msize/malloc_size/malloc_usable_size. /** The analogue of _msize/malloc_size/malloc_usable_size.
Returns the usable size of a memory block previously allocated by scalable_*, Returns the usable size of a memory block previously allocated by scalable_*,
or 0 (zero) if ptr does not point to such a block. or 0 (zero) if ptr does not point to such a block.
@ingroup memory_allocation */ @ingroup memory_allocation */
size_t __TBB_EXPORTED_FUNC scalable_msize (void* ptr); size_t __TBB_EXPORTED_FUNC scalable_msize (void* ptr);
/* Results for scalable_allocation_* functions */
typedef enum {
TBBMALLOC_OK,
TBBMALLOC_INVALID_PARAM,
TBBMALLOC_UNSUPPORTED,
TBBMALLOC_NO_MEMORY,
TBBMALLOC_NO_EFFECT
} ScalableAllocationResult;
/* Setting TBB_MALLOC_USE_HUGE_PAGES environment variable to 1 enables huge pages. /* Setting TBB_MALLOC_USE_HUGE_PAGES environment variable to 1 enables huge pages.
scalable_allocation_mode call has priority over environment variable. */ scalable_allocation_mode call has priority over environment variable. */
enum AllocationModeParam {
    USE_HUGE_PAGES /* value turns using huge pages on and off */
};
typedef enum {
    TBBMALLOC_USE_HUGE_PAGES, /* value turns using huge pages on and off */
    /* deprecated, kept for backward compatibility only */
    USE_HUGE_PAGES = TBBMALLOC_USE_HUGE_PAGES
} AllocationModeParam;
/** Set TBB allocator-specific allocation modes. /** Set TBB allocator-specific allocation modes.
@ingroup memory_allocation */ @ingroup memory_allocation */
int __TBB_EXPORTED_FUNC scalable_allocation_mode(int param, intptr_t value); int __TBB_EXPORTED_FUNC scalable_allocation_mode(int param, intptr_t value);
typedef enum {
/* Clean internal allocator buffers for all threads.
Returns TBBMALLOC_NO_EFFECT if no buffers cleaned,
TBBMALLOC_OK if some memory released from buffers. */
TBBMALLOC_CLEAN_ALL_BUFFERS,
/* Clean internal allocator buffer for current thread only.
Return values same as for TBBMALLOC_CLEAN_ALL_BUFFERS. */
TBBMALLOC_CLEAN_THREAD_BUFFERS
} ScalableAllocationCmd;
/** Call TBB allocator-specific commands.
@ingroup memory_allocation */
int __TBB_EXPORTED_FUNC scalable_allocation_command(int cmd, void *param);
#ifdef __cplusplus #ifdef __cplusplus
} /* extern "C" */ } /* extern "C" */
#endif /* __cplusplus */ #endif /* __cplusplus */
#ifdef __cplusplus #ifdef __cplusplus
namespace rml { namespace rml {
class MemoryPool; class MemoryPool;
typedef void *(*rawAllocType)(intptr_t pool_id, size_t &bytes); typedef void *(*rawAllocType)(intptr_t pool_id, size_t &bytes);
skipping to change at line 148 skipping to change at line 173
reserved : 30; reserved : 30;
MemPoolPolicy(rawAllocType pAlloc_, rawFreeType pFree_, MemPoolPolicy(rawAllocType pAlloc_, rawFreeType pFree_,
size_t granularity_ = 0, bool fixedPool_ = false, size_t granularity_ = 0, bool fixedPool_ = false,
bool keepAllMemory_ = false) : bool keepAllMemory_ = false) :
pAlloc(pAlloc_), pFree(pFree_), granularity(granularity_), version(TBBMALLOC_POOL_VERSION), pAlloc(pAlloc_), pFree(pFree_), granularity(granularity_), version(TBBMALLOC_POOL_VERSION),
fixedPool(fixedPool_), keepAllMemory(keepAllMemory_), fixedPool(fixedPool_), keepAllMemory(keepAllMemory_),
reserved(0) {} reserved(0) {}
}; };
enum MemPoolError {
    POOL_OK, // pool created successfully
    INVALID_POLICY, // invalid policy parameters found
    UNSUPPORTED_POLICY, // requested pool policy is not supported by allocator library
    NO_MEMORY // lack of memory during pool creation
};
// enums have same values as appropriate enums from ScalableAllocationResult
// TODO: use ScalableAllocationResult in pool_create directly
enum MemPoolError {
    // pool created successfully
    POOL_OK = TBBMALLOC_OK,
    // invalid policy parameters found
    INVALID_POLICY = TBBMALLOC_INVALID_PARAM,
    // requested pool policy is not supported by allocator library
    UNSUPPORTED_POLICY = TBBMALLOC_UNSUPPORTED,
    // lack of memory during pool creation
    NO_MEMORY = TBBMALLOC_NO_MEMORY,
    // action takes no effect
    NO_EFFECT = TBBMALLOC_NO_EFFECT
};
MemPoolError pool_create_v1(intptr_t pool_id, const MemPoolPolicy *policy, MemPoolError pool_create_v1(intptr_t pool_id, const MemPoolPolicy *policy,
rml::MemoryPool **pool); rml::MemoryPool **pool);
bool pool_destroy(MemoryPool* memPool); bool pool_destroy(MemoryPool* memPool);
void *pool_malloc(MemoryPool* memPool, size_t size); void *pool_malloc(MemoryPool* memPool, size_t size);
void *pool_realloc(MemoryPool* memPool, void *object, size_t size); void *pool_realloc(MemoryPool* memPool, void *object, size_t size);
void *pool_aligned_malloc(MemoryPool* mPool, size_t size, size_t alignment) ; void *pool_aligned_malloc(MemoryPool* mPool, size_t size, size_t alignment) ;
void *pool_aligned_realloc(MemoryPool* mPool, void *ptr, size_t size, size_ t alignment); void *pool_aligned_realloc(MemoryPool* mPool, void *ptr, size_t size, size_ t alignment);
 End of changes. 5 change blocks. 
8 lines changed or deleted 42 lines changed or added
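A minimal sketch of the extended C interface (hypothetical example; scalable_malloc and scalable_free are declared earlier in this same header):

    #include "tbb/scalable_allocator.h"

    int main(void) {
        /* Same effect as setting TBB_MALLOC_USE_HUGE_PAGES=1 in the environment. */
        scalable_allocation_mode(TBBMALLOC_USE_HUGE_PAGES, 1);

        void *p = scalable_malloc(1024);
        scalable_free(p);

        /* Ask the allocator to release cached memory from all thread buffers.
           Returns TBBMALLOC_OK if something was freed, TBBMALLOC_NO_EFFECT if not. */
        int rc = scalable_allocation_command(TBBMALLOC_CLEAN_ALL_BUFFERS, (void*)0);
        return rc == TBBMALLOC_OK || rc == TBBMALLOC_NO_EFFECT ? 0 : 1;
    }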


 spin_mutex.h   spin_mutex.h 
skipping to change at line 38 skipping to change at line 38
#ifndef __TBB_spin_mutex_H #ifndef __TBB_spin_mutex_H
#define __TBB_spin_mutex_H #define __TBB_spin_mutex_H
#include <cstddef> #include <cstddef>
#include <new> #include <new>
#include "aligned_space.h" #include "aligned_space.h"
#include "tbb_stddef.h" #include "tbb_stddef.h"
#include "tbb_machine.h" #include "tbb_machine.h"
#include "tbb_profiling.h" #include "tbb_profiling.h"
#include "internal/_mutex_padding.h"
namespace tbb { namespace tbb {
//! A lock that occupies a single byte. //! A lock that occupies a single byte.
/** A spin_mutex is a spin mutex that fits in a single byte. /** A spin_mutex is a spin mutex that fits in a single byte.
It should be used only for locking short critical sections It should be used only for locking short critical sections
(typically less than 20 instructions) when fairness is not an issue. (typically less than 20 instructions) when fairness is not an issue.
If zero-initialized, the mutex is considered unheld. If zero-initialized, the mutex is considered unheld.
@ingroup synchronization */ @ingroup synchronization */
class spin_mutex { class spin_mutex {
skipping to change at line 94 skipping to change at line 95
//! Construct without acquiring a mutex. //! Construct without acquiring a mutex.
scoped_lock() : my_mutex(NULL), my_unlock_value(0) {} scoped_lock() : my_mutex(NULL), my_unlock_value(0) {}
//! Construct and acquire lock on a mutex. //! Construct and acquire lock on a mutex.
scoped_lock( spin_mutex& m ) : my_unlock_value(0) { scoped_lock( spin_mutex& m ) : my_unlock_value(0) {
internal::suppress_unused_warning(my_unlock_value); internal::suppress_unused_warning(my_unlock_value);
#if TBB_USE_THREADING_TOOLS||TBB_USE_ASSERT #if TBB_USE_THREADING_TOOLS||TBB_USE_ASSERT
my_mutex=NULL; my_mutex=NULL;
internal_acquire(m); internal_acquire(m);
#else #else
__TBB_LockByte(m.flag);
my_mutex=&m; my_mutex=&m;
__TBB_LockByte(m.flag);
#endif /* TBB_USE_THREADING_TOOLS||TBB_USE_ASSERT*/ #endif /* TBB_USE_THREADING_TOOLS||TBB_USE_ASSERT*/
} }
//! Acquire lock. //! Acquire lock.
void acquire( spin_mutex& m ) { void acquire( spin_mutex& m ) {
#if TBB_USE_THREADING_TOOLS||TBB_USE_ASSERT #if TBB_USE_THREADING_TOOLS||TBB_USE_ASSERT
internal_acquire(m); internal_acquire(m);
#else #else
__TBB_LockByte(m.flag);
my_mutex = &m; my_mutex = &m;
__TBB_LockByte(m.flag);
#endif /* TBB_USE_THREADING_TOOLS||TBB_USE_ASSERT*/ #endif /* TBB_USE_THREADING_TOOLS||TBB_USE_ASSERT*/
} }
//! Try acquiring lock (non-blocking) //! Try acquiring lock (non-blocking)
/** Return true if lock acquired; false otherwise. */ /** Return true if lock acquired; false otherwise. */
bool try_acquire( spin_mutex& m ) { bool try_acquire( spin_mutex& m ) {
#if TBB_USE_THREADING_TOOLS||TBB_USE_ASSERT #if TBB_USE_THREADING_TOOLS||TBB_USE_ASSERT
return internal_try_acquire(m); return internal_try_acquire(m);
#else #else
bool result = __TBB_TryLockByte(m.flag); bool result = __TBB_TryLockByte(m.flag);
skipping to change at line 144 skipping to change at line 145
if( my_mutex ) { if( my_mutex ) {
#if TBB_USE_THREADING_TOOLS||TBB_USE_ASSERT #if TBB_USE_THREADING_TOOLS||TBB_USE_ASSERT
internal_release(); internal_release();
#else #else
__TBB_UnlockByte(my_mutex->flag); __TBB_UnlockByte(my_mutex->flag);
#endif /* TBB_USE_THREADING_TOOLS||TBB_USE_ASSERT */ #endif /* TBB_USE_THREADING_TOOLS||TBB_USE_ASSERT */
} }
} }
}; };
//! Internal constructor with ITT instrumentation.
void __TBB_EXPORTED_METHOD internal_construct(); void __TBB_EXPORTED_METHOD internal_construct();
// Mutex traits // Mutex traits
static const bool is_rw_mutex = false; static const bool is_rw_mutex = false;
static const bool is_recursive_mutex = false; static const bool is_recursive_mutex = false;
static const bool is_fair_mutex = false; static const bool is_fair_mutex = false;
// ISO C++0x compatibility methods // ISO C++0x compatibility methods
//! Acquire lock //! Acquire lock
skipping to change at line 187 skipping to change at line 189
aligned_space<scoped_lock,1> tmp; aligned_space<scoped_lock,1> tmp;
scoped_lock& s = *tmp.begin(); scoped_lock& s = *tmp.begin();
s.my_mutex = this; s.my_mutex = this;
s.internal_release(); s.internal_release();
#else #else
__TBB_store_with_release(flag, 0); __TBB_store_with_release(flag, 0);
#endif /* TBB_USE_THREADING_TOOLS */ #endif /* TBB_USE_THREADING_TOOLS */
} }
friend class scoped_lock; friend class scoped_lock;
}; }; // end of spin_mutex
__TBB_DEFINE_PROFILING_SET_NAME(spin_mutex) __TBB_DEFINE_PROFILING_SET_NAME(spin_mutex)
} // namespace tbb } // namespace tbb
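Since scoped_lock both acquires in its constructor and releases in its destructor, the intended pattern is a short RAII block around the critical section. A minimal sketch, not from this header (the Counter type is illustrative):

    #include "tbb/spin_mutex.h"

    struct Counter {
        tbb::spin_mutex mutex;   // zero-initialized state means "unheld"
        long value;

        long increment() {
            // Held only for a couple of instructions, per the guidance above
            // that spin_mutex suits short critical sections.
            tbb::spin_mutex::scoped_lock lock(mutex);
            return ++value;
        }
    };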
#if ( __TBB_x86_32 || __TBB_x86_64 )
#include "internal/_x86_eliding_mutex_impl.h"
#endif

namespace tbb {
//! A cross-platform spin mutex with speculative lock acquisition.
/** On platforms with proper HW support, this lock may speculatively execute
    its critical sections, using HW mechanisms to detect real data races and
    ensure atomicity of the critical sections. In particular, it uses
    Intel(R) Transactional Synchronization Extensions (Intel(R) TSX).
    Without such HW support, it behaves like a spin_mutex.
    It should be used for locking short critical sections where the lock is
    contended but the data it protects are not. If zero-initialized, the
    mutex is considered unheld.
    @ingroup synchronization */
#if ( __TBB_x86_32 || __TBB_x86_64 )
typedef interface7::internal::padded_mutex<interface7::internal::x86_eliding_mutex> speculative_spin_mutex;
#else
typedef interface7::internal::padded_mutex<spin_mutex> speculative_spin_mutex;
#endif
__TBB_DEFINE_PROFILING_SET_NAME(speculative_spin_mutex)
} // namespace tbb
#endif /* __TBB_spin_mutex_H */ #endif /* __TBB_spin_mutex_H */
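Because speculative_spin_mutex is just a typedef over a padded (and, on IA-32/Intel 64, eliding) mutex, adopting it is a type substitution with the same scoped_lock pattern; a sketch, assuming the typedef above is in scope:

    tbb::speculative_spin_mutex spec_mutex;

    void update(long& shared_value) {
        // On TSX-capable hardware this may execute transactionally, so two
        // updates touching disjoint data need not serialize on the lock byte.
        tbb::speculative_spin_mutex::scoped_lock lock(spec_mutex);
        ++shared_value;
    }

The padding in padded_mutex also keeps the lock byte on its own cache line, which matters for elision: a speculatively executing critical section aborts if anything else on the lock's cache line is written.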
 End of changes. 8 change blocks. 
3 lines changed or deleted 34 lines changed or added


 task.h   task.h 
skipping to change at line 389 skipping to change at line 389
from the traits_type enumerations) take the next 16 bits. from the traits_type enumerations) take the next 16 bits.
Original (zeroth) version of the context did not support any traits . **/ Original (zeroth) version of the context did not support any traits . **/
uintptr_t my_version_and_traits; uintptr_t my_version_and_traits;
//! Pointer to the container storing exception being propagated across this task group. //! Pointer to the container storing exception being propagated across this task group.
exception_container_type *my_exception; exception_container_type *my_exception;
//! Scheduler instance that registered this context in its thread specific list. //! Scheduler instance that registered this context in its thread specific list.
internal::generic_scheduler *my_owner; internal::generic_scheduler *my_owner;
//! Internal state (combination of state flags). //! Internal state (combination of state flags, currently only may_have_children).
uintptr_t my_state; uintptr_t my_state;
#if __TBB_TASK_PRIORITY #if __TBB_TASK_PRIORITY
//! Priority level of the task group (in normalized representation) //! Priority level of the task group (in normalized representation)
intptr_t my_priority; intptr_t my_priority;
#endif /* __TBB_TASK_PRIORITY */ #endif /* __TBB_TASK_PRIORITY */
//! Trailing padding protecting accesses to frequently used members from false sharing //! Trailing padding protecting accesses to frequently used members from false sharing
/** \sa _leading_padding **/ /** \sa _leading_padding **/
char _trailing_padding[internal::NFS_MaxLineSize - 2 * sizeof(uintptr_t ) - 2 * sizeof(void*) char _trailing_padding[internal::NFS_MaxLineSize - 2 * sizeof(uintptr_t ) - 2 * sizeof(void*)
skipping to change at line 443 skipping to change at line 443
introduced in the currently unused padding areas and these fields a re updated introduced in the currently unused padding areas and these fields a re updated
by inline methods. **/ by inline methods. **/
task_group_context ( kind_type relation_with_parent = bound, task_group_context ( kind_type relation_with_parent = bound,
uintptr_t traits = default_traits ) uintptr_t traits = default_traits )
: my_kind(relation_with_parent) : my_kind(relation_with_parent)
, my_version_and_traits(1 | traits) , my_version_and_traits(1 | traits)
{ {
init(); init();
} }
// Do not introduce standalone unbind method since it will break state propagation assumptions
__TBB_EXPORTED_METHOD ~task_group_context (); __TBB_EXPORTED_METHOD ~task_group_context ();
//! Forcefully reinitializes the context after the task tree it was associated with is completed. //! Forcefully reinitializes the context after the task tree it was associated with is completed.
/** Because the method assumes that all the tasks that used to be associated with /** Because the method assumes that all the tasks that used to be associated with
this context have already finished, calling it while the context is still this context have already finished, calling it while the context is still
in use somewhere in the task hierarchy leads to undefined behavior. in use somewhere in the task hierarchy leads to undefined behavior.
IMPORTANT: This method is not thread safe! IMPORTANT: This method is not thread safe!
The method does not change the context's parent if it is set. **/ The method does not change the context's parent if it is set. **/
skipping to change at line 499 skipping to change at line 500
private: private:
friend class task; friend class task;
friend class internal::allocate_root_with_context_proxy; friend class internal::allocate_root_with_context_proxy;
static const kind_type binding_required = bound; static const kind_type binding_required = bound;
static const kind_type binding_completed = kind_type(bound+1); static const kind_type binding_completed = kind_type(bound+1);
static const kind_type detached = kind_type(binding_completed+1); static const kind_type detached = kind_type(binding_completed+1);
static const kind_type dying = kind_type(detached+1); static const kind_type dying = kind_type(detached+1);
    //! Propagates state change (if any) from an ancestor
    /** Checks if one of this object's ancestors is in a new state, and propagates
        the new state to all its descendants in this object's heritage line. **/
    template <typename T>
    void propagate_state_from_ancestors ( T task_group_context::*mptr_state, T new_state );
    //! Propagates any state change detected to *this, and as an optimisation possibly also upward along the heritage line.
    template <typename T>
    void propagate_task_group_state ( T task_group_context::*mptr_state, task_group_context& src, T new_state );
//! Makes sure that the context is registered with a scheduler instance . //! Makes sure that the context is registered with a scheduler instance .
inline void finish_initialization ( internal::generic_scheduler *local_ sched ); inline void finish_initialization ( internal::generic_scheduler *local_ sched );
//! Registers this context with the local scheduler and binds it to its parent context //! Registers this context with the local scheduler and binds it to its parent context
void bind_to ( internal::generic_scheduler *local_sched ); void bind_to ( internal::generic_scheduler *local_sched );
//! Registers this context with the local scheduler //! Registers this context with the local scheduler
void register_with ( internal::generic_scheduler *local_sched ); void register_with ( internal::generic_scheduler *local_sched );
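The reset() contract above (reuse only after the whole task tree has finished, never concurrently) is easiest to see from client code. A hedged sketch, assuming the cancellation methods that task.h declares alongside reset() and a parallel_for overload accepting a context, both standard in this TBB generation:

    #include "tbb/task.h"
    #include "tbb/parallel_for.h"

    void run_twice(int n) {
        tbb::task_group_context ctx(tbb::task_group_context::isolated);

        tbb::parallel_for(0, n, [&ctx](int i) {
            if (i == 42) ctx.cancel_group_execution();  // cancel the whole tree
        }, ctx);

        if (ctx.is_group_execution_cancelled()) {
            ctx.reset();   // legal: the first task tree has fully completed
            tbb::parallel_for(0, n, [](int) { /* retry */ }, ctx);
        }
    }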
 End of changes. 4 change blocks. 
7 lines changed or deleted 5 lines changed or added


 tbb_config.h   tbb_config.h 
skipping to change at line 40 skipping to change at line 40
#define __TBB_tbb_config_H #define __TBB_tbb_config_H
/** This header is supposed to contain macro definitions and C style comments only. /** This header is supposed to contain macro definitions and C style comments only.
    The macros defined here are intended to control such aspects of TBB build as The macros defined here are intended to control such aspects of TBB build as
- presence of compiler features - presence of compiler features
- compilation modes - compilation modes
- feature sets - feature sets
- known compiler/platform issues - known compiler/platform issues
**/ **/
/*Check which standard library we use on OS X.*/
/*__TBB_SYMBOL is defined only while processing exported symbols list where C++ is not allowed.*/
#if !defined(__TBB_SYMBOL) && __APPLE__
    #include <cstddef>
#endif
#define __TBB_GCC_VERSION (__GNUC__ * 10000 + __GNUC_MINOR__ * 100 + __GNUC _PATCHLEVEL__) #define __TBB_GCC_VERSION (__GNUC__ * 10000 + __GNUC_MINOR__ * 100 + __GNUC _PATCHLEVEL__)
#if __clang__ #if __clang__
/**according to clang documentation version can be vendor specific **/ /**according to clang documentation version can be vendor specific **/
#define __TBB_CLANG_VERSION (__clang_major__ * 10000 + __clang_minor__ * 100 + __clang_patchlevel__) #define __TBB_CLANG_VERSION (__clang_major__ * 10000 + __clang_minor__ * 100 + __clang_patchlevel__)
#endif #endif
/** Presence of compiler features **/ /** Presence of compiler features **/
#if __INTEL_COMPILER == 9999 && __INTEL_COMPILER_BUILD_DATE == 20110811 #if __INTEL_COMPILER == 9999 && __INTEL_COMPILER_BUILD_DATE == 20110811
/* Intel(R) Composer XE 2011 Update 6 incorrectly sets __INTEL_COMPILER. Fix it. */ /* Intel(R) Composer XE 2011 Update 6 incorrectly sets __INTEL_COMPILER. Fix it. */
#undef __INTEL_COMPILER #undef __INTEL_COMPILER
#define __INTEL_COMPILER 1210 #define __INTEL_COMPILER 1210
#endif #endif
#if (__TBB_GCC_VERSION >= 40400) && !defined(__INTEL_COMPILER) #if __TBB_GCC_VERSION >= 40400 && !defined(__INTEL_COMPILER)
/** warning suppression pragmas available in GCC since 4.4 **/ /** warning suppression pragmas available in GCC since 4.4 **/
#define __TBB_GCC_WARNING_SUPPRESSION_PRESENT 1 #define __TBB_GCC_WARNING_SUPPRESSION_PRESENT 1
#endif #endif
/* Select particular features of C++11 based on compiler version. /* Select particular features of C++11 based on compiler version.
ICC 12.1 (Linux), GCC 4.3 and higher, clang 2.9 and higher ICC 12.1 (Linux), GCC 4.3 and higher, clang 2.9 and higher
set __GXX_EXPERIMENTAL_CXX0X__ in c++11 mode. set __GXX_EXPERIMENTAL_CXX0X__ in c++11 mode.
    Compilers that mimic other compilers (ICC, clang) must be processed before Compilers that mimic other compilers (ICC, clang) must be processed before
    compilers they mimic (GCC, MSVC). compilers they mimic (GCC, MSVC).
    TODO: The following conditions should be extended when support for new TODO: The following conditions should be extended when support for new
    compilers/runtimes is added. compilers/runtimes is added.
*/ */
#if __INTEL_COMPILER #if __INTEL_COMPILER
    /** On Windows environment when using Intel C++ compiler with Visual Studio 2010*,
        the C++0x features supported by Visual C++ 2010 are enabled by default
        TODO: find a way to get know if c++0x mode is specified in command line on windows **/
    #define __TBB_CPP11_VARIADIC_TEMPLATES_PRESENT   ( __VARIADIC_TEMPLATES && (__GXX_EXPERIMENTAL_CXX0X__ || _MSC_VER) )
    #define __TBB_CPP11_RVALUE_REF_PRESENT           ( (__GXX_EXPERIMENTAL_CXX0X__ || _MSC_VER >= 1600) && (__INTEL_COMPILER >= 1200) )
    /** C++11 mode detection macros for Intel C++ compiler (enabled by -std=c++0x option):
        __INTEL_CXX11_MODE__ for version >=13.0
        __STDC_HOSTED__ for version >=12.0 on Windows,
        __GXX_EXPERIMENTAL_CXX0X__ for version >=12.0 on Linux and OS X. **/
    // On Windows, C++11 features supported by Visual Studio 2010 and higher are enabled by default
    #ifndef __INTEL_CXX11_MODE__
        #define __INTEL_CXX11_MODE__ ((_MSC_VER && __STDC_HOSTED__) || __GXX_EXPERIMENTAL_CXX0X__)
        // TODO: check if more conditions can be simplified with the above macro
    #endif
    #define __TBB_CPP11_VARIADIC_TEMPLATES_PRESENT   (__INTEL_CXX11_MODE__ && __VARIADIC_TEMPLATES)
    #define __TBB_CPP11_RVALUE_REF_PRESENT           ((__GXX_EXPERIMENTAL_CXX0X__ || _MSC_VER >= 1600) && __INTEL_COMPILER >= 1200)
#if _MSC_VER >= 1600 #if _MSC_VER >= 1600
#define __TBB_EXCEPTION_PTR_PRESENT ( __INTEL_COMPILER > 1300 \ #define __TBB_EXCEPTION_PTR_PRESENT ( __INTEL_COMPILER > 1300 \
/*ICC 12.1 Upd 10 and 13 beta Upd 2 fixed exception_ptr linking issue*/ \ /*ICC 12.1 Upd 10 and 13 beta Upd 2 fixed exception_ptr linking issue*/ \
|| (__INTEL_COMPILER == 1300 && __INTEL_COMPILER_BUILD_DATE >= 20120530) \ || (__INTEL_COMPILER == 1300 && __INTEL_COMPILER_BUILD_DATE >= 20120530) \
|| (__INTEL_COMPILER == 1210 && __INTEL_COMPILER_BUILD_DATE >= 20120410) ) || (__INTEL_COMPILER == 1210 && __INTEL_COMPILER_BUILD_DATE >= 20120410) )
    /** libstc++ that comes with GCC 4.6 use C++11 features not supported by ICC 12.1.
     * Because of that ICC 12.1 does not support C++11 mode with with gcc 4.6. (or higher)
     * , and therefore does not define __GXX_EXPERIMENTAL_CXX0X__ macro**/
    #elif (__TBB_GCC_VERSION >= 40404) && (__TBB_GCC_VERSION < 40600)
        #define __TBB_EXCEPTION_PTR_PRESENT          ( __GXX_EXPERIMENTAL_CXX0X__ && __INTEL_COMPILER >= 1200 )
    #elif (__TBB_GCC_VERSION >= 40600)
        #define __TBB_EXCEPTION_PTR_PRESENT          ( __GXX_EXPERIMENTAL_CXX0X__ && __INTEL_COMPILER >= 1300 )
    /** libstdc++ that comes with GCC 4.6 uses C++11 features not supported by ICC 12.1.
     * Because of that ICC 12.1 does not support C++11 mode with gcc 4.6 (or higher),
     * and therefore does not define __GXX_EXPERIMENTAL_CXX0X__ macro **/
    #elif __TBB_GCC_VERSION >= 40404 && __TBB_GCC_VERSION < 40600
        #define __TBB_EXCEPTION_PTR_PRESENT          (__GXX_EXPERIMENTAL_CXX0X__ && __INTEL_COMPILER >= 1200)
    #elif __TBB_GCC_VERSION >= 40600
        #define __TBB_EXCEPTION_PTR_PRESENT          (__GXX_EXPERIMENTAL_CXX0X__ && __INTEL_COMPILER >= 1300)
#else #else
#define __TBB_EXCEPTION_PTR_PRESENT 0 #define __TBB_EXCEPTION_PTR_PRESENT 0
#endif #endif
#define __TBB_MAKE_EXCEPTION_PTR_PRESENT (_MSC_VER >= 1700 || (__GXX_EXPERIMENTAL_CXX0X__ && __TBB_GCC_VERSION >= 40600)) #define __TBB_MAKE_EXCEPTION_PTR_PRESENT (_MSC_VER >= 1700 || (__GXX_EXPERIMENTAL_CXX0X__ && __TBB_GCC_VERSION >= 40600))
    #define __TBB_STATIC_ASSERT_PRESENT              ( __GXX_EXPERIMENTAL_CXX0X__ || (_MSC_VER >= 1600) )
    #define __TBB_CPP11_TUPLE_PRESENT                ( (_MSC_VER >= 1600) || ((__GXX_EXPERIMENTAL_CXX0X__) && (__TBB_GCC_VERSION >= 40300)) )
    #define __TBB_STATIC_ASSERT_PRESENT              (__INTEL_CXX11_MODE__ || _MSC_VER >= 1600)
    #define __TBB_CPP11_TUPLE_PRESENT                (_MSC_VER >= 1600 || (__GXX_EXPERIMENTAL_CXX0X__ && __TBB_GCC_VERSION >= 40300))
/** TODO: re-check for compiler version greater than 12.1 if it support s initializer lists**/ /** TODO: re-check for compiler version greater than 12.1 if it support s initializer lists**/
#define __TBB_INITIALIZER_LISTS_PRESENT 0 #define __TBB_INITIALIZER_LISTS_PRESENT 0
#define __TBB_CONSTEXPR_PRESENT 0 #define __TBB_CONSTEXPR_PRESENT 0
#define __TBB_DEFAULTED_AND_DELETED_FUNC_PRESENT 0 #define __TBB_DEFAULTED_AND_DELETED_FUNC_PRESENT __INTEL_CXX11_MODE__
#elif __clang__ #elif __clang__
//TODO: these options need to be rechecked //TODO: these options need to be rechecked
/** on OS X* the only way to get C++11 is to use clang. For library features (e.g. exception_ptr) libc++ is also /** on OS X* the only way to get C++11 is to use clang. For library features (e.g. exception_ptr) libc++ is also
 * required. So there is no need to check GCC version for clang**/ * required. So there is no need to check GCC version for clang**/
#define __TBB_CPP11_VARIADIC_TEMPLATES_PRESENT __has_feature(__cxx_ variadic_templates__) #define __TBB_CPP11_VARIADIC_TEMPLATES_PRESENT __has_feature(__cxx_ variadic_templates__)
#define __TBB_CPP11_RVALUE_REF_PRESENT __has_feature(__cxx_ rvalue_references__) #define __TBB_CPP11_RVALUE_REF_PRESENT __has_feature(__cxx_ rvalue_references__)
    #define __TBB_EXCEPTION_PTR_PRESENT              (__GXX_EXPERIMENTAL_CXX0X__ && (__cplusplus >= 201103L))
    #define __TBB_MAKE_EXCEPTION_PTR_PRESENT         (__GXX_EXPERIMENTAL_CXX0X__ && (__cplusplus >= 201103L))
    /** TODO: extend exception_ptr related conditions to cover libstdc++ **/
    #define __TBB_EXCEPTION_PTR_PRESENT              (__cplusplus >= 201103L && _LIBCPP_VERSION)
    #define __TBB_MAKE_EXCEPTION_PTR_PRESENT         (__cplusplus >= 201103L && _LIBCPP_VERSION)
#define __TBB_STATIC_ASSERT_PRESENT __has_feature(__cxx_s tatic_assert__) #define __TBB_STATIC_ASSERT_PRESENT __has_feature(__cxx_s tatic_assert__)
    /**Clang (preprocessor) has problems with dealing with expression having __has_include in #if's /**Clang (preprocessor) has problems with dealing with expression having __has_include in #if's
     * used inside C++ code. (At least version that comes with OS X 10.8) **/ * used inside C++ code. (At least version that comes with OS X 10.8) **/
#if (__GXX_EXPERIMENTAL_CXX0X__ && __has_include(<tuple>)) #if (__GXX_EXPERIMENTAL_CXX0X__ && __has_include(<tuple>))
#define __TBB_CPP11_TUPLE_PRESENT 1 #define __TBB_CPP11_TUPLE_PRESENT 1
#endif #endif
#if (__has_feature(__cxx_generalized_initializers__) && __has_include(< initializer_list>)) #if (__has_feature(__cxx_generalized_initializers__) && __has_include(< initializer_list>))
#define __TBB_INITIALIZER_LISTS_PRESENT 1 #define __TBB_INITIALIZER_LISTS_PRESENT 1
#endif #endif
#define __TBB_CONSTEXPR_PRESENT __has_feature(__cxx_c onstexpr__) #define __TBB_CONSTEXPR_PRESENT __has_feature(__cxx_c onstexpr__)
#define __TBB_DEFAULTED_AND_DELETED_FUNC_PRESENT (__has_feature(__cxx_ defaulted_functions__) && __has_feature(__cxx_deleted_functions__)) #define __TBB_DEFAULTED_AND_DELETED_FUNC_PRESENT (__has_feature(__cxx_ defaulted_functions__) && __has_feature(__cxx_deleted_functions__))
#elif __GNUC__ #elif __GNUC__
#define __TBB_CPP11_VARIADIC_TEMPLATES_PRESENT __GXX_EXPERIMENTAL_CX X0X__ #define __TBB_CPP11_VARIADIC_TEMPLATES_PRESENT __GXX_EXPERIMENTAL_CX X0X__
#define __TBB_CPP11_RVALUE_REF_PRESENT __GXX_EXPERIMENTAL_CX X0X__ #define __TBB_CPP11_RVALUE_REF_PRESENT __GXX_EXPERIMENTAL_CX X0X__
    /** __GCC_HAVE_SYNC_COMPARE_AND_SWAP_4 here is a substitution for _GLIBCXX_ATOMIC_BUILTINS_4, which is a prerequisite /** __GCC_HAVE_SYNC_COMPARE_AND_SWAP_4 here is a substitution for _GLIBCXX_ATOMIC_BUILTINS_4, which is a prerequisite
        for exception_ptr but cannot be used in this file because it is defined in a header, not by the compiler. for exception_ptr but cannot be used in this file because it is defined in a header, not by the compiler.
        If the compiler has no atomic intrinsics, the C++ library should not expect those as well. **/ If the compiler has no atomic intrinsics, the C++ library should not expect those as well. **/
    #define __TBB_EXCEPTION_PTR_PRESENT              ((__GXX_EXPERIMENTAL_CXX0X__) && (__TBB_GCC_VERSION >= 40404) && __GCC_HAVE_SYNC_COMPARE_AND_SWAP_4)
    #define __TBB_MAKE_EXCEPTION_PTR_PRESENT         ((__GXX_EXPERIMENTAL_CXX0X__) && (__TBB_GCC_VERSION >= 40600))
    #define __TBB_STATIC_ASSERT_PRESENT              ((__GXX_EXPERIMENTAL_CXX0X__) && (__TBB_GCC_VERSION >= 40300))
    #define __TBB_CPP11_TUPLE_PRESENT                ((__GXX_EXPERIMENTAL_CXX0X__) && (__TBB_GCC_VERSION >= 40300))
    #define __TBB_INITIALIZER_LISTS_PRESENT          ((__GXX_EXPERIMENTAL_CXX0X__) && (__TBB_GCC_VERSION >= 40400))
    #define __TBB_EXCEPTION_PTR_PRESENT              (__GXX_EXPERIMENTAL_CXX0X__ && __TBB_GCC_VERSION >= 40404 && __GCC_HAVE_SYNC_COMPARE_AND_SWAP_4)
    #define __TBB_MAKE_EXCEPTION_PTR_PRESENT         (__GXX_EXPERIMENTAL_CXX0X__ && __TBB_GCC_VERSION >= 40600)
    #define __TBB_STATIC_ASSERT_PRESENT              (__GXX_EXPERIMENTAL_CXX0X__ && __TBB_GCC_VERSION >= 40300)
    #define __TBB_CPP11_TUPLE_PRESENT                (__GXX_EXPERIMENTAL_CXX0X__ && __TBB_GCC_VERSION >= 40300)
    #define __TBB_INITIALIZER_LISTS_PRESENT          (__GXX_EXPERIMENTAL_CXX0X__ && __TBB_GCC_VERSION >= 40400)
    /** gcc seems to support constexpr from 4.4, but reasonable-looking tests (test_atomic) fail to compile prior to 4.6 **/ /** gcc seems to support constexpr from 4.4, but reasonable-looking tests (test_atomic) fail to compile prior to 4.6 **/
    #define __TBB_CONSTEXPR_PRESENT                  ((__GXX_EXPERIMENTAL_CXX0X__) && (__TBB_GCC_VERSION >= 40400))
    #define __TBB_DEFAULTED_AND_DELETED_FUNC_PRESENT ((__GXX_EXPERIMENTAL_CXX0X__) && (__TBB_GCC_VERSION >= 40400))
    #define __TBB_CONSTEXPR_PRESENT                  (__GXX_EXPERIMENTAL_CXX0X__ && __TBB_GCC_VERSION >= 40400)
    #define __TBB_DEFAULTED_AND_DELETED_FUNC_PRESENT (__GXX_EXPERIMENTAL_CXX0X__ && __TBB_GCC_VERSION >= 40400)
#elif _MSC_VER #elif _MSC_VER
#define __TBB_CPP11_VARIADIC_TEMPLATES_PRESENT 0 #define __TBB_CPP11_VARIADIC_TEMPLATES_PRESENT 0
#define __TBB_CPP11_RVALUE_REF_PRESENT 0 #define __TBB_CPP11_RVALUE_REF_PRESENT (_MSC_VER >= 1600)
#define __TBB_EXCEPTION_PTR_PRESENT (_MSC_VER >= 1600) #define __TBB_EXCEPTION_PTR_PRESENT (_MSC_VER >= 1600)
#define __TBB_STATIC_ASSERT_PRESENT (_MSC_VER >= 1600) #define __TBB_STATIC_ASSERT_PRESENT (_MSC_VER >= 1600)
#define __TBB_MAKE_EXCEPTION_PTR_PRESENT (_MSC_VER >= 1700) #define __TBB_MAKE_EXCEPTION_PTR_PRESENT (_MSC_VER >= 1700)
#define __TBB_CPP11_TUPLE_PRESENT (_MSC_VER >= 1600) #define __TBB_CPP11_TUPLE_PRESENT (_MSC_VER >= 1600)
#define __TBB_INITIALIZER_LISTS_PRESENT 0 #define __TBB_INITIALIZER_LISTS_PRESENT 0
#define __TBB_CONSTEXPR_PRESENT 0 #define __TBB_CONSTEXPR_PRESENT 0
#define __TBB_DEFAULTED_AND_DELETED_FUNC_PRESENT 0 #define __TBB_DEFAULTED_AND_DELETED_FUNC_PRESENT 0
#else #else
#define __TBB_CPP11_VARIADIC_TEMPLATES_PRESENT 0 #define __TBB_CPP11_VARIADIC_TEMPLATES_PRESENT 0
#define __TBB_CPP11_RVALUE_REF_PRESENT 0 #define __TBB_CPP11_RVALUE_REF_PRESENT 0
skipping to change at line 175 skipping to change at line 188
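A consumer's-eye view of the ladder above: code can key on one of these feature macros to select a C++11 construct or a portable fallback. A sketch, not from the header; MY_STATIC_ASSERT is an illustrative name:

    #if __TBB_STATIC_ASSERT_PRESENT
        #define MY_STATIC_ASSERT(cond, msg) static_assert(cond, msg)
    #else
        // C++03 fallback: a negative array size triggers a compile error.
        #define MY_STATIC_ASSERT(cond, msg) typedef char my_assert_[(cond) ? 1 : -1]
    #endif

    MY_STATIC_ASSERT(sizeof(void*) >= 4, "unexpectedly small pointers");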
#if __GNUC__ || __SUNPRO_CC || __IBMCPP__ #if __GNUC__ || __SUNPRO_CC || __IBMCPP__
/* ICC defines __GNUC__ and so is covered */ /* ICC defines __GNUC__ and so is covered */
#define __TBB_ATTRIBUTE_ALIGNED_PRESENT 1 #define __TBB_ATTRIBUTE_ALIGNED_PRESENT 1
#elif _MSC_VER && (_MSC_VER >= 1300 || __INTEL_COMPILER) #elif _MSC_VER && (_MSC_VER >= 1300 || __INTEL_COMPILER)
#define __TBB_DECLSPEC_ALIGN_PRESENT 1 #define __TBB_DECLSPEC_ALIGN_PRESENT 1
#endif #endif
/* Actually ICC supports gcc __sync_* intrinsics starting 11.1, /* Actually ICC supports gcc __sync_* intrinsics starting 11.1,
* but 64 bit support for 32 bit target comes in later ones*/ * but 64 bit support for 32 bit target comes in later ones*/
/* TODO: change the version back to 4.1.2 once macro __TBB_WORD_SIZE become optional */ /* TODO: change the version back to 4.1.2 once macro __TBB_WORD_SIZE become optional */
#if (__TBB_GCC_VERSION >= 40306) || (__INTEL_COMPILER >= 1200) #if __TBB_GCC_VERSION >= 40306 || __INTEL_COMPILER >= 1200
/** built-in atomics available in GCC since 4.1.2 **/ /** built-in atomics available in GCC since 4.1.2 **/
#define __TBB_GCC_BUILTIN_ATOMICS_PRESENT 1 #define __TBB_GCC_BUILTIN_ATOMICS_PRESENT 1
#endif #endif
#if (__INTEL_COMPILER >= 1210) #if __INTEL_COMPILER >= 1200
/** built-in C++11 style atomics available in compiler since 12.1 **/ /** built-in C++11 style atomics available in ICC since 12.0 **/
#define __TBB_ICC_BUILTIN_ATOMICS_PRESENT 1 #define __TBB_ICC_BUILTIN_ATOMICS_PRESENT 1
#endif #endif
/** User controlled TBB features & modes **/ /** User controlled TBB features & modes **/
#ifndef TBB_USE_DEBUG #ifndef TBB_USE_DEBUG
#ifdef TBB_DO_ASSERT #ifdef TBB_DO_ASSERT
#define TBB_USE_DEBUG TBB_DO_ASSERT #define TBB_USE_DEBUG TBB_DO_ASSERT
#else #else
#ifdef _DEBUG #ifdef _DEBUG
skipping to change at line 262 skipping to change at line 275
#else #else
#define TBB_IMPLEMENT_CPP0X 1 #define TBB_IMPLEMENT_CPP0X 1
#endif #endif
#else #else
#define TBB_IMPLEMENT_CPP0X 1 #define TBB_IMPLEMENT_CPP0X 1
#endif #endif
#endif /* TBB_IMPLEMENT_CPP0X */ #endif /* TBB_IMPLEMENT_CPP0X */
/* TBB_USE_CAPTURED_EXCEPTION should be explicitly set to either 0 or 1, as it is used as C++ const */ /* TBB_USE_CAPTURED_EXCEPTION should be explicitly set to either 0 or 1, as it is used as C++ const */
#ifndef TBB_USE_CAPTURED_EXCEPTION #ifndef TBB_USE_CAPTURED_EXCEPTION
    /**TODO: enable it by default on OS X*, once it is enabled in pre-built binary **/
    /** OS X* and IA64 pre-built TBB binaries do not support exception_ptr. **/
    #if __TBB_EXCEPTION_PTR_PRESENT && !defined(__APPLE__) && !defined(__ia64__)
    /** IA-64 architecture pre-built TBB binaries do not support exception_ptr. **/
    #if __TBB_EXCEPTION_PTR_PRESENT && !defined(__ia64__)
#define TBB_USE_CAPTURED_EXCEPTION 0 #define TBB_USE_CAPTURED_EXCEPTION 0
#else #else
#define TBB_USE_CAPTURED_EXCEPTION 1 #define TBB_USE_CAPTURED_EXCEPTION 1
#endif #endif
#else /* defined TBB_USE_CAPTURED_EXCEPTION */ #else /* defined TBB_USE_CAPTURED_EXCEPTION */
#if !TBB_USE_CAPTURED_EXCEPTION && !__TBB_EXCEPTION_PTR_PRESENT #if !TBB_USE_CAPTURED_EXCEPTION && !__TBB_EXCEPTION_PTR_PRESENT
#error Current runtime does not support std::exception_ptr. Set TBB _USE_CAPTURED_EXCEPTION and make sure that your code is ready to catch tbb: :captured_exception. #error Current runtime does not support std::exception_ptr. Set TBB _USE_CAPTURED_EXCEPTION and make sure that your code is ready to catch tbb: :captured_exception.
#endif #endif
#endif /* defined TBB_USE_CAPTURED_EXCEPTION */ #endif /* defined TBB_USE_CAPTURED_EXCEPTION */
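What the TBB_USE_CAPTURED_EXCEPTION choice means in a catch block, as a sketch; it assumes tbb::captured_exception from tbb_exception.h and an ordinary parallel loop whose body throws:

    #include "tbb/parallel_for.h"
    #include "tbb/tbb_exception.h"
    #include <stdexcept>

    void exception_sketch(int n) {
        try {
            tbb::parallel_for(0, n, [](int) {
                throw std::out_of_range("bad index");  // thrown on a worker thread
            });
        } catch (std::out_of_range&) {
            // Exact propagation: requires __TBB_EXCEPTION_PTR_PRESENT and
            // TBB_USE_CAPTURED_EXCEPTION == 0.
        } catch (tbb::captured_exception& e) {
            // TBB_USE_CAPTURED_EXCEPTION == 1: the concrete type is lost,
            // but name() and what() still describe the original exception.
            e.name(); e.what();
        }
    }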
/** Check whether the request to use GCC atomics can be satisfied **/ /** Check whether the request to use GCC atomics can be satisfied **/
#if (TBB_USE_GCC_BUILTINS && !__TBB_GCC_BUILTIN_ATOMICS_PRESENT) #if TBB_USE_GCC_BUILTINS && !__TBB_GCC_BUILTIN_ATOMICS_PRESENT
#error "GCC atomic built-ins are not supported." #error "GCC atomic built-ins are not supported."
#endif #endif
/** Internal TBB features & modes **/ /** Internal TBB features & modes **/
/** __TBB_WEAK_SYMBOLS_PRESENT denotes that the system supports the weak sy mbol mechanism **/ /** __TBB_WEAK_SYMBOLS_PRESENT denotes that the system supports the weak sy mbol mechanism **/
#define __TBB_WEAK_SYMBOLS_PRESENT ( !_WIN32 && !__APPLE__ && !__sun && ((__TBB_GCC_VERSION >= 40000) || __INTEL_COMPILER ) )
#ifndef __TBB_WEAK_SYMBOLS_PRESENT
#define __TBB_WEAK_SYMBOLS_PRESENT ( !_WIN32 && !__APPLE__ && !__sun && (__TBB_GCC_VERSION >= 40000 || __INTEL_COMPILER ) )
#endif
/** __TBB_DYNAMIC_LOAD_ENABLED describes the system possibility to load sha red libraries at run time **/ /** __TBB_DYNAMIC_LOAD_ENABLED describes the system possibility to load sha red libraries at run time **/
#ifndef __TBB_DYNAMIC_LOAD_ENABLED #ifndef __TBB_DYNAMIC_LOAD_ENABLED
#define __TBB_DYNAMIC_LOAD_ENABLED 1 #define __TBB_DYNAMIC_LOAD_ENABLED 1
#endif #endif
/** __TBB_SOURCE_DIRECTLY_INCLUDED is a mode used in whitebox testing when /** __TBB_SOURCE_DIRECTLY_INCLUDED is a mode used in whitebox testing when
it's necessary to test internal functions not exported from TBB DLLs it's necessary to test internal functions not exported from TBB DLLs
**/ **/
#if (_WIN32||_WIN64) && __TBB_SOURCE_DIRECTLY_INCLUDED #if (_WIN32||_WIN64) && __TBB_SOURCE_DIRECTLY_INCLUDED
skipping to change at line 318 skipping to change at line 332
#endif /* __TBB_SCHEDULER_OBSERVER */ #endif /* __TBB_SCHEDULER_OBSERVER */
#if !defined(TBB_PREVIEW_TASK_ARENA) && __TBB_BUILD #if !defined(TBB_PREVIEW_TASK_ARENA) && __TBB_BUILD
#define TBB_PREVIEW_TASK_ARENA __TBB_CPF_BUILD #define TBB_PREVIEW_TASK_ARENA __TBB_CPF_BUILD
#endif /* TBB_PREVIEW_TASK_ARENA */ #endif /* TBB_PREVIEW_TASK_ARENA */
#define __TBB_TASK_ARENA TBB_PREVIEW_TASK_ARENA #define __TBB_TASK_ARENA TBB_PREVIEW_TASK_ARENA
#if TBB_PREVIEW_TASK_ARENA #if TBB_PREVIEW_TASK_ARENA
#define TBB_PREVIEW_LOCAL_OBSERVER 1 #define TBB_PREVIEW_LOCAL_OBSERVER 1
#define __TBB_NO_IMPLICIT_LINKAGE 1 #define __TBB_NO_IMPLICIT_LINKAGE 1
#define __TBB_RECYCLE_TO_ENQUEUE 1 #define __TBB_RECYCLE_TO_ENQUEUE 1
    #define __TBB_TASK_PRIORITY 0 // TODO: it will be removed in next versions
    #ifndef __TBB_TASK_PRIORITY
        #define __TBB_TASK_PRIORITY 0 // TODO: it will be removed in next versions
    #endif
#if !__TBB_SCHEDULER_OBSERVER #if !__TBB_SCHEDULER_OBSERVER
#error TBB_PREVIEW_TASK_ARENA requires __TBB_SCHEDULER_OBSERVER to be enabled #error TBB_PREVIEW_TASK_ARENA requires __TBB_SCHEDULER_OBSERVER to be enabled
#endif #endif
#endif /* TBB_PREVIEW_TASK_ARENA */ #endif /* TBB_PREVIEW_TASK_ARENA */
#if !defined(TBB_PREVIEW_LOCAL_OBSERVER) && __TBB_BUILD && __TBB_SCHEDULER_ OBSERVER #if !defined(TBB_PREVIEW_LOCAL_OBSERVER) && __TBB_BUILD && __TBB_SCHEDULER_ OBSERVER
#define TBB_PREVIEW_LOCAL_OBSERVER 1 #define TBB_PREVIEW_LOCAL_OBSERVER 1
#endif /* TBB_PREVIEW_LOCAL_OBSERVER */ #endif /* TBB_PREVIEW_LOCAL_OBSERVER */
#if TBB_USE_EXCEPTIONS && !__TBB_TASK_GROUP_CONTEXT #if TBB_USE_EXCEPTIONS && !__TBB_TASK_GROUP_CONTEXT
skipping to change at line 369 skipping to change at line 385
#ifdef _VARIADIC_MAX #ifdef _VARIADIC_MAX
#define __TBB_VARIADIC_MAX _VARIADIC_MAX #define __TBB_VARIADIC_MAX _VARIADIC_MAX
#else #else
#if _MSC_VER >= 1700 #if _MSC_VER >= 1700
#define __TBB_VARIADIC_MAX 5 /* current VS11 setting, may change. */ #define __TBB_VARIADIC_MAX 5 /* current VS11 setting, may change. */
#else #else
#define __TBB_VARIADIC_MAX 10 #define __TBB_VARIADIC_MAX 10
#endif #endif
#endif #endif
/** __TBB_WIN8UI_SUPPORT enables support of New Windows*8 Store Apps and limit a possibility to load
    shared libraries at run time only from application container **/
#if defined(WINAPI_FAMILY) && WINAPI_FAMILY == WINAPI_FAMILY_APP
#define __TBB_WIN8UI_SUPPORT 1
#else
#define __TBB_WIN8UI_SUPPORT 0
#endif
// Define preprocessor symbols used to determine architecture // Define preprocessor symbols used to determine architecture
#if _WIN32||_WIN64 #if _WIN32||_WIN64
#   if defined(_M_X64)||defined(__x86_64__)  // the latter for MinGW support #   if defined(_M_X64)||defined(__x86_64__)  // the latter for MinGW support
# define __TBB_x86_64 1 # define __TBB_x86_64 1
# elif defined(_M_IA64) # elif defined(_M_IA64)
# define __TBB_ipf 1 # define __TBB_ipf 1
#   elif defined(_M_IX86)||defined(__i386__) // the latter for MinGW support #   elif defined(_M_IX86)||defined(__i386__) // the latter for MinGW support
# define __TBB_x86_32 1 # define __TBB_x86_32 1
# endif # endif
#else /* Assume generic Unix */ #else /* Assume generic Unix */
skipping to change at line 392 skipping to change at line 416
# if __x86_64__ # if __x86_64__
# define __TBB_x86_64 1 # define __TBB_x86_64 1
# elif __ia64__ # elif __ia64__
# define __TBB_ipf 1 # define __TBB_ipf 1
# elif __i386__||__i386 // __i386 is for Sun OS # elif __i386__||__i386 // __i386 is for Sun OS
# define __TBB_x86_32 1 # define __TBB_x86_32 1
# else # else
# define __TBB_generic_arch 1 # define __TBB_generic_arch 1
# endif # endif
#endif #endif
/** Macros of the form __TBB_XXX_BROKEN denote known issues that are caused by /** Macros of the form __TBB_XXX_BROKEN denote known issues that are caused by
    the bugs in compilers, standard or OS specific libraries. They should be the bugs in compilers, standard or OS specific libraries. They should be
    removed as soon as the corresponding bugs are fixed or the buggy OS/compiler removed as soon as the corresponding bugs are fixed or the buggy OS/compiler
versions go out of the support list. versions go out of the support list.
**/ **/
#if __ANDROID__ && __TBB_GCC_VERSION <= 40403 && !__GCC_HAVE_SYNC_COMPARE_AND_SWAP_8 #if __ANDROID__ && __TBB_GCC_VERSION <= 40403 && !__GCC_HAVE_SYNC_COMPARE_AND_SWAP_8
    /** Necessary because on Android 8-byte CAS and F&A are not available for some processor architectures, /** Necessary because on Android 8-byte CAS and F&A are not available for some processor architectures,
        but no mandatory warning message appears from GCC 4.4.3. Instead, only a linkage error occurs when but no mandatory warning message appears from GCC 4.4.3. Instead, only a linkage error occurs when
        these atomic operations are used (such as in unit test test_atomic.exe). **/ these atomic operations are used (such as in unit test test_atomic.exe). **/
skipping to change at line 441 skipping to change at line 466
#if (_WIN32||_WIN64) && __INTEL_COMPILER == 1110 #if (_WIN32||_WIN64) && __INTEL_COMPILER == 1110
    /** That's a bug in Intel compiler 11.1.044/IA-32/Windows, that leads to a worker thread crash on the thread's startup. **/ /** That's a bug in Intel compiler 11.1.044/IA-32/Windows, that leads to a worker thread crash on the thread's startup. **/
#define __TBB_ICL_11_1_CODE_GEN_BROKEN 1 #define __TBB_ICL_11_1_CODE_GEN_BROKEN 1
#endif #endif
#if __clang__ || (__GNUC__==3 && __GNUC_MINOR__==3 && !defined(__INTEL_COMP ILER)) #if __clang__ || (__GNUC__==3 && __GNUC_MINOR__==3 && !defined(__INTEL_COMP ILER))
/** Bugs with access to nested classes declared in protected area */ /** Bugs with access to nested classes declared in protected area */
#define __TBB_PROTECTED_NESTED_CLASS_BROKEN 1 #define __TBB_PROTECTED_NESTED_CLASS_BROKEN 1
#endif #endif
#if __MINGW32__ && (__GNUC__<4 || __GNUC__==4 && __GNUC_MINOR__<2) #if __MINGW32__ && __TBB_GCC_VERSION < 40200
    /** MinGW has a bug with stack alignment for routines invoked from MS RTLs. /** MinGW has a bug with stack alignment for routines invoked from MS RTLs.
        Since GCC 4.2, the bug can be worked around via a special attribute. **/ Since GCC 4.2, the bug can be worked around via a special attribute. **/
#define __TBB_SSE_STACK_ALIGNMENT_BROKEN 1 #define __TBB_SSE_STACK_ALIGNMENT_BROKEN 1
#else #else
#define __TBB_SSE_STACK_ALIGNMENT_BROKEN 0 #define __TBB_SSE_STACK_ALIGNMENT_BROKEN 0
#endif #endif
#if __GNUC__==4 && __GNUC_MINOR__==3 && __GNUC_PATCHLEVEL__==0 #if __GNUC__==4 && __GNUC_MINOR__==3 && __GNUC_PATCHLEVEL__==0
/* GCC of this version may rashly ignore control dependencies */ /* GCC of this version may rashly ignore control dependencies */
#define __TBB_GCC_OPTIMIZER_ORDERING_BROKEN 1 #define __TBB_GCC_OPTIMIZER_ORDERING_BROKEN 1
skipping to change at line 492 skipping to change at line 517
#define __TBB_CPP11_STD_FORWARD_BROKEN 1 #define __TBB_CPP11_STD_FORWARD_BROKEN 1
#else #else
#define __TBB_CPP11_STD_FORWARD_BROKEN 0 #define __TBB_CPP11_STD_FORWARD_BROKEN 0
#endif #endif
#if __TBB_DEFINE_MIC #if __TBB_DEFINE_MIC
    /** Main thread and user's thread have different default thread affinity masks. **/ /** Main thread and user's thread have different default thread affinity masks. **/
#define __TBB_MAIN_THREAD_AFFINITY_BROKEN 1 #define __TBB_MAIN_THREAD_AFFINITY_BROKEN 1
#endif #endif
/** __TBB_WIN8UI_SUPPORT enables support of New Windows*8 Store Apps and limit a possibility to load
    shared libraries at run time only from application container **/
#if defined(WINAPI_FAMILY) && WINAPI_FAMILY == WINAPI_FAMILY_APP
    #define __TBB_WIN8UI_SUPPORT 1
#else
    #define __TBB_WIN8UI_SUPPORT 0
#endif
#if !defined(__EXCEPTIONS) && __GNUC__==4 && (__GNUC_MINOR__==4 ||__GNUC_MINOR__==5 || (__INTEL_COMPILER==1300 && __TBB_GCC_VERSION>=40600 && __TBB_GCC_VERSION<=40700)) && defined(__GXX_EXPERIMENTAL_CXX0X__)
#if __GXX_EXPERIMENTAL_CXX0X__ && !defined(__EXCEPTIONS) && \
    __GNUC__==4 && (__GNUC_MINOR__==4 ||__GNUC_MINOR__==5 || (__INTEL_COMPILER==1300 && (__GNUC_MINOR__==6 ||__GNUC_MINOR__==7)))
/* There is an issue for specific GCC toolchain when C++11 is enabled /* There is an issue for specific GCC toolchain when C++11 is enabled
and exceptions are disabled: and exceptions are disabled:
exception_ptr.h/nested_exception.h are using throw unconditionally. exception_ptr.h/nested_exception.h use throw unconditionally.
*/ */
#define __TBB_LIBSTDCPP_EXCEPTION_HEADERS_BROKEN 1 #define __TBB_LIBSTDCPP_EXCEPTION_HEADERS_BROKEN 1
#else #else
#define __TBB_LIBSTDCPP_EXCEPTION_HEADERS_BROKEN 0 #define __TBB_LIBSTDCPP_EXCEPTION_HEADERS_BROKEN 0
#endif #endif
#if __TBB_x86_32 && (__linux__ || __APPLE__ || _WIN32 || __sun) && ((defined(__INTEL_COMPILER) && (__INTEL_COMPILER <= 1300)) || (__GNUC__==3 && __GNUC_MINOR__==3) || defined(__SUNPRO_CC))
#if __TBB_x86_32 && (__linux__ || __APPLE__ || _WIN32 || __sun) && ((defined(__INTEL_COMPILER) && __INTEL_COMPILER <= 1300) || (__GNUC__==3 && __GNUC_MINOR__==3) || defined(__SUNPRO_CC))
// Some compilers for IA-32 fail to provide 8-byte alignment of objects on the stack, // Some compilers for IA-32 fail to provide 8-byte alignment of objects on the stack,
    // even if the object specifies 8-byte alignment. On such platforms, the IA-32 implementation // even if the object specifies 8-byte alignment. On such platforms, the IA-32 implementation
    // of 64 bit atomics (e.g. atomic<long long>) use different tactics depending upon // of 64 bit atomics (e.g. atomic<long long>) use different tactics depending upon
// whether the object is properly aligned or not. // whether the object is properly aligned or not.
#define __TBB_FORCE_64BIT_ALIGNMENT_BROKEN 1 #define __TBB_FORCE_64BIT_ALIGNMENT_BROKEN 1
#else #else
#define __TBB_FORCE_64BIT_ALIGNMENT_BROKEN 0 #define __TBB_FORCE_64BIT_ALIGNMENT_BROKEN 0
#endif #endif
#if (__TBB_DEFAULTED_AND_DELETED_FUNC_PRESENT && (__TBB_GCC_VERSION < 40700) && (!defined(__INTEL_COMPILER) && !defined (__clang__)))
#if __TBB_DEFAULTED_AND_DELETED_FUNC_PRESENT && __TBB_GCC_VERSION < 40700 && !defined(__INTEL_COMPILER) && !defined (__clang__)
#define __TBB_ZERO_INIT_WITH_DEFAULTED_CTOR_BROKEN 1 #define __TBB_ZERO_INIT_WITH_DEFAULTED_CTOR_BROKEN 1
#endif #endif
/** End of __TBB_XXX_BROKEN macro section **/ /** End of __TBB_XXX_BROKEN macro section **/
#define __TBB_ATOMIC_CTORS (__TBB_CONSTEXPR_PRESENT && __TBB_DEFAULTED_ AND_DELETED_FUNC_PRESENT && (!__TBB_ZERO_INIT_WITH_DEFAULTED_CTOR_BROKEN)) #define __TBB_ATOMIC_CTORS (__TBB_CONSTEXPR_PRESENT && __TBB_DEFAULTED_ AND_DELETED_FUNC_PRESENT && (!__TBB_ZERO_INIT_WITH_DEFAULTED_CTOR_BROKEN))
#endif /* __TBB_tbb_config_H */ #endif /* __TBB_tbb_config_H */
 End of changes. 23 change blocks. 
77 lines changed or deleted 94 lines changed or added


 tbb_machine.h   tbb_machine.h 
skipping to change at line 918 skipping to change at line 918
tbb::internal::atomic_backoff backoff; tbb::internal::atomic_backoff backoff;
while( !__TBB_TryLockByte(flag) ) backoff.pause(); while( !__TBB_TryLockByte(flag) ) backoff.pause();
return 0; return 0;
} }
#endif #endif
#ifndef __TBB_UnlockByte #ifndef __TBB_UnlockByte
#define __TBB_UnlockByte(addr) __TBB_store_with_release((addr),0) #define __TBB_UnlockByte(addr) __TBB_store_with_release((addr),0)
#endif #endif
// lock primitives with TSX
#if ( __TBB_x86_32 || __TBB_x86_64 ) /* only on ia32/intel64 */
inline void __TBB_TryLockByteElidedCancel() { __TBB_machine_try_lock_elided_cancel(); }
inline bool __TBB_TryLockByteElided( __TBB_atomic_flag& flag ) {
bool res = __TBB_machine_try_lock_elided( &flag )!=0;
// to avoid the "lemming" effect, we need to abort the transaction
// if __TBB_machine_try_lock_elided returns false (i.e., someone else
// has acquired the mutex non-speculatively).
if( !res ) __TBB_TryLockByteElidedCancel();
return res;
}
inline void __TBB_LockByteElided( __TBB_atomic_flag& flag )
{
for(;;) {
tbb::internal::spin_wait_while_eq( flag, 1 );
if( __TBB_machine_try_lock_elided( &flag ) )
return;
// Another thread acquired the lock "for real".
// To avoid the "lemming" effect, we abort the transaction.
__TBB_TryLockByteElidedCancel();
}
}
inline void __TBB_UnlockByteElided( __TBB_atomic_flag& flag ) {
__TBB_machine_unlock_elided( &flag );
}
#endif
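The three helpers above already encode the whole retry policy: spin while the byte looks held, attempt an elided acquire, and explicitly abort the transaction when someone owns the lock for real, so waiting threads do not pile into doomed transactions (the "lemming" effect). Client code therefore uses them exactly like the plain byte lock; a sketch with illustrative names:

    // Zero means "unheld", as with the non-elided byte lock.
    static __TBB_atomic_flag elided_flag = 0;

    void elided_critical_section(long& counter) {
        __TBB_LockByteElided(elided_flag);    // may run transactionally (TSX)
        ++counter;                            // HW detects real data races
        __TBB_UnlockByteElided(elided_flag);  // commits, or truly unlocks
    }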
#ifndef __TBB_ReverseByte #ifndef __TBB_ReverseByte
inline unsigned char __TBB_ReverseByte(unsigned char src) { inline unsigned char __TBB_ReverseByte(unsigned char src) {
return tbb::internal::reverse<unsigned char>::byte_table[src]; return tbb::internal::reverse<unsigned char>::byte_table[src];
} }
#endif #endif
template<typename T> template<typename T>
T __TBB_ReverseBits(T src) { T __TBB_ReverseBits(T src) {
T dst; T dst;
unsigned char *original = (unsigned char *) &src; unsigned char *original = (unsigned char *) &src;
 End of changes. 1 change blocks. 
0 lines changed or deleted 31 lines changed or added


 tbb_stddef.h   tbb_stddef.h 
skipping to change at line 34 skipping to change at line 34
the GNU General Public License. This exception does not however the GNU General Public License. This exception does not however
invalidate any other reasons why the executable file might be covered b y invalidate any other reasons why the executable file might be covered b y
the GNU General Public License. the GNU General Public License.
*/ */
#ifndef __TBB_tbb_stddef_H #ifndef __TBB_tbb_stddef_H
#define __TBB_tbb_stddef_H #define __TBB_tbb_stddef_H
// Marketing-driven product version // Marketing-driven product version
#define TBB_VERSION_MAJOR 4 #define TBB_VERSION_MAJOR 4
#define TBB_VERSION_MINOR 1 #define TBB_VERSION_MINOR 2
// Engineering-focused interface version // Engineering-focused interface version
#define TBB_INTERFACE_VERSION 6105 #define TBB_INTERFACE_VERSION 7000
#define TBB_INTERFACE_VERSION_MAJOR TBB_INTERFACE_VERSION/1000 #define TBB_INTERFACE_VERSION_MAJOR TBB_INTERFACE_VERSION/1000
// The oldest major interface version still supported // The oldest major interface version still supported
// To be used in SONAME, manifests, etc. // To be used in SONAME, manifests, etc.
#define TBB_COMPATIBLE_INTERFACE_VERSION 2 #define TBB_COMPATIBLE_INTERFACE_VERSION 2
#define __TBB_STRING_AUX(x) #x #define __TBB_STRING_AUX(x) #x
#define __TBB_STRING(x) __TBB_STRING_AUX(x) #define __TBB_STRING(x) __TBB_STRING_AUX(x)
// We do not need defines below for resource processing on windows // We do not need defines below for resource processing on windows
 End of changes. 2 change blocks. 
2 lines changed or deleted 2 lines changed or added
