| _concurrent_queue_impl.h | | _concurrent_queue_impl.h | |
| | | | |
| skipping to change at line 233 | | skipping to change at line 233 | |
| ++rb.n_invalid_entries; | | ++rb.n_invalid_entries; | |
| throw_exception( eid_bad_last_alloc ); | | throw_exception( eid_bad_last_alloc ); | |
| } | | } | |
| } while( counter!=k ) ; | | } while( counter!=k ) ; | |
| } | | } | |
| | | | |
| template<typename T> | | template<typename T> | |
| void micro_queue<T>::push( const void* item, ticket k, concurrent_queue_bas
e_v3<T>& base ) { | | void micro_queue<T>::push( const void* item, ticket k, concurrent_queue_bas
e_v3<T>& base ) { | |
| k &= -concurrent_queue_rep_base::n_queue; | | k &= -concurrent_queue_rep_base::n_queue; | |
| page* p = NULL; | | page* p = NULL; | |
|
| size_t index = k/concurrent_queue_rep_base::n_queue & (base.my_rep->ite
ms_per_page-1); | | size_t index = modulo_power_of_two( k/concurrent_queue_rep_base::n_queu
e, base.my_rep->items_per_page); | |
| if( !index ) { | | if( !index ) { | |
| __TBB_TRY { | | __TBB_TRY { | |
| concurrent_queue_page_allocator& pa = base; | | concurrent_queue_page_allocator& pa = base; | |
| p = pa.allocate_page(); | | p = pa.allocate_page(); | |
| } __TBB_CATCH (...) { | | } __TBB_CATCH (...) { | |
| ++base.my_rep->n_invalid_entries; | | ++base.my_rep->n_invalid_entries; | |
| invalidate_page_and_rethrow( k ); | | invalidate_page_and_rethrow( k ); | |
| } | | } | |
| p->mask = 0; | | p->mask = 0; | |
| p->next = NULL; | | p->next = NULL; | |
| | | | |
| skipping to change at line 283 | | skipping to change at line 283 | |
| | | | |
| template<typename T> | | template<typename T> | |
| bool micro_queue<T>::pop( void* dst, ticket k, concurrent_queue_base_v3<T>&
base ) { | | bool micro_queue<T>::pop( void* dst, ticket k, concurrent_queue_base_v3<T>&
base ) { | |
| k &= -concurrent_queue_rep_base::n_queue; | | k &= -concurrent_queue_rep_base::n_queue; | |
| if( head_counter!=k ) spin_wait_until_eq( head_counter, k ); | | if( head_counter!=k ) spin_wait_until_eq( head_counter, k ); | |
| call_itt_notify(acquired, &head_counter); | | call_itt_notify(acquired, &head_counter); | |
| if( tail_counter==k ) spin_wait_while_eq( tail_counter, k ); | | if( tail_counter==k ) spin_wait_while_eq( tail_counter, k ); | |
| call_itt_notify(acquired, &tail_counter); | | call_itt_notify(acquired, &tail_counter); | |
| page& p = *head_page; | | page& p = *head_page; | |
| __TBB_ASSERT( &p, NULL ); | | __TBB_ASSERT( &p, NULL ); | |
|
| size_t index = k/concurrent_queue_rep_base::n_queue & (base.my_rep->ite
ms_per_page-1); | | size_t index = modulo_power_of_two( k/concurrent_queue_rep_base::n_queu
e, base.my_rep->items_per_page ); | |
| bool success = false; | | bool success = false; | |
| { | | { | |
| micro_queue_pop_finalizer<T> finalizer( *this, base, k+concurrent_q
ueue_rep_base::n_queue, index==base.my_rep->items_per_page-1 ? &p : NULL ); | | micro_queue_pop_finalizer<T> finalizer( *this, base, k+concurrent_q
ueue_rep_base::n_queue, index==base.my_rep->items_per_page-1 ? &p : NULL ); | |
| if( p.mask & uintptr_t(1)<<index ) { | | if( p.mask & uintptr_t(1)<<index ) { | |
| success = true; | | success = true; | |
| assign_and_destroy_item( dst, p, index ); | | assign_and_destroy_item( dst, p, index ); | |
| } else { | | } else { | |
| --base.my_rep->n_invalid_entries; | | --base.my_rep->n_invalid_entries; | |
| } | | } | |
| } | | } | |
| | | | |
| skipping to change at line 308 | | skipping to change at line 308 | |
| micro_queue<T>& micro_queue<T>::assign( const micro_queue<T>& src, concurre
nt_queue_base_v3<T>& base ) { | | micro_queue<T>& micro_queue<T>::assign( const micro_queue<T>& src, concurre
nt_queue_base_v3<T>& base ) { | |
| head_counter = src.head_counter; | | head_counter = src.head_counter; | |
| tail_counter = src.tail_counter; | | tail_counter = src.tail_counter; | |
| page_mutex = src.page_mutex; | | page_mutex = src.page_mutex; | |
| | | | |
| const page* srcp = src.head_page; | | const page* srcp = src.head_page; | |
| if( is_valid_page(srcp) ) { | | if( is_valid_page(srcp) ) { | |
| ticket g_index = head_counter; | | ticket g_index = head_counter; | |
| __TBB_TRY { | | __TBB_TRY { | |
| size_t n_items = (tail_counter-head_counter)/concurrent_queue_
rep_base::n_queue; | | size_t n_items = (tail_counter-head_counter)/concurrent_queue_
rep_base::n_queue; | |
|
| size_t index = head_counter/concurrent_queue_rep_base::n_queue
& (base.my_rep->items_per_page-1); | | size_t index = modulo_power_of_two( head_counter/concurrent_que
ue_rep_base::n_queue, base.my_rep->items_per_page ); | |
| size_t end_in_first_page = (index+n_items<base.my_rep->items_pe
r_page)?(index+n_items):base.my_rep->items_per_page; | | size_t end_in_first_page = (index+n_items<base.my_rep->items_pe
r_page)?(index+n_items):base.my_rep->items_per_page; | |
| | | | |
| head_page = make_copy( base, srcp, index, end_in_first_page, g_
index ); | | head_page = make_copy( base, srcp, index, end_in_first_page, g_
index ); | |
| page* cur_page = head_page; | | page* cur_page = head_page; | |
| | | | |
| if( srcp != src.tail_page ) { | | if( srcp != src.tail_page ) { | |
| for( srcp = srcp->next; srcp!=src.tail_page; srcp=srcp->nex
t ) { | | for( srcp = srcp->next; srcp!=src.tail_page; srcp=srcp->nex
t ) { | |
| cur_page->next = make_copy( base, srcp, 0, base.my_rep-
>items_per_page, g_index ); | | cur_page->next = make_copy( base, srcp, 0, base.my_rep-
>items_per_page, g_index ); | |
| cur_page = cur_page->next; | | cur_page = cur_page->next; | |
| } | | } | |
| | | | |
| __TBB_ASSERT( srcp==src.tail_page, NULL ); | | __TBB_ASSERT( srcp==src.tail_page, NULL ); | |
|
| size_t last_index = tail_counter/concurrent_queue_rep_base:
:n_queue & (base.my_rep->items_per_page-1); | | size_t last_index = modulo_power_of_two( tail_counter/concu
rrent_queue_rep_base::n_queue, base.my_rep->items_per_page ); | |
| if( last_index==0 ) last_index = base.my_rep->items_per_pag
e; | | if( last_index==0 ) last_index = base.my_rep->items_per_pag
e; | |
| | | | |
| cur_page->next = make_copy( base, srcp, 0, last_index, g_in
dex ); | | cur_page->next = make_copy( base, srcp, 0, last_index, g_in
dex ); | |
| cur_page = cur_page->next; | | cur_page = cur_page->next; | |
| } | | } | |
| tail_page = cur_page; | | tail_page = cur_page; | |
| } __TBB_CATCH (...) { | | } __TBB_CATCH (...) { | |
| invalidate_page_and_rethrow( g_index ); | | invalidate_page_and_rethrow( g_index ); | |
| } | | } | |
| } else { | | } else { | |
| | | | |
| skipping to change at line 634 | | skipping to change at line 634 | |
| }; | | }; | |
| | | | |
| template<typename T> | | template<typename T> | |
| bool concurrent_queue_iterator_rep<T>::get_item( T*& item, size_t k ) { | | bool concurrent_queue_iterator_rep<T>::get_item( T*& item, size_t k ) { | |
| if( k==my_queue.my_rep->tail_counter ) { | | if( k==my_queue.my_rep->tail_counter ) { | |
| item = NULL; | | item = NULL; | |
| return true; | | return true; | |
| } else { | | } else { | |
| typename concurrent_queue_base_v3<T>::page* p = array[concurrent_qu
eue_rep<T>::index(k)]; | | typename concurrent_queue_base_v3<T>::page* p = array[concurrent_qu
eue_rep<T>::index(k)]; | |
| __TBB_ASSERT(p,NULL); | | __TBB_ASSERT(p,NULL); | |
|
| size_t i = k/concurrent_queue_rep<T>::n_queue & (my_queue.my_rep->i
tems_per_page-1); | | size_t i = modulo_power_of_two( k/concurrent_queue_rep<T>::n_queue,
my_queue.my_rep->items_per_page ); | |
| item = &micro_queue<T>::get_ref(*p,i); | | item = &micro_queue<T>::get_ref(*p,i); | |
| return (p->mask & uintptr_t(1)<<i)!=0; | | return (p->mask & uintptr_t(1)<<i)!=0; | |
| } | | } | |
| } | | } | |
| | | | |
| //! Constness-independent portion of concurrent_queue_iterator. | | //! Constness-independent portion of concurrent_queue_iterator. | |
| /** @ingroup containers */ | | /** @ingroup containers */ | |
| template<typename Value> | | template<typename Value> | |
| class concurrent_queue_iterator_base_v3 : no_assign { | | class concurrent_queue_iterator_base_v3 : no_assign { | |
| //! Represents concurrent_queue over which we are iterating. | | //! Represents concurrent_queue over which we are iterating. | |
| | | | |
| skipping to change at line 719 | | skipping to change at line 719 | |
| template<typename Value> | | template<typename Value> | |
| void concurrent_queue_iterator_base_v3<Value>::advance() { | | void concurrent_queue_iterator_base_v3<Value>::advance() { | |
| __TBB_ASSERT( my_item, "attempt to increment iterator past end of queue
" ); | | __TBB_ASSERT( my_item, "attempt to increment iterator past end of queue
" ); | |
| size_t k = my_rep->head_counter; | | size_t k = my_rep->head_counter; | |
| const concurrent_queue_base_v3<Value>& queue = my_rep->my_queue; | | const concurrent_queue_base_v3<Value>& queue = my_rep->my_queue; | |
| #if TBB_USE_ASSERT | | #if TBB_USE_ASSERT | |
| Value* tmp; | | Value* tmp; | |
| my_rep->get_item(tmp,k); | | my_rep->get_item(tmp,k); | |
| __TBB_ASSERT( my_item==tmp, NULL ); | | __TBB_ASSERT( my_item==tmp, NULL ); | |
| #endif /* TBB_USE_ASSERT */ | | #endif /* TBB_USE_ASSERT */ | |
|
| size_t i = k/concurrent_queue_rep<Value>::n_queue & (queue.my_rep->item
s_per_page-1); | | size_t i = modulo_power_of_two( k/concurrent_queue_rep<Value>::n_queue,
queue.my_rep->items_per_page ); | |
| if( i==queue.my_rep->items_per_page-1 ) { | | if( i==queue.my_rep->items_per_page-1 ) { | |
| typename concurrent_queue_base_v3<Value>::page*& root = my_rep->arr
ay[concurrent_queue_rep<Value>::index(k)]; | | typename concurrent_queue_base_v3<Value>::page*& root = my_rep->arr
ay[concurrent_queue_rep<Value>::index(k)]; | |
| root = root->next; | | root = root->next; | |
| } | | } | |
| // advance k | | // advance k | |
| my_rep->head_counter = ++k; | | my_rep->head_counter = ++k; | |
| if( !my_rep->get_item(my_item, k) ) advance(); | | if( !my_rep->get_item(my_item, k) ) advance(); | |
| } | | } | |
| | | | |
| //! Similar to C++0x std::remove_cv | | //! Similar to C++0x std::remove_cv | |
| | | | |
End of changes. 6 change blocks. |
| 6 lines changed or deleted | | 6 lines changed or added | |
|
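The recurring change in this file replaces the open-coded mask `k/concurrent_queue_rep_base::n_queue & (items_per_page-1)` with a call to `modulo_power_of_two`. The helper itself is not shown in this diff; the sketch below is a minimal stand-in that captures the identity the original masking already relied on (items_per_page is a power of two), and the real TBB internal implementation may differ in detail.

    // Minimal sketch of the helper the new code calls; not the actual TBB definition.
    #include <cassert>
    #include <cstddef>

    inline std::size_t modulo_power_of_two( std::size_t op, std::size_t divisor ) {
        // Valid only when divisor is a power of two: x % 2^n == x & (2^n - 1),
        // which is exactly what the replaced open-coded mask computed.
        assert( (divisor & (divisor - 1)) == 0 && "divisor must be a power of two" );
        return op & (divisor - 1);
    }

Factoring the mask into a named helper does not change behavior; it only documents the power-of-two assumption in one place.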
| _concurrent_unordered_impl.h | | _concurrent_unordered_impl.h | |
| | | | |
| skipping to change at line 276 | | skipping to change at line 276 | |
| my_node_allocator.deallocate(pnode, 1); | | my_node_allocator.deallocate(pnode, 1); | |
| __TBB_RETHROW(); | | __TBB_RETHROW(); | |
| } | | } | |
| | | | |
| return (pnode); | | return (pnode); | |
| } | | } | |
| | | | |
| // Allocate a new node with the given order key; used to allocate dummy
nodes | | // Allocate a new node with the given order key; used to allocate dummy
nodes | |
| nodeptr_t create_node(sokey_t order_key) { | | nodeptr_t create_node(sokey_t order_key) { | |
| nodeptr_t pnode = my_node_allocator.allocate(1); | | nodeptr_t pnode = my_node_allocator.allocate(1); | |
|
| | | pnode->init(order_key); | |
| __TBB_TRY { | | | |
| new(static_cast<void*>(&pnode->my_element)) T(); | | | |
| pnode->init(order_key); | | | |
| } __TBB_CATCH(...) { | | | |
| my_node_allocator.deallocate(pnode, 1); | | | |
| __TBB_RETHROW(); | | | |
| } | | | |
| | | | |
| return (pnode); | | return (pnode); | |
| } | | } | |
| | | | |
| split_ordered_list(allocator_type a = allocator_type()) | | split_ordered_list(allocator_type a = allocator_type()) | |
| : my_node_allocator(a), my_element_count(0) | | : my_node_allocator(a), my_element_count(0) | |
| { | | { | |
| // Immediately allocate a dummy node with order key of 0. This node | | // Immediately allocate a dummy node with order key of 0. This node | |
| // will always be the head of the list. | | // will always be the head of the list. | |
| my_head = create_node(0); | | my_head = create_node(0); | |
| } | | } | |
| | | | |
| skipping to change at line 385 | | skipping to change at line 377 | |
| | | | |
| // Swaps 'this' list with the passed in one | | // Swaps 'this' list with the passed in one | |
| void swap(self_type& other) | | void swap(self_type& other) | |
| { | | { | |
| if (this == &other) | | if (this == &other) | |
| { | | { | |
| // Nothing to do | | // Nothing to do | |
| return; | | return; | |
| } | | } | |
| | | | |
|
| std::swap(my_element_count, other.my_element_count); | | std::swap(my_element_count, other.my_element_count); | |
| std::swap(my_head, other.my_head); | | std::swap(my_head, other.my_head); | |
| } | | } | |
| | | | |
| // Split-order list functions | | // Split-order list functions | |
| | | | |
| // Returns a first element in the SOL, which is always a dummy | | // Returns a first element in the SOL, which is always a dummy | |
| raw_iterator raw_begin() { | | raw_iterator raw_begin() { | |
| return raw_iterator(my_head); | | return raw_iterator(my_head); | |
| } | | } | |
| | | | |
| // Returns a first element in the SOL, which is always a dummy | | // Returns a first element in the SOL, which is always a dummy | |
| | | | |
| skipping to change at line 414 | | skipping to change at line 406 | |
| | | | |
| raw_const_iterator raw_end() const { | | raw_const_iterator raw_end() const { | |
| return raw_const_iterator(0); | | return raw_const_iterator(0); | |
| } | | } | |
| | | | |
| static sokey_t get_order_key(const raw_const_iterator& it) { | | static sokey_t get_order_key(const raw_const_iterator& it) { | |
| return it.get_node_ptr()->get_order_key(); | | return it.get_node_ptr()->get_order_key(); | |
| } | | } | |
| | | | |
| static sokey_t get_safe_order_key(const raw_const_iterator& it) { | | static sokey_t get_safe_order_key(const raw_const_iterator& it) { | |
|
| if( !it.get_node_ptr() ) return sokey_t(~0U); | | if( !it.get_node_ptr() ) return ~sokey_t(0); | |
| return it.get_node_ptr()->get_order_key(); | | return it.get_node_ptr()->get_order_key(); | |
| } | | } | |
| | | | |
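The change from `sokey_t(~0U)` to `~sokey_t(0)` matters whenever sokey_t is wider than unsigned int: `~0U` is inverted at 32-bit width first and only then widened, so the old sentinel lost its upper bits. Since get_safe_order_key uses this value as an "infinite" end-of-list sentinel, a truncated value could compare less than real 64-bit order keys. A small illustration, assuming sokey_t is a 64-bit unsigned type such as size_t on a 64-bit platform:

    #include <cassert>
    #include <cstdint>

    int main() {
        typedef std::uint64_t sokey_t;       // assumption: 64-bit unsigned, like size_t on a 64-bit platform
        sokey_t old_key = sokey_t(~0U);      // ~0U is 32 bits wide: widens to 0x00000000FFFFFFFF
        sokey_t new_key = ~sokey_t(0);       // widen first, then invert:  0xFFFFFFFFFFFFFFFF
        assert( old_key == 0xFFFFFFFFu );
        assert( new_key == 0xFFFFFFFFFFFFFFFFull );
        return 0;
    }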
| // Returns a public iterator version of the internal iterator. Public i
terator must not | | // Returns a public iterator version of the internal iterator. Public i
terator must not | |
| // be a dummy private iterator. | | // be a dummy private iterator. | |
| iterator get_iterator(raw_iterator it) { | | iterator get_iterator(raw_iterator it) { | |
| __TBB_ASSERT(it.get_node_ptr() == NULL || !it.get_node_ptr()->is_du
mmy(), "Invalid user node (dummy)"); | | __TBB_ASSERT(it.get_node_ptr() == NULL || !it.get_node_ptr()->is_du
mmy(), "Invalid user node (dummy)"); | |
| return iterator(it.get_node_ptr(), this); | | return iterator(it.get_node_ptr(), this); | |
| } | | } | |
| | | | |
| | | | |
| skipping to change at line 466 | | skipping to change at line 458 | |
| { | | { | |
| // Skip all dummy, internal only iterators | | // Skip all dummy, internal only iterators | |
| while (it != raw_end() && it.get_node_ptr()->is_dummy()) | | while (it != raw_end() && it.get_node_ptr()->is_dummy()) | |
| ++it; | | ++it; | |
| | | | |
| return const_iterator(it.get_node_ptr(), this); | | return const_iterator(it.get_node_ptr(), this); | |
| } | | } | |
| | | | |
| // Erase an element using the allocator | | // Erase an element using the allocator | |
| void destroy_node(nodeptr_t pnode) { | | void destroy_node(nodeptr_t pnode) { | |
|
| my_node_allocator.destroy(pnode); | | if (!pnode->is_dummy()) my_node_allocator.destroy(pnode); | |
| my_node_allocator.deallocate(pnode, 1); | | my_node_allocator.deallocate(pnode, 1); | |
| } | | } | |
| | | | |
| // Try to insert a new element in the list. If insert fails, return the
node that | | // Try to insert a new element in the list. If insert fails, return the
node that | |
| // was inserted instead. | | // was inserted instead. | |
| nodeptr_t try_insert(nodeptr_t previous, nodeptr_t new_node, nodeptr_t
current_node) { | | nodeptr_t try_insert(nodeptr_t previous, nodeptr_t new_node, nodeptr_t
current_node) { | |
| new_node->my_next = current_node; | | new_node->my_next = current_node; | |
| return previous->atomic_set_next(new_node, current_node); | | return previous->atomic_set_next(new_node, current_node); | |
| } | | } | |
| | | | |
| | | | |
| skipping to change at line 822 | | skipping to change at line 814 | |
| | | | |
| //! Set my_midpoint_node to point approximately half way between my
_begin_node and my_end_node. | | //! Set my_midpoint_node to point approximately half way between my
_begin_node and my_end_node. | |
| void set_midpoint() const { | | void set_midpoint() const { | |
| if( my_begin_node == my_end_node ) // not divisible | | if( my_begin_node == my_end_node ) // not divisible | |
| my_midpoint_node = my_end_node; | | my_midpoint_node = my_end_node; | |
| else { | | else { | |
| sokey_t begin_key = solist_t::get_safe_order_key(my_begin_n
ode); | | sokey_t begin_key = solist_t::get_safe_order_key(my_begin_n
ode); | |
| sokey_t end_key = solist_t::get_safe_order_key(my_end_node)
; | | sokey_t end_key = solist_t::get_safe_order_key(my_end_node)
; | |
| size_t mid_bucket = __TBB_ReverseBits( begin_key + (end_key
-begin_key)/2 ) % my_table.my_number_of_buckets; | | size_t mid_bucket = __TBB_ReverseBits( begin_key + (end_key
-begin_key)/2 ) % my_table.my_number_of_buckets; | |
| while ( !my_table.is_initialized(mid_bucket) ) mid_bucket =
my_table.get_parent(mid_bucket); | | while ( !my_table.is_initialized(mid_bucket) ) mid_bucket =
my_table.get_parent(mid_bucket); | |
|
| my_midpoint_node = my_table.my_solist.first_real_iterator(m | | if(__TBB_ReverseBits(mid_bucket) > begin_key) { | |
| y_table.get_bucket( mid_bucket )); | | // found a dummy_node between begin and end | |
| if( my_midpoint_node == my_begin_node ) | | my_midpoint_node = my_table.my_solist.first_real_iterat | |
| | | or(my_table.get_bucket( mid_bucket )); | |
| | | } | |
| | | else { | |
| | | // didn't find a dummy node between begin and end. | |
| my_midpoint_node = my_end_node; | | my_midpoint_node = my_end_node; | |
|
| | | } | |
| #if TBB_USE_ASSERT | | #if TBB_USE_ASSERT | |
|
| else { | | { | |
| sokey_t mid_key = solist_t::get_safe_order_key(my_midpo
int_node); | | sokey_t mid_key = solist_t::get_safe_order_key(my_midpo
int_node); | |
| __TBB_ASSERT( begin_key < mid_key, "my_begin_node is af
ter my_midpoint_node" ); | | __TBB_ASSERT( begin_key < mid_key, "my_begin_node is af
ter my_midpoint_node" ); | |
| __TBB_ASSERT( mid_key <= end_key, "my_midpoint_node is
after my_end_node" ); | | __TBB_ASSERT( mid_key <= end_key, "my_midpoint_node is
after my_end_node" ); | |
| } | | } | |
| #endif // TBB_USE_ASSERT | | #endif // TBB_USE_ASSERT | |
| } | | } | |
| } | | } | |
| }; | | }; | |
| | | | |
| class range_type : public const_range_type { | | class range_type : public const_range_type { | |
| | | | |
| skipping to change at line 1123 | | skipping to change at line 1120 | |
| // Insert an element in the hash given its value | | // Insert an element in the hash given its value | |
| std::pair<iterator, bool> internal_insert(const value_type& value) | | std::pair<iterator, bool> internal_insert(const value_type& value) | |
| { | | { | |
| sokey_t order_key = (sokey_t) my_hash_compare(get_key(value)); | | sokey_t order_key = (sokey_t) my_hash_compare(get_key(value)); | |
| size_type bucket = order_key % my_number_of_buckets; | | size_type bucket = order_key % my_number_of_buckets; | |
| | | | |
| // If bucket is empty, initialize it first | | // If bucket is empty, initialize it first | |
| if (!is_initialized(bucket)) | | if (!is_initialized(bucket)) | |
| init_bucket(bucket); | | init_bucket(bucket); | |
| | | | |
|
| size_type new_count; | | size_type new_count = 0; | |
| order_key = split_order_key_regular(order_key); | | order_key = split_order_key_regular(order_key); | |
| raw_iterator it = get_bucket(bucket); | | raw_iterator it = get_bucket(bucket); | |
| raw_iterator last = my_solist.raw_end(); | | raw_iterator last = my_solist.raw_end(); | |
| raw_iterator where = it; | | raw_iterator where = it; | |
| | | | |
| __TBB_ASSERT(where != last, "Invalid head node"); | | __TBB_ASSERT(where != last, "Invalid head node"); | |
| | | | |
| // First node is a dummy node | | // First node is a dummy node | |
| ++where; | | ++where; | |
| | | | |
| | | | |
| skipping to change at line 1369 | | skipping to change at line 1366 | |
| | | | |
| // Utilities for keys | | // Utilities for keys | |
| | | | |
| // A regular order key has its original hash value reversed and the las
t bit set | | // A regular order key has its original hash value reversed and the las
t bit set | |
| sokey_t split_order_key_regular(sokey_t order_key) const { | | sokey_t split_order_key_regular(sokey_t order_key) const { | |
| return __TBB_ReverseBits(order_key) | 0x1; | | return __TBB_ReverseBits(order_key) | 0x1; | |
| } | | } | |
| | | | |
| // A dummy order key has its original hash value reversed and the last
bit unset | | // A dummy order key has its original hash value reversed and the last
bit unset | |
| sokey_t split_order_key_dummy(sokey_t order_key) const { | | sokey_t split_order_key_dummy(sokey_t order_key) const { | |
|
| return __TBB_ReverseBits(order_key) & ~(0x1); | | return __TBB_ReverseBits(order_key) & ~sokey_t(0x1); | |
| } | | } | |
| | | | |
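To illustrate the two key shapes described in the comments above, here is a toy version with 8-bit keys. The real sokey_t is machine-word sized and the bit reversal is done by __TBB_ReverseBits; reverse8 below is only an illustrative stand-in.

    #include <cassert>
    #include <cstdint>

    static std::uint8_t reverse8( std::uint8_t x ) {   // toy stand-in for __TBB_ReverseBits
        std::uint8_t r = 0;
        for( int i = 0; i < 8; ++i ) { r = std::uint8_t( (r << 1) | (x & 1) ); x >>= 1; }
        return r;
    }

    int main() {
        std::uint8_t hash    = 0x06;                    // 0b00000110
        std::uint8_t regular = reverse8(hash) | 0x1;    // 0b01100001, low bit set
        std::uint8_t dummy   = reverse8(hash) & ~0x1;   // 0b01100000, low bit clear
        // A bucket's dummy node therefore sorts before the regular nodes that hash to it.
        assert( dummy < regular );
        return 0;
    }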
| // Shared variables | | // Shared variables | |
| atomic<size_type> my_number
_of_buckets; // Current table size | | atomic<size_type> my_number
_of_buckets; // Current table size | |
| solist_t my_solist
; // List where all the elements are kept | | solist_t my_solist
; // List where all the elements are kept | |
| typename allocator_type::template rebind<raw_iterator>::other my_alloca
tor; // Allocator object for segments | | typename allocator_type::template rebind<raw_iterator>::other my_alloca
tor; // Allocator object for segments | |
| float my_maximu
m_bucket_size; // Maximum size of the bucket | | float my_maximu
m_bucket_size; // Maximum size of the bucket | |
| atomic<raw_iterator*> my_bucket
s[pointers_per_table]; // The segment table | | atomic<raw_iterator*> my_bucket
s[pointers_per_table]; // The segment table | |
| }; | | }; | |
| #if _MSC_VER | | #if _MSC_VER | |
| #pragma warning(pop) // warning 4127 -- while (true) has a constant express
ion in it | | #pragma warning(pop) // warning 4127 -- while (true) has a constant express
ion in it | |
| #endif | | #endif | |
| | | | |
| //! Hash multiplier | | //! Hash multiplier | |
|
| static const size_t hash_multiplier = tbb::internal::size_t_select(26544357
69U, 11400714819323198485ULL); | | static const size_t hash_multiplier = tbb::internal::select_size_t_constant
<2654435769U, 11400714819323198485ULL>::value; | |
| } // namespace internal | | } // namespace internal | |
| //! @endcond | | //! @endcond | |
| //! Hasher functions | | //! Hasher functions | |
| template<typename T> | | template<typename T> | |
| inline size_t tbb_hasher( const T& t ) { | | inline size_t tbb_hasher( const T& t ) { | |
| return static_cast<size_t>( t ) * internal::hash_multiplier; | | return static_cast<size_t>( t ) * internal::hash_multiplier; | |
| } | | } | |
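The two multiplier constants are the 32- and 64-bit golden-ratio values (floor(2^32/phi) and floor(2^64/phi)) commonly used for Fibonacci hashing; the change above makes the selection a genuine compile-time constant instead of a runtime call. One plausible shape for such a selector is sketched below; the actual definition of tbb::internal::select_size_t_constant may differ.

    #include <cstddef>

    // Plausible sketch only; not necessarily how tbb::internal defines it.
    template<unsigned u, unsigned long long ull>
    struct select_size_t_constant {
        // Picks the constant matching the width of size_t while remaining
        // an integral constant expression usable at compile time.
        static const std::size_t value =
            std::size_t( sizeof(std::size_t) == sizeof(unsigned) ? u : ull );
    };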
| template<typename P> | | template<typename P> | |
| inline size_t tbb_hasher( P* ptr ) { | | inline size_t tbb_hasher( P* ptr ) { | |
| size_t const h = reinterpret_cast<size_t>( ptr ); | | size_t const h = reinterpret_cast<size_t>( ptr ); | |
| | | | |
End of changes. 10 change blocks. |
| 20 lines changed or deleted | | 17 lines changed or added | |
|
| concurrent_vector.h | | concurrent_vector.h | |
| | | | |
| skipping to change at line 80 | | skipping to change at line 80 | |
| // Workaround for overzealous compiler warnings in /Wp64 mode | | // Workaround for overzealous compiler warnings in /Wp64 mode | |
| #pragma warning (push) | | #pragma warning (push) | |
| #pragma warning (disable: 4267) | | #pragma warning (disable: 4267) | |
| #endif | | #endif | |
| | | | |
| namespace tbb { | | namespace tbb { | |
| | | | |
| template<typename T, class A = cache_aligned_allocator<T> > | | template<typename T, class A = cache_aligned_allocator<T> > | |
| class concurrent_vector; | | class concurrent_vector; | |
| | | | |
|
| | | template<typename Container, typename Value> | |
| | | class vector_iterator; | |
| | | | |
| //! @cond INTERNAL | | //! @cond INTERNAL | |
| namespace internal { | | namespace internal { | |
| | | | |
| //! Bad allocation marker | | //! Bad allocation marker | |
| static void *const vector_allocation_error_flag = reinterpret_cast<void
*>(size_t(63)); | | static void *const vector_allocation_error_flag = reinterpret_cast<void
*>(size_t(63)); | |
| | | | |
| //! Base class of concurrent vector implementation. | | //! Base class of concurrent vector implementation. | |
| /** @ingroup containers */ | | /** @ingroup containers */ | |
| class concurrent_vector_base_v3 { | | class concurrent_vector_base_v3 { | |
| protected: | | protected: | |
| | | | |
| skipping to change at line 142 | | skipping to change at line 145 | |
| | | | |
| concurrent_vector_base_v3() { | | concurrent_vector_base_v3() { | |
| my_early_size = 0; | | my_early_size = 0; | |
| my_first_block = 0; // here is not default_initial_segments | | my_first_block = 0; // here is not default_initial_segments | |
| for( segment_index_t i = 0; i < pointers_per_short_table; i++) | | for( segment_index_t i = 0; i < pointers_per_short_table; i++) | |
| my_storage[i].array = NULL; | | my_storage[i].array = NULL; | |
| my_segment = my_storage; | | my_segment = my_storage; | |
| } | | } | |
| __TBB_EXPORTED_METHOD ~concurrent_vector_base_v3(); | | __TBB_EXPORTED_METHOD ~concurrent_vector_base_v3(); | |
| | | | |
|
| | | //these helper methods use the fact that segments are allocated so | |
| | | //that every segment size is an (increasing) power of 2, | |
| | | //with one exception: segment 0 has size 2, just like segment 1; | |
| | | //e.g. the segment with index 3 has size 2^3=8; | |
| static segment_index_t segment_index_of( size_type index ) { | | static segment_index_t segment_index_of( size_type index ) { | |
| return segment_index_t( __TBB_Log2( index|1 ) ); | | return segment_index_t( __TBB_Log2( index|1 ) ); | |
| } | | } | |
| | | | |
| static segment_index_t segment_base( segment_index_t k ) { | | static segment_index_t segment_base( segment_index_t k ) { | |
| return (segment_index_t(1)<<k & ~segment_index_t(1)); | | return (segment_index_t(1)<<k & ~segment_index_t(1)); | |
| } | | } | |
| | | | |
| static inline segment_index_t segment_base_index_of( segment_index_
t &index ) { | | static inline segment_index_t segment_base_index_of( segment_index_
t &index ) { | |
| segment_index_t k = segment_index_of( index ); | | segment_index_t k = segment_index_of( index ); | |
| index -= segment_base(k); | | index -= segment_base(k); | |
| return k; | | return k; | |
| } | | } | |
| | | | |
| static size_type segment_size( segment_index_t k ) { | | static size_type segment_size( segment_index_t k ) { | |
| return segment_index_t(1)<<k; // fake value for k==0 | | return segment_index_t(1)<<k; // fake value for k==0 | |
| } | | } | |
| | | | |
|
| | | static bool is_first_element_in_segment(size_type element_index){ | |
| | | //check if element_index is a power of 2 that is at least 2. | |
| | | //The idea is to detect if the iterator crosses a segment bound | |
| | | ary, | |
| | | //and 2 is the minimal index for which it's true | |
| | | __TBB_ASSERT(element_index, "there should be no need to call " | |
| | | "is_first_element_in_segment for 0t | |
| | | h element" ); | |
| | | return is_power_of_two_factor( element_index, 2 ); | |
| | | } | |
| | | | |
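The comments above describe the segment layout: segments 0 and 1 each hold 2 elements, segment k (k >= 2) holds 2^k elements, and a global index i lives in segment __TBB_Log2(i|1) at offset i minus that segment's base. The standalone illustration below uses plain-loop stand-ins for __TBB_Log2 and the helpers shown in the diff, so the names and bodies here are illustrative rather than TBB's own.

    #include <cassert>
    #include <cstddef>

    // Toy stand-ins; layout produced: segments of size 2, 2, 4, 8, 16, ...
    static std::size_t log2_floor( std::size_t x ) {
        std::size_t k = 0;
        while( x >>= 1 ) ++k;
        return k;
    }
    static std::size_t segment_index_of( std::size_t index ) { return log2_floor( index | 1 ); }
    static std::size_t segment_base( std::size_t k ) { return (std::size_t(1) << k) & ~std::size_t(1); }

    int main() {
        assert( segment_index_of(0) == 0 && segment_index_of(1) == 0 );  // segment 0: indices 0..1
        assert( segment_index_of(2) == 1 && segment_index_of(3) == 1 );  // segment 1: indices 2..3
        assert( segment_index_of(4) == 2 && segment_base(2) == 4 );      // segment 2: indices 4..7
        assert( segment_index_of(11) == 3 && segment_base(3) == 8 );     // index 11 is offset 3 in segment 3
        return 0;
    }

This is also why is_first_element_in_segment only needs to test for a power of two that is at least 2: those are exactly the indices (2, 4, 8, ...) at which an iterator crosses into a new segment.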
| //! An operation on an n-element array starting at begin. | | //! An operation on an n-element array starting at begin. | |
| typedef void (__TBB_EXPORTED_FUNC *internal_array_op1)(void* begin,
size_type n ); | | typedef void (__TBB_EXPORTED_FUNC *internal_array_op1)(void* begin,
size_type n ); | |
| | | | |
| //! An operation on n-element destination array and n-element sourc
e array. | | //! An operation on n-element destination array and n-element sourc
e array. | |
| typedef void (__TBB_EXPORTED_FUNC *internal_array_op2)(void* dst, c
onst void* src, size_type n ); | | typedef void (__TBB_EXPORTED_FUNC *internal_array_op2)(void* dst, c
onst void* src, size_type n ); | |
| | | | |
| //! Internal structure for compact() | | //! Internal structure for compact() | |
| struct internal_segments_table { | | struct internal_segments_table { | |
| segment_index_t first_block; | | segment_index_t first_block; | |
| void* table[pointers_per_long_table]; | | void* table[pointers_per_long_table]; | |
| | | | |
| skipping to change at line 196 | | skipping to change at line 212 | |
| void __TBB_EXPORTED_METHOD internal_resize( size_type n, size_type
element_size, size_type max_size, const void *src, | | void __TBB_EXPORTED_METHOD internal_resize( size_type n, size_type
element_size, size_type max_size, const void *src, | |
| internal_array_op1 dest
roy, internal_array_op2 init ); | | internal_array_op1 dest
roy, internal_array_op2 init ); | |
| size_type __TBB_EXPORTED_METHOD internal_grow_to_at_least_with_resu
lt( size_type new_size, size_type element_size, internal_array_op2 init, co
nst void *src ); | | size_type __TBB_EXPORTED_METHOD internal_grow_to_at_least_with_resu
lt( size_type new_size, size_type element_size, internal_array_op2 init, co
nst void *src ); | |
| | | | |
| //! Deprecated entry point for backwards compatibility to TBB 2.1. | | //! Deprecated entry point for backwards compatibility to TBB 2.1. | |
| void __TBB_EXPORTED_METHOD internal_grow_to_at_least( size_type new
_size, size_type element_size, internal_array_op2 init, const void *src ); | | void __TBB_EXPORTED_METHOD internal_grow_to_at_least( size_type new
_size, size_type element_size, internal_array_op2 init, const void *src ); | |
| private: | | private: | |
| //! Private functionality | | //! Private functionality | |
| class helper; | | class helper; | |
| friend class helper; | | friend class helper; | |
|
| | | | |
| | | template<typename Container, typename Value> | |
| | | friend class vector_iterator; | |
| | | | |
| }; | | }; | |
| | | | |
| typedef concurrent_vector_base_v3 concurrent_vector_base; | | typedef concurrent_vector_base_v3 concurrent_vector_base; | |
| | | | |
| //! Meets requirements of a forward iterator for STL and a Value for a
blocked_range.*/ | | //! Meets requirements of a forward iterator for STL and a Value for a
blocked_range.*/ | |
| /** Value is either the T or const T type of the container. | | /** Value is either the T or const T type of the container. | |
| @ingroup containers */ | | @ingroup containers */ | |
| template<typename Container, typename Value> | | template<typename Container, typename Value> | |
| class vector_iterator | | class vector_iterator | |
| { | | { | |
| | | | |
| skipping to change at line 285 | | skipping to change at line 305 | |
| __TBB_ASSERT( item==&my_vector->internal_subscript(my_index), "
corrupt cache" ); | | __TBB_ASSERT( item==&my_vector->internal_subscript(my_index), "
corrupt cache" ); | |
| return *item; | | return *item; | |
| } | | } | |
| Value& operator[]( ptrdiff_t k ) const { | | Value& operator[]( ptrdiff_t k ) const { | |
| return my_vector->internal_subscript(my_index+k); | | return my_vector->internal_subscript(my_index+k); | |
| } | | } | |
| Value* operator->() const {return &operator*();} | | Value* operator->() const {return &operator*();} | |
| | | | |
| //! Pre increment | | //! Pre increment | |
| vector_iterator& operator++() { | | vector_iterator& operator++() { | |
|
| size_t k = ++my_index; | | size_t element_index = ++my_index; | |
| if( my_item ) { | | if( my_item ) { | |
|
| // Following test uses 2's-complement wizardry | | //TODO: consider using of knowledge about "first_block opti | |
| if( (k& (k-2))==0 ) { | | mization" here as well? | |
| // k is a power of two that is at least k-2 | | if( concurrent_vector_base::is_first_element_in_segment(ele | |
| | | ment_index)) { | |
| | | //if the iterator crosses a segment boundary, the point | |
| | | er become invalid | |
| | | //as possibly next segment is in another memory locatio | |
| | | n | |
| my_item= NULL; | | my_item= NULL; | |
| } else { | | } else { | |
| ++my_item; | | ++my_item; | |
| } | | } | |
| } | | } | |
| return *this; | | return *this; | |
| } | | } | |
| | | | |
| //! Pre decrement | | //! Pre decrement | |
| vector_iterator& operator--() { | | vector_iterator& operator--() { | |
| __TBB_ASSERT( my_index>0, "operator--() applied to iterator alr
eady at beginning of concurrent_vector" ); | | __TBB_ASSERT( my_index>0, "operator--() applied to iterator alr
eady at beginning of concurrent_vector" ); | |
|
| size_t k = my_index--; | | size_t element_index = my_index--; | |
| if( my_item ) { | | if( my_item ) { | |
|
| // Following test uses 2's-complement wizardry | | if(concurrent_vector_base::is_first_element_in_segment(elem | |
| if( (k& (k-2))==0 ) { | | ent_index)) { | |
| // k is a power of two that is at least k-2 | | //if the iterator crosses a segment boundary, the point | |
| | | er become invalid | |
| | | //as possibly next segment is in another memory locatio | |
| | | n | |
| my_item= NULL; | | my_item= NULL; | |
| } else { | | } else { | |
| --my_item; | | --my_item; | |
| } | | } | |
| } | | } | |
| return *this; | | return *this; | |
| } | | } | |
| | | | |
| //! Post increment | | //! Post increment | |
| vector_iterator operator++(int) { | | vector_iterator operator++(int) { | |
| | | | |
End of changes. 8 change blocks. |
| 8 lines changed or deleted | | 38 lines changed or added | |
|
| memory_pool.h | | memory_pool.h | |
| | | | |
| skipping to change at line 140 | | skipping to change at line 140 | |
| void deallocate( pointer p, size_type ) { | | void deallocate( pointer p, size_type ) { | |
| my_pool->free(p); | | my_pool->free(p); | |
| } | | } | |
| //! Largest value for which method allocate might succeed. | | //! Largest value for which method allocate might succeed. | |
| size_type max_size() const throw() { | | size_type max_size() const throw() { | |
| size_type max = static_cast<size_type>(-1) / sizeof (value_type); | | size_type max = static_cast<size_type>(-1) / sizeof (value_type); | |
| return (max > 0 ? max : 1); | | return (max > 0 ? max : 1); | |
| } | | } | |
| //! Copy-construct value at location pointed to by p. | | //! Copy-construct value at location pointed to by p. | |
| #if __TBB_CPP11_VARIADIC_TEMPLATES_PRESENT && __TBB_CPP11_RVALUE_REF_PRESEN
T | | #if __TBB_CPP11_VARIADIC_TEMPLATES_PRESENT && __TBB_CPP11_RVALUE_REF_PRESEN
T | |
|
| template<typename... Args> | | template<typename U, typename... Args> | |
| void construct(pointer p, Args&&... args) | | void construct(U *p, Args&&... args) | |
| #if __TBB_CPP11_STD_FORWARD_BROKEN | | #if __TBB_CPP11_STD_FORWARD_BROKEN | |
|
| { ::new((void *)p) T((args)...); } | | { ::new((void *)p) U((args)...); } | |
| #else | | #else | |
|
| { ::new((void *)p) T(std::forward<Args>(args)...); } | | { ::new((void *)p) U(std::forward<Args>(args)...); } | |
| #endif | | #endif | |
| #else // __TBB_CPP11_VARIADIC_TEMPLATES_PRESENT && __TBB_CPP11_RVALUE_REF_P
RESENT | | #else // __TBB_CPP11_VARIADIC_TEMPLATES_PRESENT && __TBB_CPP11_RVALUE_REF_P
RESENT | |
| void construct( pointer p, const value_type& value ) { ::new((void*)(p)
) value_type(value); } | | void construct( pointer p, const value_type& value ) { ::new((void*)(p)
) value_type(value); } | |
| #endif // __TBB_CPP11_VARIADIC_TEMPLATES_PRESENT && __TBB_CPP11_RVALUE_REF_
PRESENT | | #endif // __TBB_CPP11_VARIADIC_TEMPLATES_PRESENT && __TBB_CPP11_RVALUE_REF_
PRESENT | |
| | | | |
| //! Destroy value at location pointed to by p. | | //! Destroy value at location pointed to by p. | |
| void destroy( pointer p ) { p->~value_type(); } | | void destroy( pointer p ) { p->~value_type(); } | |
| | | | |
| }; | | }; | |
| | | | |
| | | | |
| skipping to change at line 241 | | skipping to change at line 241 | |
| template <typename Alloc> | | template <typename Alloc> | |
| void *memory_pool<Alloc>::allocate_request(intptr_t pool_id, size_t & bytes
) { | | void *memory_pool<Alloc>::allocate_request(intptr_t pool_id, size_t & bytes
) { | |
| memory_pool<Alloc> &self = *reinterpret_cast<memory_pool<Alloc>*>(pool_
id); | | memory_pool<Alloc> &self = *reinterpret_cast<memory_pool<Alloc>*>(pool_
id); | |
| const size_t unit_size = sizeof(typename Alloc::value_type); | | const size_t unit_size = sizeof(typename Alloc::value_type); | |
| __TBBMALLOC_ASSERT( 0 == bytes%unit_size, NULL); | | __TBBMALLOC_ASSERT( 0 == bytes%unit_size, NULL); | |
| void *ptr; | | void *ptr; | |
| __TBB_TRY { ptr = self.my_alloc.allocate( bytes/unit_size ); } | | __TBB_TRY { ptr = self.my_alloc.allocate( bytes/unit_size ); } | |
| __TBB_CATCH(...) { return 0; } | | __TBB_CATCH(...) { return 0; } | |
| return ptr; | | return ptr; | |
| } | | } | |
|
| | | #if _MSC_VER==1700 && !defined(__INTEL_COMPILER) | |
| | | // Workaround for erroneous "unreachable code" warning in the template | |
| | | below. | |
| | | // Specific for VC++ 17 compiler | |
| | | #pragma warning (push) | |
| | | #pragma warning (disable: 4702) | |
| | | #endif | |
| template <typename Alloc> | | template <typename Alloc> | |
| int memory_pool<Alloc>::deallocate_request(intptr_t pool_id, void* raw_ptr,
size_t raw_bytes) { | | int memory_pool<Alloc>::deallocate_request(intptr_t pool_id, void* raw_ptr,
size_t raw_bytes) { | |
| memory_pool<Alloc> &self = *reinterpret_cast<memory_pool<Alloc>*>(pool_
id); | | memory_pool<Alloc> &self = *reinterpret_cast<memory_pool<Alloc>*>(pool_
id); | |
| const size_t unit_size = sizeof(typename Alloc::value_type); | | const size_t unit_size = sizeof(typename Alloc::value_type); | |
| __TBBMALLOC_ASSERT( 0 == raw_bytes%unit_size, NULL); | | __TBBMALLOC_ASSERT( 0 == raw_bytes%unit_size, NULL); | |
| self.my_alloc.deallocate( static_cast<typename Alloc::value_type*>(raw_
ptr), raw_bytes/unit_size ); | | self.my_alloc.deallocate( static_cast<typename Alloc::value_type*>(raw_
ptr), raw_bytes/unit_size ); | |
| return 0; | | return 0; | |
| } | | } | |
|
| | | #if _MSC_VER==1700 && !defined(__INTEL_COMPILER) | |
| | | #pragma warning (pop) | |
| | | #endif | |
| inline fixed_pool::fixed_pool(void *buf, size_t size) : my_buffer(buf), my_
size(size) { | | inline fixed_pool::fixed_pool(void *buf, size_t size) : my_buffer(buf), my_
size(size) { | |
| rml::MemPoolPolicy args(allocate_request, 0, size, /*fixedPool=*/true); | | rml::MemPoolPolicy args(allocate_request, 0, size, /*fixedPool=*/true); | |
| rml::MemPoolError res = rml::pool_create_v1(intptr_t(this), &args, &my_
pool); | | rml::MemPoolError res = rml::pool_create_v1(intptr_t(this), &args, &my_
pool); | |
| if( res!=rml::POOL_OK ) __TBB_THROW(std::bad_alloc()); | | if( res!=rml::POOL_OK ) __TBB_THROW(std::bad_alloc()); | |
| } | | } | |
| inline void *fixed_pool::allocate_request(intptr_t pool_id, size_t & bytes)
{ | | inline void *fixed_pool::allocate_request(intptr_t pool_id, size_t & bytes)
{ | |
| fixed_pool &self = *reinterpret_cast<fixed_pool*>(pool_id); | | fixed_pool &self = *reinterpret_cast<fixed_pool*>(pool_id); | |
| if( !__TBB_CompareAndSwapW(&self.my_size, 0, (bytes=self.my_size)) ) | | if( !__TBB_CompareAndSwapW(&self.my_size, 0, (bytes=self.my_size)) ) | |
| return 0; // all the memory was given already | | return 0; // all the memory was given already | |
| return self.my_buffer; | | return self.my_buffer; | |
| | | | |
End of changes. 5 change blocks. |
| 4 lines changed or deleted | | 14 lines changed or added | |
|
| scalable_allocator.h | | scalable_allocator.h | |
| | | | |
| skipping to change at line 91 | | skipping to change at line 91 | |
| /** The "_aligned_free" analogue. | | /** The "_aligned_free" analogue. | |
| @ingroup memory_allocation */ | | @ingroup memory_allocation */ | |
| void __TBB_EXPORTED_FUNC scalable_aligned_free (void* ptr); | | void __TBB_EXPORTED_FUNC scalable_aligned_free (void* ptr); | |
| | | | |
| /** The analogue of _msize/malloc_size/malloc_usable_size. | | /** The analogue of _msize/malloc_size/malloc_usable_size. | |
| Returns the usable size of a memory block previously allocated by scala
ble_*, | | Returns the usable size of a memory block previously allocated by scala
ble_*, | |
| or 0 (zero) if ptr does not point to such a block. | | or 0 (zero) if ptr does not point to such a block. | |
| @ingroup memory_allocation */ | | @ingroup memory_allocation */ | |
| size_t __TBB_EXPORTED_FUNC scalable_msize (void* ptr); | | size_t __TBB_EXPORTED_FUNC scalable_msize (void* ptr); | |
| | | | |
|
| | | /* Setting TBB_MALLOC_USE_HUGE_PAGES environment variable to 1 enables huge | |
| | | pages. | |
| | | scalable_allocation_mode call has priority over environment variable. */ | |
| | | enum AllocationModeParam { | |
| | | USE_HUGE_PAGES /* value turns using huge pages on and off */ | |
| | | }; | |
| | | | |
| | | /** Set TBB allocator-specific allocation modes. | |
| | | @ingroup memory_allocation */ | |
| | | int __TBB_EXPORTED_FUNC scalable_allocation_mode(int param, intptr_t value) | |
| | | ; | |
| | | | |
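A minimal usage sketch of the new entry point, based only on the declaration above; the exact return-value convention is not shown in this diff, so it is simply passed through here.

    #include "tbb/scalable_allocator.h"

    int request_huge_pages() {
        // Ask the TBB allocator to back its allocations with huge pages;
        // per the comment above, this overrides the TBB_MALLOC_USE_HUGE_PAGES
        // environment variable.
        return scalable_allocation_mode( USE_HUGE_PAGES, 1 );
    }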
| #ifdef __cplusplus | | #ifdef __cplusplus | |
| } /* extern "C" */ | | } /* extern "C" */ | |
| #endif /* __cplusplus */ | | #endif /* __cplusplus */ | |
| | | | |
| #ifdef __cplusplus | | #ifdef __cplusplus | |
| | | | |
| namespace rml { | | namespace rml { | |
| class MemoryPool; | | class MemoryPool; | |
| | | | |
| typedef void *(*rawAllocType)(intptr_t pool_id, size_t &bytes); | | typedef void *(*rawAllocType)(intptr_t pool_id, size_t &bytes); | |
| | | | |
| skipping to change at line 115 | | skipping to change at line 125 | |
| | | | |
| struct MemPoolPolicy { | | struct MemPoolPolicy { | |
| rawAllocType pAlloc; | | rawAllocType pAlloc; | |
| rawFreeType pFree; | | rawFreeType pFree; | |
| size_t granularity; // granularity of pAlloc allocations | | size_t granularity; // granularity of pAlloc allocations | |
| }; | | }; | |
| */ | | */ | |
| | | | |
| struct MemPoolPolicy { | | struct MemPoolPolicy { | |
| enum { | | enum { | |
|
| VERSION = 1 | | TBBMALLOC_POOL_VERSION = 1 | |
| }; | | }; | |
| | | | |
| rawAllocType pAlloc; | | rawAllocType pAlloc; | |
| rawFreeType pFree; | | rawFreeType pFree; | |
| // granularity of pAlloc allocations. 0 means default used
. | | // granularity of pAlloc allocations. 0 means default used
. | |
| size_t granularity; | | size_t granularity; | |
| int version; | | int version; | |
| // all memory consumed at 1st pAlloc call and never return
ed, | | // all memory consumed at 1st pAlloc call and never return
ed, | |
| // no more pAlloc calls after 1st | | // no more pAlloc calls after 1st | |
| unsigned fixedPool : 1, | | unsigned fixedPool : 1, | |
| // memory consumed but returned only at pool termination | | // memory consumed but returned only at pool termination | |
| keepAllMemory : 1, | | keepAllMemory : 1, | |
| reserved : 30; | | reserved : 30; | |
| | | | |
| MemPoolPolicy(rawAllocType pAlloc_, rawFreeType pFree_, | | MemPoolPolicy(rawAllocType pAlloc_, rawFreeType pFree_, | |
| size_t granularity_ = 0, bool fixedPool_ = false, | | size_t granularity_ = 0, bool fixedPool_ = false, | |
| bool keepAllMemory_ = false) : | | bool keepAllMemory_ = false) : | |
|
| pAlloc(pAlloc_), pFree(pFree_), granularity(granularity_), version(
VERSION), | | pAlloc(pAlloc_), pFree(pFree_), granularity(granularity_), version(
TBBMALLOC_POOL_VERSION), | |
| fixedPool(fixedPool_), keepAllMemory(keepAllMemory_), | | fixedPool(fixedPool_), keepAllMemory(keepAllMemory_), | |
| reserved(0) {} | | reserved(0) {} | |
| }; | | }; | |
| | | | |
| enum MemPoolError { | | enum MemPoolError { | |
| POOL_OK, // pool created successfully | | POOL_OK, // pool created successfully | |
| INVALID_POLICY, // invalid policy parameters found | | INVALID_POLICY, // invalid policy parameters found | |
| UNSUPPORTED_POLICY, // requested pool policy is not supported by alloca
tor library | | UNSUPPORTED_POLICY, // requested pool policy is not supported by alloca
tor library | |
| NO_MEMORY // lack of memory during pool creation | | NO_MEMORY // lack of memory during pool creation | |
| }; | | }; | |
| | | | |
| skipping to change at line 221 | | skipping to change at line 231 | |
| void deallocate( pointer p, size_type ) { | | void deallocate( pointer p, size_type ) { | |
| scalable_free( p ); | | scalable_free( p ); | |
| } | | } | |
| | | | |
| //! Largest value for which method allocate might succeed. | | //! Largest value for which method allocate might succeed. | |
| size_type max_size() const throw() { | | size_type max_size() const throw() { | |
| size_type absolutemax = static_cast<size_type>(-1) / sizeof (value_
type); | | size_type absolutemax = static_cast<size_type>(-1) / sizeof (value_
type); | |
| return (absolutemax > 0 ? absolutemax : 1); | | return (absolutemax > 0 ? absolutemax : 1); | |
| } | | } | |
| #if __TBB_CPP11_VARIADIC_TEMPLATES_PRESENT && __TBB_CPP11_RVALUE_REF_PRESEN
T | | #if __TBB_CPP11_VARIADIC_TEMPLATES_PRESENT && __TBB_CPP11_RVALUE_REF_PRESEN
T | |
|
| template<typename... Args> | | template<typename U, typename... Args> | |
| void construct(pointer p, Args&&... args) | | void construct(U *p, Args&&... args) | |
| #if __TBB_CPP11_STD_FORWARD_BROKEN | | #if __TBB_CPP11_STD_FORWARD_BROKEN | |
|
| { ::new((void *)p) T((args)...); } | | { ::new((void *)p) U((args)...); } | |
| #else | | #else | |
|
| { ::new((void *)p) T(std::forward<Args>(args)...); } | | { ::new((void *)p) U(std::forward<Args>(args)...); } | |
| #endif | | #endif | |
| #else // __TBB_CPP11_VARIADIC_TEMPLATES_PRESENT && __TBB_CPP11_RVALUE_REF_P
RESENT | | #else // __TBB_CPP11_VARIADIC_TEMPLATES_PRESENT && __TBB_CPP11_RVALUE_REF_P
RESENT | |
| void construct( pointer p, const value_type& value ) {::new((void*)(p))
value_type(value);} | | void construct( pointer p, const value_type& value ) {::new((void*)(p))
value_type(value);} | |
| #endif // __TBB_CPP11_VARIADIC_TEMPLATES_PRESENT && __TBB_CPP11_RVALUE_REF_
PRESENT | | #endif // __TBB_CPP11_VARIADIC_TEMPLATES_PRESENT && __TBB_CPP11_RVALUE_REF_
PRESENT | |
| void destroy( pointer p ) {p->~value_type();} | | void destroy( pointer p ) {p->~value_type();} | |
| }; | | }; | |
| | | | |
| #if _MSC_VER && !defined(__INTEL_COMPILER) | | #if _MSC_VER && !defined(__INTEL_COMPILER) | |
| #pragma warning (pop) | | #pragma warning (pop) | |
| #endif // warning 4100 is back | | #endif // warning 4100 is back | |
| | | | |
End of changes. 6 change blocks. |
| 6 lines changed or deleted | | 18 lines changed or added | |
|
| task_arena.h | | task_arena.h | |
| | | | |
| skipping to change at line 66 | | skipping to change at line 66 | |
| /*override*/ task* execute() { | | /*override*/ task* execute() { | |
| my_func(); | | my_func(); | |
| return NULL; | | return NULL; | |
| } | | } | |
| public: | | public: | |
| enqueued_function_task ( const F& f ) : my_func(f) {} | | enqueued_function_task ( const F& f ) : my_func(f) {} | |
| }; | | }; | |
| | | | |
| class delegate_base : no_assign { | | class delegate_base : no_assign { | |
| public: | | public: | |
|
| virtual void run() = 0; | | virtual void operator()() const = 0; | |
| virtual ~delegate_base() {} | | virtual ~delegate_base() {} | |
| }; | | }; | |
| | | | |
| template<typename F> | | template<typename F> | |
| class delegated_function : public delegate_base { | | class delegated_function : public delegate_base { | |
| F &my_func; | | F &my_func; | |
|
| /*override*/ void run() { | | /*override*/ void operator()() const { | |
| my_func(); | | my_func(); | |
| } | | } | |
| public: | | public: | |
| delegated_function ( F& f ) : my_func(f) {} | | delegated_function ( F& f ) : my_func(f) {} | |
| }; | | }; | |
| } // namespace internal | | } // namespace internal | |
| //! @endcond | | //! @endcond | |
| | | | |
| /** 1-to-1 proxy representation class of scheduler's arena | | /** 1-to-1 proxy representation class of scheduler's arena | |
| * Constructors set up settings only, real construction is deferred till th
e first method invocation | | * Constructors set up settings only, real construction is deferred till th
e first method invocation | |
| * TODO: A side effect of this is that it's impossible to create a const ta
sk_arena object. Rethink? | | * TODO: A side effect of this is that it's impossible to create a const ta
sk_arena object. Rethink? | |
| * Destructor only removes one of the references to the inner arena represe
ntation. | | * Destructor only removes one of the references to the inner arena represe
ntation. | |
| * Final destruction happens when all the references (and the work) are gon
e. | | * Final destruction happens when all the references (and the work) are gon
e. | |
| */ | | */ | |
| class task_arena { | | class task_arena { | |
| friend class internal::task_scheduler_observer_v3; | | friend class internal::task_scheduler_observer_v3; | |
| //! Concurrency level for deferred initialization | | //! Concurrency level for deferred initialization | |
| int my_max_concurrency; | | int my_max_concurrency; | |
| | | | |
|
| | | //! Reserved master slots | |
| | | unsigned my_master_slots; | |
| | | | |
| //! NULL if not currently initialized. | | //! NULL if not currently initialized. | |
| internal::arena* my_arena; | | internal::arena* my_arena; | |
| | | | |
|
| | | // Initialization flag enabling compiler to throw excessive lazy initia | |
| | | lization checks | |
| | | bool my_initialized; | |
| | | | |
| // const methods help to optimize the !my_arena check TODO: check, IDEA
: move to base-class? | | // const methods help to optimize the !my_arena check TODO: check, IDEA
: move to base-class? | |
|
| internal::arena* __TBB_EXPORTED_METHOD internal_initialize( int ) const
; | | void __TBB_EXPORTED_METHOD internal_initialize( ); | |
| void __TBB_EXPORTED_METHOD internal_terminate( ); | | void __TBB_EXPORTED_METHOD internal_terminate( ); | |
| void __TBB_EXPORTED_METHOD internal_enqueue( task&, intptr_t ) const; | | void __TBB_EXPORTED_METHOD internal_enqueue( task&, intptr_t ) const; | |
| void __TBB_EXPORTED_METHOD internal_execute( internal::delegate_base& )
const; | | void __TBB_EXPORTED_METHOD internal_execute( internal::delegate_base& )
const; | |
| void __TBB_EXPORTED_METHOD internal_wait() const; | | void __TBB_EXPORTED_METHOD internal_wait() const; | |
| | | | |
|
| inline void check_init() { | | | |
| if( !my_arena ) | | | |
| my_arena = internal_initialize( my_max_concurrency ); | | | |
| } | | | |
| | | | |
| public: | | public: | |
| //! Typedef for number of threads that is automatic. | | //! Typedef for number of threads that is automatic. | |
| static const int automatic = -1; // any value < 1 means 'automatic' | | static const int automatic = -1; // any value < 1 means 'automatic' | |
| | | | |
|
| //! Creates task_arena with certain concurrency limit | | //! Creates task_arena with certain concurrency limits | |
| task_arena(int max_concurrency = automatic) | | /** @arg max_concurrency specifies total number of slots in arena where | |
| | | threads work | |
| | | * @arg reserved_for_masters specifies number of slots to be used by m | |
| | | aster threads only. | |
| | | * Value of 1 is default and reflects behavior of implicit arenas | |
| | | . | |
| | | **/ | |
| | | task_arena(int max_concurrency = automatic, unsigned reserved_for_maste | |
| | | rs = 1) | |
| : my_max_concurrency(max_concurrency) | | : my_max_concurrency(max_concurrency) | |
|
| | | , my_master_slots(reserved_for_masters) | |
| , my_arena(0) | | , my_arena(0) | |
|
| | | , my_initialized(false) | |
| {} | | {} | |
| | | | |
| //! Copies settings from another task_arena | | //! Copies settings from another task_arena | |
| task_arena(const task_arena &s) | | task_arena(const task_arena &s) | |
| : my_max_concurrency(s.my_max_concurrency) // copy settings | | : my_max_concurrency(s.my_max_concurrency) // copy settings | |
|
| | | , my_master_slots(s.my_master_slots) | |
| , my_arena(0) // but not the reference or instance | | , my_arena(0) // but not the reference or instance | |
|
| | | , my_initialized(false) | |
| {} | | {} | |
| | | | |
|
| //! Removes the reference to the internal arena representation, and des | | inline void initialize() { | |
| troys the external object | | if( !my_initialized ) { | |
| //! Not thread safe wrt concurrent invocations of other methods | | internal_initialize(); | |
| | | my_initialized = true; | |
| | | } | |
| | | } | |
| | | | |
| | | //! Overrides concurrency level and forces initialization of internal r | |
| | | epresentation | |
| | | inline void initialize(int max_concurrency, unsigned reserved_for_maste | |
| | | rs = 1) { | |
| | | __TBB_ASSERT( !my_arena, "task_arena was initialized already"); | |
| | | if( !my_initialized ) { | |
| | | my_max_concurrency = max_concurrency; | |
| | | my_master_slots = reserved_for_masters; | |
| | | initialize(); | |
| | | } // TODO: else throw? | |
| | | } | |
| | | | |
| | | //! Removes the reference to the internal arena representation. | |
| | | //! Not thread safe wrt concurrent invocations of other methods. | |
| | | inline void terminate() { | |
| | | if( my_initialized ) { | |
| | | internal_terminate(); | |
| | | my_initialized = false; | |
| | | } | |
| | | } | |
| | | | |
| | | //! Removes the reference to the internal arena representation, and des | |
| | | troys the external object. | |
| | | //! Not thread safe wrt concurrent invocations of other methods. | |
| ~task_arena() { | | ~task_arena() { | |
|
| internal_terminate(); | | terminate(); | |
| } | | } | |
| | | | |
|
| | | //! Returns true if the arena is active (initialized); false otherwise. | |
| | | //! The name was chosen to match a task_scheduler_init method with the | |
| | | same semantics. | |
| | | bool is_active() const { return my_initialized; } | |
| | | | |
| //! Enqueues a task into the arena to process a functor, and immediatel
y returns. | | //! Enqueues a task into the arena to process a functor, and immediatel
y returns. | |
| //! Does not require the calling thread to join the arena | | //! Does not require the calling thread to join the arena | |
| template<typename F> | | template<typename F> | |
| void enqueue( const F& f ) { | | void enqueue( const F& f ) { | |
|
| check_init(); | | initialize(); | |
| internal_enqueue( *new( task::allocate_root() ) internal::enqueued_
function_task<F>(f), 0 ); | | internal_enqueue( *new( task::allocate_root() ) internal::enqueued_
function_task<F>(f), 0 ); | |
| } | | } | |
| | | | |
| #if __TBB_TASK_PRIORITY | | #if __TBB_TASK_PRIORITY | |
| //! Enqueues a task with priority p into the arena to process a functor
f, and immediately returns. | | //! Enqueues a task with priority p into the arena to process a functor
f, and immediately returns. | |
| //! Does not require the calling thread to join the arena | | //! Does not require the calling thread to join the arena | |
| template<typename F> | | template<typename F> | |
| void enqueue( const F& f, priority_t p ) { | | void enqueue( const F& f, priority_t p ) { | |
| __TBB_ASSERT( p == priority_low || p == priority_normal || p == pri
ority_high, "Invalid priority level value" ); | | __TBB_ASSERT( p == priority_low || p == priority_normal || p == pri
ority_high, "Invalid priority level value" ); | |
|
| check_init(); | | initialize(); | |
| internal_enqueue( *new( task::allocate_root() ) internal::enqueued_
function_task<F>(f), (intptr_t)p ); | | internal_enqueue( *new( task::allocate_root() ) internal::enqueued_
function_task<F>(f), (intptr_t)p ); | |
| } | | } | |
| #endif// __TBB_TASK_PRIORITY | | #endif// __TBB_TASK_PRIORITY | |
| | | | |
| //! Joins the arena and executes a functor, then returns | | //! Joins the arena and executes a functor, then returns | |
| //! If not possible to join, wraps the functor into a task, enqueues it
and waits for task completion | | //! If not possible to join, wraps the functor into a task, enqueues it
and waits for task completion | |
| //! Can decrement the arena demand for workers, causing a worker to lea
ve and free a slot to the calling thread | | //! Can decrement the arena demand for workers, causing a worker to lea
ve and free a slot to the calling thread | |
| template<typename F> | | template<typename F> | |
| void execute(F& f) { | | void execute(F& f) { | |
|
| check_init(); | | initialize(); | |
| internal::delegated_function<F> d(f); | | internal::delegated_function<F> d(f); | |
| internal_execute( d ); | | internal_execute( d ); | |
| } | | } | |
| | | | |
| //! Joins the arena and executes a functor, then returns | | //! Joins the arena and executes a functor, then returns | |
| //! If not possible to join, wraps the functor into a task, enqueues it
and waits for task completion | | //! If not possible to join, wraps the functor into a task, enqueues it
and waits for task completion | |
| //! Can decrement the arena demand for workers, causing a worker to lea
ve and free a slot to the calling thread | | //! Can decrement the arena demand for workers, causing a worker to lea
ve and free a slot to the calling thread | |
| template<typename F> | | template<typename F> | |
| void execute(const F& f) { | | void execute(const F& f) { | |
|
| check_init(); | | initialize(); | |
| internal::delegated_function<const F> d(f); | | internal::delegated_function<const F> d(f); | |
| internal_execute( d ); | | internal_execute( d ); | |
| } | | } | |
| | | | |
| //! Wait for all work in the arena to be completed | | //! Wait for all work in the arena to be completed | |
| //! Even submitted by other application threads | | //! Even submitted by other application threads | |
| //! Joins arena if/when possible (in the same way as execute()) | | //! Joins arena if/when possible (in the same way as execute()) | |
| void wait_until_empty() { | | void wait_until_empty() { | |
|
| check_init(); | | initialize(); | |
| internal_wait(); | | internal_wait(); | |
| } | | } | |
| | | | |
|
| //! Sets concurrency level and initializes internal representation | | | |
| inline void initialize(int max_concurrency) { | | | |
| my_max_concurrency = max_concurrency; | | | |
| __TBB_ASSERT( !my_arena, "task_arena was initialized already"); // | | | |
| TODO: throw? | | | |
| check_init(); | | | |
| } | | | |
| | | | |
| //! Returns the index, aka slot number, of the calling thread in its cu
rrent arena | | //! Returns the index, aka slot number, of the calling thread in its cu
rrent arena | |
| static int __TBB_EXPORTED_FUNC current_slot(); | | static int __TBB_EXPORTED_FUNC current_slot(); | |
| }; | | }; | |
| | | | |
| } // namespace interfaceX | | } // namespace interfaceX | |
| | | | |
| using interface6::task_arena; | | using interface6::task_arena; | |
| | | | |
| } // namespace tbb | | } // namespace tbb | |
| | | | |
| | | | |
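For reference, a minimal usage sketch of the task_arena interface shown above. The functor type is illustrative, and the arena is assumed to have been constructed or initialized with some maximum concurrency elsewhere (not part of this diff); preview builds of this class may also require an extra preview macro to be defined.

    #include "tbb/task_arena.h"         // assumed entry point for the sketch

    struct BackgroundWork {             // illustrative functor
        void operator()() const { /* ... do some work ... */ }
    };

    void use_arena( tbb::task_arena& arena ) {
        BackgroundWork job;
        arena.enqueue( job );                       // fire-and-forget; returns immediately
    #if __TBB_TASK_PRIORITY
        arena.enqueue( job, tbb::priority_high );   // same, with an explicit priority
    #endif
        arena.execute( job );                       // joins the arena (or enqueues and waits)
        arena.wait_until_empty();                   // waits for all work in the arena, from any thread
        if( arena.is_active() ) { /* arena was initialized by one of the calls above */ }
    }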
End of changes. 20 change blocks. |
| 27 lines changed or deleted | | 66 lines changed or added | |
|
| tbb_config.h | | tbb_config.h | |
| | | | |
| skipping to change at line 43 | | skipping to change at line 43 | |
| The macros defined here are intended to control such aspects of TBB bui
ld as | | The macros defined here are intended to control such aspects of TBB bui
ld as | |
| - presence of compiler features | | - presence of compiler features | |
| - compilation modes | | - compilation modes | |
| - feature sets | | - feature sets | |
| - known compiler/platform issues | | - known compiler/platform issues | |
| **/ | | **/ | |
| | | | |
| #define __TBB_GCC_VERSION (__GNUC__ * 10000 + __GNUC_MINOR__ * 100 + __GNUC
_PATCHLEVEL__) | | #define __TBB_GCC_VERSION (__GNUC__ * 10000 + __GNUC_MINOR__ * 100 + __GNUC
_PATCHLEVEL__) | |
| | | | |
| #if __clang__ | | #if __clang__ | |
|
| | | /** According to the Clang documentation, the version numbering can be vendor specific **/ | |
| #define __TBB_CLANG_VERSION (__clang_major__ * 10000 + __clang_minor__
* 100 + __clang_patchlevel__) | | #define __TBB_CLANG_VERSION (__clang_major__ * 10000 + __clang_minor__
* 100 + __clang_patchlevel__) | |
| #endif | | #endif | |
| | | | |
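Both macros encode the version as major*10000 + minor*100 + patchlevel, so version checks reduce to integer comparisons. A couple of worked values (the versions themselves are only examples):

    // GCC 4.6.3   ->  __TBB_GCC_VERSION   == 4*10000 + 6*100 + 3 == 40603
    // Clang 3.1.0 ->  __TBB_CLANG_VERSION == 3*10000 + 1*100 + 0 == 30100
    #if __TBB_GCC_VERSION >= 40600          // the "GCC 4.6 or newer" style of check used below
        /* ... */
    #endif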
| /** Presence of compiler features **/ | | /** Presence of compiler features **/ | |
| | | | |
| #if __INTEL_COMPILER == 9999 && __INTEL_COMPILER_BUILD_DATE == 20110811 | | #if __INTEL_COMPILER == 9999 && __INTEL_COMPILER_BUILD_DATE == 20110811 | |
| /* Intel(R) Composer XE 2011 Update 6 incorrectly sets __INTEL_COMPILER. Fi
x it. */ | | /* Intel(R) Composer XE 2011 Update 6 incorrectly sets __INTEL_COMPILER. Fi
x it. */ | |
| #undef __INTEL_COMPILER | | #undef __INTEL_COMPILER | |
| #define __INTEL_COMPILER 1210 | | #define __INTEL_COMPILER 1210 | |
| #endif | | #endif | |
| | | | |
| skipping to change at line 74 | | skipping to change at line 75 | |
| compilers they mimic (GCC, MSVC). | | compilers they mimic (GCC, MSVC). | |
| | | | |
| TODO: The following conditions should be extended when support for new compilers/runtimes is added. | | TODO: The following conditions should be extended when support for new compilers/runtimes is added. | |
| */ | | */ | |
| | | | |
| #if __INTEL_COMPILER | | #if __INTEL_COMPILER | |
| /** On Windows environment when using Intel C++ compiler with Visual St
udio 2010*, | | /** On Windows environment when using Intel C++ compiler with Visual St
udio 2010*, | |
| the C++0x features supported by Visual C++ 2010 are enabled by defa
ult | | the C++0x features supported by Visual C++ 2010 are enabled by defa
ult | |
| TODO: find a way to know whether c++0x mode is specified on the command line on Windows **/ | | TODO: find a way to know whether c++0x mode is specified on the command line on Windows **/ | |
|
| #define __TBB_CPP11_VARIADIC_TEMPLATES_PRESENT __GXX_EXPERIMENTAL_CX | | #define __TBB_CPP11_VARIADIC_TEMPLATES_PRESENT ( __GXX_EXPERIMENTAL_ | |
| X0X__ && __VARIADIC_TEMPLATES | | CXX0X__ && __VARIADIC_TEMPLATES ) | |
| #define __TBB_CPP11_RVALUE_REF_PRESENT (__GXX_EXPERIMENTAL_C | | #define __TBB_CPP11_RVALUE_REF_PRESENT ( (__GXX_EXPERIMENTAL | |
| XX0X__ || _MSC_VER >= 1600) && (__INTEL_COMPILER >= 1200) | | _CXX0X__ || _MSC_VER >= 1600) && (__INTEL_COMPILER >= 1200) ) | |
| #if _MSC_VER >= 1600 | | #if _MSC_VER >= 1600 | |
|
| #define __TBB_EXCEPTION_PTR_PRESENT __INTEL_COMPILER > 13
00 \ | | #define __TBB_EXCEPTION_PTR_PRESENT ( __INTEL_COMPILER >
1300 \ | |
| /*ICC 12.1 Upd 10 and
13 beta Upd 2 fixed exception_ptr linking issue*/ \ | | /*ICC 12.1 Upd 10 and
13 beta Upd 2 fixed exception_ptr linking issue*/ \ | |
| || (__INTEL_COMPILER
== 1300 && __INTEL_COMPILER_BUILD_DATE >= 20120530) \ | | || (__INTEL_COMPILER
== 1300 && __INTEL_COMPILER_BUILD_DATE >= 20120530) \ | |
|
| || (__INTEL_COMPILER | | || (__INTEL_COMPILER | |
| == 1210 && __INTEL_COMPILER_BUILD_DATE >= 20120410) | | == 1210 && __INTEL_COMPILER_BUILD_DATE >= 20120410) ) | |
| /** libstdc++ that comes with GCC 4.6 uses C++ features not yet supported by current ICC (12.1) **/ | | /** libstdc++ that comes with GCC 4.6 uses C++11 features not supported by ICC 12.1. | |
| | |  * Because of that, ICC 12.1 does not support C++11 mode with GCC 4.6 (or higher), | |
| | |  * and therefore does not define the __GXX_EXPERIMENTAL_CXX0X__ macro **/ | |
| #elif (__TBB_GCC_VERSION >= 40404) && (__TBB_GCC_VERSION < 40600) | | #elif (__TBB_GCC_VERSION >= 40404) && (__TBB_GCC_VERSION < 40600) | |
|
| #define __TBB_EXCEPTION_PTR_PRESENT __GXX_EXPERIMENTAL_CXX0X | | #define __TBB_EXCEPTION_PTR_PRESENT ( __GXX_EXPERIMENTAL_CXX | |
| __ && __INTEL_COMPILER >= 1200 | | 0X__ && __INTEL_COMPILER >= 1200 ) | |
| | | #elif (__TBB_GCC_VERSION >= 40600) | |
| | | #define __TBB_EXCEPTION_PTR_PRESENT ( __GXX_EXPERIMENTAL_CXX | |
| | | 0X__ && __INTEL_COMPILER >= 1300 ) | |
| #else | | #else | |
| #define __TBB_EXCEPTION_PTR_PRESENT 0 | | #define __TBB_EXCEPTION_PTR_PRESENT 0 | |
| #endif | | #endif | |
| #define __TBB_MAKE_EXCEPTION_PTR_PRESENT (_MSC_VER >= 1700 ||
(__GXX_EXPERIMENTAL_CXX0X__ && __TBB_GCC_VERSION >= 40600)) | | #define __TBB_MAKE_EXCEPTION_PTR_PRESENT (_MSC_VER >= 1700 ||
(__GXX_EXPERIMENTAL_CXX0X__ && __TBB_GCC_VERSION >= 40600)) | |
|
| #define __TBB_STATIC_ASSERT_PRESENT __GXX_EXPERIMENTAL_CX | | #define __TBB_STATIC_ASSERT_PRESENT ( __GXX_EXPERIMENTAL_ | |
| X0X__ || (_MSC_VER >= 1600) | | CXX0X__ || (_MSC_VER >= 1600) ) | |
| #define __TBB_CPP11_TUPLE_PRESENT (_MSC_VER >= 1600) || | | #define __TBB_CPP11_TUPLE_PRESENT ( (_MSC_VER >= 1600) | |
| ((__GXX_EXPERIMENTAL_CXX0X__) && (__TBB_GCC_VERSION >= 40300)) | | || ((__GXX_EXPERIMENTAL_CXX0X__) && (__TBB_GCC_VERSION >= 40300)) ) | |
| /** TODO: re-check for compiler version greater than 12.1 if it support
s initializer lists**/ | | /** TODO: re-check for compiler version greater than 12.1 if it support
s initializer lists**/ | |
| #define __TBB_INITIALIZER_LISTS_PRESENT 0 | | #define __TBB_INITIALIZER_LISTS_PRESENT 0 | |
| #define __TBB_CONSTEXPR_PRESENT 0 | | #define __TBB_CONSTEXPR_PRESENT 0 | |
| #define __TBB_DEFAULTED_AND_DELETED_FUNC_PRESENT 0 | | #define __TBB_DEFAULTED_AND_DELETED_FUNC_PRESENT 0 | |
| #elif __clang__ | | #elif __clang__ | |
| //TODO: these options need to be rechecked | | //TODO: these options need to be rechecked | |
|
| #define __TBB_CPP11_VARIADIC_TEMPLATES_PRESENT (__GXX_EXPERIMENTAL_C | | /** on OS X* the only way to get C++11 is to use clang. For library feature | |
| XX0X__ && __TBB_CLANG_VERSION >= 20900) | | s (e.g. exception_ptr) libc++ is also | |
| #define __TBB_CPP11_RVALUE_REF_PRESENT (__GXX_EXPERIMENTAL_C | | * required. So there is no need to check GCC version for clang**/ | |
| XX0X__ && __TBB_CLANG_VERSION >= 20900) | | #define __TBB_CPP11_VARIADIC_TEMPLATES_PRESENT __has_feature(__cxx_ | |
| #define __TBB_EXCEPTION_PTR_PRESENT __GXX_EXPERIMENTAL_CX | | variadic_templates__) | |
| X0X__ | | #define __TBB_CPP11_RVALUE_REF_PRESENT __has_feature(__cxx_ | |
| #define __TBB_MAKE_EXCEPTION_PTR_PRESENT (__GXX_EXPERIMENTAL_C | | rvalue_references__) | |
| XX0X__ && __TBB_CLANG_VERSION > 30100)// TODO: check version | | #define __TBB_EXCEPTION_PTR_PRESENT (__GXX_EXPERIMENTAL_C | |
| #define __TBB_STATIC_ASSERT_PRESENT (__GXX_EXPERIMENTAL_C | | XX0X__ && (__cplusplus >= 201103L)) | |
| XX0X__ && __TBB_CLANG_VERSION >= 20900) | | #define __TBB_MAKE_EXCEPTION_PTR_PRESENT (__GXX_EXPERIMENTAL_C | |
| #define __TBB_CPP11_TUPLE_PRESENT ((__GXX_EXPERIMENTAL_ | | XX0X__ && (__cplusplus >= 201103L)) | |
| CXX0X__) && (__TBB_GCC_VERSION >= 40300)) | | #define __TBB_STATIC_ASSERT_PRESENT __has_feature(__cxx_s | |
| #define __TBB_INITIALIZER_LISTS_PRESENT 0 | | tatic_assert__) | |
| #define __TBB_CONSTEXPR_PRESENT (__GXX_EXPERIMENTAL_C | | /**Clang (preprocessor) has problems with dealing with expression havin | |
| XX0X__ && __TBB_CLANG_VERSION > 30100) | | g __has_include in #if's | |
| #define __TBB_DEFAULTED_AND_DELETED_FUNC_PRESENT 0 | | * used inside C++ code. (At least version that comes with OS X 10.8) * | |
| | | */ | |
| | | #if (__GXX_EXPERIMENTAL_CXX0X__ && __has_include(<tuple>)) | |
| | | #define __TBB_CPP11_TUPLE_PRESENT 1 | |
| | | #endif | |
| | | #if (__has_feature(__cxx_generalized_initializers__) && __has_include(< | |
| | | initializer_list>)) | |
| | | #define __TBB_INITIALIZER_LISTS_PRESENT 1 | |
| | | #endif | |
| | | #define __TBB_CONSTEXPR_PRESENT __has_feature(__cxx_c | |
| | | onstexpr__) | |
| | | #define __TBB_DEFAULTED_AND_DELETED_FUNC_PRESENT (__has_feature(__cxx_ | |
| | | defaulted_functions__) && __has_feature(__cxx_deleted_functions__)) | |
| #elif __GNUC__ | | #elif __GNUC__ | |
| #define __TBB_CPP11_VARIADIC_TEMPLATES_PRESENT __GXX_EXPERIMENTAL_CX
X0X__ | | #define __TBB_CPP11_VARIADIC_TEMPLATES_PRESENT __GXX_EXPERIMENTAL_CX
X0X__ | |
| #define __TBB_CPP11_RVALUE_REF_PRESENT __GXX_EXPERIMENTAL_CX
X0X__ | | #define __TBB_CPP11_RVALUE_REF_PRESENT __GXX_EXPERIMENTAL_CX
X0X__ | |
| /** __GCC_HAVE_SYNC_COMPARE_AND_SWAP_4 here is a substitution for _GLIB
CXX_ATOMIC_BUILTINS_4, which is a prerequisite | | /** __GCC_HAVE_SYNC_COMPARE_AND_SWAP_4 here is a substitution for _GLIB
CXX_ATOMIC_BUILTINS_4, which is a prerequisite | |
| for exception_ptr but cannot be used in this file because it is def
ined in a header, not by the compiler. | | for exception_ptr but cannot be used in this file because it is def
ined in a header, not by the compiler. | |
| If the compiler has no atomic intrinsics, the C++ library should no
t expect those as well. **/ | | If the compiler has no atomic intrinsics, the C++ library should no
t expect those as well. **/ | |
| #define __TBB_EXCEPTION_PTR_PRESENT ((__GXX_EXPERIMENTAL_
CXX0X__) && (__TBB_GCC_VERSION >= 40404) && __GCC_HAVE_SYNC_COMPARE_AND_SWA
P_4) | | #define __TBB_EXCEPTION_PTR_PRESENT ((__GXX_EXPERIMENTAL_
CXX0X__) && (__TBB_GCC_VERSION >= 40404) && __GCC_HAVE_SYNC_COMPARE_AND_SWA
P_4) | |
| #define __TBB_MAKE_EXCEPTION_PTR_PRESENT ((__GXX_EXPERIMENTAL_
CXX0X__) && (__TBB_GCC_VERSION >= 40600)) | | #define __TBB_MAKE_EXCEPTION_PTR_PRESENT ((__GXX_EXPERIMENTAL_
CXX0X__) && (__TBB_GCC_VERSION >= 40600)) | |
| #define __TBB_STATIC_ASSERT_PRESENT ((__GXX_EXPERIMENTAL_
CXX0X__) && (__TBB_GCC_VERSION >= 40300)) | | #define __TBB_STATIC_ASSERT_PRESENT ((__GXX_EXPERIMENTAL_
CXX0X__) && (__TBB_GCC_VERSION >= 40300)) | |
| #define __TBB_CPP11_TUPLE_PRESENT ((__GXX_EXPERIMENTAL_
CXX0X__) && (__TBB_GCC_VERSION >= 40300)) | | #define __TBB_CPP11_TUPLE_PRESENT ((__GXX_EXPERIMENTAL_
CXX0X__) && (__TBB_GCC_VERSION >= 40300)) | |
| | | | |
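The Clang branch above relies on the __has_feature and __has_include extensions. A generic sketch of the usual fallback idiom for compilers that lack them (not code from this header):

    /* Treat unknown feature/header queries as "absent" on compilers without the extensions. */
    #ifndef __has_feature
        #define __has_feature(x) 0
    #endif
    #ifndef __has_include
        #define __has_include(x) 0
    #endif

    #if __has_feature(cxx_rvalue_references) && __has_include(<utility>)
        /* safe to rely on std::move / std::forward here */
    #endif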
| skipping to change at line 234 | | skipping to change at line 247 | |
| #define TBB_USE_EXCEPTIONS 1 | | #define TBB_USE_EXCEPTIONS 1 | |
| #endif | | #endif | |
| #elif TBB_USE_EXCEPTIONS && __TBB_DEFINE_MIC | | #elif TBB_USE_EXCEPTIONS && __TBB_DEFINE_MIC | |
| #error Please do not set TBB_USE_EXCEPTIONS macro or set it to 0. | | #error Please do not set TBB_USE_EXCEPTIONS macro or set it to 0. | |
| #endif | | #endif | |
| | | | |
| #ifndef TBB_IMPLEMENT_CPP0X | | #ifndef TBB_IMPLEMENT_CPP0X | |
| /** By default, use C++0x classes if available **/ | | /** By default, use C++0x classes if available **/ | |
| #if __GNUC__==4 && __GNUC_MINOR__>=4 && __GXX_EXPERIMENTAL_CXX0X__ | | #if __GNUC__==4 && __GNUC_MINOR__>=4 && __GXX_EXPERIMENTAL_CXX0X__ | |
| #define TBB_IMPLEMENT_CPP0X 0 | | #define TBB_IMPLEMENT_CPP0X 0 | |
|
| | | #elif __clang__ && __cplusplus >= 201103L | |
| | | //TODO: consider introducing separate macros for each file? | |
| | | //prevent injection of the corresponding tbb names into the std:: namespace if native headers are present | |
| | | #if __has_include(<thread>) || __has_include(<condition_variable>) | |
| | | #define TBB_IMPLEMENT_CPP0X 0 | |
| | | #else | |
| | | #define TBB_IMPLEMENT_CPP0X 1 | |
| | | #endif | |
| #else | | #else | |
| #define TBB_IMPLEMENT_CPP0X 1 | | #define TBB_IMPLEMENT_CPP0X 1 | |
| #endif | | #endif | |
| #endif /* TBB_IMPLEMENT_CPP0X */ | | #endif /* TBB_IMPLEMENT_CPP0X */ | |
| | | | |
| /* TBB_USE_CAPTURED_EXCEPTION should be explicitly set to either 0 or 1, as
it is used as C++ const */ | | /* TBB_USE_CAPTURED_EXCEPTION should be explicitly set to either 0 or 1, as
it is used as C++ const */ | |
| #ifndef TBB_USE_CAPTURED_EXCEPTION | | #ifndef TBB_USE_CAPTURED_EXCEPTION | |
|
| /** linux pre-built TBB binary does not support exception_ptr. **/ | | /**TODO: enable it by default on OS X*, once it is enabled in pre-built | |
| #if __TBB_EXCEPTION_PTR_PRESENT && !defined(__GNUC__) | | binary **/ | |
| | | /** OS X* and IA64 pre-built TBB binaries do not support exception_ptr. | |
| | | **/ | |
| | | #if __TBB_EXCEPTION_PTR_PRESENT && !defined(__APPLE__) && !defined(__ia | |
| | | 64__) | |
| #define TBB_USE_CAPTURED_EXCEPTION 0 | | #define TBB_USE_CAPTURED_EXCEPTION 0 | |
| #else | | #else | |
| #define TBB_USE_CAPTURED_EXCEPTION 1 | | #define TBB_USE_CAPTURED_EXCEPTION 1 | |
| #endif | | #endif | |
| #else /* defined TBB_USE_CAPTURED_EXCEPTION */ | | #else /* defined TBB_USE_CAPTURED_EXCEPTION */ | |
| #if !TBB_USE_CAPTURED_EXCEPTION && !__TBB_EXCEPTION_PTR_PRESENT | | #if !TBB_USE_CAPTURED_EXCEPTION && !__TBB_EXCEPTION_PTR_PRESENT | |
| #error Current runtime does not support std::exception_ptr. Set TBB
_USE_CAPTURED_EXCEPTION and make sure that your code is ready to catch tbb:
:captured_exception. | | #error Current runtime does not support std::exception_ptr. Set TBB
_USE_CAPTURED_EXCEPTION and make sure that your code is ready to catch tbb:
:captured_exception. | |
| #endif | | #endif | |
| #endif /* defined TBB_USE_CAPTURED_EXCEPTION */ | | #endif /* defined TBB_USE_CAPTURED_EXCEPTION */ | |
| | | | |
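To illustrate what the TBB_USE_CAPTURED_EXCEPTION choice means for client code, a hedged sketch (tbb::captured_exception comes from tbb/tbb_exception.h; the body type and values are illustrative):

    #include <stdexcept>
    #include <iostream>
    #include "tbb/parallel_for.h"
    #include "tbb/tbb_exception.h"

    struct ThrowingBody {                                     // illustrative body
        void operator()( int ) const { throw std::runtime_error("boom"); }
    };

    void run() {
        try {
            tbb::parallel_for( 0, 100, ThrowingBody() );
        } catch( tbb::captured_exception& e ) {
            // TBB_USE_CAPTURED_EXCEPTION==1: TBB rethrows an approximation that
            // carries copies of the original exception's name() and what() strings.
            std::cerr << e.name() << ": " << e.what() << '\n';
        } catch( std::runtime_error& e ) {
            // TBB_USE_CAPTURED_EXCEPTION==0: the exact exception is transferred
            // via std::exception_ptr, so the original type is preserved.
            std::cerr << e.what() << '\n';
        }
    }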
| /** Check whether the request to use GCC atomics can be satisfied **/ | | /** Check whether the request to use GCC atomics can be satisfied **/ | |
| #if (TBB_USE_GCC_BUILTINS && !__TBB_GCC_BUILTIN_ATOMICS_PRESENT) | | #if (TBB_USE_GCC_BUILTINS && !__TBB_GCC_BUILTIN_ATOMICS_PRESENT) | |
| #error "GCC atomic built-ins are not supported." | | #error "GCC atomic built-ins are not supported." | |
| #endif | | #endif | |
| | | | |
| /** Internal TBB features & modes **/ | | /** Internal TBB features & modes **/ | |
| | | | |
| /** __TBB_WEAK_SYMBOLS_PRESENT denotes that the system supports the weak sy
mbol mechanism **/ | | /** __TBB_WEAK_SYMBOLS_PRESENT denotes that the system supports the weak sy
mbol mechanism **/ | |
|
| #define __TBB_WEAK_SYMBOLS_PRESENT !_WIN32 && !__APPLE__ && !__sun && ((__T
BB_GCC_VERSION >= 40000) || defined(__INTEL_COMPILER)) | | #define __TBB_WEAK_SYMBOLS_PRESENT ( !_WIN32 && !__APPLE__ && !__sun && ((_
_TBB_GCC_VERSION >= 40000) || __INTEL_COMPILER ) ) | |
| | | | |
| /** __TBB_DYNAMIC_LOAD_ENABLED describes the system possibility to load sha
red libraries at run time **/ | | /** __TBB_DYNAMIC_LOAD_ENABLED describes the system possibility to load sha
red libraries at run time **/ | |
| #ifndef __TBB_DYNAMIC_LOAD_ENABLED | | #ifndef __TBB_DYNAMIC_LOAD_ENABLED | |
| #define __TBB_DYNAMIC_LOAD_ENABLED 1 | | #define __TBB_DYNAMIC_LOAD_ENABLED 1 | |
| #endif | | #endif | |
| | | | |
| /** __TBB_SOURCE_DIRECTLY_INCLUDED is a mode used in whitebox testing when | | /** __TBB_SOURCE_DIRECTLY_INCLUDED is a mode used in whitebox testing when | |
| it's necessary to test internal functions not exported from TBB DLLs | | it's necessary to test internal functions not exported from TBB DLLs | |
| **/ | | **/ | |
| #if (_WIN32||_WIN64) && __TBB_SOURCE_DIRECTLY_INCLUDED | | #if (_WIN32||_WIN64) && __TBB_SOURCE_DIRECTLY_INCLUDED | |
| | | | |
| skipping to change at line 399 | | skipping to change at line 421 | |
| incompatible with that of overridden one". **/ | | incompatible with that of overridden one". **/ | |
| #define __TBB_DEFAULT_DTOR_THROW_SPEC_BROKEN 1 | | #define __TBB_DEFAULT_DTOR_THROW_SPEC_BROKEN 1 | |
| #endif | | #endif | |
| | | | |
| #if defined(_MSC_VER) && _MSC_VER < 1500 && !defined(__INTEL_COMPILER) | | #if defined(_MSC_VER) && _MSC_VER < 1500 && !defined(__INTEL_COMPILER) | |
| /** VS2005 and earlier do not allow declaring template class as a frien
d | | /** VS2005 and earlier do not allow declaring template class as a frien
d | |
| of classes defined in other namespaces. **/ | | of classes defined in other namespaces. **/ | |
| #define __TBB_TEMPLATE_FRIENDS_BROKEN 1 | | #define __TBB_TEMPLATE_FRIENDS_BROKEN 1 | |
| #endif | | #endif | |
| | | | |
|
| #if __GLIBC__==2 && __GLIBC_MINOR__==3 || __MINGW32__ || (__APPLE__ && __IN | | //TODO: recheck for different clang versions | |
| TEL_COMPILER==1200 && !TBB_USE_DEBUG) | | #if __GLIBC__==2 && __GLIBC_MINOR__==3 || __MINGW32__ || (__APPLE__ && (__c | |
| | | lang__ || __INTEL_COMPILER==1200 && !TBB_USE_DEBUG)) | |
| /** Macro controlling EH usages in TBB tests. | | /** Macro controlling EH usages in TBB tests. | |
| Some older versions of glibc crash when exception handling happens
concurrently. **/ | | Some older versions of glibc crash when exception handling happens
concurrently. **/ | |
| #define __TBB_THROW_ACROSS_MODULE_BOUNDARY_BROKEN 1 | | #define __TBB_THROW_ACROSS_MODULE_BOUNDARY_BROKEN 1 | |
| #else | | #else | |
| #define __TBB_THROW_ACROSS_MODULE_BOUNDARY_BROKEN 0 | | #define __TBB_THROW_ACROSS_MODULE_BOUNDARY_BROKEN 0 | |
| #endif | | #endif | |
| | | | |
| #if (_WIN32||_WIN64) && __INTEL_COMPILER == 1110 | | #if (_WIN32||_WIN64) && __INTEL_COMPILER == 1110 | |
| /** That's a bug in Intel compiler 11.1.044/IA-32/Windows, that leads t
o a worker thread crash on the thread's startup. **/ | | /** That's a bug in Intel compiler 11.1.044/IA-32/Windows, that leads t
o a worker thread crash on the thread's startup. **/ | |
| #define __TBB_ICL_11_1_CODE_GEN_BROKEN 1 | | #define __TBB_ICL_11_1_CODE_GEN_BROKEN 1 | |
| | | | |
| skipping to change at line 441 | | skipping to change at line 464 | |
| /** A bug in FreeBSD 8.0 results in kernel panic when there is contenti
on | | /** A bug in FreeBSD 8.0 results in kernel panic when there is contenti
on | |
| on a mutex created with this attribute. **/ | | on a mutex created with this attribute. **/ | |
| #define __TBB_PRIO_INHERIT_BROKEN 1 | | #define __TBB_PRIO_INHERIT_BROKEN 1 | |
| | | | |
| /** A bug in FreeBSD 8.0 results in test hanging when an exception occu
rs | | /** A bug in FreeBSD 8.0 results in test hanging when an exception occu
rs | |
| during (concurrent?) object construction by means of placement new
operator. **/ | | during (concurrent?) object construction by means of placement new
operator. **/ | |
| #define __TBB_PLACEMENT_NEW_EXCEPTION_SAFETY_BROKEN 1 | | #define __TBB_PLACEMENT_NEW_EXCEPTION_SAFETY_BROKEN 1 | |
| #endif /* __FreeBSD__ */ | | #endif /* __FreeBSD__ */ | |
| | | | |
| #if (__linux__ || __APPLE__) && __i386__ && defined(__INTEL_COMPILER) | | #if (__linux__ || __APPLE__) && __i386__ && defined(__INTEL_COMPILER) | |
|
| /** The Intel compiler for IA-32 (Linux|Mac OS X) crashes or generates | | /** The Intel compiler for IA-32 (Linux|OS X) crashes or generates | |
| incorrect code when __asm__ arguments have a cast to volatile. **/ | | incorrect code when __asm__ arguments have a cast to volatile. **/ | |
| #define __TBB_ICC_ASM_VOLATILE_BROKEN 1 | | #define __TBB_ICC_ASM_VOLATILE_BROKEN 1 | |
| #endif | | #endif | |
| | | | |
| #if !__INTEL_COMPILER && (_MSC_VER || __GNUC__==3 && __GNUC_MINOR__<=2) | | #if !__INTEL_COMPILER && (_MSC_VER || __GNUC__==3 && __GNUC_MINOR__<=2) | |
| /** Bug in GCC 3.2 and MSVC compilers that sometimes return 0 for __ali
gnof(T) | | /** Bug in GCC 3.2 and MSVC compilers that sometimes return 0 for __ali
gnof(T) | |
| when T has not yet been instantiated. **/ | | when T has not yet been instantiated. **/ | |
| #define __TBB_ALIGNOF_NOT_INSTANTIATED_TYPES_BROKEN 1 | | #define __TBB_ALIGNOF_NOT_INSTANTIATED_TYPES_BROKEN 1 | |
| #endif | | #endif | |
| | | | |
|
| #if __INTEL_COMPILER | | /* Actually, for Clang this should be named __TBB_CPP11_STD_FORWARD_PRESENT. | |
| | |  * But to check for the presence of a std:: library feature we would need to recognize | |
| | |  * whether the standard library in use is libstdc++ (the GNU one) or libc++ (the Clang one). | |
| | |  * Unfortunately that is not possible at the moment, so this is postponed. */ | |
| | | /* TODO: for Clang, rename it to __TBB_CPP11_STD_FORWARD_PRESENT and re-implement it. */ | |
| | | #if (__INTEL_COMPILER) || (__clang__ && __TBB_GCC_VERSION <= 40300) | |
| #define __TBB_CPP11_STD_FORWARD_BROKEN 1 | | #define __TBB_CPP11_STD_FORWARD_BROKEN 1 | |
| #else | | #else | |
| #define __TBB_CPP11_STD_FORWARD_BROKEN 0 | | #define __TBB_CPP11_STD_FORWARD_BROKEN 0 | |
| #endif | | #endif | |
| | | | |
| #if __TBB_DEFINE_MIC | | #if __TBB_DEFINE_MIC | |
| /** Main thread and user's thread have different default thread affinit
y masks. **/ | | /** Main thread and user's thread have different default thread affinit
y masks. **/ | |
| #define __TBB_MAIN_THREAD_AFFINITY_BROKEN 1 | | #define __TBB_MAIN_THREAD_AFFINITY_BROKEN 1 | |
| #endif | | #endif | |
| | | | |
| | | | |
End of changes. 13 change blocks. |
| 38 lines changed or deleted | | 80 lines changed or added | |
|
| tbb_machine.h | | tbb_machine.h | |
| | | | |
| skipping to change at line 242 | | skipping to change at line 242 | |
| #elif (TBB_USE_ICC_BUILTINS && __TBB_ICC_BUILTIN_ATOMICS_PRESENT) | | #elif (TBB_USE_ICC_BUILTINS && __TBB_ICC_BUILTIN_ATOMICS_PRESENT) | |
| #include "machine/icc_generic.h" | | #include "machine/icc_generic.h" | |
| #elif __i386__ | | #elif __i386__ | |
| #include "machine/linux_ia32.h" | | #include "machine/linux_ia32.h" | |
| #elif __x86_64__ | | #elif __x86_64__ | |
| #include "machine/linux_intel64.h" | | #include "machine/linux_intel64.h" | |
| #elif __ia64__ | | #elif __ia64__ | |
| #include "machine/linux_ia64.h" | | #include "machine/linux_ia64.h" | |
| #elif __powerpc__ | | #elif __powerpc__ | |
| #include "machine/mac_ppc.h" | | #include "machine/mac_ppc.h" | |
|
| | | #elif __arm__ | |
| | | #include "machine/gcc_armv7.h" | |
| #elif __TBB_GCC_BUILTIN_ATOMICS_PRESENT | | #elif __TBB_GCC_BUILTIN_ATOMICS_PRESENT | |
| #include "machine/gcc_generic.h" | | #include "machine/gcc_generic.h" | |
| #endif | | #endif | |
| #include "machine/linux_common.h" | | #include "machine/linux_common.h" | |
| | | | |
| #elif __APPLE__ | | #elif __APPLE__ | |
| //TODO: TBB_USE_GCC_BUILTINS is not used for Mac, Sun, Aix | | //TODO: TBB_USE_GCC_BUILTINS is not used for Mac, Sun, Aix | |
| #if (TBB_USE_ICC_BUILTINS && __TBB_ICC_BUILTIN_ATOMICS_PRESENT) | | #if (TBB_USE_ICC_BUILTINS && __TBB_ICC_BUILTIN_ATOMICS_PRESENT) | |
| #include "machine/icc_generic.h" | | #include "machine/icc_generic.h" | |
| #elif __i386__ | | #elif __i386__ | |
| | | | |
| skipping to change at line 399 | | skipping to change at line 401 | |
| } | | } | |
| | | | |
| //! Spin UNTIL the value of the variable is equal to a given value | | //! Spin UNTIL the value of the variable is equal to a given value | |
| /** T and U should be comparable types. */ | | /** T and U should be comparable types. */ | |
| template<typename T, typename U> | | template<typename T, typename U> | |
| void spin_wait_until_eq( const volatile T& location, const U value ) { | | void spin_wait_until_eq( const volatile T& location, const U value ) { | |
| atomic_backoff backoff; | | atomic_backoff backoff; | |
| while( location!=value ) backoff.pause(); | | while( location!=value ) backoff.pause(); | |
| } | | } | |
| | | | |
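A small sketch of how these helpers are typically used to wait for a flag published by another thread (the flag variable is illustrative; on real code paths the publishing side also needs an appropriate fence):

    volatile long ready = 0;                     // illustrative flag, set to 1 by a producer thread

    void wait_for_ready() {
        tbb::internal::atomic_backoff backoff;
        while( ready != 1 ) backoff.pause();     // the same loop spin_wait_until_eq performs
        // equivalently: tbb::internal::spin_wait_until_eq( ready, 1L );
    }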
|
| //TODO: add static_assert for the requirements stated below | | #if (__TBB_USE_GENERIC_PART_WORD_CAS && ( __TBB_BIG_ENDIAN==-1)) | |
| //TODO: check if it works with signed types | | #error generic implementation of part-word CAS was explicitly disabled | |
| | | for this configuration | |
| | | #endif | |
| | | | |
|
| | | #if (__TBB_BIG_ENDIAN!=-1) | |
| // The following restrictions/limitations apply to this operation: | | // The following restrictions/limitations apply to this operation: | |
| // - T should be unsigned, otherwise sign propagation will break correctne
ss of bit manipulations. | | // - T should be unsigned, otherwise sign propagation will break correctne
ss of bit manipulations. | |
| // - T should be integer type of at most 4 bytes, for the casts and calcul
ations to work. | | // - T should be integer type of at most 4 bytes, for the casts and calcul
ations to work. | |
| // (Together, these rules limit applicability of Masked CAS to uint8_t
and uint16_t only, | | // (Together, these rules limit applicability of Masked CAS to uint8_t
and uint16_t only, | |
| // as it does nothing useful for 4 bytes). | | // as it does nothing useful for 4 bytes). | |
| // - The operation assumes that the architecture consistently uses either
little-endian or big-endian: | | // - The operation assumes that the architecture consistently uses either
little-endian or big-endian: | |
| // it does not support mixed-endian or page-specific bi-endian archite
ctures. | | // it does not support mixed-endian or page-specific bi-endian archite
ctures. | |
| // This function is the only use of __TBB_BIG_ENDIAN. | | // This function is the only use of __TBB_BIG_ENDIAN. | |
|
| #if (__TBB_BIG_ENDIAN!=-1) | | // | |
| #if ( __TBB_USE_GENERIC_PART_WORD_CAS) | | //TODO: add static_assert for the requirements stated above | |
| #error generic implementation of part-word CAS was explicitly disab | | //TODO: check if it works with signed types | |
| led for this configuration | | | |
| #endif | | | |
| template<typename T> | | template<typename T> | |
| inline T __TBB_MaskedCompareAndSwap (volatile T * const ptr, const T value,
const T comparand ) { | | inline T __TBB_MaskedCompareAndSwap (volatile T * const ptr, const T value,
const T comparand ) { | |
| struct endianness{ static bool is_big_endian(){ | | struct endianness{ static bool is_big_endian(){ | |
| #ifndef __TBB_BIG_ENDIAN | | #ifndef __TBB_BIG_ENDIAN | |
| const uint32_t probe = 0x03020100; | | const uint32_t probe = 0x03020100; | |
| return (((const char*)(&probe))[0]==0x03); | | return (((const char*)(&probe))[0]==0x03); | |
| #elif (__TBB_BIG_ENDIAN==0) || (__TBB_BIG_ENDIAN==1) | | #elif (__TBB_BIG_ENDIAN==0) || (__TBB_BIG_ENDIAN==1) | |
| return __TBB_BIG_ENDIAN; | | return __TBB_BIG_ENDIAN; | |
| #else | | #else | |
| #error unexpected value of __TBB_BIG_ENDIAN | | #error unexpected value of __TBB_BIG_ENDIAN | |
| | | | |
| skipping to change at line 451 | | skipping to change at line 454 | |
| // Cast shuts up /Wp64 warning | | // Cast shuts up /Wp64 warning | |
| const uint32_t big_result = (uint32_t)__TBB_machine_cmpswp4( aligne
d_ptr, big_value, big_comparand ); | | const uint32_t big_result = (uint32_t)__TBB_machine_cmpswp4( aligne
d_ptr, big_value, big_comparand ); | |
| if( big_result == big_comparand // CAS succeeded | | if( big_result == big_comparand // CAS succeeded | |
| || ((big_result ^ big_comparand) & mask) != 0) // CAS failed an
d the bits of interest have changed | | || ((big_result ^ big_comparand) & mask) != 0) // CAS failed an
d the bits of interest have changed | |
| { | | { | |
| return T((big_result & mask) >> bits_to_shift); | | return T((big_result & mask) >> bits_to_shift); | |
| } | | } | |
| else continue; // CAS failed bu
t the bits of interest left unchanged | | else continue; // CAS failed bu
t the bits of interest left unchanged | |
| } | | } | |
| } | | } | |
|
| #endif | | #endif //__TBB_BIG_ENDIAN!=-1 | |
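To make the masked-CAS idea above concrete, a compiler-agnostic sketch of emulating a one-byte CAS with a four-byte CAS on the containing aligned word. Little-endian layout only; the function name and the use of GCC's __sync builtin are illustrative and are not TBB's primitives.

    #include <stdint.h>

    // Emulate CAS on a single byte using a 4-byte CAS on the aligned word that
    // contains it (little-endian byte ordering assumed for the shift computation).
    inline uint8_t byte_cas_via_word( volatile uint8_t* ptr, uint8_t value, uint8_t comparand ) {
        volatile uint32_t* word = (volatile uint32_t*)( (uintptr_t)ptr & ~(uintptr_t)3 );
        const unsigned shift = ( (uintptr_t)ptr & 3 ) * 8;          // byte position inside the word
        const uint32_t mask  = (uint32_t)0xFF << shift;

        for(;;) {
            const uint32_t snapshot      = *word;                    // bytes we must not disturb
            const uint32_t big_value     = (snapshot & ~mask) | ((uint32_t)value     << shift);
            const uint32_t big_comparand = (snapshot & ~mask) | ((uint32_t)comparand << shift);
            const uint32_t result = __sync_val_compare_and_swap( word, big_comparand, big_value );
            if( result == big_comparand                              // CAS succeeded,
                || ((result ^ big_comparand) & mask) != 0 )          // or our byte really differed
                return (uint8_t)((result & mask) >> shift);
            // otherwise an unrelated byte changed under us: retry with a fresh snapshot
        }
    }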
| template<size_t S, typename T> | | template<size_t S, typename T> | |
| inline T __TBB_CompareAndSwapGeneric (volatile void *ptr, T value, T compar
and ); | | inline T __TBB_CompareAndSwapGeneric (volatile void *ptr, T value, T compar
and ); | |
| | | | |
| template<> | | template<> | |
| inline uint8_t __TBB_CompareAndSwapGeneric <1,uint8_t> (volatile void *ptr,
uint8_t value, uint8_t comparand ) { | | inline uint8_t __TBB_CompareAndSwapGeneric <1,uint8_t> (volatile void *ptr,
uint8_t value, uint8_t comparand ) { | |
| #if __TBB_USE_GENERIC_PART_WORD_CAS | | #if __TBB_USE_GENERIC_PART_WORD_CAS | |
| return __TBB_MaskedCompareAndSwap<uint8_t>((volatile uint8_t *)ptr,valu
e,comparand); | | return __TBB_MaskedCompareAndSwap<uint8_t>((volatile uint8_t *)ptr,valu
e,comparand); | |
| #else | | #else | |
| return __TBB_machine_cmpswp1(ptr,value,comparand); | | return __TBB_machine_cmpswp1(ptr,value,comparand); | |
| #endif | | #endif | |
| | | | |
| skipping to change at line 861 | | skipping to change at line 864 | |
| tbb::internal::atomic_backoff b; | | tbb::internal::atomic_backoff b; | |
| for(;;) { | | for(;;) { | |
| uintptr_t tmp = *(volatile uintptr_t *)operand; | | uintptr_t tmp = *(volatile uintptr_t *)operand; | |
| uintptr_t result = __TBB_CompareAndSwapW(operand, tmp&addend, tmp); | | uintptr_t result = __TBB_CompareAndSwapW(operand, tmp&addend, tmp); | |
| if( result==tmp ) break; | | if( result==tmp ) break; | |
| b.pause(); | | b.pause(); | |
| } | | } | |
| } | | } | |
| #endif | | #endif | |
| | | | |
|
| | | #if __TBB_PREFETCHING | |
| | | #ifndef __TBB_cl_prefetch | |
| | | #error This platform does not define cache management primitives required f | |
| | | or __TBB_PREFETCHING | |
| | | #endif | |
| | | | |
| | | #ifndef __TBB_cl_evict | |
| | | #define __TBB_cl_evict(p) | |
| | | #endif | |
| | | #endif | |
| | | | |
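As an illustration, a GCC-style port might map these cache-management primitives onto compiler builtins (a hypothetical mapping, not taken from any TBB port header):

    #define __TBB_cl_prefetch(p)  __builtin_prefetch( (p), 0, 3 )  /* read prefetch, high temporal locality */
    #define __TBB_cl_evict(p)                                      /* no cheap eviction available: no-op */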
| #ifndef __TBB_Flag | | #ifndef __TBB_Flag | |
| typedef unsigned char __TBB_Flag; | | typedef unsigned char __TBB_Flag; | |
| #endif | | #endif | |
| typedef __TBB_atomic __TBB_Flag __TBB_atomic_flag; | | typedef __TBB_atomic __TBB_Flag __TBB_atomic_flag; | |
| | | | |
| #ifndef __TBB_TryLockByte | | #ifndef __TBB_TryLockByte | |
| inline bool __TBB_TryLockByte( __TBB_atomic_flag &flag ) { | | inline bool __TBB_TryLockByte( __TBB_atomic_flag &flag ) { | |
| return __TBB_machine_cmpswp1(&flag,1,0)==0; | | return __TBB_machine_cmpswp1(&flag,1,0)==0; | |
| } | | } | |
| #endif | | #endif | |
| | | | |
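A tiny sketch of how __TBB_TryLockByte combines with atomic_backoff to form a byte spin lock (illustrative only; platform ports may define the corresponding lock and unlock primitives differently):

    inline void lock_byte( __TBB_atomic_flag& flag ) {
        tbb::internal::atomic_backoff backoff;
        while( !__TBB_TryLockByte(flag) ) backoff.pause();   // spin until the 0 -> 1 transition succeeds
    }

    inline void unlock_byte( __TBB_atomic_flag& flag ) {
        flag = 0;                                            // a real port pairs this with a release fence
    }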
End of changes. 6 change blocks. |
| 8 lines changed or deleted | | 22 lines changed or added | |
|
| tbb_stddef.h | | tbb_stddef.h | |
| | | | |
| skipping to change at line 37 | | skipping to change at line 37 | |
| */ | | */ | |
| | | | |
| #ifndef __TBB_tbb_stddef_H | | #ifndef __TBB_tbb_stddef_H | |
| #define __TBB_tbb_stddef_H | | #define __TBB_tbb_stddef_H | |
| | | | |
| // Marketing-driven product version | | // Marketing-driven product version | |
| #define TBB_VERSION_MAJOR 4 | | #define TBB_VERSION_MAJOR 4 | |
| #define TBB_VERSION_MINOR 1 | | #define TBB_VERSION_MINOR 1 | |
| | | | |
| // Engineering-focused interface version | | // Engineering-focused interface version | |
|
| #define TBB_INTERFACE_VERSION 6102 | | #define TBB_INTERFACE_VERSION 6103 | |
| #define TBB_INTERFACE_VERSION_MAJOR TBB_INTERFACE_VERSION/1000 | | #define TBB_INTERFACE_VERSION_MAJOR TBB_INTERFACE_VERSION/1000 | |
| | | | |
| // The oldest major interface version still supported | | // The oldest major interface version still supported | |
| // To be used in SONAME, manifests, etc. | | // To be used in SONAME, manifests, etc. | |
| #define TBB_COMPATIBLE_INTERFACE_VERSION 2 | | #define TBB_COMPATIBLE_INTERFACE_VERSION 2 | |
| | | | |
| #define __TBB_STRING_AUX(x) #x | | #define __TBB_STRING_AUX(x) #x | |
| #define __TBB_STRING(x) __TBB_STRING_AUX(x) | | #define __TBB_STRING(x) __TBB_STRING_AUX(x) | |
| | | | |
| // We do not need defines below for resource processing on windows | | // We do not need defines below for resource processing on windows | |
| | | | |
| skipping to change at line 350 | | skipping to change at line 350 | |
| }; | | }; | |
| | | | |
| #if _MSC_VER | | #if _MSC_VER | |
| //! Microsoft std::allocator has non-standard extension that strips const f
rom a type. | | //! Microsoft std::allocator has non-standard extension that strips const f
rom a type. | |
| template<typename T> | | template<typename T> | |
| struct allocator_type<const T> { | | struct allocator_type<const T> { | |
| typedef T value_type; | | typedef T value_type; | |
| }; | | }; | |
| #endif | | #endif | |
| | | | |
|
| //! A function to select either 32-bit or 64-bit value, depending on machin | | //! A template to select either 32-bit or 64-bit constant as compile time, | |
| e word size. | | depending on machine word size. | |
| inline size_t size_t_select( unsigned u, unsigned long long ull ) { | | template <unsigned u, unsigned long long ull > | |
| /* Explicit cast of the arguments to size_t is done to avoid compiler w | | struct select_size_t_constant { | |
| arnings | | //Explicit cast is needed to avoid compiler warnings about possible tru | |
| (e.g. by Clang and MSVC) about possible truncation. The value of the | | ncation. | |
| right size, | | //The value of the right size, which is selected by ?:, is anyway not | |
| which is selected by ?:, is anyway not truncated or promoted. | | truncated or promoted. | |
| MSVC still warns if this trick is applied directly to constants, hen | | static const size_t value = (size_t)((sizeof(size_t)==sizeof(u)) ? u : | |
| ce this function. */ | | ull); | |
| return (sizeof(size_t)==sizeof(u)) ? size_t(u) : size_t(ull); | | }; | |
| } | | | |
| | | | |
|
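The point of turning the function into a template is that the selected value becomes a compile-time constant; usage looks roughly like this (the constants are illustrative):

    // Picks the 32-bit constant on 32-bit builds and the 64-bit one otherwise,
    // and the result can appear wherever a constant expression is required.
    static const size_t poison_value =
        tbb::internal::select_size_t_constant<0xDEADBEEFU, 0xDEADBEEFDEADBEEFULL>::value;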
| | | //! A function to check whether the passed pointer is aligned on the specified boundary | |
| template<typename T> | | template<typename T> | |
|
| static inline bool is_aligned(T* pointer, uintptr_t alignment) { | | inline bool is_aligned(T* pointer, uintptr_t alignment) { | |
| return 0==((uintptr_t)pointer & (alignment-1)); | | return 0==((uintptr_t)pointer & (alignment-1)); | |
| } | | } | |
| | | | |
|
| | | //! A function to check whether the passed integer is a power of 2 | |
| | | template<typename integer_type> | |
| | | inline bool is_power_of_two(integer_type arg) { | |
| | | return arg && (0 == (arg & (arg - 1))); | |
| | | } | |
| | | | |
| | | //! A function to compute arg modulo divisor where divisor is a power of 2. | |
| | | template<typename argument_integer_type, typename divisor_integer_type> | |
| | | inline argument_integer_type modulo_power_of_two(argument_integer_type arg, | |
| | | divisor_integer_type divisor) { | |
| | | // Divisor is assumed to be a power of two (which is valid for current | |
| | | uses). | |
| | | __TBB_ASSERT( is_power_of_two(divisor), "Divisor should be a power of t | |
| | | wo" ); | |
| | | return (arg & (divisor - 1)); | |
| | | } | |
| | | | |
| | | //! A function to determine whether arg is divisor multiplied by a power of 2. | |
| | | // i.e. for strictly positive i and j, with j a power of 2, | |
| | | // determines whether i==j<<k for some nonnegative k (so i==j yields true). | |
| | | template<typename argument_integer_type, typename divisor_integer_type> | |
| | | inline bool is_power_of_two_factor(argument_integer_type arg, divisor_integ | |
| | | er_type divisor) { | |
| | | // Divisor is assumed to be a power of two (which is valid for current | |
| | | uses). | |
| | | __TBB_ASSERT( is_power_of_two(divisor), "Divisor should be a power of t | |
| | | wo" ); | |
| | | return 0 == (arg & (arg - divisor)); | |
| | | } | |
| | | | |
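A few worked values for the helpers above (assuming the definitions shown; the asserts are only a sanity check):

    #include <cassert>
    #include "tbb/tbb_stddef.h"

    void sanity_checks() {
        using namespace tbb::internal;
        assert( is_power_of_two( 64u ) );
        assert( !is_power_of_two( 48u ) );
        assert( modulo_power_of_two( 37u, 8u ) == 5u );   // 37 % 8, computed as 37 & 7
        assert( is_power_of_two_factor( 32u, 8u ) );      // 32 == 8<<2
        assert( !is_power_of_two_factor( 24u, 8u ) );     // 24 == 8*3 and 3 is not a power of 2
        assert( is_aligned( (void*)1024, 16 ) );          // address 1024 is 16-byte aligned
    }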
| // Struct to be used as a version tag for inline functions. | | // Struct to be used as a version tag for inline functions. | |
| /** Version tag can be necessary to prevent loader on Linux from using the
wrong | | /** Version tag can be necessary to prevent loader on Linux from using the
wrong | |
| symbol in debug builds (when inline functions are compiled as out-of-li
ne). **/ | | symbol in debug builds (when inline functions are compiled as out-of-li
ne). **/ | |
| struct version_tag_v3 {}; | | struct version_tag_v3 {}; | |
| | | | |
| typedef version_tag_v3 version_tag; | | typedef version_tag_v3 version_tag; | |
| | | | |
| } // internal | | } // internal | |
| //! @endcond | | //! @endcond | |
| | | | |
| | | | |
End of changes. 5 change blocks. |
| 14 lines changed or deleted | | 45 lines changed or added | |
|